135 *data = matrix_encoding;
149 w_align = 1 << desc->log2_chroma_w;
150 h_align = 1 << desc->log2_chroma_h;
153 switch (s->pix_fmt) {
317 w_align = FFMAX(w_align, 8);
337 for (i = 0; i < 4; i++)
344 int chroma_shift = desc->log2_chroma_w;
349 align = FFMAX(linesize_align[0], linesize_align[3]);
350 linesize_align[1] <<= chroma_shift;
351 linesize_align[2] <<= chroma_shift;
352 align = FFMAX3(align, linesize_align[1], linesize_align[2]);
362 *xpos = (pos&1) * 128;
363 *ypos = ((pos>>1)^(pos<4)) * 128;
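Lines 362-363 implement the mapping used by avcodec_enum_to_chroma_pos(): an AVChromaLocation value is converted to swscale-style x/y chroma positions expressed in 1/256ths of a luma sample. A small usage sketch (a hedged illustration, not part of this file):

/* Hedged example: query the fractional chroma position for the common
 * "left" siting (the MPEG-2/MPEG-4 default). */
#include <libavcodec/avcodec.h>

static void show_chroma_pos(void)
{
    int xpos, ypos;
    if (avcodec_enum_to_chroma_pos(&xpos, &ypos, AVCHROMA_LOC_LEFT) == 0) {
        /* avcodec_enum_to_chroma_pos() reports xpos == 0, ypos == 128 here:
         * chroma co-sited with luma horizontally, centered vertically. */
    }
}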
381 int buf_size, int align)
386 frame->nb_samples, sample_fmt,
388 if (buf_size < needed_size)
400 (uint8_t *)(intptr_t)buf, nb_channels, frame->nb_samples,
401 sample_fmt, align)) < 0) {
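The fragment above (lines 381-401) belongs to avcodec_fill_audio_frame(): buf_size is validated against the size reported by av_samples_get_buffer_size() and the frame's data/linesize pointers are then set into the caller's buffer with av_samples_fill_arrays(). A minimal usage sketch with illustrative parameters (the frame does not take ownership of buf):

/* Hedged sketch: expose an existing interleaved S16 stereo buffer
 * through an AVFrame without copying. */
#include <libavcodec/avcodec.h>
#include <libavutil/samplefmt.h>

static int wrap_buffer(AVFrame *frame, uint8_t *buf, int buf_size)
{
    frame->nb_samples = 1024;              /* illustrative value */
    frame->format     = AV_SAMPLE_FMT_S16;
    /* align = 0 selects the default alignment; the call fails if buf_size
     * is smaller than what av_samples_get_buffer_size() requires. */
    return avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S16,
                                    buf, buf_size, 0);
}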
421 for (p = 0; p < desc->nb_components; p++) {
422 uint8_t *dst = frame->data[p];
423 int is_chroma = p == 1 || p == 2;
426 if (desc->comp[0].depth >= 9) {
427 ((uint16_t*)dst)[0] = c[p];
429 dst += frame->linesize[p];
430 for (y = 1; y < height; y++) {
431 memcpy(dst, frame->data[p], 2*bytes);
432 dst += frame->linesize[p];
435 for (y = 0; y < height; y++) {
436 memset(dst, c[p], bytes);
437 dst += frame->linesize[p];
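Lines 421-437 are the plane-fill loop of ff_color_frame(): for every plane the first row is written explicitly (one uint16_t per pixel at depths of 9 bits or more, memset() otherwise) and the remaining rows are copied from it, advancing by the plane's linesize. A hedged usage sketch (ff_color_frame() is an internal libavcodec helper for planar formats; the values below are limited-range YUV black):

/* Hedged sketch: paint a planar YUV frame black. */
static void paint_black(AVFrame *frame)
{
    const int black[4] = { 16, 128, 128, 0 };
    ff_color_frame(frame, black);
}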
464 return "unknown_codec";
581 if (be < 0 || be > 1)
607 uint32_t tag, int bits_per_coded_sample, int64_t bitrate,
608 uint8_t *extradata, int frame_size, int frame_bytes)
611 int framecount = (ba > 0 && frame_bytes / ba > 0) ? frame_bytes / ba : 1;
614 if (bps > 0 && ch > 0 && frame_bytes > 0 && ch < 32768 && bps < 32768)
615 return (frame_bytes * 8LL) / (bps * ch);
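Line 615 is the exact-bits-per-sample path of the duration helper: when the codec's bits per sample and channel count are known, the per-channel sample count is simply frame_bytes * 8 / (bps * ch). As an illustrative example, a 4096-byte packet of 16-bit stereo PCM yields 4096 * 8 / (16 * 2) = 1024 samples.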
616 bps = bits_per_coded_sample;
634 if (framecount > INT_MAX/1024)
636 return 1024 * framecount;
646 return 256 * sr / 245;
648 return 588 * sr / 44100;
652 return (480 << (sr / 22050));
656 return sr <= 24000 ? 576 : 1152;
676 if (frame_bytes > 0) {
679 return 240 * (frame_bytes / 32);
681 return 256 * (frame_bytes / 64);
683 return 160 * (frame_bytes / 20);
688 return frame_bytes * 8 / bps;
691 if (ch > 0 && ch < INT_MAX/16) {
695 return frame_bytes / (40 * ch) * 256;
697 return (frame_bytes - 4 * ch) / (128 * ch) * 256;
699 return frame_bytes / (9 * ch) * 16;
702 frame_bytes /= 16 * ch;
703 if (frame_bytes > INT_MAX / 28)
705 return frame_bytes * 28;
710 return (frame_bytes - 4 * ch) * 2 / ch;
712 return (frame_bytes - 4) * 2 / ch;
714 return (frame_bytes - 8) * 2;
718 return frame_bytes * 14LL / (8 * ch);
721 return (frame_bytes / 128) * 224 / ch;
723 return (frame_bytes - 6 - ch) / ch;
725 return (frame_bytes - 8) / ch;
727 return (frame_bytes - 2 * ch) / ch;
729 return 3 * frame_bytes / ch;
731 return 6 * frame_bytes / ch;
733 return 2 * (frame_bytes / (5 * ch));
736 return 4 * frame_bytes / ch;
743 return frame_bytes / ch;
745 return frame_bytes * 2 / ch;
751 int blocks = frame_bytes / ba;
755 if (bps < 2 || bps > 5)
757 tmp = blocks * (1LL + (ba - 4 * ch) / (bps * ch) * 8);
760 tmp = blocks * (((ba - 16LL) * 2 / 3 * 4) / ch);
763 tmp = blocks * (1 + (ba - 4LL * ch) * 2 / ch);
766 tmp = blocks * ((ba - 4LL * ch) * 2 / ch);
769 tmp = blocks * (2 + (ba - 7LL * ch) * 2LL / ch);
772 tmp = blocks * (ba - 16LL) * 2 / ch;
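Lines 751-772 handle block-aligned ADPCM codecs: the packet is split into frame_bytes / ba whole blocks and each formula gives the samples decoded per block. As an illustrative example for the formula on line 757 (the codec case labels are not visible in this excerpt), a 4-bit, 2-channel stream with ba = 1024 decodes 1 + (1024 - 4*2) / (4*2) * 8 = 1017 samples per block.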
786 if (bps < 4 || frame_bytes < 3)
788 return 2 * ((frame_bytes - 3) / ((bps * 2 / 8) * ch));
790 if (bps < 4 || frame_bytes < 4)
792 return (frame_bytes - 4) / ((FFALIGN(ch, 2) * bps) / 8);
794 return 2 * (frame_bytes / ((bps + 4) / 4)) / ch;
806 if (bitrate > 0 && frame_bytes > 0 && sr > 0 && ba > 1) {
808 return (frame_bytes * 8LL * sr) / bitrate;
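Lines 806-808 are the last resort: when only the bitrate is known, the duration is estimated as frame_bytes * 8 * sample_rate / bitrate. For example (illustrative numbers), a 417-byte frame at 128000 b/s and 44100 Hz gives 417 * 8 * 44100 / 128000 = 1149 samples.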
818 #if FF_API_OLD_CHANNEL_LAYOUT
836 #if FF_API_OLD_CHANNEL_LAYOUT
927 f->owner[0] = f->owner[1] = avctx;
939 f->owner[0] = f->owner[1] = NULL;
987 uint32_t *av_restrict state)
995 for (i = 0; i < 3; i++) {
998 if (tmp == 0x100 || p == end)
1003 if (p[-1] > 1 ) p += 3;
1004 else if (p[-2] ) p += 2;
1005 else if (p[-3]|(p[-1]-1)) p++;
1012 p = FFMIN(p, end) - 4;
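Lines 987-1012 belong to avpriv_find_start_code(), the scanner used across libavcodec to locate MPEG-style 00 00 01 xx start codes; state carries the last four bytes between calls, so codes that straddle buffer boundaries are still detected. A hedged usage sketch (avpriv_find_start_code() is declared in libavcodec's internal headers):

/* Hedged sketch: list every start code value found in a buffer. */
#include <stdint.h>
#include <stdio.h>

static void dump_start_codes(const uint8_t *buf, const uint8_t *end)
{
    uint32_t state = -1;                      /* no bytes seen yet */
    const uint8_t *p = buf;
    while (p < end) {
        p = avpriv_find_start_code(p, end, &state);
        if ((state & 0xFFFFFF00) == 0x100)    /* a 00 00 01 xx sequence just ended */
            printf("start code 0x%02x\n", (unsigned)(state & 0xff));
    }
}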
1025 *size = sizeof(*props);
1065 unsigned low = bcd & 0xf;
1066 unsigned high = bcd >> 4;
1067 if (low > 9 || high > 9)
1069 return low + 10*high;
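bcd2uint() (lines 1065-1069) converts one packed BCD byte to its decimal value: for example, 0x59 decodes to 5 * 10 + 9 = 59, while bytes whose nibbles exceed 9 (such as 0x5A) are rejected by the check on line 1067.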
1073 void **data, size_t *sei_size)
1088 tc = (uint32_t*)sd->data;
1091 *sei_size = sizeof(uint32_t) * 4;
1095 sei_data = (uint8_t*)*data + prefix_len;
1100 for (int j = 1; j <= m; j++) {
1101 uint32_t tcsmpte = tc[j];
1102 unsigned hh = bcd2uint(tcsmpte & 0x3f);
1103 unsigned mm = bcd2uint(tcsmpte>>8 & 0x7f);
1105 unsigned ff = bcd2uint(tcsmpte>>24 & 0x3f);
1106 unsigned drop = tcsmpte & 1<<30 && !0;
1113 pc = !!(tcsmpte & 1 << 7);
1115 pc = !!(tcsmpte & 1 << 23);
1116 ff = (ff + pc) & 0x7f;
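Lines 1100-1116 unpack each 32-bit SMPTE ST 12-1 word stored in AV_FRAME_DATA_S12M_TIMECODE side data: hours, minutes, seconds and frames are BCD fields (the seconds extraction sits on a line elided from this excerpt), bit 30 is the drop-frame flag, and the field/phase bits adjust the frame count for high-rate content. A hedged worked example with an illustrative word value:

/* Illustrative decode of one timecode word, following the masks above:
 *   tcsmpte = 0x13594510
 *     hh = bcd2uint(0x10 & 0x3f) = 10
 *     mm = bcd2uint(0x45 & 0x7f) = 45
 *     ss = bcd2uint(0x59 & 0x7f) = 59   (bits 16..22)
 *     ff = bcd2uint(0x13 & 0x3f) = 13
 *   i.e. 10:45:59:13; bit 30 is clear here, so it is a non-drop timecode. */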
1147 if (!bits_per_coded_sample) {
1158 const int *array_valid_values, int default_value)
1163 ref_val = array_valid_values[i];
1164 if (ref_val == INT_MAX)
1172 "%s %d are not supported. Set to default value : %d\n", val_name, val, default_value);
1173 return default_value;
1173 return default_value;
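Lines 1158-1173 implement ff_int_from_list_or_default(): the candidate value is compared against an INT_MAX-terminated list of allowed values, and if it is not found a warning is logged and the default is returned. A hedged usage sketch (ff_int_from_list_or_default() is internal to libavcodec; the allowed-rate list is illustrative):

/* Hedged sketch: constrain a requested sample rate to a fixed set. */
#include <limits.h>

static int pick_sample_rate(void *log_ctx, int requested)
{
    static const int allowed[] = { 32000, 44100, 48000, INT_MAX };
    return ff_int_from_list_or_default(log_ctx, "sample rate",
                                       requested, allowed, 48000);
}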
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
int frame_size
Number of samples per channel in an audio frame.
It's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it be (in the first position) for now. Options: then comes the options array. This is what will define the user-accessible options. For example
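A minimal sketch of such an options table, assuming a hypothetical FooContext filter (names, offsets and flags below are illustrative, not from this file):

/* Hypothetical AVOption table exposing a single "strength" option. */
#include <stddef.h>
#include "libavutil/opt.h"

typedef struct FooContext {
    const AVClass *class;       /* kept in the first position, as noted above */
    double strength;
} FooContext;

#define OFFSET(x) offsetof(FooContext, x)
static const AVOption foo_options[] = {
    { "strength", "effect strength", OFFSET(strength), AV_OPT_TYPE_DOUBLE,
      { .dbl = 1.0 }, 0.0, 10.0,
      AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
    { NULL }
};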
#define FF_ENABLE_DEPRECATION_WARNINGS
@ AV_SAMPLE_FMT_FLTP
float, planar
@ AV_PIX_FMT_YUV420P9LE
planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
#define AV_LOG_WARNING
Something somehow does not look correct.
uint8_t * extradata
Extra binary data needed for initializing the decoder, codec-dependent.
AVPixelFormat
Pixel format.
@ AV_CODEC_ID_ADPCM_IMA_QT
Filter: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions
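A minimal sketch of a query_formats callback following that mechanism (a hedged illustration using the classic ff_make_format_list()/ff_set_common_formats() pattern; exact helper names vary between libavfilter versions):

/* Hedged sketch: advertise a single pixel format on every link of the filter. */
static int query_formats(AVFilterContext *ctx)
{
    static const int pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
}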
@ FF_CODEC_CB_TYPE_RECEIVE_PACKET
void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
Wrapper around release_buffer() for frame-multithreaded codecs.
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
int sample_rate
samples per second
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
This struct describes the properties of an encoded stream.
@ AV_PIX_FMT_GBRP16BE
planar GBR 4:4:4 48bpp, big-endian
void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n)
@ AV_PIX_FMT_GBRP10BE
planar GBR 4:4:4 30bpp, big-endian
const char * name
short name for the profile
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
@ AV_PIX_FMT_YUV422P14LE
planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift)
const char * name
Name of the codec described by this descriptor.
AVPacketSideData * coded_side_data
Additional data associated with the entire coded stream.
int ff_thread_can_start_frame(AVCodecContext *avctx)
int av_samples_fill_arrays(uint8_t **audio_data, int *linesize, const uint8_t *buf, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Fill plane data pointers and linesize for samples with sample format sample_fmt.
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
@ AV_CODEC_ID_PCM_S32LE_PLANAR
const AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
Put the bit val, n times.
@ AV_PIX_FMT_YUVA444P10BE
planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
@ AV_PIX_FMT_YUV440P12BE
planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
@ AV_CODEC_ID_PCM_S16BE_PLANAR
@ AV_SAMPLE_FMT_S32P
signed 32 bits, planar
@ AV_PIX_FMT_YUV420P14BE
planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height)
Modify width and height values so that they will result in a memory buffer that is acceptable for the...
@ AV_PIX_FMT_YUV420P16LE
planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
@ AV_CODEC_ID_ADPCM_IMA_OKI
@ AV_PIX_FMT_GBRP14BE
planar GBR 4:4:4 42bpp, big-endian
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc)
Return the number of bits per pixel used by the pixel format described by pixdesc.
uint32_t codec_tag
Additional information about the codec (corresponds to the AVI FOURCC).
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
@ FF_CODEC_CB_TYPE_ENCODE_SUB
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
@ AV_PIX_FMT_YUVA444P9BE
planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
@ AV_PIX_FMT_YUV422P9BE
planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
const char * avcodec_profile_name(enum AVCodecID codec_id, int profile)
Return a name for the specified profile, if available.
int nb_channels
Number of channels in this layout.
@ AV_CODEC_ID_PCM_S16LE_PLANAR
@ AV_CODEC_ID_ADPCM_THP_LE
@ AV_PIX_FMT_YUV444P16LE
planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
@ AV_PIX_FMT_GBRAP12LE
planar GBR 4:4:4:4 48bpp, little-endian
@ AV_CODEC_ID_DSD_MSBF_PLANAR
@ AV_PIX_FMT_GRAY16BE
Y , 16bpp, big-endian.
int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos)
Converts AVChromaLocation to swscale x/y chroma position.
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
@ AV_PIX_FMT_YUV420P12LE
planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
@ AV_FRAME_DATA_MATRIXENCODING
The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h.
AVChannelLayout ch_layout
Audio channel layout.
const uint8_t * avpriv_find_start_code(const uint8_t *av_restrict p, const uint8_t *end, uint32_t *av_restrict state)
static const struct twinvq_data tab
AVCodecContext * owner[2]
static double val(void *priv, double ch)
@ AV_SAMPLE_FMT_S64P
signed 64 bits, planar
enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
@ AV_CODEC_ID_MP3
preferred ID for decoding MPEG audio layer 1, 2 or 3
#define ss(width, name, subs,...)
@ AV_PIX_FMT_YUVA444P16BE
planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
@ AV_PIX_FMT_YUV444P10BE
planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
@ AV_PIX_FMT_YUV420P10LE
planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
@ FF_CODEC_CB_TYPE_DECODE
@ AV_PIX_FMT_YUV444P12LE
planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
int av_get_bits_per_sample(enum AVCodecID codec_id)
Return codec bits per sample.
@ AV_CODEC_ID_ADPCM_SBPRO_2
@ AV_PIX_FMT_YUV422P12BE
planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
@ AV_PIX_FMT_YUV444P14LE
planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
@ AV_PIX_FMT_BGR8
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
#define FF_ARRAY_ELEMS(a)
int frame_size
Audio only.
void av_memcpy_backptr(uint8_t *dst, int back, int cnt)
Overlapping memcpy() implementation.
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
@ AV_PIX_FMT_GBRAP16BE
planar GBRA 4:4:4:4 64bpp, big-endian
@ AV_CODEC_ID_ADPCM_IMA_ACORN
This struct describes the properties of a single codec described by an AVCodecID.
@ AV_PIX_FMT_GBRP16LE
planar GBR 4:4:4 48bpp, little-endian
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
#define AV_CEIL_RSHIFT(a, b)
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
@ AV_CODEC_ID_ADPCM_IMA_EA_SEAD
@ AV_CODEC_ID_ADPCM_IMA_DK3
@ AV_PIX_FMT_GBRP12LE
planar GBR 4:4:4 36bpp, little-endian
#define FF_PROFILE_UNKNOWN
#define av_assert0(cond)
assert() equivalent, that is always enabled.
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
@ AV_CODEC_ID_ADPCM_IMA_APC
@ AV_PIX_FMT_YUVA420P16BE
planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
int ff_side_data_update_matrix_encoding(AVFrame *frame, enum AVMatrixEncoding matrix_encoding)
Add or update AV_FRAME_DATA_MATRIXENCODING side data.
@ AV_CODEC_ID_ADPCM_IMA_ISS
@ AV_CODEC_ID_BINKAUDIO_DCT
int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels, enum AVSampleFormat sample_fmt, const uint8_t *buf, int buf_size, int align)
Fill AVFrame audio data and linesize pointers.
@ FF_CODEC_CB_TYPE_ENCODE
int ff_alloc_entries(AVCodecContext *avctx, int count)
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[AV_NUM_DATA_POINTERS])
Modify width and height values so that they will result in a memory buffer that is acceptable for the...
It's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this field.
void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
Unref a ThreadFrame.
@ AV_CODEC_ID_ADPCM_IMA_SMJPEG
@ AV_PIX_FMT_GBRP10LE
planar GBR 4:4:4 30bpp, little-endian
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
This structure describes the bitrate properties of an encoded bitstream.
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
@ AV_PIX_FMT_YUV444P10LE
planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
@ AV_PIX_FMT_YUVA422P10LE
planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
@ AV_PIX_FMT_YUV444P9BE
planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
@ AV_PIX_FMT_YUV422P10BE
planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
@ AV_PIX_FMT_YUV422P16LE
planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
@ AV_CODEC_ID_ADPCM_EA_XAS
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
@ AV_PIX_FMT_GBRAP12BE
planar GBR 4:4:4:4 48bpp, big-endian
@ AV_CODEC_ID_INTERPLAY_VIDEO
@ AV_CODEC_ID_ADPCM_YAMAHA
@ AV_CODEC_ID_ADPCM_IMA_WS
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Rational number (pair of numerator and denominator).
@ AV_CODEC_ID_INTERPLAY_DPCM
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
@ FF_CODEC_CB_TYPE_DECODE_SUB
@ AV_CODEC_ID_ADPCM_IMA_DK4
int64_t bit_rate
the average bitrate
enum AVPacketSideDataType type
@ AV_PIX_FMT_YUVA422P12LE
planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, little-endian
int ff_slice_thread_init_progress(AVCodecContext *avctx)
@ AV_PIX_FMT_YUVA444P12BE
planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, big-endian
const AVProfile * profiles
array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}
void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size)
Allocate and clear a buffer, reusing the given one if large enough.
@ AV_PIX_FMT_YUVA444P9LE
planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
@ AV_CODEC_ID_ADPCM_IMA_AMV
@ AV_PIX_FMT_YUVA420P16LE
planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
@ AV_PIX_FMT_BGR0
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
@ AV_PIX_FMT_YUV440P10LE
planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
AVChannelLayout ch_layout
Audio only.
@ AV_PIX_FMT_YUVA420P9LE
planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian
void ff_color_frame(AVFrame *frame, const int c[4])
Undefined Behavior: in C, some operations are undefined, like signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined Behavior must not occur in a C program; it is not safe even if the output of the undefined operations is unused. The unsafety may seem nit-picking, but optimizing compilers have in fact optimized code on the assumption that no Undefined Behavior occurs. Optimizing code based on wrong assumptions can and has in some cases led to effects beyond the output of computations. The signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
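A small illustration of the point made above (not from this file): the signed addition is undefined on overflow, while the unsigned form wraps modulo 2^32 and only the final conversion back to a signed type is implementation-defined.

/* Signed overflow is undefined behavior; unsigned arithmetic wraps. */
#include <stdint.h>

int32_t add_bad (int32_t a) { return a + 1; }             /* UB if a == INT32_MAX */
int32_t add_safe(int32_t a) { return (uint32_t)a + 1; }   /* wraps, then converts */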
int sample_rate
Audio only.
@ AV_PIX_FMT_YUV420P14LE
planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
@ AV_CODEC_ID_PCM_S24LE_PLANAR
@ AV_CODEC_ID_GSM
as in Berlin toast format
AVCodecID
Identify the syntax and semantics of the bitstream.
@ AV_PIX_FMT_YUV444P14BE
planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
const AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
@ AV_PIX_FMT_YUV420P9BE
The following 12 formats have the disadvantage of needing 1 format for each bit depth.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
int av_codec_is_decoder(const AVCodec *avcodec)
int av_get_exact_bits_per_sample(enum AVCodecID codec_id)
Return codec bits per sample.
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
@ AV_PIX_FMT_YUV440P12LE
planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
@ AV_SAMPLE_FMT_U8P
unsigned 8 bits, planar
@ AV_CODEC_ID_ADPCM_IMA_RAD
@ AV_PIX_FMT_YUV420P12BE
planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
@ AV_PIX_FMT_YUV422P10LE
planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
@ AV_CODEC_ID_ADPCM_IMA_ALP
@ AV_PIX_FMT_YUV422P14BE
planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
int avpriv_codec_get_cap_skip_frame_fill_param(const AVCodec *codec)
#define AV_NUM_DATA_POINTERS
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
@ AV_PIX_FMT_GBRP9BE
planar GBR 4:4:4 27bpp, big-endian
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
static const av_always_inline FFCodec * ffcodec(const AVCodec *codec)
@ AVCHROMA_LOC_UNSPECIFIED
@ AV_PIX_FMT_YUV420P10BE
planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
@ AV_PIX_FMT_GBRP9LE
planar GBR 4:4:4 27bpp, little-endian
The reader does not expect b to be semantically negative here, and if the code is changed, by maybe adding a division or other operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int; to use the same example, SUINT a
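A minimal sketch of the idea (the definition below is illustrative; FFmpeg's real SUINT lives in libavutil and expands to int in checked builds):

/* SUINT: unsigned storage documenting signed intent, so intermediate
 * arithmetic wraps instead of hitting signed-overflow undefined behavior. */
#define SUINT unsigned

static int scale_by_ten(int x)
{
    SUINT a = x;            /* may hold a negative value, stored as unsigned */
    return (int)(a * 10u);  /* wraps on overflow; no undefined behavior */
}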
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
void ff_reset_entries(AVCodecContext *avctx)
const struct AVCodecHWConfigInternal *const * hw_configs
Array of pointers to hardware configurations supported by the codec, or NULL if no hardware supported...
int ff_int_from_list_or_default(void *ctx, const char *val_name, int val, const int *array_valid_values, int default_value)
Check if a value is in the list.
@ AV_PIX_FMT_YUVA420P10LE
planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
void ff_thread_await_progress(ThreadFrame *f, int progress, int field)
Wait for earlier decoding threads to finish reference pictures.
AVChromaLocation
Location of chroma samples.
@ AV_PIX_FMT_YUVA422P10BE
planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
int av_codec_is_encoder(const AVCodec *avcodec)
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
@ AV_PIX_FMT_YUVA444P12LE
planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, little-endian
@ AV_PIX_FMT_YUVA422P9BE
planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian
unsigned caps_internal
Internal codec capabilities FF_CODEC_CAP_*.
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
#define i(width, name, range_min, range_max)
const char * av_get_profile_name(const AVCodec *codec, int profile)
Return a name for the specified profile, if available.
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
@ AV_SAMPLE_FMT_U8
unsigned 8 bits
int block_align
Audio only.
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
@ AV_CODEC_ID_DSD_LSBF_PLANAR
AVSampleFormat
Audio sample formats.
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc(), but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
#define AV_PIX_FMT_RGB555
unsigned int av_xiphlacing(unsigned char *s, unsigned int v)
Encode extradata length to a buffer.
@ AV_CODEC_ID_ADPCM_IMA_APM
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
@ AV_SAMPLE_FMT_S16
signed 16 bits
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
const char * name
Name of the codec implementation.
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
@ AV_CODEC_ID_ADPCM_IMA_DAT4
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
@ AV_PIX_FMT_YUV444P16BE
planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int64_t ff_guess_coded_bitrate(AVCodecContext *avctx)
Get an estimated video bitrate based on frame size, frame rate and coded bits per pixel.
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
@ AV_PIX_FMT_GBRAP16LE
planar GBRA 4:4:4:4 64bpp, little-endian
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
@ AV_PIX_FMT_YVYU422
packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
int block_align
Number of bytes per packet, if constant and known, or 0. Used by some WAV-based audio codecs.
static int get_audio_frame_duration(enum AVCodecID id, int sr, int ch, int ba, uint32_t tag, int bits_per_coded_sample, int64_t bitrate, uint8_t *extradata, int frame_size, int frame_bytes)
These buffered frames must be flushed immediately if a new input produces new output. The filter must not call request_frame to get more; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed. Otherwise, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. Return, or at least make progress towards producing a frame.
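A minimal sketch of a pass-through request_frame() honouring that contract (a hedged illustration; ff_request_frame() is a libavfilter internal helper):

/* Hedged sketch: forward the output's frame request to the single input. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    return ff_request_frame(ctx->inputs[0]);
}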
#define AV_INPUT_BUFFER_PADDING_SIZE
@ AV_PIX_FMT_GBRP12BE
planar GBR 4:4:4 36bpp, big-endian
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
@ AV_PIX_FMT_YUV444P12BE
planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
main external API structure.
@ AV_CODEC_ID_ADPCM_G726LE
@ AV_PIX_FMT_YUV444P9LE
planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_padded_malloc(), except that the buffer will always be 0-initialized after the call.
@ AV_SAMPLE_FMT_DBLP
double, planar
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
@ FF_CODEC_CB_TYPE_RECEIVE_FRAME
enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos)
Converts swscale x/y chroma position to AVChromaLocation.
unsigned cb_type
This field determines the type of the codec (decoder/encoder) and also the exact callback cb implemen...
@ AV_PIX_FMT_YUVA420P10BE
planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *avcodec, int index)
Retrieve supported hardware configurations for a codec.
@ AV_PIX_FMT_YUV420P16BE
planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
static int shift(int a, int b)
#define FF_DISABLE_DEPRECATION_WARNINGS
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be)
Return the PCM codec associated with a sample format.
@ AV_PIX_FMT_YUV422P16BE
planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
int bits_per_coded_sample
The number of bits per sample in the codedwords.
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
static unsigned bcd2uint(uint8_t bcd)
@ AV_CODEC_ID_ADPCM_SBPRO_4
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
@ AV_PIX_FMT_YUVA444P10LE
planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
Structure to hold side data for an AVFrame.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
@ AV_CODEC_ID_PCM_S24DAUD
@ AV_CODEC_ID_ADPCM_IMA_SSI
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
const VDPAUPixFmtMap * map
int ff_alloc_timecode_sei(const AVFrame *frame, AVRational rate, size_t prefix_len, void **data, size_t *sei_size)
Check AVFrame for S12M timecode side data and allocate and fill TC SEI message with timecode info.
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
@ AV_CODEC_ID_ADPCM_IMA_MOFLEX
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
@ AV_CODEC_ID_ADPCM_IMA_WAV
int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes)
This function is the same as av_get_audio_frame_duration(), except it works with AVCodecParameters in...
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
@ AV_PIX_FMT_YUV440P10BE
planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
@ AV_PIX_FMT_YUVA422P16BE
planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
@ AV_PIX_FMT_YUV422P9LE
planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
@ AV_PIX_FMT_YUVA422P16LE
planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
@ AV_PIX_FMT_GBRP14LE
planar GBR 4:4:4 42bpp, little-endian
@ AV_CODEC_ID_PCM_S8_PLANAR
int width
picture width / height.
#define flags(name, subs,...)
@ AVCHROMA_LOC_NB
Not part of ABI.
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
int64_t bit_rate
The average bitrate of the encoded data (in bits per second).
@ AV_SAMPLE_FMT_DBL
double
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
@ AV_PIX_FMT_YUVA444P16LE
planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
@ AV_PIX_FMT_YUVA422P12BE
planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, big-endian
AVCodecHWConfig public
This is the structure which will be returned to the user by avcodec_get_hw_config().
void ff_thread_report_progress(ThreadFrame *f, int progress, int field)
Notify later decoding threads when part of their reference picture is ready.
@ AV_SAMPLE_FMT_S32
signed 32 bits
@ AV_PIX_FMT_YUV422P12LE
planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
int ff_thread_init(AVCodecContext *s)
@ AV_PIX_FMT_YUVA420P9BE
planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
@ AV_PIX_FMT_UYYVYY411
packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
@ AV_CODEC_ID_ADPCM_SBPRO_3
@ AV_PIX_FMT_YUVA422P9LE
planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian