#ifndef AVFILTER_FRAMESYNC_H
#define AVFILTER_FRAMESYNC_H
void ff_framesync_drop(FFFrameSync *fs)
Drop the current frame event.
AVFrame
This structure describes decoded (raw) audio or video data.
int(* on_event)(struct FFFrameSync *fs)
Callback called when a frame event is ready.
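A minimal sketch of such a callback, assuming the usual libavfilter includes and a hypothetical two-input filter whose private context MyContext stores the frame sync state in a member named fs (MyContext, process_frame and the blending step are illustrative, not part of this API):
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;   /* the parent given to ff_framesync_init() */
    MyContext *s         = fs->opaque;   /* filter-private data set via fs->opaque */
    AVFrame *main_frame, *second_frame;
    int ret;

    /* get=1: take a frame the filter may modify and forward;
     * get=0: only peek, framesync keeps the reference */
    if ((ret = ff_framesync_get_frame(fs, 0, &main_frame,   1)) < 0 ||
        (ret = ff_framesync_get_frame(fs, 1, &second_frame, 0)) < 0)
        return ret;

    /* ... combine second_frame into main_frame using the options in s ... */

    main_frame->pts = av_rescale_q(fs->pts, fs->time_base,
                                   ctx->outputs[0]->time_base);
    return ff_filter_frame(ctx->outputs[0], main_frame);
}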
int ff_framesync_request_frame(FFFrameSync *fs, AVFilterLink *outlink)
Request a frame on the filter output.
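With this API, the filter's request_frame callback on the output usually just delegates to the helper, which forwards the request to whichever input is blocking progress (sketch, same hypothetical MyContext as above):
static int request_frame(AVFilterLink *outlink)
{
    MyContext *s = outlink->src->priv;
    /* framesync picks the input that actually needs a new frame */
    return ff_framesync_request_frame(&s->fs, outlink);
}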
void ff_framesync_next(FFFrameSync *fs)
Prepare the next frame event.
int64_t pts
Timestamp of the current event.
enum FFFrameSyncExtMode before
Extrapolation mode for timestamps before the first frame.
FFBufQueue
Structure holding the queue.
uint8_t have_next
Boolean flagging the next frame, for internal use.
unsigned sync_level
Synchronization level: only inputs with the same sync level are sync sources.
FFFrameSyncIn * in
Pointer to array of inputs.
int ff_framesync_add_frame(FFFrameSync *fs, unsigned in, AVFrame *frame)
Add a frame to an input.
int ff_framesync_filter_frame(FFFrameSync *fs, AVFilterLink *inlink, AVFrame *in)
Accept a frame on a filter input.
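Likewise, each input's filter_frame callback can simply hand the frame over; framesync queues it and fires on_event once a frame event is complete (sketch, hypothetical MyContext):
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    MyContext *s = inlink->dst->priv;
    /* ownership of frame passes to framesync; any events that become
     * ready are processed through the on_event callback */
    return ff_framesync_filter_frame(&s->fs, inlink, frame);
}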
enum FFFrameSyncExtMode after
Extrapolation mode for timestamps after the last frame.
AVFilterLink
A link between two filters.
AVFrame * frame_next
Next frame, for internal use.
AVRational time_base
Time base for the incoming frames.
int ff_framesync_init(FFFrameSync *fs, void *parent, unsigned nb_in)
Initialize a frame sync structure.
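Initialization is typically done when the output link is configured: init the structure, fill in the per-input parameters, then call ff_framesync_configure(). A sketch for a hypothetical two-input filter (MyContext and the chosen sync/extrapolation values are illustrative):
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MyContext *s = ctx->priv;
    FFFrameSyncIn *in;
    int ret;

    if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
        return ret;
    s->fs.opaque   = s;              /* made available to on_event */
    s->fs.on_event = process_frame;

    in = s->fs.in;                   /* input array allocated by ff_framesync_init() */
    in[0].time_base = ctx->inputs[0]->time_base;
    in[1].time_base = ctx->inputs[1]->time_base;
    in[0].sync   = 2;                /* main input: highest sync level */
    in[0].before = EXT_STOP;
    in[0].after  = EXT_INFINITY;     /* keep using its last frame after EOF */
    in[1].sync   = 1;
    in[1].before = EXT_NULL;
    in[1].after  = EXT_NULL;         /* ignore this input once it ends */

    /* output link properties (w, h, frame_rate, time_base, ...) are
     * filter-specific and omitted here */
    return ff_framesync_configure(&s->fs);
}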
uint8_t eof
Flag indicating that output has reached EOF.
unsigned in_request
Index of the input that requires a request.
FFFrameSyncExtMode
This API is intended as a helper for filters that have several video inputs and need to combine them somehow.
AVRational time_base
Time base for the output events.
int ff_framesync_get_frame(FFFrameSync *fs, unsigned in, AVFrame **rframe, unsigned get)
Get the current frame in an input.
int ff_framesync_configure(FFFrameSync *fs)
Configure a frame sync structure.
struct FFBufQueue queue
Queue of incoming AVFrame; a NULL frame marks EOF.
void * opaque
Opaque pointer, not used by the API.
EXT_INFINITY
Extend the frame to infinity.
uint8_t state
State: before first, in stream or after EOF, for internal use.
unsigned sync
Synchronization level: frames on input at the highest sync level will generate output frame events...
AVClass
Describe the class of an AVClass context structure.
AVRational
Rational number (pair of numerator and denominator).
EXT_NULL
Ignore this stream and continue processing the other ones.
int ff_framesync_process_frame(FFFrameSync *fs, unsigned all)
Process one or several frames using the on_event callback.
unsigned nb_in
Number of input streams.
AVFrame * frame
Current frame, may be NULL before the first one or after EOF.
uint8_t frame_ready
Flag indicating that a frame event is ready.
void ff_framesync_uninit(FFFrameSync *fs)
Free all memory currently allocated.
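Usually called from the filter's uninit callback (sketch, same hypothetical MyContext):
static av_cold void uninit(AVFilterContext *ctx)
{
    MyContext *s = ctx->priv;
    /* drops any queued frames and frees the input array */
    ff_framesync_uninit(&s->fs);
}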
int64_t pts
PTS of the current frame.
EXT_STOP
Completely stop all streams with this one.
int64_t pts_next
PTS of the next frame, for internal use.