Go to the documentation of this file.
36 #define MAX_BACKWARD_FILTER_ORDER LPC
37 #define MAX_BACKWARD_FILTER_LEN NFRSZ
38 #define MAX_BACKWARD_FILTER_NONREC NONR
58 for (
int j = 0; j <
IDIM; j++)
67 for (
int i = 0;
i <
LPC;
i++)
115 int order,
int n,
int non_rec,
float *
out,
116 const float *hist,
float *out2,
const float *
window)
119 return out[order] != 0.0f;
124 float *gstate =
s->sbg +
NSBGSZ - 2;
126 for (
int idx = 0; idx <
NUPDATE; idx++) {
134 gain_db -=
s->gp[
i] * gstate[-
i];
146 for (
int i = 0;
i <
IDIM;
i++) {
148 dst[idx*
IDIM +
i] = statelpc[
i] * (1.0f/(1<<12));
165 }
else if (idx == 1) {
167 memcpy(
s->a,
s->atmp,
sizeof(
float)*
LPC);
180 int *got_frame_ptr,
AVPacket *avpkt)
185 int nb_frames = avpkt->
size / 5;
193 #define SAMPLES_PER_FRAME 20
199 for (
int i = 0;
i < nb_frames;
i++)
204 return nb_frames * 5;
static const uint16_t g728_wnrg[NSBGSZ]
In filters, the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
static float g728_wnr_r[FFALIGN(NSBSZ, 16)]
The official guide to swscale for confused developers.
static const int16_t codetable[128][5]
static int g728_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt)
This structure describes decoded (raw) audio or video data.
static av_cold int g728_decode_close(AVCodecContext *avctx)
#define SAMPLES_PER_FRAME
void ff_celp_lp_synthesis_filterf(float *out, const float *filter_coeffs, const float *in, int buffer_length, int filter_length)
LP synthesis filter.
const FFCodec ff_g728_decoder
static av_cold void close(AVCodecParserContext *s)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static SDL_Window * window
AVCodec p
The public AVCodec.
AVChannelLayout ch_layout
Audio channel layout.
int flags
AV_CODEC_FLAG_*.
static int ff_thread_once(char *control, void(*routine)(void))
#define FF_ARRAY_ELEMS(a)
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
#define FF_CODEC_DECODE_CB(func)
static const float gain_bw_tab[FFALIGN(10, 16)]
gain bandwidth broadening table
#define CODEC_LONG_NAME(str)
static float g728_facv_f[FFALIGN(LPC, 16)]
float ff_scalarproduct_float_c(const float *v1, const float *v2, int len)
Return the scalar product of two vectors of floats.
#define AV_CODEC_CAP_CHANNEL_CONF
Codec should fill in channel configuration and samplerate instead of container.
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
int(* init)(AVBSFContext *ctx)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
An AVChannelLayout holds information about the channel layout of audio data.
#define DECLARE_ALIGNED(n, t, v)
void(* vector_fmul)(float *dst, const float *src0, const float *src1, int len)
Calculate the entry wise product of two vectors of floats and store the result in a vector of floats.
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
enum AVSampleFormat sample_fmt
audio sample format
static const uint16_t g728_wnr[NSBSZ]
The reader does not expect b to be semantically negative here, and if the code is changed by maybe adding a division or other operation, the signedness will almost certainly be mistaken To avoid this confusion a new type was introduced: SUINT is the C unsigned type but it holds a signed int to use the same example: SUINT a
static void do_hybrid_window(void(*vector_fmul)(float *dst, const float *src0, const float *src1, int len), int order, int n, int non_rec, float *out, const float *hist, float *out2, const float *window)
Hybrid window filtering, see blocks 36 and 49 of the G.728 specification.
static float g728_y_db[128]
static const uint16_t g728_facv[LPC]
static av_cold int g728_decode_init(AVCodecContext *avctx)
#define i(width, name, range_min, range_max)
static int compute_lpc_coefs(const LPC_TYPE *autoc, int i, int max_order, LPC_TYPE *lpc, int lpc_stride, int fail, int normalize, LPC_TYPE *err_ptr)
Levinson-Durbin recursion.
static int hybrid_window(AVFloatDSPContext *fdsp, int order, int n, int non_rec, float *out, const float *hist, float *out2, const float *window)
const char * name
Name of the codec implementation.
static void decode_frame(G728Context *s, GetBitContext *gb, float *dst)
static float g728_gq_db[8]
these buffered frames must be flushed immediately if a new input produces new output, the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter's request_frame method or the application If a filter has several inputs, the filter must be ready for frames arriving randomly on any input: any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the activate callback, this method is called when a frame is wanted on an output For a source, it should directly call filter_frame on the corresponding output For a filter, if there are queued frames already, one of these frames should be pushed If not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed Return, or at least make progress towards producing a frame
main external API structure.
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
#define AV_CHANNEL_LAYOUT_MONO
static const float amptable[8]
This structure stores compressed data.
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
static av_cold void g728_init_static_data(void)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static float g728_wnrg_r[FFALIGN(NSBGSZ, 16)]