Go to the documentation of this file.
22 #include "config_components.h"
49 #define MAX_CHANNELS 2
80 #define LATTICE_SHIFT 10
81 #define SAMPLE_SHIFT 4
82 #define LATTICE_FACTOR (1 << LATTICE_SHIFT)
83 #define SAMPLE_FACTOR (1 << SAMPLE_SHIFT)
85 #define BASE_QUANT 0.6
86 #define RATE_VARIATION 3.0
90 return (
a+(1<<(
b-1))) >>
b;
101 #define put_rac(C,S,B) \
105 rc_stat2[(S)-state][B]++;\
120 for(
i=e-1;
i>=0;
i--){
132 for(
i=e-1;
i>=0;
i--){
159 for(
i=e-1;
i>=0;
i--){
173 for (
i = 0;
i < entries;
i++)
183 for (
i = 0;
i < entries;
i++)
193 for (
i = 0;
i < entries;
i++)
203 for (
i = 0;
i < entries;
i++)
211 #define ADAPT_LEVEL 8
213 static int bits_to_store(uint64_t x)
263 int i, j, x = 0, low_bits = 0,
max = 0;
264 int step = 256,
pos = 0, dominant = 0, any = 0;
275 for (
i = 0;
i < entries;
i++)
276 energy +=
abs(buf[
i]);
278 low_bits = bits_to_store(energy / (entries * 2));
285 for (
i = 0;
i < entries;
i++)
300 for (
i = 0;
i <=
max;
i++)
302 for (j = 0; j < entries; j++)
310 int steplet =
step >> 8;
312 if (
pos + steplet > x)
315 for (
i = 0;
i < steplet;
i++)
330 while (((
pos + interloper) < x) && (
bits[
pos + interloper] == dominant))
334 write_uint_max(pb, interloper, (
step >> 8) - 1);
336 pos += interloper + 1;
343 dominant = !dominant;
348 for (
i = 0;
i < entries;
i++)
360 int i, low_bits = 0, x = 0;
361 int n_zeros = 0,
step = 256, dominant = 0;
373 for (
i = 0;
i < entries;
i++)
379 while (n_zeros < entries)
381 int steplet =
step >> 8;
385 for (
i = 0;
i < steplet;
i++)
386 bits[x++] = dominant;
395 int actual_run = read_uint_max(gb, steplet-1);
399 for (
i = 0;
i < actual_run;
i++)
400 bits[x++] = dominant;
402 bits[x++] = !dominant;
405 n_zeros += actual_run;
415 dominant = !dominant;
421 for (
i = 0; n_zeros < entries;
i++)
428 level += 1 << low_bits;
438 buf[
pos] += 1 << low_bits;
447 for (
i = 0;
i < entries;
i++)
461 for (
i = order-2;
i >= 0;
i--)
465 for (j = 0, p =
i+1; p < order; j++,p++)
479 int *k_ptr = &(k[order-2]),
480 *state_ptr = &(
state[order-2]);
481 for (
i = order-2;
i >= 0;
i--, k_ptr--, state_ptr--)
483 int k_value = *k_ptr, state_value = *state_ptr;
488 for (
i = order-2;
i >= 0;
i--)
504 #if CONFIG_SONIC_ENCODER || CONFIG_SONIC_LS_ENCODER
509 static void modified_levinson_durbin(
int *
window,
int window_entries,
510 int *
out,
int out_entries,
int channels,
int *tap_quant)
517 for (
i = 0;
i < out_entries;
i++)
520 double xx = 0.0, xy = 0.0;
523 int *state_ptr = &(
state[0]);
524 j = window_entries -
step;
525 for (;j>0;j--,x_ptr++,state_ptr++)
527 double x_value = *x_ptr;
528 double state_value = *state_ptr;
529 xx += state_value*state_value;
530 xy += x_value*state_value;
533 for (j = 0; j <= (window_entries -
step); j++);
536 double stateval =
window[j];
539 xx += stateval*stateval;
540 xy += stepval*stateval;
558 state_ptr = &(
state[0]);
559 j = window_entries -
step;
560 for (;j>0;j--,x_ptr++,state_ptr++)
562 int x_value = *x_ptr;
563 int state_value = *state_ptr;
568 for (j=0; j <= (window_entries -
step); j++)
571 int stateval=
state[j];
579 static inline int code_samplerate(
int samplerate)
583 case 44100:
return 0;
584 case 22050:
return 1;
585 case 11025:
return 2;
586 case 96000:
return 3;
587 case 48000:
return 4;
588 case 32000:
return 5;
589 case 24000:
return 6;
590 case 16000:
return 7;
614 s->decorrelation = 3;
621 s->quantization = 0.0;
627 s->quantization = 1.0;
631 if (
s->num_taps < 32 ||
s->num_taps > 1024 ||
s->num_taps % 32) {
637 s->tap_quant =
av_calloc(
s->num_taps,
sizeof(*
s->tap_quant));
641 for (
i = 0;
i <
s->num_taps;
i++)
647 s->block_align = 2048LL*
s->samplerate/(44100*
s->downsampling);
648 s->frame_size =
s->channels*
s->block_align*
s->downsampling;
650 s->tail_size =
s->num_taps*
s->channels;
655 s->predictor_k =
av_calloc(
s->num_taps,
sizeof(*
s->predictor_k) );
659 coded_samples =
av_calloc(
s->block_align,
s->channels *
sizeof(**
s->coded_samples));
662 for (
i = 0;
i <
s->channels;
i++, coded_samples +=
s->block_align)
663 s->coded_samples[
i] = coded_samples;
665 s->int_samples =
av_calloc(
s->frame_size,
sizeof(*
s->int_samples));
667 s->window_size = ((2*
s->tail_size)+
s->frame_size);
668 s->window =
av_calloc(
s->window_size, 2 *
sizeof(*
s->window));
669 if (!
s->window || !
s->int_samples)
680 if (
s->version >= 2) {
685 put_bits(&pb, 4, code_samplerate(
s->samplerate));
698 av_log(avctx,
AV_LOG_INFO,
"Sonic: ver: %d.%d ls: %d dr: %d taps: %d block: %d frame: %d downsamp: %d\n",
699 s->version,
s->minor_version,
s->lossless,
s->decorrelation,
s->num_taps,
s->block_align,
s->frame_size,
s->downsampling);
725 int i, j, ch,
quant = 0, x = 0;
738 for (
i = 0;
i <
s->frame_size;
i++)
742 for (
i = 0;
i <
s->frame_size;
i++)
745 switch(
s->decorrelation)
748 for (
i = 0;
i <
s->frame_size;
i +=
s->channels)
750 s->int_samples[
i] +=
s->int_samples[
i+1];
751 s->int_samples[
i+1] -=
shift(
s->int_samples[
i], 1);
755 for (
i = 0;
i <
s->frame_size;
i +=
s->channels)
756 s->int_samples[
i+1] -=
s->int_samples[
i];
759 for (
i = 0;
i <
s->frame_size;
i +=
s->channels)
760 s->int_samples[
i] -=
s->int_samples[
i+1];
764 memset(
s->window, 0,
s->window_size *
sizeof(*
s->window));
766 for (
i = 0;
i <
s->tail_size;
i++)
767 s->window[x++] =
s->tail[
i];
769 for (
i = 0;
i <
s->frame_size;
i++)
770 s->window[x++] =
s->int_samples[
i];
772 for (
i = 0;
i <
s->tail_size;
i++)
775 for (
i = 0;
i <
s->tail_size;
i++)
776 s->tail[
i] =
s->int_samples[
s->frame_size -
s->tail_size +
i];
779 modified_levinson_durbin(
s->window,
s->window_size,
780 s->predictor_k,
s->num_taps,
s->channels,
s->tap_quant);
785 for (ch = 0; ch <
s->channels; ch++)
788 for (
i = 0;
i <
s->block_align;
i++)
791 for (j = 0; j <
s->downsampling; j++, x +=
s->channels)
793 s->coded_samples[ch][
i] = sum;
800 double energy1 = 0.0, energy2 = 0.0;
801 for (ch = 0; ch <
s->channels; ch++)
803 for (
i = 0;
i <
s->block_align;
i++)
805 double sample =
s->coded_samples[ch][
i];
811 energy2 = sqrt(energy2/(
s->channels*
s->block_align));
812 energy1 =
M_SQRT2*energy1/(
s->channels*
s->block_align);
817 if (energy2 > energy1)
831 for (ch = 0; ch <
s->channels; ch++)
834 for (
i = 0;
i <
s->block_align;
i++)
848 #if CONFIG_SONIC_DECODER
849 static const int samplerate_table[] =
850 { 44100, 22050, 11025, 96000, 48000, 32000, 24000, 16000, 8000 };
874 if (
s->version >= 2) {
886 int sample_rate_index;
888 sample_rate_index =
get_bits(&gb, 4);
893 s->samplerate = samplerate_table[sample_rate_index];
895 s->channels,
s->samplerate);
911 if (
s->decorrelation != 3 &&
s->channels != 2) {
917 if (!
s->downsampling) {
926 s->block_align = 2048LL*
s->samplerate/(44100*
s->downsampling);
927 s->frame_size =
s->channels*
s->block_align*
s->downsampling;
930 if (
s->num_taps *
s->channels >
s->frame_size) {
932 "number of taps times channels (%d * %d) larger than frame size %d\n",
933 s->num_taps,
s->channels,
s->frame_size);
937 av_log(avctx,
AV_LOG_INFO,
"Sonic: ver: %d.%d ls: %d dr: %d taps: %d block: %d frame: %d downsamp: %d\n",
938 s->version,
s->minor_version,
s->lossless,
s->decorrelation,
s->num_taps,
s->block_align,
s->frame_size,
s->downsampling);
941 s->tap_quant =
av_calloc(
s->num_taps,
sizeof(*
s->tap_quant));
945 for (
i = 0;
i <
s->num_taps;
i++)
948 s->predictor_k =
av_calloc(
s->num_taps,
sizeof(*
s->predictor_k));
950 tmp =
av_calloc(
s->num_taps,
s->channels *
sizeof(**
s->predictor_state));
953 for (
i = 0;
i <
s->channels;
i++,
tmp +=
s->num_taps)
954 s->predictor_state[
i] =
tmp;
956 tmp =
av_calloc(
s->block_align,
s->channels *
sizeof(**
s->coded_samples));
959 for (
i = 0;
i <
s->channels;
i++,
tmp +=
s->block_align)
960 s->coded_samples[
i] =
tmp;
962 s->int_samples =
av_calloc(
s->frame_size,
sizeof(*
s->int_samples));
984 int *got_frame_ptr,
AVPacket *avpkt)
986 const uint8_t *buf = avpkt->
data;
987 int buf_size = avpkt->
size;
994 if (buf_size == 0)
return 0;
1010 for (
i = 0;
i <
s->num_taps;
i++)
1011 s->predictor_k[
i] *= (
unsigned)
s->tap_quant[
i];
1020 for (ch = 0; ch <
s->channels; ch++)
1031 for (
i = 0;
i <
s->block_align;
i++)
1033 for (j = 0; j <
s->downsampling - 1; j++)
1043 for (
i = 0;
i <
s->num_taps;
i++)
1044 s->predictor_state[ch][
i] =
s->int_samples[
s->frame_size -
s->channels + ch -
i*
s->channels];
1047 switch(
s->decorrelation)
1050 for (
i = 0;
i <
s->frame_size;
i +=
s->channels)
1052 s->int_samples[
i+1] +=
shift(
s->int_samples[
i], 1);
1053 s->int_samples[
i] -=
s->int_samples[
i+1];
1057 for (
i = 0;
i <
s->frame_size;
i +=
s->channels)
1058 s->int_samples[
i+1] +=
s->int_samples[
i];
1061 for (
i = 0;
i <
s->frame_size;
i +=
s->channels)
1062 s->int_samples[
i] +=
s->int_samples[
i+1];
1067 for (
i = 0;
i <
s->frame_size;
i++)
1071 for (
i = 0;
i <
s->frame_size;
i++)
1085 .
init = sonic_decode_init,
1086 .close = sonic_decode_close,
1093 #if CONFIG_SONIC_ENCODER
1100 .
init = sonic_encode_init,
1105 .close = sonic_encode_close,
1109 #if CONFIG_SONIC_LS_ENCODER
1111 .
p.
name =
"sonicls",
1116 .
init = sonic_encode_init,
1121 .close = sonic_encode_close,
static void error(const char *err)
static int intlist_write(RangeCoder *c, uint8_t *state, int *buf, int entries, int base_2_part)
int frame_size
Number of samples per channel in an audio frame.
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
In filter documentation, the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Formats: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. The lists are not plain lists: they are references to shared objects. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
static int put_bytes_output(const PutBitContext *s)
int sample_rate
samples per second
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Trying all byte sequences up to a megabyte in length and selecting the best-looking sequence would yield far too many cases to try. But a word about quality, which is also called distortion: distortion can be quantified by almost any quality measurement one chooses; typically the sum of squared differences is used, but more complex methods that consider psychovisual effects can be used as well. It makes no difference in this discussion. First step
enum AVChannelOrder order
Channel order used in this layout.
int nb_channels
Number of channels in this layout.
static void skip_bits(GetBitContext *s, int n)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static SDL_Window * window
AVCodec p
The public AVCodec.
const struct AVCodec * codec
AVChannelLayout ch_layout
Audio channel layout.
av_cold void ff_init_range_encoder(RangeCoder *c, uint8_t *buf, int buf_size)
#define FF_CODEC_ENCODE_CB(func)
static int quant(float coef, const float Q, const float rounding)
Quantize one coefficient.
exp golomb vlc writing stuff
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define AV_CODEC_CAP_EXPERIMENTAL
Codec is experimental and is thus avoided in favor of non-experimental encoders.
#define FF_ARRAY_ELEMS(a)
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
static av_flatten int get_symbol(RangeCoder *c, uint8_t *state, int is_signed)
#define FF_CODEC_DECODE_CB(func)
static __device__ float floor(float a)
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
static int predictor_calc_error(int *k, int *state, int order, int error)
int * coded_samples[MAX_CHANNELS]
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
const FFCodec ff_sonic_encoder
#define FFABS(a)
Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior as they are not representable as absolute values of their type.
static int intlist_read(RangeCoder *c, uint8_t *state, int *buf, int entries, int base_2_part)
static __device__ float fabs(float a)
static void predictor_init_state(int *k, int *state, int order)
int ff_rac_terminate(RangeCoder *c, int version)
Terminates the range coder.
#define ROUNDED_DIV(a, b)
static unsigned int get_bits1(GetBitContext *s)
Undefined Behavior: in the C language, some operations are undefined — like signed integer overflow, dereferencing freed pointers, or accessing outside allocated arrays. Undefined Behavior must not occur in a C program; it is not safe even if the output of the undefined operations is unused. The unsafety may seem like nit-picking, but optimizing compilers have in fact optimized code on the assumption that no Undefined Behavior occurs. Optimizing code based on wrong assumptions can, and in some cases has, led to effects beyond the output of computations. The signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that the output of the computation does not fit in the fixed-size integer type.
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
static av_always_inline av_flatten void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed, uint64_t rc_stat[256][2], uint64_t rc_stat2[32][2])
#define AV_CODEC_CAP_CHANNEL_CONF
Codec should fill in channel configuration and samplerate instead of container.
const FFCodec ff_sonic_decoder
av_cold void ff_init_range_decoder(RangeCoder *c, const uint8_t *buf, int buf_size)
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static void copy(const float *p1, float *p2, const int length)
enum AVSampleFormat sample_fmt
audio sample format
void ff_build_rac_states(RangeCoder *c, int factor, int max_p)
static void set_se_golomb(PutBitContext *pb, int i)
write signed exp golomb code.
The reader does not expect b to be semantically unsigned here, and if the code is later changed — by maybe adding a division or another operation — the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int. To use the same example: SUINT a
#define AV_LOG_INFO
Standard information.
#define i(width, name, range_min, range_max)
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
AVSampleFormat
Audio sample formats.
It's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it take its default value.
const FFCodec ff_sonic_ls_encoder
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init function without locking any global mutexes.
@ AV_SAMPLE_FMT_S16
signed 16 bits
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU).
const char * name
Name of the codec implementation.
static int get_rac(RangeCoder *c, uint8_t *const state)
void * av_calloc(size_t nmemb, size_t size)
int * predictor_state[MAX_CHANNELS]
These buffered frames must be flushed immediately if a new input produces new output. The filter must not call request_frame to get more input; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; otherwise, the filter should request a frame on one of its inputs, repeatedly, until at least one frame has been pushed. It must return, or at least make progress towards producing a frame.
main external API structure.
Filter the word “frame” indicates either a video frame or a group of audio samples
static int shift(int a, int b)
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
This structure stores compressed data.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static int shift_down(int a, int b)
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.