#define MAX_CHANNELS          8
#define MAX_BLOCKSIZE         65535
#define OUT_BUFFER_SIZE       16384
#define WAVE_FORMAT_PCM       0x0001
#define DEFAULT_BLOCK_SIZE    256
#define BITSHIFTSIZE          2
#define V2LPCQOFFSET          (1 << LPCQUANT)
#define FN_BLOCKSIZE          5
#define VERBATIM_CKSIZE_SIZE  5
#define VERBATIM_BYTE_SIZE    8
#define CANONICAL_HEADER_SIZE 44
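For orientation (not part of shorten.c): CANONICAL_HEADER_SIZE is the size of a minimal PCM WAVE header, that is 12 bytes of RIFF/WAVE preamble, a 24-byte "fmt " chunk and an 8-byte "data" chunk header. A layout sketch, assuming a packed struct; real files are written field by field in little-endian byte order:

#include <stdint.h>

/* Illustrative layout of a canonical 44-byte PCM WAVE header. */
struct canonical_wav_header {
    char     riff_tag[4];     /* "RIFF"                              */
    uint32_t riff_size;       /* total file size - 8                 */
    char     wave_tag[4];     /* "WAVE"                              */
    char     fmt_tag[4];      /* "fmt "                              */
    uint32_t fmt_size;        /* 16 for plain PCM                    */
    uint16_t format;          /* WAVE_FORMAT_PCM == 0x0001           */
    uint16_t channels;
    uint32_t sample_rate;
    uint32_t byte_rate;       /* sample_rate * channels * bits / 8   */
    uint16_t block_align;
    uint16_t bits_per_sample;
    char     data_tag[4];     /* "data"                              */
    uint32_t data_size;       /* payload bytes that follow           */
};                            /* 12 + 24 + 8 = 44 bytes              */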
/* allocate_buffers(): size checks and per-channel sample buffers (excerpt) */
for (chan = 0; chan < s->channels; chan++) {
    if (s->blocksize + (uint64_t)s->nwrap >= UINT_MAX / sizeof(int32_t)) {
        av_log(s->avctx, AV_LOG_ERROR,
               "s->blocksize + s->nwrap too large\n");
        return AVERROR_INVALIDDATA;
    }
    /* ... */
    if ((err = av_reallocp_array(&s->decoded_base[chan], s->blocksize + s->nwrap,
                                 sizeof(s->decoded_base[0][0]))) < 0)
        return err;
    for (i = 0; i < s->nwrap; i++)
        s->decoded_base[chan][i] = 0;
    s->decoded[chan] = s->decoded_base[chan] + s->nwrap;
}
/* get_uint(): version > 0 streams code the field width explicitly (excerpt) */
if (s->version != 0) {
    /* ... */
}

/* fix_bitshift(): undo the per-block right shift applied by the encoder (excerpt) */
if (s->bitshift == 32) {
    for (i = 0; i < s->blocksize; i++)
        buffer[i] = 0;
} else if (s->bitshift != 0) {
    for (i = 0; i < s->blocksize; i++)
        buffer[i] *= 1U << s->bitshift;
}
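As a side note on the bitshift mechanism (an illustration, not code from shorten.c): the encoder right-shifts a block when all of its samples share trailing zero bits, and the decoder multiplies the shift back in, as fix_bitshift() does above:

/* Round trip performed by the bitshift mechanism; lossless only when the
 * discarded low bits of `original` are all zero. */
int32_t original = 0x1230;                    /* low 4 bits are zero  */
int     bitshift = 4;
int32_t stored   = original >> bitshift;      /* encoder side: 0x0123 */
int32_t restored = stored * (1 << bitshift);  /* decoder side: 0x1230 */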
/* init_offset(): pre-fill the running means with the format's DC offset (excerpt) */
int nblock = FFMAX(1, s->nmean);

switch (s->internal_ftype) {
/* ... */
}

for (chan = 0; chan < s->channels; chan++)
    for (i = 0; i < nblock; i++)
        s->offset[chan][i] = mean;
/* decode_aiff_header(): parse the verbatim AIFF/AIFC header (excerpt) */
if (bytestream2_get_le32(&gb) != MKTAG('F', 'O', 'R', 'M')) {
    /* ... */
}
tag = bytestream2_get_le32(&gb);
if (tag != MKTAG('A', 'I', 'F', 'F') &&
    tag != MKTAG('A', 'I', 'F', 'C')) {
    /* ... */
}

/* skip chunks until the COMM chunk is found */
while (bytestream2_get_le32(&gb) != MKTAG('C', 'O', 'M', 'M')) {
    len = bytestream2_get_be32(&gb);
    /* ... */
}
len = bytestream2_get_be32(&gb);
/* ... */
bps     = bytestream2_get_be16(&gb);
s->swap = tag == MKTAG('A', 'I', 'F', 'C');

if (bps != 16 && bps != 8) {
    /* ... */
}

/* the sample rate is stored as an 80-bit IEEE extended float */
exp = bytestream2_get_be16(&gb) - 16383 - 63;
val = bytestream2_get_be64(&gb);
if (exp < -63 || exp > 63) {
    /* ... */
}
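The last three lines of the excerpt read the COMM chunk's sample rate, which AIFF stores as an 80-bit IEEE extended float (16-bit biased exponent plus a 64-bit mantissa). A self-contained conversion sketch using the same bias handling as above; names and the simplified rounding of the negative-exponent case are not taken from shorten.c:

#include <stdint.h>

/* Convert an AIFF 80-bit extended float, already split into its exponent and
 * mantissa fields, into an integer sample rate. Illustrative only. */
static uint64_t aiff_extended_to_rate(uint16_t raw_exponent, uint64_t mantissa)
{
    int exp = (int)(raw_exponent & 0x7FFF) - 16383 - 63;
    if (exp < -63 || exp > 63)
        return 0;                   /* out of range: treat as invalid */
    if (exp >= 0)
        return mantissa << exp;
    return mantissa >> -exp;        /* truncating shift */
}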
/* decode_wave_header(): parse the verbatim RIFF/WAVE header (excerpt) */
if (bytestream2_get_le32(&gb) != MKTAG('R', 'I', 'F', 'F')) {
    /* ... */
}
/* ... */
if (bytestream2_get_le32(&gb) != MKTAG('W', 'A', 'V', 'E')) {
    /* ... */
}

/* skip chunks until the "fmt " chunk is found */
while (bytestream2_get_le32(&gb) != MKTAG('f', 'm', 't', ' ')) {
    len = bytestream2_get_le32(&gb);
    /* ... */
}
len = bytestream2_get_le32(&gb);
/* ... */
wave_format = bytestream2_get_le16(&gb);
switch (wave_format) {
/* ... */
}
/* ... */
bps = bytestream2_get_le16(&gb);
if (bps != 16 && bps != 8) {
    /* ... */
}
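The while loop in the excerpt skips RIFF chunks until it reaches "fmt ". The same walk over a plain byte buffer, without GetByteContext, might look like the hypothetical helper below (not from shorten.c; note that strict RIFF parsing also pads odd chunk lengths to even):

#include <stdint.h>
#include <string.h>

/* Return a pointer to the payload of the first chunk whose 4-byte tag
 * matches `tag`, or NULL if it is not found. Illustrative only. */
static const uint8_t *find_riff_chunk(const uint8_t *p, const uint8_t *end,
                                      const char tag[4])
{
    while (end - p >= 8) {
        uint32_t len = p[4] | p[5] << 8 | p[6] << 16 | (uint32_t)p[7] << 24;
        if (!memcmp(p, tag, 4))
            return p + 8;                   /* start of the chunk payload */
        if (len > (uint32_t)(end - p) - 8)
            return NULL;                    /* truncated input */
        p += 8 + len;                       /* skip header + payload */
    }
    return NULL;
}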
/* decode_subframe_lpc(): fixed or quantized LPC prediction plus Rice-coded residuals (excerpt) */
static int decode_subframe_lpc(ShortenContext *s, int command, int channel,
                               int residual_size, int32_t coffset)
{
    int pred_order, sum, qshift, init_sum, i, j;
    /* ... */
    if ((unsigned)pred_order > s->nwrap) {
        /* ... */
    }
    /* read the quantized LPC coefficients (FN_QLPC) or use the fixed set */
    for (i = 0; i < pred_order; i++)
        /* ... */

    /* remove the constant offset from the warm-up (history) samples */
    for (i = -pred_order; i < 0; i++)
        s->decoded[channel][i] -= (unsigned)coffset;

    /* decode the residuals and run the predictor */
    init_sum = pred_order ? (command == FN_QLPC ? s->lpcqoffset : 0) : coffset;
    for (i = 0; i < s->blocksize; i++) {
        sum = init_sum;
        for (j = 0; j < pred_order; j++)
            sum += coeffs[j] * (unsigned)s->decoded[channel][i - j - 1];
        s->decoded[channel][i] = get_sr_golomb_shorten(&s->gb, residual_size) +
                                 (unsigned)(sum >> qshift);
    }

    /* add the constant offset back in */
    for (i = 0; i < s->blocksize; i++)
        s->decoded[channel][i] += (unsigned)coffset;
    /* ... */
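The inner loops above are a plain FIR prediction over previously decoded samples. A standalone restatement follows (illustrative; shorten.c keeps the sum in 32-bit wrap-around arithmetic, the sketch widens to 64 bits for clarity). The classic Shorten fixed predictors use the coefficient rows {1}, {2, -1} and {3, -3, 1}, which is what the fixed_coeffs table listed further down holds; qshift is 0 for those and is read from the bitstream for FN_QLPC.

#include <stdint.h>

/* prev points just past the already-decoded history; prev[-1] is the most
 * recent sample. Returns the predicted value for the next sample. */
static int32_t predict_sample(const int32_t *prev, const int *coeffs,
                              int pred_order, int init_sum, int qshift)
{
    int64_t sum = init_sum;
    for (int j = 0; j < pred_order; j++)
        sum += (int64_t)coeffs[j] * prev[-(j + 1)];
    return (int32_t)(sum >> qshift);
}

/* Each output sample is then residual + predict_sample(...), with coffset
 * added back afterwards, as in the excerpt above. */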
/* read_header(): parse the Shorten stream header (excerpt) */
s->avctx->channels = s->channels;

/* get blocksize if version > 0 */
if (s->version > 0) {
    /* ... */
    av_log(s->avctx, AV_LOG_ERROR,
           "invalid or unsupported block size: %d\n", blocksize);
    /* ... */
    s->blocksize = blocksize;
    /* ... */
    if (maxnlpc > 1024U) {
        /* ... */
    }
    if (s->nmean > 32768U) {
        /* ... */
    }
}
/* ... */
if (s->avctx->extradata_size > 0)
    /* ... */

/* a verbatim section with the original WAVE/AIFF header must come first */
av_log(s->avctx, AV_LOG_ERROR,
       "missing verbatim section at beginning of stream\n");
/* ... */
for (i = 0; i < s->header_size; i++)
    /* ... */
/* shorten_decode_frame(): buffer packet data until a whole frame is available (excerpt) */
static int shorten_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
{
    int buf_size = avpkt->size;
    int i, input_buf_size = 0;
    /* ... */

    /* allocate the internal bitstream buffer */
    if (s->max_framesize == 0) {
        void *tmp_ptr;
        s->max_framesize = 8192; /* should be enough for the first header */
        tmp_ptr = av_fast_realloc(s->bitstream, &s->allocated_bitstream_size,
                                  s->max_framesize + AV_INPUT_BUFFER_PADDING_SIZE);
        if (!tmp_ptr) {
            s->max_framesize = 0;
            /* ... */
        }
        memset(tmp_ptr, 0, s->allocated_bitstream_size);
        s->bitstream = tmp_ptr;
    }

    /* append the current packet data to the bitstream buffer */
    buf_size       = FFMIN(buf_size, s->max_framesize - s->bitstream_size);
    input_buf_size = buf_size;

    if (s->bitstream_index + s->bitstream_size + buf_size + AV_INPUT_BUFFER_PADDING_SIZE >
        s->allocated_bitstream_size) {
        memmove(s->bitstream, &s->bitstream[s->bitstream_index],
                s->bitstream_size);
        s->bitstream_index = 0;
    }
    if (buf)
        memcpy(&s->bitstream[s->bitstream_index + s->bitstream_size], buf,
               buf_size);
    buf               = &s->bitstream[s->bitstream_index];
    buf_size         += s->bitstream_size;
    s->bitstream_size = buf_size;

    /* do not decode until the buffer holds at least max_framesize bytes or
     * this is the final (flush) packet */
    if (buf_size < s->max_framesize && avpkt->data) {
        *got_frame_ptr = 0;
        return input_buf_size;
    }
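The block above is the usual accumulate-then-decode pattern of an AV_CODEC_CAP_SUBFRAMES decoder: packet bytes are appended to an internal store and decoding only starts once max_framesize bytes (or the final, empty flush packet) are available. A condensed, hypothetical restatement; the names below are not fields of ShortenContext:

#include <stdint.h>
#include <string.h>

/* Append up to `pkt_size` bytes to the internal store and report whether a
 * full frame's worth of data is now buffered. Illustrative only. */
static int buffer_packet(uint8_t *store, unsigned *stored, unsigned max_frame,
                         const uint8_t *pkt, unsigned pkt_size)
{
    unsigned take = pkt_size;
    if (take > max_frame - *stored)
        take = max_frame - *stored;     /* never overfill the store */
    memcpy(store + *stored, pkt, take);
    *stored += take;
    return *stored >= max_frame;        /* 1 once decoding may begin */
}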
    /* the stream header (and the verbatim WAVE/AIFF header) must be parsed first */
    if (!s->got_header) {
        /* ... read_header() ... */

        /* grow the internal buffer so that a whole frame of the negotiated
         * block size fits */
        max_framesize = FFMAX(s->max_framesize, s->blocksize * s->channels * 8);
        tmp_ptr = av_fast_realloc(s->bitstream, &s->allocated_bitstream_size,
                                  max_framesize + AV_INPUT_BUFFER_PADDING_SIZE);
        /* ... */
        s->bitstream     = tmp_ptr;
        s->max_framesize = max_framesize;
    }
    if (s->got_quit_command) {
        /* ... */
    }

    /* process one command per iteration until every channel of the current
     * block has been decoded */
    while (s->cur_chan < s->channels) {
        /* ... */

        /* FN_BITSHIFT */
        s->bitshift = bitshift;
        /* ... */

        /* FN_BLOCKSIZE: only shrinking the block size is supported */
        if (blocksize > s->blocksize) {
            avpriv_report_missing_feature(avctx, "Increasing block size");
            /* ... */
        }
        /* ... "invalid or unsupported block size: %d\n", blocksize); ... */
        s->blocksize = blocksize;
        /* ... */

        /* FN_QUIT */
        s->got_quit_command = 1;
        /* ... */

        /* audio commands: read the Rice parameter for the residuals */
        int residual_size = 0;
        /* ... */
        if (residual_size > 30U) {
            /* ... */
        }

        /* compute the sample offset from the means of previous blocks */
        int32_t sum = (s->version < 2) ? 0 : s->nmean / 2;
        /* ... */
        coffset = sum / s->nmean;
        /* parses as (coffset >> (s->bitshift - 1)) >> 1 */
        coffset = s->bitshift == 0 ? coffset : coffset >> s->bitshift - 1 >> 1;

        /* decode this channel's samples (all-zero block or prediction) */
        for (i = 0; i < s->blocksize; i++)
            /* ... */
        if ((ret = decode_subframe_lpc(s, cmd, channel,
                                       residual_size, coffset)) < 0)
            /* ... */
        /* update the running mean for this channel (used by later blocks) */
        int64_t sum = (s->version < 2) ? 0 : s->blocksize / 2;
        for (i = 0; i < s->blocksize; i++)
            sum += s->decoded[channel][i];
        /* ... */
        if (s->version < 2)
            s->offset[channel][s->nmean - 1] = sum / s->blocksize;
        else
            s->offset[channel][s->nmean - 1] = s->bitshift == 32 ? 0
                : (sum / s->blocksize) * (1LL << s->bitshift);

        /* copy the last nwrap samples in front of the next block so the
         * predictor can reach across the block boundary */
        for (i = -s->nwrap; i < 0; i++)
            s->decoded[channel][i] = s->decoded[channel][i + s->blocksize];
    /* once every channel of the block is decoded, write the output frame */
    if (s->cur_chan == s->channels) {
        int16_t *samples_s16;
        /* ... */
        frame->nb_samples = s->blocksize;
        /* ... ff_get_buffer() ... */

        for (chan = 0; chan < s->channels; chan++) {
            /* ... */
            samples_s16 = ((int16_t **)frame->extended_data)[chan];
            for (i = 0; i < s->blocksize; i++) {
                switch (s->internal_ftype) {
                case TYPE_U8:
                    *samples_u8++ = av_clip_uint8(s->decoded[chan][i]);
                    break;
                /* ... 16-bit types ... */
                    *samples_s16++ = av_clip_int16(s->decoded[chan][i]);
                    break;
                }
            }
            /* 16-bit samples whose byte order differs from native are swapped */
            if (s->swap && s->internal_ftype != TYPE_U8)
                s->bdsp.bswap16_buf(((uint16_t **)frame->extended_data)[chan],
                                    ((uint16_t **)frame->extended_data)[chan],
                                    s->blocksize);
        }
        /* ... */
    }

    if (s->cur_chan < s->channels)
        *got_frame_ptr = 0;
    /* i holds the number of whole bytes consumed from the internal buffer */
    if (i > buf_size) {
        /* overread: drop the internal buffer */
        s->bitstream_size  = 0;
        s->bitstream_index = 0;
        return AVERROR_INVALIDDATA;
    }
    if (s->bitstream_size) {
        s->bitstream_index += i;
        s->bitstream_size  -= i;
        return input_buf_size;
    }
/* shorten_decode_close(): free the per-channel buffers (excerpt) */
for (i = 0; i < s->channels; i++) {
    /* ... */
}
static av_cold int init(AVCodecContext *avctx)
static int get_bits_left(GetBitContext *gb)
static av_cold int shorten_decode_init(AVCodecContext *avctx)
int sample_rate
samples per second
static enum AVSampleFormat sample_fmts[]
#define MKTAG(a, b, c, d)
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
static int get_bits_count(const GetBitContext *s)
static const uint8_t is_audio_command[10]
indicates if the FN_* command is audio or non-audio
static av_cold int end(AVCodecContext *avctx)
AVFrame: This structure describes decoded (raw) audio or video data.
static unsigned int get_ur_golomb_shorten(GetBitContext *gb, int k)
Read an unsigned Golomb-Rice code (Shorten variant); see the sketch after this list.
AVCodec ff_shorten_decoder
static int init_offset(ShortenContext *s)
int32_t * offset[MAX_CHANNELS]
uint8_t header[OUT_BUFFER_SIZE]
static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
static void skip_bits(GetBitContext *s, int n)
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static const int fixed_coeffs[][3]
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
static int decode_wave_header(AVCodecContext *avctx, const uint8_t *header, int header_size)
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
static int allocate_buffers(ShortenContext *s)
#define VERBATIM_BYTE_SIZE
static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
@ AV_SAMPLE_FMT_U8P
unsigned 8 bits, planar
int32_t * decoded[MAX_CHANNELS]
#define CANONICAL_HEADER_SIZE
static int decode_aiff_header(AVCodecContext *avctx, const uint8_t *header, int header_size)
void avpriv_report_missing_feature(void *avc, const char *msg, ...)
Log a generic warning message about a missing feature.
int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate, or free an array through a pointer to a pointer.
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
#define AV_LOG_INFO
Standard information.
static void fix_bitshift(ShortenContext *s, int32_t *buffer)
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
int32_t * decoded_base[MAX_CHANNELS]
AVSampleFormat
Audio sample formats.
const char * name
Name of the codec implementation.
static int get_sr_golomb_shorten(GetBitContext *gb, int k)
Read a signed Golomb-Rice code (Shorten variant).
static int shorten_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
static av_cold int shorten_decode_close(AVCodecContext *avctx)
unsigned int allocated_bitstream_size
#define AV_INPUT_BUFFER_PADDING_SIZE
#define FF_ARRAY_ELEMS(a)
static int decode_subframe_lpc(ShortenContext *s, int command, int channel, int residual_size, int32_t coffset)
AVCodecContext: main external API structure.
#define DEFAULT_BLOCK_SIZE
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket. Normally demuxers return one frame at a time, ...
static int read_header(ShortenContext *s)
AVPacket: This structure stores compressed data.
static unsigned int get_uint(ShortenContext *s, int k)
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
#define VERBATIM_CKSIZE_SIZE
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
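Referenced from the get_ur_golomb_shorten()/get_sr_golomb_shorten() entries above: a minimal, self-contained sketch of Rice decoding as Shorten uses it, written against a toy bit reader rather than GetBitContext. The unsigned code is a unary quotient followed by k literal bits; the signed variant reads k+1 bits and folds the sign out of the low bit. The unary part is shown here as a run of 0 bits terminated by a 1; this is an illustration of the coding scheme, not the shorten.c implementation.

#include <stddef.h>
#include <stdint.h>

/* Toy MSB-first bit reader used only for this illustration. */
typedef struct { const uint8_t *buf; size_t pos; } BitReader;

static unsigned read_bit(BitReader *br)
{
    unsigned b = (br->buf[br->pos >> 3] >> (7 - (br->pos & 7))) & 1;
    br->pos++;
    return b;
}

/* Unsigned Rice code: unary quotient terminated by a 1 bit, then k raw bits. */
static unsigned rice_read_unsigned(BitReader *br, int k)
{
    unsigned q = 0, r = 0;
    while (!read_bit(br))           /* count leading 0 bits */
        q++;
    for (int i = 0; i < k; i++)     /* k-bit remainder, MSB first */
        r = (r << 1) | read_bit(br);
    return (q << k) | r;
}

/* Signed variant: one extra low bit selects the sign (zig-zag style). */
static int rice_read_signed(BitReader *br, int k)
{
    unsigned u = rice_read_unsigned(br, k + 1);
    return (u & 1) ? (int)~(u >> 1) : (int)(u >> 1);
}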