#define WMAPRO_MAX_CHANNELS    8
#define MAX_SUBFRAMES  32

#define MAX_FRAMESIZE  32768

#define XMA_MAX_STREAMS         8
#define XMA_MAX_CHANNELS_STREAM 2
#define XMA_MAX_CHANNELS        (XMA_MAX_STREAMS * XMA_MAX_CHANNELS_STREAM)

#define WMAPRO_BLOCK_MIN_BITS  6
#define WMAPRO_BLOCK_MAX_BITS 13
#define WMAPRO_BLOCK_MIN_SIZE (1 << WMAPRO_BLOCK_MIN_BITS)
#define WMAPRO_BLOCK_MAX_SIZE (1 << WMAPRO_BLOCK_MAX_BITS)
#define WMAPRO_BLOCK_SIZES    (WMAPRO_BLOCK_MAX_BITS - WMAPRO_BLOCK_MIN_BITS + 1)
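
/* Illustrative sketch, not part of wmapro.c: with the constants above the
 * decoder handles block sizes from 1 << 6 = 64 up to 1 << 13 = 8192 samples,
 * i.e. 13 - 6 + 1 = 8 distinct sizes. check_block_size_constants() is a
 * hypothetical helper that only verifies this arithmetic. */
static void check_block_size_constants(void)
{
    av_assert0(WMAPRO_BLOCK_MIN_SIZE == 64);
    av_assert0(WMAPRO_BLOCK_MAX_SIZE == 8192);
    av_assert0(WMAPRO_BLOCK_SIZES    == 8);
}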
 
#define SCALEVLCBITS       8
#define VEC4MAXDEPTH    ((HUFF_VEC4_MAXBITS+VLCBITS-1)/VLCBITS)
#define VEC2MAXDEPTH    ((HUFF_VEC2_MAXBITS+VLCBITS-1)/VLCBITS)
#define VEC1MAXDEPTH    ((HUFF_VEC1_MAXBITS+VLCBITS-1)/VLCBITS)
#define SCALEMAXDEPTH   ((HUFF_SCALE_MAXBITS+SCALEVLCBITS-1)/SCALEVLCBITS)
#define SCALERLMAXDEPTH ((HUFF_SCALE_RL_MAXBITS+VLCBITS-1)/VLCBITS)
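
/* Illustrative note, not part of wmapro.c: each *MAXDEPTH macro is a ceiling
 * division, giving the maximum number of table lookups get_vlc2() may need
 * when a code of up to MAXBITS bits is read through a table indexed with
 * VLCBITS (or SCALEVLCBITS) bits at a time. For example, a hypothetical
 * 17-bit-long code read via a 9-bit table needs (17 + 9 - 1) / 9 = 2 lookups. */
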
#define PRINT(a, b)     av_log(s->avctx, AV_LOG_DEBUG, " %s = %d\n", a, b);
#define PRINT_HEX(a, b) av_log(s->avctx, AV_LOG_DEBUG, " %s = %"PRIx32"\n", a, b);

    PRINT("ed sample bit depth", s->bits_per_sample);
    PRINT_HEX("ed decode flags", s->decode_flags);
    PRINT("samples per frame",   s->samples_per_frame);
    PRINT("log2 frame size",     s->log2_frame_size);
    PRINT("max num subframes",   s->max_num_subframes);
    PRINT("len prefix",          s->len_prefix);
    PRINT("num channels",        s->nb_channels);
 
    unsigned int channel_mask;
    int log2_max_num_subframes;
    int num_possible_block_sizes;
 
        s->decode_flags    = 0x10d6;
        s->bits_per_sample = 16;

        s->decode_flags    = 0x10d6;
        s->bits_per_sample = 16;

        s->nb_channels = edata_ptr[32 + ((edata_ptr[0]==3)?0:8) + 4*num_stream + 0];

        s->decode_flags    = 0x10d6;
        s->bits_per_sample = 16;

        s->nb_channels     = edata_ptr[8 + 20*num_stream + 17];

        s->decode_flags    = AV_RL16(edata_ptr+14);
        channel_mask       = AV_RL32(edata_ptr+2);
        s->bits_per_sample = AV_RL16(edata_ptr);

        if (s->bits_per_sample > 32 || s->bits_per_sample < 1) {
 
    if (s->log2_frame_size > 25) {

    s->len_prefix  = (s->decode_flags & 0x40);

        s->samples_per_frame = 1 << bits;

        s->samples_per_frame = 512;

    log2_max_num_subframes       = ((s->decode_flags & 0x38) >> 3);
    s->max_num_subframes         = 1 << log2_max_num_subframes;
    if (s->max_num_subframes == 16 || s->max_num_subframes == 4)
        s->max_subframe_len_bit = 1;
    s->subframe_len_bits = av_log2(log2_max_num_subframes) + 1;

    num_possible_block_sizes     = log2_max_num_subframes + 1;
    s->min_samples_per_subframe  = s->samples_per_frame / s->max_num_subframes;
    s->dynamic_range_compression = (s->decode_flags & 0x80);

               s->max_num_subframes);

               s->min_samples_per_subframe);
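
/* Illustrative worked example, not part of wmapro.c: how the tile configuration
 * falls out of decode_flags. With the XMA value 0x10d6 used above:
 *   log2_max_num_subframes    = (0x10d6 & 0x38) >> 3 = 2, so max_num_subframes = 4,
 *   subframe_len_bits         = av_log2(2) + 1        = 2,
 *   len_prefix                = 0x10d6 & 0x40 (set)   -> frames carry a length prefix,
 *   dynamic_range_compression = 0x10d6 & 0x80 (set),
 * and with samples_per_frame = 512 (the XMA case) the minimum subframe length
 * is 512 / 4 = 128 samples. */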
 
    if (s->avctx->sample_rate <= 0) {

    if (s->nb_channels <= 0) {

    for (i = 0; i < s->nb_channels; i++)
        s->channel[i].prev_block_len = s->samples_per_frame;

    if (channel_mask & 8) {

            if (channel_mask & mask)
 
    for (i = 0; i < num_possible_block_sizes; i++) {
        int subframe_len = s->samples_per_frame >> i;

        s->sfb_offsets[i][0] = 0;

        for (x = 0; x < MAX_BANDS-1 && s->sfb_offsets[i][band - 1] < subframe_len; x++) {

            if (offset > s->sfb_offsets[i][band - 1])

            if (offset >= subframe_len)

        s->sfb_offsets[i][band - 1] = subframe_len;
        s->num_sfb[i]               = band - 1;
        if (s->num_sfb[i] <= 0) {
 
    for (i = 0; i < num_possible_block_sizes; i++) {

        for (b = 0; b < s->num_sfb[i]; b++) {

                           + s->sfb_offsets[i][b + 1] - 1) << i) >> 1;
            for (x = 0; x < num_possible_block_sizes; x++) {

                while (s->sfb_offsets[x][v + 1] << x < offset) {

                s->sf_offsets[i][x][b] = v;

                     / (1ll << (s->bits_per_sample - 1)));
 
    for (i = 0; i < num_possible_block_sizes; i++) {
        int block_size = s->samples_per_frame >> i;
        int cutoff = (440*block_size + 3LL * (s->avctx->sample_rate >> 1) - 1)
                     / s->avctx->sample_rate;
        s->subwoofer_cutoffs[i] = av_clip(cutoff, 4, block_size);
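
/* Worked example (illustrative, not part of wmapro.c): for a 48000 Hz stream
 * and a 2048-sample block the cutoff above evaluates to
 *   (440 * 2048 + 3 * 24000 - 1) / 48000 = 973119 / 48000 = 20,
 * which av_clip() leaves unchanged since it lies within [4, 2048]. */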
 
    for (i = 0; i < 33; i++)
 
    int frame_len_shift = 0;

    if (offset == s->samples_per_frame - s->min_samples_per_subframe)
        return s->min_samples_per_subframe;

    if (s->max_subframe_len_bit) {

            frame_len_shift = 1 + get_bits(&s->gb, s->subframe_len_bits-1);

        frame_len_shift = get_bits(&s->gb, s->subframe_len_bits);

    subframe_len = s->samples_per_frame >> frame_len_shift;

    if (subframe_len < s->min_samples_per_subframe ||
        subframe_len > s->samples_per_frame) {
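
/* Illustrative note, not part of wmapro.c: the transmitted shift selects a
 * power-of-two fraction of the frame. For example, with samples_per_frame = 2048
 * and a decoded frame_len_shift of 2, subframe_len = 2048 >> 2 = 512 samples,
 * which is then validated against [min_samples_per_subframe, samples_per_frame]. */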
 
    int channels_for_cur_subframe = s->nb_channels;
    int fixed_channel_layout = 0;
    int min_channel_len = 0;

    for (c = 0; c < s->nb_channels; c++)
        s->channel[c].num_subframes = 0;

        fixed_channel_layout = 1;

        for (c = 0; c < s->nb_channels; c++) {
            if (num_samples[c] == min_channel_len) {
                if (fixed_channel_layout || channels_for_cur_subframe == 1 ||
                   (min_channel_len == s->samples_per_frame - s->min_samples_per_subframe))
                    contains_subframe[c] = 1;

                contains_subframe[c] = 0;

        min_channel_len += subframe_len;
        for (c = 0; c < s->nb_channels; c++) {

            if (contains_subframe[c]) {

                           "broken frame: num subframes > 31\n");

                num_samples[c] += subframe_len;

                if (num_samples[c] > s->samples_per_frame) {

                           "channel len > samples_per_frame\n");

            } else if (num_samples[c] <= min_channel_len) {
                if (num_samples[c] < min_channel_len) {
                    channels_for_cur_subframe = 0;
                    min_channel_len = num_samples[c];

                ++channels_for_cur_subframe;

    } while (min_channel_len < s->samples_per_frame);
 
    for (c = 0; c < s->nb_channels; c++) {

        for (i = 0; i < s->channel[c].num_subframes; i++) {
            ff_dlog(s->avctx, "frame[%"PRIu32"] channel[%i] subframe[%i]"
                    " len %i\n", s->frame_num, c, i,
                    s->channel[c].subframe_len[i]);
            s->channel[c].subframe_offset[i] = offset;
 
        for (x = 0; x < i; x++) {

            for (y = 0; y < i + 1; y++) {

                int n = rotation_offset[offset + x];

                                               (v1 * sinv) - (v2 * cosv);

                                               (v1 * cosv) + (v2 * sinv);
 
    if (s->nb_channels > 1) {
        int remaining_channels = s->channels_for_cur_subframe;

                                  "Channel transform bit");

        for (s->num_chgroups = 0; remaining_channels &&
             s->num_chgroups < s->channels_for_cur_subframe; s->num_chgroups++) {

            if (remaining_channels > 2) {
                for (i = 0; i < s->channels_for_cur_subframe; i++) {
                    int channel_idx = s->channel_indexes_for_cur_subframe[i];
                    if (!s->channel[channel_idx].grouped

                        s->channel[channel_idx].grouped = 1;
                        *channel_data++ = s->channel[channel_idx].coeffs;

                for (i = 0; i < s->channels_for_cur_subframe; i++) {
                    int channel_idx = s->channel_indexes_for_cur_subframe[i];
                    if (!s->channel[channel_idx].grouped)
                        *channel_data++ = s->channel[channel_idx].coeffs;
                    s->channel[channel_idx].grouped = 1;

                                              "Unknown channel transform type");

                    if (s->nb_channels == 2) {

                                                  "Coupled channels > 6");

                    for (i = 0; i < s->num_bands; i++) {
 
    static const uint32_t fval_tab[16] = {
        0x00000000, 0x3f800000, 0x40000000, 0x40400000,
        0x40800000, 0x40a00000, 0x40c00000, 0x40e00000,
        0x41000000, 0x41100000, 0x41200000, 0x41300000,
        0x41400000, 0x41500000, 0x41600000, 0x41700000,
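
/* Illustrative sketch, not part of wmapro.c: the fval_tab entries are the
 * IEEE-754 bit patterns of the floats 0.0 .. 15.0 (0x3f800000 == 1.0f,
 * 0x40000000 == 2.0f, ...), so a decoded 4-bit value can be mapped to a float
 * coefficient by bit manipulation alone. check_fval_tab() is a hypothetical
 * helper, assuming av_float2int() from libavutil/intfloat.h: */
static void check_fval_tab(const uint32_t *fval_tab)
{
    for (int i = 0; i < 16; i++)
        av_assert0(fval_tab[i] == av_float2int((float)i));
}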
 
    ff_dlog(s->avctx, "decode coefficients for channel %i\n", c);

    while ((s->transmit_num_vec_coeffs || !rl_mode) &&

            for (i = 0; i < 4; i += 2) {

        for (i = 0; i < 4; i++) {

                ci->coeffs[cur_coeff] = 0;

                rl_mode |= (++num_zeros > s->subframe_len >> 8);

    if (cur_coeff < s->subframe_len) {
        memset(&ci->coeffs[cur_coeff], 0,
               sizeof(*ci->coeffs) * (s->subframe_len - cur_coeff));

                                    cur_coeff, s->subframe_len,
                                    s->subframe_len, s->esc_len, 0))
 
    for (i = 0; i < s->channels_for_cur_subframe; i++) {
        int c = s->channel_indexes_for_cur_subframe[i];

        s->channel[c].scale_factors = s->channel[c].saved_scale_factors[!s->channel[c].scale_factor_idx];
        sf_end = s->channel[c].scale_factors + s->num_bands;

        if (s->channel[c].reuse_sf) {
            const int8_t* sf_offsets = s->sf_offsets[s->table_idx][s->channel[c].table_idx];

            for (b = 0; b < s->num_bands; b++)
                s->channel[c].scale_factors[b] =
                    s->channel[c].saved_scale_factors[s->channel[c].scale_factor_idx][*sf_offsets++];

        if (!s->channel[c].cur_subframe || get_bits1(&s->gb)) {

            if (!s->channel[c].reuse_sf) {

                s->channel[c].scale_factor_step = get_bits(&s->gb, 2) + 1;
                val = 45 / s->channel[c].scale_factor_step;
                for (sf = s->channel[c].scale_factors; sf < sf_end; sf++) {

                for (i = 0; i < s->num_bands; i++) {

                        sign = (code & 1) - 1;
                        skip = (code & 0x3f) >> 1;
                    } else if (idx == 1) {

                    if (i >= s->num_bands) {

                               "invalid scale factor coding\n");

                    s->channel[c].scale_factors[i] += (val ^ sign) - sign;

            s->channel[c].scale_factor_idx = !s->channel[c].scale_factor_idx;
            s->channel[c].table_idx = s->table_idx;
            s->channel[c].reuse_sf  = 1;

        s->channel[c].max_scale_factor = s->channel[c].scale_factors[0];
        for (sf = s->channel[c].scale_factors + 1; sf < sf_end; sf++) {
            s->channel[c].max_scale_factor =
                FFMAX(s->channel[c].max_scale_factor, *sf);
 
    for (i = 0; i < s->num_chgroups; i++) {
        if (s->chgroup[i].transform) {

            const int num_channels = s->chgroup[i].num_channels;
            float** ch_data = s->chgroup[i].channel_data;
            float** ch_end = ch_data + num_channels;
            const int8_t* tb = s->chgroup[i].transform_band;

            for (sfb = s->cur_sfb_offsets;
                 sfb < s->cur_sfb_offsets + s->num_bands; sfb++) {

                    for (y = sfb[0]; y < FFMIN(sfb[1], s->subframe_len); y++) {
                        const float* mat = s->chgroup[i].decorrelation_matrix;
                        const float* data_end = data + num_channels;
                        float* data_ptr = data;

                        for (ch = ch_data; ch < ch_end; ch++)
                            *data_ptr++ = (*ch)[y];

                        for (ch = ch_data; ch < ch_end; ch++) {

                            while (data_ptr < data_end)
                                sum += *data_ptr++ * *mat++;

                } else if (s->nb_channels == 2) {
                    int len = FFMIN(sfb[1], s->subframe_len) - sfb[0];
                    s->fdsp->vector_fmul_scalar(ch_data[0] + sfb[0],
                                               ch_data[0] + sfb[0],

                    s->fdsp->vector_fmul_scalar(ch_data[1] + sfb[0],
                                               ch_data[1] + sfb[0],
 
    for (i = 0; i < s->channels_for_cur_subframe; i++) {
        int c = s->channel_indexes_for_cur_subframe[i];

        int winlen = s->channel[c].prev_block_len;
        float* start = s->channel[c].coeffs - (winlen >> 1);

        if (s->subframe_len < winlen) {
            start += (winlen - s->subframe_len) >> 1;
            winlen = s->subframe_len;

        s->channel[c].prev_block_len = s->subframe_len;
 
    int offset = s->samples_per_frame;
    int subframe_len = s->samples_per_frame;

    int total_samples   = s->samples_per_frame * s->nb_channels;
    int transmit_coeffs = 0;
    int cur_subwoofer_cutoff;

    for (i = 0; i < s->nb_channels; i++) {
        s->channel[i].grouped = 0;
        if (offset > s->channel[i].decoded_samples) {
            offset = s->channel[i].decoded_samples;

                s->channel[i].subframe_len[s->channel[i].cur_subframe];

            "processing subframe with offset %i len %i\n", offset, subframe_len);

    s->channels_for_cur_subframe = 0;
    for (i = 0; i < s->nb_channels; i++) {
        const int cur_subframe = s->channel[i].cur_subframe;

        total_samples -= s->channel[i].decoded_samples;

        if (offset == s->channel[i].decoded_samples &&
            subframe_len == s->channel[i].subframe_len[cur_subframe]) {
            total_samples -= s->channel[i].subframe_len[cur_subframe];
            s->channel[i].decoded_samples +=
                s->channel[i].subframe_len[cur_subframe];
            s->channel_indexes_for_cur_subframe[s->channels_for_cur_subframe] = i;
            ++s->channels_for_cur_subframe;

        s->parsed_all_subframes = 1;

    ff_dlog(s->avctx, "subframe is part of %i channels\n",
            s->channels_for_cur_subframe);

    s->table_idx         = av_log2(s->samples_per_frame/subframe_len);
    s->num_bands         = s->num_sfb[s->table_idx];
    s->cur_sfb_offsets   = s->sfb_offsets[s->table_idx];
    cur_subwoofer_cutoff = s->subwoofer_cutoffs[s->table_idx];
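
    /* Worked example (illustrative, not part of wmapro.c): table_idx counts how
     * many times the current subframe is halved relative to a full frame. For a
     * 2048-sample frame split into 512-sample subframes,
     *   table_idx = av_log2(2048 / 512) = av_log2(4) = 2,
     * which selects the matching num_sfb/sfb_offsets/subwoofer_cutoffs entries. */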
 
    offset += s->samples_per_frame >> 1;

    for (i = 0; i < s->channels_for_cur_subframe; i++) {
        int c = s->channel_indexes_for_cur_subframe[i];

        s->channel[c].coeffs = &s->channel[c].out[offset];

    s->subframe_len = subframe_len;
    s->esc_len = av_log2(s->subframe_len - 1) + 1;
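
    /* Worked example (illustrative, not part of wmapro.c): for a 512-sample
     * subframe, esc_len = av_log2(511) + 1 = 8 + 1 = 9, i.e. just enough bits
     * to code any value in the range [0, subframe_len - 1]. */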
 
        if (!(num_fill_bits = get_bits(&s->gb, 2))) {

        if (num_fill_bits >= 0) {

    for (i = 0; i < s->channels_for_cur_subframe; i++) {
        int c = s->channel_indexes_for_cur_subframe[i];
        if ((s->channel[c].transmit_coefs = get_bits1(&s->gb)))
            transmit_coeffs = 1;

    if (transmit_coeffs) {

        int quant_step = 90 * s->bits_per_sample >> 4;
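
        /* Worked example (illustrative, not part of wmapro.c): for 16-bit input
         * the starting quantization step is 90 * 16 >> 4 = 1440 >> 4 = 90; it is
         * then adjusted by the step values decoded below. */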
 
        if ((s->transmit_num_vec_coeffs = get_bits1(&s->gb))) {
            int num_bits = av_log2((s->subframe_len + 3)/4) + 1;
            for (i = 0; i < s->channels_for_cur_subframe; i++) {
                int c = s->channel_indexes_for_cur_subframe[i];
                int num_vec_coeffs = get_bits(&s->gb, num_bits) << 2;
                if (num_vec_coeffs > s->subframe_len) {

                s->channel[c].num_vec_coeffs = num_vec_coeffs;

            for (i = 0; i < s->channels_for_cur_subframe; i++) {
                int c = s->channel_indexes_for_cur_subframe[i];
                s->channel[c].num_vec_coeffs = s->subframe_len;

            const int sign = (step == 31) - 1;

            quant_step += ((quant + step) ^ sign) - sign;
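
            /* Illustrative note, not part of wmapro.c: (step == 31) - 1 is 0
             * when step == 31 and -1 (all bits set) otherwise, so
             * ((quant + step) ^ sign) - sign adds the magnitude unchanged in
             * the first case and negated in the second. The scale factor
             * deltas above use the same conditional-negation trick. */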
 
        if (quant_step < 0) {

        if (s->channels_for_cur_subframe == 1) {
            s->channel[s->channel_indexes_for_cur_subframe[0]].quant_step = quant_step;

            for (i = 0; i < s->channels_for_cur_subframe; i++) {
                int c = s->channel_indexes_for_cur_subframe[i];
                s->channel[c].quant_step = quant_step;

                        s->channel[c].quant_step += get_bits(&s->gb, modifier_len) + 1;

                        ++s->channel[c].quant_step;

    ff_dlog(s->avctx, "BITSTREAM: subframe header length was %i\n",

    for (i = 0; i < s->channels_for_cur_subframe; i++) {
        int c = s->channel_indexes_for_cur_subframe[i];
        if (s->channel[c].transmit_coefs &&

            memset(s->channel[c].coeffs, 0,
                   sizeof(*s->channel[c].coeffs) * subframe_len);

    ff_dlog(s->avctx, "BITSTREAM: subframe length was %i\n",
 
    if (transmit_coeffs) {

        for (i = 0; i < s->channels_for_cur_subframe; i++) {
            int c = s->channel_indexes_for_cur_subframe[i];
            const int* sf = s->channel[c].scale_factors;

            if (c == s->lfe_channel)
                memset(&s->tmp[cur_subwoofer_cutoff], 0, sizeof(*s->tmp) *
                       (subframe_len - cur_subwoofer_cutoff));

            for (b = 0; b < s->num_bands; b++) {
                const int end = FFMIN(s->cur_sfb_offsets[b+1], s->subframe_len);
                const int exp = s->channel[c].quant_step -
                            (s->channel[c].max_scale_factor - *sf++) *
                            s->channel[c].scale_factor_step;

                int start = s->cur_sfb_offsets[b];
                s->fdsp->vector_fmul_scalar(s->tmp + start,

    for (i = 0; i < s->channels_for_cur_subframe; i++) {
        int c = s->channel_indexes_for_cur_subframe[i];
        if (s->channel[c].cur_subframe >= s->channel[c].num_subframes) {

        ++s->channel[c].cur_subframe;
 
    int more_frames = 0;

    ff_dlog(s->avctx, "decoding frame with length %x\n", len);

            for (i = 0; i < s->nb_channels * s->nb_channels; i++)

    if (s->dynamic_range_compression) {

        ff_dlog(s->avctx, "drc_gain %i\n", s->drc_gain);

            ff_dlog(s->avctx, "start skip: %i\n", skip);

            ff_dlog(s->avctx, "end skip: %i\n", skip);

    ff_dlog(s->avctx, "BITSTREAM: frame header length was %i\n",

    s->parsed_all_subframes = 0;
    for (i = 0; i < s->nb_channels; i++) {
        s->channel[i].decoded_samples = 0;
        s->channel[i].cur_subframe    = 0;
        s->channel[i].reuse_sf        = 0;

    while (!s->parsed_all_subframes) {

    for (i = 0; i < s->nb_channels; i++)
        memcpy(frame->extended_data[i], s->channel[i].out,
               s->samples_per_frame * sizeof(*s->channel[i].out));

    for (i = 0; i < s->nb_channels; i++) {

        memcpy(&s->channel[i].out[0],
               &s->channel[i].out[s->samples_per_frame],
               s->samples_per_frame * sizeof(*s->channel[i].out) >> 1);

    if (s->skip_frame) {
 
    if (s->len_prefix) {

                   "frame[%"PRIu32"] would have to skip %i bits\n",

        s->num_saved_bits = s->frame_offset;

        buflen = (s->num_saved_bits      + len + 7) >> 3;

    s->num_saved_bits += len;
 
    int buf_size       = avpkt->size;
    int num_bits_prev_frame;
    int packet_sequence_number;

        for (i = 0; i < s->nb_channels; i++) {
            memset(frame->extended_data[i], 0,
            s->samples_per_frame * sizeof(*s->channel[i].out));

            memcpy(frame->extended_data[i], s->channel[i].out,
                   s->samples_per_frame * sizeof(*s->channel[i].out) >> 1);

    else if (s->packet_done || s->packet_loss) {

        s->buf_bit_size = buf_size << 3;

            packet_sequence_number = get_bits(gb, 4);

            packet_sequence_number = 0;

        num_bits_prev_frame = get_bits(gb, s->log2_frame_size);

                num_bits_prev_frame);

            ((s->packet_sequence_number + 1) & 0xF) != packet_sequence_number) {

                   "Packet loss detected! seq %"PRIx8" vs %x\n",
                   s->packet_sequence_number, packet_sequence_number);

        s->packet_sequence_number = packet_sequence_number;

        if (num_bits_prev_frame > 0) {

            if (num_bits_prev_frame >= remaining_packet_bits) {
                num_bits_prev_frame = remaining_packet_bits;

            ff_dlog(avctx, "accumulated %x bits of frame data\n",
                    s->num_saved_bits - s->frame_offset);

            if (!s->packet_loss)

        } else if (s->num_saved_bits - s->frame_offset) {
            ff_dlog(avctx, "ignoring %x previously saved bits\n",
                    s->num_saved_bits - s->frame_offset);

        if (s->packet_loss) {

            s->num_saved_bits = 0;

        if (avpkt->size < s->next_packet_start) {

        s->buf_bit_size = (avpkt->size - s->next_packet_start) << 3;

            if (!s->packet_loss)

        } else if (!s->len_prefix

    if (s->packet_done && !s->packet_loss &&
 
                                int *got_frame_ptr, AVPacket *avpkt)

    frame->nb_samples = s->samples_per_frame;

                             int *got_frame_ptr, AVPacket *avpkt)
 
    int got_stream_frame_ptr = 0;

    if (!s->frames[s->current_stream]->data[0]) {
        s->frames[s->current_stream]->nb_samples = 512;

                        &got_stream_frame_ptr, avpkt);

    if (got_stream_frame_ptr && s->offset[s->current_stream] >= 64) {
        got_stream_frame_ptr = 0;

    if (got_stream_frame_ptr) {
        int start_ch = s->start_channel[s->current_stream];
        memcpy(&s->samples[start_ch + 0][s->offset[s->current_stream] * 512],
               s->frames[s->current_stream]->extended_data[0], 512 * 4);
        if (s->xma[s->current_stream].nb_channels > 1)
            memcpy(&s->samples[start_ch + 1][s->offset[s->current_stream] * 512],
                   s->frames[s->current_stream]->extended_data[1], 512 * 4);
        s->offset[s->current_stream]++;
    } else if (ret < 0) {
        memset(s->offset, 0, sizeof(s->offset));
        s->current_stream = 0;
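
    /* Illustrative note, not part of wmapro.c: each successfully decoded stream
     * frame contributes 512 float samples (512 * 4 bytes) per channel, written
     * at s->samples[channel][offset * 512]; with the 512 * 64 element
     * per-channel buffer this allows up to 64 buffered frames per stream,
     * matching the offset >= 64 check above. */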
 
    if (s->xma[s->current_stream].packet_done ||
        s->xma[s->current_stream].packet_loss) {

        if (s->xma[s->current_stream].skip_packets != 0) {

            min[0] = s->xma[0].skip_packets;

            for (i = 1; i < s->num_streams; i++) {
                if (s->xma[i].skip_packets < min[0]) {
                    min[0] = s->xma[i].skip_packets;

            s->current_stream = min[1];

        for (i = 0; i < s->num_streams; i++) {
            s->xma[i].skip_packets = FFMAX(0, s->xma[i].skip_packets - 1);
 
        for (i = 0; i < s->num_streams; i++) {

            for (i = 0; i < s->num_streams; i++) {
                int start_ch = s->start_channel[i];
                memcpy(frame->extended_data[start_ch + 0], s->samples[start_ch + 0], frame->nb_samples * 4);
                if (s->xma[i].nb_channels > 1)
                    memcpy(frame->extended_data[start_ch + 1], s->samples[start_ch + 1], frame->nb_samples * 4);

                    memmove(s->samples[start_ch + 0], s->samples[start_ch + 0] + frame->nb_samples, s->offset[i] * 4 * 512);
                    if (s->xma[i].nb_channels > 1)
                        memmove(s->samples[start_ch + 1], s->samples[start_ch + 1] + frame->nb_samples, s->offset[i] * 4 * 512);
 
    int i, ret, start_channels = 0;

        s->num_streams = (avctx->channels + 1) / 2;

    for (i = 0; i < s->num_streams; i++) {

        s->start_channel[i] = start_channels;
        start_channels += s->xma[i].nb_channels;

    if (start_channels != avctx->channels)
 
    for (i = 0; i < s->num_streams; i++) {

    for (i = 0; i < s->nb_channels; i++)
        memset(s->channel[i].out, 0, s->samples_per_frame *
               sizeof(*s->channel[i].out));

    s->skip_packets = 0;

    for (i = 0; i < s->num_streams; i++)

    memset(s->offset, 0, sizeof(s->offset));
    s->current_stream = 0;
 
  
uint16_t num_vec_coeffs
number of vector coded coefficients
 
static const float *const default_decorrelation[]
default decorrelation matrix offsets
 
static av_cold int xma_decode_init(AVCodecContext *avctx)
 
int subframe_offset
subframe offset in the bit reservoir
 
@ AV_SAMPLE_FMT_FLTP
float, planar
 
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
 
static av_always_inline double ff_exp10(double x)
Compute 10^x for floating point values.
 
static av_cold int init(AVCodecContext *avctx)
 
static int get_bits_left(GetBitContext *gb)
 
void AAC_RENAME() ff_init_ff_sine_windows(int index)
initialize the specified entry of ff_sine_windows
 
static int decode_subframe(WMAProDecodeCtx *s)
Decode a single subframe (block).
 
 
static uint8_t * append(uint8_t *buf, const uint8_t *src, int size)
 
GetBitContext gb
bitstream reader context
 
uint16_t samples_per_frame
number of samples to output
 
uint64_t channel_layout
Audio channel layout.
 
int8_t scale_factor_step
scaling step for the current subframe
 
static const uint8_t scale_huffbits[HUFF_SCALE_SIZE]
 
static void wmapro_window(WMAProDecodeCtx *s)
Apply sine window and reconstruct the output buffer.
 
#define WMAPRO_BLOCK_MAX_BITS
log2 of max block size
 
uint16_t min_samples_per_subframe
 
int sample_rate
samples per second
 
static enum AVSampleFormat sample_fmts[]
 
uint16_t subframe_offset[MAX_SUBFRAMES]
subframe positions in the current frame
 
static int decode_tilehdr(WMAProDecodeCtx *s)
Decode how the data in the frame is split into subframes.
 
#define INIT_VLC_STATIC(vlc, bits, a, b, c, d, e, f, g, static_size)
 
 
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
 
static int get_bits_count(const GetBitContext *s)
 
static const uint16_t coef0_run[HUFF_COEF0_SIZE]
 
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
 
AVCodecContext * avctx
codec context for av_log
 
static av_cold int end(AVCodecContext *avctx)
 
static VLC sf_rl_vlc
scale factor run length vlc
 
This structure describes decoded (raw) audio or video data.
 
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
 
 
static av_cold int wmapro_decode_init(AVCodecContext *avctx)
Initialize the decoder.
 
static void flush(WMAProDecodeCtx *s)
 
static int decode_packet(AVCodecContext *avctx, WMAProDecodeCtx *s, void *data, int *got_frame_ptr, AVPacket *avpkt)
 
static av_cold int get_rate(AVCodecContext *avctx)
 
#define WMAPRO_BLOCK_MIN_SIZE
minimum block size
 
static int decode_scale_factors(WMAProDecodeCtx *s)
Extract scale factors from the bitstream.
 
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
 
static const uint8_t scale_rl_huffbits[HUFF_SCALE_RL_SIZE]
 
#define WMAPRO_BLOCK_MAX_SIZE
maximum block size
 
float samples[XMA_MAX_CHANNELS][512 *64]
 
static av_always_inline uint32_t av_float2int(float f)
Reinterpret a float as a 32-bit integer.
 
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
 
PutBitContext pb
context for filling the frame_data buffer
 
static av_cold int decode_init(WMAProDecodeCtx *s, AVCodecContext *avctx, int num_stream)
Initialize the decoder.
 
static av_cold int decode_end(WMAProDecodeCtx *s)
Uninitialize the decoder and free all resources.
 
int16_t sfb_offsets[WMAPRO_BLOCK_SIZES][MAX_BANDS]
scale factor band offsets (multiples of 4)
 
static void skip_bits(GetBitContext *s, int n)
 
static float sin64[33]
sine table for decorrelation
 
#define HUFF_SCALE_RL_SIZE
 
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
 
AVCodec ff_wmapro_decoder
wmapro decoder
 
static SDL_Window * window
 
static VLC vec2_vlc
2 coefficients per symbol
 
static av_cold int wmapro_decode_end(AVCodecContext *avctx)
 
uint8_t num_chgroups
number of channel groups
 
uint8_t drc_gain
gain for the DRC tool
 
static int put_bits_left(PutBitContext *s)
 
int flags
AV_CODEC_FLAG_*.
 
int8_t num_bands
number of scale factor bands
 
float tmp[WMAPRO_BLOCK_MAX_SIZE]
IMDCT output buffer.
 
static const uint8_t coef1_huffbits[555]
 
int8_t sf_offsets[WMAPRO_BLOCK_SIZES][WMAPRO_BLOCK_SIZES][MAX_BANDS]
scale factor resample matrix
 
WMAProChannelGrp chgroup[WMAPRO_MAX_CHANNELS]
channel group information
 
static const uint32_t coef1_huffcodes[555]
 
int max_scale_factor
maximum scale factor for the current subframe
 
int quant_step
quantization step for the current subframe
 
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
 
uint8_t table_idx
index in sf_offsets for the scale factor reference block
 
static int decode_subframe_length(WMAProDecodeCtx *s, int offset)
Decode the subframe length.
 
float out[WMAPRO_BLOCK_MAX_SIZE+WMAPRO_BLOCK_MAX_SIZE/2]
output buffer
 
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
 
int buf_bit_size
buffer size in bits
 
static const uint16_t symbol_to_vec4[HUFF_VEC4_SIZE]
 
uint8_t subframe_len_bits
number of bits used for the subframe length
 
static const uint16_t mask[17]
 
static void decode_decorrelation_matrix(WMAProDecodeCtx *s, WMAProChannelGrp *chgroup)
Calculate a decorrelation matrix from the bitstream parameters.
 
frame specific decoder context for a single channel
 
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
 
int * scale_factors
pointer to the scale factor values used for decoding
 
int8_t skip_frame
skip output step
 
int16_t subwoofer_cutoffs[WMAPRO_BLOCK_SIZES]
subwoofer cutoff values
 
uint32_t decode_flags
used compression features
 
static const uint16_t vec2_huffcodes[HUFF_VEC2_SIZE]
 
uint8_t packet_loss
set in case of bitstream error
 
static const uint8_t symbol_to_vec2[HUFF_VEC2_SIZE]
 
static const uint16_t vec4_huffcodes[HUFF_VEC4_SIZE]
 
#define av_assert0(cond)
assert() equivalent, that is always enabled.
 
static void inverse_channel_transform(WMAProDecodeCtx *s)
Reconstruct the individual channel data.
 
static int get_sbits(GetBitContext *s, int n)
 
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
 
#define AV_RL16(p)
read a 16-bit little-endian value from memory
 
WMAProDecodeCtx xma[XMA_MAX_STREAMS]
 
static int decode_coeffs(WMAProDecodeCtx *s, int c)
Extract the coefficients from the bitstream.
 
#define XMA_MAX_CHANNELS_STREAM
 
int16_t prev_block_len
length of the previous block
 
int8_t transmit_num_vec_coeffs
number of vector coded coefficients is part of the bitstream
 
int8_t channel_indexes_for_cur_subframe[WMAPRO_MAX_CHANNELS]
 
uint8_t grouped
channel is part of a group
 
int start_channel[XMA_MAX_STREAMS]
 
static const uint8_t vec4_huffbits[HUFF_VEC4_SIZE]
 
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
 
static void wmapro_flush(AVCodecContext *avctx)
Clear decoder buffers (for seeking).
 
const float * windows[WMAPRO_BLOCK_SIZES]
windows for the different block sizes
 
int8_t transform
transform on / off
 
static unsigned int get_bits1(GetBitContext *s)
 
int8_t nb_channels
number of channels in stream (XMA1/2)
 
static void xma_flush(AVCodecContext *avctx)
 
#define WMAPRO_MAX_CHANNELS
current decoder limitations
 
channel group for channel transformations
 
 
static const uint8_t scale_rl_level[HUFF_SCALE_RL_SIZE]
 
uint8_t eof_done
set when EOF reached and extra subframe is written (XMA1/2)
 
void(* imdct_half)(struct FFTContext *s, FFTSample *output, const FFTSample *input)
 
void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
 
uint32_t frame_num
current frame number (not used for decoding)
 
static VLC sf_vlc
scale factor DPCM vlc
 
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
 
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
 
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
 
float * coeffs
pointer to the subframe decode buffer
 
uint8_t len_prefix
frame is prefixed with its length
 
static const uint16_t critical_freq[]
frequencies to divide the frequency spectrum into scale factor bands
 
#define WMAPRO_BLOCK_SIZES
possible block sizes
 
enum AVSampleFormat sample_fmt
audio sample format
 
uint8_t frame_data[MAX_FRAMESIZE+AV_INPUT_BUFFER_PADDING_SIZE]
compressed frame data
 
static const uint8_t coef0_huffbits[666]
 
int8_t scale_factor_idx
index for the transmitted scale factor values (used for resampling)
 
#define MAX_SUBFRAMES
max number of subframes per channel
 
 
static const uint8_t vec1_huffbits[HUFF_VEC1_SIZE]
 
 
FFTContext mdct_ctx[WMAPRO_BLOCK_SIZES]
MDCT context per block size.
 
int8_t transform_band[MAX_BANDS]
controls if the transform is enabled for a certain band
 
uint8_t max_num_subframes
 
int8_t reuse_sf
share scale factors between subframes
 
int channels
number of audio channels
 
#define DECLARE_ALIGNED(n, t, v)
 
int next_packet_start
start offset of the next wma packet in the demuxer packet
 
static const uint32_t scale_rl_huffcodes[HUFF_SCALE_RL_SIZE]
 
#define i(width, name, range_min, range_max)
 
 
static int put_bits_count(PutBitContext *s)
 
uint8_t cur_subframe
current subframe number
 
static const uint8_t scale_rl_run[HUFF_SCALE_RL_SIZE]
 
uint16_t decoded_samples
number of already processed samples
 
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
 
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
 
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
 
static const float coef1_level[HUFF_COEF1_SIZE]
 
static VLC vec1_vlc
1 coefficient per symbol
 
AVSampleFormat
Audio sample formats.
 
#define MAX_BANDS
max number of scale factor bands
 
static const uint16_t coef1_run[HUFF_COEF1_SIZE]
 
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
 
const char * name
Name of the codec implementation.
 
static VLC coef_vlc[2]
coefficient run length vlc codes
 
tables for wmapro decoding
 
static int xma_decode_packet(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
 
GetBitContext pgb
bitstream reader context for the packet
 
int block_align
number of bytes per packet if constant and known or 0. Used by some WAV based audio codecs.
 
int offset[XMA_MAX_STREAMS]
 
static void save_bits(WMAProDecodeCtx *s, GetBitContext *gb, int len, int append)
Fill the bit reservoir with a (partial) frame.
 
 
uint8_t num_channels
number of channels in the group
 
const AVS_VideoInfo int align
 
static const uint32_t coef0_huffcodes[666]
 
static av_cold void dump_context(WMAProDecodeCtx *s)
helper function to print the most important members of the context
 
#define AV_INPUT_BUFFER_PADDING_SIZE
 
static int decode_frame(WMAProDecodeCtx *s, AVFrame *frame, int *got_frame_ptr)
Decode one WMA frame.
 
#define FF_ARRAY_ELEMS(a)
 
#define AV_RL32(p)
read a 32-bit little-endian value from memory
 
static const uint16_t scale_huffcodes[HUFF_SCALE_SIZE]
 
int8_t channels_for_cur_subframe
number of channels that contain the subframe
 
main external API structure.
 
av_cold int ff_wma_get_frame_len_bits(int sample_rate, int version, unsigned int decode_flags)
Get the samples per frame for this stream.
 
int8_t esc_len
length of escaped coefficients
 
uint8_t table_idx
index for the num_sfb, sfb_offsets, sf_offsets and subwoofer_cutoffs tables
 
int8_t num_sfb[WMAPRO_BLOCK_SIZES]
scale factor bands per block size
 
static const uint8_t vec2_huffbits[HUFF_VEC2_SIZE]
 
uint16_t subframe_len[MAX_SUBFRAMES]
subframe length in samples
 
uint8_t bits_per_sample
integer audio sample size for the unscaled IMDCT output (used to scale to [-1.0, 1.0])
 
uint8_t max_subframe_len_bit
flag indicating that the subframe is of maximum size when the first subframe length bit is 1
 
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
 
uint8_t packet_offset
frame offset in the packet
 
uint8_t skip_packets
packets to skip to find next packet in a stream (XMA1/2)
 
float * channel_data[WMAPRO_MAX_CHANNELS]
transformation coefficients
 
int saved_scale_factors[2][MAX_BANDS]
resampled and (previously) transmitted scale factor values
 
int frame_offset
frame offset in the bit reservoir
 
AVFrame * frames[XMA_MAX_STREAMS]
 
static av_always_inline int get_bitsz(GetBitContext *s, int n)
Read 0-25 bits.
 
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket Normally demuxers return one frame at a time,...
 
uint8_t packet_done
set when a packet is fully decoded
 
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
 
int frame_number
Frame counter, set by libavcodec.
 
#define avpriv_request_sample(...)
 
static av_cold int xma_decode_end(AVCodecContext *avctx)
 
static int wmapro_decode_packet(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
Decode a single WMA packet.
 
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
 
int8_t lfe_channel
lfe channel index
 
int ff_wma_run_level_decode(AVCodecContext *avctx, GetBitContext *gb, VLC *vlc, const float *level_table, const uint16_t *run_table, int version, WMACoef *ptr, int offset, int num_coefs, int block_len, int frame_len_bits, int coef_nb_bits)
Decode run level compressed coefficients.
 
int16_t * cur_sfb_offsets
sfb offsets for the current block
 
static int decode_channel_transform(WMAProDecodeCtx *s)
Decode channel transformation parameters.
 
int16_t subframe_len
current subframe length
 
int8_t parsed_all_subframes
all subframes decoded?
 
This structure stores compressed data.
 
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
 
static const float coef0_level[HUFF_COEF0_SIZE]
 
#define MAX_FRAMESIZE
maximum compressed frame size
 
uint8_t packet_sequence_number
current packet number
 
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
 
uint8_t dynamic_range_compression
frame contains DRC data
 
static int remaining_bits(WMAProDecodeCtx *s, GetBitContext *gb)
Calculate remaining input buffer length.
 
unsigned int ff_wma_get_large_val(GetBitContext *gb)
Decode an uncompressed coefficient.
 
#define FF_DEBUG_BITSTREAM
 
int num_saved_bits
saved number of bits
 
VLC_TYPE(* table)[2]
code, bits
 
float decorrelation_matrix[WMAPRO_MAX_CHANNELS *WMAPRO_MAX_CHANNELS]
 
static VLC vec4_vlc
4 coefficients per symbol
 
static const uint16_t vec1_huffcodes[HUFF_VEC1_SIZE]
 
#define WMAPRO_BLOCK_MIN_BITS
log2 of min block size