21 #define BITSTREAM_READER_LE
86 { 0, -1, -1, -1, -1 },
96 { 0, -1, -1, -1, -1 },
134 for (i = 0; i < 256; i++)
137 for (i = 0; i < 16; i++)
146 int i, ps, si, code, step_i;
152 value = (((ps & 0x7fffff) ^ -si) + si) * (1.0f / 0x7fffff);
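The expression at line 152 turns a 23-bit magnitude ps and a sign flag si into a float in roughly [-1, 1]: (x ^ -si) + si negates x when si is 1 (the two's-complement identity -x == ~x + 1) and leaves it untouched when si is 0. A minimal standalone sketch of the same trick, with illustrative names not taken from the decoder:

#include <stdio.h>

/* Conditionally negate a 23-bit magnitude using -x == (x ^ -1) + 1,
 * then normalise it, mirroring the pattern at line 152. */
static float sign_magnitude_to_float(unsigned ps, int si)
{
    int mag = ps & 0x7fffff;        /* 23-bit magnitude              */
    int val = (mag ^ -si) + si;     /* negate if and only if si == 1 */
    return val * (1.0f / 0x7fffff); /* scale into roughly [-1, 1]    */
}

int main(void)
{
    /* Prints approximately 0.5 and -0.5. */
    printf("%f %f\n", sign_magnitude_to_float(0x400000, 0),
                      sign_magnitude_to_float(0x400000, 1));
    return 0;
}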
155 if (step_i > step_max) {
162 for (i = 0; i < 64; i++) {
188 step_i = av_clip(step_i, 0, step_max);
191 s->lfe_data[i] = value * s->lfe_scale;
200 int i, ps, si, code, step_i;
206 value = (((ps & 0x7fff) ^ -si) + si) * (1.0f / 0x7fff);
209 if (step_i > step_max) {
216 for (i = 0; i < 64; i++) {
238 step_i = av_clip(step_i, 0, step_max);
241 s->lfe_data[i] = value * s->lfe_scale;
262 if (chunk->len >= 52)
264 if (chunk->len >= 35)
285 int sf, sf_idx, ch, main_ch, freq;
286 int ch_nbits = av_ceil_log2(s->nchannels_total);
289 for (sf = 0; sf < 1 << group; sf += diff ? 8 : 1) {
290 sf_idx = ((s->framenum << group) + sf) & 31;
291 s->tonal_bounds[group][sf_idx][0] = s->ntones;
294 for (freq = 1;; freq++) {
311 if (freq >> (5 - group) > s->nsubbands * 4 - 6) {
320 + s->limited_range - 2;
321 amp[main_ch] = main_amp < AMP_MAX ? main_amp : 0;
325 for (ch = 0; ch < s->nchannels_total; ch++) {
342 t->x_freq = freq >> (5 - group);
343 t->f_delt = (freq & ((1 << (5 - group)) - 1)) << group;
349 for (ch = 0; ch < s->nchannels; ch++) {
356 s->tonal_bounds[group][sf_idx][1] = s->ntones;
380 for (sb = 0; sb < 6; sb++)
386 for (group = 0; group < 5; group++) {
427 int i, sf, prev, next, dist;
436 for (sf = 0; sf < 7; sf += dist) {
456 next = prev + ((next + 1) >> 1);
458 next = prev - ( next >> 1);
464 scf[sf + 1] = prev + ((next - prev) >> 1);
466 scf[sf + 1] = prev - ((prev - next) >> 1);
471 scf[sf + 1] = prev + ( (next - prev) >> 2);
472 scf[sf + 2] = prev + ( (next - prev) >> 1);
473 scf[sf + 3] = prev + (((next - prev) * 3) >> 2);
475 scf[sf + 1] = prev - ( (prev - next) >> 2);
476 scf[sf + 2] = prev - ( (prev - next) >> 1);
477 scf[sf + 3] = prev - (((prev - next) * 3) >> 2);
482 for (i = 1; i < dist; i++)
483 scf[sf + i] = prev + (next - prev) * i / dist;
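Lines 456-483 rebuild the scale factors that were not transmitted: a gap of two samples gets its midpoint, a gap of four gets the quarter points, and the general case at line 483 is a plain integer linear ramp. A small sketch of that general case, with an illustrative helper name:

/* Fill scf[sf+1 .. sf+dist-1] by linear interpolation between the two
 * decoded anchors prev and next, as in line 483. */
static void interp_scale_factors(unsigned char *scf, int sf, int dist,
                                 int prev, int next)
{
    int i;
    for (i = 1; i < dist; i++)
        scf[sf + i] = prev + (next - prev) * i / dist;
}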
511 int ch, sb, sf, nsubbands, ret;
522 for (sb = 2; sb < nsubbands; sb++) {
537 for (sb = 0; sb < s->nsubbands - 4; sb++) {
540 if (sb + 4 < s->min_mono_subband)
543 s->grid_3_avg[ch2][sb] = s->grid_3_avg[ch1][sb];
562 nsubbands = (s->nsubbands - s->min_mono_subband + 3) / 4;
563 for (sb = 0; sb < nsubbands; sb++)
564 for (ch = ch1; ch <= ch2; ch++)
565 for (sf = 1; sf <= 4; sf++)
569 s->part_stereo_pres |= 1 << ch1;
579 int sb, nsubbands, ret;
583 for (sb = 2; sb < nsubbands; sb++) {
592 for (sb = 0; sb < s->nsubbands - 4; sb++) {
593 if (sb + 4 >= s->min_mono_subband) {
607 for (ch = ch1; ch <= ch2; ch++) {
608 if ((ch != ch1 && sb + 4 >= s->min_mono_subband) != flag)
611 if (s->grid_3_pres[ch] & (1U << sb))
614 for (i = 0; i < 8; i++) {
621 s->grid_3_pres[ch] |= 1U << sb;
627 s->lbr_rand = 1103515245U * s->lbr_rand + 12345U;
628 return s->lbr_rand * s->sb_scf[sb];
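lbr_rand (lines 627-628) is a plain linear congruential generator with the classic constants 1103515245 and 12345; the raw 32-bit state is then multiplied by the per-subband factor sb_scf so the injected noise sits at the right level for that subband. A self-contained sketch with the state and scale passed in explicitly:

#include <stdint.h>

/* LCG noise source scaled per subband, after lines 627-628.
 * 'state' and 'sb_scale' stand in for s->lbr_rand and s->sb_scf[sb]. */
static float lbr_noise(uint32_t *state, float sb_scale)
{
    *state = 1103515245u * *state + 12345u;
    /* The unsigned state (0 .. 2^32-1) times a tiny per-subband scale
     * yields the pseudo-random sample. */
    return *state * sb_scale;
}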
637 int i, j, code, nblocks, coding_method;
644 switch (quant_level) {
649 for (j = 0; j < 8; j++)
667 for (j = 0; j < 5; j++)
678 for (j = 0; j < 3; j++)
691 for (i = 0; i < nblocks; i++)
705 s->ch_pres[ch] |= 1U << sb;
709 int start_sb, int end_sb, int flag)
711 int sb, sb_g3, sb_reorder, quant_level;
713 for (sb = start_sb; sb < end_sb; sb++) {
717 } else if (flag && sb < s->max_mono_subband) {
718 sb_reorder = s->sb_indices[sb];
722 sb_reorder = get_bits(&s->gb, s->limited_range + 3);
725 s->sb_indices[sb] = sb_reorder;
727 if (sb_reorder >= s->nsubbands)
732 for (sb_g3 = 0; sb_g3 < s->g3_avg_only_start_sb - 4; sb_g3++)
734 } else if (sb < 12 && sb_reorder >= 4) {
742 if (!flag || sb_reorder >= s->max_mono_subband)
743 s->sec_ch_sbms[ch1 / 2][sb_reorder] = get_bits(&s->gb, 8);
744 if (flag && sb_reorder >= s->min_mono_subband)
745 s->sec_ch_lrms[ch1 / 2][sb_reorder] = get_bits(&s->gb, 8);
748 quant_level = s->quant_levels[ch1 / 2][sb];
753 if (sb < s->max_mono_subband && sb_reorder >= s->min_mono_subband) {
755 parse_ch(s, ch1, sb_reorder, quant_level, 0);
757 parse_ch(s, ch2, sb_reorder, quant_level, 1);
759 parse_ch(s, ch1, sb_reorder, quant_level, 0);
761 parse_ch(s, ch2, sb_reorder, quant_level, 0);
775 for (i = 0; i < 8; i++) {
777 for (j = 0; j < (i + 1) / 2; j++) {
778 float tmp1 = coeff[ j ];
779 float tmp2 = coeff[i - j - 1];
780 coeff[ j ] = tmp1 + rc * tmp2;
781 coeff[i - j - 1] = tmp2 + rc * tmp1;
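The loop at lines 775-781 is the standard step-up recursion that converts reflection coefficients into direct-form LPC coefficients, updating the array symmetrically from both ends for each new reflection coefficient rc. A standalone sketch of the same recursion; the table lookup that produces rc from the decoded codes, and the final store of rc as the newest coefficient, are assumptions based on the usual form of the algorithm rather than on lines shown here:

/* Step-up recursion: reflection coefficients rc[0..n-1] -> direct-form
 * coefficients a[0..n-1], following the update pattern of lines 777-781. */
static void reflection_to_direct(float *a, const float *rc, int n)
{
    int i, j;
    for (i = 0; i < n; i++) {
        for (j = 0; j < (i + 1) / 2; j++) {
            float tmp1 = a[j];
            float tmp2 = a[i - j - 1];
            a[j]         = tmp1 + rc[i] * tmp2;
            a[i - j - 1] = tmp2 + rc[i] * tmp1;
        }
        a[i] = rc[i]; /* append the newest coefficient (assumed step) */
    }
}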
789 int f = s->framenum & 1;
790 int i, sb, ch, codes[16];
793 for (sb = start_sb; sb < end_sb; sb++) {
794 int ncodes = 8 * (1 + (sb < 2));
795 for (ch = ch1; ch <= ch2; ch++) {
798 for (i = 0; i < ncodes; i++)
800 for (i = 0; i < ncodes / 8; i++)
830 for (sb = 0; sb < s->nsubbands; sb++) {
831 int f = sb * s->limited_rate / s->nsubbands;
832 int a = 18000 / (12 * f / 1000 + 100 + 40 * st) + 20 * ol;
834 quant_levels[sb] = 1;
836 quant_levels[sb] = 2;
838 quant_levels[sb] = 3;
840 quant_levels[sb] = 4;
842 quant_levels[sb] = 5;
846 for (sb = 0; sb < 8; sb++)
848 for (; sb < s->nsubbands; sb++)
849 s->quant_levels[ch1 / 2][sb] = quant_levels[sb];
862 for (sb = 0; sb < 2; sb++)
863 for (ch = ch1; ch <= ch2; ch++)
871 int start_sb, int end_sb, int flag)
873 int i, j, sb, ch, nsubbands;
876 if (end_sb > nsubbands)
879 for (sb = start_sb; sb < end_sb; sb++) {
880 for (ch = ch1; ch <= ch2; ch++) {
885 memcpy(g2_scf, s->grid_2_scf[ch1][sb], 64);
890 for (i = 0; i < 8; i++, g2_scf += 8) {
892 memset(g2_scf, 0, 64 - i * 8);
897 for (j = 0; j < 8; j++) {
903 memset(g2_scf, 0, 8);
940 if ((ret = parse_ts(s, ch1, ch2, 6, s->max_mono_subband, 0)) < 0)
948 if ((ret = parse_ts(s, ch1, ch2, s->min_mono_subband, s->nsubbands, 1)) < 0)
955 double scale = (-1.0 / (1 << 17)) * sqrt(1 << (2 - s->limited_range));
956 int i, br_per_ch = s->bit_rate_scaled / s->nchannels_total;
965 for (i = 0; i < 32 << s->freq_range; i++)
968 if (br_per_ch < 14000)
970 else if (br_per_ch < 32000)
971 scale = (br_per_ch - 14000) * (1.0 / 120000) + 0.85;
975 scale *= 1.0 / INT_MAX;
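As a worked example of the middle branch: with br_per_ch = 24000, line 971 gives scale = (24000 - 14000) / 120000 + 0.85 ≈ 0.933, and line 975 then folds in a factor of 1.0 / INT_MAX, so reading the two lines together the combined factor maps a full-range 32-bit integer to roughly ±0.933.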
977 for (i = 0; i < s->nsubbands; i++) {
981 s->sb_scf[i] = (i - 1) * 0.25 * 0.785 * scale;
983 s->sb_scf[i] = 0.785 * scale;
986 s->lfe_scale = (16 << s->freq_range) * 0.0000078265894;
995 int nsamples = nchsamples * s->nchannels * s->nsubbands;
1005 for (ch = 0; ch < s->nchannels; ch++) {
1006 for (sb = 0; sb < s->nsubbands; sb++) {
1007 s->time_samples[ch][sb] = ptr;
1017 int old_rate = s->sample_rate;
1018 int old_band_limit = s->band_limit;
1019 int old_nchannels = s->nchannels;
1021 unsigned int sr_code;
1024 sr_code = bytestream2_get_byte(gb);
1030 if (s->sample_rate > 48000) {
1036 s->ch_mask = bytestream2_get_le16(gb);
1037 if (!(s->ch_mask & 0x7)) {
1041 if ((s->ch_mask & 0xfff0) && !(s->warned & 1)) {
1047 version = bytestream2_get_le16(gb);
1048 if ((version & 0xff00) != 0x0800) {
1054 s->flags = bytestream2_get_byte(gb);
1060 if (!(s->warned & 2)) {
1068 bit_rate_hi = bytestream2_get_byte(gb);
1071 s->bit_rate_orig = bytestream2_get_le16(gb) | ((bit_rate_hi & 0x0F) << 16);
1074 s->bit_rate_scaled = bytestream2_get_le16(gb) | ((bit_rate_hi & 0xF0) << 12);
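Read together, these bytestream2 calls describe the fixed part of the decoder-init chunk as it appears in this excerpt: a sample-rate code byte (line 1024), a little-endian 16-bit channel mask (1036), a 16-bit version word whose high byte must be 0x08 (1047-1048), a flags byte (1054), and a shared high byte that extends the two little-endian 16-bit bit-rate fields to 20 bits (1068-1074). A hedged sketch that restates those reads in listing order; the struct, the function name, and the assumption that nothing sits between the reads are illustrative, since the excerpt skips some source lines:

#include "bytestream.h" /* FFmpeg-internal GetByteContext helpers */

struct lbr_header {
    unsigned sr_code;         /* index into ff_dca_sampling_freqs       */
    unsigned ch_mask;         /* speaker mask; low 3 bits must be set   */
    unsigned version;         /* (version & 0xff00) must equal 0x0800   */
    unsigned flags;           /* LBR_FLAG_* bits, including band limit  */
    unsigned bit_rate_orig;   /* 20-bit original bit rate               */
    unsigned bit_rate_scaled; /* 20-bit scaled bit rate                 */
};

static void read_lbr_header(GetByteContext *gb, struct lbr_header *h)
{
    unsigned bit_rate_hi;

    h->sr_code         = bytestream2_get_byte(gb);
    h->ch_mask         = bytestream2_get_le16(gb);
    h->version         = bytestream2_get_le16(gb);
    h->flags           = bytestream2_get_byte(gb);
    bit_rate_hi        = bytestream2_get_byte(gb);
    h->bit_rate_orig   = bytestream2_get_le16(gb) | ((bit_rate_hi & 0x0F) << 16);
    h->bit_rate_scaled = bytestream2_get_le16(gb) | ((bit_rate_hi & 0xF0) << 12);
}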
1100 if (s->bit_rate_orig >= 44000 * (s->nchannels_total + 2))
1102 else if (s->bit_rate_orig >= 25000 * (s->nchannels_total + 2))
1108 s->limited_rate = s->sample_rate >> s->band_limit;
1109 s->limited_range = s->freq_range - s->band_limit;
1110 if (s->limited_range < 0) {
1115 s->nsubbands = 8 << s->limited_range;
1118 if (s->g3_avg_only_start_sb > s->nsubbands)
1119 s->g3_avg_only_start_sb = s->nsubbands;
1121 s->min_mono_subband = s->nsubbands * 2000 / (s->limited_rate / 2);
1122 if (s->min_mono_subband > s->nsubbands)
1123 s->min_mono_subband = s->nsubbands;
1125 s->max_mono_subband = s->nsubbands * 14000 / (s->limited_rate / 2);
1126 if (s->max_mono_subband > s->nsubbands)
1127 s->max_mono_subband = s->nsubbands;
1130 if ((old_rate != s->sample_rate || old_band_limit != s->band_limit) && init_sample_rate(s) < 0)
1149 s->nchannels_total += 2;
1156 if (old_rate != s->sample_rate
1157 || old_band_limit != s->band_limit
1158 || old_nchannels != s->nchannels) {
1181 int i, ch, sb, sf, ret, group, chunk_id, chunk_len;
1192 switch (bytestream2_get_byte(&gb)) {
1194 if (!s->sample_rate) {
1211 chunk_id = bytestream2_get_byte(&gb);
1212 chunk_len = (chunk_id & 0x80) ? bytestream2_get_be16(&gb) : bytestream2_get_byte(&gb);
1223 switch (chunk_id & 0x7f) {
1226 int checksum = bytestream2_get_be16(&gb);
1227 uint16_t res = chunk_id;
1228 res += (chunk_len >> 8) & 0xff;
1229 res += chunk_len & 0xff;
1230 for (i = 0; i < chunk_len - 2; i++)
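The checksum branch (lines 1226-1230) reads a big-endian 16-bit checksum and then sums the chunk id, both bytes of the chunk length, and the remaining payload bytes into a 16-bit accumulator; the loop body that adds each byte and the final comparison are outside this excerpt, so the sketch below completes them in the obvious way with illustrative names:

#include <stdint.h>
#include <stddef.h>

/* 16-bit additive checksum over the chunk header bytes and payload,
 * following the accumulation started at lines 1227-1229. */
static uint16_t lbr_chunk_sum(uint8_t chunk_id, uint16_t chunk_len,
                              const uint8_t *payload, size_t payload_len)
{
    uint16_t res = chunk_id;
    size_t i;

    res += (chunk_len >> 8) & 0xff;
    res += chunk_len & 0xff;
    for (i = 0; i < payload_len; i++)
        res += payload[i];
    return res;
}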
1249 memset(s->quant_levels, 0, sizeof(s->quant_levels));
1250 memset(s->sb_indices, 0xff, sizeof(s->sb_indices));
1251 memset(s->sec_ch_sbms, 0, sizeof(s->sec_ch_sbms));
1252 memset(s->sec_ch_lrms, 0, sizeof(s->sec_ch_lrms));
1253 memset(s->ch_pres, 0, sizeof(s->ch_pres));
1254 memset(s->grid_1_scf, 0, sizeof(s->grid_1_scf));
1255 memset(s->grid_2_scf, 0, sizeof(s->grid_2_scf));
1256 memset(s->grid_3_avg, 0, sizeof(s->grid_3_avg));
1257 memset(s->grid_3_scf, 0, sizeof(s->grid_3_scf));
1258 memset(s->grid_3_pres, 0, sizeof(s->grid_3_pres));
1259 memset(s->tonal_scf, 0, sizeof(s->tonal_scf));
1260 memset(s->lfe_data, 0, sizeof(s->lfe_data));
1261 s->part_stereo_pres = 0;
1262 s->framenum = (s->framenum + 1) & 31;
1264 for (ch = 0; ch < s->nchannels; ch++) {
1265 for (sb = 0; sb < s->nsubbands / 4; sb++) {
1266 s->part_stereo[ch][sb][0] = s->part_stereo[ch][sb][4];
1267 s->part_stereo[ch][sb][4] = 16;
1271 memset(s->lpc_coeff[s->framenum & 1], 0, sizeof(s->lpc_coeff[0]));
1273 for (group = 0; group < 5; group++) {
1274 for (sf = 0; sf < 1 << group; sf++) {
1275 int sf_idx = ((s->framenum << group) + sf) & 31;
1276 s->tonal_bounds[group][sf_idx][0] =
1277 s->tonal_bounds[group][sf_idx][1] = s->ntones;
1283 chunk_id = bytestream2_get_byte(&gb);
1284 chunk_len = (chunk_id & 0x80) ? bytestream2_get_be16(&gb) : bytestream2_get_byte(&gb);
1296 chunk.lfe.len = chunk_len;
1297 chunk.lfe.data = gb.buffer;
1303 chunk.tonal.id = chunk_id;
1304 chunk.tonal.len = chunk_len;
1305 chunk.tonal.data = gb.buffer;
1314 chunk.tonal_grp[i].id = i;
1315 chunk.tonal_grp[i].len = chunk_len;
1316 chunk.tonal_grp[i].data = gb.buffer;
1325 chunk.tonal_grp[i].id = i;
1326 chunk.tonal_grp[i].len = chunk_len;
1327 chunk.tonal_grp[i].data = gb.buffer;
1334 chunk.grid1[i].len = chunk_len;
1335 chunk.grid1[i].data = gb.buffer;
1342 chunk.hr_grid[i].len = chunk_len;
1343 chunk.hr_grid[i].data = gb.buffer;
1350 chunk.ts1[i].len = chunk_len;
1351 chunk.ts1[i].data = gb.buffer;
1358 chunk.ts2[i].len = chunk_len;
1359 chunk.ts2[i].data = gb.buffer;
1371 for (i = 0; i < 5; i++)
1374 for (i = 0; i < (s->nchannels + 1) / 2; i++) {
1376 int ch2 = FFMIN(ch1 + 1, s->nchannels - 1);
1385 if (!chunk.grid1[i].len || !chunk.hr_grid[i].len || !chunk.ts1[i].len)
1408 for (ch = ch1; ch <= ch2; ch++) {
1409 for (sb = 0; sb < s->nsubbands; sb++) {
1412 uint8_t *g1_scf_a = s->grid_1_scf[ch][g1_sb ];
1413 uint8_t *g1_scf_b = s->grid_1_scf[ch][g1_sb + 1];
1421 for (i = 0; i < 8; i++) {
1422 int scf = w1 * g1_scf_a[i] + w2 * g1_scf_b[i];
1423 hr_scf[i] = scf >> 7;
1426 int8_t *g3_scf = s->grid_3_scf[ch][sb - 4];
1427 int g3_avg = s->grid_3_avg[ch][sb - 4];
1429 for (i = 0; i < 8; i++) {
1430 int scf = w1 * g1_scf_a[i] + w2 * g1_scf_b[i];
1431 hr_scf[i] = (scf >> 7) - g3_avg - g3_scf[i];
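decode_grid (described below as "Reconstruct high-frequency resolution grid from first and third grids") blends two neighbouring grid-1 scale-factor rows with fixed-point weights and a >> 7 normalisation, and above subband 4 additionally subtracts the grid-3 average and per-sample deltas, as lines 1421-1431 show. A compact sketch of that blend; the output type and the claim that the weights come from ff_dca_grid_1_weights are assumptions:

#include <stdint.h>

/* Blend two grid-1 scale-factor rows with fixed-point weights and
 * optionally apply the grid-3 correction, mirroring lines 1421-1431. */
static void blend_hr_scf(int *hr_scf,
                         const uint8_t *g1_a, const uint8_t *g1_b,
                         int w1, int w2,                   /* fixed-point weights */
                         const int8_t *g3_scf, int g3_avg) /* NULL / 0 below sb 4 */
{
    int i;
    for (i = 0; i < 8; i++) {
        int scf = w1 * g1_a[i] + w2 * g1_b[i];
        hr_scf[i] = g3_scf ? (scf >> 7) - g3_avg - g3_scf[i] : scf >> 7;
    }
}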
1443 int i, j, k, ch, sb;
1445 for (ch = ch1; ch <= ch2; ch++) {
1446 for (sb = 0; sb < s->nsubbands; sb++) {
1449 if (s->ch_pres[ch] & (1U << sb))
1455 } else if (sb < 10) {
1460 float accum[8] = { 0 };
1463 for (k = 2; k < 6; k++) {
1464 float *other = &s->time_samples[ch][k][i * 8];
1465 for (j = 0; j < 8; j++)
1466 accum[j] += fabs(other[j]);
1469 for (j = 0; j < 8; j++)
1481 for (i = 0; i < nsamples; i++) {
1483 for (j = 0; j < 8; j++)
1491 int f = s->framenum & 1;
1494 for (ch = ch1; ch <= ch2; ch++) {
1497 if (!(s->ch_pres[ch] & (1U << sb)))
1515 for (sb = 0; sb < s->nsubbands; sb++) {
1517 for (ch = ch1; ch <= ch2; ch++) {
1522 unsigned int scf = hr_scf[i];
1525 for (j = 0; j < 16; j++)
1531 unsigned int scf = hr_scf[i / 8] - g2_scf[i];
1542 float *samples_l = s->time_samples[ch1][sb];
1543 float *samples_r = s->time_samples[ch2][sb];
1544 int ch2_pres = s->ch_pres[ch2] & (1U << sb);
1547 int sbms = (s->sec_ch_sbms[ch1 / 2][sb] >> i) & 1;
1548 int lrms = (s->sec_ch_lrms[ch1 / 2][sb] >> i) & 1;
1550 if (sb >= s->min_mono_subband) {
1551 if (lrms && ch2_pres) {
1553 for (j = 0; j < 16; j++) {
1554 float tmp = samples_l[j];
1555 samples_l[j] = samples_r[j];
1556 samples_r[j] = -tmp;
1559 for (j = 0; j < 16; j++) {
1560 float tmp = samples_l[j];
1561 samples_l[j] = samples_r[j];
1565 }
else if (!ch2_pres) {
1566 if (sbms && (
s->part_stereo_pres & (1 << ch1))) {
1567 for (j = 0; j < 16; j++)
1568 samples_r[j] = -samples_l[j];
1570 for (j = 0; j < 16; j++)
1571 samples_r[j] = samples_l[j];
1574 }
else if (sbms && ch2_pres) {
1575 for (j = 0; j < 16; j++) {
1576 float tmp = samples_l[j];
1577 samples_l[j] = (
tmp + samples_r[j]) * 0.5
f;
1578 samples_r[j] = (
tmp - samples_r[j]) * 0.5
f;
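The branch at lines 1574-1578 is ordinary mid/side decoding: the first channel carries the mid signal, the second the side signal, and left/right come back as (m + s) / 2 and (m - s) / 2. A minimal sketch of the same step:

/* Mid/side to left/right over n samples, as in lines 1575-1578. */
static void ms_to_lr(float *samples_l, float *samples_r, int n)
{
    int j;
    for (j = 0; j < n; j++) {
        float m = samples_l[j]; /* mid stored in the primary channel    */
        float s = samples_r[j]; /* side stored in the secondary channel */
        samples_l[j] = (m + s) * 0.5f;
        samples_r[j] = (m - s) * 0.5f;
    }
}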
1600 for (ch = ch1; ch <= ch2; ch++) {
1601 for (sb = s->min_mono_subband; sb < s->nsubbands; sb++) {
1602 uint8_t *pt_st = s->part_stereo[ch][(sb - s->min_mono_subband) / 4];
1605 if (s->ch_pres[ch2] & (1U << sb))
1608 for (sf = 1; sf <= 4; sf++, samples += 32) {
1612 for (i = 0; i < 32; i++)
1623 int group, int group_sf, int synth_idx)
1630 start = s->tonal_bounds[group][group_sf][0];
1666 values[x_freq - 5] += cf[ 0] * -s;
1667 p4: values[x_freq - 4] += cf[ 1] * c;
1668 p3: values[x_freq - 3] += cf[ 2] * s;
1669 p2: values[x_freq - 2] += cf[ 3] * -c;
1670 p1: values[x_freq - 1] += cf[ 4] * -s;
1671 p0: values[x_freq ] += cf[ 5] * c;
1672 values[x_freq + 1] += cf[ 6] * s;
1673 values[x_freq + 2] += cf[ 7] * -c;
1674 values[x_freq + 3] += cf[ 8] * -s;
1675 values[x_freq + 4] += cf[ 9] * c;
1676 values[x_freq + 5] += cf[10] * s;
1691 for (group = 0; group < 5; group++) {
1692 int group_sf = (s->framenum << group) + ((sf - 22) >> (5 - group));
1693 int synth_idx = ((((sf - 22) & 31) << group) & 31) + (1 << group) - 1;
1704 int sf, sb, nsubbands = s->nsubbands, noutsubbands = 8 << s->freq_range;
1707 if (nsubbands < noutsubbands)
1708 memset(values[nsubbands], 0, (noutsubbands - nsubbands) * sizeof(values[0]));
1712 s->dcadsp->lbr_bank(values, s->time_samples[ch],
1721 s->history[ch], noutsubbands * 4);
1722 s->fdsp->vector_fmul_reverse(s->history[ch], result[noutsubbands],
1723 s->window, noutsubbands * 4);
1724 output += noutsubbands * 4;
1728 for (sb = 0; sb < nsubbands; sb++) {
1737 int i, ret, nchannels, ch_conf = (s->ch_mask & 0x7) - 1;
const int8_t *reorder;
1756 frame->nb_samples = 1024 << s->freq_range;
1761 for (i = 0; i < (s->nchannels + 1) / 2; i++) {
1763 int ch2 = FFMIN(ch1 + 1, s->nchannels - 1);
1771 if (ch1 != ch2 && (s->part_stereo_pres & (1 << ch1)))
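Channels are walked in pairs here and at line 1376: ch2 = FFMIN(ch1 + 1, s->nchannels - 1), so a trailing unpaired channel is simply paired with itself, and the ch1 != ch2 guards such as the one at line 1771 skip the stereo-only work for it.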
1774 if (ch1 < nchannels)
1777 if (ch1 != ch2 && ch2 < nchannels)
1785 s->lfe_history, 16 << s->freq_range);
1798 if (!s->sample_rate)
1802 memset(s->part_stereo, 16, sizeof(s->part_stereo));
1803 memset(s->lpc_coeff, 0, sizeof(s->lpc_coeff));
1804 memset(s->history, 0, sizeof(s->history));
1805 memset(s->tonal_bounds, 0, sizeof(s->tonal_bounds));
1806 memset(s->lfe_history, 0, sizeof(s->lfe_history));
1810 for (ch = 0; ch < s->nchannels; ch++) {
1811 for (sb = 0; sb < s->nsubbands; sb++) {
const uint8_t ff_dca_grid_2_to_scf[3]
@ AV_SAMPLE_FMT_FLTP
float, planar
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
#define AV_LOG_WARNING
Something somehow does not look correct.
static int get_bits_left(GetBitContext *gb)
const uint8_t ff_dca_grid_1_weights[12][32]
uint64_t channel_layout
Audio channel layout.
const uint8_t ff_dca_sb_reorder[8][8]
int sample_rate
samples per second
const float ff_dca_rsd_level_2b[2]
static void parse_ch(DCALbrDecoder *s, int ch, int sb, int quant_level, int flag)
Parse time samples for one subband, filling truncated samples with randomness.
static int ensure_bits(GetBitContext *s, int n)
Check point to ensure that enough bits are left.
const float ff_dca_lfe_step_size_24[144]
#define AV_CH_LAYOUT_MONO
static void filter_ts(DCALbrDecoder *s, int ch1, int ch2)
static const uint8_t lfe_index[7]
This structure describes decoded (raw) audio or video data.
const float ff_dca_quant_amp[57]
@ LBR_CHUNK_RES_TS_2_LAST
static void synth_tones(DCALbrDecoder *s, int ch, float *values, int group, int group_sf, int synth_idx)
Synthesise tones in the given group for the given tonal subframe.
#define DCA_SPEAKER_LAYOUT_STEREO
int request_channel_layout
Converted from avctx.request_channel_layout.
uint8_t phs[DCA_LBR_CHANNELS]
Per-channel phase.
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
const uint8_t ff_dca_scf_to_grid_1[32]
@ LBR_FLAG_BAND_LIMIT_1_8
uint8_t x_freq
Spectral line offset.
static void decode_grid(DCALbrDecoder *s, int ch1, int ch2)
Reconstruct high-frequency resolution grid from first and third grids.
static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
int lbr_offset
Offset to LBR component from start of substream.
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static void synth_lpc(DCALbrDecoder *s, int ch1, int ch2, int sb)
uint8_t amp[DCA_LBR_CHANNELS]
Per-channel amplitude.
const float ff_dca_rsd_level_5[5]
#define AV_CH_LAYOUT_STEREO
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int alloc_sample_buffer(DCALbrDecoder *s)
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
static int parse_st_code(GetBitContext *s, int min_v)
#define AV_CH_LOW_FREQUENCY
@ LBR_FLAG_BAND_LIMIT_1_2
static int parse_lpc(DCALbrDecoder *s, int ch1, int ch2, int start_sb, int end_sb)
static int parse_ts(DCALbrDecoder *s, int ch1, int ch2, int start_sb, int end_sb, int flag)
const float ff_dca_rsd_level_16[16]
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static int init_sample_rate(DCALbrDecoder *s)
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
static int parse_grid_1_chunk(DCALbrDecoder *s, LBRChunk *chunk, int ch1, int ch2)
@ LBR_FLAG_BAND_LIMIT_2_3
@ LBR_CHUNK_FRAME_NO_CSUM
@ LBR_FLAG_BAND_LIMIT_1_3
const int8_t ff_dca_lfe_delta_index_16[8]
static void random_ts(DCALbrDecoder *s, int ch1, int ch2)
Fill unallocated subbands with randomness.
int ff_dca_lbr_filter_frame(DCALbrDecoder *s, AVFrame *frame)
const float ff_dca_synth_env[32]
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
int lbr_size
Size of LBR component in extension substream.
static int parse_ts1_chunk(DCALbrDecoder *s, LBRChunk *chunk, int ch1, int ch2)
@ LBR_FLAG_BAND_LIMIT_MASK
int64_t bit_rate
the average bitrate
const uint16_t ff_dca_avg_g3_freqs[3]
const float ff_dca_corr_cf[32][11]
static unsigned int get_bits1(GetBitContext *s)
void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size)
Allocate and clear a buffer, reusing the given one if large enough.
const float ff_dca_lfe_iir[5][4]
@ LBR_FLAG_BAND_LIMIT_NONE
static int parse_lfe_chunk(DCALbrDecoder *s, LBRChunk *chunk)
#define AV_CH_FRONT_CENTER
static void transform_channel(DCALbrDecoder *s, int ch, float *output)
#define AV_EF_EXPLODE
abort decoding on minor error detection
#define DCA_LBR_CHANNELS_TOTAL
@ LBR_CHUNK_RES_GRID_HR_LAST
#define AV_EF_CAREFUL
consider things that violate the spec, are fast to calculate and have not been seen in the wild as er...
const float ff_dca_bank_coeff[10]
@ DCA_LBR_HEADER_SYNC_ONLY
uint8_t ph_rot
Phase rotation.
static int parse_lfe_24(DCALbrDecoder *s)
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
static float lbr_rand(DCALbrDecoder *s, int sb)
@ AV_MATRIX_ENCODING_NONE
static av_cold void init_tables(void)
enum AVSampleFormat sample_fmt
audio sample format
void avpriv_report_missing_feature(void *avc, const char *msg, ...)
Log a generic warning message about a missing feature.
static int parse_tonal_group(DCALbrDecoder *s, LBRChunk *chunk)
const float ff_dca_rsd_level_3[3]
@ LBR_CHUNK_TONAL_SCF_GRP_3
@ DCA_LBR_HEADER_DECODER_INIT
uint8_t f_delt
Difference between original and center frequency.
VLC ff_dca_vlc_tnl_grp[5]
int channels
number of audio channels
const float ff_dca_rsd_level_8[8]
#define AV_CH_LAYOUT_5POINT0
const float ff_dca_lfe_step_size_16[101]
const uint8_t ff_dca_scf_to_grid_2[32]
const float ff_dca_rsd_level_2a[2]
static int parse_vlc(GetBitContext *s, VLC *vlc, int max_depth)
#define i(width, name, range_min, range_max)
const uint8_t ff_dca_rsd_pack_3_in_7[128][3]
static void base_func_synth(DCALbrDecoder *s, int ch, float *values, int sf)
Synthesise all tones in all groups for the given residual subframe.
VLC ff_dca_vlc_fst_rsd_amp
static void decode_part_stereo(DCALbrDecoder *s, int ch1, int ch2)
Modulate by interpolated partial stereo coefficients.
@ LBR_FLAG_BAND_LIMIT_1_4
av_cold int ff_dca_lbr_init(DCALbrDecoder *s)
const uint8_t ff_dca_freq_to_sb[32]
@ LBR_CHUNK_RES_GRID_LR_LAST
static int parse_scale_factors(DCALbrDecoder *s, uint8_t *scf)
static const int8_t channel_reorder_nolfe[7][5]
static void predict(float *samples, const float *coeff, int nsamples)
static int parse_high_res_grid(DCALbrDecoder *s, LBRChunk *chunk, int ch1, int ch2)
av_cold void ff_dca_lbr_flush(DCALbrDecoder *s)
av_cold void ff_dca_lbr_close(DCALbrDecoder *s)
static int parse_ts2_chunk(DCALbrDecoder *s, LBRChunk *chunk, int ch1, int ch2)
#define AV_CH_LAYOUT_SURROUND
#define AV_EF_CRCCHECK
Verify checksums embedded in the bitstream (could be of either encoded or decoded data,...
static int parse_decoder_init(DCALbrDecoder *s, GetByteContext *gb)
static volatile int checksum
static const int8_t channel_reorder_lfe[7][5]
#define FF_ARRAY_ELEMS(a)
static void parse_grid_3(DCALbrDecoder *s, int ch1, int ch2, int sb, int flag)
static const uint8_t channel_counts[7]
main external API structure.
static int parse_grid_1_sec_ch(DCALbrDecoder *s, int ch2)
const uint32_t ff_dca_sampling_freqs[16]
static int parse_tonal_chunk(DCALbrDecoder *s, LBRChunk *chunk)
int ff_dca_lbr_parse(DCALbrDecoder *s, uint8_t *data, DCAExssAsset *asset)
const uint8_t ff_dca_grid_1_to_scf[11]
@ LBR_CHUNK_TONAL_SCF_GRP_2
static int shift(int a, int b)
const float ff_dca_long_window[128]
int ff_side_data_update_matrix_encoding(AVFrame *frame, enum AVMatrixEncoding matrix_encoding)
Add or update AV_FRAME_DATA_MATRIXENCODING side data.
static void convert_lpc(float *coeff, const int *codes)
Convert from reflection coefficients to direct form coefficients.
const uint8_t ff_dca_freq_ranges[16]
static av_always_inline int get_bitsz(GetBitContext *s, int n)
Read 0-25 bits.
const uint16_t ff_dca_fst_amp[44]
static av_always_inline int diff(const uint32_t a, const uint32_t b)
static int parse_tonal(DCALbrDecoder *s, int group)
static const uint16_t channel_layouts[7]
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
@ LBR_CHUNK_RES_TS_1_LAST
static const double coeff[2][5]
const int8_t ff_dca_ph0_shift[8]
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define LOCAL_ALIGNED_32(t, v,...)
#define DCA_LBR_TIME_HISTORY
static float cos_tab[256]
@ LBR_CHUNK_TONAL_SCF_GRP_5
VLC_TYPE(* table)[2]
code, bits
#define DCA_LBR_TIME_SAMPLES
@ LBR_CHUNK_TONAL_SCF_GRP_1
static int ff_dca_count_chs_for_mask(unsigned int mask)
Return number of individual channels in DCASpeakerPair mask.
@ LBR_CHUNK_TONAL_SCF_GRP_4
const uint16_t ff_dca_rsd_pack_5_in_8[256]
#define FF_PROFILE_DTS_EXPRESS
static int parse_lfe_16(DCALbrDecoder *s)
static int parse_grid_2(DCALbrDecoder *s, int ch1, int ch2, int start_sb, int end_sb, int flag)
const float ff_dca_st_coeff[34]
const int8_t ff_dca_lfe_delta_index_24[32]