26 #define BITSTREAM_READER_LE
41 #define DSD_BYTE_READY(low,high) (!(((low) ^ (high)) & 0xff000000))
44 #define PTABLE_BINS (1<<PTABLE_BITS)
45 #define PTABLE_MASK (PTABLE_BINS-1)
48 #define DOWN 0x00010000
52 #define VALUE_ONE (1 << PRECISION)
53 #define PRECISION_USE 12
57 #define MAX_HISTORY_BITS 5
58 #define MAX_HISTORY_BINS (1 << MAX_HISTORY_BITS)
59 #define MAX_BIN_BYTES 1280 // for value_lookup, per bin (2k - 512 - 256)
98 #define WV_MAX_FRAME_DECODERS 14
119 #define LEVEL_DECAY(a) (((a) + 0x80) >> 8)
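/* The next excerpt is from get_tail(): it reads the low-order part of a residual as a
 * truncated binary code, spending p = av_log2(k) bits on the shorter codes and p + 1
 * bits on the rest. */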
128 e = (1 << (p + 1)) - k - 1;
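/* update_error_limit(): advances the per-channel bitrate accumulators, balances the two
 * channels' bitrates against their slow_level in hybrid-bitrate mode, and derives
 * ch[i].error_limit, the rounding window used for lossy (hybrid) decoding. */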
139 for (i = 0; i <= ctx->stereo_in; i++) {
140 if (ctx->ch[i].bitrate_acc > UINT_MAX - ctx->ch[i].bitrate_delta)
142 ctx->ch[i].bitrate_acc += ctx->ch[i].bitrate_delta;
143 br[i] = ctx->ch[i].bitrate_acc >> 16;
146 if (ctx->stereo_in && ctx->hybrid_bitrate) {
147 int balance = (sl[1] - sl[0] + br[1] + 1) >> 1;
148 if (balance > br[0]) {
151 } else if (-balance > br[0]) {
155 br[1] = br[0] + balance;
156 br[0] = br[0] - balance;
159 for (i = 0; i <= ctx->stereo_in; i++) {
160 if (ctx->hybrid_bitrate) {
161 if (sl[i] - br[i] > -0x100)
164 ctx->ch[i].error_limit = 0;
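/* wv_get_value(): decodes one residual with the median-based entropy coder. The block at
 * lines 182-203 handles runs of zero samples and clears both channels' medians when such
 * a run starts; the block at lines 267-290 narrows the value inside [base, base+add] by
 * bisection, stopping early at error_limit in hybrid mode. */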
182 if ((ctx->ch[0].median[0] < 2U) && (ctx->ch[1].median[0] < 2U) &&
183 !ctx->zero && !ctx->one) {
202 memset(ctx->ch[0].median, 0, sizeof(ctx->ch[0].median));
203 memset(ctx->ch[1].median, 0, sizeof(ctx->ch[1].median));
267 if (!c->error_limit) {
268 if (add >= 0x2000000U) {
276 int mid = (base * 2U + add + 1) >> 1;
277 while (add > c->error_limit) {
281 add -= (mid - (unsigned)base);
284 add = mid - (unsigned)base - 1;
285 mid = (base * 2U + add + 1) >> 1;
290 if (ctx->hybrid_bitrate)
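/* wv_get_value_integer(): merges in the extra_bits, updates the extra-bits CRC, applies
 * the INT32INFO and/or masks, clips lossy samples to hybrid_minclip/hybrid_maxclip and
 * scales the result by post_shift. */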
309 S *= 1 << s->extra_bits;
311 if (s->got_extra_bits &&
314 *crc = *crc * 9 + (S & 0xffff) * 3 + ((unsigned)S >> 16);
318 bit = (S & s->and) | s->or;
322 bit = av_clip(bit, s->hybrid_minclip, s->hybrid_maxclip);
324 return bit << s->post_shift;
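/* wv_get_value_float(): rebuilds an IEEE 754 single-precision value from the decoded
 * mantissa S, the FLOATINFO exponent/shift information and the sign; line 395 folds the
 * result into the running CRC and line 397 packs the final bit pattern. */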
335 int exp = s->float_max_exp;
337 if (s->got_extra_bits) {
338 const int max_bits = 1 + 23 + 8 + 1;
346 S *= 1U << s->float_shift;
350 if (S >= 0x1000000U) {
351 if (s->got_extra_bits && get_bits1(&s->gb_extra_bits))
358 exp = s->float_max_exp;
366 (s->got_extra_bits &&
370 } else if (s->got_extra_bits &&
376 exp = s->float_max_exp;
385 if (s->float_max_exp >= 25)
395 *crc = *crc * 27 + S * 9 + exp * 3 + sign;
397 value.u = (sign << 31) | (exp << 23) | S;
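/* wv_check_crc(): compares the CRCs accumulated while decoding (including the extra-bits
 * CRC when extra bits were present) with the values stored in the block. */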
402 uint32_t crc_extra_bits)
408 if (s->got_extra_bits && crc_extra_bits != s->crc_extra_bits) {
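/* init_ptable(): fills the probability table used by the high-mode DSD decoder; rate_i
 * and rate_s, read from the block header, control how many adaptation steps are applied
 * while the table is built. */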
418 int value = 0x808000, rate = rate_i << 8;
420 for (int c = (rate + 128) >> 8; c--;)
427 if (value > 0x010000) {
428 rate += (rate * rate_s + 128) >> 8;
430 for (int c = (rate + 64) >> 7; c--;)
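/* wv_unpack_dsd_high(): the "high" DSD mode. Each channel keeps a chain of adaptive
 * filters (fltr0-fltr6) and a factor that predict the next DSD bit; the prediction drives
 * a binary range coder (low/high/value, refilled a byte at a time via DSD_BYTE_READY).
 * On CRC failure the output is overwritten with 0x69, a DSD silence pattern. */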
444 uint8_t *dst_l = dst_left, *dst_r = dst_right;
445 int total_samples = s->samples, stereo = dst_r ? 1 : 0;
448 uint32_t low, high, value;
453 rate_i = bytestream2_get_byte(&s->gbyte);
454 rate_s = bytestream2_get_byte(&s->gbyte);
464 sp->fltr1 = bytestream2_get_byte(&s->gbyte) << (PRECISION - 8);
465 sp->fltr2 = bytestream2_get_byte(&s->gbyte) << (PRECISION - 8);
466 sp->fltr3 = bytestream2_get_byte(&s->gbyte) << (PRECISION - 8);
467 sp->fltr4 = bytestream2_get_byte(&s->gbyte) << (PRECISION - 8);
468 sp->fltr5 = bytestream2_get_byte(&s->gbyte) << (PRECISION - 8);
470 sp->factor = bytestream2_get_byte(&s->gbyte) & 0xff;
471 sp->factor |= (bytestream2_get_byte(&s->gbyte) << 8) & 0xff00;
472 sp->factor = (int32_t)((uint32_t)sp->factor << 16) >> 16;
475 value = bytestream2_get_be32(&s->gbyte);
479 while (total_samples--) {
482 sp[0].value = sp[0].fltr1 - sp[0].fltr5 + ((sp[0].fltr6 * sp[0].factor) >> 2);
485 sp[1].value = sp[1].fltr1 - sp[1].fltr5 + ((sp[1].fltr6 * sp[1].factor) >> 2);
489 uint32_t split = low + ((high - low) >> 8) * (*pp >> 16);
502 value = (value << 8) | bytestream2_get_byte(&s->gbyte);
503 high = (high << 8) | 0xff;
507 sp[0].value += sp[0].fltr6 * 8;
508 sp[0].byte = (sp[0].byte << 1) | (sp[0].fltr0 & 1);
509 sp[0].factor += (((sp[0].value ^ sp[0].fltr0) >> 31) | 1) &
513 sp[0].fltr3 += (sp[0].fltr2 - sp[0].fltr3) >> 4;
514 sp[0].fltr4 += (sp[0].fltr3 - sp[0].fltr4) >> 4;
515 sp[0].value = (sp[0].fltr4 - sp[0].fltr5) >> 4;
516 sp[0].fltr5 += sp[0].value;
517 sp[0].fltr6 += (sp[0].value - sp[0].fltr6) >> 3;
518 sp[0].value = sp[0].fltr1 - sp[0].fltr5 + ((sp[0].fltr6 * sp[0].factor) >> 2);
524 split = low + ((high - low) >> 8) * (*pp >> 16);
537 value = (value << 8) | bytestream2_get_byte(&s->gbyte);
538 high = (high << 8) | 0xff;
542 sp[1].value += sp[1].fltr6 * 8;
543 sp[1].byte = (sp[1].byte << 1) | (sp[1].fltr0 & 1);
544 sp[1].factor += (((sp[1].value ^ sp[1].fltr0) >> 31) | 1) &
548 sp[1].fltr3 += (sp[1].fltr2 - sp[1].fltr3) >> 4;
549 sp[1].fltr4 += (sp[1].fltr3 - sp[1].fltr4) >> 4;
550 sp[1].value = (sp[1].fltr4 - sp[1].fltr5) >> 4;
551 sp[1].fltr5 += sp[1].value;
552 sp[1].fltr6 += (sp[1].value - sp[1].fltr6) >> 3;
553 sp[1].value = sp[1].fltr1 - sp[1].fltr5 + ((sp[1].fltr6 * sp[1].factor) >> 2);
557 sp[0].factor -= (sp[0].factor + 512) >> 10;
571 memset(dst_left, 0x69, s->samples * 4);
574 memset(dst_right, 0x69, s->samples * 4);
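/* wv_unpack_dsd_fast(): the "fast" DSD mode. Per-history-bin byte probabilities
 * (probabilities[], summed_probabilities[], value_lookup[]) are read from the block and
 * drive a range decoder that emits whole DSD bytes; p0 and p1 are the history contexts
 * for the left and right channel. */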
582 uint8_t *dst_l = dst_left, *dst_r = dst_right;
583 uint8_t history_bits, max_probability;
584 int total_summed_probabilities = 0;
585 int total_samples = s->samples;
586 uint8_t *vlb = s->value_lookup_buffer;
587 int history_bins, p0, p1, chan;
589 uint32_t low, high, value;
594 history_bits = bytestream2_get_byte(&s->gbyte);
599 history_bins = 1 << history_bits;
600 max_probability = bytestream2_get_byte(&s->gbyte);
602 if (max_probability < 0xff) {
604 uint8_t *outend = outptr + sizeof(*s->probabilities) * history_bins;
607 int code = bytestream2_get_byte(&s->gbyte);
609 if (code > max_probability) {
610 int zcount = code - max_probability;
612 while (outptr < outend && zcount--)
622 if (outptr < outend ||
627 sizeof(*s->probabilities) * history_bins);
632 for (p0 = 0; p0 < history_bins; p0++) {
635 for (int i = 0; i < 256; i++)
636 s->summed_probabilities[p0][i] = sum_values += s->probabilities[p0][i];
639 total_summed_probabilities += sum_values;
641 if (total_summed_probabilities > history_bins * MAX_BIN_BYTES)
644 s->value_lookup[p0] = vlb;
646 for (int i = 0; i < 256; i++) {
647 int c = s->probabilities[p0][i];
659 low = 0; high = 0xffffffff;
660 value = bytestream2_get_be32(&s->gbyte);
665 while (total_samples--) {
668 if (!s->summed_probabilities[p0][255])
671 mult = (high - low) / s->summed_probabilities[p0][255];
675 value = bytestream2_get_be32(&s->gbyte);
679 mult = high / s->summed_probabilities[p0][255];
687 if (index >= s->summed_probabilities[p0][255])
691 if ((*dst_l = code = s->value_lookup[p0][index]))
692 low += s->summed_probabilities[p0][code-1] * mult;
697 low += s->summed_probabilities[p0][code-1] * mult;
711 high = low + s->probabilities[p0][code] * mult - 1;
715 p0 = code & (history_bins-1);
718 p1 = code & (history_bins-1);
722 value = (value << 8) | bytestream2_get_byte(&s->gbyte);
723 high = (high << 8) | 0xff;
732 memset(dst_left, 0x69, s->samples * 4);
735 memset(dst_right, 0x69, s->samples * 4);
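/* wv_unpack_dsd_copy(): uncompressed DSD - the bytes are copied through unchanged. */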
743 uint8_t *dst_l = dst_left, *dst_r = dst_right;
744 int total_samples = s->samples;
750 while (total_samples--) {
764 memset(dst_left, 0x69, s->samples * 4);
767 memset(dst_right, 0x69, s->samples * 4);
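/* wv_unpack_stereo(): the PCM path for stereo blocks. For every decorrelation term the
 * predicted samples A/B are weighted (weightA/weightB, adapted by delta), negative terms
 * cross-feed the two channels, line 875 undoes the joint-stereo transform and line 876
 * accumulates the CRC. */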
774 void *dst_l, void *dst_r, const int type)
780 uint32_t crc = 0xFFFFFFFF;
781 uint32_t crc_extra_bits = 0xFFFFFFFF;
782 int16_t *dst16_l = dst_l;
783 int16_t *dst16_r = dst_r;
786 float *dstfl_l = dst_l;
787 float *dstfl_r = dst_r;
789 s->one = s->zero = s->zeroes = 0;
797 for (i = 0; i < s->terms; i++) {
798 t = s->decorr[i].value;
802 A = 2U * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1];
803 B = 2U * s->decorr[i].samplesB[0] - s->decorr[i].samplesB[1];
805 A = (int)(3U * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1]) >> 1;
806 B = (int)(3U * s->decorr[i].samplesB[0] - s->decorr[i].samplesB[1]) >> 1;
808 s->decorr[i].samplesA[1] = s->decorr[i].samplesA[0];
809 s->decorr[i].samplesB[1] = s->decorr[i].samplesB[0];
812 A = s->decorr[i].samplesA[pos];
813 B = s->decorr[i].samplesB[pos];
817 L2 = L + ((s->decorr[i].weightA * (int64_t)A + 512) >> 10);
818 R2 = R + ((s->decorr[i].weightB * (int64_t)B + 512) >> 10);
820 L2 = L + (unsigned)((int)(s->decorr[i].weightA * (unsigned)A + 512) >> 10);
821 R2 = R + (unsigned)((int)(s->decorr[i].weightB * (unsigned)B + 512) >> 10);
824 s->decorr[i].weightA -= ((((L ^ A) >> 30) & 2) - 1) * s->decorr[i].delta;
826 s->decorr[i].weightB -= ((((R ^ B) >> 30) & 2) - 1) * s->decorr[i].delta;
827 s->decorr[i].samplesA[j] = L = L2;
828 s->decorr[i].samplesB[j] = R = R2;
829 } else if (t == -1) {
831 L2 = L + ((s->decorr[i].weightA * (int64_t)s->decorr[i].samplesA[0] + 512) >> 10);
833 L2 = L + (unsigned)((int)(s->decorr[i].weightA * (unsigned)s->decorr[i].samplesA[0] + 512) >> 10);
837 R2 = R + ((s->decorr[i].weightB * (int64_t)L2 + 512) >> 10);
839 R2 = R + (unsigned)((int)(s->decorr[i].weightB * (unsigned)L2 + 512) >> 10);
842 s->decorr[i].samplesA[0] = R;
845 R2 = R + ((s->decorr[i].weightB * (int64_t)s->decorr[i].samplesB[0] + 512) >> 10);
847 R2 = R + (unsigned)((int)(s->decorr[i].weightB * (unsigned)s->decorr[i].samplesB[0] + 512) >> 10);
852 R2 = s->decorr[i].samplesA[0];
853 s->decorr[i].samplesA[0] = R;
857 L2 = L + ((s->decorr[i].weightA * (int64_t)R2 + 512) >> 10);
859 L2 = L + (unsigned)((int)(s->decorr[i].weightA * (unsigned)R2 + 512) >> 10);
862 s->decorr[i].samplesB[0] = L;
875 L += (unsigned)(R -= (unsigned)(L >> 1));
876 crc = (crc * 3 + L) * 3 + R;
889 } while (!last && count < s->samples);
891 if (last && count < s->samples) {
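/* wv_unpack_mono(): the same decorrelation pass for mono blocks, using only the
 * samplesA/weightA half of each term. */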
905 void *dst, const int type)
911 uint32_t crc = 0xFFFFFFFF;
912 uint32_t crc_extra_bits = 0xFFFFFFFF;
913 int16_t *dst16 = dst;
917 s->one = s->zero = s->zeroes = 0;
923 for (i = 0; i < s->terms; i++) {
924 t = s->decorr[i].value;
927 A = 2U * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1];
929 A = (int)(3U * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1]) >> 1;
930 s->decorr[i].samplesA[1] = s->decorr[i].samplesA[0];
933 A = s->decorr[i].samplesA[pos];
937 S = T + ((s->decorr[i].weightA * (int64_t)A + 512) >> 10);
939 S = T + (unsigned)((int)(s->decorr[i].weightA * (unsigned)A + 512) >> 10);
941 s->decorr[i].weightA -= ((((T ^ A) >> 30) & 2) - 1) * s->decorr[i].delta;
942 s->decorr[i].samplesA[j] = T = S;
955 } while (!last && count < s->samples);
957 if (last && count < s->samples) {
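/* wv_alloc_frame_context(): allocates one WavpackFrameContext per decoded block, up to
 * WV_MAX_FRAME_DECODERS. */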
977 if (!c->fdec[c->fdec_num])
980 c->fdec[c->fdec_num - 1]->avctx = c->avctx;
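/* wv_dsd_reset(): (re)allocates the per-channel DSDContexts and fills their history
 * buffers with the 0x69 silence byte. */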
996 if (channels > INT_MAX / sizeof(*s->dsdctx))
1006 memset(s->dsdctx[i].buf, 0x69, sizeof(s->dsdctx[i].buf));
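/* Excerpts from wavpack_decode_init() (allocation of the current/previous ThreadFrames)
 * and wavpack_decode_end() (freeing the per-block frame decoders). */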
1053 if (!s->curr_frame.f || !s->prev_frame.f)
1065 for (int i = 0; i < s->fdec_num; i++)
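/* wavpack_decode_block(): parses one WavPack block - the fixed header (sample count,
 * frame_flags, CRC) followed by tagged metadata sub-blocks (decorrelation terms, weights,
 * history samples, entropy and hybrid parameters, INT32INFO, FLOATINFO, channel and
 * sample-rate info, DSD or bitstream data) - then dispatches to the matching
 * wv_unpack_* routine and emits the decoded samples. */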
1081 const uint8_t *buf, int buf_size)
1087 void *samples_l = NULL, *samples_r = NULL;
1089 int got_terms = 0, got_weights = 0, got_samples = 0,
1090 got_entropy = 0, got_pcm = 0, got_float = 0, got_hybrid = 0;
1092 int i, j, id, size, ssize, weights, t;
1093 int bpp, chan = 0, orig_bpp, sample_rate = 0, rate_x = 1, dsd_mode = 0;
1095 uint64_t chmask = 0;
1102 s = wc->fdec[block_no];
1110 memset(s->ch, 0, sizeof(s->ch));
1112 s->and = s->or = s->shift = 0;
1113 s->got_extra_bits = 0;
1117 s->samples = bytestream2_get_le32(&gb);
1120 "a sequence: %d and %d\n", wc->
samples,
s->samples);
1123 s->frame_flags = bytestream2_get_le32(&gb);
1127 else if ((s->frame_flags & 0x03) <= 1)
1136 orig_bpp = ((s->frame_flags & 0x03) + 1) << 3;
1139 s->stereo = !(s->frame_flags & WV_MONO);
1144 s->post_shift = bpp * 8 - orig_bpp + ((s->frame_flags >> 13) & 0x1f);
1145 if (s->post_shift < 0 || s->post_shift > 31) {
1148 s->hybrid_maxclip = ((1LL << (orig_bpp - 1)) - 1);
1149 s->hybrid_minclip = ((-1UL << (orig_bpp - 1)));
1150 s->CRC = bytestream2_get_le32(&gb);
1154 id = bytestream2_get_byte(&gb);
1155 size = bytestream2_get_byte(&gb);
1157 size |= (bytestream2_get_le16u(&gb)) << 8;
1164 "Got incorrect block %02X with size %i\n",
id,
size);
1169 "Block size %i is out of bounds\n",
size);
1181 for (i = 0; i < s->terms; i++) {
1183 s->decorr[s->terms - i - 1].value = (val & 0x1F) - 5;
1184 s->decorr[s->terms - i - 1].delta = val >> 5;
1193 weights = size >> s->stereo_in;
1194 if (weights > MAX_TERMS || weights > s->terms) {
1199 for (i = 0; i < weights; i++) {
1200 t = (int8_t)bytestream2_get_byte(&gb);
1201 s->decorr[s->terms - i - 1].weightA = t * (1 << 3);
1202 if (s->decorr[s->terms - i - 1].weightA > 0)
1203 s->decorr[s->terms - i - 1].weightA +=
1204 (s->decorr[s->terms - i - 1].weightA + 64) >> 7;
1206 t = (int8_t)bytestream2_get_byte(&gb);
1207 s->decorr[s->terms - i - 1].weightB = t * (1 << 3);
1208 if (s->decorr[s->terms - i - 1].weightB > 0)
1209 s->decorr[s->terms - i - 1].weightB +=
1210 (s->decorr[s->terms - i - 1].weightB + 64) >> 7;
1221 for (i = s->terms - 1; (i >= 0) && (t < size); i--) {
1222 if (s->decorr[i].value > 8) {
1223 s->decorr[i].samplesA[0] =
1224 wp_exp2(bytestream2_get_le16(&gb));
1225 s->decorr[i].samplesA[1] =
1226 wp_exp2(bytestream2_get_le16(&gb));
1229 s->decorr[i].samplesB[0] =
1230 wp_exp2(bytestream2_get_le16(&gb));
1231 s->decorr[i].samplesB[1] =
1232 wp_exp2(bytestream2_get_le16(&gb));
1236 } else if (s->decorr[i].value < 0) {
1237 s->decorr[i].samplesA[0] =
1238 wp_exp2(bytestream2_get_le16(&gb));
1239 s->decorr[i].samplesB[0] =
1240 wp_exp2(bytestream2_get_le16(&gb));
1243 for (j = 0; j < s->decorr[i].value; j++) {
1244 s->decorr[i].samplesA[j] =
1245 wp_exp2(bytestream2_get_le16(&gb));
1247 s->decorr[i].samplesB[j] =
1248 wp_exp2(bytestream2_get_le16(&gb));
1251 t += s->decorr[i].value * 2 * (s->stereo_in + 1);
1257 if (size != 6 * (s->stereo_in + 1)) {
1259 "Entropy vars size should be %i, got %i.\n",
1260 6 * (s->stereo_in + 1), size);
1264 for (j = 0; j <= s->stereo_in; j++)
1265 for (i = 0; i < 3; i++) {
1266 s->ch[j].median[i] = wp_exp2(bytestream2_get_le16(&gb));
1271 if (s->hybrid_bitrate) {
1272 for (i = 0; i <= s->stereo_in; i++) {
1273 s->ch[i].slow_level = wp_exp2(bytestream2_get_le16(&gb));
1277 for (i = 0; i < (s->stereo_in + 1); i++) {
1278 s->ch[i].bitrate_acc = bytestream2_get_le16(&gb) << 16;
1282 for (i = 0; i < (s->stereo_in + 1); i++) {
1283 s->ch[i].bitrate_delta =
1284 wp_exp2((int16_t)bytestream2_get_le16(&gb));
1287 for (i = 0; i < (s->stereo_in + 1); i++)
1288 s->ch[i].bitrate_delta = 0;
1296 "Invalid INT32INFO, size = %i\n",
1304 "Invalid INT32INFO, extra_bits = %d (> 30)\n",
val[0]);
1306 }
else if (
val[0]) {
1307 s->extra_bits =
val[0];
1308 }
else if (
val[1]) {
1310 }
else if (
val[2]) {
1313 }
else if (
val[3]) {
1317 if (
s->shift > 31) {
1319 "Invalid INT32INFO, shift = %d (> 31)\n",
s->shift);
1320 s->and =
s->or =
s->shift = 0;
1325 if (
s->hybrid && bpp == 4 &&
s->post_shift < 8 &&
s->shift > 8) {
1328 s->hybrid_maxclip >>= 8;
1329 s->hybrid_minclip >>= 8;
1336 "Invalid FLOATINFO, size = %i\n",
size);
1340 s->float_flag = bytestream2_get_byte(&gb);
1341 s->float_shift = bytestream2_get_byte(&gb);
1342 s->float_max_exp = bytestream2_get_byte(&gb);
1343 if (s->float_shift > 31) {
1345 "Invalid FLOATINFO, shift = %d (> 31)\n", s->float_shift);
1365 rate_x = bytestream2_get_byte(&gb);
1368 rate_x = 1 << rate_x;
1369 dsd_mode = bytestream2_get_byte(&gb);
1370 if (dsd_mode && dsd_mode != 1 && dsd_mode != 3) {
1390 s->got_extra_bits = 1;
1395 "Insufficient channel information\n");
1398 chan = bytestream2_get_byte(&gb);
1401 chmask = bytestream2_get_byte(&gb);
1404 chmask = bytestream2_get_le16(&gb);
1407 chmask = bytestream2_get_le24(&gb);
1410 chmask = bytestream2_get_le32(&gb);
1413 size = bytestream2_get_byte(&gb);
1414 chan |= (bytestream2_get_byte(&gb) & 0xF) << 8;
1418 " instead of %i.\n", chan, avctx->
channels);
1419 chmask = bytestream2_get_le24(&gb);
1422 size = bytestream2_get_byte(&gb);
1423 chan |= (bytestream2_get_byte(&gb) & 0xF) << 8;
1427 " instead of %i.\n", chan, avctx->
channels);
1428 chmask = bytestream2_get_le32(&gb);
1468 if (s->hybrid && !got_hybrid) {
1478 const int wanted = s->samples * s->extra_bits << s->stereo_in;
1479 if (size < wanted) {
1481 s->got_extra_bits = 0;
1486 if (!got_pcm && !got_dsd) {
1498 int new_channels = avctx->channels;
1501 int sr = (s->frame_flags >> 23) & 0xf;
1511 if (new_samplerate * (uint64_t)rate_x > INT_MAX)
1513 new_samplerate *= rate_x;
1517 new_channels = chan;
1519 new_chmask = chmask;
1521 new_channels = s->stereo ? 2 : 1;
1536 !!got_dsd != !!wc->dsdctx) {
1575 if (dsd_mode == 3) {
1577 } else if (dsd_mode == 1) {
1589 if (dsd_mode == 3) {
1591 } else if (dsd_mode == 1) {
1603 memcpy(samples_r, samples_l, bpp * s->samples);
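/* dsd_channel(): per-channel worker (run through avctx->execute2) that converts one
 * channel's decoded DSD bytes to float PCM with ff_dsd2pcm_translate(). */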
1623 (float *)frame->extended_data[jobnr], 1);
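/* wavpack_decode_frame(): walks the packet block by block, validating each block's size
 * against the remaining buffer before handing it to wavpack_decode_block(). */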
1629 int *got_frame_ptr, AVPacket *avpkt)
1633 int buf_size = avpkt->size;
1645 frame_flags = AV_RL32(buf + 24);
1658 if (frame_size <= 0 || frame_size > buf_size) {
1660 "Block %d has invalid size (size %d vs. %d bytes left)\n",