1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  * Argonaut Games ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
16  * Simon & Schuster Interactive ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
17  * Ubisoft ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
18  * High Voltage Software ALP decoder by Zane van Iperen (zane@zanevaniperen.com)
19  * Cunning Developments decoder by Zane van Iperen (zane@zanevaniperen.com)
20  *
21  * This file is part of FFmpeg.
22  *
23  * FFmpeg is free software; you can redistribute it and/or
24  * modify it under the terms of the GNU Lesser General Public
25  * License as published by the Free Software Foundation; either
26  * version 2.1 of the License, or (at your option) any later version.
27  *
28  * FFmpeg is distributed in the hope that it will be useful,
29  * but WITHOUT ANY WARRANTY; without even the implied warranty of
30  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
31  * Lesser General Public License for more details.
32  *
33  * You should have received a copy of the GNU Lesser General Public
34  * License along with FFmpeg; if not, write to the Free Software
35  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
36  */
37 
38 #include "config_components.h"
39 
40 #include "avcodec.h"
41 #include "get_bits.h"
42 #include "bytestream.h"
43 #include "adpcm.h"
44 #include "adpcm_data.h"
45 #include "codec_internal.h"
46 #include "decode.h"
47 
48 /**
49  * @file
50  * ADPCM decoders
51  * Features and limitations:
52  *
53  * Reference documents:
54  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
55  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
56  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
57  * http://openquicktime.sourceforge.net/
58  * XAnim sources (xa_codec.c) http://xanim.polter.net/
59  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
60  * SoX source code http://sox.sourceforge.net/
61  *
62  * CD-ROM XA:
63  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
64  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
65  * readstr http://www.geocities.co.jp/Playtown/2004/
66  */
67 
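/*
 * The CASE() helpers below emit a switch case only when the matching decoder
 * was enabled at configure time.  Illustrative expansion, assuming
 * CONFIG_ADPCM_MS_DECODER is 1:
 *
 *     CASE(ADPCM_MS, body)  ->  case AV_CODEC_ID_ADPCM_MS: { body } break;
 *
 * With the decoder disabled (the CONFIG_ macro is 0) the same invocation
 * expands to nothing, so disabled codecs add no code to the big switches.
 */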
68 #define CASE_0(codec_id, ...)
69 #define CASE_1(codec_id, ...) \
70  case codec_id: \
71  { __VA_ARGS__ } \
72  break;
73 #define CASE_2(enabled, codec_id, ...) \
74  CASE_ ## enabled(codec_id, __VA_ARGS__)
75 #define CASE_3(config, codec_id, ...) \
76  CASE_2(config, codec_id, __VA_ARGS__)
77 #define CASE(codec, ...) \
78  CASE_3(CONFIG_ ## codec ## _DECODER, AV_CODEC_ID_ ## codec, __VA_ARGS__)
79 
80 /* These are for CD-ROM XA ADPCM */
81 static const int8_t xa_adpcm_table[5][2] = {
82  { 0, 0 },
83  { 60, 0 },
84  { 115, -52 },
85  { 98, -55 },
86  { 122, -60 }
87 };
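/* Each row holds one (K0, K1) XA prediction filter pair in 6-bit fixed
 * point; xa_decode() below applies them as (s_1*f0 + s_2*f1 + 32) >> 6. */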
88 
89 static const int16_t afc_coeffs[2][16] = {
90  { 0, 2048, 0, 1024, 4096, 3584, 3072, 4608, 4200, 4800, 5120, 2048, 1024, -1024, -1024, -2048 },
91  { 0, 0, 2048, 1024, -2048, -1536, -1024, -2560, -2248, -2300, -3072, -2048, -1024, 1024, 0, 0 }
92 };
93 
94 static const int16_t ea_adpcm_table[] = {
95  0, 240, 460, 392,
96  0, 0, -208, -220,
97  0, 1, 3, 4,
98  7, 8, 10, 11,
99  0, -1, -3, -4
100 };
101 
102 /*
103  * Dumped from the binaries:
104  * - FantasticJourney.exe - 0x794D2, DGROUP:0x47A4D2
105  * - BigRaceUSA.exe - 0x9B8AA, DGROUP:0x49C4AA
106  * - Timeshock!.exe - 0x8506A, DGROUP:0x485C6A
107  */
108 static const int8_t ima_cunning_index_table[9] = {
109  -1, -1, -1, -1, 1, 2, 3, 4, -1
110 };
111 
112 /*
113  * Dumped from the binaries:
114  * - FantasticJourney.exe - 0x79458, DGROUP:0x47A458
115  * - BigRaceUSA.exe - 0x9B830, DGROUP:0x49C430
116  * - Timeshock!.exe - 0x84FF0, DGROUP:0x485BF0
117  */
118 static const int16_t ima_cunning_step_table[61] = {
119  1, 1, 1, 1, 2, 2, 3, 3, 4, 5,
120  6, 7, 8, 10, 12, 14, 16, 20, 24, 28,
121  32, 40, 48, 56, 64, 80, 96, 112, 128, 160,
122  192, 224, 256, 320, 384, 448, 512, 640, 768, 896,
123  1024, 1280, 1536, 1792, 2048, 2560, 3072, 3584, 4096, 5120,
124  6144, 7168, 8192, 10240, 12288, 14336, 16384, 20480, 24576, 28672, 0
125 };
126 
127 static const int8_t adpcm_index_table2[4] = {
128  -1, 2,
129  -1, 2,
130 };
131 
132 static const int8_t adpcm_index_table3[8] = {
133  -1, -1, 1, 2,
134  -1, -1, 1, 2,
135 };
136 
137 static const int8_t adpcm_index_table5[32] = {
138  -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16,
139  -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16,
140 };
141 
142 static const int8_t * const adpcm_index_tables[4] = {
143  &adpcm_index_table2[0],
144  &adpcm_index_table3[0],
145  &ff_adpcm_index_table[0],
146  &adpcm_index_table5[0],
147 };
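/* Indexed by code width minus 2: the 2-, 3-, 4- and 5-bit index tables used
 * by adpcm_ima_wav_expand_nibble() as adpcm_index_tables[bps - 2]. */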
148 
149 static const int16_t mtaf_stepsize[32][16] = {
150  { 1, 5, 9, 13, 16, 20, 24, 28,
151  -1, -5, -9, -13, -16, -20, -24, -28, },
152  { 2, 6, 11, 15, 20, 24, 29, 33,
153  -2, -6, -11, -15, -20, -24, -29, -33, },
154  { 2, 7, 13, 18, 23, 28, 34, 39,
155  -2, -7, -13, -18, -23, -28, -34, -39, },
156  { 3, 9, 15, 21, 28, 34, 40, 46,
157  -3, -9, -15, -21, -28, -34, -40, -46, },
158  { 3, 11, 18, 26, 33, 41, 48, 56,
159  -3, -11, -18, -26, -33, -41, -48, -56, },
160  { 4, 13, 22, 31, 40, 49, 58, 67,
161  -4, -13, -22, -31, -40, -49, -58, -67, },
162  { 5, 16, 26, 37, 48, 59, 69, 80,
163  -5, -16, -26, -37, -48, -59, -69, -80, },
164  { 6, 19, 31, 44, 57, 70, 82, 95,
165  -6, -19, -31, -44, -57, -70, -82, -95, },
166  { 7, 22, 38, 53, 68, 83, 99, 114,
167  -7, -22, -38, -53, -68, -83, -99, -114, },
168  { 9, 27, 45, 63, 81, 99, 117, 135,
169  -9, -27, -45, -63, -81, -99, -117, -135, },
170  { 10, 32, 53, 75, 96, 118, 139, 161,
171  -10, -32, -53, -75, -96, -118, -139, -161, },
172  { 12, 38, 64, 90, 115, 141, 167, 193,
173  -12, -38, -64, -90, -115, -141, -167, -193, },
174  { 15, 45, 76, 106, 137, 167, 198, 228,
175  -15, -45, -76, -106, -137, -167, -198, -228, },
176  { 18, 54, 91, 127, 164, 200, 237, 273,
177  -18, -54, -91, -127, -164, -200, -237, -273, },
178  { 21, 65, 108, 152, 195, 239, 282, 326,
179  -21, -65, -108, -152, -195, -239, -282, -326, },
180  { 25, 77, 129, 181, 232, 284, 336, 388,
181  -25, -77, -129, -181, -232, -284, -336, -388, },
182  { 30, 92, 153, 215, 276, 338, 399, 461,
183  -30, -92, -153, -215, -276, -338, -399, -461, },
184  { 36, 109, 183, 256, 329, 402, 476, 549,
185  -36, -109, -183, -256, -329, -402, -476, -549, },
186  { 43, 130, 218, 305, 392, 479, 567, 654,
187  -43, -130, -218, -305, -392, -479, -567, -654, },
188  { 52, 156, 260, 364, 468, 572, 676, 780,
189  -52, -156, -260, -364, -468, -572, -676, -780, },
190  { 62, 186, 310, 434, 558, 682, 806, 930,
191  -62, -186, -310, -434, -558, -682, -806, -930, },
192  { 73, 221, 368, 516, 663, 811, 958, 1106,
193  -73, -221, -368, -516, -663, -811, -958, -1106, },
194  { 87, 263, 439, 615, 790, 966, 1142, 1318,
195  -87, -263, -439, -615, -790, -966, -1142, -1318, },
196  { 104, 314, 523, 733, 942, 1152, 1361, 1571,
197  -104, -314, -523, -733, -942, -1152, -1361, -1571, },
198  { 124, 374, 623, 873, 1122, 1372, 1621, 1871,
199  -124, -374, -623, -873, -1122, -1372, -1621, -1871, },
200  { 148, 445, 743, 1040, 1337, 1634, 1932, 2229,
201  -148, -445, -743, -1040, -1337, -1634, -1932, -2229, },
202  { 177, 531, 885, 1239, 1593, 1947, 2301, 2655,
203  -177, -531, -885, -1239, -1593, -1947, -2301, -2655, },
204  { 210, 632, 1053, 1475, 1896, 2318, 2739, 3161,
205  -210, -632, -1053, -1475, -1896, -2318, -2739, -3161, },
206  { 251, 753, 1255, 1757, 2260, 2762, 3264, 3766,
207  -251, -753, -1255, -1757, -2260, -2762, -3264, -3766, },
208  { 299, 897, 1495, 2093, 2692, 3290, 3888, 4486,
209  -299, -897, -1495, -2093, -2692, -3290, -3888, -4486, },
210  { 356, 1068, 1781, 2493, 3206, 3918, 4631, 5343,
211  -356, -1068, -1781, -2493, -3206, -3918, -4631, -5343, },
212  { 424, 1273, 2121, 2970, 3819, 4668, 5516, 6365,
213  -424, -1273, -2121, -2970, -3819, -4668, -5516, -6365, },
214 };
215 
216 static const int16_t oki_step_table[49] = {
217  16, 17, 19, 21, 23, 25, 28, 31, 34, 37,
218  41, 45, 50, 55, 60, 66, 73, 80, 88, 97,
219  107, 118, 130, 143, 157, 173, 190, 209, 230, 253,
220  279, 307, 337, 371, 408, 449, 494, 544, 598, 658,
221  724, 796, 876, 963, 1060, 1166, 1282, 1411, 1552
222 };
223 
224 // padded to zero where table size is less than 16
225 static const int8_t swf_index_tables[4][16] = {
226  /*2*/ { -1, 2 },
227  /*3*/ { -1, -1, 2, 4 },
228  /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
229  /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
230 };
231 
232 static const int8_t zork_index_table[8] = {
233  -1, -1, -1, 1, 4, 7, 10, 12,
234 };
235 
236 static const int8_t mtf_index_table[16] = {
237  8, 6, 4, 2, -1, -1, -1, -1,
238  -1, -1, -1, -1, 2, 4, 6, 8,
239 };
240 
241 /* end of tables */
242 
243 typedef struct ADPCMDecodeContext {
244  ADPCMChannelStatus status[14];
245  int vqa_version; /**< VQA version. Used for ADPCM_IMA_WS */
246  int has_status; /**< Status flag. Reset to 0 after a flush. */
247 } ADPCMDecodeContext;
248 
249 static void adpcm_flush(AVCodecContext *avctx);
250 
251 static av_cold int adpcm_decode_init(AVCodecContext *avctx)
252 {
253  ADPCMDecodeContext *c = avctx->priv_data;
254  unsigned int min_channels = 1;
255  unsigned int max_channels = 2;
256 
257  adpcm_flush(avctx);
258 
259  switch(avctx->codec->id) {
260  case AV_CODEC_ID_ADPCM_IMA_AMV:
261  max_channels = 1;
262  break;
263  case AV_CODEC_ID_ADPCM_DTK:
264  case AV_CODEC_ID_ADPCM_EA:
265  min_channels = 2;
266  break;
267  case AV_CODEC_ID_ADPCM_AFC:
268  case AV_CODEC_ID_ADPCM_EA_R1:
269  case AV_CODEC_ID_ADPCM_EA_R2:
270  case AV_CODEC_ID_ADPCM_EA_R3:
271  case AV_CODEC_ID_ADPCM_EA_XAS:
272  case AV_CODEC_ID_ADPCM_MS:
273  max_channels = 6;
274  break;
275  case AV_CODEC_ID_ADPCM_MTAF:
276  min_channels = 2;
277  max_channels = 8;
278  if (avctx->ch_layout.nb_channels & 1) {
279  avpriv_request_sample(avctx, "channel count %d", avctx->ch_layout.nb_channels);
280  return AVERROR_PATCHWELCOME;
281  }
282  break;
283  case AV_CODEC_ID_ADPCM_PSX:
284  max_channels = 8;
285  if (avctx->ch_layout.nb_channels <= 0 ||
286  avctx->block_align % (16 * avctx->ch_layout.nb_channels))
287  return AVERROR_INVALIDDATA;
288  break;
289  case AV_CODEC_ID_ADPCM_IMA_DAT4:
290  case AV_CODEC_ID_ADPCM_THP:
291  case AV_CODEC_ID_ADPCM_THP_LE:
292  max_channels = 14;
293  break;
294  }
295  if (avctx->ch_layout.nb_channels < min_channels ||
296  avctx->ch_layout.nb_channels > max_channels) {
297  av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
298  return AVERROR(EINVAL);
299  }
300 
301  switch(avctx->codec->id) {
302  case AV_CODEC_ID_ADPCM_IMA_WAV:
303  if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
304  return AVERROR_INVALIDDATA;
305  break;
306  case AV_CODEC_ID_ADPCM_ARGO:
307  if (avctx->bits_per_coded_sample != 4 ||
308  avctx->block_align != 17 * avctx->ch_layout.nb_channels)
309  return AVERROR_INVALIDDATA;
310  break;
311  case AV_CODEC_ID_ADPCM_ZORK:
312  if (avctx->bits_per_coded_sample != 8)
313  return AVERROR_INVALIDDATA;
314  break;
315  default:
316  break;
317  }
318 
319  switch (avctx->codec->id) {
320  case AV_CODEC_ID_ADPCM_AICA:
321  case AV_CODEC_ID_ADPCM_AFC:
322  case AV_CODEC_ID_ADPCM_DTK:
323  case AV_CODEC_ID_ADPCM_PSX:
324  case AV_CODEC_ID_ADPCM_MTAF:
325  case AV_CODEC_ID_ADPCM_ARGO:
326  case AV_CODEC_ID_ADPCM_THP:
327  case AV_CODEC_ID_ADPCM_THP_LE:
328  case AV_CODEC_ID_ADPCM_4XM:
329  case AV_CODEC_ID_ADPCM_XA:
330  case AV_CODEC_ID_ADPCM_XMD:
331  case AV_CODEC_ID_ADPCM_EA_R1:
332  case AV_CODEC_ID_ADPCM_EA_R2:
333  case AV_CODEC_ID_ADPCM_EA_R3:
334  case AV_CODEC_ID_ADPCM_EA_XAS:
335  case AV_CODEC_ID_ADPCM_IMA_CUNNING:
336  case AV_CODEC_ID_ADPCM_IMA_DAT4:
337  case AV_CODEC_ID_ADPCM_IMA_MOFLEX:
338  case AV_CODEC_ID_ADPCM_IMA_QT:
339  case AV_CODEC_ID_ADPCM_IMA_WAV:
340  avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
341  break;
342  case AV_CODEC_ID_ADPCM_IMA_WS:
343  avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
344                                            AV_SAMPLE_FMT_S16;
345  break;
346  case AV_CODEC_ID_ADPCM_MS:
347  avctx->sample_fmt = avctx->ch_layout.nb_channels > 2 ? AV_SAMPLE_FMT_S16P :
348                                                         AV_SAMPLE_FMT_S16;
349  break;
350  default:
351  avctx->sample_fmt = AV_SAMPLE_FMT_S16;
352  }
353  return 0;
354 }
355 
356 static inline int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
357 {
358  int delta, pred, step, add;
359 
360  pred = c->predictor;
361  delta = nibble & 7;
362  step = c->step;
363  add = (delta * 2 + 1) * step;
364  if (add < 0)
365  add = add + 7;
366 
367  if ((nibble & 8) == 0)
368  pred = av_clip(pred + (add >> 3), -32767, 32767);
369  else
370  pred = av_clip(pred - (add >> 3), -32767, 32767);
371 
372  switch (delta) {
373  case 7:
374  step *= 0x99;
375  break;
376  case 6:
377  c->step = av_clip(c->step * 2, 127, 24576);
378  c->predictor = pred;
379  return pred;
380  case 5:
381  step *= 0x66;
382  break;
383  case 4:
384  step *= 0x4d;
385  break;
386  default:
387  step *= 0x39;
388  break;
389  }
390 
391  if (step < 0)
392  step += 0x3f;
393 
394  c->step = step >> 6;
395  c->step = av_clip(c->step, 127, 24576);
396  c->predictor = pred;
397  return pred;
398 }
399 
400 static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
401 {
402  int step_index;
403  int predictor;
404  int sign, delta, diff, step;
405 
406  step = ff_adpcm_step_table[c->step_index];
407  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
408  step_index = av_clip(step_index, 0, 88);
409 
410  sign = nibble & 8;
411  delta = nibble & 7;
412  /* perform direct multiplication instead of series of jumps proposed by
413  * the reference ADPCM implementation since modern CPUs can do the mults
414  * quickly enough */
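    /* For the usual shift of 3 this computes (2 * delta + 1) * step / 8, the
     * classic IMA "vpdiff" of (delta + 0.5) * step / 4; adpcm_swf_decode()
     * below builds the same value with a bit-test loop instead. */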
415  diff = ((2 * delta + 1) * step) >> shift;
416  predictor = c->predictor;
417  if (sign) predictor -= diff;
418  else predictor += diff;
419 
420  c->predictor = av_clip_int16(predictor);
421  c->step_index = step_index;
422 
423  return (int16_t)c->predictor;
424 }
425 
426 static inline int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
427 {
428  int step_index;
429  int predictor;
430  int sign, delta, diff, step;
431 
432  step = ff_adpcm_step_table[c->step_index];
433  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
434  step_index = av_clip(step_index, 0, 88);
435 
436  sign = nibble & 8;
437  delta = nibble & 7;
438  diff = (delta * step) >> shift;
439  predictor = c->predictor;
440  if (sign) predictor -= diff;
441  else predictor += diff;
442 
443  c->predictor = av_clip_int16(predictor);
444  c->step_index = step_index;
445 
446  return (int16_t)c->predictor;
447 }
448 
449 static inline int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
450 {
451  int step_index, step, delta, predictor;
452 
453  step = ff_adpcm_step_table[c->step_index];
454 
455  delta = step * (2 * nibble - 15);
456  predictor = c->predictor + delta;
457 
458  step_index = c->step_index + mtf_index_table[(unsigned)nibble];
459  c->predictor = av_clip_int16(predictor >> 4);
460  c->step_index = av_clip(step_index, 0, 88);
461 
462  return (int16_t)c->predictor;
463 }
464 
465 static inline int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
466 {
467  int step_index;
468  int predictor;
469  int step;
470 
471  nibble = sign_extend(nibble & 0xF, 4);
472 
473  step = ima_cunning_step_table[c->step_index];
474  step_index = c->step_index + ima_cunning_index_table[abs(nibble)];
475  step_index = av_clip(step_index, 0, 60);
476 
477  predictor = c->predictor + step * nibble;
478 
479  c->predictor = av_clip_int16(predictor);
480  c->step_index = step_index;
481 
482  return c->predictor;
483 }
484 
485 static inline int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
486 {
487  int nibble, step_index, predictor, sign, delta, diff, step, shift;
488 
489  shift = bps - 1;
490  nibble = get_bits_le(gb, bps);
491  step = ff_adpcm_step_table[c->step_index];
492  step_index = c->step_index + adpcm_index_tables[bps - 2][nibble];
493  step_index = av_clip(step_index, 0, 88);
494 
495  sign = nibble & (1 << shift);
496  delta = av_mod_uintp2(nibble, shift);
497  diff = ((2 * delta + 1) * step) >> shift;
498  predictor = c->predictor;
499  if (sign) predictor -= diff;
500  else predictor += diff;
501 
502  c->predictor = av_clip_int16(predictor);
503  c->step_index = step_index;
504 
505  return (int16_t)c->predictor;
506 }
507 
508 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
509 {
510  int step_index;
511  int predictor;
512  int diff, step;
513 
514  step = ff_adpcm_step_table[c->step_index];
515  step_index = c->step_index + ff_adpcm_index_table[nibble];
516  step_index = av_clip(step_index, 0, 88);
517 
518  diff = step >> 3;
519  if (nibble & 4) diff += step;
520  if (nibble & 2) diff += step >> 1;
521  if (nibble & 1) diff += step >> 2;
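    /* The bit-test ladder above is the multiply-free form of the same
     * ((2 * delta + 1) * step) >> 3 update used in adpcm_ima_expand_nibble(),
     * differing only in where the integer truncation happens. */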
522 
523  if (nibble & 8)
524  predictor = c->predictor - diff;
525  else
526  predictor = c->predictor + diff;
527 
528  c->predictor = av_clip_int16(predictor);
529  c->step_index = step_index;
530 
531  return c->predictor;
532 }
533 
534 static inline int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
535 {
536  int predictor;
537 
538  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
539  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
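    /* The ff_adpcm_AdaptCoeff1/2 tables store the MS-ADPCM predictor
     * coefficients pre-divided by 4, which is why the prediction above is
     * divided by 64 rather than the 256 of the specification. */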
540 
541  c->sample2 = c->sample1;
542  c->sample1 = av_clip_int16(predictor);
543  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
544  if (c->idelta < 16) c->idelta = 16;
545  if (c->idelta > INT_MAX/768) {
546  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
547  c->idelta = INT_MAX/768;
548  }
549 
550  return c->sample1;
551 }
552 
553 static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
554 {
555  int step_index, predictor, sign, delta, diff, step;
556 
557  step = oki_step_table[c->step_index];
558  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
559  step_index = av_clip(step_index, 0, 48);
560 
561  sign = nibble & 8;
562  delta = nibble & 7;
563  diff = ((2 * delta + 1) * step) >> 3;
564  predictor = c->predictor;
565  if (sign) predictor -= diff;
566  else predictor += diff;
567 
568  c->predictor = av_clip_intp2(predictor, 11);
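    /* OKI/Dialogic ADPCM is a 12-bit format: the predictor is clipped to the
     * signed 12-bit range here and scaled by 16 to 16-bit output below. */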
569  c->step_index = step_index;
570 
571  return c->predictor * 16;
572 }
573 
574 static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
575 {
576  int sign, delta, diff;
577  int new_step;
578 
579  sign = nibble & 8;
580  delta = nibble & 7;
581  /* perform direct multiplication instead of series of jumps proposed by
582  * the reference ADPCM implementation since modern CPUs can do the mults
583  * quickly enough */
584  diff = ((2 * delta + 1) * c->step) >> 3;
585  /* predictor update is not so trivial: predictor is multiplied by 254/256 before updating */
586  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
587  c->predictor = av_clip_int16(c->predictor);
588  /* calculate new step and clamp it to range 511..32767 */
589  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
590  c->step = av_clip(new_step, 511, 32767);
591 
592  return (int16_t)c->predictor;
593 }
594 
595 static inline int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
596 {
597  int sign, delta, diff;
598 
599  sign = nibble & (1<<(size-1));
600  delta = nibble & ((1<<(size-1))-1);
601  diff = delta << (7 + c->step + shift);
602 
603  /* clamp result */
604  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
605 
606  /* calculate new step */
607  if (delta >= (2*size - 3) && c->step < 3)
608  c->step++;
609  else if (delta == 0 && c->step > 0)
610  c->step--;
611 
612  return (int16_t) c->predictor;
613 }
614 
615 static inline int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
616 {
617  if(!c->step) {
618  c->predictor = 0;
619  c->step = 127;
620  }
621 
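    /* ff_adpcm_yamaha_difflookup holds +-(2 * delta + 1), so the predictor
     * update below adds the familiar (2 * delta + 1) * step / 8 term; the
     * step then adapts multiplicatively and is clamped to [127, 24576]. */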
622  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
623  c->predictor = av_clip_int16(c->predictor);
624  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
625  c->step = av_clip(c->step, 127, 24576);
626  return c->predictor;
627 }
628 
629 static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
630 {
631  c->predictor += mtaf_stepsize[c->step][nibble];
632  c->predictor = av_clip_int16(c->predictor);
633  c->step += ff_adpcm_index_table[nibble];
634  c->step = av_clip_uintp2(c->step, 5);
635  return c->predictor;
636 }
637 
638 static inline int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
639 {
640  int16_t index = c->step_index;
641  uint32_t lookup_sample = ff_adpcm_step_table[index];
642  int32_t sample = 0;
643 
644  if (nibble & 0x40)
645  sample += lookup_sample;
646  if (nibble & 0x20)
647  sample += lookup_sample >> 1;
648  if (nibble & 0x10)
649  sample += lookup_sample >> 2;
650  if (nibble & 0x08)
651  sample += lookup_sample >> 3;
652  if (nibble & 0x04)
653  sample += lookup_sample >> 4;
654  if (nibble & 0x02)
655  sample += lookup_sample >> 5;
656  if (nibble & 0x01)
657  sample += lookup_sample >> 6;
658  if (nibble & 0x80)
659  sample = -sample;
660 
661  sample += c->predictor;
662  sample = av_clip_int16(sample);
663 
664  index += zork_index_table[(nibble >> 4) & 7];
665  index = av_clip(index, 0, 88);
666 
667  c->predictor = sample;
668  c->step_index = index;
669 
670  return sample;
671 }
672 
673 static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
674  const uint8_t *in, ADPCMChannelStatus *left,
675  ADPCMChannelStatus *right, int channels, int sample_offset)
676 {
677  int i, j;
678  int shift,filter,f0,f1;
679  int s_1,s_2;
680  int d,s,t;
681 
682  out0 += sample_offset;
683  if (channels == 1)
684  out1 = out0 + 28;
685  else
686  out1 += sample_offset;
687 
688  for(i=0;i<4;i++) {
689  shift = 12 - (in[4+i*2] & 15);
690  filter = in[4+i*2] >> 4;
691  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
692  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
693  filter=0;
694  }
695  if (shift < 0) {
696  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
697  shift = 0;
698  }
699  f0 = xa_adpcm_table[filter][0];
700  f1 = xa_adpcm_table[filter][1];
701 
702  s_1 = left->sample1;
703  s_2 = left->sample2;
704 
705  for(j=0;j<28;j++) {
706  d = in[16+i+j*4];
707 
708  t = sign_extend(d, 4);
709  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
710  s_2 = s_1;
711  s_1 = av_clip_int16(s);
712  out0[j] = s_1;
713  }
714 
715  if (channels == 2) {
716  left->sample1 = s_1;
717  left->sample2 = s_2;
718  s_1 = right->sample1;
719  s_2 = right->sample2;
720  }
721 
722  shift = 12 - (in[5+i*2] & 15);
723  filter = in[5+i*2] >> 4;
724  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
725  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
726  filter=0;
727  }
728  if (shift < 0) {
729  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
730  shift = 0;
731  }
732 
733  f0 = xa_adpcm_table[filter][0];
734  f1 = xa_adpcm_table[filter][1];
735 
736  for(j=0;j<28;j++) {
737  d = in[16+i+j*4];
738 
739  t = sign_extend(d >> 4, 4);
740  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
741  s_2 = s_1;
742  s_1 = av_clip_int16(s);
743  out1[j] = s_1;
744  }
745 
746  if (channels == 2) {
747  right->sample1 = s_1;
748  right->sample2 = s_2;
749  } else {
750  left->sample1 = s_1;
751  left->sample2 = s_2;
752  }
753 
754  out0 += 28 * (3 - channels);
755  out1 += 28 * (3 - channels);
756  }
757 
758  return 0;
759 }
760 
761 static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
762 {
763  ADPCMDecodeContext *c = avctx->priv_data;
764  GetBitContext gb;
765  const int8_t *table;
766  int channels = avctx->ch_layout.nb_channels;
767  int k0, signmask, nb_bits, count;
768  int size = buf_size*8;
769  int i;
770 
771  init_get_bits(&gb, buf, size);
772 
773  //read bits & initial values
774  nb_bits = get_bits(&gb, 2)+2;
775  table = swf_index_tables[nb_bits-2];
776  k0 = 1 << (nb_bits-2);
777  signmask = 1 << (nb_bits-1);
778 
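    /* Each block starts with a 16-bit predictor and a 6-bit step index per
     * channel (the 22 * channels header bits tested below), followed by up
     * to 4095 codes of nb_bits bits per channel. */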
779  while (get_bits_count(&gb) <= size - 22 * channels) {
780  for (i = 0; i < channels; i++) {
781  *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
782  c->status[i].step_index = get_bits(&gb, 6);
783  }
784 
785  for (count = 0; get_bits_count(&gb) <= size - nb_bits * channels && count < 4095; count++) {
786  int i;
787 
788  for (i = 0; i < channels; i++) {
789  // similar to IMA adpcm
790  int delta = get_bits(&gb, nb_bits);
791  int step = ff_adpcm_step_table[c->status[i].step_index];
792  int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
793  int k = k0;
794 
795  do {
796  if (delta & k)
797  vpdiff += step;
798  step >>= 1;
799  k >>= 1;
800  } while(k);
801  vpdiff += step;
802 
803  if (delta & signmask)
804  c->status[i].predictor -= vpdiff;
805  else
806  c->status[i].predictor += vpdiff;
807 
808  c->status[i].step_index += table[delta & (~signmask)];
809 
810  c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
811  c->status[i].predictor = av_clip_int16(c->status[i].predictor);
812 
813  *samples++ = c->status[i].predictor;
814  }
815  }
816  }
817 }
818 
819 int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
820 {
821  int sample = sign_extend(nibble, 4) * (1 << shift);
822 
823  if (flag)
824  sample += (8 * cs->sample1) - (4 * cs->sample2);
825  else
826  sample += 4 * cs->sample1;
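    /* Values are kept in 2-bit fixed point until the shift below, so the two
     * predictors are effectively 2*sample1 - sample2 (flag set) and plain
     * sample1 (flag clear). */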
827 
828  sample = av_clip_int16(sample >> 2);
829 
830  cs->sample2 = cs->sample1;
831  cs->sample1 = sample;
832 
833  return sample;
834 }
835 
836 /**
837  * Get the number of samples (per channel) that will be decoded from the packet.
838  * In one case, this is actually the maximum number of samples possible to
839  * decode with the given buf_size.
840  *
841  * @param[out] coded_samples set to the number of samples as coded in the
842  * packet, or 0 if the codec does not encode the
843  * number of samples in each frame.
844  * @param[out] approx_nb_samples set to non-zero if the number of samples
845  * returned is an approximation.
846  */
847 static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
848                           int buf_size, int *coded_samples, int *approx_nb_samples)
849 {
850  ADPCMDecodeContext *s = avctx->priv_data;
851  int nb_samples = 0;
852  int ch = avctx->ch_layout.nb_channels;
853  int has_coded_samples = 0;
854  int header_size;
855 
856  *coded_samples = 0;
857  *approx_nb_samples = 0;
858 
859  if(ch <= 0)
860  return 0;
861 
862  switch (avctx->codec->id) {
863  /* constant, only check buf_size */
864  case AV_CODEC_ID_ADPCM_EA_XAS:
865  if (buf_size < 76 * ch)
866  return 0;
867  nb_samples = 128;
868  break;
869  case AV_CODEC_ID_ADPCM_IMA_QT:
870  if (buf_size < 34 * ch)
871  return 0;
872  nb_samples = 64;
873  break;
874  /* simple 4-bit adpcm */
875  case AV_CODEC_ID_ADPCM_CT:
876  case AV_CODEC_ID_ADPCM_IMA_APC:
877  case AV_CODEC_ID_ADPCM_IMA_CUNNING:
878  case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
879  case AV_CODEC_ID_ADPCM_IMA_OKI:
880  case AV_CODEC_ID_ADPCM_IMA_WS:
881  case AV_CODEC_ID_ADPCM_YAMAHA:
882  case AV_CODEC_ID_ADPCM_AICA:
883  case AV_CODEC_ID_ADPCM_IMA_SSI:
884  case AV_CODEC_ID_ADPCM_IMA_APM:
885  case AV_CODEC_ID_ADPCM_IMA_ALP:
886  case AV_CODEC_ID_ADPCM_IMA_MTF:
887  nb_samples = buf_size * 2 / ch;
888  break;
889  }
890  if (nb_samples)
891  return nb_samples;
892 
893  /* simple 4-bit adpcm, with header */
894  header_size = 0;
895  switch (avctx->codec->id) {
896  case AV_CODEC_ID_ADPCM_4XM:
897  case AV_CODEC_ID_ADPCM_AGM:
898  case AV_CODEC_ID_ADPCM_IMA_ACORN:
899  case AV_CODEC_ID_ADPCM_IMA_DAT4:
900  case AV_CODEC_ID_ADPCM_IMA_MOFLEX:
901  case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
902  case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4 * ch; break;
903  }
904  if (header_size > 0)
905  return (buf_size - header_size) * 2 / ch;
906 
907  /* more complex formats */
908  switch (avctx->codec->id) {
909  case AV_CODEC_ID_ADPCM_IMA_AMV:
910  bytestream2_skip(gb, 4);
911  has_coded_samples = 1;
912  *coded_samples = bytestream2_get_le32u(gb);
913  nb_samples = FFMIN((buf_size - 8) * 2, *coded_samples);
914  bytestream2_seek(gb, -8, SEEK_CUR);
915  break;
916  case AV_CODEC_ID_ADPCM_EA:
917  has_coded_samples = 1;
918  *coded_samples = bytestream2_get_le32(gb);
919  *coded_samples -= *coded_samples % 28;
920  nb_samples = (buf_size - 12) / 30 * 28;
921  break;
921  break;
922  case AV_CODEC_ID_ADPCM_IMA_EA_EACS:
923  has_coded_samples = 1;
924  *coded_samples = bytestream2_get_le32(gb);
925  nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
926  break;
926  break;
927  case AV_CODEC_ID_ADPCM_EA_MAXIS_XA:
928  nb_samples = (buf_size - ch) / ch * 2;
929  break;
930  case AV_CODEC_ID_ADPCM_EA_R1:
931  case AV_CODEC_ID_ADPCM_EA_R2:
932  case AV_CODEC_ID_ADPCM_EA_R3:
933  /* maximum number of samples */
934  /* has internal offsets and a per-frame switch to signal raw 16-bit */
935  has_coded_samples = 1;
936  switch (avctx->codec->id) {
937  case AV_CODEC_ID_ADPCM_EA_R1:
938  header_size = 4 + 9 * ch;
939  *coded_samples = bytestream2_get_le32(gb);
940  break;
941  case AV_CODEC_ID_ADPCM_EA_R2:
942  header_size = 4 + 5 * ch;
943  *coded_samples = bytestream2_get_le32(gb);
944  break;
945  case AV_CODEC_ID_ADPCM_EA_R3:
946  header_size = 4 + 5 * ch;
947  *coded_samples = bytestream2_get_be32(gb);
948  break;
949  }
950  *coded_samples -= *coded_samples % 28;
951  nb_samples = (buf_size - header_size) * 2 / ch;
952  nb_samples -= nb_samples % 28;
953  *approx_nb_samples = 1;
954  break;
955  case AV_CODEC_ID_ADPCM_IMA_DK3:
956  if (avctx->block_align > 0)
957  buf_size = FFMIN(buf_size, avctx->block_align);
958  nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
959  break;
959  break;
960  case AV_CODEC_ID_ADPCM_IMA_DK4:
961  if (avctx->block_align > 0)
962  buf_size = FFMIN(buf_size, avctx->block_align);
963  if (buf_size < 4 * ch)
964  return AVERROR_INVALIDDATA;
965  nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
966  break;
967  case AV_CODEC_ID_ADPCM_IMA_RAD:
968  if (avctx->block_align > 0)
969  buf_size = FFMIN(buf_size, avctx->block_align);
970  nb_samples = (buf_size - 4 * ch) * 2 / ch;
971  break;
972  CASE(ADPCM_IMA_WAV,
973  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
974  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
975  if (avctx->block_align > 0)
976  buf_size = FFMIN(buf_size, avctx->block_align);
977  if (buf_size < 4 * ch)
978  return AVERROR_INVALIDDATA;
979  nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
980  ) /* End of CASE */
981  case AV_CODEC_ID_ADPCM_MS:
982  if (avctx->block_align > 0)
983  buf_size = FFMIN(buf_size, avctx->block_align);
984  nb_samples = (buf_size - 6 * ch) * 2 / ch;
985  break;
986  case AV_CODEC_ID_ADPCM_MTAF:
987  if (avctx->block_align > 0)
988  buf_size = FFMIN(buf_size, avctx->block_align);
989  nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
990  break;
991  case AV_CODEC_ID_ADPCM_SBPRO_2:
992  case AV_CODEC_ID_ADPCM_SBPRO_3:
993  case AV_CODEC_ID_ADPCM_SBPRO_4:
994  {
995  int samples_per_byte;
996  switch (avctx->codec->id) {
997  case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
998  case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
999  case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
1000  }
1001  if (!s->status[0].step_index) {
1002  if (buf_size < ch)
1003  return AVERROR_INVALIDDATA;
1004  nb_samples++;
1005  buf_size -= ch;
1006  }
1007  nb_samples += buf_size * samples_per_byte / ch;
1008  break;
1009  }
1010  case AV_CODEC_ID_ADPCM_SWF:
1011  {
1012  int buf_bits = buf_size * 8 - 2;
1013  int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
1014  int block_hdr_size = 22 * ch;
1015  int block_size = block_hdr_size + nbits * ch * 4095;
1016  int nblocks = buf_bits / block_size;
1017  int bits_left = buf_bits - nblocks * block_size;
1018  nb_samples = nblocks * 4096;
1019  if (bits_left >= block_hdr_size)
1020  nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
1021  break;
1022  }
1023  case AV_CODEC_ID_ADPCM_THP:
1024  case AV_CODEC_ID_ADPCM_THP_LE:
1025  if (avctx->extradata) {
1026  nb_samples = buf_size * 14 / (8 * ch);
1027  break;
1028  }
1029  has_coded_samples = 1;
1030  bytestream2_skip(gb, 4); // channel size
1031  *coded_samples = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
1032  bytestream2_get_le32(gb) :
1033  bytestream2_get_be32(gb);
1034  buf_size -= 8 + 36 * ch;
1035  buf_size /= ch;
1036  nb_samples = buf_size / 8 * 14;
1037  if (buf_size % 8 > 1)
1038  nb_samples += (buf_size % 8 - 1) * 2;
1039  *approx_nb_samples = 1;
1040  break;
1041  case AV_CODEC_ID_ADPCM_AFC:
1042  nb_samples = buf_size / (9 * ch) * 16;
1043  break;
1044  case AV_CODEC_ID_ADPCM_XA:
1045  nb_samples = (buf_size / 128) * 224 / ch;
1046  break;
1047  case AV_CODEC_ID_ADPCM_XMD:
1048  nb_samples = buf_size / (21 * ch) * 32;
1049  break;
1050  case AV_CODEC_ID_ADPCM_DTK:
1051  case AV_CODEC_ID_ADPCM_PSX:
1052  nb_samples = buf_size / (16 * ch) * 28;
1053  break;
1054  case AV_CODEC_ID_ADPCM_ARGO:
1055  nb_samples = buf_size / avctx->block_align * 32;
1056  break;
1057  case AV_CODEC_ID_ADPCM_ZORK:
1058  nb_samples = buf_size / ch;
1059  break;
1060  }
1061 
1062  /* validate coded sample count */
1063  if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
1064  return AVERROR_INVALIDDATA;
1065 
1066  return nb_samples;
1067 }
1068 
1069 static int adpcm_decode_frame(AVCodecContext *avctx, AVFrame *frame,
1070                               int *got_frame_ptr, AVPacket *avpkt)
1071 {
1072  const uint8_t *buf = avpkt->data;
1073  int buf_size = avpkt->size;
1074  ADPCMDecodeContext *c = avctx->priv_data;
1075  int channels = avctx->ch_layout.nb_channels;
1076  int16_t *samples;
1077  int16_t **samples_p;
1078  int st; /* stereo */
1079  int nb_samples, coded_samples, approx_nb_samples, ret;
1080  GetByteContext gb;
1081 
1082  bytestream2_init(&gb, buf, buf_size);
1083  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
1084  if (nb_samples <= 0) {
1085  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
1086  return AVERROR_INVALIDDATA;
1087  }
1088 
1089  /* get output buffer */
1090  frame->nb_samples = nb_samples;
1091  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
1092  return ret;
1093  samples = (int16_t *)frame->data[0];
1094  samples_p = (int16_t **)frame->extended_data;
1095 
1096  /* use coded_samples when applicable */
1097  /* it is always <= nb_samples, so the output buffer will be large enough */
1098  if (coded_samples) {
1099  if (!approx_nb_samples && coded_samples != nb_samples)
1100  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
1101  frame->nb_samples = nb_samples = coded_samples;
1102  }
1103 
1104  st = channels == 2 ? 1 : 0;
1105 
1106  switch(avctx->codec->id) {
1107  CASE(ADPCM_IMA_QT,
1108  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
1109  Channel data is interleaved per-chunk. */
1110  for (int channel = 0; channel < channels; channel++) {
1111  ADPCMChannelStatus *cs = &c->status[channel];
1112  int predictor;
1113  int step_index;
1114  /* (pppppp) (piiiiiii) */
1115 
1116  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
1117  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1118  step_index = predictor & 0x7F;
1119  predictor &= ~0x7F;
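    /* Worked example (not from the original source): a header word of 0x1234
     * yields step_index 0x34 (52) and predictor 0x1200. */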
1120 
1121  if (cs->step_index == step_index) {
1122  int diff = predictor - cs->predictor;
1123  if (diff < 0)
1124  diff = - diff;
1125  if (diff > 0x7f)
1126  goto update;
1127  } else {
1128  update:
1129  cs->step_index = step_index;
1130  cs->predictor = predictor;
1131  }
1132 
1133  if (cs->step_index > 88u){
1134  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1135  channel, cs->step_index);
1136  return AVERROR_INVALIDDATA;
1137  }
1138 
1139  samples = samples_p[channel];
1140 
1141  for (int m = 0; m < 64; m += 2) {
1142  int byte = bytestream2_get_byteu(&gb);
1143  samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F);
1144  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 );
1145  }
1146  }
1147  ) /* End of CASE */
1148  CASE(ADPCM_IMA_WAV,
1149  for (int i = 0; i < channels; i++) {
1150  ADPCMChannelStatus *cs = &c->status[i];
1151  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
1152 
1153  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1154  if (cs->step_index > 88u){
1155  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1156  i, cs->step_index);
1157  return AVERROR_INVALIDDATA;
1158  }
1159  }
1160 
1161  if (avctx->bits_per_coded_sample != 4) {
1162  int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
1163  int block_size = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
1164  uint8_t temp[20 + AV_INPUT_BUFFER_PADDING_SIZE] = { 0 };
1165  GetBitContext g;
1166 
1167  for (int n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
1168  for (int i = 0; i < channels; i++) {
1169  ADPCMChannelStatus *cs = &c->status[i];
1170  samples = &samples_p[i][1 + n * samples_per_block];
1171  for (int j = 0; j < block_size; j++) {
1172  temp[j] = buf[4 * channels + block_size * n * channels +
1173  (j % 4) + (j / 4) * (channels * 4) + i * 4];
1174  }
1175  ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
1176  if (ret < 0)
1177  return ret;
1178  for (int m = 0; m < samples_per_block; m++) {
1179  samples[m] = adpcm_ima_wav_expand_nibble(cs, &g,
1180                avctx->bits_per_coded_sample);
1181  }
1182  }
1183  }
1184  bytestream2_skip(&gb, avctx->block_align - channels * 4);
1185  } else {
1186  for (int n = 0; n < (nb_samples - 1) / 8; n++) {
1187  for (int i = 0; i < channels; i++) {
1188  ADPCMChannelStatus *cs = &c->status[i];
1189  samples = &samples_p[i][1 + n * 8];
1190  for (int m = 0; m < 8; m += 2) {
1191  int v = bytestream2_get_byteu(&gb);
1192  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1193  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1194  }
1195  }
1196  }
1197  }
1198  ) /* End of CASE */
1199  CASE(ADPCM_4XM,
1200  for (int i = 0; i < channels; i++)
1201  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1202 
1203  for (int i = 0; i < channels; i++) {
1204  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1205  if (c->status[i].step_index > 88u) {
1206  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1207  i, c->status[i].step_index);
1208  return AVERROR_INVALIDDATA;
1209  }
1210  }
1211 
1212  for (int i = 0; i < channels; i++) {
1213  ADPCMChannelStatus *cs = &c->status[i];
1214  samples = (int16_t *)frame->data[i];
1215  for (int n = nb_samples >> 1; n > 0; n--) {
1216  int v = bytestream2_get_byteu(&gb);
1217  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
1218  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
1219  }
1220  }
1221  ) /* End of CASE */
1222  CASE(ADPCM_AGM,
1223  for (int i = 0; i < channels; i++)
1224  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1225  for (int i = 0; i < channels; i++)
1226  c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
1227 
1228  for (int n = 0; n < nb_samples >> (1 - st); n++) {
1229  int v = bytestream2_get_byteu(&gb);
1230  *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
1231  *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4 );
1232  }
1233  ) /* End of CASE */
1234  CASE(ADPCM_MS,
1235  int block_predictor;
1236 
1237  if (avctx->ch_layout.nb_channels > 2) {
1238  for (int channel = 0; channel < avctx->ch_layout.nb_channels; channel++) {
1239  samples = samples_p[channel];
1240  block_predictor = bytestream2_get_byteu(&gb);
1241  if (block_predictor > 6) {
1242  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[%d] = %d\n",
1243  channel, block_predictor);
1244  return AVERROR_INVALIDDATA;
1245  }
1246  c->status[channel].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1247  c->status[channel].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1248  c->status[channel].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1249  c->status[channel].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1250  c->status[channel].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1251  *samples++ = c->status[channel].sample2;
1252  *samples++ = c->status[channel].sample1;
1253  for (int n = (nb_samples - 2) >> 1; n > 0; n--) {
1254  int byte = bytestream2_get_byteu(&gb);
1255  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte >> 4 );
1256  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte & 0x0F);
1257  }
1258  }
1259  } else {
1260  block_predictor = bytestream2_get_byteu(&gb);
1261  if (block_predictor > 6) {
1262  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
1263  block_predictor);
1264  return AVERROR_INVALIDDATA;
1265  }
1266  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1267  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1268  if (st) {
1269  block_predictor = bytestream2_get_byteu(&gb);
1270  if (block_predictor > 6) {
1271  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
1272  block_predictor);
1273  return AVERROR_INVALIDDATA;
1274  }
1275  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1276  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1277  }
1278  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1279  if (st){
1280  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1281  }
1282 
1283  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1284  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1285  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1286  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1287 
1288  *samples++ = c->status[0].sample2;
1289  if (st) *samples++ = c->status[1].sample2;
1290  *samples++ = c->status[0].sample1;
1291  if (st) *samples++ = c->status[1].sample1;
1292  for (int n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
1293  int byte = bytestream2_get_byteu(&gb);
1294  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
1295  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
1296  }
1297  }
1298  ) /* End of CASE */
1299  CASE(ADPCM_MTAF,
1300  for (int channel = 0; channel < channels; channel += 2) {
1301  bytestream2_skipu(&gb, 4);
1302  c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
1303  c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
1304  c->status[channel ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1305  bytestream2_skipu(&gb, 2);
1306  c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1307  bytestream2_skipu(&gb, 2);
1308  for (int n = 0; n < nb_samples; n += 2) {
1309  int v = bytestream2_get_byteu(&gb);
1310  samples_p[channel][n ] = adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
1311  samples_p[channel][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4 );
1312  }
1313  for (int n = 0; n < nb_samples; n += 2) {
1314  int v = bytestream2_get_byteu(&gb);
1315  samples_p[channel + 1][n ] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
1316  samples_p[channel + 1][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4 );
1317  }
1318  }
1319  ) /* End of CASE */
1320  CASE(ADPCM_IMA_DK4,
1321  for (int channel = 0; channel < channels; channel++) {
1322  ADPCMChannelStatus *cs = &c->status[channel];
1323  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
1324  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1325  if (cs->step_index > 88u){
1326  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1327  channel, cs->step_index);
1328  return AVERROR_INVALIDDATA;
1329  }
1330  }
1331  for (int n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1332  int v = bytestream2_get_byteu(&gb);
1333  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
1334  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1335  }
1336  ) /* End of CASE */
1337 
1338  /* DK3 ADPCM support macro */
1339 #define DK3_GET_NEXT_NIBBLE() \
1340  if (decode_top_nibble_next) { \
1341  nibble = last_byte >> 4; \
1342  decode_top_nibble_next = 0; \
1343  } else { \
1344  last_byte = bytestream2_get_byteu(&gb); \
1345  nibble = last_byte & 0x0F; \
1346  decode_top_nibble_next = 1; \
1347  }
1348  CASE(ADPCM_IMA_DK3,
1349  int last_byte = 0;
1350  int nibble;
1351  int decode_top_nibble_next = 0;
1352  int diff_channel;
1353  const int16_t *samples_end = samples + channels * nb_samples;
1354 
1355  bytestream2_skipu(&gb, 10);
1356  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1357  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1358  c->status[0].step_index = bytestream2_get_byteu(&gb);
1359  c->status[1].step_index = bytestream2_get_byteu(&gb);
1360  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
1361  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
1362  c->status[0].step_index, c->status[1].step_index);
1363  return AVERROR_INVALIDDATA;
1364  }
1365  /* sign extend the predictors */
1366  diff_channel = c->status[1].predictor;
1367 
1368  while (samples < samples_end) {
1369 
1370  /* for this algorithm, c->status[0] is the sum channel and
1371  * c->status[1] is the diff channel */
1372 
1373  /* process the first predictor of the sum channel */
1374  DK3_GET_NEXT_NIBBLE();
1375  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1376 
1377  /* process the diff channel predictor */
1378  DK3_GET_NEXT_NIBBLE();
1379  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
1380 
1381  /* process the first pair of stereo PCM samples */
1382  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1383  *samples++ = c->status[0].predictor + c->status[1].predictor;
1384  *samples++ = c->status[0].predictor - c->status[1].predictor;
1385 
1386  /* process the second predictor of the sum channel */
1387  DK3_GET_NEXT_NIBBLE();
1388  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1389 
1390  /* process the second pair of stereo PCM samples */
1391  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1392  *samples++ = c->status[0].predictor + c->status[1].predictor;
1393  *samples++ = c->status[0].predictor - c->status[1].predictor;
1394  }
1395 
1396  if ((bytestream2_tell(&gb) & 1))
1397  bytestream2_skip(&gb, 1);
1398  ) /* End of CASE */
1399  CASE(ADPCM_IMA_ISS,
1400  for (int channel = 0; channel < channels; channel++) {
1401  ADPCMChannelStatus *cs = &c->status[channel];
1402  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1403  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1404  if (cs->step_index > 88u){
1405  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1406  channel, cs->step_index);
1407  return AVERROR_INVALIDDATA;
1408  }
1409  }
1410 
1411  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1412  int v1, v2;
1413  int v = bytestream2_get_byteu(&gb);
1414  /* nibbles are swapped for mono */
1415  if (st) {
1416  v1 = v >> 4;
1417  v2 = v & 0x0F;
1418  } else {
1419  v2 = v >> 4;
1420  v1 = v & 0x0F;
1421  }
1422  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
1423  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
1424  }
1425  ) /* End of CASE */
1426  CASE(ADPCM_IMA_MOFLEX,
1427  for (int channel = 0; channel < channels; channel++) {
1428  ADPCMChannelStatus *cs = &c->status[channel];
1429  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1430  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1431  if (cs->step_index > 88u){
1432  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1433  channel, cs->step_index);
1434  return AVERROR_INVALIDDATA;
1435  }
1436  }
1437 
1438  for (int subframe = 0; subframe < nb_samples / 256; subframe++) {
1439  for (int channel = 0; channel < channels; channel++) {
1440  samples = samples_p[channel] + 256 * subframe;
1441  for (int n = 0; n < 256; n += 2) {
1442  int v = bytestream2_get_byteu(&gb);
1443  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1444  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1445  }
1446  }
1447  }
1448  ) /* End of CASE */
1449  CASE(ADPCM_IMA_DAT4,
1450  for (int channel = 0; channel < channels; channel++) {
1451  ADPCMChannelStatus *cs = &c->status[channel];
1452  samples = samples_p[channel];
1453  bytestream2_skip(&gb, 4);
1454  for (int n = 0; n < nb_samples; n += 2) {
1455  int v = bytestream2_get_byteu(&gb);
1456  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1457  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1458  }
1459  }
1460  ) /* End of CASE */
1461  CASE(ADPCM_IMA_APC,
1462  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1463  int v = bytestream2_get_byteu(&gb);
1464  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
1465  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1466  }
1467  ) /* End of CASE */
1468  CASE(ADPCM_IMA_SSI,
1469  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1470  int v = bytestream2_get_byteu(&gb);
1471  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0], v >> 4 );
1472  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0x0F);
1473  }
1474  ) /* End of CASE */
1475  CASE(ADPCM_IMA_APM,
1476  for (int n = nb_samples / 2; n > 0; n--) {
1477  for (int channel = 0; channel < channels; channel++) {
1478  int v = bytestream2_get_byteu(&gb);
1479  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[channel], v >> 4 );
1480  samples[st] = adpcm_ima_qt_expand_nibble(&c->status[channel], v & 0x0F);
1481  }
1482  samples += channels;
1483  }
1484  ) /* End of CASE */
1485  CASE(ADPCM_IMA_ALP,
1486  for (int n = nb_samples / 2; n > 0; n--) {
1487  for (int channel = 0; channel < channels; channel++) {
1488  int v = bytestream2_get_byteu(&gb);
1489  *samples++ = adpcm_ima_alp_expand_nibble(&c->status[channel], v >> 4 , 2);
1490  samples[st] = adpcm_ima_alp_expand_nibble(&c->status[channel], v & 0x0F, 2);
1491  }
1492  samples += channels;
1493  }
1494  ) /* End of CASE */
1495  CASE(ADPCM_IMA_CUNNING,
1496  for (int channel = 0; channel < channels; channel++) {
1497  int16_t *smp = samples_p[channel];
1498  for (int n = 0; n < nb_samples / 2; n++) {
1499  int v = bytestream2_get_byteu(&gb);
1500  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v & 0x0F);
1501  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v >> 4);
1502  }
1503  }
1504  ) /* End of CASE */
1505  CASE(ADPCM_IMA_OKI,
1506  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1507  int v = bytestream2_get_byteu(&gb);
1508  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
1509  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
1510  }
1511  ) /* End of CASE */
1512  CASE(ADPCM_IMA_RAD,
1513  for (int channel = 0; channel < channels; channel++) {
1514  ADPCMChannelStatus *cs = &c->status[channel];
1515  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1516  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1517  if (cs->step_index > 88u){
1518  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1519  channel, cs->step_index);
1520  return AVERROR_INVALIDDATA;
1521  }
1522  }
1523  for (int n = 0; n < nb_samples / 2; n++) {
1524  int byte[2];
1525 
1526  byte[0] = bytestream2_get_byteu(&gb);
1527  if (st)
1528  byte[1] = bytestream2_get_byteu(&gb);
1529  for (int channel = 0; channel < channels; channel++) {
1530  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
1531  }
1532  for (int channel = 0; channel < channels; channel++) {
1533  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4 , 3);
1534  }
1535  }
1536  ) /* End of CASE */
1537  CASE(ADPCM_IMA_WS,
1538  if (c->vqa_version == 3) {
1539  for (int channel = 0; channel < channels; channel++) {
1540  int16_t *smp = samples_p[channel];
1541 
1542  for (int n = nb_samples / 2; n > 0; n--) {
1543  int v = bytestream2_get_byteu(&gb);
1544  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1545  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1546  }
1547  }
1548  } else {
1549  for (int n = nb_samples / 2; n > 0; n--) {
1550  for (int channel = 0; channel < channels; channel++) {
1551  int v = bytestream2_get_byteu(&gb);
1552  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1553  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1554  }
1555  samples += channels;
1556  }
1557  }
1558  bytestream2_seek(&gb, 0, SEEK_END);
1559  ) /* End of CASE */
1560  CASE(ADPCM_XMD,
1561  int bytes_remaining, block = 0;
1562  while (bytestream2_get_bytes_left(&gb) >= 21 * channels) {
1563  for (int channel = 0; channel < channels; channel++) {
1564  int16_t *out = samples_p[channel] + block * 32;
1565  int16_t history[2];
1566  uint16_t scale;
1567 
1568  history[1] = sign_extend(bytestream2_get_le16(&gb), 16);
1569  history[0] = sign_extend(bytestream2_get_le16(&gb), 16);
1570  scale = bytestream2_get_le16(&gb);
1571 
1572  out[0] = history[1];
1573  out[1] = history[0];
1574 
1575  for (int n = 0; n < 15; n++) {
1576  unsigned byte = bytestream2_get_byte(&gb);
1577  int32_t nibble[2];
1578 
1579  nibble[0] = sign_extend(byte & 15, 4);
1580  nibble[1] = sign_extend(byte >> 4, 4);
1581 
1582  out[2+n*2] = nibble[0]*scale + ((history[0]*3667 - history[1]*1642) >> 11);
1583  history[1] = history[0];
1584  history[0] = out[2+n*2];
1585 
1586  out[2+n*2+1] = nibble[1]*scale + ((history[0]*3667 - history[1]*1642) >> 11);
1587  history[1] = history[0];
1588  history[0] = out[2+n*2+1];
1589  }
1590  }
1591 
1592  block++;
1593  }
1594  bytes_remaining = bytestream2_get_bytes_left(&gb);
1595  if (bytes_remaining > 0) {
1596  bytestream2_skip(&gb, bytes_remaining);
1597  }
1598  ) /* End of CASE */
1599  CASE(ADPCM_XA,
1600  int16_t *out0 = samples_p[0];
1601  int16_t *out1 = samples_p[1];
1602  int samples_per_block = 28 * (3 - channels) * 4;
1603  int sample_offset = 0;
1604  int bytes_remaining;
1605  while (bytestream2_get_bytes_left(&gb) >= 128) {
1606  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
1607  &c->status[0], &c->status[1],
1608  channels, sample_offset)) < 0)
1609  return ret;
1610  bytestream2_skipu(&gb, 128);
1611  sample_offset += samples_per_block;
1612  }
1613  /* Less than a full block of data left, e.g. when reading from
1614  * 2324 byte per sector XA; the remainder is padding */
1615  bytes_remaining = bytestream2_get_bytes_left(&gb);
1616  if (bytes_remaining > 0) {
1617  bytestream2_skip(&gb, bytes_remaining);
1618  }
1619  ) /* End of CASE */
1620  CASE(ADPCM_IMA_EA_EACS,
1621  for (int i = 0; i <= st; i++) {
1622  c->status[i].step_index = bytestream2_get_le32u(&gb);
1623  if (c->status[i].step_index > 88u) {
1624  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1625  i, c->status[i].step_index);
1626  return AVERROR_INVALIDDATA;
1627  }
1628  }
1629  for (int i = 0; i <= st; i++) {
1630  c->status[i].predictor = bytestream2_get_le32u(&gb);
1631  if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
1632  return AVERROR_INVALIDDATA;
1633  }
1634 
1635  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1636  int byte = bytestream2_get_byteu(&gb);
1637  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
1638  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
1639  }
1640  ) /* End of CASE */
1641  CASE(ADPCM_IMA_EA_SEAD,
1642  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1643  int byte = bytestream2_get_byteu(&gb);
1644  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
1645  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
1646  }
1647  ) /* End of CASE */
1648  CASE(ADPCM_EA,
1649  int previous_left_sample, previous_right_sample;
1650  int current_left_sample, current_right_sample;
1651  int next_left_sample, next_right_sample;
1652  int coeff1l, coeff2l, coeff1r, coeff2r;
1653  int shift_left, shift_right;
1654 
1655  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
1656  each coding 28 stereo samples. */
1657 
1658  if (channels != 2)
1659  return AVERROR_INVALIDDATA;
1660 
1661  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1662  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1663  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1664  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1665 
1666  for (int count1 = 0; count1 < nb_samples / 28; count1++) {
1667  int byte = bytestream2_get_byteu(&gb);
1668  coeff1l = ea_adpcm_table[ byte >> 4 ];
1669  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
1670  coeff1r = ea_adpcm_table[ byte & 0x0F];
1671  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
1672 
1673  byte = bytestream2_get_byteu(&gb);
1674  shift_left = 20 - (byte >> 4);
1675  shift_right = 20 - (byte & 0x0F);
1676 
1677  for (int count2 = 0; count2 < 28; count2++) {
1678  byte = bytestream2_get_byteu(&gb);
1679  next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
1680  next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
1681 
1682  next_left_sample = (next_left_sample +
1683  (current_left_sample * coeff1l) +
1684  (previous_left_sample * coeff2l) + 0x80) >> 8;
1685  next_right_sample = (next_right_sample +
1686  (current_right_sample * coeff1r) +
1687  (previous_right_sample * coeff2r) + 0x80) >> 8;
1688 
1689  previous_left_sample = current_left_sample;
1690  current_left_sample = av_clip_int16(next_left_sample);
1691  previous_right_sample = current_right_sample;
1692  current_right_sample = av_clip_int16(next_right_sample);
1693  *samples++ = current_left_sample;
1694  *samples++ = current_right_sample;
1695  }
1696  }
1697 
1698  bytestream2_skip(&gb, 2); // Skip terminating 0x0000
1699  ) /* End of CASE */
1700  CASE(ADPCM_EA_MAXIS_XA,
1701  int coeff[2][2], shift[2];
1702 
1703  for (int channel = 0; channel < channels; channel++) {
1704  int byte = bytestream2_get_byteu(&gb);
1705  for (int i = 0; i < 2; i++)
1706  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
1707  shift[channel] = 20 - (byte & 0x0F);
1708  }
1709  for (int count1 = 0; count1 < nb_samples / 2; count1++) {
1710  int byte[2];
1711 
1712  byte[0] = bytestream2_get_byteu(&gb);
1713  if (st) byte[1] = bytestream2_get_byteu(&gb);
1714  for (int i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1715  for (int channel = 0; channel < channels; channel++) {
1716  int sample = sign_extend(byte[channel] >> i, 4) * (1 << shift[channel]);
1717  sample = (sample +
1718  c->status[channel].sample1 * coeff[channel][0] +
1719  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
1720  c->status[channel].sample2 = c->status[channel].sample1;
1721  c->status[channel].sample1 = av_clip_int16(sample);
1722  *samples++ = c->status[channel].sample1;
1723  }
1724  }
1725  }
1726  bytestream2_seek(&gb, 0, SEEK_END);
1727  ) /* End of CASE */
1728 #if CONFIG_ADPCM_EA_R1_DECODER || CONFIG_ADPCM_EA_R2_DECODER || CONFIG_ADPCM_EA_R3_DECODER
1729  case AV_CODEC_ID_ADPCM_EA_R1:
1730  case AV_CODEC_ID_ADPCM_EA_R2:
1731  case AV_CODEC_ID_ADPCM_EA_R3: {
1732  /* channel numbering
1733  2chan: 0=fl, 1=fr
1734  4chan: 0=fl, 1=rl, 2=fr, 3=rr
1735  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
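 /* The packet starts with one 32-bit offset per channel (big-endian for
  * R3, little-endian otherwise) pointing at that channel's data. R1 then
  * stores the two initial history samples in the stream; R2/R3 keep them
  * in the decoder state. Within a channel, a block whose first byte is
  * 0xEE carries 28 raw big-endian 16-bit samples; any other first byte
  * selects the ea_adpcm_table coefficients and shift as in plain EA ADPCM. */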
1736  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
1737  int previous_sample, current_sample, next_sample;
1738  int coeff1, coeff2;
1739  int shift;
1740  uint16_t *samplesC;
1741  int count = 0;
1742  int offsets[6];
1743 
1744  for (unsigned channel = 0; channel < channels; channel++)
1745  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1746  bytestream2_get_le32(&gb)) +
1747  (channels + 1) * 4;
1748 
1749  for (unsigned channel = 0; channel < channels; channel++) {
1750  int count1;
1751 
1752  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
1753  samplesC = samples_p[channel];
1754 
1755  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
1756  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1757  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1758  } else {
1759  current_sample = c->status[channel].predictor;
1760  previous_sample = c->status[channel].prev_sample;
1761  }
1762 
1763  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1764  int byte = bytestream2_get_byte(&gb);
1765  if (byte == 0xEE) { /* only seen in R2 and R3 */
1766  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1767  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1768 
1769  for (int count2 = 0; count2 < 28; count2++)
1770  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1771  } else {
1772  coeff1 = ea_adpcm_table[ byte >> 4 ];
1773  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
1774  shift = 20 - (byte & 0x0F);
1775 
1776  for (int count2 = 0; count2 < 28; count2++) {
1777  if (count2 & 1)
1778  next_sample = (unsigned)sign_extend(byte, 4) << shift;
1779  else {
1780  byte = bytestream2_get_byte(&gb);
1781  next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
1782  }
1783 
1784  next_sample += (current_sample * coeff1) +
1785  (previous_sample * coeff2);
1786  next_sample = av_clip_int16(next_sample >> 8);
1787 
1788  previous_sample = current_sample;
1789  current_sample = next_sample;
1790  *samplesC++ = current_sample;
1791  }
1792  }
1793  }
1794  if (!count) {
1795  count = count1;
1796  } else if (count != count1) {
1797  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
1798  count = FFMAX(count, count1);
1799  }
1800 
1801  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
1802  c->status[channel].predictor = current_sample;
1803  c->status[channel].prev_sample = previous_sample;
1804  }
1805  }
1806 
1807  frame->nb_samples = count * 28;
1808  bytestream2_seek(&gb, 0, SEEK_END);
1809  break;
1810  }
1811 #endif /* CONFIG_ADPCM_EA_Rx_DECODER */
1812  CASE(ADPCM_EA_XAS,
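 /* Per channel: four interleaved sub-blocks of 32 samples each. The first
  * 16 bytes hold, for every sub-block, two 16-bit words whose low nibbles
  * select the coefficients and the shift and whose remaining bits are the
  * two seed samples. The following 60 bytes pack the remaining 30 samples
  * per sub-block as 4-bit deltas, two per byte. */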
1813  for (int channel=0; channel < channels; channel++) {
1814  int coeff[2][4], shift[4];
1815  int16_t *s = samples_p[channel];
1816  for (int n = 0; n < 4; n++, s += 32) {
1817  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
1818  for (int i = 0; i < 2; i++)
1819  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
1820  s[0] = val & ~0x0F;
1821 
1822  val = sign_extend(bytestream2_get_le16u(&gb), 16);
1823  shift[n] = 20 - (val & 0x0F);
1824  s[1] = val & ~0x0F;
1825  }
1826 
1827  for (int m = 2; m < 32; m += 2) {
1828  s = &samples_p[channel][m];
1829  for (int n = 0; n < 4; n++, s += 32) {
1830  int level, pred;
1831  int byte = bytestream2_get_byteu(&gb);
1832 
1833  level = sign_extend(byte >> 4, 4) * (1 << shift[n]);
1834  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1835  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1836 
1837  level = sign_extend(byte, 4) * (1 << shift[n]);
1838  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1839  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1840  }
1841  }
1842  }
1843  ) /* End of CASE */
1844  CASE(ADPCM_IMA_ACORN,
1845  for (int channel = 0; channel < channels; channel++) {
1846  ADPCMChannelStatus *cs = &c->status[channel];
1847  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1848  cs->step_index = bytestream2_get_le16u(&gb) & 0xFF;
1849  if (cs->step_index > 88u){
1850  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1851  channel, cs->step_index);
1852  return AVERROR_INVALIDDATA;
1853  }
1854  }
1855  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1856  int byte = bytestream2_get_byteu(&gb);
1857  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte & 0x0F, 3);
1858  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte >> 4, 3);
1859  }
1860  ) /* End of CASE */
1861  CASE(ADPCM_IMA_AMV,
1862  av_assert0(channels == 1);
1863 
1864  /*
1865  * Header format:
1866  * int16_t predictor;
1867  * uint8_t step_index;
1868  * uint8_t reserved;
1869  * uint32_t frame_size;
1870  *
1871  * Some implementations have step_index as 16-bits, but others
1872  * only use the lower 8 and store garbage in the upper 8.
1873  */
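 /* 8 header bytes are consumed here: the 16-bit predictor, the 8-bit
  * step index, and 5 skipped bytes (the reserved byte plus frame_size). */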
1874  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1875  c->status[0].step_index = bytestream2_get_byteu(&gb);
1876  bytestream2_skipu(&gb, 5);
1877  if (c->status[0].step_index > 88u) {
1878  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1879  c->status[0].step_index);
1880  return AVERROR_INVALIDDATA;
1881  }
1882 
1883  for (int n = nb_samples >> 1; n > 0; n--) {
1884  int v = bytestream2_get_byteu(&gb);
1885 
1886  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1887  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
1888  }
1889 
1890  if (nb_samples & 1) {
1891  int v = bytestream2_get_byteu(&gb);
1892  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1893 
1894  if (v & 0x0F) {
1895  /* Holds true on all the http://samples.mplayerhq.hu/amv samples. */
1896  av_log(avctx, AV_LOG_WARNING, "Last nibble set on packet with odd sample count.\n");
1897  av_log(avctx, AV_LOG_WARNING, "Sample will be skipped.\n");
1898  }
1899  }
1900  ) /* End of CASE */
1901  CASE(ADPCM_IMA_SMJPEG,
1902  for (int i = 0; i < channels; i++) {
1903  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1904  c->status[i].step_index = bytestream2_get_byteu(&gb);
1905  bytestream2_skipu(&gb, 1);
1906  if (c->status[i].step_index > 88u) {
1907  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1908  c->status[i].step_index);
1909  return AVERROR_INVALIDDATA;
1910  }
1911  }
1912 
1913  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1914  int v = bytestream2_get_byteu(&gb);
1915 
1916  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4 );
1917  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf);
1918  }
1919  ) /* End of CASE */
1920  CASE(ADPCM_CT,
1921  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1922  int v = bytestream2_get_byteu(&gb);
1923  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
1924  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
1925  }
1926  ) /* End of CASE */
1927 #if CONFIG_ADPCM_SBPRO_2_DECODER || CONFIG_ADPCM_SBPRO_3_DECODER || \
1928  CONFIG_ADPCM_SBPRO_4_DECODER
1929  case AV_CODEC_ID_ADPCM_SBPRO_4:
1930  case AV_CODEC_ID_ADPCM_SBPRO_3:
1931  case AV_CODEC_ID_ADPCM_SBPRO_2:
1932  if (!c->status[0].step_index) {
1933  /* the first byte is a raw sample */
1934  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1935  if (st)
1936  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1937  c->status[0].step_index = 1;
1938  nb_samples--;
1939  }
1940  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
1941  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1942  int byte = bytestream2_get_byteu(&gb);
1943  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1944  byte >> 4, 4, 0);
1945  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1946  byte & 0x0F, 4, 0);
1947  }
1948  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
1949  for (int n = (nb_samples<<st) / 3; n > 0; n--) {
1950  int byte = bytestream2_get_byteu(&gb);
1951  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1952  byte >> 5 , 3, 0);
1953  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1954  (byte >> 2) & 0x07, 3, 0);
1955  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1956  byte & 0x03, 2, 0);
1957  }
1958  } else {
1959  for (int n = nb_samples >> (2 - st); n > 0; n--) {
1960  int byte = bytestream2_get_byteu(&gb);
1961  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1962  byte >> 6 , 2, 2);
1963  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1964  (byte >> 4) & 0x03, 2, 2);
1965  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1966  (byte >> 2) & 0x03, 2, 2);
1967  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1968  byte & 0x03, 2, 2);
1969  }
1970  }
1971  break;
1972 #endif /* CONFIG_ADPCM_SBPRO_x_DECODER */
1973  CASE(ADPCM_SWF,
1974  adpcm_swf_decode(avctx, buf, buf_size, samples);
1975  bytestream2_seek(&gb, 0, SEEK_END);
1976  ) /* End of CASE */
1977  CASE(ADPCM_YAMAHA,
1978  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1979  int v = bytestream2_get_byteu(&gb);
1980  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
1981  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
1982  }
1983  ) /* End of CASE */
1984  CASE(ADPCM_AICA,
1985  for (int channel = 0; channel < channels; channel++) {
1986  samples = samples_p[channel];
1987  for (int n = nb_samples >> 1; n > 0; n--) {
1988  int v = bytestream2_get_byteu(&gb);
1989  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
1990  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4 );
1991  }
1992  }
1993  ) /* End of CASE */
1994  CASE(ADPCM_AFC,
1995  int samples_per_block;
1996  int blocks;
1997 
1998  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
1999  samples_per_block = avctx->extradata[0] / 16;
2000  blocks = nb_samples / avctx->extradata[0];
2001  } else {
2002  samples_per_block = nb_samples / 16;
2003  blocks = 1;
2004  }
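 /* Each group of 9 bytes expands to 16 samples: the first byte holds the
  * scale exponent (high nibble) and the afc_coeffs index (low nibble),
  * the next 8 bytes hold 16 4-bit deltas. extradata[0], when present, is
  * the per-block sample count, so samples_per_block above actually counts
  * 16-sample groups. */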
2005 
2006  for (int m = 0; m < blocks; m++) {
2007  for (int channel = 0; channel < channels; channel++) {
2008  int prev1 = c->status[channel].sample1;
2009  int prev2 = c->status[channel].sample2;
2010 
2011  samples = samples_p[channel] + m * 16;
2012  /* Read in every sample for this channel. */
2013  for (int i = 0; i < samples_per_block; i++) {
2014  int byte = bytestream2_get_byteu(&gb);
2015  int scale = 1 << (byte >> 4);
2016  int index = byte & 0xf;
2017  int factor1 = afc_coeffs[0][index];
2018  int factor2 = afc_coeffs[1][index];
2019 
2020  /* Decode 16 samples. */
2021  for (int n = 0; n < 16; n++) {
2022  int32_t sampledat;
2023 
2024  if (n & 1) {
2025  sampledat = sign_extend(byte, 4);
2026  } else {
2027  byte = bytestream2_get_byteu(&gb);
2028  sampledat = sign_extend(byte >> 4, 4);
2029  }
2030 
2031  sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
2032  sampledat * scale;
2033  *samples = av_clip_int16(sampledat);
2034  prev2 = prev1;
2035  prev1 = *samples++;
2036  }
2037  }
2038 
2039  c->status[channel].sample1 = prev1;
2040  c->status[channel].sample2 = prev2;
2041  }
2042  }
2043  bytestream2_seek(&gb, 0, SEEK_END);
2044  ) /* End of CASE */
2045 #if CONFIG_ADPCM_THP_DECODER || CONFIG_ADPCM_THP_LE_DECODER
2046  case AV_CODEC_ID_ADPCM_THP:
2047  case AV_CODEC_ID_ADPCM_THP_LE:
2048  {
2049  int table[14][16];
2050 
2051 #define THP_GET16(g) \
2052  sign_extend( \
2053  avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
2054  bytestream2_get_le16u(&(g)) : \
2055  bytestream2_get_be16u(&(g)), 16)
2056 
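 /* Each channel has 16 table entries forming 8 predictor pairs. The
  * coefficients come from extradata when present, otherwise from the start
  * of the packet; in the latter case the first packet also carries the
  * initial history samples (skipped on later packets). Every 8-byte group
  * then decodes to 14 samples: a header byte with the predictor index
  * (high nibble) and scale exponent (low nibble), then 7 bytes of deltas. */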
2057  if (avctx->extradata) {
2058  GetByteContext tb;
2059  if (avctx->extradata_size < 32 * channels) {
2060  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
2061  return AVERROR_INVALIDDATA;
2062  }
2063 
2064  bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
2065  for (int i = 0; i < channels; i++)
2066  for (int n = 0; n < 16; n++)
2067  table[i][n] = THP_GET16(tb);
2068  } else {
2069  for (int i = 0; i < channels; i++)
2070  for (int n = 0; n < 16; n++)
2071  table[i][n] = THP_GET16(gb);
2072 
2073  if (!c->has_status) {
2074  /* Initialize the previous sample. */
2075  for (int i = 0; i < channels; i++) {
2076  c->status[i].sample1 = THP_GET16(gb);
2077  c->status[i].sample2 = THP_GET16(gb);
2078  }
2079  c->has_status = 1;
2080  } else {
2081  bytestream2_skip(&gb, channels * 4);
2082  }
2083  }
2084 
2085  for (int ch = 0; ch < channels; ch++) {
2086  samples = samples_p[ch];
2087 
2088  /* Read in every sample for this channel. */
2089  for (int i = 0; i < (nb_samples + 13) / 14; i++) {
2090  int byte = bytestream2_get_byteu(&gb);
2091  int index = (byte >> 4) & 7;
2092  unsigned int exp = byte & 0x0F;
2093  int64_t factor1 = table[ch][index * 2];
2094  int64_t factor2 = table[ch][index * 2 + 1];
2095 
2096  /* Decode 14 samples. */
2097  for (int n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
2098  int32_t sampledat;
2099 
2100  if (n & 1) {
2101  sampledat = sign_extend(byte, 4);
2102  } else {
2103  byte = bytestream2_get_byteu(&gb);
2104  sampledat = sign_extend(byte >> 4, 4);
2105  }
2106 
2107  sampledat = ((c->status[ch].sample1 * factor1
2108  + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
2109  *samples = av_clip_int16(sampledat);
2110  c->status[ch].sample2 = c->status[ch].sample1;
2111  c->status[ch].sample1 = *samples++;
2112  }
2113  }
2114  }
2115  break;
2116  }
2117 #endif /* CONFIG_ADPCM_THP(_LE)_DECODER */
2118  CASE(ADPCM_DTK,
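 /* Each 32-byte frame decodes to 28 stereo samples: bytes 0 and 1 are the
  * left and right header bytes (prediction mode in the high nibble, shift
  * in the low nibble), bytes 2-3 are skipped, and the remaining 28 bytes
  * carry the left sample in the low nibble and the right sample in the
  * high nibble. The stream is rewound after the first channel so the
  * second channel re-reads the same bytes. */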
2119  for (int channel = 0; channel < channels; channel++) {
2120  samples = samples_p[channel];
2121 
2122  /* Read in every sample for this channel. */
2123  for (int i = 0; i < nb_samples / 28; i++) {
2124  int byte, header;
2125  if (channel)
2126  bytestream2_skipu(&gb, 1);
2127  header = bytestream2_get_byteu(&gb);
2128  bytestream2_skipu(&gb, 3 - channel);
2129 
2130  /* Decode 28 samples. */
2131  for (int n = 0; n < 28; n++) {
2132  int32_t sampledat, prev;
2133 
2134  switch (header >> 4) {
2135  case 1:
2136  prev = (c->status[channel].sample1 * 0x3c);
2137  break;
2138  case 2:
2139  prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
2140  break;
2141  case 3:
2142  prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
2143  break;
2144  default:
2145  prev = 0;
2146  }
2147 
2148  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
2149 
2150  byte = bytestream2_get_byteu(&gb);
2151  if (!channel)
2152  sampledat = sign_extend(byte, 4);
2153  else
2154  sampledat = sign_extend(byte >> 4, 4);
2155 
2156  sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
2157  *samples++ = av_clip_int16(sampledat >> 6);
2158  c->status[channel].sample2 = c->status[channel].sample1;
2159  c->status[channel].sample1 = sampledat;
2160  }
2161  }
2162  if (!channel)
2163  bytestream2_seek(&gb, 0, SEEK_SET);
2164  }
2165  ) /* End of CASE */
2166  CASE(ADPCM_PSX,
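 /* Each channel's data comes in 16-byte units: one header byte (filter
  * index in the high nibble, shift in the low nibble), one flag byte, and
  * 14 bytes of 4-bit deltas giving 28 samples. The filters are the same
  * xa_adpcm_table used by CD-ROM XA; a flag of 7 or more mutes the unit. */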
2167  for (int block = 0; block < avpkt->size / FFMAX(avctx->block_align, 16 * channels); block++) {
2168  int nb_samples_per_block = 28 * FFMAX(avctx->block_align, 16 * channels) / (16 * channels);
2169  for (int channel = 0; channel < channels; channel++) {
2170  samples = samples_p[channel] + block * nb_samples_per_block;
2171  av_assert0((block + 1) * nb_samples_per_block <= nb_samples);
2172 
2173  /* Read in every sample for this channel. */
2174  for (int i = 0; i < nb_samples_per_block / 28; i++) {
2175  int filter, shift, flag, byte;
2176 
2177  filter = bytestream2_get_byteu(&gb);
2178  shift = filter & 0xf;
2179  filter = filter >> 4;
2180  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table))
2181  return AVERROR_INVALIDDATA;
2182  flag = bytestream2_get_byteu(&gb) & 0x7;
2183 
2184  /* Decode 28 samples. */
2185  for (int n = 0; n < 28; n++) {
2186  int sample = 0, scale;
2187 
2188  if (n & 1) {
2189  scale = sign_extend(byte >> 4, 4);
2190  } else {
2191  byte = bytestream2_get_byteu(&gb);
2192  scale = sign_extend(byte, 4);
2193  }
2194 
2195  if (flag < 0x07) {
2196  scale = scale * (1 << 12);
2197  sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
2198  }
2199  *samples++ = av_clip_int16(sample);
2200  c->status[channel].sample2 = c->status[channel].sample1;
2201  c->status[channel].sample1 = sample;
2202  }
2203  }
2204  }
2205  }
2206  ) /* End of CASE */
2207  CASE(ADPCM_ARGO,
2208  /*
2209  * The format of each block:
2210  * uint8_t left_control;
2211  * uint4_t left_samples[nb_samples];
2212  * ---- and if stereo ----
2213  * uint8_t right_control;
2214  * uint4_t right_samples[nb_samples];
2215  *
2216  * Format of the control byte:
2217  * MSB [SSSSRDRR] LSB
2218  * S = (Shift Amount - 2)
2219  * D = Decoder flag.
2220  * R = Reserved
2221  *
2222  * Each block relies on the previous two samples of each channel.
2223  * They should be 0 initially.
2224  */
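 /* Each block is therefore 17 bytes per channel: the control byte followed
  * by 16 sample bytes, i.e. 32 decoded samples per channel per block. */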
2225  for (int block = 0; block < avpkt->size / avctx->block_align; block++) {
2226  for (int channel = 0; channel < avctx->ch_layout.nb_channels; channel++) {
2227  ADPCMChannelStatus *cs = c->status + channel;
2228  int control, shift;
2229 
2230  samples = samples_p[channel] + block * 32;
2231 
2232  /* Get the control byte and decode the samples, 2 at a time. */
2233  control = bytestream2_get_byteu(&gb);
2234  shift = (control >> 4) + 2;
2235 
2236  for (int n = 0; n < 16; n++) {
2237  int sample = bytestream2_get_byteu(&gb);
2238  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 4, shift, control & 0x04);
2239  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 0, shift, control & 0x04);
2240  }
2241  }
2242  }
2243  ) /* End of CASE */
2244  CASE(ADPCM_ZORK,
2245  for (int n = 0; n < nb_samples * channels; n++) {
2246  int v = bytestream2_get_byteu(&gb);
2247  *samples++ = adpcm_zork_expand_nibble(&c->status[n % channels], v);
2248  }
2249  ) /* End of CASE */
2250  CASE(ADPCM_IMA_MTF,
2251  for (int n = nb_samples / 2; n > 0; n--) {
2252  for (int channel = 0; channel < channels; channel++) {
2253  int v = bytestream2_get_byteu(&gb);
2254  *samples++ = adpcm_ima_mtf_expand_nibble(&c->status[channel], v >> 4);
2255  samples[st] = adpcm_ima_mtf_expand_nibble(&c->status[channel], v & 0x0F);
2256  }
2257  samples += channels;
2258  }
2259  ) /* End of CASE */
2260  default:
2261  av_assert0(0); // unsupported codec_id should not happen
2262  }
2263 
2264  if (avpkt->size && bytestream2_tell(&gb) == 0) {
2265  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
2266  return AVERROR_INVALIDDATA;
2267  }
2268 
2269  *got_frame_ptr = 1;
2270 
2271  if (avpkt->size < bytestream2_tell(&gb)) {
2272  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
2273  return avpkt->size;
2274  }
2275 
2276  return bytestream2_tell(&gb);
2277 }
2278 
2279 static void adpcm_flush(AVCodecContext *avctx)
2280 {
2281  ADPCMDecodeContext *c = avctx->priv_data;
2282 
2283  /* Just nuke the entire state and re-init. */
2284  memset(c, 0, sizeof(ADPCMDecodeContext));
2285 
2286  switch(avctx->codec_id) {
2287  case AV_CODEC_ID_ADPCM_CT:
2288  c->status[0].step = c->status[1].step = 511;
2289  break;
2290 
2291  case AV_CODEC_ID_ADPCM_IMA_ALP:
2292  if (avctx->extradata && avctx->extradata_size >= 8) {
2293  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18);
2294  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
2295  }
2296  break;
2297 
2298  case AV_CODEC_ID_ADPCM_IMA_APM:
2299  if (avctx->extradata && avctx->extradata_size >= 28) {
2300  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 16), 18);
2301  c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 20), 0, 88);
2302  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
2303  c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 8), 0, 88);
2304  }
2305  break;
2306 
2307  case AV_CODEC_ID_ADPCM_IMA_WS:
2308  if (avctx->extradata && avctx->extradata_size >= 2)
2309  c->vqa_version = AV_RL16(avctx->extradata);
2310  break;
2311  default:
2312  /* Other codecs may want to handle this during decoding. */
2313  c->has_status = 0;
2314  return;
2315  }
2316 
2317  c->has_status = 1;
2318 }
2319 
2320 
2321 static enum AVSampleFormat sample_fmts_s16[]  = { AV_SAMPLE_FMT_S16,
2322                                                    AV_SAMPLE_FMT_NONE };
2323 static enum AVSampleFormat sample_fmts_s16p[] = { AV_SAMPLE_FMT_S16P,
2324                                                    AV_SAMPLE_FMT_NONE };
2325 static enum AVSampleFormat sample_fmts_both[] = { AV_SAMPLE_FMT_S16,
2326                                                    AV_SAMPLE_FMT_S16P,
2327                                                    AV_SAMPLE_FMT_NONE };
2328 
2329 #define ADPCM_DECODER_0(id_, sample_fmts_, name_, long_name_)
2330 #define ADPCM_DECODER_1(id_, sample_fmts_, name_, long_name_) \
2331 const FFCodec ff_ ## name_ ## _decoder = { \
2332  .p.name = #name_, \
2333  CODEC_LONG_NAME(long_name_), \
2334  .p.type = AVMEDIA_TYPE_AUDIO, \
2335  .p.id = id_, \
2336  .p.capabilities = AV_CODEC_CAP_DR1, \
2337  .p.sample_fmts = sample_fmts_, \
2338  .priv_data_size = sizeof(ADPCMDecodeContext), \
2339  .init = adpcm_decode_init, \
2340  FF_CODEC_DECODE_CB(adpcm_decode_frame), \
2341  .flush = adpcm_flush, \
2342 };
2343 #define ADPCM_DECODER_2(enabled, codec_id, name, sample_fmts, long_name) \
2344  ADPCM_DECODER_ ## enabled(codec_id, name, sample_fmts, long_name)
2345 #define ADPCM_DECODER_3(config, codec_id, name, sample_fmts, long_name) \
2346  ADPCM_DECODER_2(config, codec_id, name, sample_fmts, long_name)
2347 #define ADPCM_DECODER(codec, name, sample_fmts, long_name) \
2348  ADPCM_DECODER_3(CONFIG_ ## codec ## _DECODER, AV_CODEC_ID_ ## codec, \
2349  name, sample_fmts, long_name)
2350 
2351 /* Note: Do not forget to add new entries to the Makefile as well. */
2352 ADPCM_DECODER(ADPCM_4XM, sample_fmts_s16p, adpcm_4xm, "ADPCM 4X Movie")
2353 ADPCM_DECODER(ADPCM_AFC, sample_fmts_s16p, adpcm_afc, "ADPCM Nintendo Gamecube AFC")
2354 ADPCM_DECODER(ADPCM_AGM, sample_fmts_s16, adpcm_agm, "ADPCM AmuseGraphics Movie")
2355 ADPCM_DECODER(ADPCM_AICA, sample_fmts_s16p, adpcm_aica, "ADPCM Yamaha AICA")
2356 ADPCM_DECODER(ADPCM_ARGO, sample_fmts_s16p, adpcm_argo, "ADPCM Argonaut Games")
2357 ADPCM_DECODER(ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology")
2358 ADPCM_DECODER(ADPCM_DTK, sample_fmts_s16p, adpcm_dtk, "ADPCM Nintendo Gamecube DTK")
2359 ADPCM_DECODER(ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts")
2360 ADPCM_DECODER(ADPCM_EA_MAXIS_XA, sample_fmts_s16, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA")
2361 ADPCM_DECODER(ADPCM_EA_R1, sample_fmts_s16p, adpcm_ea_r1, "ADPCM Electronic Arts R1")
2362 ADPCM_DECODER(ADPCM_EA_R2, sample_fmts_s16p, adpcm_ea_r2, "ADPCM Electronic Arts R2")
2363 ADPCM_DECODER(ADPCM_EA_R3, sample_fmts_s16p, adpcm_ea_r3, "ADPCM Electronic Arts R3")
2364 ADPCM_DECODER(ADPCM_EA_XAS, sample_fmts_s16p, adpcm_ea_xas, "ADPCM Electronic Arts XAS")
2365 ADPCM_DECODER(ADPCM_IMA_ACORN, sample_fmts_s16, adpcm_ima_acorn, "ADPCM IMA Acorn Replay")
2366 ADPCM_DECODER(ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV")
2367 ADPCM_DECODER(ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC")
2368 ADPCM_DECODER(ADPCM_IMA_APM, sample_fmts_s16, adpcm_ima_apm, "ADPCM IMA Ubisoft APM")
2369 ADPCM_DECODER(ADPCM_IMA_CUNNING, sample_fmts_s16p, adpcm_ima_cunning, "ADPCM IMA Cunning Developments")
2370 ADPCM_DECODER(ADPCM_IMA_DAT4, sample_fmts_s16, adpcm_ima_dat4, "ADPCM IMA Eurocom DAT4")
2371 ADPCM_DECODER(ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3")
2372 ADPCM_DECODER(ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4")
2373 ADPCM_DECODER(ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS")
2374 ADPCM_DECODER(ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD")
2375 ADPCM_DECODER(ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS")
2376 ADPCM_DECODER(ADPCM_IMA_MOFLEX, sample_fmts_s16p, adpcm_ima_moflex, "ADPCM IMA MobiClip MOFLEX")
2377 ADPCM_DECODER(ADPCM_IMA_MTF, sample_fmts_s16, adpcm_ima_mtf, "ADPCM IMA Capcom's MT Framework")
2378 ADPCM_DECODER(ADPCM_IMA_OKI, sample_fmts_s16, adpcm_ima_oki, "ADPCM IMA Dialogic OKI")
2379 ADPCM_DECODER(ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime")
2380 ADPCM_DECODER(ADPCM_IMA_RAD, sample_fmts_s16, adpcm_ima_rad, "ADPCM IMA Radical")
2381 ADPCM_DECODER(ADPCM_IMA_SSI, sample_fmts_s16, adpcm_ima_ssi, "ADPCM IMA Simon & Schuster Interactive")
2382 ADPCM_DECODER(ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG")
2383 ADPCM_DECODER(ADPCM_IMA_ALP, sample_fmts_s16, adpcm_ima_alp, "ADPCM IMA High Voltage Software ALP")
2384 ADPCM_DECODER(ADPCM_IMA_WAV, sample_fmts_s16p, adpcm_ima_wav, "ADPCM IMA WAV")
2385 ADPCM_DECODER(ADPCM_IMA_WS, sample_fmts_both, adpcm_ima_ws, "ADPCM IMA Westwood")
2386 ADPCM_DECODER(ADPCM_MS, sample_fmts_both, adpcm_ms, "ADPCM Microsoft")
2387 ADPCM_DECODER(ADPCM_MTAF, sample_fmts_s16p, adpcm_mtaf, "ADPCM MTAF")
2388 ADPCM_DECODER(ADPCM_PSX, sample_fmts_s16p, adpcm_psx, "ADPCM Playstation")
2389 ADPCM_DECODER(ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit")
2390 ADPCM_DECODER(ADPCM_SBPRO_3, sample_fmts_s16, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit")
2391 ADPCM_DECODER(ADPCM_SBPRO_4, sample_fmts_s16, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit")
2392 ADPCM_DECODER(ADPCM_SWF, sample_fmts_s16, adpcm_swf, "ADPCM Shockwave Flash")
2393 ADPCM_DECODER(ADPCM_THP_LE, sample_fmts_s16p, adpcm_thp_le, "ADPCM Nintendo THP (little-endian)")
2394 ADPCM_DECODER(ADPCM_THP, sample_fmts_s16p, adpcm_thp, "ADPCM Nintendo THP")
2395 ADPCM_DECODER(ADPCM_XA, sample_fmts_s16p, adpcm_xa, "ADPCM CDROM XA")
2396 ADPCM_DECODER(ADPCM_XMD, sample_fmts_s16p, adpcm_xmd, "ADPCM Konami XMD")
2397 ADPCM_DECODER(ADPCM_YAMAHA, sample_fmts_s16, adpcm_yamaha, "ADPCM Yamaha")
2398 ADPCM_DECODER(ADPCM_ZORK, sample_fmts_s16, adpcm_zork, "ADPCM Zork")
Definition: adpcm.c:225