FFmpeg
adpcm.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  * Argonaut Games ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
16  * Simon & Schuster Interactive ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
17  * Ubisoft ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
18  * High Voltage Software ALP decoder by Zane van Iperen (zane@zanevaniperen.com)
19  * Cunning Developments decoder by Zane van Iperen (zane@zanevaniperen.com)
20  * Sanyo LD-ADPCM decoder by Peter Ross (pross@xvid.org)
21  *
22  * This file is part of FFmpeg.
23  *
24  * FFmpeg is free software; you can redistribute it and/or
25  * modify it under the terms of the GNU Lesser General Public
26  * License as published by the Free Software Foundation; either
27  * version 2.1 of the License, or (at your option) any later version.
28  *
29  * FFmpeg is distributed in the hope that it will be useful,
30  * but WITHOUT ANY WARRANTY; without even the implied warranty of
31  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
32  * Lesser General Public License for more details.
33  *
34  * You should have received a copy of the GNU Lesser General Public
35  * License along with FFmpeg; if not, write to the Free Software
36  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
37  */
38 
39 #include "config_components.h"
40 
41 #include "avcodec.h"
42 #include "get_bits.h"
43 #include "bytestream.h"
44 #include "adpcm.h"
45 #include "adpcm_data.h"
46 #include "codec_internal.h"
47 #include "decode.h"
48 
49 #include "libavutil/attributes.h"
50 
51 /**
52  * @file
53  * ADPCM decoders
54  * Features and limitations:
55  *
56  * Reference documents:
57  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
58  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
59  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
60  * http://openquicktime.sourceforge.net/
61  * XAnim sources (xa_codec.c) http://xanim.polter.net/
62  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
63  * SoX source code http://sox.sourceforge.net/
64  *
65  * CD-ROM XA:
66  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
67  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
68  * readstr http://www.geocities.co.jp/Playtown/2004/
69  */
70 
71 #define CASE_0(codec_id, ...)
72 #define CASE_1(codec_id, ...) \
73  case codec_id: \
74  { __VA_ARGS__ } \
75  break;
76 #define CASE_2(enabled, codec_id, ...) \
77  CASE_ ## enabled(codec_id, __VA_ARGS__)
78 #define CASE_3(config, codec_id, ...) \
79  CASE_2(config, codec_id, __VA_ARGS__)
80 #define CASE(codec, ...) \
81  CASE_3(CONFIG_ ## codec ## _DECODER, AV_CODEC_ID_ ## codec, __VA_ARGS__)
82 
83 /* These are for CD-ROM XA ADPCM */
84 static const int8_t xa_adpcm_table[5][2] = {
85  { 0, 0 },
86  { 60, 0 },
87  { 115, -52 },
88  { 98, -55 },
89  { 122, -60 }
90 };
91 
92 static const int16_t afc_coeffs[2][16] = {
93  { 0, 2048, 0, 1024, 4096, 3584, 3072, 4608, 4200, 4800, 5120, 2048, 1024, -1024, -1024, -2048 },
94  { 0, 0, 2048, 1024, -2048, -1536, -1024, -2560, -2248, -2300, -3072, -2048, -1024, 1024, 0, 0 }
95 };
96 
97 static const int16_t ea_adpcm_table[] = {
98  0, 240, 460, 392,
99  0, 0, -208, -220,
100  0, 1, 3, 4,
101  7, 8, 10, 11,
102  0, -1, -3, -4
103 };
104 
105 /*
106  * Dumped from the binaries:
107  * - FantasticJourney.exe - 0x794D2, DGROUP:0x47A4D2
108  * - BigRaceUSA.exe - 0x9B8AA, DGROUP:0x49C4AA
109  * - Timeshock!.exe - 0x8506A, DGROUP:0x485C6A
110  */
111 static const int8_t ima_cunning_index_table[9] = {
112  -1, -1, -1, -1, 1, 2, 3, 4, -1
113 };
114 
115 /*
116  * Dumped from the binaries:
117  * - FantasticJourney.exe - 0x79458, DGROUP:0x47A458
118  * - BigRaceUSA.exe - 0x9B830, DGROUP:0x49C430
119  * - Timeshock!.exe - 0x84FF0, DGROUP:0x485BF0
120  */
121 static const int16_t ima_cunning_step_table[61] = {
122  1, 1, 1, 1, 2, 2, 3, 3, 4, 5,
123  6, 7, 8, 10, 12, 14, 16, 20, 24, 28,
124  32, 40, 48, 56, 64, 80, 96, 112, 128, 160,
125  192, 224, 256, 320, 384, 448, 512, 640, 768, 896,
126  1024, 1280, 1536, 1792, 2048, 2560, 3072, 3584, 4096, 5120,
127  6144, 7168, 8192, 10240, 12288, 14336, 16384, 20480, 24576, 28672, 0
128 };
129 
130 static const int8_t adpcm_index_table2[4] = {
131  -1, 2,
132  -1, 2,
133 };
134 
135 static const int8_t adpcm_index_table3[8] = {
136  -1, -1, 1, 2,
137  -1, -1, 1, 2,
138 };
139 
140 static const int8_t adpcm_index_table5[32] = {
141  -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16,
142  -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16,
143 };
144 
145 static const int8_t * const adpcm_index_tables[4] = {
146  &adpcm_index_table2[0],
147  &adpcm_index_table3[0],
149  &adpcm_index_table5[0],
150 };
151 
152 static const int16_t mtaf_stepsize[32][16] = {
153  { 1, 5, 9, 13, 16, 20, 24, 28,
154  -1, -5, -9, -13, -16, -20, -24, -28, },
155  { 2, 6, 11, 15, 20, 24, 29, 33,
156  -2, -6, -11, -15, -20, -24, -29, -33, },
157  { 2, 7, 13, 18, 23, 28, 34, 39,
158  -2, -7, -13, -18, -23, -28, -34, -39, },
159  { 3, 9, 15, 21, 28, 34, 40, 46,
160  -3, -9, -15, -21, -28, -34, -40, -46, },
161  { 3, 11, 18, 26, 33, 41, 48, 56,
162  -3, -11, -18, -26, -33, -41, -48, -56, },
163  { 4, 13, 22, 31, 40, 49, 58, 67,
164  -4, -13, -22, -31, -40, -49, -58, -67, },
165  { 5, 16, 26, 37, 48, 59, 69, 80,
166  -5, -16, -26, -37, -48, -59, -69, -80, },
167  { 6, 19, 31, 44, 57, 70, 82, 95,
168  -6, -19, -31, -44, -57, -70, -82, -95, },
169  { 7, 22, 38, 53, 68, 83, 99, 114,
170  -7, -22, -38, -53, -68, -83, -99, -114, },
171  { 9, 27, 45, 63, 81, 99, 117, 135,
172  -9, -27, -45, -63, -81, -99, -117, -135, },
173  { 10, 32, 53, 75, 96, 118, 139, 161,
174  -10, -32, -53, -75, -96, -118, -139, -161, },
175  { 12, 38, 64, 90, 115, 141, 167, 193,
176  -12, -38, -64, -90, -115, -141, -167, -193, },
177  { 15, 45, 76, 106, 137, 167, 198, 228,
178  -15, -45, -76, -106, -137, -167, -198, -228, },
179  { 18, 54, 91, 127, 164, 200, 237, 273,
180  -18, -54, -91, -127, -164, -200, -237, -273, },
181  { 21, 65, 108, 152, 195, 239, 282, 326,
182  -21, -65, -108, -152, -195, -239, -282, -326, },
183  { 25, 77, 129, 181, 232, 284, 336, 388,
184  -25, -77, -129, -181, -232, -284, -336, -388, },
185  { 30, 92, 153, 215, 276, 338, 399, 461,
186  -30, -92, -153, -215, -276, -338, -399, -461, },
187  { 36, 109, 183, 256, 329, 402, 476, 549,
188  -36, -109, -183, -256, -329, -402, -476, -549, },
189  { 43, 130, 218, 305, 392, 479, 567, 654,
190  -43, -130, -218, -305, -392, -479, -567, -654, },
191  { 52, 156, 260, 364, 468, 572, 676, 780,
192  -52, -156, -260, -364, -468, -572, -676, -780, },
193  { 62, 186, 310, 434, 558, 682, 806, 930,
194  -62, -186, -310, -434, -558, -682, -806, -930, },
195  { 73, 221, 368, 516, 663, 811, 958, 1106,
196  -73, -221, -368, -516, -663, -811, -958, -1106, },
197  { 87, 263, 439, 615, 790, 966, 1142, 1318,
198  -87, -263, -439, -615, -790, -966, -1142, -1318, },
199  { 104, 314, 523, 733, 942, 1152, 1361, 1571,
200  -104, -314, -523, -733, -942, -1152, -1361, -1571, },
201  { 124, 374, 623, 873, 1122, 1372, 1621, 1871,
202  -124, -374, -623, -873, -1122, -1372, -1621, -1871, },
203  { 148, 445, 743, 1040, 1337, 1634, 1932, 2229,
204  -148, -445, -743, -1040, -1337, -1634, -1932, -2229, },
205  { 177, 531, 885, 1239, 1593, 1947, 2301, 2655,
206  -177, -531, -885, -1239, -1593, -1947, -2301, -2655, },
207  { 210, 632, 1053, 1475, 1896, 2318, 2739, 3161,
208  -210, -632, -1053, -1475, -1896, -2318, -2739, -3161, },
209  { 251, 753, 1255, 1757, 2260, 2762, 3264, 3766,
210  -251, -753, -1255, -1757, -2260, -2762, -3264, -3766, },
211  { 299, 897, 1495, 2093, 2692, 3290, 3888, 4486,
212  -299, -897, -1495, -2093, -2692, -3290, -3888, -4486, },
213  { 356, 1068, 1781, 2493, 3206, 3918, 4631, 5343,
214  -356, -1068, -1781, -2493, -3206, -3918, -4631, -5343, },
215  { 424, 1273, 2121, 2970, 3819, 4668, 5516, 6365,
216  -424, -1273, -2121, -2970, -3819, -4668, -5516, -6365, },
217 };
218 
219 static const int16_t oki_step_table[49] = {
220  16, 17, 19, 21, 23, 25, 28, 31, 34, 37,
221  41, 45, 50, 55, 60, 66, 73, 80, 88, 97,
222  107, 118, 130, 143, 157, 173, 190, 209, 230, 253,
223  279, 307, 337, 371, 408, 449, 494, 544, 598, 658,
224  724, 796, 876, 963, 1060, 1166, 1282, 1411, 1552
225 };
226 
227 // padded to zero where table size is less then 16
228 static const int8_t swf_index_tables[4][16] = {
229  /*2*/ { -1, 2 },
230  /*3*/ { -1, -1, 2, 4 },
231  /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
232  /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
233 };
234 
235 static const int8_t zork_index_table[8] = {
236  -1, -1, -1, 1, 4, 7, 10, 12,
237 };
238 
239 static const int8_t mtf_index_table[16] = {
240  8, 6, 4, 2, -1, -1, -1, -1,
241  -1, -1, -1, -1, 2, 4, 6, 8,
242 };
243 
244 /* end of tables */
245 
246 typedef struct ADPCMDecodeContext {
248  int vqa_version; /**< VQA version. Used for ADPCM_IMA_WS */
249  int has_status; /**< Status flag. Reset to 0 after a flush. */
251 
252 static void adpcm_flush(AVCodecContext *avctx);
253 
255 {
256  ADPCMDecodeContext *c = avctx->priv_data;
257  unsigned int min_channels = 1;
258  unsigned int max_channels = 2;
259 
260  adpcm_flush(avctx);
261 
262  switch(avctx->codec->id) {
265  max_channels = 1;
266  break;
268  max_channels = 2;
269  break;
276  max_channels = 6;
277  break;
279  min_channels = 2;
280  max_channels = 8;
281  if (avctx->ch_layout.nb_channels & 1) {
282  avpriv_request_sample(avctx, "channel count %d", avctx->ch_layout.nb_channels);
283  return AVERROR_PATCHWELCOME;
284  }
285  break;
287  min_channels = 2;
288  break;
290  max_channels = 8;
291  if (avctx->ch_layout.nb_channels <= 0 ||
292  avctx->block_align % (16 * avctx->ch_layout.nb_channels))
293  return AVERROR_INVALIDDATA;
294  break;
296  max_channels = 8;
297  if (avctx->ch_layout.nb_channels <= 0)
298  return AVERROR_INVALIDDATA;
299  break;
303  max_channels = 14;
304  break;
305  }
306  if (avctx->ch_layout.nb_channels < min_channels ||
307  avctx->ch_layout.nb_channels > max_channels) {
308  av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
309  return AVERROR(EINVAL);
310  }
311 
312  switch(avctx->codec->id) {
314  if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
315  return AVERROR_INVALIDDATA;
316  break;
318  if (avctx->bits_per_coded_sample != 4 ||
319  avctx->block_align != 17 * avctx->ch_layout.nb_channels)
320  return AVERROR_INVALIDDATA;
321  break;
323  if (avctx->bits_per_coded_sample < 3 || avctx->bits_per_coded_sample > 5)
324  return AVERROR_INVALIDDATA;
325  break;
327  if (avctx->bits_per_coded_sample != 4)
328  return AVERROR_INVALIDDATA;
329  break;
331  if (avctx->bits_per_coded_sample != 8)
332  return AVERROR_INVALIDDATA;
333  break;
334  default:
335  break;
336  }
337 
338  switch (avctx->codec->id) {
364  break;
366  avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
368  break;
370  avctx->sample_fmt = avctx->ch_layout.nb_channels > 2 ? AV_SAMPLE_FMT_S16P :
372  break;
373  default:
374  avctx->sample_fmt = AV_SAMPLE_FMT_S16;
375  }
376  return 0;
377 }
378 
379 static inline int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
380 {
381  int delta, pred, step, add;
382 
383  pred = c->predictor;
384  delta = nibble & 7;
385  step = c->step;
386  add = (delta * 2 + 1) * step;
387  if (add < 0)
388  add = add + 7;
389 
390  if ((nibble & 8) == 0)
391  pred = av_clip(pred + (add >> 3), -32767, 32767);
392  else
393  pred = av_clip(pred - (add >> 3), -32767, 32767);
394 
395  switch (delta) {
396  case 7:
397  step *= 0x99;
398  break;
399  case 6:
400  c->step = av_clip(c->step * 2, 127, 24576);
401  c->predictor = pred;
402  return pred;
403  case 5:
404  step *= 0x66;
405  break;
406  case 4:
407  step *= 0x4d;
408  break;
409  default:
410  step *= 0x39;
411  break;
412  }
413 
414  if (step < 0)
415  step += 0x3f;
416 
417  c->step = step >> 6;
418  c->step = av_clip(c->step, 127, 24576);
419  c->predictor = pred;
420  return pred;
421 }
422 
423 static inline int16_t adpcm_ima_escape_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
424 {
425  int step_index;
426  int predictor;
427  int sign, delta, diff, step;
428 
429  step = ff_adpcm_step_table[c->step_index];
430  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
431  step_index = av_clip(step_index, 0, 88);
432 
433  sign = nibble & 8;
434  delta = nibble & 7;
435  diff = (delta * step) >> 2;
436  predictor = c->predictor;
437  if (sign) predictor -= diff;
438  else predictor += diff;
439 
440  c->predictor = av_clip_int16(predictor);
441  c->step_index = step_index;
442 
443  return (int16_t)c->predictor;
444 }
445 
446 static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
447 {
448  int step_index;
449  int predictor;
450  int sign, delta, diff, step;
451 
452  step = ff_adpcm_step_table[c->step_index];
453  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
454  step_index = av_clip(step_index, 0, 88);
455 
456  sign = nibble & 8;
457  delta = nibble & 7;
458  /* perform direct multiplication instead of series of jumps proposed by
459  * the reference ADPCM implementation since modern CPUs can do the mults
460  * quickly enough */
461  diff = ((2 * delta + 1) * step) >> shift;
462  predictor = c->predictor;
463  if (sign) predictor -= diff;
464  else predictor += diff;
465 
466  c->predictor = av_clip_int16(predictor);
467  c->step_index = step_index;
468 
469  return (int16_t)c->predictor;
470 }
471 
472 static inline int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
473 {
474  int step_index;
475  int predictor;
476  int sign, delta, diff, step;
477 
478  step = ff_adpcm_step_table[c->step_index];
479  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
480  step_index = av_clip(step_index, 0, 88);
481 
482  sign = nibble & 8;
483  delta = nibble & 7;
484  diff = (delta * step) >> shift;
485  predictor = c->predictor;
486  if (sign) predictor -= diff;
487  else predictor += diff;
488 
489  c->predictor = av_clip_int16(predictor);
490  c->step_index = step_index;
491 
492  return (int16_t)c->predictor;
493 }
494 
495 static inline int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
496 {
497  int step_index, step, delta, predictor;
498 
499  step = ff_adpcm_step_table[c->step_index];
500 
501  delta = step * (2 * nibble - 15);
502  predictor = c->predictor + delta;
503 
504  step_index = c->step_index + mtf_index_table[(unsigned)nibble];
505  c->predictor = av_clip_int16(predictor >> 4);
506  c->step_index = av_clip(step_index, 0, 88);
507 
508  return (int16_t)c->predictor;
509 }
510 
511 static inline int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
512 {
513  int step_index;
514  int predictor;
515  int step;
516 
517  nibble = sign_extend(nibble & 0xF, 4);
518 
519  step = ima_cunning_step_table[c->step_index];
520  step_index = c->step_index + ima_cunning_index_table[abs(nibble)];
521  step_index = av_clip(step_index, 0, 60);
522 
523  predictor = c->predictor + step * nibble;
524 
525  c->predictor = av_clip_int16(predictor);
526  c->step_index = step_index;
527 
528  return c->predictor;
529 }
530 
532 {
533  int nibble, step_index, predictor, sign, delta, diff, step, shift;
534 
535  shift = bps - 1;
536  nibble = get_bits_le(gb, bps),
537  step = ff_adpcm_step_table[c->step_index];
538  step_index = c->step_index + adpcm_index_tables[bps - 2][nibble];
539  step_index = av_clip(step_index, 0, 88);
540 
541  sign = nibble & (1 << shift);
542  delta = av_zero_extend(nibble, shift);
543  diff = step >> shift;
544  for (int i = 0; i < shift; i++)
545  diff += (step >> (shift-1-i)) * !!(delta & (1 << i));
546  predictor = c->predictor;
547  if (sign) predictor -= diff;
548  else predictor += diff;
549 
550  c->predictor = av_clip_int16(predictor);
551  c->step_index = step_index;
552 
553  return (int16_t)c->predictor;
554 }
555 
557 {
558  int step_index;
559  int predictor;
560  int diff, step;
561 
562  step = ff_adpcm_step_table[c->step_index];
563  step_index = c->step_index + ff_adpcm_index_table[nibble];
564  step_index = av_clip(step_index, 0, 88);
565 
566  diff = step >> 3;
567  if (nibble & 4) diff += step;
568  if (nibble & 2) diff += step >> 1;
569  if (nibble & 1) diff += step >> 2;
570 
571  if (nibble & 8)
572  predictor = c->predictor - diff;
573  else
574  predictor = c->predictor + diff;
575 
576  c->predictor = av_clip_int16(predictor);
577  c->step_index = step_index;
578 
579  return c->predictor;
580 }
581 
582 static void decode_adpcm_ima_hvqm2(AVCodecContext *avctx, int16_t *outbuf, int samples_to_do,
583  int frame_format, GetByteContext *gb)
584 {
585  ADPCMDecodeContext *c = avctx->priv_data;
586  int st = avctx->ch_layout.nb_channels == 2;
587  uint8_t nibble;
588 
589  for (int ch = 0; ch < avctx->ch_layout.nb_channels; ch++) {
590  unsigned tmp;
591 
592  switch (frame_format) {
593  case 0: /* combined hist+index */
594  tmp = bytestream2_get_be16(gb);
595  c->status[ch].predictor = sign_extend(tmp & 0xFF80, 16);
596  c->status[ch].step_index = tmp & 0x7f;
597  *outbuf++ = c->status[ch].predictor;
598  samples_to_do--;
599  break;
600  default:
601  break;
602  }
603 
604  c->status[ch].step_index = av_clip(c->status[ch].step_index, 0, 88);
605  }
606 
607  for (int i = 0; i < samples_to_do; i++) {
608  if (!(i&1)) {
609  nibble = bytestream2_get_byte(gb);
610  *outbuf++ = ff_adpcm_ima_qt_expand_nibble(&c->status[st], nibble >> 4);
611  } else {
612  *outbuf++ = ff_adpcm_ima_qt_expand_nibble(&c->status[ 0], nibble & 0xF);
613  }
614  }
615 
616  bytestream2_seek(gb, 0, SEEK_END);
617 }
618 
619 static void decode_adpcm_ima_hvqm4(AVCodecContext *avctx, int16_t *outbuf, int samples_to_do,
620  int frame_format, GetByteContext *gb)
621 {
622  ADPCMDecodeContext *c = avctx->priv_data;
623  int st = avctx->ch_layout.nb_channels == 2;
624  unsigned tmp;
625 
626  for (int ch = 0; ch < avctx->ch_layout.nb_channels; ch++) {
627  switch (frame_format) {
628  case 1: /* combined hist+index */
629  tmp = bytestream2_get_be16(gb);
630  c->status[ch].predictor = sign_extend(tmp & 0xFF80, 16);
631  c->status[ch].step_index = tmp & 0x7f;
632  break;
633  case 2: /* no hist/index (continues from previous frame) */
634  default:
635  break;
636  case 3: /* separate hist+index */
637  tmp = bytestream2_get_be16(gb);
638  c->status[ch].predictor = sign_extend(tmp, 16);
639  c->status[ch].step_index = bytestream2_get_byte(gb);
640  break;
641  }
642 
643  c->status[ch].step_index = av_clip(c->status[ch].step_index, 0, 88);
644  }
645 
646  if (frame_format == 1 || frame_format == 3) {
647  for (int ch = 0; ch < avctx->ch_layout.nb_channels; ch++)
648  *outbuf++ = (int16_t)c->status[st - ch].predictor;
649  samples_to_do--;
650  }
651 
652  for (int i = 0; i < samples_to_do; i += 1+(!st)) {
653  uint8_t nibble = bytestream2_get_byte(gb);
654 
655  *outbuf++ = ff_adpcm_ima_qt_expand_nibble(&c->status[st], nibble & 0xF);
656  *outbuf++ = ff_adpcm_ima_qt_expand_nibble(&c->status[ 0], nibble >> 4);
657  }
658 
659  bytestream2_seek(gb, 0, SEEK_END);
660 }
661 
662 static inline int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
663 {
664  int predictor;
665 
666  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
667  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
668 
669  c->sample2 = c->sample1;
670  c->sample1 = av_clip_int16(predictor);
671  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
672  if (c->idelta < 16) c->idelta = 16;
673  if (c->idelta > INT_MAX/768) {
674  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
675  c->idelta = INT_MAX/768;
676  }
677 
678  return c->sample1;
679 }
680 
681 static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
682 {
683  int step_index, predictor, sign, delta, diff, step;
684 
685  step = oki_step_table[c->step_index];
686  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
687  step_index = av_clip(step_index, 0, 48);
688 
689  sign = nibble & 8;
690  delta = nibble & 7;
691  diff = ((2 * delta + 1) * step) >> 3;
692  predictor = c->predictor;
693  if (sign) predictor -= diff;
694  else predictor += diff;
695 
696  c->predictor = av_clip_intp2(predictor, 11);
697  c->step_index = step_index;
698 
699  return c->predictor * 16;
700 }
701 
702 static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
703 {
704  int sign, delta, diff;
705  int new_step;
706 
707  sign = nibble & 8;
708  delta = nibble & 7;
709  /* perform direct multiplication instead of series of jumps proposed by
710  * the reference ADPCM implementation since modern CPUs can do the mults
711  * quickly enough */
712  diff = ((2 * delta + 1) * c->step) >> 3;
713  /* predictor update is not so trivial: predictor is multiplied on 254/256 before updating */
714  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
715  c->predictor = av_clip_int16(c->predictor);
716  /* calculate new step and clamp it to range 511..32767 */
717  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
718  c->step = av_clip(new_step, 511, 32767);
719 
720  return (int16_t)c->predictor;
721 }
722 
723 static inline int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
724 {
725  int sign, delta, diff;
726 
727  sign = nibble & (1<<(size-1));
728  delta = nibble & ((1<<(size-1))-1);
729  diff = delta << (7 + c->step + shift);
730 
731  /* clamp result */
732  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
733 
734  /* calculate new step */
735  if (delta >= (2*size - 3) && c->step < 3)
736  c->step++;
737  else if (delta == 0 && c->step > 0)
738  c->step--;
739 
740  return (int16_t) c->predictor;
741 }
742 
743 static inline int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
744 {
745  if(!c->step) {
746  c->predictor = 0;
747  c->step = 127;
748  }
749 
750  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
751  c->predictor = av_clip_int16(c->predictor);
752  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
753  c->step = av_clip(c->step, 127, 24576);
754  return c->predictor;
755 }
756 
757 static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
758 {
759  c->predictor += mtaf_stepsize[c->step][nibble];
760  c->predictor = av_clip_int16(c->predictor);
761  c->step += ff_adpcm_index_table[nibble];
762  c->step = av_clip_uintp2(c->step, 5);
763  return c->predictor;
764 }
765 
766 static inline int16_t adpcm_circus_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
767 {
768  int32_t sample = c->predictor;
769  int32_t scale = c->step;
770  int32_t code = sign_extend(nibble, 8);
771 
772  sample += code * (1 << scale);
773  if (code == 0) {
774  scale--;
775  } else if (code == 127 || code == -128) {
776  scale++;
777  }
778  scale = av_clip(scale, 0, 8);
780 
781  c->predictor = sample;
782  c->step = scale;
783 
784  return sample;
785 }
786 
787 static inline int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
788 {
789  int16_t index = c->step_index;
790  uint32_t lookup_sample = ff_adpcm_step_table[index];
791  int32_t sample = 0;
792 
793  if (nibble & 0x40)
794  sample += lookup_sample;
795  if (nibble & 0x20)
796  sample += lookup_sample >> 1;
797  if (nibble & 0x10)
798  sample += lookup_sample >> 2;
799  if (nibble & 0x08)
800  sample += lookup_sample >> 3;
801  if (nibble & 0x04)
802  sample += lookup_sample >> 4;
803  if (nibble & 0x02)
804  sample += lookup_sample >> 5;
805  if (nibble & 0x01)
806  sample += lookup_sample >> 6;
807  if (nibble & 0x80)
808  sample = -sample;
809 
810  sample += c->predictor;
812 
813  index += zork_index_table[(nibble >> 4) & 7];
814  index = av_clip(index, 0, 88);
815 
816  c->predictor = sample;
817  c->step_index = index;
818 
819  return sample;
820 }
821 
822 static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
823  const uint8_t *in, ADPCMChannelStatus *left,
824  ADPCMChannelStatus *right, int channels, int sample_offset)
825 {
826  int i, j;
827  int shift,filter,f0,f1;
828  int s_1,s_2;
829  int d,s,t;
830 
831  out0 += sample_offset;
832  if (channels == 1)
833  out1 = out0 + 28;
834  else
835  out1 += sample_offset;
836 
837  for(i=0;i<4;i++) {
838  shift = 12 - (in[4+i*2] & 15);
839  filter = in[4+i*2] >> 4;
841  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
842  filter=0;
843  }
844  if (shift < 0) {
845  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
846  shift = 0;
847  }
848  f0 = xa_adpcm_table[filter][0];
849  f1 = xa_adpcm_table[filter][1];
850 
851  s_1 = left->sample1;
852  s_2 = left->sample2;
853 
854  for(j=0;j<28;j++) {
855  d = in[16+i+j*4];
856 
857  t = sign_extend(d, 4);
858  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
859  s_2 = s_1;
860  s_1 = av_clip_int16(s);
861  out0[j] = s_1;
862  }
863 
864  if (channels == 2) {
865  left->sample1 = s_1;
866  left->sample2 = s_2;
867  s_1 = right->sample1;
868  s_2 = right->sample2;
869  }
870 
871  shift = 12 - (in[5+i*2] & 15);
872  filter = in[5+i*2] >> 4;
873  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
874  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
875  filter=0;
876  }
877  if (shift < 0) {
878  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
879  shift = 0;
880  }
881 
882  f0 = xa_adpcm_table[filter][0];
883  f1 = xa_adpcm_table[filter][1];
884 
885  for(j=0;j<28;j++) {
886  d = in[16+i+j*4];
887 
888  t = sign_extend(d >> 4, 4);
889  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
890  s_2 = s_1;
891  s_1 = av_clip_int16(s);
892  out1[j] = s_1;
893  }
894 
895  if (channels == 2) {
896  right->sample1 = s_1;
897  right->sample2 = s_2;
898  } else {
899  left->sample1 = s_1;
900  left->sample2 = s_2;
901  }
902 
903  out0 += 28 * (3 - channels);
904  out1 += 28 * (3 - channels);
905  }
906 
907  return 0;
908 }
909 
910 static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
911 {
912  ADPCMDecodeContext *c = avctx->priv_data;
913  GetBitContext gb;
914  const int8_t *table;
915  int channels = avctx->ch_layout.nb_channels;
916  int k0, signmask, nb_bits, count;
917  int size = buf_size*8;
918  int i;
919 
920  init_get_bits(&gb, buf, size);
921 
922  //read bits & initial values
923  nb_bits = get_bits(&gb, 2)+2;
924  table = swf_index_tables[nb_bits-2];
925  k0 = 1 << (nb_bits-2);
926  signmask = 1 << (nb_bits-1);
927 
928  while (get_bits_count(&gb) <= size - 22 * channels) {
929  for (i = 0; i < channels; i++) {
930  *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
931  c->status[i].step_index = get_bits(&gb, 6);
932  }
933 
934  for (count = 0; get_bits_count(&gb) <= size - nb_bits * channels && count < 4095; count++) {
935  int i;
936 
937  for (i = 0; i < channels; i++) {
938  // similar to IMA adpcm
939  int delta = get_bits(&gb, nb_bits);
940  int step = ff_adpcm_step_table[c->status[i].step_index];
941  int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
942  int k = k0;
943 
944  do {
945  if (delta & k)
946  vpdiff += step;
947  step >>= 1;
948  k >>= 1;
949  } while(k);
950  vpdiff += step;
951 
952  if (delta & signmask)
953  c->status[i].predictor -= vpdiff;
954  else
955  c->status[i].predictor += vpdiff;
956 
957  c->status[i].step_index += table[delta & (~signmask)];
958 
959  c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
960  c->status[i].predictor = av_clip_int16(c->status[i].predictor);
961 
962  *samples++ = c->status[i].predictor;
963  }
964  }
965  }
966 }
967 
968 int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
969 {
970  int sample = sign_extend(nibble, 4) * (1 << shift);
971 
972  if (flag)
973  sample += (8 * cs->sample1) - (4 * cs->sample2);
974  else
975  sample += 4 * cs->sample1;
976 
977  sample = av_clip_int16(sample >> 2);
978 
979  cs->sample2 = cs->sample1;
980  cs->sample1 = sample;
981 
982  return sample;
983 }
984 
986 {
987  int sign, delta, add;
988 
989  sign = bits & 4;
990  if (sign)
991  delta = 4 - (bits & 3);
992  else
993  delta = bits;
994 
995  switch (delta) {
996  case 0:
997  add = 0;
998  c->step = (3 * c->step) >> 2;
999  break;
1000  case 1:
1001  add = c->step;
1002  c->step = (4 * c->step - (c->step >> 1)) >> 2;
1003  break;
1004  case 2:
1005  add = 2 * c->step;
1006  c->step = ((c->step >> 1) + add) >> 1;
1007  break;
1008  case 3:
1009  add = 4 * c->step - (c->step >> 1);
1010  c->step = 2 * c->step;
1011  break;
1012  case 4:
1013  add = (11 * c->step) >> 1;
1014  c->step = 3 * c->step;
1015  break;
1016  default:
1017  av_unreachable("There are cases for all control paths when bits is 3-bit");
1018  }
1019 
1020  if (sign)
1021  add = -add;
1022 
1023  c->predictor = av_clip_int16(c->predictor + add);
1024  c->step = av_clip(c->step, 1, 7281);
1025  return c->predictor;
1026 }
1027 
1029 {
1030  int sign, delta, add;
1031 
1032  sign = bits & 8;
1033  if (sign)
1034  delta = 8 - (bits & 7);
1035  else
1036  delta = bits;
1037 
1038  switch (delta) {
1039  case 0:
1040  add = 0;
1041  c->step = (3 * c->step) >> 2;
1042  break;
1043  case 1:
1044  add = c->step;
1045  c->step = (3 * c->step) >> 2;
1046  break;
1047  case 2:
1048  add = 2 * c->step;
1049  break;
1050  case 3:
1051  add = 3 * c->step;
1052  break;
1053  case 4:
1054  add = 4 * c->step;
1055  break;
1056  case 5:
1057  add = (11 * c->step) >> 1;
1058  c->step += c->step >> 2;
1059  break;
1060  case 6:
1061  add = (15 * c->step) >> 1;
1062  c->step = 2 * c->step;
1063  break;
1064  case 7:
1065  if (sign)
1066  add = (19 * c->step) >> 1;
1067  else
1068  add = (21 * c->step) >> 1;
1069  c->step = (c->step >> 1) + 2 * c->step;
1070  break;
1071  case 8:
1072  add = (25 * c->step) >> 1;
1073  c->step = 5 * c->step;
1074  break;
1075  default:
1076  av_unreachable("There are cases for all control paths when bits is 4-bit");
1077  }
1078 
1079  if (sign)
1080  add = -add;
1081 
1082  c->predictor = av_clip_int16(c->predictor + add);
1083  c->step = av_clip(c->step, 1, 2621);
1084  return c->predictor;
1085 }
1086 
1088 {
1089  int sign, delta, add;
1090 
1091  sign = bits & 0x10;
1092  if (sign)
1093  delta = 16 - (bits & 0xF);
1094  else
1095  delta = bits;
1096 
1097  add = delta * c->step;
1098  switch (delta) {
1099  case 0:
1100  c->step += (c->step >> 2) - (c->step >> 1);
1101  break;
1102  case 1:
1103  case 2:
1104  case 3:
1105  c->step += (c->step >> 3) - (c->step >> 2);
1106  break;
1107  case 4:
1108  case 5:
1109  c->step += (c->step >> 4) - (c->step >> 3);
1110  break;
1111  case 6:
1112  break;
1113  case 7:
1114  c->step += c->step >> 3;
1115  break;
1116  case 8:
1117  c->step += c->step >> 2;
1118  break;
1119  case 9:
1120  c->step += c->step >> 1;
1121  break;
1122  case 10:
1123  c->step = 2 * c->step - (c->step >> 3);
1124  break;
1125  case 11:
1126  c->step = 2 * c->step + (c->step >> 3);
1127  break;
1128  case 12:
1129  c->step = 2 * c->step + (c->step >> 1) - (c->step >> 3);
1130  break;
1131  case 13:
1132  c->step = 3 * c->step - (c->step >> 2);
1133  break;
1134  case 14:
1135  c->step *= 3;
1136  break;
1137  case 15:
1138  case 16:
1139  c->step = (7 * c->step) >> 1;
1140  break;
1141  }
1142 
1143  if (sign)
1144  add = -add;
1145 
1146  c->predictor = av_clip_int16(c->predictor + add);
1147  c->step = av_clip(c->step, 1, 1024);
1148  return c->predictor;
1149 }
1150 
1151 /**
1152  * Get the number of samples (per channel) that will be decoded from the packet.
1153  * In one case, this is actually the maximum number of samples possible to
1154  * decode with the given buf_size.
1155  *
1156  * @param[out] coded_samples set to the number of samples as coded in the
1157  * packet, or 0 if the codec does not encode the
1158  * number of samples in each frame.
1159  * @param[out] approx_nb_samples set to non-zero if the number of samples
1160  * returned is an approximation.
1161  */
1163  int buf_size, int *coded_samples, int *approx_nb_samples)
1164 {
1165  ADPCMDecodeContext *s = avctx->priv_data;
1166  int nb_samples = 0;
1167  int ch = avctx->ch_layout.nb_channels;
1168  int has_coded_samples = 0;
1169  int header_size;
1170 
1171  *coded_samples = 0;
1172  *approx_nb_samples = 0;
1173 
1174  if(ch <= 0)
1175  return 0;
1176 
1177  switch (avctx->codec->id) {
1178  /* constant, only check buf_size */
1180  if (buf_size < 76 * ch)
1181  return 0;
1182  nb_samples = 128;
1183  break;
1185  if (buf_size < 34 * ch)
1186  return 0;
1187  nb_samples = 64;
1188  break;
1189  case AV_CODEC_ID_ADPCM_N64:
1190  nb_samples = (buf_size / 9) * 16;
1191  break;
1192  /* simple 4-bit adpcm */
1193  case AV_CODEC_ID_ADPCM_CT:
1206  nb_samples = buf_size * 2 / ch;
1207  break;
1208  }
1209  if (nb_samples)
1210  return nb_samples;
1211 
1212  /* simple 4-bit adpcm, with header */
1213  header_size = 0;
1214  switch (avctx->codec->id) {
1215  case AV_CODEC_ID_ADPCM_4XM:
1216  case AV_CODEC_ID_ADPCM_AGM:
1221  case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
1222  case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4 * ch; break;
1223  }
1224  if (header_size > 0)
1225  return (buf_size - header_size) * 2 / ch;
1226 
1227  /* more complex formats */
1228  switch (avctx->codec->id) {
1230  bytestream2_skip(gb, 4);
1231  has_coded_samples = 1;
1232  *coded_samples = bytestream2_get_le32u(gb);
1233  nb_samples = FFMIN((buf_size - 8) * 2, *coded_samples);
1234  bytestream2_seek(gb, -8, SEEK_CUR);
1235  break;
1236  case AV_CODEC_ID_ADPCM_EA:
1237  /* Stereo is 30 bytes per block */
1238  /* Mono is 15 bytes per block */
1239  has_coded_samples = 1;
1240  *coded_samples = bytestream2_get_le32(gb);
1241  *coded_samples -= *coded_samples % 28;
1242  nb_samples = (buf_size - 12) / (ch == 2 ? 30 : 15) * 28;
1243  break;
1245  nb_samples = ((bytestream2_peek_be64(gb) >> 16) & 0xFFFF);
1246  break;
1248  {
1249  int frame_format = bytestream2_get_be16(gb);
1250  int skip = 6;
1251 
1252  if (frame_format == 1)
1253  skip += 2 * ch;
1254  if (frame_format == 3)
1255  skip += 3 * ch;
1256 
1257  nb_samples = (buf_size - skip) * 2 / ch;
1258  bytestream2_seek(gb, 0, SEEK_SET);
1259  }
1260  break;
1262  has_coded_samples = 1;
1263  *coded_samples = bytestream2_get_le32(gb);
1264  nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
1265  break;
1267  nb_samples = (buf_size - ch) / ch * 2;
1268  break;
1272  /* maximum number of samples */
1273  /* has internal offsets and a per-frame switch to signal raw 16-bit */
1274  has_coded_samples = 1;
1275  switch (avctx->codec->id) {
1277  header_size = 4 + 9 * ch;
1278  *coded_samples = bytestream2_get_le32(gb);
1279  break;
1281  header_size = 4 + 5 * ch;
1282  *coded_samples = bytestream2_get_le32(gb);
1283  break;
1285  header_size = 4 + 5 * ch;
1286  *coded_samples = bytestream2_get_be32(gb);
1287  break;
1288  }
1289  *coded_samples -= *coded_samples % 28;
1290  nb_samples = (buf_size - header_size) * 2 / ch;
1291  nb_samples -= nb_samples % 28;
1292  *approx_nb_samples = 1;
1293  break;
1295  if (avctx->block_align > 0)
1296  buf_size = FFMIN(buf_size, avctx->block_align);
1297  nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
1298  break;
1300  if (avctx->block_align > 0)
1301  buf_size = FFMIN(buf_size, avctx->block_align);
1302  if (buf_size < 4 * ch)
1303  return AVERROR_INVALIDDATA;
1304  nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
1305  break;
1307  if (avctx->block_align > 0)
1308  buf_size = FFMIN(buf_size, avctx->block_align);
1309  nb_samples = (buf_size - 4 * ch) * 2 / ch;
1310  break;
1312  if (avctx->block_align > 0)
1313  buf_size = FFMIN(buf_size, avctx->block_align);
1314  nb_samples = (buf_size - 4 * ch) * 2 / ch;
1315  break;
1316  CASE(ADPCM_IMA_WAV,
1317  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
1318  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
1319  if (avctx->block_align > 0)
1320  buf_size = FFMIN(buf_size, avctx->block_align);
1321  if (buf_size < 4 * ch)
1322  return AVERROR_INVALIDDATA;
1323  nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
1324  ) /* End of CASE */
1325  CASE(ADPCM_IMA_XBOX,
1326  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
1327  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
1328  if (avctx->block_align > 0)
1329  buf_size = FFMIN(buf_size, avctx->block_align);
1330  if (buf_size < 4 * ch)
1331  return AVERROR_INVALIDDATA;
1332  nb_samples = (buf_size - 4 * ch) / (bsize * ch) * bsamples + 1;
1333  ) /* End of CASE */
1334  case AV_CODEC_ID_ADPCM_MS:
1335  if (avctx->block_align > 0)
1336  buf_size = FFMIN(buf_size, avctx->block_align);
1337  nb_samples = (buf_size - 6 * ch) * 2 / ch;
1338  break;
1340  if (avctx->block_align > 0)
1341  buf_size = FFMIN(buf_size, avctx->block_align);
1342  nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
1343  break;
1347  {
1348  int samples_per_byte;
1349  switch (avctx->codec->id) {
1350  case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
1351  case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
1352  case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
1353  }
1354  if (!s->status[0].step_index) {
1355  if (buf_size < ch)
1356  return AVERROR_INVALIDDATA;
1357  nb_samples++;
1358  buf_size -= ch;
1359  }
1360  nb_samples += buf_size * samples_per_byte / ch;
1361  break;
1362  }
1363  case AV_CODEC_ID_ADPCM_SWF:
1364  {
1365  int buf_bits = buf_size * 8 - 2;
1366  int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
1367  int block_hdr_size = 22 * ch;
1368  int block_size = block_hdr_size + nbits * ch * 4095;
1369  int nblocks = buf_bits / block_size;
1370  int bits_left = buf_bits - nblocks * block_size;
1371  nb_samples = nblocks * 4096;
1372  if (bits_left >= block_hdr_size)
1373  nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
1374  break;
1375  }
1376  case AV_CODEC_ID_ADPCM_THP:
1378  if (avctx->extradata) {
1379  nb_samples = buf_size * 14 / (8 * ch);
1380  break;
1381  }
1382  has_coded_samples = 1;
1383  bytestream2_skip(gb, 4); // channel size
1384  *coded_samples = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
1385  bytestream2_get_le32(gb) :
1386  bytestream2_get_be32(gb);
1387  buf_size -= 8 + 36 * ch;
1388  buf_size /= ch;
1389  nb_samples = buf_size / 8 * 14;
1390  if (buf_size % 8 > 1)
1391  nb_samples += (buf_size % 8 - 1) * 2;
1392  *approx_nb_samples = 1;
1393  break;
1394  case AV_CODEC_ID_ADPCM_AFC:
1395  nb_samples = buf_size / (9 * ch) * 16;
1396  break;
1397  case AV_CODEC_ID_ADPCM_XA:
1398  nb_samples = (buf_size / 128) * 224 / ch;
1399  break;
1400  case AV_CODEC_ID_ADPCM_XMD:
1401  nb_samples = buf_size / (21 * ch) * 32;
1402  break;
1403  case AV_CODEC_ID_ADPCM_DTK:
1404  case AV_CODEC_ID_ADPCM_PSX:
1405  nb_samples = buf_size / (16 * ch) * 28;
1406  break;
1408  nb_samples = ((buf_size - 1) / ch) * 2;
1409  break;
1411  nb_samples = buf_size / avctx->block_align * 32;
1412  break;
1415  nb_samples = buf_size / ch;
1416  break;
1418  if (!avctx->extradata || avctx->extradata_size != 2)
1419  return AVERROR_INVALIDDATA;
1420  nb_samples = AV_RL16(avctx->extradata);
1421  break;
1422  }
1423 
1424  /* validate coded sample count */
1425  if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
1426  return AVERROR_INVALIDDATA;
1427 
1428  return nb_samples;
1429 }
1430 
1432  int *got_frame_ptr, AVPacket *avpkt)
1433 {
1434  const uint8_t *buf = avpkt->data;
1435  int buf_size = avpkt->size;
1436  ADPCMDecodeContext *c = avctx->priv_data;
1437  int channels = avctx->ch_layout.nb_channels;
1438  int16_t *samples;
1439  int16_t **samples_p;
1440  int st; /* stereo */
1441  int nb_samples, coded_samples, approx_nb_samples, ret;
1442  GetByteContext gb;
1443 
1444  bytestream2_init(&gb, buf, buf_size);
1445  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
1446  if (nb_samples <= 0) {
1447  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
1448  return AVERROR_INVALIDDATA;
1449  }
1450 
1451  /* get output buffer */
1452  frame->nb_samples = nb_samples;
1453  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
1454  return ret;
1455  samples = (int16_t *)frame->data[0];
1456  samples_p = (int16_t **)frame->extended_data;
1457 
1458  /* use coded_samples when applicable */
1459  /* it is always <= nb_samples, so the output buffer will be large enough */
1460  if (coded_samples) {
1461  if (!approx_nb_samples && coded_samples != nb_samples)
1462  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
1463  frame->nb_samples = nb_samples = coded_samples;
1464  }
1465 
1466  st = channels == 2 ? 1 : 0;
1467 
1468  switch(avctx->codec->id) {
1469  CASE(ADPCM_IMA_QT,
1470  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
1471  Channel data is interleaved per-chunk. */
1472  for (int channel = 0; channel < channels; channel++) {
1473  ADPCMChannelStatus *cs = &c->status[channel];
1474  int predictor;
1475  int step_index;
1476  /* (pppppp) (piiiiiii) */
1477 
1478  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
1479  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1480  step_index = predictor & 0x7F;
1481  predictor &= ~0x7F;
1482 
1483  if (cs->step_index == step_index) {
1484  int diff = predictor - cs->predictor;
1485  if (diff < 0)
1486  diff = - diff;
1487  if (diff > 0x7f)
1488  goto update;
1489  } else {
1490  update:
1491  cs->step_index = step_index;
1492  cs->predictor = predictor;
1493  }
1494 
1495  if (cs->step_index > 88u){
1496  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1497  channel, cs->step_index);
1498  return AVERROR_INVALIDDATA;
1499  }
1500 
1501  samples = samples_p[channel];
1502 
1503  for (int m = 0; m < 64; m += 2) {
1504  int byte = bytestream2_get_byteu(&gb);
1505  samples[m ] = ff_adpcm_ima_qt_expand_nibble(cs, byte & 0x0F);
1506  samples[m + 1] = ff_adpcm_ima_qt_expand_nibble(cs, byte >> 4 );
1507  }
1508  }
1509  ) /* End of CASE */
1510  CASE(ADPCM_IMA_WAV,
1511  for (int i = 0; i < channels; i++) {
1512  ADPCMChannelStatus *cs = &c->status[i];
1513  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
1514 
1515  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1516  if (cs->step_index > 88u){
1517  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1518  i, cs->step_index);
1519  return AVERROR_INVALIDDATA;
1520  }
1521  }
1522 
1523  if (avctx->bits_per_coded_sample != 4) {
1524  int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
1525  int block_size = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
1526  uint8_t temp[20 + AV_INPUT_BUFFER_PADDING_SIZE] = { 0 };
1527  GetBitContext g;
1528 
1529  for (int n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
1530  for (int i = 0; i < channels; i++) {
1531  ADPCMChannelStatus *cs = &c->status[i];
1532  samples = &samples_p[i][1 + n * samples_per_block];
1533  for (int j = 0; j < block_size; j++) {
1534  temp[j] = buf[4 * channels + block_size * n * channels +
1535  (j % 4) + (j / 4) * (channels * 4) + i * 4];
1536  }
1537  ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
1538  if (ret < 0)
1539  return ret;
1540  for (int m = 0; m < samples_per_block; m++) {
1542  avctx->bits_per_coded_sample);
1543  }
1544  }
1545  }
1546  bytestream2_skip(&gb, avctx->block_align - channels * 4);
1547  } else {
1548  for (int n = 0; n < (nb_samples - 1) / 8; n++) {
1549  for (int i = 0; i < channels; i++) {
1550  ADPCMChannelStatus *cs = &c->status[i];
1551  samples = &samples_p[i][1 + n * 8];
1552  for (int m = 0; m < 8; m += 2) {
1553  int v = bytestream2_get_byteu(&gb);
1554  samples[m ] = ff_adpcm_ima_qt_expand_nibble(cs, v & 0x0F);
1555  samples[m + 1] = ff_adpcm_ima_qt_expand_nibble(cs, v >> 4);
1556  }
1557  }
1558  }
1559  }
1560  ) /* End of CASE */
1561  CASE(ADPCM_IMA_XBOX,
1562  for (int i = 0; i < channels; i++) {
1563  ADPCMChannelStatus *cs = &c->status[i];
1564  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
1565 
1566  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1567  if (cs->step_index > 88u) {
1568  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1569  i, cs->step_index);
1570  return AVERROR_INVALIDDATA;
1571  }
1572  }
1573 
1574  for (int n = 0; n < (nb_samples-1) / 8; n++) {
1575  for (int i = 0; i < channels; i++) {
1576  ADPCMChannelStatus *cs = &c->status[i];
1577  samples = &samples_p[i][1 + n * 8];
1578  for (int m = 0; m < 8; m += 2) {
1579  int v = bytestream2_get_byteu(&gb);
1580  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1581  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1582  }
1583  }
1584  }
1585  frame->nb_samples--;
1586  ) /* End of CASE */
1587  CASE(ADPCM_4XM,
1588  for (int i = 0; i < channels; i++)
1589  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1590 
1591  for (int i = 0; i < channels; i++) {
1592  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1593  if (c->status[i].step_index > 88u) {
1594  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1595  i, c->status[i].step_index);
1596  return AVERROR_INVALIDDATA;
1597  }
1598  }
1599 
1600  for (int i = 0; i < channels; i++) {
1601  ADPCMChannelStatus *cs = &c->status[i];
1602  samples = (int16_t *)frame->data[i];
1603  for (int n = nb_samples >> 1; n > 0; n--) {
1604  int v = bytestream2_get_byteu(&gb);
1605  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
1606  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
1607  }
1608  }
1609  ) /* End of CASE */
1610  CASE(ADPCM_AGM,
1611  for (int i = 0; i < channels; i++)
1612  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1613  for (int i = 0; i < channels; i++)
1614  c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
1615 
1616  for (int n = 0; n < nb_samples >> (1 - st); n++) {
1617  int v = bytestream2_get_byteu(&gb);
1618  *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
1619  *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4 );
1620  }
1621  ) /* End of CASE */
1622  CASE(ADPCM_MS,
1623  int block_predictor;
1624 
1625  if (avctx->ch_layout.nb_channels > 2) {
1626  for (int channel = 0; channel < avctx->ch_layout.nb_channels; channel++) {
1627  samples = samples_p[channel];
1628  block_predictor = bytestream2_get_byteu(&gb);
1629  if (block_predictor > 6) {
1630  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[%d] = %d\n",
1631  channel, block_predictor);
1632  return AVERROR_INVALIDDATA;
1633  }
1634  c->status[channel].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1635  c->status[channel].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1636  c->status[channel].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1637  c->status[channel].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1638  c->status[channel].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1639  *samples++ = c->status[channel].sample2;
1640  *samples++ = c->status[channel].sample1;
1641  for (int n = (nb_samples - 2) >> 1; n > 0; n--) {
1642  int byte = bytestream2_get_byteu(&gb);
1643  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte >> 4 );
1644  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte & 0x0F);
1645  }
1646  }
1647  } else {
1648  block_predictor = bytestream2_get_byteu(&gb);
1649  if (block_predictor > 6) {
1650  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
1651  block_predictor);
1652  return AVERROR_INVALIDDATA;
1653  }
1654  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1655  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1656  if (st) {
1657  block_predictor = bytestream2_get_byteu(&gb);
1658  if (block_predictor > 6) {
1659  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
1660  block_predictor);
1661  return AVERROR_INVALIDDATA;
1662  }
1663  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1664  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1665  }
1666  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1667  if (st){
1668  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1669  }
1670 
1671  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1672  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1673  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1674  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1675 
1676  *samples++ = c->status[0].sample2;
1677  if (st) *samples++ = c->status[1].sample2;
1678  *samples++ = c->status[0].sample1;
1679  if (st) *samples++ = c->status[1].sample1;
1680  for (int n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
1681  int byte = bytestream2_get_byteu(&gb);
1682  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
1683  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
1684  }
1685  }
1686  ) /* End of CASE */
1687  CASE(ADPCM_MTAF,
1688  for (int channel = 0; channel < channels; channel += 2) {
1689  bytestream2_skipu(&gb, 4);
1690  c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
1691  c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
1692  c->status[channel ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1693  bytestream2_skipu(&gb, 2);
1694  c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1695  bytestream2_skipu(&gb, 2);
1696  for (int n = 0; n < nb_samples; n += 2) {
1697  int v = bytestream2_get_byteu(&gb);
1698  samples_p[channel][n ] = adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
1699  samples_p[channel][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4 );
1700  }
1701  for (int n = 0; n < nb_samples; n += 2) {
1702  int v = bytestream2_get_byteu(&gb);
1703  samples_p[channel + 1][n ] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
1704  samples_p[channel + 1][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4 );
1705  }
1706  }
1707  ) /* End of CASE */
1708  CASE(ADPCM_IMA_DK4,
1709  for (int channel = 0; channel < channels; channel++) {
1710  ADPCMChannelStatus *cs = &c->status[channel];
1711  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
1712  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1713  if (cs->step_index > 88u){
1714  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1715  channel, cs->step_index);
1716  return AVERROR_INVALIDDATA;
1717  }
1718  }
1719  for (int n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1720  int v = bytestream2_get_byteu(&gb);
1721  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
1722  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1723  }
1724  ) /* End of CASE */
1725 
1726  /* DK3 ADPCM support macro */
1727 #define DK3_GET_NEXT_NIBBLE() \
1728  if (decode_top_nibble_next) { \
1729  nibble = last_byte >> 4; \
1730  decode_top_nibble_next = 0; \
1731  } else { \
1732  last_byte = bytestream2_get_byteu(&gb); \
1733  nibble = last_byte & 0x0F; \
1734  decode_top_nibble_next = 1; \
1735  }
1736  CASE(ADPCM_IMA_DK3,
1737  int last_byte = 0;
1738  int nibble;
1739  int decode_top_nibble_next = 0;
1740  int diff_channel;
1741  const int16_t *samples_end = samples + channels * nb_samples;
1742 
1743  bytestream2_skipu(&gb, 10);
1744  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1745  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1746  c->status[0].step_index = bytestream2_get_byteu(&gb);
1747  c->status[1].step_index = bytestream2_get_byteu(&gb);
1748  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
1749  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
1750  c->status[0].step_index, c->status[1].step_index);
1751  return AVERROR_INVALIDDATA;
1752  }
1753  /* sign extend the predictors */
1754  diff_channel = c->status[1].predictor;
1755 
1756  while (samples < samples_end) {
1757 
1758  /* for this algorithm, c->status[0] is the sum channel and
1759  * c->status[1] is the diff channel */
1760 
1761  /* process the first predictor of the sum channel */
1763  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1764 
1765  /* process the diff channel predictor */
1767  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
1768 
1769  /* process the first pair of stereo PCM samples */
1770  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1771  *samples++ = c->status[0].predictor + c->status[1].predictor;
1772  *samples++ = c->status[0].predictor - c->status[1].predictor;
1773 
1774  /* process the second predictor of the sum channel */
1776  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1777 
1778  /* process the second pair of stereo PCM samples */
1779  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1780  *samples++ = c->status[0].predictor + c->status[1].predictor;
1781  *samples++ = c->status[0].predictor - c->status[1].predictor;
1782  }
1783 
1784  if ((bytestream2_tell(&gb) & 1))
1785  bytestream2_skip(&gb, 1);
1786  ) /* End of CASE */
1787  CASE(ADPCM_IMA_MAGIX,
1788  for (int channel = 0; channel < channels; channel++) {
1789  ADPCMChannelStatus *cs = &c->status[channel];
1790  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1791  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1792  if (cs->step_index > 88u){
1793  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1794  channel, cs->step_index);
1795  return AVERROR_INVALIDDATA;
1796  }
1797  }
1798 
1799  for (int m = 0; m < avctx->block_align-8; m += 8) {
1800  uint32_t v0 = bytestream2_get_le32u(&gb);
1801  uint32_t v1 = bytestream2_get_le32u(&gb);
1802 
1803  for (int n = 8; n > 0; n--, v0 >>= 4, v1 >>= 4, samples += 2) {
1804  samples[0] = adpcm_ima_expand_nibble(&c->status[0], v0 & 15, 3);
1805  samples[1] = adpcm_ima_expand_nibble(&c->status[1], v1 & 15, 3);
1806  }
1807  }
1808  ) /* End of CASE */
1809  CASE(ADPCM_IMA_ISS,
1810  for (int channel = 0; channel < channels; channel++) {
1811  ADPCMChannelStatus *cs = &c->status[channel];
1812  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1813  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1814  if (cs->step_index > 88u){
1815  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1816  channel, cs->step_index);
1817  return AVERROR_INVALIDDATA;
1818  }
1819  }
1820 
1821  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1822  int v1, v2;
1823  int v = bytestream2_get_byteu(&gb);
1824  /* nibbles are swapped for mono */
1825  if (st) {
1826  v1 = v >> 4;
1827  v2 = v & 0x0F;
1828  } else {
1829  v2 = v >> 4;
1830  v1 = v & 0x0F;
1831  }
1832  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
1833  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
1834  }
1835  ) /* End of CASE */
1836  CASE(ADPCM_IMA_MOFLEX,
1837  for (int channel = 0; channel < channels; channel++) {
1838  ADPCMChannelStatus *cs = &c->status[channel];
1839  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1840  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1841  if (cs->step_index > 88u){
1842  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1843  channel, cs->step_index);
1844  return AVERROR_INVALIDDATA;
1845  }
1846  }
1847 
1848  for (int subframe = 0; subframe < nb_samples / 256; subframe++) {
1849  for (int channel = 0; channel < channels; channel++) {
1850  samples = samples_p[channel] + 256 * subframe;
1851  for (int n = 0; n < 256; n += 2) {
1852  int v = bytestream2_get_byteu(&gb);
1853  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1854  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1855  }
1856  }
1857  }
1858  ) /* End of CASE */
1859  CASE(ADPCM_IMA_DAT4,
1860  for (int channel = 0; channel < channels; channel++) {
1861  ADPCMChannelStatus *cs = &c->status[channel];
1862  samples = samples_p[channel];
1863  bytestream2_skip(&gb, 4);
1864  for (int n = 0; n < nb_samples; n += 2) {
1865  int v = bytestream2_get_byteu(&gb);
1866  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1867  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1868  }
1869  }
1870  ) /* End of CASE */
1871  CASE(ADPCM_IMA_APC,
1872  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1873  int v = bytestream2_get_byteu(&gb);
1874  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
1875  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1876  }
1877  ) /* End of CASE */
1878  CASE(ADPCM_IMA_HVQM2,
1879  int format = bytestream2_get_be16(&gb);
1880 
1881  bytestream2_skip(&gb, 4);
1882  decode_adpcm_ima_hvqm2(avctx, samples, nb_samples, format, &gb);
1883  ) /* End of CASE */
1884  CASE(ADPCM_IMA_HVQM4,
1885  int format = bytestream2_get_be16(&gb);
1886 
1887  bytestream2_skip(&gb, 4);
1888  decode_adpcm_ima_hvqm4(avctx, samples, nb_samples, format, &gb);
1889  ) /* End of CASE */
1890  CASE(ADPCM_IMA_SSI,
1891  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1892  int v = bytestream2_get_byteu(&gb);
1893  *samples++ = ff_adpcm_ima_qt_expand_nibble(&c->status[0], v >> 4 );
1894  *samples++ = ff_adpcm_ima_qt_expand_nibble(&c->status[st], v & 0x0F);
1895  }
1896  ) /* End of CASE */
1897  CASE(ADPCM_IMA_APM,
1898  for (int n = nb_samples / 2; n > 0; n--) {
1899  for (int channel = 0; channel < channels; channel++) {
1900  int v = bytestream2_get_byteu(&gb);
1901  *samples++ = ff_adpcm_ima_qt_expand_nibble(&c->status[channel], v >> 4 );
1902  samples[st] = ff_adpcm_ima_qt_expand_nibble(&c->status[channel], v & 0x0F);
1903  }
1904  samples += channels;
1905  }
1906  ) /* End of CASE */
1907  CASE(ADPCM_IMA_ALP,
1908  for (int n = nb_samples / 2; n > 0; n--) {
1909  for (int channel = 0; channel < channels; channel++) {
1910  int v = bytestream2_get_byteu(&gb);
1911  *samples++ = adpcm_ima_alp_expand_nibble(&c->status[channel], v >> 4 , 2);
1912  samples[st] = adpcm_ima_alp_expand_nibble(&c->status[channel], v & 0x0F, 2);
1913  }
1914  samples += channels;
1915  }
1916  ) /* End of CASE */
1917  CASE(ADPCM_IMA_CUNNING,
1918  for (int channel = 0; channel < channels; channel++) {
1919  int16_t *smp = samples_p[channel];
1920  for (int n = 0; n < nb_samples / 2; n++) {
1921  int v = bytestream2_get_byteu(&gb);
1922  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v & 0x0F);
1923  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v >> 4);
1924  }
1925  }
1926  ) /* End of CASE */
1927  CASE(ADPCM_IMA_OKI,
1928  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1929  int v = bytestream2_get_byteu(&gb);
1930  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
1931  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
1932  }
1933  ) /* End of CASE */
1934  CASE(ADPCM_IMA_RAD,
1935  for (int channel = 0; channel < channels; channel++) {
1936  ADPCMChannelStatus *cs = &c->status[channel];
1937  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1938  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1939  if (cs->step_index > 88u){
1940  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1941  channel, cs->step_index);
1942  return AVERROR_INVALIDDATA;
1943  }
1944  }
1945  for (int n = 0; n < nb_samples / 2; n++) {
1946  int byte[2];
1947 
1948  byte[0] = bytestream2_get_byteu(&gb);
1949  if (st)
1950  byte[1] = bytestream2_get_byteu(&gb);
1951  for (int channel = 0; channel < channels; channel++) {
1952  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
1953  }
1954  for (int channel = 0; channel < channels; channel++) {
1955  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4 , 3);
1956  }
1957  }
1958  ) /* End of CASE */
1959  CASE(ADPCM_IMA_WS,
1960  if (c->vqa_version == 3) {
1961  for (int channel = 0; channel < channels; channel++) {
1962  int16_t *smp = samples_p[channel];
1963 
1964  for (int n = nb_samples / 2; n > 0; n--) {
1965  int v = bytestream2_get_byteu(&gb);
1966  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1967  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1968  }
1969  }
1970  } else {
1971  for (int n = nb_samples / 2; n > 0; n--) {
1972  for (int channel = 0; channel < channels; channel++) {
1973  int v = bytestream2_get_byteu(&gb);
1974  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1975  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1976  }
1977  samples += channels;
1978  }
1979  }
1980  bytestream2_seek(&gb, 0, SEEK_END);
1981  ) /* End of CASE */
1982  CASE(ADPCM_XMD,
1983  int bytes_remaining, block = 0;
1984  while (bytestream2_get_bytes_left(&gb) >= 21 * channels) {
1985  for (int channel = 0; channel < channels; channel++) {
1986  int16_t *out = samples_p[channel] + block * 32;
1987  int16_t history[2];
1988  uint16_t scale;
1989 
1990  history[1] = sign_extend(bytestream2_get_le16(&gb), 16);
1991  history[0] = sign_extend(bytestream2_get_le16(&gb), 16);
1992  scale = bytestream2_get_le16(&gb);
1993 
1994  out[0] = history[1];
1995  out[1] = history[0];
1996 
1997  for (int n = 0; n < 15; n++) {
1998  unsigned byte = bytestream2_get_byte(&gb);
1999  int32_t nibble[2];
2000 
2001  nibble[0] = sign_extend(byte & 15, 4);
2002  nibble[1] = sign_extend(byte >> 4, 4);
2003 
2004  out[2+n*2] = nibble[0]*scale + ((history[0]*3667 - history[1]*1642) >> 11);
2005  history[1] = history[0];
2006  history[0] = out[2+n*2];
2007 
2008  out[2+n*2+1] = nibble[1]*scale + ((history[0]*3667 - history[1]*1642) >> 11);
2009  history[1] = history[0];
2010  history[0] = out[2+n*2+1];
2011  }
2012  }
2013 
2014  block++;
2015  }
2016  bytes_remaining = bytestream2_get_bytes_left(&gb);
2017  if (bytes_remaining > 0) {
2018  bytestream2_skip(&gb, bytes_remaining);
2019  }
2020  ) /* End of CASE */
2021  CASE(ADPCM_XA,
2022  int16_t *out0 = samples_p[0];
2023  int16_t *out1 = samples_p[1];
2024  int samples_per_block = 28 * (3 - channels) * 4;
2025  int sample_offset = 0;
2026  int bytes_remaining;
2027  while (bytestream2_get_bytes_left(&gb) >= 128) {
2028  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
2029  &c->status[0], &c->status[1],
2030  channels, sample_offset)) < 0)
2031  return ret;
2032  bytestream2_skipu(&gb, 128);
2033  sample_offset += samples_per_block;
2034  }
2035  /* Less than a full block of data left, e.g. when reading from
2036  * 2324 byte per sector XA; the remainder is padding */
2037  bytes_remaining = bytestream2_get_bytes_left(&gb);
2038  if (bytes_remaining > 0) {
2039  bytestream2_skip(&gb, bytes_remaining);
2040  }
2041  ) /* End of CASE */
2042  CASE(ADPCM_IMA_ESCAPE,
2043  for (int n = nb_samples >> (1 - st); n > 0; n--) {
2044  int byte = bytestream2_get_byteu(&gb);
2045  *samples++ = adpcm_ima_escape_expand_nibble(&c->status[0], byte >> 4);
2046  *samples++ = adpcm_ima_escape_expand_nibble(&c->status[st], byte & 0xF);
2047  }
2048  ) /* End of CASE */
2049  CASE(ADPCM_IMA_EA_EACS,
2050  for (int i = 0; i <= st; i++) {
2051  c->status[i].step_index = bytestream2_get_le32u(&gb);
2052  if (c->status[i].step_index > 88u) {
2053  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
2054  i, c->status[i].step_index);
2055  return AVERROR_INVALIDDATA;
2056  }
2057  }
2058  for (int i = 0; i <= st; i++) {
2059  c->status[i].predictor = bytestream2_get_le32u(&gb);
2060  if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
2061  return AVERROR_INVALIDDATA;
2062  }
2063 
2064  for (int n = nb_samples >> (1 - st); n > 0; n--) {
2065  int byte = bytestream2_get_byteu(&gb);
2066  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
2067  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
2068  }
2069  ) /* End of CASE */
2070  CASE(ADPCM_IMA_EA_SEAD,
2071  for (int n = nb_samples >> (1 - st); n > 0; n--) {
2072  int byte = bytestream2_get_byteu(&gb);
2073  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
2074  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
2075  }
2076  ) /* End of CASE */
2077  CASE(ADPCM_EA,
2078  int previous_left_sample, previous_right_sample;
2079  int current_left_sample, current_right_sample;
2080  int next_left_sample, next_right_sample;
2081  int coeff1l, coeff2l, coeff1r, coeff2r;
2082  int shift_left, shift_right;
2083 
2084  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte (stereo) or 15-byte (mono) pieces,
2085  each coding 28 stereo/mono samples. */
2086 
2087  if (channels != 2 && channels != 1)
2088  return AVERROR_INVALIDDATA;
2089 
2090  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
2091  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
2092  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
2093  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
2094 
2095  for (int count1 = 0; count1 < nb_samples / 28; count1++) {
2096  int byte = bytestream2_get_byteu(&gb);
2097  coeff1l = ea_adpcm_table[ byte >> 4 ];
2098  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
2099  coeff1r = ea_adpcm_table[ byte & 0x0F];
2100  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
2101 
2102  if (channels == 2){
2103  byte = bytestream2_get_byteu(&gb);
2104  shift_left = 20 - (byte >> 4);
2105  shift_right = 20 - (byte & 0x0F);
2106  } else{
2107  /* Mono packs the shift into the coefficient byte's lower nibble instead */
2108  shift_left = 20 - (byte & 0x0F);
2109  }
2110 
2111  for (int count2 = 0; count2 < (channels == 2 ? 28 : 14); count2++) {
2112  byte = bytestream2_get_byteu(&gb);
2113  next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
2114 
2115  next_left_sample = (next_left_sample +
2116  (current_left_sample * coeff1l) +
2117  (previous_left_sample * coeff2l) + 0x80) >> 8;
2118 
2119  previous_left_sample = current_left_sample;
2120  current_left_sample = av_clip_int16(next_left_sample);
2121  *samples++ = current_left_sample;
2122 
2123  if (channels == 2){
2124  next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
2125 
2126  next_right_sample = (next_right_sample +
2127  (current_right_sample * coeff1r) +
2128  (previous_right_sample * coeff2r) + 0x80) >> 8;
2129 
2130  previous_right_sample = current_right_sample;
2131  current_right_sample = av_clip_int16(next_right_sample);
2132  *samples++ = current_right_sample;
2133  } else {
2134  next_left_sample = sign_extend(byte, 4) * (1 << shift_left);
2135 
2136  next_left_sample = (next_left_sample +
2137  (current_left_sample * coeff1l) +
2138  (previous_left_sample * coeff2l) + 0x80) >> 8;
2139 
2140  previous_left_sample = current_left_sample;
2141  current_left_sample = av_clip_int16(next_left_sample);
2142 
2143  *samples++ = current_left_sample;
2144  }
2145  }
2146  }
2147  bytestream2_skip(&gb, channels == 2 ? 2 : 3); // Skip terminating NULs
2148  ) /* End of CASE */
2149  CASE(ADPCM_EA_MAXIS_XA,
2150  int coeff[2][2], shift[2];
2151 
2152  for (int channel = 0; channel < channels; channel++) {
2153  int byte = bytestream2_get_byteu(&gb);
2154  for (int i = 0; i < 2; i++)
2155  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
2156  shift[channel] = 20 - (byte & 0x0F);
2157  }
2158  for (int count1 = 0; count1 < nb_samples / 2; count1++) {
2159  int byte[2];
2160 
2161  byte[0] = bytestream2_get_byteu(&gb);
2162  if (st) byte[1] = bytestream2_get_byteu(&gb);
2163  for (int i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
2164  for (int channel = 0; channel < channels; channel++) {
2165  int sample = sign_extend(byte[channel] >> i, 4) * (1 << shift[channel]);
2166  sample = (sample +
2167  c->status[channel].sample1 * coeff[channel][0] +
2168  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
2169  c->status[channel].sample2 = c->status[channel].sample1;
2170  c->status[channel].sample1 = av_clip_int16(sample);
2171  *samples++ = c->status[channel].sample1;
2172  }
2173  }
2174  }
2175  bytestream2_seek(&gb, 0, SEEK_END);
2176  ) /* End of CASE */
2177 #if CONFIG_ADPCM_EA_R1_DECODER || CONFIG_ADPCM_EA_R2_DECODER || CONFIG_ADPCM_EA_R3_DECODER
2180  case AV_CODEC_ID_ADPCM_EA_R3: {
2181  /* channel numbering
2182  2chan: 0=fl, 1=fr
2183  4chan: 0=fl, 1=rl, 2=fr, 3=rr
2184  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
2185  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
2186  int previous_sample, current_sample, next_sample;
2187  int coeff1, coeff2;
2188  int shift;
2189  uint16_t *samplesC;
2190  int count = 0;
2191  int offsets[6];
2192 
2193  for (unsigned channel = 0; channel < channels; channel++)
2194  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
2195  bytestream2_get_le32(&gb)) +
2196  (channels + 1) * 4;
2197 
2198  for (unsigned channel = 0; channel < channels; channel++) {
2199  int count1;
2200 
2201  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
2202  samplesC = samples_p[channel];
2203 
2204  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
2205  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
2206  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
2207  } else {
2208  current_sample = c->status[channel].predictor;
2209  previous_sample = c->status[channel].prev_sample;
2210  }
2211 
2212  for (count1 = 0; count1 < nb_samples / 28; count1++) {
2213  int byte = bytestream2_get_byte(&gb);
2214  if (byte == 0xEE) { /* only seen in R2 and R3 */
2215  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
2216  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
2217 
2218  for (int count2 = 0; count2 < 28; count2++)
2219  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
2220  } else {
2221  coeff1 = ea_adpcm_table[ byte >> 4 ];
2222  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
2223  shift = 20 - (byte & 0x0F);
2224 
2225  for (int count2 = 0; count2 < 28; count2++) {
2226  if (count2 & 1)
2227  next_sample = (unsigned)sign_extend(byte, 4) << shift;
2228  else {
2229  byte = bytestream2_get_byte(&gb);
2230  next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
2231  }
2232 
2233  next_sample += (current_sample * coeff1) +
2234  (previous_sample * coeff2);
2235  next_sample = av_clip_int16(next_sample >> 8);
2236 
2237  previous_sample = current_sample;
2238  current_sample = next_sample;
2239  *samplesC++ = current_sample;
2240  }
2241  }
2242  }
2243  if (!count) {
2244  count = count1;
2245  } else if (count != count1) {
2246  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
2247  count = FFMAX(count, count1);
2248  }
2249 
2250  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
2251  c->status[channel].predictor = current_sample;
2252  c->status[channel].prev_sample = previous_sample;
2253  }
2254  }
2255 
2256  frame->nb_samples = count * 28;
2257  bytestream2_seek(&gb, 0, SEEK_END);
2258  break;
2259  }
2260 #endif /* CONFIG_ADPCM_EA_Rx_DECODER */
2261  CASE(ADPCM_EA_XAS,
2262  for (int channel=0; channel < channels; channel++) {
2263  int coeff[2][4], shift[4];
2264  int16_t *s = samples_p[channel];
2265  for (int n = 0; n < 4; n++, s += 32) {
2266  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
2267  for (int i = 0; i < 2; i++)
2268  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
2269  s[0] = val & ~0x0F;
2270 
2271  val = sign_extend(bytestream2_get_le16u(&gb), 16);
2272  shift[n] = 20 - (val & 0x0F);
2273  s[1] = val & ~0x0F;
2274  }
2275 
2276  for (int m = 2; m < 32; m += 2) {
2277  s = &samples_p[channel][m];
2278  for (int n = 0; n < 4; n++, s += 32) {
2279  int level, pred;
2280  int byte = bytestream2_get_byteu(&gb);
2281 
2282  level = sign_extend(byte >> 4, 4) * (1 << shift[n]);
2283  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
2284  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
2285 
2286  level = sign_extend(byte, 4) * (1 << shift[n]);
2287  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
2288  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
2289  }
2290  }
2291  }
2292  ) /* End of CASE */
2293  CASE(ADPCM_IMA_ACORN,
2294  for (int channel = 0; channel < channels; channel++) {
2295  ADPCMChannelStatus *cs = &c->status[channel];
2296  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
2297  cs->step_index = bytestream2_get_le16u(&gb) & 0xFF;
2298  if (cs->step_index > 88u){
2299  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
2300  channel, cs->step_index);
2301  return AVERROR_INVALIDDATA;
2302  }
2303  }
2304  for (int n = nb_samples >> (1 - st); n > 0; n--) {
2305  int byte = bytestream2_get_byteu(&gb);
2306  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte & 0x0F, 3);
2307  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte >> 4, 3);
2308  }
2309  ) /* End of CASE */
2310  CASE(ADPCM_IMA_AMV,
2311  av_assert0(channels == 1);
2312 
2313  /*
2314  * Header format:
2315  * int16_t predictor;
2316  * uint8_t step_index;
2317  * uint8_t reserved;
2318  * uint32_t frame_size;
2319  *
2320  * Some implementations have step_index as 16-bits, but others
2321  * only use the lower 8 and store garbage in the upper 8.
2322  */
2323  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
2324  c->status[0].step_index = bytestream2_get_byteu(&gb);
2325  bytestream2_skipu(&gb, 5);
2326  if (c->status[0].step_index > 88u) {
2327  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
2328  c->status[0].step_index);
2329  return AVERROR_INVALIDDATA;
2330  }
2331 
2332  for (int n = nb_samples >> 1; n > 0; n--) {
2333  int v = bytestream2_get_byteu(&gb);
2334 
2335  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
2336  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
2337  }
2338 
2339  if (nb_samples & 1) {
2340  int v = bytestream2_get_byteu(&gb);
2341  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
2342 
2343  if (v & 0x0F) {
2344  /* Holds true on all the http://samples.mplayerhq.hu/amv samples. */
2345  av_log(avctx, AV_LOG_WARNING, "Last nibble set on packet with odd sample count.\n");
2346  av_log(avctx, AV_LOG_WARNING, "Sample will be skipped.\n");
2347  }
2348  }
2349  ) /* End of CASE */
2350  CASE(ADPCM_IMA_PDA,
2351  for (int i = 0; i < channels; i++) {
2352  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
2353  c->status[i].step_index = bytestream2_get_byteu(&gb);
2354  bytestream2_skipu(&gb, 1);
2355  if (c->status[i].step_index > 88u) {
2356  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
2357  c->status[i].step_index);
2358  return AVERROR_INVALIDDATA;
2359  }
2360  }
2361 
2362  for (int n = nb_samples >> (1 - st); n > 0; n--) {
2363  int v = bytestream2_get_byteu(&gb);
2364 
2365  *samples++ = ff_adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4 );
2366  *samples++ = ff_adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf);
2367  }
2368  ) /* End of CASE */
2369  CASE(ADPCM_IMA_SMJPEG,
2370  for (int i = 0; i < channels; i++) {
2371  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
2372  c->status[i].step_index = bytestream2_get_byteu(&gb);
2373  bytestream2_skipu(&gb, 1);
2374  if (c->status[i].step_index > 88u) {
2375  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
2376  c->status[i].step_index);
2377  return AVERROR_INVALIDDATA;
2378  }
2379  }
2380 
2381  for (int n = nb_samples >> (1 - st); n > 0; n--) {
2382  int v = bytestream2_get_byteu(&gb);
2383 
2384  *samples++ = ff_adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4 );
2385  *samples++ = ff_adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf);
2386  }
2387  ) /* End of CASE */
2388  CASE(ADPCM_CT,
2389  for (int n = nb_samples >> (1 - st); n > 0; n--) {
2390  int v = bytestream2_get_byteu(&gb);
2391  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
2392  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
2393  }
2394  ) /* End of CASE */
2395 #if CONFIG_ADPCM_SBPRO_2_DECODER || CONFIG_ADPCM_SBPRO_3_DECODER || \
2396  CONFIG_ADPCM_SBPRO_4_DECODER
2400  if (!c->status[0].step_index) {
2401  /* the first byte is a raw sample */
2402  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
2403  if (st)
2404  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
2405  c->status[0].step_index = 1;
2406  nb_samples--;
2407  }
2408  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
2409  for (int n = nb_samples >> (1 - st); n > 0; n--) {
2410  int byte = bytestream2_get_byteu(&gb);
2411  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
2412  byte >> 4, 4, 0);
2413  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
2414  byte & 0x0F, 4, 0);
2415  }
2416  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
2417  for (int n = (nb_samples<<st) / 3; n > 0; n--) {
2418  int byte = bytestream2_get_byteu(&gb);
2419  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
2420  byte >> 5 , 3, 0);
2421  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
2422  (byte >> 2) & 0x07, 3, 0);
2423  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
2424  byte & 0x03, 2, 0);
2425  }
2426  } else {
2427  for (int n = nb_samples >> (2 - st); n > 0; n--) {
2428  int byte = bytestream2_get_byteu(&gb);
2429  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
2430  byte >> 6 , 2, 2);
2431  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
2432  (byte >> 4) & 0x03, 2, 2);
2433  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
2434  (byte >> 2) & 0x03, 2, 2);
2435  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
2436  byte & 0x03, 2, 2);
2437  }
2438  }
2439  break;
2440 #endif /* CONFIG_ADPCM_SBPRO_x_DECODER */
2441  CASE(ADPCM_SWF,
2442  adpcm_swf_decode(avctx, buf, buf_size, samples);
2443  bytestream2_seek(&gb, 0, SEEK_END);
2444  ) /* End of CASE */
2445  CASE(ADPCM_YAMAHA,
2446  for (int n = nb_samples >> (1 - st); n > 0; n--) {
2447  int v = bytestream2_get_byteu(&gb);
2448  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
2449  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
2450  }
2451  ) /* End of CASE */
2452  CASE(ADPCM_AICA,
2453  for (int channel = 0; channel < channels; channel++) {
2454  samples = samples_p[channel];
2455  for (int n = nb_samples >> 1; n > 0; n--) {
2456  int v = bytestream2_get_byteu(&gb);
2457  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
2458  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4 );
2459  }
2460  }
2461  ) /* End of CASE */
2462  CASE(ADPCM_AFC,
2463  int samples_per_block;
2464  int blocks;
2465 
2466  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
2467  samples_per_block = avctx->extradata[0] / 16;
2468  blocks = nb_samples / avctx->extradata[0];
2469  } else {
2470  samples_per_block = nb_samples / 16;
2471  blocks = 1;
2472  }
2473 
2474  for (int m = 0; m < blocks; m++) {
2475  for (int channel = 0; channel < channels; channel++) {
2476  int prev1 = c->status[channel].sample1;
2477  int prev2 = c->status[channel].sample2;
2478 
2479  samples = samples_p[channel] + m * 16;
2480  /* Read in every sample for this channel. */
2481  for (int i = 0; i < samples_per_block; i++) {
2482  int byte = bytestream2_get_byteu(&gb);
2483  int scale = 1 << (byte >> 4);
2484  int index = byte & 0xf;
2485  int factor1 = afc_coeffs[0][index];
2486  int factor2 = afc_coeffs[1][index];
2487 
2488  /* Decode 16 samples. */
2489  for (int n = 0; n < 16; n++) {
2490  int32_t sampledat;
2491 
2492  if (n & 1) {
2493  sampledat = sign_extend(byte, 4);
2494  } else {
2495  byte = bytestream2_get_byteu(&gb);
2496  sampledat = sign_extend(byte >> 4, 4);
2497  }
2498 
2499  sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
2500  sampledat * scale;
2501  *samples = av_clip_int16(sampledat);
2502  prev2 = prev1;
2503  prev1 = *samples++;
2504  }
2505  }
2506 
2507  c->status[channel].sample1 = prev1;
2508  c->status[channel].sample2 = prev2;
2509  }
2510  }
2511  bytestream2_seek(&gb, 0, SEEK_END);
2512  ) /* End of CASE */
2513 #if CONFIG_ADPCM_THP_DECODER || CONFIG_ADPCM_THP_LE_DECODER
2514  case AV_CODEC_ID_ADPCM_THP:
2516  {
2517  int table[14][16];
2518 
2519 #define THP_GET16(g) \
2520  sign_extend( \
2521  avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
2522  bytestream2_get_le16u(&(g)) : \
2523  bytestream2_get_be16u(&(g)), 16)
2524 
2525  if (avctx->extradata) {
2526  GetByteContext tb;
2527  if (avctx->extradata_size < 32 * channels) {
2528  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
2529  return AVERROR_INVALIDDATA;
2530  }
2531 
2532  bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
2533  for (int i = 0; i < channels; i++)
2534  for (int n = 0; n < 16; n++)
2535  table[i][n] = THP_GET16(tb);
2536  } else {
2537  for (int i = 0; i < channels; i++)
2538  for (int n = 0; n < 16; n++)
2539  table[i][n] = THP_GET16(gb);
2540 
2541  if (!c->has_status) {
2542  /* Initialize the previous sample. */
2543  for (int i = 0; i < channels; i++) {
2544  c->status[i].sample1 = THP_GET16(gb);
2545  c->status[i].sample2 = THP_GET16(gb);
2546  }
2547  c->has_status = 1;
2548  } else {
2549  bytestream2_skip(&gb, channels * 4);
2550  }
2551  }
2552 
2553  for (int ch = 0; ch < channels; ch++) {
2554  samples = samples_p[ch];
2555 
2556  /* Read in every sample for this channel. */
2557  for (int i = 0; i < (nb_samples + 13) / 14; i++) {
2558  int byte = bytestream2_get_byteu(&gb);
2559  int index = (byte >> 4) & 7;
2560  unsigned int exp = byte & 0x0F;
2561  int64_t factor1 = table[ch][index * 2];
2562  int64_t factor2 = table[ch][index * 2 + 1];
2563 
2564  /* Decode 14 samples. */
2565  for (int n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
2566  int32_t sampledat;
2567 
2568  if (n & 1) {
2569  sampledat = sign_extend(byte, 4);
2570  } else {
2571  byte = bytestream2_get_byteu(&gb);
2572  sampledat = sign_extend(byte >> 4, 4);
2573  }
2574 
2575  sampledat = ((c->status[ch].sample1 * factor1
2576  + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
2577  *samples = av_clip_int16(sampledat);
2578  c->status[ch].sample2 = c->status[ch].sample1;
2579  c->status[ch].sample1 = *samples++;
2580  }
2581  }
2582  }
2583  break;
2584  }
2585 #endif /* CONFIG_ADPCM_THP(_LE)_DECODER */
2586  CASE(ADPCM_DTK,
2587  for (int channel = 0; channel < channels; channel++) {
2588  samples = samples_p[channel];
2589 
2590  /* Read in every sample for this channel. */
2591  for (int i = 0; i < nb_samples / 28; i++) {
2592  int byte, header;
2593  if (channel)
2594  bytestream2_skipu(&gb, 1);
2595  header = bytestream2_get_byteu(&gb);
2596  bytestream2_skipu(&gb, 3 - channel);
2597 
2598  /* Decode 28 samples. */
2599  for (int n = 0; n < 28; n++) {
2600  int32_t sampledat, prev;
2601 
2602  switch (header >> 4) {
2603  case 1:
2604  prev = (c->status[channel].sample1 * 0x3c);
2605  break;
2606  case 2:
2607  prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
2608  break;
2609  case 3:
2610  prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
2611  break;
2612  default:
2613  prev = 0;
2614  }
2615 
2616  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
2617 
2618  byte = bytestream2_get_byteu(&gb);
2619  if (!channel)
2620  sampledat = sign_extend(byte, 4);
2621  else
2622  sampledat = sign_extend(byte >> 4, 4);
2623 
2624  sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
2625  *samples++ = av_clip_int16(sampledat >> 6);
2626  c->status[channel].sample2 = c->status[channel].sample1;
2627  c->status[channel].sample1 = sampledat;
2628  }
2629  }
2630  if (!channel)
2631  bytestream2_seek(&gb, 0, SEEK_SET);
2632  }
2633  ) /* End of CASE */
2634  CASE(ADPCM_N64,
2635  ADPCMChannelStatus *cs = &c->status[0];
2636  int coefs[8*2*8] = { 0 };
2637 
2638  if (avctx->extradata) {
2639  int version, order, entries;
2641 
2642  bytestream2_init(&cb, avctx->extradata, avctx->extradata_size);
2643 
2644  version = bytestream2_get_be16(&cb);
2645  order = bytestream2_get_be16(&cb);
2646  entries = bytestream2_get_be16(&cb);
2647  if (version != 1 || order != 2 || entries > 8)
2648  return AVERROR_INVALIDDATA;
2649 
2650  for (int n = 0; n < order * entries * 8; n++)
2651  coefs[n] = sign_extend(bytestream2_get_be16(&cb), 16);
2652  }
2653 
2654  for (int block = 0; block < avpkt->size / 9; block++) {
2655  int scale, index, codes[16];
2656  int16_t hist[8] = { 0 };
2657  const int order = 2;
2658  int16_t out[16];
2659 
2660  hist[6] = cs->sample2;
2661  hist[7] = cs->sample1;
2662 
2663  samples = samples_p[0] + block * 16;
2664 
2665  scale = (buf[0] >> 4) & 0xF;
2666  index = (buf[0] >> 0) & 0xF;
2667  scale = 1 << scale;
2668  index = FFMIN(index, 8);
2669 
2670  for (int i = 0, j = 0; i < 16; i += 2, j++) {
2671  int n0 = (buf[j+1] >> 4) & 0xF;
2672  int n1 = (buf[j+1] >> 0) & 0xF;
2673 
2674  if (n0 & 8)
2675  n0 = n0 - 16;
2676  if (n1 & 8)
2677  n1 = n1 - 16;
2678 
2679  codes[i+0] = n0 * scale;
2680  codes[i+1] = n1 * scale;
2681  }
2682 
2683  for (int j = 0; j < 2; j++) {
2684  int *sf_codes = &codes[j*8];
2685  int16_t *sf_out = &out[j*8];
2686 
2687  for (int i = 0; i < 8; i++) {
2688  int sample, delta = 0;
2689 
2690  for (int o = 0; o < order; o++)
2691  delta += coefs[o*8 + i] * hist[(8 - order) + o];
2692 
2693  for (int k = i-1; k > -1; k--) {
2694  for (int o = 1; o < order; o++)
2695  delta += sf_codes[(i-1) - k] * coefs[(o*8) + k];
2696  }
2697 
2698  sample = sf_codes[i] * 2048;
2699  sample = (sample + delta) / 2048;
2701  sf_out[i] = sample;
2702  }
2703 
2704  for (int i = 8 - order; i < 8; i++)
2705  hist[i] = sf_out[i];
2706  }
2707 
2708  memcpy(samples, out, sizeof(out));
2709 
2710  cs->sample2 = hist[6];
2711  cs->sample1 = hist[7];
2712 
2713  buf += 9;
2714  }
2715  bytestream2_seek(&gb, 0, SEEK_END);
2716  ) /* End of CASE */
2717  CASE(ADPCM_PSX,
2718  for (int block = 0; block < avpkt->size / FFMAX(avctx->block_align, 16 * channels); block++) {
2719  int nb_samples_per_block = 28 * FFMAX(avctx->block_align, 16 * channels) / (16 * channels);
2720  for (int channel = 0; channel < channels; channel++) {
2721  samples = samples_p[channel] + block * nb_samples_per_block;
2722  av_assert0((block + 1) * nb_samples_per_block <= nb_samples);
2723 
2724  /* Read in every sample for this channel. */
2725  for (int i = 0; i < nb_samples_per_block / 28; i++) {
2726  int filter, shift, flag, byte;
2727 
2728  filter = bytestream2_get_byteu(&gb);
2729  shift = filter & 0xf;
2730  filter = filter >> 4;
2732  return AVERROR_INVALIDDATA;
2733  flag = bytestream2_get_byteu(&gb) & 0x7;
2734 
2735  /* Decode 28 samples. */
2736  for (int n = 0; n < 28; n++) {
2737  int sample = 0, scale;
2738 
2739  if (n & 1) {
2740  scale = sign_extend(byte >> 4, 4);
2741  } else {
2742  byte = bytestream2_get_byteu(&gb);
2743  scale = sign_extend(byte, 4);
2744  }
2745 
2746  if (flag < 0x07) {
2747  scale = scale * (1 << 12);
2748  sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
2749  }
2751  c->status[channel].sample2 = c->status[channel].sample1;
2752  c->status[channel].sample1 = sample;
2753  }
2754  }
2755  }
2756  }
2757  ) /* End of CASE */
2758  CASE(ADPCM_PSXC,
2759  for (int block = 0; block < avpkt->size / avctx->block_align; block++) {
2760  int nb_samples_per_block = ((avctx->block_align - 1) / channels) * 2;
2761  for (int channel = 0; channel < channels; channel++) {
2762  int filter, shift, byte;
2763 
2764  samples = samples_p[channel] + block * nb_samples_per_block;
2765  av_assert0((block + 1) * nb_samples_per_block <= nb_samples);
2766 
2767  filter = bytestream2_get_byteu(&gb);
2768  shift = filter & 0xf;
2769  filter = filter >> 4;
2771  return AVERROR_INVALIDDATA;
2772 
2773  for (int n = 0; n < nb_samples_per_block; n++) {
2774  int sample = 0, scale;
2775 
2776  if (n & 1) {
2777  scale = sign_extend(byte >> 4, 4);
2778  } else {
2779  byte = bytestream2_get_byteu(&gb);
2780  scale = sign_extend(byte & 0xF, 4);
2781  }
2782 
2783  scale = scale * (1 << 12);
2784  sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
2786  c->status[channel].sample2 = c->status[channel].sample1;
2787  c->status[channel].sample1 = sample;
2788  }
2789  }
2790  }
2791  ) /* End of CASE */
2792  CASE(ADPCM_SANYO,
2793  int (*expand)(ADPCMChannelStatus *c, int bits);
2794  GetBitContext g;
2795 
2796  switch(avctx->bits_per_coded_sample) {
2797  case 3: expand = adpcm_sanyo_expand3; break;
2798  case 4: expand = adpcm_sanyo_expand4; break;
2799  case 5: expand = adpcm_sanyo_expand5; break;
2800  }
2801 
2802  for (int ch = 0; ch < channels; ch++) {
2803  c->status[ch].predictor = sign_extend(bytestream2_get_le16(&gb), 16);
2804  c->status[ch].step = sign_extend(bytestream2_get_le16(&gb), 16);
2805  }
2806 
2808  for (int i = 0; i < nb_samples; i++)
2809  for (int ch = 0; ch < channels; ch++)
2810  samples_p[ch][i] = expand(&c->status[ch], get_bits_le(&g, avctx->bits_per_coded_sample));
2811 
2812  align_get_bits(&g);
2813  bytestream2_skip(&gb, get_bits_count(&g) / 8);
2814  ) /* End of CASE */
2815  CASE(ADPCM_ARGO,
2816  /*
2817  * The format of each block:
2818  * uint8_t left_control;
2819  * uint4_t left_samples[nb_samples];
2820  * ---- and if stereo ----
2821  * uint8_t right_control;
2822  * uint4_t right_samples[nb_samples];
2823  *
2824  * Format of the control byte:
2825  * MSB [SSSSRDRR] LSB
2826  * S = (Shift Amount - 2)
2827  * D = Decoder flag.
2828  * R = Reserved
2829  *
2830  * Each block relies on the previous two samples of each channel.
2831  * They should be 0 initially.
2832  */
2833  for (int block = 0; block < avpkt->size / avctx->block_align; block++) {
2834  for (int channel = 0; channel < avctx->ch_layout.nb_channels; channel++) {
2835  ADPCMChannelStatus *cs = c->status + channel;
2836  int control, shift;
2837 
2838  samples = samples_p[channel] + block * 32;
2839 
2840  /* Get the control byte and decode the samples, 2 at a time. */
2841  control = bytestream2_get_byteu(&gb);
2842  shift = (control >> 4) + 2;
2843 
2844  for (int n = 0; n < 16; n++) {
2845  int sample = bytestream2_get_byteu(&gb);
2846  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 4, shift, control & 0x04);
2847  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 0, shift, control & 0x04);
2848  }
2849  }
2850  }
2851  ) /* End of CASE */
2852  CASE(ADPCM_CIRCUS,
2853  for (int n = 0; n < nb_samples; n++) {
2854  for (int ch = 0; ch < channels; ch++) {
2855  int v = bytestream2_get_byteu(&gb);
2856  *samples++ = adpcm_circus_expand_nibble(&c->status[ch], v);
2857  }
2858  }
2859  ) /* End of CASE */
2860  CASE(ADPCM_ZORK,
2861  for (int n = 0; n < nb_samples * channels; n++) {
2862  int v = bytestream2_get_byteu(&gb);
2863  *samples++ = adpcm_zork_expand_nibble(&c->status[n % channels], v);
2864  }
2865  ) /* End of CASE */
2866  CASE(ADPCM_IMA_MTF,
2867  for (int n = nb_samples / 2; n > 0; n--) {
2868  for (int channel = 0; channel < channels; channel++) {
2869  int v = bytestream2_get_byteu(&gb);
2870  *samples++ = adpcm_ima_mtf_expand_nibble(&c->status[channel], v >> 4);
2871  samples[st] = adpcm_ima_mtf_expand_nibble(&c->status[channel], v & 0x0F);
2872  }
2873  samples += channels;
2874  }
2875  ) /* End of CASE */
2876  default:
2877  av_unreachable("There are cases for all codec ids using adpcm_decode_frame");
2878  }
2879 
2880  if (avpkt->size && bytestream2_tell(&gb) == 0) {
2881  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
2882  return AVERROR_INVALIDDATA;
2883  }
2884 
2885  *got_frame_ptr = 1;
2886 
2887  if (avpkt->size < bytestream2_tell(&gb)) {
2888  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
2889  return avpkt->size;
2890  }
2891 
2892  return bytestream2_tell(&gb);
2893 }
2894 
2896 {
2897  ADPCMDecodeContext *c = avctx->priv_data;
2898 
2899  /* Just nuke the entire state and re-init. */
2900  memset(c, 0, sizeof(ADPCMDecodeContext));
2901 
2902  switch(avctx->codec_id) {
2903  case AV_CODEC_ID_ADPCM_CT:
2904  c->status[0].step = c->status[1].step = 511;
2905  break;
2906 
2908  if (avctx->extradata && avctx->extradata_size >= 8) {
2909  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18);
2910  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
2911  }
2912  break;
2913 
2915  if (avctx->extradata && avctx->extradata_size >= 28) {
2916  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 16), 18);
2917  c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 20), 0, 88);
2918  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
2919  c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 8), 0, 88);
2920  }
2921  break;
2922 
2924  if (avctx->extradata && avctx->extradata_size >= 2)
2925  c->vqa_version = AV_RL16(avctx->extradata);
2926  break;
2927  default:
2928  /* Other codecs may want to handle this during decoding. */
2929  c->has_status = 0;
2930  return;
2931  }
2932 
2933  c->has_status = 1;
2934 }
2935 
2936 
2944 
2945 #define ADPCM_DECODER_0(id_, sample_fmts_, name_, long_name_)
2946 #define ADPCM_DECODER_1(id_, sample_fmts_, name_, long_name_) \
2947 const FFCodec ff_ ## name_ ## _decoder = { \
2948  .p.name = #name_, \
2949  CODEC_LONG_NAME(long_name_), \
2950  .p.type = AVMEDIA_TYPE_AUDIO, \
2951  .p.id = id_, \
2952  .p.capabilities = AV_CODEC_CAP_DR1, \
2953  CODEC_SAMPLEFMTS_ARRAY(sample_fmts_), \
2954  .priv_data_size = sizeof(ADPCMDecodeContext), \
2955  .init = adpcm_decode_init, \
2956  FF_CODEC_DECODE_CB(adpcm_decode_frame), \
2957  .flush = adpcm_flush, \
2958 };
2959 #define ADPCM_DECODER_2(enabled, codec_id, name, sample_fmts, long_name) \
2960  ADPCM_DECODER_ ## enabled(codec_id, name, sample_fmts, long_name)
2961 #define ADPCM_DECODER_3(config, codec_id, name, sample_fmts, long_name) \
2962  ADPCM_DECODER_2(config, codec_id, name, sample_fmts, long_name)
2963 #define ADPCM_DECODER(codec, name, sample_fmts, long_name) \
2964  ADPCM_DECODER_3(CONFIG_ ## codec ## _DECODER, AV_CODEC_ID_ ## codec, \
2965  name, sample_fmts, long_name)
2966 
2967 /* Note: Do not forget to add new entries to the Makefile as well. */
2968 ADPCM_DECODER(ADPCM_4XM, sample_fmts_s16p, adpcm_4xm, "ADPCM 4X Movie")
2969 ADPCM_DECODER(ADPCM_AFC, sample_fmts_s16p, adpcm_afc, "ADPCM Nintendo Gamecube AFC")
2970 ADPCM_DECODER(ADPCM_AGM, sample_fmts_s16, adpcm_agm, "ADPCM AmuseGraphics Movie")
2971 ADPCM_DECODER(ADPCM_AICA, sample_fmts_s16p, adpcm_aica, "ADPCM Yamaha AICA")
2972 ADPCM_DECODER(ADPCM_ARGO, sample_fmts_s16p, adpcm_argo, "ADPCM Argonaut Games")
2973 ADPCM_DECODER(ADPCM_CIRCUS, sample_fmts_s16, adpcm_circus, "ADPCM Circus")
2974 ADPCM_DECODER(ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology")
2975 ADPCM_DECODER(ADPCM_DTK, sample_fmts_s16p, adpcm_dtk, "ADPCM Nintendo Gamecube DTK")
2976 ADPCM_DECODER(ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts")
2977 ADPCM_DECODER(ADPCM_EA_MAXIS_XA, sample_fmts_s16, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA")
2978 ADPCM_DECODER(ADPCM_EA_R1, sample_fmts_s16p, adpcm_ea_r1, "ADPCM Electronic Arts R1")
2979 ADPCM_DECODER(ADPCM_EA_R2, sample_fmts_s16p, adpcm_ea_r2, "ADPCM Electronic Arts R2")
2980 ADPCM_DECODER(ADPCM_EA_R3, sample_fmts_s16p, adpcm_ea_r3, "ADPCM Electronic Arts R3")
2981 ADPCM_DECODER(ADPCM_EA_XAS, sample_fmts_s16p, adpcm_ea_xas, "ADPCM Electronic Arts XAS")
2982 ADPCM_DECODER(ADPCM_IMA_ACORN, sample_fmts_s16, adpcm_ima_acorn, "ADPCM IMA Acorn Replay")
2983 ADPCM_DECODER(ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV")
2984 ADPCM_DECODER(ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC")
2985 ADPCM_DECODER(ADPCM_IMA_APM, sample_fmts_s16, adpcm_ima_apm, "ADPCM IMA Ubisoft APM")
2986 ADPCM_DECODER(ADPCM_IMA_CUNNING, sample_fmts_s16p, adpcm_ima_cunning, "ADPCM IMA Cunning Developments")
2987 ADPCM_DECODER(ADPCM_IMA_DAT4, sample_fmts_s16, adpcm_ima_dat4, "ADPCM IMA Eurocom DAT4")
2988 ADPCM_DECODER(ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3")
2989 ADPCM_DECODER(ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4")
2990 ADPCM_DECODER(ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS")
2991 ADPCM_DECODER(ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD")
2992 ADPCM_DECODER(ADPCM_IMA_ESCAPE, sample_fmts_s16, adpcm_ima_escape, "ADPCM IMA Acorn Escape")
2993 ADPCM_DECODER(ADPCM_IMA_HVQM2, sample_fmts_s16, adpcm_ima_hvqm2, "ADPCM IMA HVQM2")
2994 ADPCM_DECODER(ADPCM_IMA_HVQM4, sample_fmts_s16, adpcm_ima_hvqm4, "ADPCM IMA HVQM4")
2995 ADPCM_DECODER(ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS")
2996 ADPCM_DECODER(ADPCM_IMA_MAGIX, sample_fmts_s16, adpcm_ima_magix, "ADPCM IMA Magix")
2997 ADPCM_DECODER(ADPCM_IMA_MOFLEX, sample_fmts_s16p, adpcm_ima_moflex, "ADPCM IMA MobiClip MOFLEX")
2998 ADPCM_DECODER(ADPCM_IMA_MTF, sample_fmts_s16, adpcm_ima_mtf, "ADPCM IMA Capcom's MT Framework")
2999 ADPCM_DECODER(ADPCM_IMA_OKI, sample_fmts_s16, adpcm_ima_oki, "ADPCM IMA Dialogic OKI")
3000 ADPCM_DECODER(ADPCM_IMA_PDA, sample_fmts_s16, adpcm_ima_pda, "ADPCM IMA PlayDate")
3001 ADPCM_DECODER(ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime")
3002 ADPCM_DECODER(ADPCM_IMA_RAD, sample_fmts_s16, adpcm_ima_rad, "ADPCM IMA Radical")
3003 ADPCM_DECODER(ADPCM_IMA_SSI, sample_fmts_s16, adpcm_ima_ssi, "ADPCM IMA Simon & Schuster Interactive")
3004 ADPCM_DECODER(ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG")
3005 ADPCM_DECODER(ADPCM_IMA_ALP, sample_fmts_s16, adpcm_ima_alp, "ADPCM IMA High Voltage Software ALP")
3006 ADPCM_DECODER(ADPCM_IMA_WAV, sample_fmts_s16p, adpcm_ima_wav, "ADPCM IMA WAV")
3007 ADPCM_DECODER(ADPCM_IMA_WS, sample_fmts_both, adpcm_ima_ws, "ADPCM IMA Westwood")
3008 ADPCM_DECODER(ADPCM_IMA_XBOX, sample_fmts_s16p, adpcm_ima_xbox, "ADPCM IMA Xbox")
3009 ADPCM_DECODER(ADPCM_MS, sample_fmts_both, adpcm_ms, "ADPCM Microsoft")
3010 ADPCM_DECODER(ADPCM_MTAF, sample_fmts_s16p, adpcm_mtaf, "ADPCM MTAF")
3011 ADPCM_DECODER(ADPCM_N64, sample_fmts_s16p, adpcm_n64, "ADPCM Silicon Graphics N64")
3012 ADPCM_DECODER(ADPCM_PSX, sample_fmts_s16p, adpcm_psx, "ADPCM Playstation")
3013 ADPCM_DECODER(ADPCM_PSXC, sample_fmts_s16p, adpcm_psxc, "ADPCM Playstation C")
3014 ADPCM_DECODER(ADPCM_SANYO, sample_fmts_s16p, adpcm_sanyo, "ADPCM Sanyo")
3015 ADPCM_DECODER(ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit")
3016 ADPCM_DECODER(ADPCM_SBPRO_3, sample_fmts_s16, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit")
3017 ADPCM_DECODER(ADPCM_SBPRO_4, sample_fmts_s16, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit")
3018 ADPCM_DECODER(ADPCM_SWF, sample_fmts_s16, adpcm_swf, "ADPCM Shockwave Flash")
3019 ADPCM_DECODER(ADPCM_THP_LE, sample_fmts_s16p, adpcm_thp_le, "ADPCM Nintendo THP (little-endian)")
3020 ADPCM_DECODER(ADPCM_THP, sample_fmts_s16p, adpcm_thp, "ADPCM Nintendo THP")
3021 ADPCM_DECODER(ADPCM_XA, sample_fmts_s16p, adpcm_xa, "ADPCM CDROM XA")
3022 ADPCM_DECODER(ADPCM_XMD, sample_fmts_s16p, adpcm_xmd, "ADPCM Konami XMD")
3023 ADPCM_DECODER(ADPCM_YAMAHA, sample_fmts_s16, adpcm_yamaha, "ADPCM Yamaha")
3024 ADPCM_DECODER(ADPCM_ZORK, sample_fmts_s16, adpcm_zork, "ADPCM Zork")
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AV_CODEC_ID_ADPCM_MS
@ AV_CODEC_ID_ADPCM_MS
Definition: codec_id.h:382
adpcm_index_table5
static const int8_t adpcm_index_table5[32]
Definition: adpcm.c:140
DK3_GET_NEXT_NIBBLE
#define DK3_GET_NEXT_NIBBLE()
AV_CODEC_ID_ADPCM_IMA_QT
@ AV_CODEC_ID_ADPCM_IMA_QT
Definition: codec_id.h:376
level
uint8_t level
Definition: svq3.c:208
av_clip
#define av_clip
Definition: common.h:100
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(const GetByteContext *g)
Definition: bytestream.h:158
out
FILE * out
Definition: movenc.c:55
AV_CODEC_ID_ADPCM_DTK
@ AV_CODEC_ID_ADPCM_DTK
Definition: codec_id.h:409
ADPCMChannelStatus::step_index
int16_t step_index
Definition: adpcm.h:33
cb
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:247
GetByteContext
Definition: bytestream.h:33
bytestream2_tell
static av_always_inline int bytestream2_tell(const GetByteContext *g)
Definition: bytestream.h:192
R3
#define R3
Definition: simple_idct.c:168
zork_index_table
static const int8_t zork_index_table[8]
Definition: adpcm.c:235
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:124
ff_adpcm_AdaptationTable
const int16_t ff_adpcm_AdaptationTable[]
Definition: adpcm_data.c:54
AV_CODEC_ID_ADPCM_N64
@ AV_CODEC_ID_ADPCM_N64
Definition: codec_id.h:432
bytestream2_skipu
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
int64_t
long long int64_t
Definition: coverity.c:34
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:250
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
u
#define u(width, name, range_min, range_max)
Definition: cbs_apv.c:68
AV_CODEC_ID_ADPCM_IMA_CUNNING
@ AV_CODEC_ID_ADPCM_IMA_CUNNING
Definition: codec_id.h:424
AVPacket::data
uint8_t * data
Definition: packet.h:558
table
static const uint16_t table[]
Definition: prosumer.c:203
AV_CODEC_ID_ADPCM_EA_R3
@ AV_CODEC_ID_ADPCM_EA_R3
Definition: codec_id.h:397
AV_CODEC_ID_ADPCM_AICA
@ AV_CODEC_ID_ADPCM_AICA
Definition: codec_id.h:414
AV_CODEC_ID_ADPCM_IMA_OKI
@ AV_CODEC_ID_ADPCM_IMA_OKI
Definition: codec_id.h:408
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
R1
#define R1
Definition: simple_idct.c:166
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:329
AV_CODEC_ID_ADPCM_XMD
@ AV_CODEC_ID_ADPCM_XMD
Definition: codec_id.h:427
AV_CODEC_ID_ADPCM_IMA_ESCAPE
@ AV_CODEC_ID_ADPCM_IMA_ESCAPE
Definition: codec_id.h:437
adpcm_sanyo_expand4
static int adpcm_sanyo_expand4(ADPCMChannelStatus *c, int bits)
Definition: adpcm.c:1028
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:512
AV_CODEC_ID_ADPCM_THP_LE
@ AV_CODEC_ID_ADPCM_THP_LE
Definition: codec_id.h:412
adpcm_sbpro_expand_nibble
static int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
Definition: adpcm.c:723
bit
#define bit(string, value)
Definition: cbs_mpeg2.c:56
AV_CODEC_ID_ADPCM_CT
@ AV_CODEC_ID_ADPCM_CT
Definition: codec_id.h:388
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:333
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:440
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:1039
decode_adpcm_ima_hvqm4
static void decode_adpcm_ima_hvqm4(AVCodecContext *avctx, int16_t *outbuf, int samples_to_do, int frame_format, GetByteContext *gb)
Definition: adpcm.c:619
GetBitContext
Definition: get_bits.h:109
adpcm_ima_mtf_expand_nibble
static int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:495
adpcm_ima_expand_nibble
static int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
Definition: adpcm.c:446
AV_CODEC_ID_ADPCM_PSXC
@ AV_CODEC_ID_ADPCM_PSXC
Definition: codec_id.h:435
val
static double val(void *priv, double ch)
Definition: aeval.c:77
ff_adpcm_ima_block_sizes
static const uint8_t ff_adpcm_ima_block_sizes[4]
Definition: adpcm_data.h:31
ff_adpcm_ima_qt_expand_nibble
int16_t ff_adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:556
AV_CODEC_ID_ADPCM_SBPRO_2
@ AV_CODEC_ID_ADPCM_SBPRO_2
Definition: codec_id.h:393
C
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
Definition: writing_filters.txt:58
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:100
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:539
sample_fmts_s16p
static enum AVSampleFormat sample_fmts_s16p[]
Definition: adpcm.c:2939
adpcm_ima_alp_expand_nibble
static int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
Definition: adpcm.c:472
adpcm_yamaha_expand_nibble
static int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:743
ADPCMChannelStatus::sample1
int sample1
Definition: adpcm.h:39
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:515
AV_CODEC_ID_ADPCM_IMA_ACORN
@ AV_CODEC_ID_ADPCM_IMA_ACORN
Definition: codec_id.h:426
decode_adpcm_ima_hvqm2
static void decode_adpcm_ima_hvqm2(AVCodecContext *avctx, int16_t *outbuf, int samples_to_do, int frame_format, GetByteContext *gb)
Definition: adpcm.c:582
adpcm_zork_expand_nibble
static int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:787
adpcm_data.h
s
#define s(width, name)
Definition: cbs_vp9.c:198
offsets
static const int offsets[]
Definition: hevc_pel.c:34
AV_CODEC_ID_ADPCM_AFC
@ AV_CODEC_ID_ADPCM_AFC
Definition: codec_id.h:407
AV_CODEC_ID_ADPCM_IMA_EA_SEAD
@ AV_CODEC_ID_ADPCM_IMA_EA_SEAD
Definition: codec_id.h:399
g
const char * g
Definition: vf_curves.c:128
AV_CODEC_ID_ADPCM_IMA_DK3
@ AV_CODEC_ID_ADPCM_IMA_DK3
Definition: codec_id.h:378
GetByteContext::buffer
const uint8_t * buffer
Definition: bytestream.h:34
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
AV_CODEC_ID_ADPCM_IMA_APC
@ AV_CODEC_ID_ADPCM_IMA_APC
Definition: codec_id.h:405
get_bits_le
static unsigned int get_bits_le(GetBitContext *s, int n)
Definition: get_bits.h:354
get_sbits
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:318
AV_CODEC_ID_ADPCM_IMA_ISS
@ AV_CODEC_ID_ADPCM_IMA_ISS
Definition: codec_id.h:403
channels
channels
Definition: aptx.h:31
decode.h
get_bits.h
AV_RL16
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:94
AV_CODEC_ID_ADPCM_IMA_SMJPEG
@ AV_CODEC_ID_ADPCM_IMA_SMJPEG
Definition: codec_id.h:381
adpcm_ms_expand_nibble
static int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:662
AV_CODEC_ID_ADPCM_IMA_XBOX
@ AV_CODEC_ID_ADPCM_IMA_XBOX
Definition: codec_id.h:428
tmp
static uint8_t tmp[40]
Definition: aes_ctr.c:52
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_adpcm_ima_block_samples
static const uint8_t ff_adpcm_ima_block_samples[4]
Definition: adpcm_data.h:32
sample_fmts_s16
static enum AVSampleFormat sample_fmts_s16[]
Definition: adpcm.c:2937
AV_CODEC_ID_ADPCM_EA_XAS
@ AV_CODEC_ID_ADPCM_EA_XAS
Definition: codec_id.h:401
av_clip_int16
#define av_clip_int16
Definition: common.h:115
NULL
#define NULL
Definition: coverity.c:32
ADPCM_DECODER
#define ADPCM_DECODER(codec, name, sample_fmts, long_name)
Definition: adpcm.c:2963
bits_left
#define bits_left
Definition: bitstream.h:116
av_clip_intp2
#define av_clip_intp2
Definition: common.h:121
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
format
New swscale design to change SwsGraph is what coordinates multiple passes These can include cascaded scaling error diffusion and so on Or we could have separate passes for the vertical and horizontal scaling In between each SwsPass lies a fully allocated image buffer Graph passes may have different levels of e g we can have a single threaded error diffusion pass following a multi threaded scaling pass SwsGraph is internally recreated whenever the image format
Definition: swscale-v2.txt:14
AV_CODEC_ID_ADPCM_IMA_MAGIX
@ AV_CODEC_ID_ADPCM_IMA_MAGIX
Definition: codec_id.h:434
AV_CODEC_ID_ADPCM_YAMAHA
@ AV_CODEC_ID_ADPCM_YAMAHA
Definition: codec_id.h:390
oki_step_table
static const int16_t oki_step_table[49]
Definition: adpcm.c:219
AV_CODEC_ID_ADPCM_IMA_WS
@ AV_CODEC_ID_ADPCM_IMA_WS
Definition: codec_id.h:380
av_unreachable
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NBDEBUG.
Definition: avassert.h:108
AV_CODEC_ID_ADPCM_IMA_EA_EACS
@ AV_CODEC_ID_ADPCM_IMA_EA_EACS
Definition: codec_id.h:400
AV_CODEC_ID_ADPCM_ARGO
@ AV_CODEC_ID_ADPCM_ARGO
Definition: codec_id.h:418
AV_CODEC_ID_ADPCM_IMA_DK4
@ AV_CODEC_ID_ADPCM_IMA_DK4
Definition: codec_id.h:379
AV_CODEC_ID_ADPCM_IMA_AMV
@ AV_CODEC_ID_ADPCM_IMA_AMV
Definition: codec_id.h:395
abs
#define abs(x)
Definition: cuda_runtime.h:35
ea_adpcm_table
static const int16_t ea_adpcm_table[]
Definition: adpcm.c:97
adpcm_ima_escape_expand_nibble
static int16_t adpcm_ima_escape_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:423
ima_cunning_index_table
static const int8_t ima_cunning_index_table[9]
Definition: adpcm.c:111
exp
int8_t exp
Definition: eval.c:73
ADPCMChannelStatus::sample2
int sample2
Definition: adpcm.h:40
adpcm_sanyo_expand3
static int adpcm_sanyo_expand3(ADPCMChannelStatus *c, int bits)
Definition: adpcm.c:985
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CODEC_ID_ADPCM_XA
@ AV_CODEC_ID_ADPCM_XA
Definition: codec_id.h:384
adpcm_ct_expand_nibble
static int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:702
adpcm.h
adpcm_ima_oki_expand_nibble
static int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:681
adpcm_decode_frame
static int adpcm_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt)
Definition: adpcm.c:1431
AV_CODEC_ID_ADPCM_ZORK
@ AV_CODEC_ID_ADPCM_ZORK
Definition: codec_id.h:420
afc_coeffs
static const int16_t afc_coeffs[2][16]
Definition: adpcm.c:92
adpcm_sanyo_expand5
static int adpcm_sanyo_expand5(ADPCMChannelStatus *c, int bits)
Definition: adpcm.c:1087
AV_CODEC_ID_ADPCM_CIRCUS
@ AV_CODEC_ID_ADPCM_CIRCUS
Definition: codec_id.h:436
ADPCMDecodeContext
Definition: adpcm.c:246
ff_adpcm_yamaha_difflookup
const int8_t ff_adpcm_yamaha_difflookup[]
Definition: adpcm_data.c:74
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1720
AVPacket::size
int size
Definition: packet.h:559
byte
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_WB16 unsigned int_TMPL byte
Definition: bytestream.h:99
codec_internal.h
shift
static int shift(int a, int b)
Definition: bonk.c:261
AV_CODEC_ID_ADPCM_IMA_RAD
@ AV_CODEC_ID_ADPCM_IMA_RAD
Definition: codec_id.h:410
adpcm_ima_cunning_expand_nibble
static int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:511
AV_CODEC_ID_ADPCM_IMA_ALP
@ AV_CODEC_ID_ADPCM_IMA_ALP
Definition: codec_id.h:422
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
bps
unsigned bps
Definition: movenc.c:1958
ff_adpcm_step_table
const int16_t ff_adpcm_step_table[89]
This is the step table.
Definition: adpcm_data.c:39
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1031
get_nb_samples
static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb, int buf_size, int *coded_samples, int *approx_nb_samples)
Get the number of samples (per channel) that will be decoded from the packet.
Definition: adpcm.c:1162
AV_CODEC_ID_ADPCM_IMA_HVQM4
@ AV_CODEC_ID_ADPCM_IMA_HVQM4
Definition: codec_id.h:430
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
sample
#define sample
Definition: flacdsp_template.c:44
R2
#define R2
Definition: simple_idct.c:167
AV_CODEC_ID_ADPCM_SWF
@ AV_CODEC_ID_ADPCM_SWF
Definition: codec_id.h:389
size
int size
Definition: twinvq_data.h:10344
header
static const uint8_t header[24]
Definition: sdr2.c:68
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
attributes.h
xf
#define xf(width, name, var, range_min, range_max, subs,...)
Definition: cbs_av1.c:622
version
version
Definition: libkvazaar.c:315
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:170
av_zero_extend
#define av_zero_extend
Definition: common.h:151
xa_decode
static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1, const uint8_t *in, ADPCMChannelStatus *left, ADPCMChannelStatus *right, int channels, int sample_offset)
Definition: adpcm.c:822
AV_SAMPLE_FMT_S16P
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
Definition: samplefmt.h:64
adpcm_index_table3
static const int8_t adpcm_index_table3[8]
Definition: adpcm.c:135
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1546
sample_fmts_both
static enum AVSampleFormat sample_fmts_both[]
Definition: adpcm.c:2941
AV_CODEC_ID_ADPCM_MTAF
@ AV_CODEC_ID_ADPCM_MTAF
Definition: codec_id.h:416
AV_CODEC_ID_ADPCM_EA_MAXIS_XA
@ AV_CODEC_ID_ADPCM_EA_MAXIS_XA
Definition: codec_id.h:402
ff_adpcm_AdaptCoeff1
const uint8_t ff_adpcm_AdaptCoeff1[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:60
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
ff_adpcm_AdaptCoeff2
const int8_t ff_adpcm_AdaptCoeff2[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:65
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:514
adpcm_index_tables
static const int8_t *const adpcm_index_tables[4]
Definition: adpcm.c:145
MT
#define MT(...)
Definition: codec_desc.c:32
AV_CODEC_ID_ADPCM_IMA_HVQM2
@ AV_CODEC_ID_ADPCM_IMA_HVQM2
Definition: codec_id.h:433
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
delta
float delta
Definition: vorbis_enc_data.h:430
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_CODEC_ID_ADPCM_IMA_APM
@ AV_CODEC_ID_ADPCM_IMA_APM
Definition: codec_id.h:421
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:58
ADPCMDecodeContext::vqa_version
int vqa_version
VQA version.
Definition: adpcm.c:248
AV_CODEC_ID_ADPCM_IMA_DAT4
@ AV_CODEC_ID_ADPCM_IMA_DAT4
Definition: codec_id.h:415
ff_adpcm_argo_expand_nibble
int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
Definition: adpcm.c:968
xa_adpcm_table
static const int8_t xa_adpcm_table[5][2]
Definition: adpcm.c:84
ff_adpcm_index_table
const int8_t ff_adpcm_index_table[16]
Definition: adpcm_data.c:30
AV_CODEC_ID_ADPCM_SANYO
@ AV_CODEC_ID_ADPCM_SANYO
Definition: codec_id.h:429
adpcm_circus_expand_nibble
static int16_t adpcm_circus_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:766
avcodec.h
AV_CODEC_ID_ADPCM_EA
@ AV_CODEC_ID_ADPCM_EA
Definition: codec_id.h:386
adpcm_flush
static void adpcm_flush(AVCodecContext *avctx)
Definition: adpcm.c:2895
AV_CODEC_ID_ADPCM_IMA_MTF
@ AV_CODEC_ID_ADPCM_IMA_MTF
Definition: codec_id.h:423
ret
ret
Definition: filter_design.txt:187
pred
static const float pred[4]
Definition: siprdata.h:259
AVCodecContext::block_align
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs.
Definition: avcodec.h:1057
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:555
flag
#define flag(name)
Definition: cbs_av1.c:496
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... ..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_CODEC_ID_ADPCM_IMA_PDA
@ AV_CODEC_ID_ADPCM_IMA_PDA
Definition: codec_id.h:431
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
adpcm_ima_wav_expand_nibble
static int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
Definition: adpcm.c:531
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AV_CODEC_ID_ADPCM_AGM
@ AV_CODEC_ID_ADPCM_AGM
Definition: codec_id.h:417
mtaf_stepsize
static const int16_t mtaf_stepsize[32][16]
Definition: adpcm.c:152
ff_adpcm_yamaha_indexscale
const int16_t ff_adpcm_yamaha_indexscale[]
Definition: adpcm_data.c:69
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:132
AV_CODEC_ID_ADPCM_EA_R1
@ AV_CODEC_ID_ADPCM_EA_R1
Definition: codec_id.h:396
update
static av_always_inline void update(AVFilterContext *ctx, AVFrame *insamples, int is_silence, int current_sample, int64_t nb_samples_notify, AVRational time_base)
Definition: af_silencedetect.c:78
AV_CODEC_ID_ADPCM_EA_R2
@ AV_CODEC_ID_ADPCM_EA_R2
Definition: codec_id.h:398
temp
else temp
Definition: vf_mcdeint.c:271
samples
Filter the word “frame” indicates either a video frame or a group of audio samples
Definition: filter_design.txt:8
AV_CODEC_ID_ADPCM_THP
@ AV_CODEC_ID_ADPCM_THP
Definition: codec_id.h:394
adpcm_index_table2
static const int8_t adpcm_index_table2[4]
Definition: adpcm.c:130
AV_CODEC_ID_ADPCM_SBPRO_4
@ AV_CODEC_ID_ADPCM_SBPRO_4
Definition: codec_id.h:391
adpcm_swf_decode
static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
Definition: adpcm.c:910
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
expand
static int expand(AVFilterContext *ctx, double *pz, int n, double *coefs)
Definition: af_aiir.c:499
AV_CODEC_ID_ADPCM_IMA_SSI
@ AV_CODEC_ID_ADPCM_IMA_SSI
Definition: codec_id.h:419
adpcm_decode_init
static av_cold int adpcm_decode_init(AVCodecContext *avctx)
Definition: adpcm.c:254
ADPCMDecodeContext::has_status
int has_status
Status flag.
Definition: adpcm.c:249
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:273
AV_CODEC_ID_ADPCM_IMA_MOFLEX
@ AV_CODEC_ID_ADPCM_IMA_MOFLEX
Definition: codec_id.h:425
AVPacket
This structure stores compressed data.
Definition: packet.h:535
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AV_CODEC_ID_ADPCM_IMA_WAV
@ AV_CODEC_ID_ADPCM_IMA_WAV
Definition: codec_id.h:377
int32_t
int32_t
Definition: audioconvert.c:56
bytestream.h
ADPCMChannelStatus::predictor
int predictor
Definition: adpcm.h:32
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
AV_CODEC_ID_ADPCM_4XM
@ AV_CODEC_ID_ADPCM_4XM
Definition: codec_id.h:383
adpcm_agm_expand_nibble
static int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:379
AV_CODEC_ID_ADPCM_PSX
@ AV_CODEC_ID_ADPCM_PSX
Definition: codec_id.h:413
adpcm_mtaf_expand_nibble
static int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:757
CASE
#define CASE(codec,...)
Definition: adpcm.c:80
ima_cunning_step_table
static const int16_t ima_cunning_step_table[61]
Definition: adpcm.c:121
ADPCMChannelStatus
Definition: adpcm.h:31
mtf_index_table
static const int8_t mtf_index_table[16]
Definition: adpcm.c:239
skip
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
Definition: bitstream_template.h:383
channel
channel
Definition: ebur128.h:39
AV_CODEC_ID_ADPCM_SBPRO_3
@ AV_CODEC_ID_ADPCM_SBPRO_3
Definition: codec_id.h:392
ADPCMDecodeContext::status
ADPCMChannelStatus status[14]
Definition: adpcm.c:247
swf_index_tables
static const int8_t swf_index_tables[4][16]
Definition: adpcm.c:228