/*
 * MidiVid MV30 decoder
 *
 * Copyright (c) 2020 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stddef.h>
#include <string.h>

#include "libavutil/attributes.h"
#include "libavutil/mem.h"
#include "libavutil/thread.h"

#include "avcodec.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "copy_block.h"
#include "decode.h"
#include "mathops.h"
#include "blockdsp.h"
#include "get_bits.h"
#include "aandcttab.h"

#define CBP_VLC_BITS 9

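/* Decoder state: bit reader, per-frame quantizers, scratch buffers for
 * motion vectors and transform coefficients, the derived dequantization
 * tables, and the previous frame kept as the reference for inter coding. */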
typedef struct MV30Context {
    GetBitContext gb;

    int intra_quant;
    int inter_quant;
    int is_inter;
    int mode_size;
    int nb_mvectors;

    int block[6][64];
    int16_t *mvectors;
    unsigned int mvectors_size;
    int16_t *coeffs;
    unsigned int coeffs_size;

    int16_t intraq_tab[2][64];
    int16_t interq_tab[2][64];

    BlockDSPContext bdsp;
    AVFrame *prev_frame;
} MV30Context;

static VLCElem cbp_tab[1 << CBP_VLC_BITS];

static const uint8_t luma_tab[] = {
    12, 12, 15, 19, 25, 34, 40, 48,
    12, 12, 18, 22, 27, 44, 47, 46,
    17, 18, 21, 26, 35, 46, 52, 47,
    18, 20, 24, 28, 40, 61, 59, 51,
    20, 24, 32, 43, 50, 72, 72, 63,
    25, 31, 42, 48, 58, 72, 81, 75,
    38, 46, 54, 61, 71, 84, 88, 85,
    50, 61, 65, 68, 79, 78, 86, 91,
};

static const uint8_t chroma_tab[] = {
    12, 16, 24, 47, 99, 99, 99, 99,
    16, 21, 26, 66, 99, 99, 99, 99,
    24, 26, 56, 99, 99, 99, 99, 99,
    47, 66, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
};

static const uint8_t zigzag[] = {
     0,  1,  8,  9, 16,  2,  3, 10,
    17, 24, 32, 25, 18, 11,  4,  5,
    12, 19, 26, 33, 40, 48, 41, 34,
    27, 20, 13,  6,  7, 14, 21, 28,
    35, 42, 49, 56, 57, 50, 43, 36,
    29, 22, 15, 23, 30, 37, 44, 51,
    58, 59, 52, 45, 38, 31, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63,
};

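/* Build a dequantization table from the 8-bit base table and a quality
 * value using the JPEG-style mapping (5000/q for q < 50, 200 - 2q above),
 * then fold in the AAN IDCT scale factors so no extra scaling is needed
 * during the inverse transform. */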
static void get_qtable(int16_t *table, int quant, const uint8_t *quant_tab)
{
    int factor = quant < 50 ? 5000 / FFMAX(quant, 1) : 200 - FFMIN(quant, 100) * 2;

    for (int i = 0; i < 64; i++) {
        table[i] = av_clip((quant_tab[i] * factor + 0x32) / 100, 1, 0x7fff);
        table[i] = ((int)ff_aanscales[i] * (int)table[i] + 0x800) >> 12;
    }
}

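/* One AAN-style 8-point inverse DCT pass over a row (step = 1) or a
 * column (step = 8). The multipliers 362, 473, 277 and 669 are the usual
 * AAN butterfly constants in fixed point scaled by 256; unsigned
 * arithmetic keeps intermediate overflow well defined. */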
static inline void idct_1d(unsigned *blk, int step)
{
    const unsigned t0 = blk[0 * step] + blk[4 * step];
    const unsigned t1 = blk[0 * step] - blk[4 * step];
    const unsigned t2 = blk[2 * step] + blk[6 * step];
    const unsigned t3 = ((int)((blk[2 * step] - blk[6 * step]) * 362U) >> 8) - t2;
    const unsigned t4 = t0 + t2;
    const unsigned t5 = t0 - t2;
    const unsigned t6 = t1 + t3;
    const unsigned t7 = t1 - t3;
    const unsigned t8 = blk[5 * step] + blk[3 * step];
    const unsigned t9 = blk[5 * step] - blk[3 * step];
    const unsigned tA = blk[1 * step] + blk[7 * step];
    const unsigned tB = blk[1 * step] - blk[7 * step];
    const unsigned tC = t8 + tA;
    const unsigned tD = (int)((tB + t9) * 473U) >> 8;
    const unsigned tE = (((int)(t9 * -669U) >> 8) - tC) + tD;
    const unsigned tF = ((int)((tA - t8) * 362U) >> 8) - tE;
    const unsigned t10 = (((int)(tB * 277U) >> 8) - tD) + tF;

    blk[0 * step] = t4 + tC;
    blk[1 * step] = t6 + tE;
    blk[2 * step] = t7 + tF;
    blk[3 * step] = t5 - t10;
    blk[4 * step] = t5 + t10;
    blk[5 * step] = t7 - tF;
    blk[6 * step] = t6 - tE;
    blk[7 * step] = t4 - tC;
}

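/* Full 8x8 IDCT writing directly to the destination plane: columns first
 * (with a shortcut that simply replicates the DC term when a column has no
 * AC coefficients), then rows, with a >>5 descale and a +128 bias on
 * output. */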
static void idct_put(uint8_t *dst, int stride, int *block)
{
    for (int i = 0; i < 8; i++) {
        if ((block[0x08 + i] |
             block[0x10 + i] |
             block[0x18 + i] |
             block[0x20 + i] |
             block[0x28 + i] |
             block[0x30 + i] |
             block[0x38 + i]) == 0) {
            block[0x08 + i] = block[i];
            block[0x10 + i] = block[i];
            block[0x18 + i] = block[i];
            block[0x20 + i] = block[i];
            block[0x28 + i] = block[i];
            block[0x30 + i] = block[i];
            block[0x38 + i] = block[i];
        } else {
            idct_1d(block + i, 8);
        }
    }

    for (int i = 0; i < 8; i++) {
        idct_1d(block, 1);
        for (int j = 0; j < 8; j++)
            dst[j] = av_clip_uint8((block[j] >> 5) + 128);
        block += 8;
        dst += stride;
    }
}

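/* Same as idct_put(), but the reconstructed residual is added to the
 * prediction read from src instead of a flat +128 bias. */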
static void idct_add(uint8_t *dst, int stride,
                     const uint8_t *src, int in_linesize, int *block)
{
    for (int i = 0; i < 8; i++) {
        if ((block[0x08 + i] |
             block[0x10 + i] |
             block[0x18 + i] |
             block[0x20 + i] |
             block[0x28 + i] |
             block[0x30 + i] |
             block[0x38 + i]) == 0) {
            block[0x08 + i] = block[i];
            block[0x10 + i] = block[i];
            block[0x18 + i] = block[i];
            block[0x20 + i] = block[i];
            block[0x28 + i] = block[i];
            block[0x30 + i] = block[i];
            block[0x38 + i] = block[i];
        } else {
            idct_1d(block + i, 8);
        }
    }

    for (int i = 0; i < 8; i++) {
        idct_1d(block, 1);
        for (int j = 0; j < 8; j++)
            dst[j] = av_clip_uint8((block[j] >> 5) + src[j]);
        block += 8;
        dst += stride;
        src += in_linesize;
    }
}

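/* Reduced 8-point pass used when only the two lowest-frequency
 * coefficients of a row or column are present; it produces the same
 * result as idct_1d() for that case with far fewer operations. */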
static inline void idct2_1d(int *blk, int step)
{
    const unsigned int t0 = blk[0 * step];
    const unsigned int t1 = blk[1 * step];
    const unsigned int t2 = (int)(t1 * 473U) >> 8;
    const unsigned int t3 = t2 - t1;
    const unsigned int t4 = ((int)(t1 * 362U) >> 8) - t3;
    const unsigned int t5 = (((int)(t1 * 277U) >> 8) - t2) + t4;

    blk[0 * step] = t1 + t0;
    blk[1 * step] = t0 + t3;
    blk[2 * step] = t4 + t0;
    blk[3 * step] = t0 - t5;
    blk[4 * step] = t5 + t0;
    blk[5 * step] = t0 - t4;
    blk[6 * step] = t0 - t3;
    blk[7 * step] = t0 - t1;
}

static void idct2_put(uint8_t *dst, int stride, int *block)
{
    for (int i = 0; i < 2; i++) {
        if ((block[0x08 + i]) == 0) {
            block[0x08 + i] = block[i];
            block[0x10 + i] = block[i];
            block[0x18 + i] = block[i];
            block[0x20 + i] = block[i];
            block[0x28 + i] = block[i];
            block[0x30 + i] = block[i];
            block[0x38 + i] = block[i];
        } else {
            idct2_1d(block + i, 8);
        }
    }

    for (int i = 0; i < 8; i++) {
        if (block[1] == 0) {
            for (int j = 0; j < 8; j++)
                dst[j] = av_clip_uint8((block[0] >> 5) + 128);
        } else {
            idct2_1d(block, 1);
            for (int j = 0; j < 8; j++)
                dst[j] = av_clip_uint8((block[j] >> 5) + 128);
        }
        block += 8;
        dst += stride;
    }
}

static void idct2_add(uint8_t *dst, int stride,
                      const uint8_t *src, int in_linesize,
                      int *block)
{
    for (int i = 0; i < 2; i++) {
        if ((block[0x08 + i]) == 0) {
            block[0x08 + i] = block[i];
            block[0x10 + i] = block[i];
            block[0x18 + i] = block[i];
            block[0x20 + i] = block[i];
            block[0x28 + i] = block[i];
            block[0x30 + i] = block[i];
            block[0x38 + i] = block[i];
        } else {
            idct2_1d(block + i, 8);
        }
    }

    for (int i = 0; i < 8; i++) {
        if (block[1] == 0) {
            for (int j = 0; j < 8; j++)
                dst[j] = av_clip_uint8((block[0] >> 5) + src[j]);
        } else {
            idct2_1d(block, 1);
            for (int j = 0; j < 8; j++)
                dst[j] = av_clip_uint8((block[j] >> 5) + src[j]);
        }
        block += 8;
        dst += stride;
        src += in_linesize;
    }
}

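/* DC-only inter update: add a constant correction to every pixel of the
 * 8x8 prediction block taken from the previous frame. */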
static void update_inter_block(uint8_t *dst, int stride,
                               const uint8_t *src, int in_linesize,
                               int block)
{
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++)
            dst[j] = av_clip_uint8(block + src[j]);
        dst += stride;
        src += in_linesize;
    }
}

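/* Decode one 8x8 intra block. The 2-bit mode selects how much data follows:
 *   0: flat block filled with 128
 *   1: DC-only block (a delta to the running DC predictor pfill)
 *   2: DC plus the three lowest AC coefficients, via the fast idct2 path
 *   3: a full 8x8 coefficient set in zigzag order, via the full IDCT
 */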
static int decode_intra_block(AVCodecContext *avctx, int mode,
                              GetByteContext *gbyte, int16_t *qtab,
                              int *block, int *pfill,
                              uint8_t *dst, int linesize)
{
    MV30Context *s = avctx->priv_data;
    int fill;

    switch (mode) {
    case 0:
        s->bdsp.fill_block_tab[1](dst, 128, linesize, 8);
        break;
    case 1:
        fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
        pfill[0] += fill;
        block[0] = ((int)((unsigned)pfill[0] * qtab[0]) >> 5) + 128;
        s->bdsp.fill_block_tab[1](dst, block[0], linesize, 8);
        break;
    case 2:
        memset(block, 0, sizeof(*block) * 64);
        fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
        pfill[0] += fill;
        block[0] = (unsigned)pfill[0] * qtab[0];
        block[1] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[1];
        block[8] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[8];
        block[9] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[9];
        idct2_put(dst, linesize, block);
        break;
    case 3:
        fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
        pfill[0] += fill;
        block[0] = (unsigned)pfill[0] * qtab[0];
        for (int i = 1; i < 64; i++)
            block[zigzag[i]] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[zigzag[i]];
        idct_put(dst, linesize, block);
        break;
    }

    return 0;
}

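/* Decode one 8x8 inter block on top of the motion-compensated prediction
 * in src, using the same four modes as decode_intra_block(): plain copy,
 * DC correction, partial idct2 residual, or full IDCT residual. */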
static int decode_inter_block(AVCodecContext *avctx, int mode,
                              GetByteContext *gbyte, int16_t *qtab,
                              int *block, int *pfill,
                              uint8_t *dst, int linesize,
                              const uint8_t *src, int in_linesize)
{
    int fill;

    switch (mode) {
    case 0:
        copy_block8(dst, src, linesize, in_linesize, 8);
        break;
    case 1:
        fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
        pfill[0] += fill;
        block[0] = (int)((unsigned)pfill[0] * qtab[0]) >> 5;
        update_inter_block(dst, linesize, src, in_linesize, block[0]);
        break;
    case 2:
        memset(block, 0, sizeof(*block) * 64);
        fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
        pfill[0] += fill;
        block[0] = (unsigned)pfill[0] * qtab[0];
        block[1] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[1];
        block[8] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[8];
        block[9] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[9];
        idct2_add(dst, linesize, src, in_linesize, block);
        break;
    case 3:
        fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
        pfill[0] += fill;
        block[0] = (unsigned)pfill[0] * qtab[0];
        for (int i = 1; i < 64; i++)
            block[zigzag[i]] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[zigzag[i]];
        idct_add(dst, linesize, src, in_linesize, block);
        break;
    }

    return 0;
}

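/* Expand the VLC-coded coefficient stream into an int16_t array. A code
 * value > 0 gives the size in bits of the next signed level; a code value
 * of 0 is followed by a flag and a zero-run length (3 bits plus 1, or
 * 6 bits plus 9 when the flag is set) of coefficients left at zero. */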
static int decode_coeffs(GetBitContext *gb, int16_t *coeffs, int nb_codes)
{
    memset(coeffs, 0, nb_codes * sizeof(*coeffs));

    for (int i = 0; i < nb_codes;) {
        int value = get_vlc2(gb, cbp_tab, CBP_VLC_BITS, 1);

        if (value > 0) {
            int x = get_bits(gb, value);

            if (x < (1 << value) / 2) {
                x =  (1 << (value - 1)) + (x & ((1 << value) - 1 >> 1));
            } else {
                x = -(1 << (value - 1)) - (x & ((1 << value) - 1 >> 1));
            }
            coeffs[i++] = x;
        } else {
            int flag = get_bits1(gb);

            i += get_bits(gb, 3 + flag * 3) + 1 + flag * 8;
        }
    }

    return 0;
}

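/* Decode a key frame: for every 16x16 macroblock (4 luma + 2 chroma 8x8
 * blocks) read a 2-bit mode from the mode stream (mgb) and the required
 * coefficients from the row's coefficient buffer, then reconstruct the
 * block in place. One coefficient slice is coded per 16-pixel row of
 * macroblocks. */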
static int decode_intra(AVCodecContext *avctx, GetBitContext *gb, AVFrame *frame)
{
    MV30Context *s = avctx->priv_data;
    GetBitContext mgb;
    uint8_t *dst[6];
    int linesize[6];
    int ret;

    mgb = *gb;
    if (get_bits_left(gb) < s->mode_size * 8)
        return AVERROR_INVALIDDATA;

    skip_bits_long(gb, s->mode_size * 8);

    linesize[0] = frame->linesize[0];
    linesize[1] = frame->linesize[0];
    linesize[2] = frame->linesize[0];
    linesize[3] = frame->linesize[0];
    linesize[4] = frame->linesize[1];
    linesize[5] = frame->linesize[2];

    for (int y = 0; y < avctx->height; y += 16) {
        GetByteContext gbyte;
        int pfill[3][1] = { {0} };
        int nb_codes = get_bits(gb, 16);

        av_fast_padded_malloc(&s->coeffs, &s->coeffs_size, nb_codes * sizeof(*s->coeffs));
        if (!s->coeffs)
            return AVERROR(ENOMEM);
        ret = decode_coeffs(gb, s->coeffs, nb_codes);
        if (ret < 0)
            return ret;

        bytestream2_init(&gbyte, (uint8_t *)s->coeffs, nb_codes * sizeof(*s->coeffs));

        for (int x = 0; x < avctx->width; x += 16) {
            dst[0] = frame->data[0] + linesize[0] * y + x;
            dst[1] = frame->data[0] + linesize[0] * y + x + 8;
            dst[2] = frame->data[0] + linesize[0] * (y + 8) + x;
            dst[3] = frame->data[0] + linesize[0] * (y + 8) + x + 8;
            dst[4] = frame->data[1] + linesize[4] * (y >> 1) + (x >> 1);
            dst[5] = frame->data[2] + linesize[5] * (y >> 1) + (x >> 1);

            for (int b = 0; b < 6; b++) {
                int mode = get_bits_le(&mgb, 2);

                ret = decode_intra_block(avctx, mode, &gbyte, s->intraq_tab[b >= 4],
                                         s->block[b],
                                         pfill[(b >= 4) + (b >= 5)],
                                         dst[b], linesize[b]);
                if (ret < 0)
                    return ret;
            }
        }
    }

    return 0;
}

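/* Decode a predicted frame. A mask stream holds one byte per group of four
 * macroblocks: a set bit in the low nibble marks a motion-compensated
 * macroblock (a motion vector pair is then read from the mvectors buffer),
 * and the matching bit in the high nibble marks a macroblock copied
 * unchanged from the reference. Motion-compensated macroblocks without the
 * copy bit carry per-block inter residuals; macroblocks without the motion
 * bit are intra coded as in the key-frame path. */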
static int decode_inter(AVCodecContext *avctx, GetBitContext *gb,
                        AVFrame *frame, AVFrame *prev)
{
    MV30Context *s = avctx->priv_data;
    GetBitContext mask;
    GetBitContext mgb;
    GetByteContext mv;
    const int mask_size = ((avctx->height >> 4) * (avctx->width >> 4) * 2 + 7) / 8;
    uint8_t *dst[6], *src[6];
    int in_linesize[6];
    int linesize[6];
    int ret, cnt = 0;
    int flags = 0;

    in_linesize[0] = prev->linesize[0];
    in_linesize[1] = prev->linesize[0];
    in_linesize[2] = prev->linesize[0];
    in_linesize[3] = prev->linesize[0];
    in_linesize[4] = prev->linesize[1];
    in_linesize[5] = prev->linesize[2];

    linesize[0] = frame->linesize[0];
    linesize[1] = frame->linesize[0];
    linesize[2] = frame->linesize[0];
    linesize[3] = frame->linesize[0];
    linesize[4] = frame->linesize[1];
    linesize[5] = frame->linesize[2];

    av_fast_padded_malloc(&s->mvectors, &s->mvectors_size, 2 * s->nb_mvectors * sizeof(*s->mvectors));
    if (!s->mvectors) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    mask = *gb;
    skip_bits_long(gb, mask_size * 8);
    mgb = *gb;
    skip_bits_long(gb, s->mode_size * 8);

    ret = decode_coeffs(gb, s->mvectors, 2 * s->nb_mvectors);
    if (ret < 0)
        goto fail;

    bytestream2_init(&mv, (uint8_t *)s->mvectors, 2 * s->nb_mvectors * sizeof(*s->mvectors));

    for (int y = 0; y < avctx->height; y += 16) {
        GetByteContext gbyte;
        int pfill[3][1] = { {0} };
        int nb_codes = get_bits(gb, 16);

        skip_bits(gb, 8);
        if (get_bits_left(gb) < 0) {
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }

        av_fast_padded_malloc(&s->coeffs, &s->coeffs_size, nb_codes * sizeof(*s->coeffs));
        if (!s->coeffs) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ret = decode_coeffs(gb, s->coeffs, nb_codes);
        if (ret < 0)
            goto fail;

        bytestream2_init(&gbyte, (uint8_t *)s->coeffs, nb_codes * sizeof(*s->coeffs));

        for (int x = 0; x < avctx->width; x += 16) {
            if (cnt >= 4)
                cnt = 0;
            if (cnt == 0) {
                if (get_bits_left(&mask) < 8) {
                    ret = AVERROR_INVALIDDATA;
                    goto fail;
                }
                flags = get_bits(&mask, 8);
            }

            dst[0] = frame->data[0] + linesize[0] * y + x;
            dst[1] = frame->data[0] + linesize[0] * y + x + 8;
            dst[2] = frame->data[0] + linesize[0] * (y + 8) + x;
            dst[3] = frame->data[0] + linesize[0] * (y + 8) + x + 8;
            dst[4] = frame->data[1] + linesize[4] * (y >> 1) + (x >> 1);
            dst[5] = frame->data[2] + linesize[5] * (y >> 1) + (x >> 1);

            if ((flags >> (cnt)) & 1) {
                int mv_x = sign_extend(bytestream2_get_ne16(&mv), 16);
                int mv_y = sign_extend(bytestream2_get_ne16(&mv), 16);

                int px = x + mv_x;
                int py = y + mv_y;

                if (px < 0 || px > FFALIGN(avctx->width , 16) - 16 ||
                    py < 0 || py > FFALIGN(avctx->height, 16) - 16)
                    return AVERROR_INVALIDDATA;

                src[0] = prev->data[0] + in_linesize[0] * py + px;
                src[1] = prev->data[0] + in_linesize[0] * py + px + 8;
                src[2] = prev->data[0] + in_linesize[0] * (py + 8) + px;
                src[3] = prev->data[0] + in_linesize[0] * (py + 8) + px + 8;
                src[4] = prev->data[1] + in_linesize[4] * (py >> 1) + (px >> 1);
                src[5] = prev->data[2] + in_linesize[5] * (py >> 1) + (px >> 1);

                if ((flags >> (cnt + 4)) & 1) {
                    for (int b = 0; b < 6; b++)
                        copy_block8(dst[b], src[b], linesize[b], in_linesize[b], 8);
                } else {
                    for (int b = 0; b < 6; b++) {
                        int mode = get_bits_le(&mgb, 2);

                        ret = decode_inter_block(avctx, mode, &gbyte, s->interq_tab[b >= 4],
                                                 s->block[b],
                                                 pfill[(b >= 4) + (b >= 5)],
                                                 dst[b], linesize[b],
                                                 src[b], in_linesize[b]);
                        if (ret < 0)
                            goto fail;
                    }
                }
            } else {
                for (int b = 0; b < 6; b++) {
                    int mode = get_bits_le(&mgb, 2);

                    ret = decode_intra_block(avctx, mode, &gbyte, s->intraq_tab[b >= 4],
                                             s->block[b],
                                             pfill[(b >= 4) + (b >= 5)],
                                             dst[b], linesize[b]);
                    if (ret < 0)
                        goto fail;
                }
            }

            cnt++;
        }
    }

fail:
    return ret;
}

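/* Frame header: 8-bit intra quantizer, signed 8-bit inter quantizer delta,
 * a 16-bit little-endian inter flag, the size in bytes of the block-mode
 * stream and, for inter frames, the number of motion vectors. */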
static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
                        int *got_frame, AVPacket *avpkt)
{
    MV30Context *s = avctx->priv_data;
    GetBitContext *gb = &s->gb;
    int ret;

    if ((ret = init_get_bits8(gb, avpkt->data, avpkt->size)) < 0)
        return ret;

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;

    s->intra_quant = get_bits(gb, 8);
    s->inter_quant = s->intra_quant + get_sbits(gb, 8);
    s->is_inter = get_bits_le(gb, 16);
    s->mode_size = get_bits_le(gb, 16);
    if (s->is_inter)
        s->nb_mvectors = get_bits_le(gb, 16);

    get_qtable(s->intraq_tab[0], s->intra_quant, luma_tab);
    get_qtable(s->intraq_tab[1], s->intra_quant, chroma_tab);

    if (s->is_inter == 0) {
        frame->flags |= AV_FRAME_FLAG_KEY;
        ret = decode_intra(avctx, gb, frame);
        if (ret < 0)
            return ret;
    } else {
        get_qtable(s->interq_tab[0], s->inter_quant, luma_tab);
        get_qtable(s->interq_tab[1], s->inter_quant, chroma_tab);

        if (!s->prev_frame->data[0]) {
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
            return AVERROR_INVALIDDATA;
        }

        frame->flags &= ~AV_FRAME_FLAG_KEY;
        ret = decode_inter(avctx, gb, frame, s->prev_frame);
        if (ret < 0)
            return ret;
    }

    if ((ret = av_frame_replace(s->prev_frame, frame)) < 0)
        return ret;

    *got_frame = 1;

    return avpkt->size;
}

static const uint8_t cbp_bits[] = {
    2, 2, 3, 3, 3, 4, 5, 6, 7, 8, 9, 9,
};

static av_cold void init_static_data(void)
{
    VLC_INIT_STATIC_TABLE_FROM_LENGTHS(cbp_tab, CBP_VLC_BITS,
                                       FF_ARRAY_ELEMS(cbp_bits),
                                       cbp_bits, 1, NULL, 0, 0, 0, 0);
}

static av_cold int decode_init(AVCodecContext *avctx)
{
    MV30Context *s = avctx->priv_data;
    static AVOnce init_static_once = AV_ONCE_INIT;

    avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    avctx->color_range = AVCOL_RANGE_JPEG;

    ff_blockdsp_init(&s->bdsp);

    s->prev_frame = av_frame_alloc();
    if (!s->prev_frame)
        return AVERROR(ENOMEM);

    ff_thread_once(&init_static_once, init_static_data);

    return 0;
}

static av_cold void decode_flush(AVCodecContext *avctx)
{
    MV30Context *s = avctx->priv_data;

    av_frame_unref(s->prev_frame);
}

static av_cold int decode_close(AVCodecContext *avctx)
{
    MV30Context *s = avctx->priv_data;

    av_frame_free(&s->prev_frame);
    av_freep(&s->coeffs);
    s->coeffs_size = 0;
    av_freep(&s->mvectors);
    s->mvectors_size = 0;

    return 0;
}

const FFCodec ff_mv30_decoder = {
    .p.name         = "mv30",
    CODEC_LONG_NAME("MidiVid 3.0"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_MV30,
    .priv_data_size = sizeof(MV30Context),
    .init           = decode_init,
    .close          = decode_close,
    FF_CODEC_DECODE_CB(decode_frame),
    .flush          = decode_flush,
    .p.capabilities = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
};