FFmpeg
cfhd.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015-2016 Kieran Kunhya <kieran@kunhya.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Cineform HD video decoder
24  */
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/common.h"
28 #include "libavutil/intreadwrite.h"
29 #include "libavutil/mem.h"
30 #include "libavutil/pixdesc.h"
31 
32 #include "avcodec.h"
33 #include "bytestream.h"
34 #include "codec_internal.h"
35 #include "decode.h"
36 #include "get_bits.h"
37 #include "internal.h"
38 #include "thread.h"
39 #include "cfhd.h"
40 
41 #define ALPHA_COMPAND_DC_OFFSET 256
42 #define ALPHA_COMPAND_GAIN 9400
43 
44 static av_cold int cfhd_init(AVCodecContext *avctx)
45 {
46  CFHDContext *s = avctx->priv_data;
47 
48  s->avctx = avctx;
49 
50  for (int i = 0; i < 64; i++) {
51  int val = i;
52 
53  if (val >= 40) {
54  if (val >= 54) {
55  val -= 54;
56  val <<= 2;
57  val += 54;
58  }
59 
60  val -= 40;
61  val <<= 2;
62  val += 40;
63  }
64 
65  s->lut[0][i] = val;
66  }
67 
68  for (int i = 0; i < 256; i++)
69  s->lut[1][i] = i + ((768LL * i * i * i) / (256 * 256 * 256));
70 
71  return ff_cfhd_init_vlcs(s);
72 }
73 
75 {
76  s->subband_num = 0;
77  s->level = 0;
78  s->subband_num_actual = 0;
79 }
80 
82 {
83  s->peak.level = 0;
84  s->peak.offset = 0;
85  memset(&s->peak.base, 0, sizeof(s->peak.base));
86 }
87 
89 {
90  s->coded_width = 0;
91  s->coded_height = 0;
92  s->coded_format = AV_PIX_FMT_YUV422P10;
93  s->cropped_height = 0;
94  s->bpc = 10;
95  s->channel_cnt = 3;
96  s->subband_cnt = SUBBAND_COUNT;
97  s->channel_num = 0;
98  s->lowpass_precision = 16;
99  s->quantisation = 1;
100  s->codebook = 0;
101  s->difference_coding = 0;
102  s->frame_type = 0;
103  s->sample_type = 0;
104  if (s->transform_type != 2)
105  s->transform_type = -1;
108 }
109 
110 static inline int dequant_and_decompand(CFHDContext *s, int level, int quantisation, int codebook)
111 {
112  if (codebook == 0 || codebook == 1) {
113  return s->lut[codebook][abs(level)] * FFSIGN(level) * quantisation;
114  } else
115  return level * quantisation;
116 }
117 
static inline void difference_coding(int16_t *band, int width, int height)
{
    /* Rows are delta-coded: each sample is stored as the difference from
     * its left neighbour, so integrate along every row in place. */
    for (int row = 0; row < height; row++) {
        int16_t *line = band + row * width;

        for (int col = 1; col < width; col++)
            line[col] += line[col - 1];
    }
}
127 
128 static inline void peak_table(int16_t *band, Peak *peak, int length)
129 {
130  for (int i = 0; i < length; i++)
131  if (abs(band[i]) > peak->level)
132  band[i] = bytestream2_get_le16(&peak->base);
133 }
134 
/* In-place post-processing (companding) of one line of alpha samples.
 *
 * NOTE(review): ALPHA_COMPAND_DC_OFFSET and ALPHA_COMPAND_GAIN are defined
 * at the top of this file but never referenced in the body below, and a
 * `<<= 3` immediately followed by `>>= 16` would zero any small sample —
 * this extraction appears to be missing the DC-offset subtraction, gain
 * multiply and final clip steps between these lines.  Verify against the
 * upstream source before relying on this body. */
static inline void process_alpha(int16_t *alpha, int width)
{
    for (int i = 0; i < width; i++) {
        int channel = alpha[i];
        channel <<= 3;
        channel >>= 16;
        alpha[i] = channel;
    }
}
147 
/* Convert a decoded Bayer frame from its decorrelated representation back
 * to interleaved RGGB samples, in place, scaling up to 16-bit range.
 *
 * The decoded plane stores, per 2x2 Bayer cell:
 *   r[x]  = g  : green average (lowpass)
 *   g1[x] = rg : red-minus-green difference, biased by mid
 *   g2[x] = bg : blue-minus-green difference, biased by mid
 *   b[x]  = gd : green delta between the two green samples, biased by mid
 *
 * `linesize` is in BYTES, but the four cursors are uint16_t*, so the
 * `+= linesize` at the end of each iteration advances them by
 * 2*linesize bytes — i.e. two rows — which matches the outer loop
 * running height/2 times (one 2x2 cell row per iteration). */
static inline void process_bayer(AVFrame *frame, int bpc)
{
    const int linesize = frame->linesize[0];
    uint16_t *r = (uint16_t *)frame->data[0];
    uint16_t *g1 = (uint16_t *)(frame->data[0] + 2);
    uint16_t *g2 = (uint16_t *)(frame->data[0] + frame->linesize[0]);
    uint16_t *b = (uint16_t *)(frame->data[0] + frame->linesize[0] + 2);
    const int mid = 1 << (bpc - 1);          /* bias removed from the difference channels */
    const int factor = 1 << (16 - bpc);      /* scale bpc-bit values to 16-bit */

    for (int y = 0; y < frame->height >> 1; y++) {
        for (int x = 0; x < frame->width; x += 2) {
            int R, G1, G2, B;
            int g, rg, bg, gd;

            g = r[x];
            rg = g1[x];
            bg = g2[x];
            gd = b[x];
            gd -= mid;

            /* Undo the colour decorrelation. */
            R = (rg - mid) * 2 + g;
            G1 = g + gd;
            G2 = g - gd;
            B = (bg - mid) * 2 + g;

            /* Scale to 16 bits and clip to the unsigned 16-bit range. */
            R = av_clip_uintp2(R * factor, 16);
            G1 = av_clip_uintp2(G1 * factor, 16);
            G2 = av_clip_uintp2(G2 * factor, 16);
            B = av_clip_uintp2(B * factor, 16);

            r[x] = R;
            g1[x] = G1;
            g2[x] = G2;
            b[x] = B;
        }

        /* uint16_t* + linesize(bytes) == advance two image rows, see above. */
        r += linesize;
        g1 += linesize;
        g2 += linesize;
        b += linesize;
    }
}
191 
static inline void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high,
                                              int width, int linesize, int plane)
{
    /* Reconstruct two interlaced output lines from one low/high band pair:
     * the even line is (low - high) / 2 and the odd line (low + high) / 2,
     * each clipped to the 10-bit unsigned range. */
    for (int x = 0; x < width; x++) {
        int16_t even_px = (low[x] - high[x]) / 2;
        int16_t odd_px  = (low[x] + high[x]) / 2;

        output[x]            = av_clip_uintp2(even_px, 10);
        output[x + linesize] = av_clip_uintp2(odd_px, 10);
    }
}
202 
static inline void inverse_temporal_filter(int16_t *low, int16_t *high, int width)
{
    /* Undo the temporal transform in place: the low band is overwritten
     * with the even samples (half the difference) and the high band with
     * the odd samples (half the sum). */
    for (int i = 0; i < width; i++) {
        int diff = low[i] - high[i];
        int sum  = low[i] + high[i];

        low[i]  = diff / 2;
        high[i] = sum / 2;
    }
}
213 
215 {
216  for (size_t i = 0; i < FF_ARRAY_ELEMS(s->plane); i++) {
217  Plane *p = &s->plane[i];
218  av_freep(&s->plane[i].idwt_buf);
219  av_freep(&s->plane[i].idwt_tmp);
220  s->plane[i].idwt_size = 0;
221 
222  for (int j = 0; j < SUBBAND_COUNT_3D; j++)
223  s->plane[i].subband[j] = NULL;
224 
225  for (int j = 0; j < 10; j++)
226  s->plane[i].l_h[j] = NULL;
227 
228  for (int j = 0; j < DWT_LEVELS_3D; j++)
229  p->band[j][0].read_ok =
230  p->band[j][1].read_ok =
231  p->band[j][2].read_ok =
232  p->band[j][3].read_ok = 0;
233  }
234  s->a_height = 0;
235  s->a_width = 0;
236  s->a_transform_type = INT_MIN;
237 }
238 
/* Allocate the per-plane inverse-DWT buffers and carve them into the
 * subband and scratch (l_h) pointers used during decoding, then record the
 * configuration they were allocated for (a_width/a_height/a_format/
 * a_transform_type) so the decoder can detect when a reallocation is needed.
 *
 * Returns 0 on success or a negative AVERROR code.  On ENOMEM any
 * partially-allocated planes are left in place; the caller frees them
 * (cfhd_decode calls free_buffers() when this function fails). */
static int alloc_buffers(AVCodecContext *avctx)
{
    CFHDContext *s = avctx->priv_data;
    int ret, planes, bayer = 0;
    int chroma_x_shift, chroma_y_shift;

    if ((ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height)) < 0)
        return ret;
    avctx->pix_fmt = s->coded_format;

    ff_cfhddsp_init(&s->dsp, s->bpc, avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16);

    if ((ret = av_pix_fmt_get_chroma_sub_sample(s->coded_format,
                                                &chroma_x_shift,
                                                &chroma_y_shift)) < 0)
        return ret;
    planes = av_pix_fmt_count_planes(s->coded_format);
    if (s->coded_format == AV_PIX_FMT_BAYER_RGGB16) {
        /* Bayer data is decoded as four half-resolution component planes
         * (R, G1, G2, B), hence the forced 2x "subsampling". */
        planes = 4;
        chroma_x_shift = 1;
        chroma_y_shift = 1;
        bayer = 1;
    }

    for (int i = 0; i < planes; i++) {
        int w8, h8, w4, h4, w2, h2;
        /* Plane 0 is luma-sized unless decoding Bayer; all others are
         * chroma-subsampled. */
        int width = (i || bayer) ? s->coded_width >> chroma_x_shift : s->coded_width;
        int height = (i || bayer) ? s->coded_height >> chroma_y_shift : s->coded_height;
        ptrdiff_t stride = (FFALIGN(width / 8, 8) + 64) * 8;

        if (chroma_y_shift && !bayer)
            height = FFALIGN(height / 8, 2) * 8;
        s->plane[i].width = width;
        s->plane[i].height = height;
        s->plane[i].stride = stride;

        /* Dimensions at the coarsest decomposition level (1/8th size);
         * each finer level doubles both. */
        w8 = FFALIGN(s->plane[i].width / 8, 8) + 64;
        h8 = FFALIGN(height, 8) / 8;
        w4 = w8 * 2;
        h4 = h8 * 2;
        w2 = w4 * 2;
        h2 = h4 * 2;

        if (s->transform_type == 0) {
            s->plane[i].idwt_size = FFALIGN(height, 8) * stride;
            s->plane[i].idwt_buf =
                av_calloc(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_buf));
            s->plane[i].idwt_tmp =
                av_malloc_array(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_tmp));
        } else {
            /* The 3D (temporal) transform carries a second frame's worth of
             * coefficients, so double the buffer. */
            s->plane[i].idwt_size = FFALIGN(height, 8) * stride * 2;
            s->plane[i].idwt_buf =
                av_calloc(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_buf));
            s->plane[i].idwt_tmp =
                av_malloc_array(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_tmp));
        }

        if (!s->plane[i].idwt_buf || !s->plane[i].idwt_tmp)
            return AVERROR(ENOMEM);

        /* Carve the idwt buffer into subband pointers.  Subband 0 is the
         * lowpass band; the offsets place each highpass band after the
         * quadrants of its decomposition level. */
        s->plane[i].subband[0] = s->plane[i].idwt_buf;
        s->plane[i].subband[1] = s->plane[i].idwt_buf + 2 * w8 * h8;
        s->plane[i].subband[2] = s->plane[i].idwt_buf + 1 * w8 * h8;
        s->plane[i].subband[3] = s->plane[i].idwt_buf + 3 * w8 * h8;
        s->plane[i].subband[4] = s->plane[i].idwt_buf + 2 * w4 * h4;
        s->plane[i].subband[5] = s->plane[i].idwt_buf + 1 * w4 * h4;
        s->plane[i].subband[6] = s->plane[i].idwt_buf + 3 * w4 * h4;
        if (s->transform_type == 0) {
            s->plane[i].subband[7] = s->plane[i].idwt_buf + 2 * w2 * h2;
            s->plane[i].subband[8] = s->plane[i].idwt_buf + 1 * w2 * h2;
            s->plane[i].subband[9] = s->plane[i].idwt_buf + 3 * w2 * h2;
        } else {
            /* 3D transform: subbands 7-13 live in the second frame's half
             * of the buffer, 14-16 back in the first half. */
            int16_t *frame2 =
            s->plane[i].subband[7] = s->plane[i].idwt_buf + 4 * w2 * h2;
            s->plane[i].subband[8] = frame2 + 2 * w4 * h4;
            s->plane[i].subband[9] = frame2 + 1 * w4 * h4;
            s->plane[i].subband[10] = frame2 + 3 * w4 * h4;
            s->plane[i].subband[11] = frame2 + 2 * w2 * h2;
            s->plane[i].subband[12] = frame2 + 1 * w2 * h2;
            s->plane[i].subband[13] = frame2 + 3 * w2 * h2;
            s->plane[i].subband[14] = s->plane[i].idwt_buf + 2 * w2 * h2;
            s->plane[i].subband[15] = s->plane[i].idwt_buf + 1 * w2 * h2;
            s->plane[i].subband[16] = s->plane[i].idwt_buf + 3 * w2 * h2;
        }

        /* Record the allocated (maximum) band dimensions per level so the
         * per-sample band headers can be validated against them later. */
        if (s->transform_type == 0) {
            for (int j = 0; j < DWT_LEVELS; j++) {
                for (unsigned k = 0; k < FF_ARRAY_ELEMS(s->plane[i].band[j]); k++) {
                    s->plane[i].band[j][k].a_width = w8 << j;
                    s->plane[i].band[j][k].a_height = h8 << j;
                }
            }
        } else {
            for (int j = 0; j < DWT_LEVELS_3D; j++) {
                /* 3D level -> spatial scale: level 0 is 1/8, levels 1-2 are
                 * 1/4, levels 3+ are 1/2 resolution. */
                int t = j < 1 ? 0 : (j < 3 ? 1 : 2);

                for (unsigned k = 0; k < FF_ARRAY_ELEMS(s->plane[i].band[j]); k++) {
                    s->plane[i].band[j][k].a_width = w8 << t;
                    s->plane[i].band[j][k].a_height = h8 << t;
                }
            }
        }

        /* ll2 and ll1 commented out because they are done in-place */
        s->plane[i].l_h[0] = s->plane[i].idwt_tmp;
        s->plane[i].l_h[1] = s->plane[i].idwt_tmp + 2 * w8 * h8;
        // s->plane[i].l_h[2] = ll2;
        s->plane[i].l_h[3] = s->plane[i].idwt_tmp;
        s->plane[i].l_h[4] = s->plane[i].idwt_tmp + 2 * w4 * h4;
        // s->plane[i].l_h[5] = ll1;
        s->plane[i].l_h[6] = s->plane[i].idwt_tmp;
        s->plane[i].l_h[7] = s->plane[i].idwt_tmp + 2 * w2 * h2;
        if (s->transform_type != 0) {
            int16_t *frame2 = s->plane[i].idwt_tmp + 4 * w2 * h2;

            s->plane[i].l_h[8] = frame2;
            s->plane[i].l_h[9] = frame2 + 2 * w2 * h2;
        }
    }

    /* Remember what we allocated for, so cfhd_decode can detect changes. */
    s->a_transform_type = s->transform_type;
    s->a_height = s->coded_height;
    s->a_width = s->coded_width;
    s->a_format = s->coded_format;

    return 0;
}
366 
367 static int cfhd_decode(AVCodecContext *avctx, AVFrame *pic,
368  int *got_frame, AVPacket *avpkt)
369 {
370  CFHDContext *s = avctx->priv_data;
371  CFHDDSPContext *dsp = &s->dsp;
372  GetByteContext gb;
373  int ret = 0, got_buffer = 0;
374 
376  s->planes = av_pix_fmt_count_planes(s->coded_format);
377 
378  bytestream2_init(&gb, avpkt->data, avpkt->size);
379 
380  while (bytestream2_get_bytes_left(&gb) >= 4) {
381  /* Bit weird but implement the tag parsing as the spec says */
382  uint16_t tagu = bytestream2_get_be16(&gb);
383  int16_t tag = (int16_t)tagu;
384  int8_t tag8 = (int8_t)(tagu >> 8);
385  uint16_t abstag = abs(tag);
386  int8_t abs_tag8 = abs(tag8);
387  uint16_t data = bytestream2_get_be16(&gb);
388  int16_t *coeff_data;
389 
390  if (abs_tag8 >= 0x60 && abs_tag8 <= 0x6f) {
391  av_log(avctx, AV_LOG_DEBUG, "large len %x\n", ((tagu & 0xff) << 16) | data);
392  } else if (tag == SampleFlags) {
393  av_log(avctx, AV_LOG_DEBUG, "Progressive? %"PRIu16"\n", data);
394  s->progressive = data & 0x0001;
395  } else if (tag == FrameType) {
396  s->frame_type = data;
397  av_log(avctx, AV_LOG_DEBUG, "Frame type %"PRIu16"\n", data);
398  } else if (abstag == VersionMajor) {
399  av_log(avctx, AV_LOG_DEBUG, "Version major %"PRIu16"\n", data);
400  } else if (abstag == VersionMinor) {
401  av_log(avctx, AV_LOG_DEBUG, "Version minor %"PRIu16"\n", data);
402  } else if (abstag == VersionRevision) {
403  av_log(avctx, AV_LOG_DEBUG, "Version revision %"PRIu16"\n", data);
404  } else if (abstag == VersionEdit) {
405  av_log(avctx, AV_LOG_DEBUG, "Version edit %"PRIu16"\n", data);
406  } else if (abstag == Version) {
407  av_log(avctx, AV_LOG_DEBUG, "Version %"PRIu16"\n", data);
408  } else if (tag == ImageWidth) {
409  av_log(avctx, AV_LOG_DEBUG, "Width %"PRIu16"\n", data);
410  s->coded_width = data;
411  } else if (tag == ImageHeight) {
412  av_log(avctx, AV_LOG_DEBUG, "Height %"PRIu16"\n", data);
413  s->coded_height = data;
414  } else if (tag == ChannelCount) {
415  av_log(avctx, AV_LOG_DEBUG, "Channel Count: %"PRIu16"\n", data);
416  s->channel_cnt = data;
417  if (data > 4) {
418  av_log(avctx, AV_LOG_ERROR, "Channel Count of %"PRIu16" is unsupported\n", data);
420  goto end;
421  }
422  } else if (tag == SubbandCount) {
423  av_log(avctx, AV_LOG_DEBUG, "Subband Count: %"PRIu16"\n", data);
424  if (data != SUBBAND_COUNT && data != SUBBAND_COUNT_3D) {
425  av_log(avctx, AV_LOG_ERROR, "Subband Count of %"PRIu16" is unsupported\n", data);
427  goto end;
428  }
429  } else if (tag == ChannelNumber) {
430  s->channel_num = data;
431  av_log(avctx, AV_LOG_DEBUG, "Channel number %"PRIu16"\n", data);
432  if (s->channel_num >= s->planes) {
433  av_log(avctx, AV_LOG_ERROR, "Invalid channel number\n");
434  ret = AVERROR(EINVAL);
435  goto end;
436  }
438  } else if (tag == SubbandNumber) {
439  if (s->subband_num != 0 && data == 1 && (s->transform_type == 0 || s->transform_type == 2)) // hack
440  s->level++;
441  av_log(avctx, AV_LOG_DEBUG, "Subband number %"PRIu16"\n", data);
442  s->subband_num = data;
443  if ((s->transform_type == 0 && s->level >= DWT_LEVELS) ||
444  (s->transform_type == 2 && s->level >= DWT_LEVELS_3D)) {
445  av_log(avctx, AV_LOG_ERROR, "Invalid level\n");
446  ret = AVERROR(EINVAL);
447  goto end;
448  }
449  if (s->subband_num > 3) {
450  av_log(avctx, AV_LOG_ERROR, "Invalid subband number\n");
451  ret = AVERROR(EINVAL);
452  goto end;
453  }
454  } else if (tag == SubbandBand) {
455  av_log(avctx, AV_LOG_DEBUG, "Subband number actual %"PRIu16"\n", data);
456  if ((s->transform_type == 0 && data >= SUBBAND_COUNT) ||
457  (s->transform_type == 2 && data >= SUBBAND_COUNT_3D && data != 255)) {
458  av_log(avctx, AV_LOG_ERROR, "Invalid subband number actual\n");
459  ret = AVERROR(EINVAL);
460  goto end;
461  }
462  if (s->transform_type == 0 || s->transform_type == 2)
463  s->subband_num_actual = data;
464  else
465  av_log(avctx, AV_LOG_WARNING, "Ignoring subband num actual %"PRIu16"\n", data);
466  } else if (tag == LowpassPrecision)
467  av_log(avctx, AV_LOG_DEBUG, "Lowpass precision bits: %"PRIu16"\n", data);
468  else if (tag == Quantization) {
469  s->quantisation = data;
470  av_log(avctx, AV_LOG_DEBUG, "Quantisation: %"PRIu16"\n", data);
471  } else if (tag == PrescaleTable) {
472  for (int i = 0; i < 8; i++)
473  s->prescale_table[i] = (data >> (14 - i * 2)) & 0x3;
474  av_log(avctx, AV_LOG_DEBUG, "Prescale table: %x\n", data);
475  } else if (tag == BandEncoding) {
476  if (!data || data > 5) {
477  av_log(avctx, AV_LOG_ERROR, "Invalid band encoding\n");
478  ret = AVERROR(EINVAL);
479  goto end;
480  }
481  s->band_encoding = data;
482  av_log(avctx, AV_LOG_DEBUG, "Encode Method for Subband %d : %x\n", s->subband_num_actual, data);
483  } else if (tag == LowpassWidth) {
484  av_log(avctx, AV_LOG_DEBUG, "Lowpass width %"PRIu16"\n", data);
485  s->plane[s->channel_num].band[0][0].width = data;
486  s->plane[s->channel_num].band[0][0].stride = data;
487  } else if (tag == LowpassHeight) {
488  av_log(avctx, AV_LOG_DEBUG, "Lowpass height %"PRIu16"\n", data);
489  s->plane[s->channel_num].band[0][0].height = data;
490  } else if (tag == SampleType) {
491  s->sample_type = data;
492  av_log(avctx, AV_LOG_DEBUG, "Sample type? %"PRIu16"\n", data);
493  } else if (tag == TransformType) {
494  if (data > 2) {
495  av_log(avctx, AV_LOG_ERROR, "Invalid transform type\n");
496  ret = AVERROR(EINVAL);
497  goto end;
498  } else if (data == 1) {
499  av_log(avctx, AV_LOG_ERROR, "unsupported transform type\n");
501  goto end;
502  }
503  if (s->transform_type == -1) {
504  s->transform_type = data;
505  av_log(avctx, AV_LOG_DEBUG, "Transform type %"PRIu16"\n", data);
506  } else {
507  av_log(avctx, AV_LOG_DEBUG, "Ignoring additional transform type %"PRIu16"\n", data);
508  }
509  } else if (abstag >= 0x4000 && abstag <= 0x40ff) {
510  if (abstag == 0x4001)
511  s->peak.level = 0;
512  av_log(avctx, AV_LOG_DEBUG, "Small chunk length %d %s\n", data * 4, tag < 0 ? "optional" : "required");
513  bytestream2_skipu(&gb, data * 4);
514  } else if (tag == FrameIndex) {
515  av_log(avctx, AV_LOG_DEBUG, "Frame index %"PRIu16"\n", data);
516  s->frame_index = data;
517  } else if (tag == SampleIndexTable) {
518  av_log(avctx, AV_LOG_DEBUG, "Sample index table - skipping %i values\n", data);
519  if (data > bytestream2_get_bytes_left(&gb) / 4) {
520  av_log(avctx, AV_LOG_ERROR, "too many values (%d)\n", data);
522  goto end;
523  }
524  for (int i = 0; i < data; i++) {
525  uint32_t offset = bytestream2_get_be32(&gb);
526  av_log(avctx, AV_LOG_DEBUG, "Offset = %"PRIu32"\n", offset);
527  }
528  } else if (tag == HighpassWidth) {
529  av_log(avctx, AV_LOG_DEBUG, "Highpass width %i channel %i level %i subband %i\n", data, s->channel_num, s->level, s->subband_num);
530  if (data < 3) {
531  av_log(avctx, AV_LOG_ERROR, "Invalid highpass width\n");
532  ret = AVERROR(EINVAL);
533  goto end;
534  }
535  s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
536  s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
537  } else if (tag == HighpassHeight) {
538  av_log(avctx, AV_LOG_DEBUG, "Highpass height %i\n", data);
539  if (data < 3) {
540  av_log(avctx, AV_LOG_ERROR, "Invalid highpass height\n");
541  ret = AVERROR(EINVAL);
542  goto end;
543  }
544  s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
545  } else if (tag == BandWidth) {
546  av_log(avctx, AV_LOG_DEBUG, "Highpass width2 %i\n", data);
547  if (data < 3) {
548  av_log(avctx, AV_LOG_ERROR, "Invalid highpass width2\n");
549  ret = AVERROR(EINVAL);
550  goto end;
551  }
552  s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
553  s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
554  } else if (tag == BandHeight) {
555  av_log(avctx, AV_LOG_DEBUG, "Highpass height2 %i\n", data);
556  if (data < 3) {
557  av_log(avctx, AV_LOG_ERROR, "Invalid highpass height2\n");
558  ret = AVERROR(EINVAL);
559  goto end;
560  }
561  s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
562  } else if (tag == InputFormat) {
563  av_log(avctx, AV_LOG_DEBUG, "Input format %i\n", data);
564  if (s->coded_format == AV_PIX_FMT_NONE ||
565  s->coded_format == AV_PIX_FMT_YUV422P10) {
566  if (data >= 100 && data <= 105) {
567  s->coded_format = AV_PIX_FMT_BAYER_RGGB16;
568  } else if (data >= 122 && data <= 128) {
569  s->coded_format = AV_PIX_FMT_GBRP12;
570  } else if (data == 30) {
571  s->coded_format = AV_PIX_FMT_GBRAP12;
572  } else {
573  s->coded_format = AV_PIX_FMT_YUV422P10;
574  }
575  s->planes = s->coded_format == AV_PIX_FMT_BAYER_RGGB16 ? 4 : av_pix_fmt_count_planes(s->coded_format);
576  }
577  } else if (tag == BandCodingFlags) {
578  s->codebook = data & 0xf;
579  s->difference_coding = (data >> 4) & 1;
580  av_log(avctx, AV_LOG_DEBUG, "Other codebook? %i\n", s->codebook);
581  } else if (tag == Precision) {
582  av_log(avctx, AV_LOG_DEBUG, "Precision %i\n", data);
583  if (!(data == 10 || data == 12)) {
584  av_log(avctx, AV_LOG_ERROR, "Invalid bits per channel\n");
585  ret = AVERROR(EINVAL);
586  goto end;
587  }
588  avctx->bits_per_raw_sample = s->bpc = data;
589  } else if (tag == EncodedFormat) {
590  av_log(avctx, AV_LOG_DEBUG, "Sample format? %i\n", data);
591  if (data == 1) {
592  s->coded_format = AV_PIX_FMT_YUV422P10;
593  } else if (data == 2) {
594  s->coded_format = AV_PIX_FMT_BAYER_RGGB16;
595  } else if (data == 3) {
596  s->coded_format = AV_PIX_FMT_GBRP12;
597  } else if (data == 4) {
598  s->coded_format = AV_PIX_FMT_GBRAP12;
599  } else {
600  avpriv_report_missing_feature(avctx, "Sample format of %"PRIu16, data);
602  goto end;
603  }
604  s->planes = data == 2 ? 4 : av_pix_fmt_count_planes(s->coded_format);
605  } else if (tag == -DisplayHeight) {
606  av_log(avctx, AV_LOG_DEBUG, "Cropped height %"PRIu16"\n", data);
607  s->cropped_height = data;
608  } else if (tag == -PeakOffsetLow) {
609  s->peak.offset &= ~0xffff;
610  s->peak.offset |= (data & 0xffff);
611  s->peak.base = gb;
612  s->peak.level = 0;
613  } else if (tag == -PeakOffsetHigh) {
614  s->peak.offset &= 0xffff;
615  s->peak.offset |= (data & 0xffffU)<<16;
616  s->peak.base = gb;
617  s->peak.level = 0;
618  } else if (tag == -PeakLevel && s->peak.offset) {
619  s->peak.level = data;
620  if (s->peak.offset < 4 - bytestream2_tell(&s->peak.base) ||
621  s->peak.offset > 4 + bytestream2_get_bytes_left(&s->peak.base)
622  ) {
624  goto end;
625  }
626  bytestream2_seek(&s->peak.base, s->peak.offset - 4, SEEK_CUR);
627  } else
628  av_log(avctx, AV_LOG_DEBUG, "Unknown tag %i data %x\n", tag, data);
629 
630  if (tag == BitstreamMarker && data == 0xf0f &&
631  s->coded_format != AV_PIX_FMT_NONE) {
632  int lowpass_height = s->plane[s->channel_num].band[0][0].height;
633  int lowpass_width = s->plane[s->channel_num].band[0][0].width;
634  int factor = s->coded_format == AV_PIX_FMT_BAYER_RGGB16 ? 2 : 1;
635 
636  if (s->coded_width) {
637  s->coded_width *= factor;
638  }
639 
640  if (s->coded_height) {
641  s->coded_height *= factor;
642  }
643 
644  if (!s->a_width && !s->coded_width) {
645  s->coded_width = lowpass_width * factor * 8;
646  }
647 
648  if (!s->a_height && !s->coded_height) {
649  s->coded_height = lowpass_height * factor * 8;
650  }
651 
652  if (s->a_width && !s->coded_width)
653  s->coded_width = s->a_width;
654  if (s->a_height && !s->coded_height)
655  s->coded_height = s->a_height;
656 
657  if (s->a_width != s->coded_width || s->a_height != s->coded_height ||
658  s->a_format != s->coded_format ||
659  s->transform_type != s->a_transform_type) {
660  free_buffers(s);
661  if ((ret = alloc_buffers(avctx)) < 0) {
662  free_buffers(s);
663  return ret;
664  }
665  }
666  ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height);
667  if (ret < 0)
668  return ret;
669  if (s->cropped_height) {
670  unsigned height = s->cropped_height << (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16);
671  if (avctx->height < height)
672  return AVERROR_INVALIDDATA;
673  avctx->height = height;
674  }
675  pic->width = pic->height = 0;
676 
677  if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
678  return ret;
679 
680  s->coded_width = 0;
681  s->coded_height = 0;
682  s->coded_format = AV_PIX_FMT_NONE;
683  got_buffer = 1;
684  } else if (tag == FrameIndex && data == 1 && s->sample_type == 1 && s->frame_type == 2) {
685  pic->width = pic->height = 0;
686 
687  if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
688  return ret;
689  s->coded_width = 0;
690  s->coded_height = 0;
691  s->coded_format = AV_PIX_FMT_NONE;
692  got_buffer = 1;
693  }
694 
695  if (s->subband_num_actual == 255)
696  goto finish;
697  coeff_data = s->plane[s->channel_num].subband[s->subband_num_actual];
698 
699  /* Lowpass coefficients */
700  if (tag == BitstreamMarker && data == 0xf0f) {
701  int lowpass_height, lowpass_width, lowpass_a_height, lowpass_a_width;
702 
703  if (!s->a_width || !s->a_height) {
705  goto end;
706  }
707 
708  lowpass_height = s->plane[s->channel_num].band[0][0].height;
709  lowpass_width = s->plane[s->channel_num].band[0][0].width;
710  lowpass_a_height = s->plane[s->channel_num].band[0][0].a_height;
711  lowpass_a_width = s->plane[s->channel_num].band[0][0].a_width;
712 
713  if (lowpass_width < 3 ||
714  lowpass_width > lowpass_a_width) {
715  av_log(avctx, AV_LOG_ERROR, "Invalid lowpass width\n");
716  ret = AVERROR(EINVAL);
717  goto end;
718  }
719 
720  if (lowpass_height < 3 ||
721  lowpass_height > lowpass_a_height) {
722  av_log(avctx, AV_LOG_ERROR, "Invalid lowpass height\n");
723  ret = AVERROR(EINVAL);
724  goto end;
725  }
726 
727  if (!got_buffer) {
728  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
729  ret = AVERROR(EINVAL);
730  goto end;
731  }
732 
733  if (lowpass_height > lowpass_a_height || lowpass_width > lowpass_a_width ||
734  lowpass_width * lowpass_height * sizeof(int16_t) > bytestream2_get_bytes_left(&gb)) {
735  av_log(avctx, AV_LOG_ERROR, "Too many lowpass coefficients\n");
736  ret = AVERROR(EINVAL);
737  goto end;
738  }
739 
740  av_log(avctx, AV_LOG_DEBUG, "Start of lowpass coeffs component %d height:%d, width:%d\n", s->channel_num, lowpass_height, lowpass_width);
741  for (int i = 0; i < lowpass_height; i++) {
742  for (int j = 0; j < lowpass_width; j++)
743  coeff_data[j] = bytestream2_get_be16u(&gb);
744 
745  coeff_data += lowpass_width;
746  }
747 
748  /* Align to mod-4 position to continue reading tags */
749  bytestream2_seek(&gb, bytestream2_tell(&gb) & 3, SEEK_CUR);
750 
751  /* Copy last line of coefficients if odd height */
752  if (lowpass_height & 1) {
753  memcpy(&coeff_data[lowpass_height * lowpass_width],
754  &coeff_data[(lowpass_height - 1) * lowpass_width],
755  lowpass_width * sizeof(*coeff_data));
756  }
757 
758  s->plane[s->channel_num].band[0][0].read_ok = 1;
759 
760  av_log(avctx, AV_LOG_DEBUG, "Lowpass coefficients %d\n", lowpass_width * lowpass_height);
761  }
762 
763  av_assert0(s->subband_num_actual != 255);
764  if (tag == BandHeader || tag == BandSecondPass) {
765  int highpass_height, highpass_width, highpass_a_width, highpass_a_height, highpass_stride, a_expected;
766  int expected;
767  GetBitContext gbit;
768  int count = 0, bytes;
769 
770  if (!s->a_width || !s->a_height) {
772  goto end;
773  }
774 
775  highpass_height = s->plane[s->channel_num].band[s->level][s->subband_num].height;
776  highpass_width = s->plane[s->channel_num].band[s->level][s->subband_num].width;
777  highpass_a_width = s->plane[s->channel_num].band[s->level][s->subband_num].a_width;
778  highpass_a_height = s->plane[s->channel_num].band[s->level][s->subband_num].a_height;
779  highpass_stride = s->plane[s->channel_num].band[s->level][s->subband_num].stride;
780  a_expected = highpass_a_height * highpass_a_width;
781 
782  if (!got_buffer) {
783  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
784  ret = AVERROR(EINVAL);
785  goto end;
786  }
787 
788  if (highpass_height > highpass_a_height || highpass_width > highpass_a_width || a_expected < highpass_height * (uint64_t)highpass_stride) {
789  av_log(avctx, AV_LOG_ERROR, "Too many highpass coefficients\n");
790  ret = AVERROR(EINVAL);
791  goto end;
792  }
793  expected = highpass_height * highpass_stride;
794 
795  av_log(avctx, AV_LOG_DEBUG, "Start subband coeffs plane %i level %i codebook %i expected %i\n", s->channel_num, s->level, s->codebook, expected);
796 
798  if (ret < 0)
799  goto end;
800  {
801  OPEN_READER(re, &gbit);
802 
803  const int lossless = s->band_encoding == 5;
804 
805  if (s->codebook == 0 && s->transform_type == 2 && s->subband_num_actual == 7)
806  s->codebook = 1;
807  if (!s->codebook) {
808  while (1) {
809  int level, run, coeff;
810 
811  UPDATE_CACHE(re, &gbit);
812  GET_RL_VLC(level, run, re, &gbit, s->table_9_rl_vlc,
813  VLC_BITS, 3, 1);
814 
815  /* escape */
816  if (!run)
817  break;
818 
819  count += run;
820 
821  if (count > expected)
822  break;
823 
824  if (!lossless)
825  coeff = dequant_and_decompand(s, level, s->quantisation, 0);
826  else
827  coeff = level;
828  if (tag == BandSecondPass) {
829  const uint16_t q = s->quantisation;
830 
831  for (int i = 0; i < run; i++) {
832  *coeff_data |= coeff * 256U;
833  *coeff_data++ *= q;
834  }
835  } else {
836  for (int i = 0; i < run; i++)
837  *coeff_data++ = coeff;
838  }
839  }
840  } else {
841  while (1) {
842  int level, run, coeff;
843 
844  UPDATE_CACHE(re, &gbit);
845  GET_RL_VLC(level, run, re, &gbit, s->table_18_rl_vlc,
846  VLC_BITS, 3, 1);
847 
848  /* escape */
849  if (!run)
850  break;
851 
852  count += run;
853 
854  if (count > expected)
855  break;
856 
857  if (!lossless)
858  coeff = dequant_and_decompand(s, level, s->quantisation, s->codebook);
859  else
860  coeff = level;
861  if (tag == BandSecondPass) {
862  const uint16_t q = s->quantisation;
863 
864  for (int i = 0; i < run; i++) {
865  *coeff_data |= coeff * 256U;
866  *coeff_data++ *= q;
867  }
868  } else {
869  for (int i = 0; i < run; i++)
870  *coeff_data++ = coeff;
871  }
872  }
873  }
874  CLOSE_READER(re, &gbit);
875  }
876 
877  if (count > expected) {
878  av_log(avctx, AV_LOG_ERROR, "Escape codeword not found, probably corrupt data\n");
879  ret = AVERROR(EINVAL);
880  goto end;
881  }
882  if (s->peak.level)
883  peak_table(coeff_data - count, &s->peak, count);
884  if (s->difference_coding)
885  difference_coding(s->plane[s->channel_num].subband[s->subband_num_actual], highpass_width, highpass_height);
886 
887  bytes = FFALIGN(AV_CEIL_RSHIFT(get_bits_count(&gbit), 3), 4);
888  if (bytes > bytestream2_get_bytes_left(&gb)) {
889  av_log(avctx, AV_LOG_ERROR, "Bitstream overread error\n");
890  ret = AVERROR(EINVAL);
891  goto end;
892  } else
893  bytestream2_seek(&gb, bytes, SEEK_CUR);
894 
895  av_log(avctx, AV_LOG_DEBUG, "End subband coeffs %i extra %i\n", count, count - expected);
896  s->plane[s->channel_num].band[s->level][s->subband_num].read_ok = 1;
897 finish:
898  if (s->subband_num_actual != 255)
899  s->codebook = 0;
900  }
901  }
902 
903  s->planes = av_pix_fmt_count_planes(avctx->pix_fmt);
904  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
905  s->progressive = 1;
906  s->planes = 4;
907  }
908 
909  ff_thread_finish_setup(avctx);
910 
911  if (!s->a_width || !s->a_height || s->a_format == AV_PIX_FMT_NONE ||
912  s->a_transform_type == INT_MIN ||
913  s->coded_width || s->coded_height || s->coded_format != AV_PIX_FMT_NONE) {
914  av_log(avctx, AV_LOG_ERROR, "Invalid dimensions\n");
915  ret = AVERROR(EINVAL);
916  goto end;
917  }
918 
919  if (!got_buffer) {
920  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
921  ret = AVERROR(EINVAL);
922  goto end;
923  }
924 
925  for (int plane = 0; plane < s->planes; plane++) {
926  for (int level = 0; level < (s->transform_type == 0 ? DWT_LEVELS : DWT_LEVELS_3D) ; level++) {
927  if (s->transform_type == 2)
928  if (level == 2 || level == 5)
929  continue;
930  for (int o = !!level; o < 4 ; o++) {
931  if (!s->plane[plane].band[level][o].read_ok) {
933  goto end;
934  }
935  }
936  }
937  }
938 
939  if (s->transform_type == 0 && s->sample_type != 1) {
940  for (int plane = 0; plane < s->planes && !ret; plane++) {
941  /* level 1 */
942  int lowpass_height = s->plane[plane].band[0][0].height;
943  int output_stride = s->plane[plane].band[0][0].a_width;
944  int lowpass_width = s->plane[plane].band[0][0].width;
945  int highpass_stride = s->plane[plane].band[0][1].stride;
946  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
947  ptrdiff_t dst_linesize;
948  int16_t *low, *high, *output, *dst;
949 
950  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
951  act_plane = 0;
952  dst_linesize = pic->linesize[act_plane];
953  } else {
954  dst_linesize = pic->linesize[act_plane] / 2;
955  }
956 
957  if (lowpass_height > s->plane[plane].band[0][0].a_height || lowpass_width > s->plane[plane].band[0][0].a_width ||
958  !highpass_stride || s->plane[plane].band[0][1].width > s->plane[plane].band[0][1].a_width ||
959  lowpass_width < 3 || lowpass_height < 3) {
960  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
961  ret = AVERROR(EINVAL);
962  goto end;
963  }
964 
965  av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
966 
967  low = s->plane[plane].subband[0];
968  high = s->plane[plane].subband[2];
969  output = s->plane[plane].l_h[0];
970  dsp->vert_filter(output, output_stride, low, lowpass_width, high, highpass_stride, lowpass_width, lowpass_height);
971 
972  low = s->plane[plane].subband[1];
973  high = s->plane[plane].subband[3];
974  output = s->plane[plane].l_h[1];
975 
976  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
977 
978  low = s->plane[plane].l_h[0];
979  high = s->plane[plane].l_h[1];
980  output = s->plane[plane].subband[0];
981  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
982  if (s->bpc == 12) {
983  output = s->plane[plane].subband[0];
984  for (int i = 0; i < lowpass_height * 2; i++) {
985  for (int j = 0; j < lowpass_width * 2; j++)
986  output[j] *= 4;
987 
988  output += output_stride * 2;
989  }
990  }
991 
992  /* level 2 */
993  lowpass_height = s->plane[plane].band[1][1].height;
994  output_stride = s->plane[plane].band[1][1].a_width;
995  lowpass_width = s->plane[plane].band[1][1].width;
996  highpass_stride = s->plane[plane].band[1][1].stride;
997 
998  if (lowpass_height > s->plane[plane].band[1][1].a_height || lowpass_width > s->plane[plane].band[1][1].a_width ||
999  !highpass_stride || s->plane[plane].band[1][1].width > s->plane[plane].band[1][1].a_width ||
1000  lowpass_width < 3 || lowpass_height < 3) {
1001  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1002  ret = AVERROR(EINVAL);
1003  goto end;
1004  }
1005 
1006  av_log(avctx, AV_LOG_DEBUG, "Level 2 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1007 
1008  low = s->plane[plane].subband[0];
1009  high = s->plane[plane].subband[5];
1010  output = s->plane[plane].l_h[3];
1011  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1012 
1013  low = s->plane[plane].subband[4];
1014  high = s->plane[plane].subband[6];
1015  output = s->plane[plane].l_h[4];
1016  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1017 
1018  low = s->plane[plane].l_h[3];
1019  high = s->plane[plane].l_h[4];
1020  output = s->plane[plane].subband[0];
1021  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1022 
1023  output = s->plane[plane].subband[0];
1024  for (int i = 0; i < lowpass_height * 2; i++) {
1025  for (int j = 0; j < lowpass_width * 2; j++)
1026  output[j] *= 4;
1027 
1028  output += output_stride * 2;
1029  }
1030 
1031  /* level 3 */
1032  lowpass_height = s->plane[plane].band[2][1].height;
1033  output_stride = s->plane[plane].band[2][1].a_width;
1034  lowpass_width = s->plane[plane].band[2][1].width;
1035  highpass_stride = s->plane[plane].band[2][1].stride;
1036 
1037  if (lowpass_height > s->plane[plane].band[2][1].a_height || lowpass_width > s->plane[plane].band[2][1].a_width ||
1038  !highpass_stride || s->plane[plane].band[2][1].width > s->plane[plane].band[2][1].a_width ||
1039  lowpass_height < 3 || lowpass_width < 3 || lowpass_width * 2 > s->plane[plane].width) {
1040  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1041  ret = AVERROR(EINVAL);
1042  goto end;
1043  }
1044 
1045  av_log(avctx, AV_LOG_DEBUG, "Level 3 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1046  if (s->progressive) {
1047  low = s->plane[plane].subband[0];
1048  high = s->plane[plane].subband[8];
1049  output = s->plane[plane].l_h[6];
1050  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1051 
1052  low = s->plane[plane].subband[7];
1053  high = s->plane[plane].subband[9];
1054  output = s->plane[plane].l_h[7];
1055  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1056 
1057  dst = (int16_t *)pic->data[act_plane];
1058  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1059  if (plane & 1)
1060  dst++;
1061  if (plane > 1)
1062  dst += pic->linesize[act_plane] >> 1;
1063  }
1064  low = s->plane[plane].l_h[6];
1065  high = s->plane[plane].l_h[7];
1066 
1067  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
1068  (lowpass_height * 2 > avctx->coded_height / 2 ||
1069  lowpass_width * 2 > avctx->coded_width / 2 )
 1070  ) {
 1071  ret = AVERROR(EINVAL);
 1072  goto end;
 1073  }
1074 
1075  for (int i = 0; i < s->plane[act_plane].height; i++) {
1076  dsp->horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
1077  if (avctx->pix_fmt == AV_PIX_FMT_GBRAP12 && act_plane == 3)
1078  process_alpha(dst, lowpass_width * 2);
1079  low += output_stride;
1080  high += output_stride;
1081  dst += dst_linesize;
1082  }
1083  } else {
 1084  av_log(avctx, AV_LOG_DEBUG, "interlaced frame ? %d", !!(pic->flags & AV_FRAME_FLAG_INTERLACED));
 1085  pic->flags |= AV_FRAME_FLAG_INTERLACED;
 1086  low = s->plane[plane].subband[0];
1087  high = s->plane[plane].subband[7];
1088  output = s->plane[plane].l_h[6];
1089  dsp->horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1090 
1091  low = s->plane[plane].subband[8];
1092  high = s->plane[plane].subband[9];
1093  output = s->plane[plane].l_h[7];
1094  dsp->horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1095 
1096  dst = (int16_t *)pic->data[act_plane];
1097  low = s->plane[plane].l_h[6];
1098  high = s->plane[plane].l_h[7];
1099  for (int i = 0; i < s->plane[act_plane].height / 2; i++) {
1100  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1101  low += output_stride * 2;
1102  high += output_stride * 2;
1103  dst += pic->linesize[act_plane];
1104  }
1105  }
1106  }
1107  } else if (s->transform_type == 2 && (avctx->internal->is_copy || s->frame_index == 1 || s->sample_type != 1)) {
1108  for (int plane = 0; plane < s->planes && !ret; plane++) {
1109  int lowpass_height = s->plane[plane].band[0][0].height;
1110  int output_stride = s->plane[plane].band[0][0].a_width;
1111  int lowpass_width = s->plane[plane].band[0][0].width;
1112  int highpass_stride = s->plane[plane].band[0][1].stride;
1113  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
1114  int16_t *low, *high, *output, *dst;
1115  ptrdiff_t dst_linesize;
1116 
1117  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1118  act_plane = 0;
1119  dst_linesize = pic->linesize[act_plane];
1120  } else {
1121  dst_linesize = pic->linesize[act_plane] / 2;
1122  }
1123 
1124  if (lowpass_height > s->plane[plane].band[0][0].a_height || lowpass_width > s->plane[plane].band[0][0].a_width ||
1125  !highpass_stride || s->plane[plane].band[0][1].width > s->plane[plane].band[0][1].a_width ||
1126  lowpass_width < 3 || lowpass_height < 3) {
1127  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1128  ret = AVERROR(EINVAL);
1129  goto end;
1130  }
1131 
1132  av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1133 
1134  low = s->plane[plane].subband[0];
1135  high = s->plane[plane].subband[2];
1136  output = s->plane[plane].l_h[0];
1137  dsp->vert_filter(output, output_stride, low, lowpass_width, high, highpass_stride, lowpass_width, lowpass_height);
1138 
1139  low = s->plane[plane].subband[1];
1140  high = s->plane[plane].subband[3];
1141  output = s->plane[plane].l_h[1];
1142  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1143 
1144  low = s->plane[plane].l_h[0];
1145  high = s->plane[plane].l_h[1];
1146  output = s->plane[plane].l_h[7];
1147  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1148  if (s->bpc == 12) {
1149  output = s->plane[plane].l_h[7];
1150  for (int i = 0; i < lowpass_height * 2; i++) {
1151  for (int j = 0; j < lowpass_width * 2; j++)
1152  output[j] *= 4;
1153 
1154  output += output_stride * 2;
1155  }
1156  }
1157 
1158  lowpass_height = s->plane[plane].band[1][1].height;
1159  output_stride = s->plane[plane].band[1][1].a_width;
1160  lowpass_width = s->plane[plane].band[1][1].width;
1161  highpass_stride = s->plane[plane].band[1][1].stride;
1162 
1163  if (lowpass_height > s->plane[plane].band[1][1].a_height || lowpass_width > s->plane[plane].band[1][1].a_width ||
1164  !highpass_stride || s->plane[plane].band[1][1].width > s->plane[plane].band[1][1].a_width ||
1165  lowpass_width < 3 || lowpass_height < 3) {
1166  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1167  ret = AVERROR(EINVAL);
1168  goto end;
1169  }
1170 
1171  av_log(avctx, AV_LOG_DEBUG, "Level 2 lowpass plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1172 
1173  low = s->plane[plane].l_h[7];
1174  high = s->plane[plane].subband[5];
1175  output = s->plane[plane].l_h[3];
1176  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1177 
1178  low = s->plane[plane].subband[4];
1179  high = s->plane[plane].subband[6];
1180  output = s->plane[plane].l_h[4];
1181  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1182 
1183  low = s->plane[plane].l_h[3];
1184  high = s->plane[plane].l_h[4];
1185  output = s->plane[plane].l_h[7];
1186  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1187 
1188  output = s->plane[plane].l_h[7];
1189  for (int i = 0; i < lowpass_height * 2; i++) {
1190  for (int j = 0; j < lowpass_width * 2; j++)
1191  output[j] *= 4;
1192  output += output_stride * 2;
1193  }
1194 
1195  low = s->plane[plane].subband[7];
1196  high = s->plane[plane].subband[9];
1197  output = s->plane[plane].l_h[3];
1198  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1199 
1200  low = s->plane[plane].subband[8];
1201  high = s->plane[plane].subband[10];
1202  output = s->plane[plane].l_h[4];
1203  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1204 
1205  low = s->plane[plane].l_h[3];
1206  high = s->plane[plane].l_h[4];
1207  output = s->plane[plane].l_h[9];
1208  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1209 
1210  lowpass_height = s->plane[plane].band[4][1].height;
1211  output_stride = s->plane[plane].band[4][1].a_width;
1212  lowpass_width = s->plane[plane].band[4][1].width;
1213  highpass_stride = s->plane[plane].band[4][1].stride;
1214  av_log(avctx, AV_LOG_DEBUG, "temporal level %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1215 
1216  if (lowpass_height > s->plane[plane].band[4][1].a_height || lowpass_width > s->plane[plane].band[4][1].a_width ||
1217  !highpass_stride || s->plane[plane].band[4][1].width > s->plane[plane].band[4][1].a_width ||
1218  lowpass_width < 3 || lowpass_height < 3) {
1219  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1220  ret = AVERROR(EINVAL);
1221  goto end;
1222  }
1223 
1224  low = s->plane[plane].l_h[7];
1225  high = s->plane[plane].l_h[9];
1226  output = s->plane[plane].l_h[7];
1227  for (int i = 0; i < lowpass_height; i++) {
1228  inverse_temporal_filter(low, high, lowpass_width);
1229  low += output_stride;
1230  high += output_stride;
1231  }
1232  if (s->progressive) {
1233  low = s->plane[plane].l_h[7];
1234  high = s->plane[plane].subband[15];
1235  output = s->plane[plane].l_h[6];
1236  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1237 
1238  low = s->plane[plane].subband[14];
1239  high = s->plane[plane].subband[16];
1240  output = s->plane[plane].l_h[7];
1241  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1242 
1243  low = s->plane[plane].l_h[9];
1244  high = s->plane[plane].subband[12];
1245  output = s->plane[plane].l_h[8];
1246  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1247 
1248  low = s->plane[plane].subband[11];
1249  high = s->plane[plane].subband[13];
1250  output = s->plane[plane].l_h[9];
1251  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1252 
1253  if (s->sample_type == 1)
1254  continue;
1255 
1256  dst = (int16_t *)pic->data[act_plane];
1257  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1258  if (plane & 1)
1259  dst++;
1260  if (plane > 1)
1261  dst += pic->linesize[act_plane] >> 1;
1262  }
1263 
1264  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
1265  (lowpass_height * 2 > avctx->coded_height / 2 ||
1266  lowpass_width * 2 > avctx->coded_width / 2 )
 1267  ) {
 1268  ret = AVERROR(EINVAL);
 1269  goto end;
 1270  }
1271 
1272  low = s->plane[plane].l_h[6];
1273  high = s->plane[plane].l_h[7];
1274  for (int i = 0; i < s->plane[act_plane].height; i++) {
1275  dsp->horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
1276  low += output_stride;
1277  high += output_stride;
1278  dst += dst_linesize;
1279  }
 1280  } else {
 1281  pic->flags |= AV_FRAME_FLAG_INTERLACED;
 1282  low = s->plane[plane].l_h[7];
1283  high = s->plane[plane].subband[14];
1284  output = s->plane[plane].l_h[6];
1285  dsp->horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1286 
1287  low = s->plane[plane].subband[15];
1288  high = s->plane[plane].subband[16];
1289  output = s->plane[plane].l_h[7];
1290  dsp->horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1291 
1292  low = s->plane[plane].l_h[9];
1293  high = s->plane[plane].subband[11];
1294  output = s->plane[plane].l_h[8];
1295  dsp->horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1296 
1297  low = s->plane[plane].subband[12];
1298  high = s->plane[plane].subband[13];
1299  output = s->plane[plane].l_h[9];
1300  dsp->horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1301 
1302  if (s->sample_type == 1)
1303  continue;
1304 
1305  dst = (int16_t *)pic->data[act_plane];
1306  low = s->plane[plane].l_h[6];
1307  high = s->plane[plane].l_h[7];
1308  for (int i = 0; i < s->plane[act_plane].height / 2; i++) {
1309  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1310  low += output_stride * 2;
1311  high += output_stride * 2;
1312  dst += pic->linesize[act_plane];
1313  }
1314  }
1315  }
1316  }
1317 
1318  if (s->transform_type == 2 && s->sample_type == 1) {
1319  int16_t *low, *high, *dst;
1320  int output_stride, lowpass_height, lowpass_width;
1321  ptrdiff_t dst_linesize;
1322 
1323  for (int plane = 0; plane < s->planes; plane++) {
1324  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
1325 
1326  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1327  act_plane = 0;
1328  dst_linesize = pic->linesize[act_plane];
1329  } else {
1330  dst_linesize = pic->linesize[act_plane] / 2;
1331  }
1332 
1333  lowpass_height = s->plane[plane].band[4][1].height;
1334  output_stride = s->plane[plane].band[4][1].a_width;
1335  lowpass_width = s->plane[plane].band[4][1].width;
1336 
1337  if (lowpass_height > s->plane[plane].band[4][1].a_height || lowpass_width > s->plane[plane].band[4][1].a_width ||
1338  s->plane[plane].band[4][1].width > s->plane[plane].band[4][1].a_width ||
1339  lowpass_width < 3 || lowpass_height < 3) {
1340  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1341  ret = AVERROR(EINVAL);
1342  goto end;
1343  }
1344 
1345  if (s->progressive) {
1346  dst = (int16_t *)pic->data[act_plane];
1347  low = s->plane[plane].l_h[8];
1348  high = s->plane[plane].l_h[9];
1349 
1350  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1351  if (plane & 1)
1352  dst++;
1353  if (plane > 1)
1354  dst += pic->linesize[act_plane] >> 1;
1355  }
1356 
1357  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
1358  (lowpass_height * 2 > avctx->coded_height / 2 ||
1359  lowpass_width * 2 > avctx->coded_width / 2 )
 1360  ) {
 1361  ret = AVERROR(EINVAL);
 1362  goto end;
 1363  }
1364 
1365  for (int i = 0; i < s->plane[act_plane].height; i++) {
1366  dsp->horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
1367  low += output_stride;
1368  high += output_stride;
1369  dst += dst_linesize;
1370  }
1371  } else {
1372  dst = (int16_t *)pic->data[act_plane];
1373  low = s->plane[plane].l_h[8];
1374  high = s->plane[plane].l_h[9];
1375  for (int i = 0; i < s->plane[act_plane].height / 2; i++) {
1376  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1377  low += output_stride * 2;
1378  high += output_stride * 2;
1379  dst += pic->linesize[act_plane];
1380  }
1381  }
1382  }
1383  }
1384 
1385  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16)
1386  process_bayer(pic, s->bpc);
1387 end:
1388  if (ret < 0)
1389  return ret;
1390 
1391  *got_frame = 1;
1392  return avpkt->size;
1393 }
1394 
1396 {
1397  CFHDContext *s = avctx->priv_data;
1398 
1399  free_buffers(s);
1400 
1401  return 0;
1402 }
1403 
#if HAVE_THREADS
/**
 * Frame-threading state hand-off: copy decoder state from the source
 * worker context to the destination worker context.
 *
 * Only required for the 3D (temporal) transform, where the second output
 * frame of a pair is reconstructed from data carried over from the first
 * sample; the 2D transform decodes every frame independently, so nothing
 * is copied in that case.
 *
 * @param dst destination (receiving) codec context
 * @param src source codec context whose state is copied
 * @return 0 on success, a negative AVERROR code on allocation failure
 */
static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    CFHDContext *psrc = src->priv_data;
    CFHDContext *pdst = dst->priv_data;
    int ret;

    /* 2D transform: frames are independent, no state to propagate. */
    if (dst == src || psrc->transform_type == 0)
        return 0;

    /* Any geometry/format/transform mismatch invalidates dst's buffers. */
    if (pdst->plane[0].idwt_size != psrc->plane[0].idwt_size ||
        pdst->a_format != psrc->a_format ||
        pdst->a_width != psrc->a_width ||
        pdst->a_height != psrc->a_height ||
        pdst->a_transform_type != psrc->a_transform_type)
        free_buffers(pdst);

    pdst->a_format         = psrc->a_format;
    pdst->a_width          = psrc->a_width;
    pdst->a_height         = psrc->a_height;
    pdst->a_transform_type = psrc->a_transform_type;
    pdst->transform_type   = psrc->transform_type;
    pdst->progressive      = psrc->progressive;
    pdst->planes           = psrc->planes;

    /* (Re)allocate dst buffers if they were freed above or never existed. */
    if (!pdst->plane[0].idwt_buf) {
        pdst->coded_width    = pdst->a_width;
        pdst->coded_height   = pdst->a_height;
        pdst->coded_format   = pdst->a_format;
        pdst->transform_type = pdst->a_transform_type;
        ret = alloc_buffers(dst);
        if (ret < 0)
            return ret;
    }

    /* Copy band descriptions and the raw inverse-DWT working data. */
    for (int plane = 0; plane < pdst->planes; plane++) {
        memcpy(pdst->plane[plane].band, psrc->plane[plane].band, sizeof(pdst->plane[plane].band));
        memcpy(pdst->plane[plane].idwt_buf, psrc->plane[plane].idwt_buf,
               pdst->plane[plane].idwt_size * sizeof(int16_t));
    }

    return 0;
}
#endif
1448 
1450  .p.name = "cfhd",
1451  CODEC_LONG_NAME("GoPro CineForm HD"),
1452  .p.type = AVMEDIA_TYPE_VIDEO,
1453  .p.id = AV_CODEC_ID_CFHD,
1454  .priv_data_size = sizeof(CFHDContext),
1455  .init = cfhd_init,
1456  .close = cfhd_close,
1459  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1460  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1461 };
ChannelNumber
@ ChannelNumber
Definition: cfhd.h:76
ChannelCount
@ ChannelCount
Definition: cfhd.h:40
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
BandSecondPass
@ BandSecondPass
Definition: cfhd.h:86
level
uint8_t level
Definition: svq3.c:205
Precision
@ Precision
Definition: cfhd.h:79
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
Peak::level
int level
Definition: cfhd.h:133
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
BandHeader
@ BandHeader
Definition: cfhd.h:74
PrescaleTable
@ PrescaleTable
Definition: cfhd.h:87
GetByteContext
Definition: bytestream.h:33
G2
#define G2(m)
Definition: itx_1d.c:64
CFHDContext::progressive
int progressive
Definition: cfhd.h:155
BandHeight
@ BandHeight
Definition: cfhd.h:69
ff_cfhd_decoder
const FFCodec ff_cfhd_decoder
Definition: cfhd.c:1449
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:124
bytestream2_skipu
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:249
SampleType
int32_t SampleType
Definition: ac3enc.h:65
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:410
pixdesc.h
AVFrame::width
int width
Definition: frame.h:482
CFHDDSPContext
Definition: cfhddsp.h:25
CFHDDSPContext::horiz_filter_clip
void(* horiz_filter_clip)(int16_t *output, const int16_t *low, const int16_t *high, int width, int bpc)
Definition: cfhddsp.h:36
internal.h
even
Tag MUST be even
Definition: snow.txt:206
AVPacket::data
uint8_t * data
Definition: packet.h:535
CFHDContext::a_format
int a_format
Definition: cfhd.h:159
b
#define b
Definition: input.c:42
HighpassWidth
@ HighpassWidth
Definition: cfhd.h:61
data
const char data[16]
Definition: mxf.c:149
R
#define R
Definition: huffyuv.h:44
high
int high
Definition: dovi_rpuenc.c:38
FFCodec
Definition: codec_internal.h:127
ALPHA_COMPAND_DC_OFFSET
#define ALPHA_COMPAND_DC_OFFSET
Definition: cfhd.c:41
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:208
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:654
LowpassWidth
@ LowpassWidth
Definition: cfhd.h:52
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
cfhd_init
static av_cold int cfhd_init(AVCodecContext *avctx)
Definition: cfhd.c:44
difference_coding
static void difference_coding(int16_t *band, int width, int height)
Definition: cfhd.c:118
thread.h
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:431
ff_cfhd_init_vlcs
int ff_cfhd_init_vlcs(CFHDContext *s)
Definition: cfhddata.c:181
AVCodecInternal::is_copy
int is_copy
When using frame-threaded decoding, this field is set for the first worker thread (e....
Definition: internal.h:54
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3381
VersionMajor
@ VersionMajor
Definition: cfhd.h:34
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
finish
static void finish(void)
Definition: movenc.c:374
init_peak_table_defaults
static void init_peak_table_defaults(CFHDContext *s)
Definition: cfhd.c:81
cfhd.h
FFSIGN
#define FFSIGN(a)
Definition: common.h:75
GetBitContext
Definition: get_bits.h:108
DWT_LEVELS
#define DWT_LEVELS
Definition: cfhd.h:104
val
static double val(void *priv, double ch)
Definition: aeval.c:77
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3369
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:607
LowpassPrecision
@ LowpassPrecision
Definition: cfhd.h:56
Quantization
@ Quantization
Definition: cfhd.h:72
BandEncoding
@ BandEncoding
Definition: cfhd.h:71
SubbandNumber
@ SubbandNumber
Definition: cfhd.h:67
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
peak_table
static void peak_table(int16_t *band, Peak *peak, int length)
Definition: cfhd.c:128
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:528
Plane::idwt_size
int idwt_size
Definition: cfhd.h:123
process_alpha
static void process_alpha(int16_t *alpha, int width)
Definition: cfhd.c:135
AV_CODEC_ID_CFHD
@ AV_CODEC_ID_CFHD
Definition: codec_id.h:271
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:184
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:341
intreadwrite.h
Version
@ Version
Definition: cfhd.h:85
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_GBRAP12
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:549
ALPHA_COMPAND_GAIN
#define ALPHA_COMPAND_GAIN
Definition: cfhd.c:42
TransformType
TransformType
Definition: webp.c:113
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
g
const char * g
Definition: vf_curves.c:128
PeakOffsetHigh
@ PeakOffsetHigh
Definition: cfhd.h:84
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1048
GetByteContext::buffer
const uint8_t * buffer
Definition: bytestream.h:34
VersionMinor
@ VersionMinor
Definition: cfhd.h:35
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
B
#define B
Definition: huffyuv.h:42
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1553
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
decode.h
get_bits.h
DisplayHeight
@ DisplayHeight
Definition: cfhd.h:89
process_bayer
static void process_bayer(AVFrame *frame, int bpc)
Definition: cfhd.c:148
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:326
FrameType
FrameType
G723.1 frame types.
Definition: g723_1.h:63
if
if(ret)
Definition: filter_design.txt:179
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:95
FrameIndex
@ FrameIndex
Definition: cfhd.h:49
InputFormat
@ InputFormat
Definition: cfhd.h:80
dequant_and_decompand
static int dequant_and_decompand(CFHDContext *s, int level, int quantisation, int codebook)
Definition: cfhd.c:110
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:204
CFHDContext::planes
int planes
Definition: cfhd.h:146
CFHDDSPContext::vert_filter
void(* vert_filter)(int16_t *output, ptrdiff_t out_stride, const int16_t *low, ptrdiff_t low_stride, const int16_t *high, ptrdiff_t high_stride, int width, int height)
Definition: cfhddsp.h:31
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:466
CFHDDSPContext::horiz_filter
void(* horiz_filter)(int16_t *output, ptrdiff_t out_stride, const int16_t *low, ptrdiff_t low_stride, const int16_t *high, ptrdiff_t high_stride, int width, int height)
Definition: cfhddsp.h:26
Peak
Definition: cfhd.h:132
abs
#define abs(x)
Definition: cuda_runtime.h:35
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:335
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:529
VLC_BITS
#define VLC_BITS
Definition: cfhd.h:94
HighpassHeight
@ HighpassHeight
Definition: cfhd.h:62
close
av_cold void CBS_FUNC() close(CodedBitstreamContext **ctx_ptr)
Close a context and free all internal state.
Definition: cbs.c:140
free_buffers
static void free_buffers(CFHDContext *s)
Definition: cfhd.c:214
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
SUBBAND_COUNT
#define SUBBAND_COUNT
Definition: cfhd.h:95
alloc_buffers
static int alloc_buffers(AVCodecContext *avctx)
Definition: cfhd.c:239
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
Peak::base
GetByteContext base
Definition: cfhd.h:135
AVPacket::size
int size
Definition: packet.h:536
VersionRevision
@ VersionRevision
Definition: cfhd.h:36
height
#define height
Definition: dsp.h:85
codec_internal.h
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
BitstreamMarker
@ BitstreamMarker
Definition: cfhd.h:33
cfhd_decode
static int cfhd_decode(AVCodecContext *avctx, AVFrame *pic, int *got_frame, AVPacket *avpkt)
Definition: cfhd.c:367
planes
static const struct @489 planes[]
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:173
CFHDContext::a_width
int a_width
Definition: cfhd.h:157
CFHDContext::coded_height
int coded_height
Definition: cfhd.h:152
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
VersionEdit
@ VersionEdit
Definition: cfhd.h:37
CFHDContext::coded_format
enum AVPixelFormat coded_format
Definition: cfhd.h:154
CFHDContext::transform_type
int transform_type
Definition: cfhd.h:150
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
SubbandCount
@ SubbandCount
Definition: cfhd.h:42
CFHDContext::plane
Plane plane[4]
Definition: cfhd.h:177
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:545
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
CFHDContext
Definition: cfhd.h:138
common.h
SubbandBand
@ SubbandBand
Definition: cfhd.h:70
SubBand::read_ok
int8_t read_ok
Definition: cfhd.h:113
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
SUBBAND_COUNT_3D
#define SUBBAND_COUNT_3D
Definition: cfhd.h:96
AVCodecContext::height
int height
Definition: avcodec.h:592
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:633
ImageWidth
@ ImageWidth
Definition: cfhd.h:47
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
avcodec.h
stride
#define stride
Definition: h264pred_template.c:536
GET_RL_VLC
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)
Definition: get_bits.h:589
tag
uint32_t tag
Definition: movenc.c:1911
ret
ret
Definition: filter_design.txt:187
CFHDContext::coded_width
int coded_width
Definition: cfhd.h:151
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
inverse_temporal_filter
static void inverse_temporal_filter(int16_t *low, int16_t *high, int width)
Definition: cfhd.c:203
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
PeakLevel
@ PeakLevel
Definition: cfhd.h:82
BandCodingFlags
@ BandCodingFlags
Definition: cfhd.h:81
U
#define U(x)
Definition: vpx_arith.h:37
interlaced_vertical_filter
static void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high, int width, int linesize, int plane)
Definition: cfhd.c:192
EncodedFormat
@ EncodedFormat
Definition: cfhd.h:88
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AVFrame::height
int height
Definition: frame.h:482
init_plane_defaults
static void init_plane_defaults(CFHDContext *s)
Definition: cfhd.c:74
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
CFHDContext::a_transform_type
int a_transform_type
Definition: cfhd.h:160
update_thread_context
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have update_thread_context() run it in the next thread. Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little speed gain at this point but it should work. Use ff_thread_get_buffer()(or ff_progress_frame_get_buffer() in case you have inter-frame dependencies and use the ProgressFrame API) to allocate frame buffers. Call ff_progress_frame_report() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
ff_cfhddsp_init
av_cold void ff_cfhddsp_init(CFHDDSPContext *c, int depth, int bayer)
Definition: cfhddsp.c:105
ImageHeight
@ ImageHeight
Definition: cfhd.h:48
Plane
Definition: cfhd.h:116
BandWidth
@ BandWidth
Definition: cfhd.h:68
factor
static const int factor[16]
Definition: vf_pp7.c:80
SampleFlags
@ SampleFlags
Definition: cfhd.h:77
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:607
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
Plane::idwt_buf
int16_t * idwt_buf
Definition: cfhd.h:121
DWT_LEVELS_3D
#define DWT_LEVELS_3D
Definition: cfhd.h:105
mem.h
Plane::band
SubBand band[DWT_LEVELS_3D][4]
Definition: cfhd.h:129
LowpassHeight
@ LowpassHeight
Definition: cfhd.h:53
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
alpha
static const int16_t alpha[]
Definition: ilbcdata.h:55
AVPacket
This structure stores compressed data.
Definition: packet.h:512
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
bytestream.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:455
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
width
#define width
Definition: dsp.h:85
AV_PIX_FMT_BAYER_RGGB16
#define AV_PIX_FMT_BAYER_RGGB16
Definition: pixfmt.h:555
SampleIndexTable
@ SampleIndexTable
Definition: cfhd.h:32
cfhd_close
static av_cold int cfhd_close(AVCodecContext *avctx)
Definition: cfhd.c:1395
src
#define src
Definition: vp8dsp.c:248
init_frame_defaults
static void init_frame_defaults(CFHDContext *s)
Definition: cfhd.c:88
channel
channel
Definition: ebur128.h:39
PeakOffsetLow
@ PeakOffsetLow
Definition: cfhd.h:83
codebook
static const unsigned codebook[256][2]
Definition: cfhdenc.c:41
CFHDContext::a_height
int a_height
Definition: cfhd.h:158