FFmpeg
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include "config_components.h"
35 
36 #include <assert.h>
37 #include <stdint.h>
38 
39 #include "libavutil/emms.h"
40 #include "libavutil/internal.h"
41 #include "libavutil/intmath.h"
42 #include "libavutil/mathematics.h"
43 #include "libavutil/mem.h"
44 #include "libavutil/mem_internal.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/thread.h"
47 #include "avcodec.h"
48 #include "encode.h"
49 #include "idctdsp.h"
50 #include "mpeg12codecs.h"
51 #include "mpeg12data.h"
52 #include "mpeg12enc.h"
53 #include "mpegvideo.h"
54 #include "mpegvideodata.h"
55 #include "mpegvideoenc.h"
56 #include "h261enc.h"
57 #include "h263.h"
58 #include "h263data.h"
59 #include "h263enc.h"
60 #include "mjpegenc_common.h"
61 #include "mathops.h"
62 #include "mpegutils.h"
63 #include "mpegvideo_unquantize.h"
64 #include "mjpegenc.h"
65 #include "speedhqenc.h"
66 #include "msmpeg4enc.h"
67 #include "pixblockdsp.h"
68 #include "qpeldsp.h"
69 #include "faandct.h"
70 #include "aandcttab.h"
71 #include "mpeg4video.h"
72 #include "mpeg4videodata.h"
73 #include "mpeg4videoenc.h"
74 #include "internal.h"
75 #include "bytestream.h"
76 #include "rv10enc.h"
77 #include "libavutil/refstruct.h"
78 #include <limits.h>
79 #include "sp5x.h"
80 
81 #define QUANT_BIAS_SHIFT 8
82 
83 #define QMAT_SHIFT_MMX 16
84 #define QMAT_SHIFT 21
85 
86 static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt);
87 static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
88 static int sse_mb(MPVEncContext *const s);
89 static void denoise_dct_c(MPVEncContext *const s, int16_t *block);
90 static int dct_quantize_c(MPVEncContext *const s,
91  int16_t *block, int n,
92  int qscale, int *overflow);
93 static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow);
94 
95 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
96 
97 static const AVOption mpv_generic_options[] = {
100  { NULL },
101 };
102 
104  .class_name = "generic mpegvideo encoder",
105  .item_name = av_default_item_name,
106  .option = mpv_generic_options,
107  .version = LIBAVUTIL_VERSION_INT,
108 };
109 
110 void ff_convert_matrix(MPVEncContext *const s, int (*qmat)[64],
111  uint16_t (*qmat16)[2][64],
112  const uint16_t *quant_matrix,
113  int bias, int qmin, int qmax, int intra)
114 {
115  FDCTDSPContext *fdsp = &s->fdsp;
116  int qscale;
117  int shift = 0;
118 
119  for (qscale = qmin; qscale <= qmax; qscale++) {
120  int i;
121  int qscale2;
122 
123  if (s->c.q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
124  else qscale2 = qscale << 1;
125 
126  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
127 #if CONFIG_FAANDCT
128  fdsp->fdct == ff_faandct ||
129 #endif /* CONFIG_FAANDCT */
130  fdsp->fdct == ff_jpeg_fdct_islow_10) {
131  for (i = 0; i < 64; i++) {
132  const int j = s->c.idsp.idct_permutation[i];
133  int64_t den = (int64_t) qscale2 * quant_matrix[j];
134  /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
135  * Assume x = qscale2 * quant_matrix[j]
136  * 1 <= x <= 28560
137  * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
138  * 4194304 >= (1 << 22) / (x) >= 146 */
139 
140  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
141  }
142  } else if (fdsp->fdct == ff_fdct_ifast) {
143  for (i = 0; i < 64; i++) {
144  const int j = s->c.idsp.idct_permutation[i];
145  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
146  /* 1247 * 1 * 1 <= ff_aanscales[i] * qscale2 * quant_matrix[j] <= 31521 * 112 * 255
147  * Assume x = ff_aanscales[i] * qscale2 * quant_matrix[j]
148  * 1247 <= x <= 900239760
149  * (1 << 36) / 1247 >= (1 << 36) / (x) >= (1 << 36) / 900239760
150  * 55107840 >= (1 << 36) / (x) >= 76 */
151 
152  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
153  }
154  } else {
155  for (i = 0; i < 64; i++) {
156  const int j = s->c.idsp.idct_permutation[i];
157  int64_t den = (int64_t) qscale2 * quant_matrix[j];
158  /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
159  * Assume x = qscale2 * quant_matrix[j]
160  * 1 <= x <= 28560
161  * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
162  * 4194304 >= (1 << 22) / (x) >= 146
163  *
164  * 1 <= x <= 28560
165  * (1 << 17) / 1 >= (1 << 17) / (x) >= (1 << 17) / 28560
166  * 131072 >= (1 << 17) / (x) >= 4 */
167 
168  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
169  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
170 
171  if (qmat16[qscale][0][i] == 0 ||
172  qmat16[qscale][0][i] == 128 * 256)
173  qmat16[qscale][0][i] = 128 * 256 - 1;
174  qmat16[qscale][1][i] =
175  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
176  qmat16[qscale][0][i]);
177  }
178  }
179 
180  for (i = intra; i < 64; i++) {
181  int64_t max = 8191;
182  if (fdsp->fdct == ff_fdct_ifast) {
183  max = (8191LL * ff_aanscales[i]) >> 14;
184  }
185  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
186  shift++;
187  }
188  }
189  }
190  if (shift) {
191  av_log(s->c.avctx, AV_LOG_INFO,
192  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
193  QMAT_SHIFT - shift);
194  }
195 }
196 
197 static inline void update_qscale(MPVMainEncContext *const m)
198 {
199  MPVEncContext *const s = &m->s;
200 
201  if (s->c.q_scale_type == 1 && 0) {
202  int i;
203  int bestdiff=INT_MAX;
204  int best = 1;
205 
206  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
207  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
208  if (ff_mpeg2_non_linear_qscale[i] < s->c.avctx->qmin ||
209  (ff_mpeg2_non_linear_qscale[i] > s->c.avctx->qmax && !m->vbv_ignore_qmax))
210  continue;
211  if (diff < bestdiff) {
212  bestdiff = diff;
213  best = i;
214  }
215  }
216  s->c.qscale = best;
217  } else {
218  s->c.qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
219  (FF_LAMBDA_SHIFT + 7);
220  s->c.qscale = av_clip(s->c.qscale, s->c.avctx->qmin, m->vbv_ignore_qmax ? 31 : s->c.avctx->qmax);
221  }
222 
223  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
225 }
226 
228 {
229  int i;
230 
231  if (matrix) {
232  put_bits(pb, 1, 1);
233  for (i = 0; i < 64; i++) {
234  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
235  }
236  } else
237  put_bits(pb, 1, 0);
238 }
239 
240 /**
241  * init s->c.cur_pic.qscale_table from s->lambda_table
242  */
243 static void init_qscale_tab(MPVEncContext *const s)
244 {
245  int8_t *const qscale_table = s->c.cur_pic.qscale_table;
246 
247  for (int i = 0; i < s->c.mb_num; i++) {
248  unsigned int lam = s->lambda_table[s->c.mb_index2xy[i]];
249  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
250  qscale_table[s->c.mb_index2xy[i]] = av_clip(qp, s->c.avctx->qmin,
251  s->c.avctx->qmax);
252  }
253 }
254 
256  const MPVEncContext *const src)
257 {
258 #define COPY(a) dst->a = src->a
259  COPY(c.pict_type);
260  COPY(f_code);
261  COPY(b_code);
262  COPY(c.qscale);
263  COPY(lambda);
264  COPY(lambda2);
265  COPY(c.frame_pred_frame_dct); // FIXME don't set in encode_header
266  COPY(c.progressive_frame); // FIXME don't set in encode_header
267  COPY(partitioned_frame); // FIXME don't set in encode_header
268 #undef COPY
269 }
270 
272 {
273  for (int i = -16; i < 16; i++)
274  default_fcode_tab[i + MAX_MV] = 1;
275 }
276 
277 /**
278  * Set the given MPVEncContext to defaults for encoding.
279  */
281 {
282  MPVEncContext *const s = &m->s;
283  static AVOnce init_static_once = AV_ONCE_INIT;
284 
286 
287  s->f_code = 1;
288  s->b_code = 1;
289 
290  if (!m->fcode_tab) {
292  ff_thread_once(&init_static_once, mpv_encode_init_static);
293  }
294  if (!s->c.y_dc_scale_table) {
295  s->c.y_dc_scale_table =
296  s->c.c_dc_scale_table = ff_mpeg1_dc_scale_table;
297  }
298 }
299 
301 {
302  s->dct_quantize = dct_quantize_c;
303  s->denoise_dct = denoise_dct_c;
304 
305 #if ARCH_MIPS
307 #elif ARCH_X86
309 #endif
310 
311  if (s->c.avctx->trellis)
312  s->dct_quantize = dct_quantize_trellis_c;
313 }
314 
316 {
317  MpegEncContext *const s = &s2->c;
318  MPVUnquantDSPContext unquant_dsp_ctx;
319 
320  ff_mpv_unquantize_init(&unquant_dsp_ctx,
321  avctx->flags & AV_CODEC_FLAG_BITEXACT, s->q_scale_type);
322 
323  if (s2->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
324  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg2_intra;
325  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg2_inter;
326  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
327  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_h263_intra;
328  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_h263_inter;
329  } else {
330  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg1_intra;
331  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg1_inter;
332  }
333 }
334 
336 {
337  MPVEncContext *const s = &m->s;
338  MECmpContext mecc;
339  me_cmp_func me_cmp[6];
340  int ret;
341 
342  ff_me_cmp_init(&mecc, avctx);
343  ret = ff_me_init(&s->me, avctx, &mecc, 1);
344  if (ret < 0)
345  return ret;
346  ret = ff_set_cmp(&mecc, me_cmp, m->frame_skip_cmp, 1);
347  if (ret < 0)
348  return ret;
349  m->frame_skip_cmp_fn = me_cmp[1];
351  ret = ff_set_cmp(&mecc, me_cmp, avctx->ildct_cmp, 1);
352  if (ret < 0)
353  return ret;
354  if (!me_cmp[0] || !me_cmp[4])
355  return AVERROR(EINVAL);
356  s->ildct_cmp[0] = me_cmp[0];
357  s->ildct_cmp[1] = me_cmp[4];
358  }
359 
360  s->sum_abs_dctelem = mecc.sum_abs_dctelem;
361 
362  s->sse_cmp[0] = mecc.sse[0];
363  s->sse_cmp[1] = mecc.sse[1];
364  s->sad_cmp[0] = mecc.sad[0];
365  s->sad_cmp[1] = mecc.sad[1];
366  if (avctx->mb_cmp == FF_CMP_NSSE) {
367  s->n_sse_cmp[0] = mecc.nsse[0];
368  s->n_sse_cmp[1] = mecc.nsse[1];
369  } else {
370  s->n_sse_cmp[0] = mecc.sse[0];
371  s->n_sse_cmp[1] = mecc.sse[1];
372  }
373 
374  return 0;
375 }
376 
377 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
379 {
380  MPVEncContext *const s = &m->s;
381  const int nb_matrices = 1 + (s->c.out_format == FMT_MJPEG) + !m->intra_only;
382  const uint16_t *intra_matrix, *inter_matrix;
383  int ret;
384 
385  if (!ALLOCZ_ARRAYS(s->q_intra_matrix, 32, nb_matrices) ||
386  !ALLOCZ_ARRAYS(s->q_intra_matrix16, 32, nb_matrices))
387  return AVERROR(ENOMEM);
388 
389  if (s->c.out_format == FMT_MJPEG) {
390  s->q_chroma_intra_matrix = s->q_intra_matrix + 32;
391  s->q_chroma_intra_matrix16 = s->q_intra_matrix16 + 32;
392  // No need to set q_inter_matrix
394  // intra_matrix, chroma_intra_matrix will be set later for MJPEG.
395  return 0;
396  } else {
397  s->q_chroma_intra_matrix = s->q_intra_matrix;
398  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
399  }
400  if (!m->intra_only) {
401  s->q_inter_matrix = s->q_intra_matrix + 32;
402  s->q_inter_matrix16 = s->q_intra_matrix16 + 32;
403  }
404 
405  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 &&
406  s->mpeg_quant) {
409  } else if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
410  intra_matrix =
412  } else {
413  /* MPEG-1/2, SpeedHQ */
416  }
417  if (avctx->intra_matrix)
419  if (avctx->inter_matrix)
421 
422  /* init q matrix */
423  for (int i = 0; i < 64; i++) {
424  int j = s->c.idsp.idct_permutation[i];
425 
426  s->c.intra_matrix[j] = s->c.chroma_intra_matrix[j] = intra_matrix[i];
427  s->c.inter_matrix[j] = inter_matrix[i];
428  }
429 
430  /* precompute matrix */
432  if (ret < 0)
433  return ret;
434 
435  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
436  s->c.intra_matrix, s->intra_quant_bias, avctx->qmin,
437  31, 1);
438  if (s->q_inter_matrix)
439  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
440  s->c.inter_matrix, s->inter_quant_bias, avctx->qmin,
441  31, 0);
442 
443  return 0;
444 }
445 
447 {
448  MPVEncContext *const s = &m->s;
449  int has_b_frames = !!m->max_b_frames;
450  int16_t (*mv_table)[2];
451 
452  /* Allocate MB type table */
453  unsigned mb_array_size = s->c.mb_stride * s->c.mb_height;
454  s->mb_type = av_calloc(mb_array_size, 3 * sizeof(*s->mb_type) + sizeof(*s->mb_mean));
455  if (!s->mb_type)
456  return AVERROR(ENOMEM);
457  s->mc_mb_var = s->mb_type + mb_array_size;
458  s->mb_var = s->mc_mb_var + mb_array_size;
459  s->mb_mean = (uint8_t*)(s->mb_var + mb_array_size);
460 
461  if (!FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size))
462  return AVERROR(ENOMEM);
463 
464  unsigned mv_table_size = (s->c.mb_height + 2) * s->c.mb_stride + 1;
465  unsigned nb_mv_tables = 1 + 5 * has_b_frames;
466  if (s->c.codec_id == AV_CODEC_ID_MPEG4 ||
467  (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
468  nb_mv_tables += 8 * has_b_frames;
469  s->p_field_select_table[0] = av_calloc(mv_table_size, 2 * (2 + 4 * has_b_frames));
470  if (!s->p_field_select_table[0])
471  return AVERROR(ENOMEM);
472  s->p_field_select_table[1] = s->p_field_select_table[0] + 2 * mv_table_size;
473  }
474 
475  mv_table = av_calloc(mv_table_size, nb_mv_tables * sizeof(*mv_table));
476  if (!mv_table)
477  return AVERROR(ENOMEM);
478  m->mv_table_base = mv_table;
479  mv_table += s->c.mb_stride + 1;
480 
481  s->p_mv_table = mv_table;
482  if (has_b_frames) {
483  s->b_forw_mv_table = mv_table += mv_table_size;
484  s->b_back_mv_table = mv_table += mv_table_size;
485  s->b_bidir_forw_mv_table = mv_table += mv_table_size;
486  s->b_bidir_back_mv_table = mv_table += mv_table_size;
487  s->b_direct_mv_table = mv_table += mv_table_size;
488 
489  if (s->p_field_select_table[1]) { // MPEG-4 or INTERLACED_ME above
490  uint8_t *field_select = s->p_field_select_table[1];
491  for (int j = 0; j < 2; j++) {
492  for (int k = 0; k < 2; k++) {
493  for (int l = 0; l < 2; l++)
494  s->b_field_mv_table[j][k][l] = mv_table += mv_table_size;
495  s->b_field_select_table[j][k] = field_select += 2 * mv_table_size;
496  }
497  }
498  }
499  }
500 
501  return 0;
502 }
503 
505 {
506  MPVEncContext *const s = &m->s;
507  // Align the following per-thread buffers to avoid false sharing.
508  enum {
509 #ifndef _MSC_VER
510  /// The number is supposed to match/exceed the cache-line size.
511  ALIGN = FFMAX(128, _Alignof(max_align_t)),
512 #else
513  ALIGN = 128,
514 #endif
515  DCT_ERROR_SIZE = FFALIGN(2 * sizeof(*s->dct_error_sum), ALIGN),
516  };
517  static_assert(DCT_ERROR_SIZE * MAX_THREADS + ALIGN - 1 <= SIZE_MAX,
518  "Need checks for potential overflow.");
519  unsigned nb_slices = s->c.slice_context_count;
520  char *dct_error = NULL;
521 
522  if (m->noise_reduction) {
523  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
524  return AVERROR(ENOMEM);
525  dct_error = av_mallocz(ALIGN - 1 + nb_slices * DCT_ERROR_SIZE);
526  if (!dct_error)
527  return AVERROR(ENOMEM);
529  dct_error += FFALIGN((uintptr_t)dct_error, ALIGN) - (uintptr_t)dct_error;
530  }
531 
532  const int y_size = s->c.b8_stride * (2 * s->c.mb_height + 1);
533  const int c_size = s->c.mb_stride * (s->c.mb_height + 1);
534  const int yc_size = y_size + 2 * c_size;
535  ptrdiff_t offset = 0;
536 
537  for (unsigned i = 0; i < nb_slices; ++i) {
538  MPVEncContext *const s2 = s->c.enc_contexts[i];
539 
540  s2->block = s2->blocks[0];
541 
542  if (dct_error) {
543  s2->dct_offset = s->dct_offset;
544  s2->dct_error_sum = (void*)dct_error;
545  dct_error += DCT_ERROR_SIZE;
546  }
547 
548  if (s2->c.ac_val) {
549  s2->c.dc_val += offset + i;
550  s2->c.ac_val += offset;
551  offset += yc_size;
552  }
553  }
554  return 0;
555 }
556 
557 /* init video encoder */
559 {
560  MPVMainEncContext *const m = avctx->priv_data;
561  MPVEncContext *const s = &m->s;
562  AVCPBProperties *cpb_props;
563  int gcd, ret;
564 
566 
567  switch (avctx->pix_fmt) {
568  case AV_PIX_FMT_YUVJ444P:
569  case AV_PIX_FMT_YUV444P:
570  s->c.chroma_format = CHROMA_444;
571  break;
572  case AV_PIX_FMT_YUVJ422P:
573  case AV_PIX_FMT_YUV422P:
574  s->c.chroma_format = CHROMA_422;
575  break;
576  default:
577  av_unreachable("Already checked via CODEC_PIXFMTS");
578  case AV_PIX_FMT_YUVJ420P:
579  case AV_PIX_FMT_YUV420P:
580  s->c.chroma_format = CHROMA_420;
581  break;
582  }
583 
585 
586  m->bit_rate = avctx->bit_rate;
587  s->c.width = avctx->width;
588  s->c.height = avctx->height;
589  if (avctx->gop_size > 600 &&
592  "keyframe interval too large!, reducing it from %d to %d\n",
593  avctx->gop_size, 600);
594  avctx->gop_size = 600;
595  }
596  m->gop_size = avctx->gop_size;
597  s->c.avctx = avctx;
599  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
600  "is " AV_STRINGIFY(MPVENC_MAX_B_FRAMES) ".\n");
602  } else if (avctx->max_b_frames < 0) {
604  "max b frames must be 0 or positive for mpegvideo based encoders\n");
605  return AVERROR(EINVAL);
606  }
608  s->c.codec_id = avctx->codec->id;
610  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
611  return AVERROR(EINVAL);
612  }
613 
614  s->c.quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
615  s->rtp_mode = !!s->rtp_payload_size;
616  s->c.intra_dc_precision = avctx->intra_dc_precision;
617 
618  // workaround some differences between how applications specify dc precision
619  if (s->c.intra_dc_precision < 0) {
620  s->c.intra_dc_precision += 8;
621  } else if (s->c.intra_dc_precision >= 8)
622  s->c.intra_dc_precision -= 8;
623 
624  if (s->c.intra_dc_precision < 0) {
626  "intra dc precision must be positive, note some applications use"
627  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
628  return AVERROR(EINVAL);
629  }
630 
631  if (s->c.intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
632  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
633  return AVERROR(EINVAL);
634  }
636 
637  if (m->gop_size <= 1) {
638  m->intra_only = 1;
639  m->gop_size = 12;
640  } else {
641  m->intra_only = 0;
642  }
643 
644  /* Fixed QSCALE */
646 
647  s->adaptive_quant = (avctx->lumi_masking ||
648  avctx->dark_masking ||
651  avctx->p_masking ||
652  m->border_masking ||
653  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
654  !m->fixed_qscale;
655 
656  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
657 
659  switch(avctx->codec_id) {
662  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
663  break;
664  case AV_CODEC_ID_MPEG4:
668  if (avctx->rc_max_rate >= 15000000) {
669  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
670  } else if(avctx->rc_max_rate >= 2000000) {
671  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
672  } else if(avctx->rc_max_rate >= 384000) {
673  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
674  } else
675  avctx->rc_buffer_size = 40;
676  avctx->rc_buffer_size *= 16384;
677  break;
678  }
679  if (avctx->rc_buffer_size) {
680  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
681  }
682  }
683 
684  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
685  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
686  return AVERROR(EINVAL);
687  }
688 
691  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
692  }
693 
695  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
696  return AVERROR(EINVAL);
697  }
698 
700  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
701  return AVERROR(EINVAL);
702  }
703 
704  if (avctx->rc_max_rate &&
708  "impossible bitrate constraints, this will fail\n");
709  }
710 
711  if (avctx->rc_buffer_size &&
714  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
715  return AVERROR(EINVAL);
716  }
717 
718  if (!m->fixed_qscale &&
721  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
723  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
724  if (nbt <= INT_MAX) {
725  avctx->bit_rate_tolerance = nbt;
726  } else
727  avctx->bit_rate_tolerance = INT_MAX;
728  }
729 
730  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->c.codec_id != AV_CODEC_ID_MPEG4 &&
731  s->c.codec_id != AV_CODEC_ID_H263 && s->c.codec_id != AV_CODEC_ID_H263P &&
732  s->c.codec_id != AV_CODEC_ID_FLV1) {
733  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
734  return AVERROR(EINVAL);
735  }
736 
737  if (s->c.obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
739  "OBMC is only supported with simple mb decision\n");
740  return AVERROR(EINVAL);
741  }
742 
743  if (s->c.quarter_sample && s->c.codec_id != AV_CODEC_ID_MPEG4) {
744  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
745  return AVERROR(EINVAL);
746  }
747 
748  if ((s->c.codec_id == AV_CODEC_ID_MPEG4 ||
749  s->c.codec_id == AV_CODEC_ID_H263 ||
750  s->c.codec_id == AV_CODEC_ID_H263P) &&
751  (avctx->sample_aspect_ratio.num > 255 ||
752  avctx->sample_aspect_ratio.den > 255)) {
754  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
758  }
759 
760  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
761  s->c.codec_id == AV_CODEC_ID_H263P) &&
762  (avctx->width > 2048 ||
763  avctx->height > 1152 )) {
764  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
765  return AVERROR(EINVAL);
766  }
767  if (s->c.codec_id == AV_CODEC_ID_FLV1 &&
768  (avctx->width > 65535 ||
769  avctx->height > 65535 )) {
770  av_log(avctx, AV_LOG_ERROR, "FLV does not support resolutions above 16bit\n");
771  return AVERROR(EINVAL);
772  }
773  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
774  s->c.codec_id == AV_CODEC_ID_H263P ||
775  s->c.codec_id == AV_CODEC_ID_RV20) &&
776  ((avctx->width &3) ||
777  (avctx->height&3) )) {
778  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
779  return AVERROR(EINVAL);
780  }
781 
782  if (s->c.codec_id == AV_CODEC_ID_RV10 &&
783  (avctx->width &15 ||
784  avctx->height&15 )) {
785  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
786  return AVERROR(EINVAL);
787  }
788 
789  if ((s->c.codec_id == AV_CODEC_ID_WMV1 ||
790  s->c.codec_id == AV_CODEC_ID_WMV2) &&
791  avctx->width & 1) {
792  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
793  return AVERROR(EINVAL);
794  }
795 
797  s->c.codec_id != AV_CODEC_ID_MPEG4 && s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO) {
798  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
799  return AVERROR(EINVAL);
800  }
801 
802  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
803  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
804  return AVERROR(EINVAL);
805  }
806 
807  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
809  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=rd\n");
810  return AVERROR(EINVAL);
811  }
812 
813  if (m->scenechange_threshold < 1000000000 &&
816  "closed gop with scene change detection are not supported yet, "
817  "set threshold to 1000000000\n");
818  return AVERROR_PATCHWELCOME;
819  }
820 
822  if (s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO &&
825  "low delay forcing is only available for mpeg2, "
826  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
827  return AVERROR(EINVAL);
828  }
829  if (m->max_b_frames != 0) {
831  "B-frames cannot be used with low delay\n");
832  return AVERROR(EINVAL);
833  }
834  }
835 
836  if (avctx->slices > 1 &&
838  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
839  return AVERROR(EINVAL);
840  }
841 
844  "notice: b_frame_strategy only affects the first pass\n");
845  m->b_frame_strategy = 0;
846  }
847 
849  if (gcd > 1) {
850  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
851  avctx->time_base.den /= gcd;
852  avctx->time_base.num /= gcd;
853  //return -1;
854  }
855 
856  if (s->mpeg_quant || s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO || s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO || s->c.codec_id == AV_CODEC_ID_MJPEG || s->c.codec_id == AV_CODEC_ID_AMV || s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
857  // (a + x * 3 / 8) / x
858  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
859  s->inter_quant_bias = 0;
860  } else {
861  s->intra_quant_bias = 0;
862  // (a - x / 4) / x
863  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
864  }
865 
866  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
867  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
868  return AVERROR(EINVAL);
869  }
870 
871  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
872 
873  switch (avctx->codec->id) {
874 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
876  s->rtp_mode = 1;
877  /* fallthrough */
879  s->c.out_format = FMT_MPEG1;
880  s->c.low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
881  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
883  break;
884 #endif
885 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
886  case AV_CODEC_ID_MJPEG:
887  case AV_CODEC_ID_AMV:
888  s->c.out_format = FMT_MJPEG;
889  m->intra_only = 1; /* force intra only for jpeg */
890  avctx->delay = 0;
891  s->c.low_delay = 1;
892  break;
893 #endif
894  case AV_CODEC_ID_SPEEDHQ:
895  s->c.out_format = FMT_SPEEDHQ;
896  m->intra_only = 1; /* force intra only for SHQ */
897  avctx->delay = 0;
898  s->c.low_delay = 1;
899  break;
900  case AV_CODEC_ID_H261:
901  s->c.out_format = FMT_H261;
902  avctx->delay = 0;
903  s->c.low_delay = 1;
904  s->rtp_mode = 0; /* Sliced encoding not supported */
905  break;
906  case AV_CODEC_ID_H263:
907  if (!CONFIG_H263_ENCODER)
910  s->c.width, s->c.height) == 8) {
912  "The specified picture size of %dx%d is not valid for "
913  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
914  "352x288, 704x576, and 1408x1152. "
915  "Try H.263+.\n", s->c.width, s->c.height);
916  return AVERROR(EINVAL);
917  }
918  s->c.out_format = FMT_H263;
919  avctx->delay = 0;
920  s->c.low_delay = 1;
921  break;
922  case AV_CODEC_ID_H263P:
923  s->c.out_format = FMT_H263;
924  /* Fx */
925  s->c.h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
926  s->modified_quant = s->c.h263_aic;
927  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
928  s->me.unrestricted_mv = s->c.obmc || s->loop_filter || s->umvplus;
929  s->flipflop_rounding = 1;
930 
931  /* /Fx */
932  /* These are just to be sure */
933  avctx->delay = 0;
934  s->c.low_delay = 1;
935  break;
936  case AV_CODEC_ID_FLV1:
937  s->c.out_format = FMT_H263;
938  s->me.unrestricted_mv = 1;
939  s->rtp_mode = 0; /* don't allow GOB */
940  avctx->delay = 0;
941  s->c.low_delay = 1;
942  break;
943 #if CONFIG_RV10_ENCODER
944  case AV_CODEC_ID_RV10:
946  s->c.out_format = FMT_H263;
947  avctx->delay = 0;
948  s->c.low_delay = 1;
949  break;
950 #endif
951 #if CONFIG_RV20_ENCODER
952  case AV_CODEC_ID_RV20:
954  s->c.out_format = FMT_H263;
955  avctx->delay = 0;
956  s->c.low_delay = 1;
957  s->modified_quant = 1;
958  // Set here to force allocation of dc_val;
959  // will be set later on a per-frame basis.
960  s->c.h263_aic = 1;
961  s->loop_filter = 1;
962  s->me.unrestricted_mv = 0;
963  break;
964 #endif
965  case AV_CODEC_ID_MPEG4:
966  s->c.out_format = FMT_H263;
967  s->c.h263_pred = 1;
968  s->me.unrestricted_mv = 1;
969  s->flipflop_rounding = 1;
970  s->c.low_delay = m->max_b_frames ? 0 : 1;
971  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
972  break;
974  s->c.out_format = FMT_H263;
975  s->c.h263_pred = 1;
976  s->me.unrestricted_mv = 1;
977  s->c.msmpeg4_version = MSMP4_V2;
978  avctx->delay = 0;
979  s->c.low_delay = 1;
980  break;
982  s->c.out_format = FMT_H263;
983  s->c.h263_pred = 1;
984  s->me.unrestricted_mv = 1;
985  s->c.msmpeg4_version = MSMP4_V3;
986  s->flipflop_rounding = 1;
987  avctx->delay = 0;
988  s->c.low_delay = 1;
989  break;
990  case AV_CODEC_ID_WMV1:
991  s->c.out_format = FMT_H263;
992  s->c.h263_pred = 1;
993  s->me.unrestricted_mv = 1;
994  s->c.msmpeg4_version = MSMP4_WMV1;
995  s->flipflop_rounding = 1;
996  avctx->delay = 0;
997  s->c.low_delay = 1;
998  break;
999  case AV_CODEC_ID_WMV2:
1000  s->c.out_format = FMT_H263;
1001  s->c.h263_pred = 1;
1002  s->me.unrestricted_mv = 1;
1003  s->c.msmpeg4_version = MSMP4_WMV2;
1004  s->flipflop_rounding = 1;
1005  avctx->delay = 0;
1006  s->c.low_delay = 1;
1007  break;
1008  default:
1009  av_unreachable("List contains all codecs using ff_mpv_encode_init()");
1010  }
1011 
1012  avctx->has_b_frames = !s->c.low_delay;
1013 
1014  s->c.encoding = 1;
1015 
1016  s->c.progressive_frame =
1017  s->c.progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
1019  s->c.alternate_scan);
1020 
1023  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
1024  (1 << AV_PICTURE_TYPE_P) |
1025  (1 << AV_PICTURE_TYPE_B);
1026  } else if (!m->intra_only) {
1027  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
1028  (1 << AV_PICTURE_TYPE_P);
1029  } else {
1030  s->frame_reconstruction_bitfield = 0;
1031  }
1032 
1033  if (m->lmin > m->lmax) {
1034  av_log(avctx, AV_LOG_WARNING, "Clipping lmin value to %d\n", m->lmax);
1035  m->lmin = m->lmax;
1036  }
1037 
1038  /* ff_mpv_init_duplicate_contexts() will copy (memdup) the contents of the
1039  * main slice to the slice contexts, so we initialize various fields of it
1040  * before calling ff_mpv_init_duplicate_contexts(). */
1041  s->parent = m;
1042  ff_mpv_idct_init(&s->c);
1044  ff_fdctdsp_init(&s->fdsp, avctx);
1045  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
1046  ff_pixblockdsp_init(&s->pdsp, 8);
1047  ret = me_cmp_init(m, avctx);
1048  if (ret < 0)
1049  return ret;
1050 
1051  if (!(avctx->stats_out = av_mallocz(256)) ||
1052  !(s->new_pic = av_frame_alloc()) ||
1053  !(s->c.picture_pool = ff_mpv_alloc_pic_pool(0)))
1054  return AVERROR(ENOMEM);
1055 
1056  ret = init_matrices(m, avctx);
1057  if (ret < 0)
1058  return ret;
1059 
1061 
1062  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263) {
1064 #if CONFIG_MSMPEG4ENC
1065  if (s->c.msmpeg4_version != MSMP4_UNUSED)
1067 #endif
1068  }
1069 
1070  s->c.slice_ctx_size = sizeof(*s);
1071  ret = ff_mpv_common_init(&s->c);
1072  if (ret < 0)
1073  return ret;
1074  ret = init_buffers(m);
1075  if (ret < 0)
1076  return ret;
1077  if (s->c.slice_context_count > 1) {
1078  s->rtp_mode = 1;
1080  s->h263_slice_structured = 1;
1081  }
1083  if (ret < 0)
1084  return ret;
1085 
1086  ret = init_slice_buffers(m);
1087  if (ret < 0)
1088  return ret;
1089 
1091  if (ret < 0)
1092  return ret;
1093 
1094  if (m->b_frame_strategy == 2) {
1095  for (int i = 0; i < m->max_b_frames + 2; i++) {
1096  m->tmp_frames[i] = av_frame_alloc();
1097  if (!m->tmp_frames[i])
1098  return AVERROR(ENOMEM);
1099 
1101  m->tmp_frames[i]->width = s->c.width >> m->brd_scale;
1102  m->tmp_frames[i]->height = s->c.height >> m->brd_scale;
1103 
1104  ret = av_frame_get_buffer(m->tmp_frames[i], 0);
1105  if (ret < 0)
1106  return ret;
1107  }
1108  }
1109 
1110  cpb_props = ff_encode_add_cpb_side_data(avctx);
1111  if (!cpb_props)
1112  return AVERROR(ENOMEM);
1113  cpb_props->max_bitrate = avctx->rc_max_rate;
1114  cpb_props->min_bitrate = avctx->rc_min_rate;
1115  cpb_props->avg_bitrate = avctx->bit_rate;
1116  cpb_props->buffer_size = avctx->rc_buffer_size;
1117 
1118  return 0;
1119 }
1120 
/* Encoder teardown (function signature lost in this listing — presumably
 * ff_mpv_encode_end(AVCodecContext *avctx); verify against the complete file).
 * Frees everything the init path allocated; always returns 0.
 * NOTE(review): the extraction dropped lines 1126, 1132-1133, 1140 and 1144,
 * so some cleanup calls (e.g. the body of the B-frame buffer loop) are not
 * visible here. */
1122 {
1123  MPVMainEncContext *const m = avctx->priv_data;
1124  MPVEncContext *const s = &m->s;
1125 
1127 
     /* tear down the shared mpegvideo state and the reusable picture pool */
1128  ff_mpv_common_end(&s->c);
1129  av_refstruct_pool_uninit(&s->c.picture_pool);
1130 
1131  for (int i = 0; i < MPVENC_MAX_B_FRAMES + 1; i++) {
1134  }
     /* frames used by the b_frame_strategy==2 lookahead */
1135  for (int i = 0; i < FF_ARRAY_ELEMS(m->tmp_frames); i++)
1136  av_frame_free(&m->tmp_frames[i]);
1137 
1138  av_frame_free(&s->new_pic);
1139 
1141 
     /* motion-estimation tables and per-MB metadata */
1142  av_freep(&m->mv_table_base);
1143  av_freep(&s->p_field_select_table[0]);
1145 
1146  av_freep(&s->mb_type);
1147  av_freep(&s->lambda_table);
1148 
     /* quantization matrices and noise-reduction offsets */
1149  av_freep(&s->q_intra_matrix);
1150  av_freep(&s->q_intra_matrix16);
1151  av_freep(&s->dct_offset);
1152 
1153  return 0;
1154 }
1155 
1156 /* put block[] to dest[] */
1157 static inline void put_dct(MPVEncContext *const s,
1158  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1159 {
1160  s->c.dct_unquantize_intra(&s->c, block, i, qscale);
1161  s->c.idsp.idct_put(dest, line_size, block);
1162 }
1163 
1164 static inline void add_dequant_dct(MPVEncContext *const s,
1165  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1166 {
1167  if (s->c.block_last_index[i] >= 0) {
1168  s->c.dct_unquantize_inter(&s->c, block, i, qscale);
1169 
1170  s->c.idsp.idct_add(dest, line_size, block);
1171  }
1172 }
1173 
/**
 * Reconstruct one macroblock on the encoder side: dequantize the coded
 * coefficient blocks and run the IDCT, writing (intra) or adding (inter)
 * the result into the current picture at s->c.dest[].
 * Reconstruction is only performed for picture types enabled in
 * s->frame_reconstruction_bitfield; otherwise the function only handles
 * the optional coefficient debug dump.
 */
1177 static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
1178 {
1179  if (s->c.avctx->debug & FF_DEBUG_DCT_COEFF) {
1180  /* print DCT coefficients */
1181  av_log(s->c.avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->c.mb_x, s->c.mb_y);
1182  for (int i = 0; i < 6; i++) {
1183  for (int j = 0; j < 64; j++) {
1184  av_log(s->c.avctx, AV_LOG_DEBUG, "%5d",
1185  block[i][s->c.idsp.idct_permutation[j]]);
1186  }
1187  av_log(s->c.avctx, AV_LOG_DEBUG, "\n");
1188  }
1189  }
1190 
1191  if ((1 << s->c.pict_type) & s->frame_reconstruction_bitfield) {
1192  uint8_t *dest_y = s->c.dest[0], *dest_cb = s->c.dest[1], *dest_cr = s->c.dest[2];
1193  int dct_linesize, dct_offset;
1194  const int linesize = s->c.cur_pic.linesize[0];
1195  const int uvlinesize = s->c.cur_pic.linesize[1];
1196  const int block_size = 8;
1197 
     /* interlaced DCT: fields are interleaved, so double the stride and
      * start the bottom 8x8 blocks one line (not 8 lines) down */
1198  dct_linesize = linesize << s->c.interlaced_dct;
1199  dct_offset = s->c.interlaced_dct ? linesize : linesize * block_size;
1200 
1201  if (!s->c.mb_intra) {
1202  /* No MC, as that was already done otherwise */
1203  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->c.qscale);
1204  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->c.qscale);
1205  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->c.qscale);
1206  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);
1207 
1208  if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
1209  if (s->c.chroma_y_shift) {
     /* 4:2:0 — one 8x8 chroma block per plane */
1210  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
1211  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
1212  } else {
     /* 4:2:2 — two stacked chroma blocks per plane */
1213  dct_linesize >>= 1;
1214  dct_offset >>= 1;
1215  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->c.chroma_qscale);
1216  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->c.chroma_qscale);
1217  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
1218  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
1219  }
1220  }
1221  } else {
1222  /* dct only in intra block */
1223  put_dct(s, block[0], 0, dest_y , dct_linesize, s->c.qscale);
1224  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->c.qscale);
1225  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->c.qscale);
1226  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);
1227 
1228  if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
1229  if (s->c.chroma_y_shift) {
1230  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
1231  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
1232  } else {
1233  dct_offset >>= 1;
1234  dct_linesize >>= 1;
1235  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->c.chroma_qscale);
1236  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->c.chroma_qscale);
1237  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
1238  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
1239  }
1240  }
1241  }
1242  }
1243 }
1244 
/**
 * Sum of absolute errors of a 16x16 block against a constant reference
 * value (used as a flatness measure relative to the block mean).
 */
static int get_sae(const uint8_t *src, int ref, int stride)
{
    int sum = 0;

    for (int y = 0; y < 16; y++) {
        const uint8_t *row = src + y * stride;
        for (int x = 0; x < 16; x++) {
            int d = row[x] - ref;
            sum += d < 0 ? -d : d;
        }
    }

    return sum;
}
1258 
1259 static int get_intra_count(MPVEncContext *const s, const uint8_t *src,
1260  const uint8_t *ref, int stride)
1261 {
1262  int x, y, w, h;
1263  int acc = 0;
1264 
1265  w = s->c.width & ~15;
1266  h = s->c.height & ~15;
1267 
1268  for (y = 0; y < h; y += 16) {
1269  for (x = 0; x < w; x += 16) {
1270  int offset = x + y * stride;
1271  int sad = s->sad_cmp[0](NULL, src + offset, ref + offset,
1272  stride, 16);
1273  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1274  int sae = get_sae(src + offset, mean, stride);
1275 
1276  acc += sae + 500 < sad;
1277  }
1278  }
1279  return acc;
1280 }
1281 
/**
 * Allocates new buffers for an AVFrame and copies the properties
 * from another AVFrame.
 *
 * The frame is temporarily enlarged by 2*EDGE_WIDTH in both dimensions so
 * the allocation includes the edge padding; data pointers are then moved
 * inside the padded area and the visible size restored.
 * NOTE(review): this listing lost line 1294 — presumably the call that
 * actually allocates the frame buffers, whose return value is checked
 * below; verify against the complete file.
 */
1286 static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
1287 {
1288  AVCodecContext *avctx = s->c.avctx;
1289  int ret;
1290 
     /* allocate with room for the edge padding on all four sides */
1291  f->width = avctx->width + 2 * EDGE_WIDTH;
1292  f->height = avctx->height + 2 * EDGE_WIDTH;
1293 
     /* (missing line: buffer allocation that sets ret) */
1295  if (ret < 0)
1296  return ret;
1297 
1298  ret = ff_mpv_pic_check_linesize(avctx, f, &s->c.linesize, &s->c.uvlinesize);
1299  if (ret < 0)
1300  return ret;
1301 
     /* shift each plane pointer past the top/left edge region; chroma
      * planes use the chroma shifts for the edge size */
1302  for (int i = 0; f->data[i]; i++) {
1303  int offset = (EDGE_WIDTH >> (i ? s->c.chroma_y_shift : 0)) *
1304  f->linesize[i] +
1305  (EDGE_WIDTH >> (i ? s->c.chroma_x_shift : 0));
1306  f->data[i] += offset;
1307  }
1308  f->width = avctx->width;
1309  f->height = avctx->height;
1310 
1311  ret = av_frame_copy_props(f, props_frame);
1312  if (ret < 0)
1313  return ret;
1314 
1315  return 0;
1316 }
1317 
/**
 * Accept one user-supplied frame (or NULL at flush time) into the encoder's
 * input queue m->input_picture[]. Validates/guesses the pts, decides whether
 * the user frame can be referenced directly ("direct") or must be copied
 * into an internally padded buffer, and shifts the queue by one slot.
 * Returns 0 on success, a negative AVERROR code on failure.
 */
1318 static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
1319 {
1320  MPVEncContext *const s = &m->s;
1321  MPVPicture *pic = NULL;
1322  int64_t pts;
1323  int display_picture_number = 0, ret;
1324  int encoding_delay = m->max_b_frames ? m->max_b_frames
1325  : (s->c.low_delay ? 0 : 1);
1326  int flush_offset = 1;
1327  int direct = 1;
1328 
1329  av_assert1(!m->input_picture[0]);
1330 
1331  if (pic_arg) {
1332  pts = pic_arg->pts;
1333  display_picture_number = m->input_picture_number++;
1334 
     /* pts sanity check: must be strictly increasing; derive dts_delta
      * from the first two frames when reordering delay exists */
1335  if (pts != AV_NOPTS_VALUE) {
1336  if (m->user_specified_pts != AV_NOPTS_VALUE) {
1337  int64_t last = m->user_specified_pts;
1338 
1339  if (pts <= last) {
1340  av_log(s->c.avctx, AV_LOG_ERROR,
1341  "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1342  pts, last);
1343  return AVERROR(EINVAL);
1344  }
1345 
1346  if (!s->c.low_delay && display_picture_number == 1)
1347  m->dts_delta = pts - last;
1348  }
1349  m->user_specified_pts = pts;
1350  } else {
     /* no pts supplied: continue the previous sequence or fall back
      * to the display picture number */
1351  if (m->user_specified_pts != AV_NOPTS_VALUE) {
1352  m->user_specified_pts =
1353  pts = m->user_specified_pts + 1;
1354  av_log(s->c.avctx, AV_LOG_INFO,
1355  "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1356  pts);
1357  } else {
1358  pts = display_picture_number;
1359  }
1360  }
1361 
     /* "direct" mode (referencing the user frame without a copy) requires
      * matching strides, MB-aligned dimensions and aligned pointers */
1362  if (pic_arg->linesize[0] != s->c.linesize ||
1363  pic_arg->linesize[1] != s->c.uvlinesize ||
1364  pic_arg->linesize[2] != s->c.uvlinesize)
1365  direct = 0;
1366  if ((s->c.width & 15) || (s->c.height & 15))
1367  direct = 0;
1368  if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1369  direct = 0;
1370  if (s->c.linesize & (STRIDE_ALIGN-1))
1371  direct = 0;
1372 
1373  ff_dlog(s->c.avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1374  pic_arg->linesize[1], s->c.linesize, s->c.uvlinesize);
1375 
1376  pic = av_refstruct_pool_get(s->c.picture_pool);
1377  if (!pic)
1378  return AVERROR(ENOMEM);
1379 
1380  if (direct) {
1381  if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1382  goto fail;
1383  pic->shared = 1;
1384  } else {
     /* copy path: allocate a padded internal frame and copy plane by plane */
1385  ret = prepare_picture(s, pic->f, pic_arg);
1386  if (ret < 0)
1387  goto fail;
1388 
1389  for (int i = 0; i < 3; i++) {
1390  ptrdiff_t src_stride = pic_arg->linesize[i];
1391  ptrdiff_t dst_stride = i ? s->c.uvlinesize : s->c.linesize;
1392  int h_shift = i ? s->c.chroma_x_shift : 0;
1393  int v_shift = i ? s->c.chroma_y_shift : 0;
1394  int w = AV_CEIL_RSHIFT(s->c.width , h_shift);
1395  int h = AV_CEIL_RSHIFT(s->c.height, v_shift);
1396  const uint8_t *src = pic_arg->data[i];
1397  uint8_t *dst = pic->f->data[i];
1398  int vpad = 16;
1399 
     /* interlaced MPEG-2 with a tall alignment gap needs deeper
      * bottom padding */
1400  if ( s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO
1401  && !s->c.progressive_sequence
1402  && FFALIGN(s->c.height, 32) - s->c.height > 16)
1403  vpad = 32;
1404 
1405  if (!s->c.avctx->rc_buffer_size)
1406  dst += INPLACE_OFFSET;
1407 
1408  if (src_stride == dst_stride)
1409  memcpy(dst, src, src_stride * h - src_stride + w);
1410  else {
1411  int h2 = h;
1412  uint8_t *dst2 = dst;
1413  while (h2--) {
1414  memcpy(dst2, src, w);
1415  dst2 += dst_stride;
1416  src += src_stride;
1417  }
1418  }
     /* replicate the last pixels into the alignment padding */
1419  if ((s->c.width & 15) || (s->c.height & (vpad-1))) {
1420  s->mpvencdsp.draw_edges(dst, dst_stride,
1421  w, h,
1422  16 >> h_shift,
1423  vpad >> v_shift,
1424  EDGE_BOTTOM);
1425  }
1426  }
1427  emms_c();
1428  }
1429 
1430  pic->display_picture_number = display_picture_number;
1431  pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1432  } else if (!m->reordered_input_picture[1]) {
1433  /* Flushing: When the above check is true, the encoder is about to run
1434  * out of frames to encode. Check if there are input_pictures left;
1435  * if so, ensure m->input_picture[0] contains the first picture.
1436  * A flush_offset != 1 will only happen if we did not receive enough
1437  * input frames. */
1438  for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1439  if (m->input_picture[flush_offset])
1440  break;
1441 
1442  encoding_delay -= flush_offset - 1;
1443  }
1444 
1445  /* shift buffer entries */
1446  for (int i = flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
1447  m->input_picture[i - flush_offset] = m->input_picture[i];
1448  for (int i = MPVENC_MAX_B_FRAMES + 1 - flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
1449  m->input_picture[i] = NULL;
1450 
1451  m->input_picture[encoding_delay] = pic;
1452 
1453  return 0;
1454 fail:
1455  av_refstruct_unref(&pic);
1456  return ret;
1457 }
1458 
1459 static int skip_check(MPVMainEncContext *const m,
1460  const MPVPicture *p, const MPVPicture *ref)
1461 {
1462  MPVEncContext *const s = &m->s;
1463  int score = 0;
1464  int64_t score64 = 0;
1465 
1466  for (int plane = 0; plane < 3; plane++) {
1467  const int stride = p->f->linesize[plane];
1468  const int bw = plane ? 1 : 2;
1469  for (int y = 0; y < s->c.mb_height * bw; y++) {
1470  for (int x = 0; x < s->c.mb_width * bw; x++) {
1471  int off = p->shared ? 0 : 16;
1472  const uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1473  const uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1474  int v = m->frame_skip_cmp_fn(s, dptr, rptr, stride, 8);
1475 
1476  switch (FFABS(m->frame_skip_exp)) {
1477  case 0: score = FFMAX(score, v); break;
1478  case 1: score += FFABS(v); break;
1479  case 2: score64 += v * (int64_t)v; break;
1480  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1481  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1482  }
1483  }
1484  }
1485  }
1486  emms_c();
1487 
1488  if (score)
1489  score64 = score;
1490  if (m->frame_skip_exp < 0)
1491  score64 = pow(score64 / (double)(s->c.mb_width * s->c.mb_height),
1492  -1.0/m->frame_skip_exp);
1493 
1494  if (score64 < m->frame_skip_threshold)
1495  return 1;
1496  if (score64 < ((m->frame_skip_factor * (int64_t) s->lambda) >> 8))
1497  return 1;
1498  return 0;
1499 }
1500 
/* Helper for the B-frame lookahead: encodes one frame with a scratch
 * encoder context and returns the total size in bytes of all packets
 * produced, or a negative AVERROR code.
 * NOTE(review): this listing lost the signature line (1501) and the calls
 * whose results are checked below (lines 1506, 1511, 1514) — presumably a
 * send-frame call followed by a receive-packet loop; verify against the
 * complete file. */
1502 {
1503  int ret;
1504  int size = 0;
1505 
     /* (missing line: presumably submits the frame, setting ret) */
1507  if (ret < 0)
1508  return ret;
1509 
1510  do {
     /* (missing line: presumably fetches the next packet, setting ret) */
1512  if (ret >= 0) {
1513  size += pkt->size;
     /* (missing line: presumably releases the packet) */
1515  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1516  return ret;
1517  } while (ret >= 0);
1518 
1519  return size;
1520 }
1521 
/* Lookahead for b_frame_strategy == 2 (signature line 1522 lost in this
 * listing — presumably estimate_best_b_count(MPVMainEncContext *const m)):
 * encodes downscaled copies of the queued input frames with a scratch
 * encoder for every candidate B-frame run length j and returns the j with
 * the lowest rate-distortion cost, or a negative error code.
 * NOTE(review): several lines are missing (1547, 1588, 1596, 1611,
 * 1625-1626, 1639, 1654-1655) — e.g. the scratch-context allocation whose
 * result is checked at "if (!c)" and the context cleanup at "fail:";
 * verify against the complete file. */
1523 {
1524  MPVEncContext *const s = &m->s;
1525  AVPacket *pkt;
1526  const int scale = m->brd_scale;
1527  int width = s->c.width >> scale;
1528  int height = s->c.height >> scale;
1529  int out_size, p_lambda, b_lambda, lambda2;
1530  int64_t best_rd = INT64_MAX;
1531  int best_b_count = -1;
1532  int ret = 0;
1533 
1534  av_assert0(scale >= 0 && scale <= 3);
1535 
1536  pkt = av_packet_alloc();
1537  if (!pkt)
1538  return AVERROR(ENOMEM);
1539 
1540  //emms_c();
1541  p_lambda = m->last_lambda_for[AV_PICTURE_TYPE_P];
1542  //p_lambda * FFABS(s->c.avctx->b_quant_factor) + s->c.avctx->b_quant_offset;
1543  b_lambda = m->last_lambda_for[AV_PICTURE_TYPE_B];
1544  if (!b_lambda) // FIXME we should do this somewhere else
1545  b_lambda = p_lambda;
1546  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1548 
     /* downscale the last reference plus all queued inputs into tmp_frames */
1549  for (int i = 0; i < m->max_b_frames + 2; i++) {
1550  const MPVPicture *pre_input_ptr = i ? m->input_picture[i - 1] :
1551  s->c.next_pic.ptr;
1552 
1553  if (pre_input_ptr) {
1554  const uint8_t *data[4];
1555  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1556 
1557  if (!pre_input_ptr->shared && i) {
1558  data[0] += INPLACE_OFFSET;
1559  data[1] += INPLACE_OFFSET;
1560  data[2] += INPLACE_OFFSET;
1561  }
1562 
1563  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[0],
1564  m->tmp_frames[i]->linesize[0],
1565  data[0],
1566  pre_input_ptr->f->linesize[0],
1567  width, height);
1568  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[1],
1569  m->tmp_frames[i]->linesize[1],
1570  data[1],
1571  pre_input_ptr->f->linesize[1],
1572  width >> 1, height >> 1);
1573  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[2],
1574  m->tmp_frames[i]->linesize[2],
1575  data[2],
1576  pre_input_ptr->f->linesize[2],
1577  width >> 1, height >> 1);
1578  }
1579  }
1580 
     /* try each candidate B-run length and keep the cheapest */
1581  for (int j = 0; j < m->max_b_frames + 1; j++) {
1582  AVCodecContext *c;
1583  int64_t rd = 0;
1584 
1585  if (!m->input_picture[j])
1586  break;
1587 
     /* (missing line: presumably allocates the scratch context c) */
1589  if (!c) {
1590  ret = AVERROR(ENOMEM);
1591  goto fail;
1592  }
1593 
1594  c->width = width;
1595  c->height = height;
1597  c->flags |= s->c.avctx->flags & AV_CODEC_FLAG_QPEL;
1598  c->mb_decision = s->c.avctx->mb_decision;
1599  c->me_cmp = s->c.avctx->me_cmp;
1600  c->mb_cmp = s->c.avctx->mb_cmp;
1601  c->me_sub_cmp = s->c.avctx->me_sub_cmp;
1602  c->pix_fmt = AV_PIX_FMT_YUV420P;
1603  c->time_base = s->c.avctx->time_base;
1604  c->max_b_frames = m->max_b_frames;
1605 
1606  ret = avcodec_open2(c, s->c.avctx->codec, NULL);
1607  if (ret < 0)
1608  goto fail;
1609 
1610 
1612  m->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1613 
1614  out_size = encode_frame(c, m->tmp_frames[0], pkt);
1615  if (out_size < 0) {
1616  ret = out_size;
1617  goto fail;
1618  }
1619 
1620  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1621 
1622  for (int i = 0; i < m->max_b_frames + 1; i++) {
     /* frame i is a P frame at the end of each j-length B run */
1623  int is_p = i % (j + 1) == j || i == m->max_b_frames;
1624 
1625  m->tmp_frames[i + 1]->pict_type = is_p ?
1627  m->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1628 
1629  out_size = encode_frame(c, m->tmp_frames[i + 1], pkt);
1630  if (out_size < 0) {
1631  ret = out_size;
1632  goto fail;
1633  }
1634 
1635  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1636  }
1637 
1638  /* get the delayed frames */
1640  if (out_size < 0) {
1641  ret = out_size;
1642  goto fail;
1643  }
1644  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1645 
1646  rd += c->error[0] + c->error[1] + c->error[2];
1647 
1648  if (rd < best_rd) {
1649  best_rd = rd;
1650  best_b_count = j;
1651  }
1652 
1653 fail:
1656  if (ret < 0) {
1657  best_b_count = ret;
1658  break;
1659  }
1660  }
1661 
1662  av_packet_free(&pkt);
1663 
1664  return best_b_count;
1665 }
1666 
/**
 * Determines whether an input picture is discarded or not
 * and if not determines the length of the next chain of B frames
 * and moves these pictures (including the P frame) into
 * reordered_input_picture.
 * input_picture[0] is always NULL when exiting this function, even on error;
 * reordered_input_picture[0] is always NULL when exiting this function on error.
 *
 * NOTE(review): the function's signature line (1675) and a few interior
 * lines (1689, 1701-1702, 1731-1732, 1754, 1789-1791, 1797-1798) were lost
 * in this listing — e.g. the statement that drops a skipped frame, the
 * coded_picture_number assignments, and the b_frame_score computation —
 * verify against the complete file.
 */
1676 {
1677  MPVEncContext *const s = &m->s;
1678 
1679  /* Either nothing to do or can't do anything */
1680  if (m->reordered_input_picture[0] || !m->input_picture[0])
1681  return 0;
1682 
1683  /* set next picture type & ordering */
1684  if (m->frame_skip_threshold || m->frame_skip_factor) {
1685  if (m->picture_in_gop_number < m->gop_size &&
1686  s->c.next_pic.ptr &&
1687  skip_check(m, m->input_picture[0], s->c.next_pic.ptr)) {
1688  // FIXME check that the gop check above is +-1 correct
1690 
1691  ff_vbv_update(m, 0);
1692 
1693  return 0;
1694  }
1695  }
1696 
1697  if (/* m->picture_in_gop_number >= m->gop_size || */
1698  !s->c.next_pic.ptr || m->intra_only) {
     /* no reference yet (or intra-only): emit the frame as-is */
1699  m->reordered_input_picture[0] = m->input_picture[0];
1700  m->input_picture[0] = NULL;
1703  m->coded_picture_number++;
1704  } else {
1705  int b_frames = 0;
1706 
     /* two-pass mode: picture types come from the first-pass log */
1707  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
1708  for (int i = 0; i < m->max_b_frames + 1; i++) {
1709  int pict_num = m->input_picture[0]->display_picture_number + i;
1710 
1711  if (pict_num >= m->rc_context.num_entries)
1712  break;
1713  if (!m->input_picture[i]) {
1714  m->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1715  break;
1716  }
1717 
1718  m->input_picture[i]->f->pict_type =
1719  m->rc_context.entry[pict_num].new_pict_type;
1720  }
1721  }
1722 
1723  if (m->b_frame_strategy == 0) {
1724  b_frames = m->max_b_frames;
1725  while (b_frames && !m->input_picture[b_frames])
1726  b_frames--;
1727  } else if (m->b_frame_strategy == 1) {
     /* strategy 1: score frames by intra-block count vs. predecessor */
1728  for (int i = 1; i < m->max_b_frames + 1; i++) {
1729  if (m->input_picture[i] &&
1730  m->input_picture[i]->b_frame_score == 0) {
1733  m->input_picture[i ]->f->data[0],
1734  m->input_picture[i - 1]->f->data[0],
1735  s->c.linesize) + 1;
1736  }
1737  }
1738  for (int i = 0;; i++) {
1739  if (i >= m->max_b_frames + 1 ||
1740  !m->input_picture[i] ||
1741  m->input_picture[i]->b_frame_score - 1 >
1742  s->c.mb_num / m->b_sensitivity) {
1743  b_frames = FFMAX(0, i - 1);
1744  break;
1745  }
1746  }
1747 
1748  /* reset scores */
1749  for (int i = 0; i < b_frames + 1; i++)
1750  m->input_picture[i]->b_frame_score = 0;
1751  } else if (m->b_frame_strategy == 2) {
1752  b_frames = estimate_best_b_count(m);
1753  if (b_frames < 0) {
1755  return b_frames;
1756  }
1757  }
1758 
1759  emms_c();
1760 
     /* user-forced non-B picture types shorten the B run */
1761  for (int i = b_frames - 1; i >= 0; i--) {
1762  int type = m->input_picture[i]->f->pict_type;
1763  if (type && type != AV_PICTURE_TYPE_B)
1764  b_frames = i;
1765  }
1766  if (m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1767  b_frames == m->max_b_frames) {
1768  av_log(s->c.avctx, AV_LOG_ERROR,
1769  "warning, too many B-frames in a row\n");
1770  }
1771 
     /* GOP boundary handling: force an I frame (or trim the run) */
1772  if (m->picture_in_gop_number + b_frames >= m->gop_size) {
1773  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1774  m->gop_size > m->picture_in_gop_number) {
1775  b_frames = m->gop_size - m->picture_in_gop_number - 1;
1776  } else {
1777  if (s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1778  b_frames = 0;
1779  m->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1780  }
1781  }
1782 
1783  if ((s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1784  m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1785  b_frames--;
1786 
     /* the anchor frame goes out first, followed by its B frames */
1787  m->reordered_input_picture[0] = m->input_picture[b_frames];
1788  m->input_picture[b_frames] = NULL;
1792  m->coded_picture_number++;
1793  for (int i = 0; i < b_frames; i++) {
1794  m->reordered_input_picture[i + 1] = m->input_picture[i];
1795  m->input_picture[i] = NULL;
1796  m->reordered_input_picture[i + 1]->f->pict_type =
1799  m->coded_picture_number++;
1800  }
1801  }
1802 
1803  return 0;
1804 }
1805 
/* Pick the next picture to encode (signature line 1806 lost in this
 * listing — presumably select_input_picture(MPVMainEncContext *const m)):
 * runs the reordering logic, then promotes reordered_input_picture[0] to
 * s->c.cur_pic and makes its pixels available via s->new_pic.
 * NOTE(review): lines 1811, 1814-1815, 1817, 1825-1826 and 1860 are
 * missing — including the call whose result lands in ret and the cleanup
 * at "fail:" — verify against the complete file. */
1807 {
1808  MPVEncContext *const s = &m->s;
1809  int ret;
1810 
1812 
1813  for (int i = 1; i <= MPVENC_MAX_B_FRAMES; i++)
1816 
1818  av_assert1(!m->input_picture[0]);
1819  if (ret < 0)
1820  return ret;
1821 
1822  av_frame_unref(s->new_pic);
1823 
1824  if (m->reordered_input_picture[0]) {
1827 
1828  if (m->reordered_input_picture[0]->shared || s->c.avctx->rc_buffer_size) {
1829  // input is a shared pix, so we can't modify it -> allocate a new
1830  // one & ensure that the shared one is reusable
1831  av_frame_move_ref(s->new_pic, m->reordered_input_picture[0]->f);
1832 
1833  ret = prepare_picture(s, m->reordered_input_picture[0]->f, s->new_pic);
1834  if (ret < 0)
1835  goto fail;
1836  } else {
1837  // input is not a shared pix -> reuse buffer for current_pix
1838  ret = av_frame_ref(s->new_pic, m->reordered_input_picture[0]->f);
1839  if (ret < 0)
1840  goto fail;
1841  for (int i = 0; i < MPV_MAX_PLANES; i++)
1842  s->new_pic->data[i] += INPLACE_OFFSET;
1843  }
     /* hand ownership of the picture to cur_pic */
1844  s->c.cur_pic.ptr = m->reordered_input_picture[0];
1845  m->reordered_input_picture[0] = NULL;
1846  av_assert1(s->c.mb_width == s->c.buffer_pools.alloc_mb_width);
1847  av_assert1(s->c.mb_height == s->c.buffer_pools.alloc_mb_height);
1848  av_assert1(s->c.mb_stride == s->c.buffer_pools.alloc_mb_stride);
1849  ret = ff_mpv_alloc_pic_accessories(s->c.avctx, &s->c.cur_pic,
1850  &s->c.sc, &s->c.buffer_pools, s->c.mb_height);
1851  if (ret < 0) {
1852  ff_mpv_unref_picture(&s->c.cur_pic);
1853  return ret;
1854  }
1855  s->picture_number = s->c.cur_pic.ptr->display_picture_number;
1856 
1857  }
1858  return 0;
1859 fail:
1861  return ret;
1862 }
1863 
/* Per-frame post-encode bookkeeping: pads the reconstructed reference
 * picture's edges (needed for unrestricted motion vectors) and records the
 * last picture type / lambda for the rate control of subsequent frames.
 * NOTE(review): line 1876 is missing — presumably the luma edge-width
 * arguments of the first draw_edges() call; verify against the complete
 * file. */
1864 static void frame_end(MPVMainEncContext *const m)
1865 {
1866  MPVEncContext *const s = &m->s;
1867 
1868  if (s->me.unrestricted_mv &&
1869  s->c.cur_pic.reference &&
1870  !m->intra_only) {
1871  int hshift = s->c.chroma_x_shift;
1872  int vshift = s->c.chroma_y_shift;
1873  s->mpvencdsp.draw_edges(s->c.cur_pic.data[0],
1874  s->c.cur_pic.linesize[0],
1875  s->c.h_edge_pos, s->c.v_edge_pos,
1877  EDGE_TOP | EDGE_BOTTOM);
1878  s->mpvencdsp.draw_edges(s->c.cur_pic.data[1],
1879  s->c.cur_pic.linesize[1],
1880  s->c.h_edge_pos >> hshift,
1881  s->c.v_edge_pos >> vshift,
1882  EDGE_WIDTH >> hshift,
1883  EDGE_WIDTH >> vshift,
1884  EDGE_TOP | EDGE_BOTTOM);
1885  s->mpvencdsp.draw_edges(s->c.cur_pic.data[2],
1886  s->c.cur_pic.linesize[2],
1887  s->c.h_edge_pos >> hshift,
1888  s->c.v_edge_pos >> vshift,
1889  EDGE_WIDTH >> hshift,
1890  EDGE_WIDTH >> vshift,
1891  EDGE_TOP | EDGE_BOTTOM);
1892  }
1893 
1894  emms_c();
1895 
     /* remember type/lambda of this frame for future rate-control decisions */
1896  m->last_pict_type = s->c.pict_type;
1897  m->last_lambda_for[s->c.pict_type] = s->c.cur_pic.ptr->f->quality;
1898  if (s->c.pict_type != AV_PICTURE_TYPE_B)
1899  m->last_non_b_pict_type = s->c.pict_type;
1900 }
1901 
/* Refresh the noise-reduction DCT offset table (signature line 1902 lost
 * in this listing — presumably update_noise_reduction(MPVMainEncContext
 * *const m)). Halves the accumulated error statistics once the sample
 * count exceeds 2^16 to keep a running average, then recomputes
 * dct_offset[] from noise_reduction strength and the per-coefficient
 * error sums. */
1903 {
1904  MPVEncContext *const s = &m->s;
1905  int intra, i;
1906 
1907  for (intra = 0; intra < 2; intra++) {
     /* decay the statistics so they track recent frames */
1908  if (s->dct_count[intra] > (1 << 16)) {
1909  for (i = 0; i < 64; i++) {
1910  s->dct_error_sum[intra][i] >>= 1;
1911  }
1912  s->dct_count[intra] >>= 1;
1913  }
1914 
1915  for (i = 0; i < 64; i++) {
1916  s->dct_offset[intra][i] = (m->noise_reduction *
1917  s->dct_count[intra] +
1918  s->dct_error_sum[intra][i] / 2) /
1919  (s->dct_error_sum[intra][i] + 1);
1920  }
1921  }
1922 }
1923 
/* Per-frame pre-encode setup: propagate the picture type to the output
 * frame and rotate the reference pictures (last <- next <- current) for
 * non-B frames.
 * NOTE(review): line 1937 inside the dct_error_sum branch is missing —
 * presumably a call that refreshes the noise-reduction tables; verify
 * against the complete file. */
1924 static void frame_start(MPVMainEncContext *const m)
1925 {
1926  MPVEncContext *const s = &m->s;
1927 
1928  s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;
1929 
     /* B frames do not become references, so only rotate for I/P */
1930  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
1931  ff_mpv_replace_picture(&s->c.last_pic, &s->c.next_pic);
1932  ff_mpv_replace_picture(&s->c.next_pic, &s->c.cur_pic);
1933  }
1934 
1935  av_assert2(!!m->noise_reduction == !!s->dct_error_sum);
1936  if (s->dct_error_sum) {
1938  }
1939 }
1940 
/* Main per-frame entry point of the mpegvideo encoder (first signature
 * line 1941 lost in this listing — presumably
 * int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, ...)).
 * Queues the input frame, selects/encodes the next picture, retries with a
 * higher lambda on VBV overflow, appends stuffing bits, patches the
 * MPEG-1/2 vbv_delay for CBR and fills the output packet.
 * NOTE(review): numerous lines are missing throughout (e.g. 1959, 1975,
 * 1978, 1993, 2001, 2034, 2037-2038, 2044, 2047, 2062-2064, 2087, 2096,
 * 2103, 2127, 2148, 2155, 2157) — among them the picture-selection call,
 * the MJPEG trailer handling, and the switch case labels before the
 * zero-byte stuffing loop; verify against the complete file. */
1942  const AVFrame *pic_arg, int *got_packet)
1943 {
1944  MPVMainEncContext *const m = avctx->priv_data;
1945  MPVEncContext *const s = &m->s;
1946  int stuffing_count, ret;
1947  int context_count = s->c.slice_context_count;
1948 
1949  ff_mpv_unref_picture(&s->c.cur_pic);
1950 
1951  m->vbv_ignore_qmax = 0;
1952 
1953  m->picture_in_gop_number++;
1954 
1955  ret = load_input_picture(m, pic_arg);
1956  if (ret < 0)
1957  return ret;
1958 
     /* (missing line: presumably the picture-selection call setting ret) */
1960  if (ret < 0)
1961  return ret;
1962 
1963  /* output? */
1964  if (s->new_pic->data[0]) {
1965  int growing_buffer = context_count == 1 && !s->data_partitioning;
1966  size_t pkt_size = 10000 + s->c.mb_width * s->c.mb_height *
1967  (growing_buffer ? 64 : (MAX_MB_BYTES + 100));
1968  if (CONFIG_MJPEG_ENCODER && avctx->codec_id == AV_CODEC_ID_MJPEG) {
1969  ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_pic, &pkt_size);
1970  if (ret < 0)
1971  return ret;
1972  }
1973  if ((ret = ff_alloc_packet(avctx, pkt, pkt_size)) < 0)
1974  return ret;
1976  if (s->mb_info) {
1977  s->mb_info_ptr = av_packet_new_side_data(pkt,
1979  s->c.mb_width*s->c.mb_height*12);
1980  if (!s->mb_info_ptr)
1981  return AVERROR(ENOMEM);
1982  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1983  }
1984 
1985  s->c.pict_type = s->new_pic->pict_type;
1986  //emms_c();
1987  frame_start(m);
1988 vbv_retry:
1989  ret = encode_picture(m, pkt);
1990  if (growing_buffer) {
1991  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1992  pkt->data = s->pb.buf;
1994  }
1995  if (ret < 0)
1996  return -1;
1997 
1998  frame_end(m);
1999 
2000  if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->c.out_format == FMT_MJPEG)
2002 
     /* VBV check: if the frame is too large, raise lambda and re-encode */
2003  if (avctx->rc_buffer_size) {
2004  RateControlContext *rcc = &m->rc_context;
2005  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
2006  int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
2007  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
2008 
2009  if (put_bits_count(&s->pb) > max_size &&
2010  s->lambda < m->lmax) {
2011  m->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
2012  (s->c.qscale + 1) / s->c.qscale);
2013  if (s->adaptive_quant) {
2014  for (int i = 0; i < s->c.mb_height * s->c.mb_stride; i++)
2015  s->lambda_table[i] =
2016  FFMAX(s->lambda_table[i] + min_step,
2017  s->lambda_table[i] * (s->c.qscale + 1) /
2018  s->c.qscale);
2019  }
2020  s->c.mb_skipped = 0; // done in frame_start()
2021  // done in encode_picture() so we must undo it
2022  if (s->c.pict_type == AV_PICTURE_TYPE_P) {
2023  s->c.no_rounding ^= s->flipflop_rounding;
2024  }
2025  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
2026  s->c.time_base = s->c.last_time_base;
2027  s->c.last_non_b_time = s->c.time - s->c.pp_time;
2028  }
2029  m->vbv_ignore_qmax = 1;
2030  av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
2031  goto vbv_retry;
2032  }
2033 
2035  }
2036 
2039 
     /* accumulate per-plane error statistics for the PSNR report */
2040  for (int i = 0; i < MPV_MAX_PLANES; i++)
2041  avctx->error[i] += s->encoding_error[i];
2042  ff_encode_add_stats_side_data(pkt, s->c.cur_pic.ptr->f->quality,
2043  s->encoding_error,
2045  s->c.pict_type);
2046 
2048  assert(put_bits_count(&s->pb) == m->header_bits + s->mv_bits +
2049  s->misc_bits + s->i_tex_bits +
2050  s->p_tex_bits);
2051  flush_put_bits(&s->pb);
2052  m->frame_bits = put_bits_count(&s->pb);
2053 
     /* add codec-specific stuffing to satisfy the rate controller */
2054  stuffing_count = ff_vbv_update(m, m->frame_bits);
2055  m->stuffing_bits = 8*stuffing_count;
2056  if (stuffing_count) {
2057  if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
2058  av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
2059  return -1;
2060  }
2061 
2062  switch (s->c.codec_id) {
2065  while (stuffing_count--) {
2066  put_bits(&s->pb, 8, 0);
2067  }
2068  break;
2069  case AV_CODEC_ID_MPEG4:
2070  put_bits(&s->pb, 16, 0);
2071  put_bits(&s->pb, 16, 0x1C3);
2072  stuffing_count -= 4;
2073  while (stuffing_count--) {
2074  put_bits(&s->pb, 8, 0xFF);
2075  }
2076  break;
2077  default:
2078  av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2079  m->stuffing_bits = 0;
2080  }
2081  flush_put_bits(&s->pb);
2082  m->frame_bits = put_bits_count(&s->pb);
2083  }
2084 
2085  /* update MPEG-1/2 vbv_delay for CBR */
2086  if (avctx->rc_max_rate &&
2088  s->c.out_format == FMT_MPEG1 &&
2089  90000LL * (avctx->rc_buffer_size - 1) <=
2090  avctx->rc_max_rate * 0xFFFFLL) {
2091  AVCPBProperties *props;
2092  size_t props_size;
2093 
2094  int vbv_delay, min_delay;
2095  double inbits = avctx->rc_max_rate *
2097  int minbits = m->frame_bits - 8 *
2098  (m->vbv_delay_pos - 1);
2099  double bits = m->rc_context.buffer_index + minbits - inbits;
2100  uint8_t *const vbv_delay_ptr = s->pb.buf + m->vbv_delay_pos;
2101 
2102  if (bits < 0)
2104  "Internal error, negative bits\n");
2105 
2106  av_assert1(s->c.repeat_first_field == 0);
2107 
2108  vbv_delay = bits * 90000 / avctx->rc_max_rate;
2109  min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2110  avctx->rc_max_rate;
2111 
2112  vbv_delay = FFMAX(vbv_delay, min_delay);
2113 
2114  av_assert0(vbv_delay < 0xFFFF);
2115 
     /* patch the 16-bit vbv_delay field into the already-written header */
2116  vbv_delay_ptr[0] &= 0xF8;
2117  vbv_delay_ptr[0] |= vbv_delay >> 13;
2118  vbv_delay_ptr[1] = vbv_delay >> 5;
2119  vbv_delay_ptr[2] &= 0x07;
2120  vbv_delay_ptr[2] |= vbv_delay << 3;
2121 
2122  props = av_cpb_properties_alloc(&props_size);
2123  if (!props)
2124  return AVERROR(ENOMEM);
2125  props->vbv_delay = vbv_delay * 300;
2126 
2128  (uint8_t*)props, props_size);
2129  if (ret < 0) {
2130  av_freep(&props);
2131  return ret;
2132  }
2133  }
2134  m->total_bits += m->frame_bits;
2135 
     /* timestamps: dts lags pts by the reordering delay for non-B output */
2136  pkt->pts = s->c.cur_pic.ptr->f->pts;
2137  pkt->duration = s->c.cur_pic.ptr->f->duration;
2138  if (!s->c.low_delay && s->c.pict_type != AV_PICTURE_TYPE_B) {
2139  if (!s->c.cur_pic.ptr->coded_picture_number)
2140  pkt->dts = pkt->pts - m->dts_delta;
2141  else
2142  pkt->dts = m->reordered_pts;
2143  m->reordered_pts = pkt->pts;
2144  } else
2145  pkt->dts = pkt->pts;
2146 
2147  // the no-delay case is handled in generic code
2149  ret = ff_encode_reordered_opaque(avctx, pkt, s->c.cur_pic.ptr->f);
2150  if (ret < 0)
2151  return ret;
2152  }
2153 
2154  if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
2156  if (s->mb_info)
2158  } else {
2159  m->frame_bits = 0;
2160  }
2161 
2162  ff_mpv_unref_picture(&s->c.cur_pic);
2163 
2164  av_assert1((m->frame_bits & 7) == 0);
2165 
2166  pkt->size = m->frame_bits / 8;
2167  *got_packet = !!pkt->size;
2168  return 0;
2169 }
2170 
/* Single-coefficient elimination (first signature line 2171 lost in this
 * listing — presumably
 * static void dct_single_coeff_elimination(MPVEncContext *const s, ...)):
 * if block n contains only +-1 coefficients whose run-weighted score stays
 * below the threshold, the whole block is zeroed, since coding it costs
 * more than the quality it adds. A negative threshold means the DC
 * coefficient participates as well. */
2172  int n, int threshold)
2173 {
     /* weight of a +-1 coefficient depending on the zero-run before it:
      * early coefficients (short scan position) count more */
2174  static const char tab[64] = {
2175  3, 2, 2, 1, 1, 1, 1, 1,
2176  1, 1, 1, 1, 1, 1, 1, 1,
2177  1, 1, 1, 1, 1, 1, 1, 1,
2178  0, 0, 0, 0, 0, 0, 0, 0,
2179  0, 0, 0, 0, 0, 0, 0, 0,
2180  0, 0, 0, 0, 0, 0, 0, 0,
2181  0, 0, 0, 0, 0, 0, 0, 0,
2182  0, 0, 0, 0, 0, 0, 0, 0
2183  };
2184  int score = 0;
2185  int run = 0;
2186  int i;
2187  int16_t *block = s->block[n];
2188  const int last_index = s->c.block_last_index[n];
2189  int skip_dc;
2190 
2191  if (threshold < 0) {
2192  skip_dc = 0;
2193  threshold = -threshold;
2194  } else
2195  skip_dc = 1;
2196 
2197  /* Are all we could set to zero already zero? */
2198  if (last_index <= skip_dc - 1)
2199  return;
2200 
2201  for (i = 0; i <= last_index; i++) {
2202  const int j = s->c.intra_scantable.permutated[i];
2203  const int level = FFABS(block[j]);
2204  if (level == 1) {
2205  if (skip_dc && i == 0)
2206  continue;
2207  score += tab[run];
2208  run = 0;
2209  } else if (level > 1) {
     /* any coefficient with |level| > 1: block is worth keeping */
2210  return;
2211  } else {
2212  run++;
2213  }
2214  }
2215  if (score >= threshold)
2216  return;
     /* cheap block: zero every eliminable coefficient */
2217  for (i = skip_dc; i <= last_index; i++) {
2218  const int j = s->c.intra_scantable.permutated[i];
2219  block[j] = 0;
2220  }
     /* keep index 0 if the (preserved) DC coefficient is non-zero */
2221  if (block[0])
2222  s->c.block_last_index[n] = 0;
2223  else
2224  s->c.block_last_index[n] = -1;
2225 }
2226 
2227 static inline void clip_coeffs(const MPVEncContext *const s, int16_t block[],
2228  int last_index)
2229 {
2230  int i;
2231  const int maxlevel = s->max_qcoeff;
2232  const int minlevel = s->min_qcoeff;
2233  int overflow = 0;
2234 
2235  if (s->c.mb_intra) {
2236  i = 1; // skip clipping of intra dc
2237  } else
2238  i = 0;
2239 
2240  for (; i <= last_index; i++) {
2241  const int j = s->c.intra_scantable.permutated[i];
2242  int level = block[j];
2243 
2244  if (level > maxlevel) {
2245  level = maxlevel;
2246  overflow++;
2247  } else if (level < minlevel) {
2248  level = minlevel;
2249  overflow++;
2250  }
2251 
2252  block[j] = level;
2253  }
2254 
2255  if (overflow && s->c.avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2256  av_log(s->c.avctx, AV_LOG_INFO,
2257  "warning, clipping %d dct coefficients to %d..%d\n",
2258  overflow, minlevel, maxlevel);
2259 }
2260 
/**
 * Compute a perceptual weight for each of the 64 positions of an
 * 8x8 pixel block.
 *
 * For every pixel, the local activity over its 3x3 neighbourhood
 * (clipped at the block borders) is estimated from sum and sum of
 * squares; busier areas yield larger weights.  Used by the
 * noise-shaping quantizer.
 */
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
{
    // FIXME optimize
    for (int row = 0; row < 8; row++) {
        for (int col = 0; col < 8; col++) {
            const int y_lo = FFMAX(row - 1, 0), y_hi = FFMIN(8, row + 2);
            const int x_lo = FFMAX(col - 1, 0), x_hi = FFMIN(8, col + 2);
            int sum = 0, sqr = 0, count = 0;

            for (int yy = y_lo; yy < y_hi; yy++) {
                for (int xx = x_lo; xx < x_hi; xx++) {
                    const int v = ptr[xx + yy * stride];

                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }

            weight[col + 8 * row] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2284 
2286  int motion_x, int motion_y,
2287  int mb_block_height,
2288  int mb_block_width,
2289  int mb_block_count,
2290  int chroma_x_shift,
2291  int chroma_y_shift,
2292  int chroma_format)
2293 {
2294 /* Interlaced DCT is only possible with MPEG-2 and MPEG-4
2295  * and neither of these encoders currently supports 444. */
2296 #define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
2297  (s)->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
2298  DECLARE_ALIGNED(16, int16_t, weight)[12][64];
2299  int16_t orig[12][64];
2300  const int mb_x = s->c.mb_x;
2301  const int mb_y = s->c.mb_y;
2302  int i;
2303  int skip_dct[12];
2304  int dct_offset = s->c.linesize * 8; // default for progressive frames
2305  int uv_dct_offset = s->c.uvlinesize * 8;
2306  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2307  ptrdiff_t wrap_y, wrap_c;
2308 
2309  for (i = 0; i < mb_block_count; i++)
2310  skip_dct[i] = s->skipdct;
2311 
2312  if (s->adaptive_quant) {
2313  const int last_qp = s->c.qscale;
2314  const int mb_xy = mb_x + mb_y * s->c.mb_stride;
2315 
2316  s->lambda = s->lambda_table[mb_xy];
2317  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
2319 
2320  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2321  s->dquant = s->c.cur_pic.qscale_table[mb_xy] - last_qp;
2322 
2323  if (s->c.out_format == FMT_H263) {
2324  s->dquant = av_clip(s->dquant, -2, 2);
2325 
2326  if (s->c.codec_id == AV_CODEC_ID_MPEG4) {
2327  if (!s->c.mb_intra) {
2328  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
2329  if (s->dquant & 1 || s->c.mv_dir & MV_DIRECT)
2330  s->dquant = 0;
2331  }
2332  if (s->c.mv_type == MV_TYPE_8X8)
2333  s->dquant = 0;
2334  }
2335  }
2336  }
2337  }
2338  ff_set_qscale(&s->c, last_qp + s->dquant);
2339  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2340  ff_set_qscale(&s->c, s->c.qscale + s->dquant);
2341 
2342  wrap_y = s->c.linesize;
2343  wrap_c = s->c.uvlinesize;
2344  ptr_y = s->new_pic->data[0] +
2345  (mb_y * 16 * wrap_y) + mb_x * 16;
2346  ptr_cb = s->new_pic->data[1] +
2347  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2348  ptr_cr = s->new_pic->data[2] +
2349  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2350 
2351  if ((mb_x * 16 + 16 > s->c.width || mb_y * 16 + 16 > s->c.height) &&
2352  s->c.codec_id != AV_CODEC_ID_AMV) {
2353  uint8_t *ebuf = s->c.sc.edge_emu_buffer + 38 * wrap_y;
2354  int cw = (s->c.width + chroma_x_shift) >> chroma_x_shift;
2355  int ch = (s->c.height + chroma_y_shift) >> chroma_y_shift;
2356  s->c.vdsp.emulated_edge_mc(ebuf, ptr_y,
2357  wrap_y, wrap_y,
2358  16, 16, mb_x * 16, mb_y * 16,
2359  s->c.width, s->c.height);
2360  ptr_y = ebuf;
2361  s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2362  wrap_c, wrap_c,
2363  mb_block_width, mb_block_height,
2364  mb_x * mb_block_width, mb_y * mb_block_height,
2365  cw, ch);
2366  ptr_cb = ebuf + 16 * wrap_y;
2367  s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2368  wrap_c, wrap_c,
2369  mb_block_width, mb_block_height,
2370  mb_x * mb_block_width, mb_y * mb_block_height,
2371  cw, ch);
2372  ptr_cr = ebuf + 16 * wrap_y + 16;
2373  }
2374 
2375  if (s->c.mb_intra) {
2376  if (INTERLACED_DCT(s)) {
2377  int progressive_score, interlaced_score;
2378 
2379  s->c.interlaced_dct = 0;
2380  progressive_score = s->ildct_cmp[1](s, ptr_y, NULL, wrap_y, 8) +
2381  s->ildct_cmp[1](s, ptr_y + wrap_y * 8,
2382  NULL, wrap_y, 8) - 400;
2383 
2384  if (progressive_score > 0) {
2385  interlaced_score = s->ildct_cmp[1](s, ptr_y,
2386  NULL, wrap_y * 2, 8) +
2387  s->ildct_cmp[1](s, ptr_y + wrap_y,
2388  NULL, wrap_y * 2, 8);
2389  if (progressive_score > interlaced_score) {
2390  s->c.interlaced_dct = 1;
2391 
2392  dct_offset = wrap_y;
2393  uv_dct_offset = wrap_c;
2394  wrap_y <<= 1;
2395  if (chroma_format == CHROMA_422 ||
2397  wrap_c <<= 1;
2398  }
2399  }
2400  }
2401 
2402  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2403  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2404  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2405  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2406 
2407  if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
2408  skip_dct[4] = 1;
2409  skip_dct[5] = 1;
2410  } else {
2411  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2412  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2413  if (chroma_format == CHROMA_422) {
2414  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2415  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2416  } else if (chroma_format == CHROMA_444) {
2417  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2418  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2419  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2420  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2421  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2422  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2423  }
2424  }
2425  } else {
2426  op_pixels_func (*op_pix)[4];
2427  qpel_mc_func (*op_qpix)[16];
2428  uint8_t *dest_y, *dest_cb, *dest_cr;
2429 
2430  dest_y = s->c.dest[0];
2431  dest_cb = s->c.dest[1];
2432  dest_cr = s->c.dest[2];
2433 
2434  if ((!s->c.no_rounding) || s->c.pict_type == AV_PICTURE_TYPE_B) {
2435  op_pix = s->c.hdsp.put_pixels_tab;
2436  op_qpix = s->c.qdsp.put_qpel_pixels_tab;
2437  } else {
2438  op_pix = s->c.hdsp.put_no_rnd_pixels_tab;
2439  op_qpix = s->c.qdsp.put_no_rnd_qpel_pixels_tab;
2440  }
2441 
2442  if (s->c.mv_dir & MV_DIR_FORWARD) {
2443  ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 0,
2444  s->c.last_pic.data,
2445  op_pix, op_qpix);
2446  op_pix = s->c.hdsp.avg_pixels_tab;
2447  op_qpix = s->c.qdsp.avg_qpel_pixels_tab;
2448  }
2449  if (s->c.mv_dir & MV_DIR_BACKWARD) {
2450  ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 1,
2451  s->c.next_pic.data,
2452  op_pix, op_qpix);
2453  }
2454 
2455  if (INTERLACED_DCT(s)) {
2456  int progressive_score, interlaced_score;
2457 
2458  s->c.interlaced_dct = 0;
2459  progressive_score = s->ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2460  s->ildct_cmp[0](s, dest_y + wrap_y * 8,
2461  ptr_y + wrap_y * 8,
2462  wrap_y, 8) - 400;
2463 
2464  if (s->c.avctx->ildct_cmp == FF_CMP_VSSE)
2465  progressive_score -= 400;
2466 
2467  if (progressive_score > 0) {
2468  interlaced_score = s->ildct_cmp[0](s, dest_y, ptr_y,
2469  wrap_y * 2, 8) +
2470  s->ildct_cmp[0](s, dest_y + wrap_y,
2471  ptr_y + wrap_y,
2472  wrap_y * 2, 8);
2473 
2474  if (progressive_score > interlaced_score) {
2475  s->c.interlaced_dct = 1;
2476 
2477  dct_offset = wrap_y;
2478  uv_dct_offset = wrap_c;
2479  wrap_y <<= 1;
2480  if (chroma_format == CHROMA_422)
2481  wrap_c <<= 1;
2482  }
2483  }
2484  }
2485 
2486  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2487  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2488  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2489  dest_y + dct_offset, wrap_y);
2490  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2491  dest_y + dct_offset + 8, wrap_y);
2492 
2493  if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
2494  skip_dct[4] = 1;
2495  skip_dct[5] = 1;
2496  } else {
2497  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2498  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2499  if (!chroma_y_shift) { /* 422 */
2500  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2501  dest_cb + uv_dct_offset, wrap_c);
2502  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2503  dest_cr + uv_dct_offset, wrap_c);
2504  }
2505  }
2506  /* pre quantization */
2507  if (s->mc_mb_var[s->c.mb_stride * mb_y + mb_x] < 2 * s->c.qscale * s->c.qscale) {
2508  // FIXME optimize
2509  if (s->sad_cmp[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->c.qscale)
2510  skip_dct[0] = 1;
2511  if (s->sad_cmp[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->c.qscale)
2512  skip_dct[1] = 1;
2513  if (s->sad_cmp[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2514  wrap_y, 8) < 20 * s->c.qscale)
2515  skip_dct[2] = 1;
2516  if (s->sad_cmp[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2517  wrap_y, 8) < 20 * s->c.qscale)
2518  skip_dct[3] = 1;
2519  if (s->sad_cmp[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->c.qscale)
2520  skip_dct[4] = 1;
2521  if (s->sad_cmp[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->c.qscale)
2522  skip_dct[5] = 1;
2523  if (!chroma_y_shift) { /* 422 */
2524  if (s->sad_cmp[1](NULL, ptr_cb + uv_dct_offset,
2525  dest_cb + uv_dct_offset,
2526  wrap_c, 8) < 20 * s->c.qscale)
2527  skip_dct[6] = 1;
2528  if (s->sad_cmp[1](NULL, ptr_cr + uv_dct_offset,
2529  dest_cr + uv_dct_offset,
2530  wrap_c, 8) < 20 * s->c.qscale)
2531  skip_dct[7] = 1;
2532  }
2533  }
2534  }
2535 
2536  if (s->quantizer_noise_shaping) {
2537  if (!skip_dct[0])
2538  get_visual_weight(weight[0], ptr_y , wrap_y);
2539  if (!skip_dct[1])
2540  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2541  if (!skip_dct[2])
2542  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2543  if (!skip_dct[3])
2544  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2545  if (!skip_dct[4])
2546  get_visual_weight(weight[4], ptr_cb , wrap_c);
2547  if (!skip_dct[5])
2548  get_visual_weight(weight[5], ptr_cr , wrap_c);
2549  if (!chroma_y_shift) { /* 422 */
2550  if (!skip_dct[6])
2551  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2552  wrap_c);
2553  if (!skip_dct[7])
2554  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2555  wrap_c);
2556  }
2557  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2558  }
2559 
2560  /* DCT & quantize */
2561  av_assert2(s->c.out_format != FMT_MJPEG || s->c.qscale == 8);
2562  {
2563  for (i = 0; i < mb_block_count; i++) {
2564  if (!skip_dct[i]) {
2565  int overflow;
2566  s->c.block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->c.qscale, &overflow);
2567  // FIXME we could decide to change to quantizer instead of
2568  // clipping
2569  // JS: I don't think that would be a good idea it could lower
2570  // quality instead of improve it. Just INTRADC clipping
2571  // deserves changes in quantizer
2572  if (overflow)
2573  clip_coeffs(s, s->block[i], s->c.block_last_index[i]);
2574  } else
2575  s->c.block_last_index[i] = -1;
2576  }
2577  if (s->quantizer_noise_shaping) {
2578  for (i = 0; i < mb_block_count; i++) {
2579  if (!skip_dct[i]) {
2580  s->c.block_last_index[i] =
2581  dct_quantize_refine(s, s->block[i], weight[i],
2582  orig[i], i, s->c.qscale);
2583  }
2584  }
2585  }
2586 
2587  if (s->luma_elim_threshold && !s->c.mb_intra)
2588  for (i = 0; i < 4; i++)
2589  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2590  if (s->chroma_elim_threshold && !s->c.mb_intra)
2591  for (i = 4; i < mb_block_count; i++)
2592  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2593 
2594  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2595  for (i = 0; i < mb_block_count; i++) {
2596  if (s->c.block_last_index[i] == -1)
2597  s->coded_score[i] = INT_MAX / 256;
2598  }
2599  }
2600  }
2601 
2602  if ((s->c.avctx->flags & AV_CODEC_FLAG_GRAY) && s->c.mb_intra) {
2603  s->c.block_last_index[4] =
2604  s->c.block_last_index[5] = 0;
2605  s->block[4][0] =
2606  s->block[5][0] = (1024 + s->c.c_dc_scale / 2) / s->c.c_dc_scale;
2607  if (!chroma_y_shift) { /* 422 / 444 */
2608  for (i=6; i<12; i++) {
2609  s->c.block_last_index[i] = 0;
2610  s->block[i][0] = s->block[4][0];
2611  }
2612  }
2613  }
2614 
2615  // non c quantize code returns incorrect block_last_index FIXME
2616  if (s->c.alternate_scan && s->dct_quantize != dct_quantize_c) {
2617  for (i = 0; i < mb_block_count; i++) {
2618  int j;
2619  if (s->c.block_last_index[i] > 0) {
2620  for (j = 63; j > 0; j--) {
2621  if (s->block[i][s->c.intra_scantable.permutated[j]])
2622  break;
2623  }
2624  s->c.block_last_index[i] = j;
2625  }
2626  }
2627  }
2628 
2629  s->encode_mb(s, s->block, motion_x, motion_y);
2630 }
2631 
2632 static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
2633 {
2634  if (s->c.chroma_format == CHROMA_420)
2635  encode_mb_internal(s, motion_x, motion_y, 8, 8, 6, 1, 1, CHROMA_420);
2636  else if (s->c.chroma_format == CHROMA_422)
2637  encode_mb_internal(s, motion_x, motion_y, 16, 8, 8, 1, 0, CHROMA_422);
2638  else
2639  encode_mb_internal(s, motion_x, motion_y, 16, 16, 12, 0, 0, CHROMA_444);
2640 }
2641 
/* Snapshot of the per-macroblock encoder state that encode_mb_hq()
 * saves before — and may restore after — trying one candidate coding
 * mode for a macroblock.  The nested "c" struct mirrors the
 * correspondingly named members of the encoder context.
 * NOTE(review): additional members exist in lines elided by this
 * extraction; only the visible fields are shown here. */
2642 typedef struct MBBackup {
 2643  struct {
 2644  int mv[2][4][2];
 2645  int last_mv[2][2][2];
 2647  int last_dc[3];
 2649  int qscale;
 2652  } c;
 2655  int dquant;
 2657  int16_t (*block)[64];
 2659 } MBBackup;
2660 
/*
 * COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE) expands to a pair of
 * helpers:
 *   BEFORE##_context_before_encode(d, s)      — copy the state a coding
 *       trial may clobber (last MVs, DC predictors, bit statistics,
 *       qscale/dquant, ...) from s into d;
 *   AFTER##_context_after_encode(d, s, dp)    — copy the state a trial
 *       produced (MVs, MB mode flags, PutBitContexts, block pointers and
 *       last indices, ...) from s into d; 'dp' selects whether the
 *       data-partitioning PutBitContexts are copied too.
 * The two instantiations below generate the save/restore functions used
 * by encode_mb_hq().
 */
2661 #define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE) \
2662 static inline void BEFORE ##_context_before_encode(DST_TYPE *const d, \
2663  const SRC_TYPE *const s) \
2664 { \
2665  /* FIXME is memcpy faster than a loop? */ \
2666  memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
2667  \
2668  /* MPEG-1 */ \
2669  d->mb_skip_run = s->mb_skip_run; \
2670  for (int i = 0; i < 3; i++) \
2671  d->c.last_dc[i] = s->c.last_dc[i]; \
2672  \
2673  /* statistics */ \
2674  d->mv_bits = s->mv_bits; \
2675  d->i_tex_bits = s->i_tex_bits; \
2676  d->p_tex_bits = s->p_tex_bits; \
2677  d->i_count = s->i_count; \
2678  d->misc_bits = s->misc_bits; \
2679  d->last_bits = 0; \
2680  \
2681  d->c.mb_skipped = 0; \
2682  d->c.qscale = s->c.qscale; \
2683  d->dquant = s->dquant; \
2684  \
2685  d->esc3_level_length = s->esc3_level_length; \
2686 } \
2687  \
2688 static inline void AFTER ## _context_after_encode(DST_TYPE *const d, \
2689  const SRC_TYPE *const s, \
2690  int data_partitioning) \
2691 { \
2692  /* FIXME is memcpy faster than a loop? */ \
2693  memcpy(d->c.mv, s->c.mv, 2*4*2*sizeof(int)); \
2694  memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
2695  \
2696  /* MPEG-1 */ \
2697  d->mb_skip_run = s->mb_skip_run; \
2698  for (int i = 0; i < 3; i++) \
2699  d->c.last_dc[i] = s->c.last_dc[i]; \
2700  \
2701  /* statistics */ \
2702  d->mv_bits = s->mv_bits; \
2703  d->i_tex_bits = s->i_tex_bits; \
2704  d->p_tex_bits = s->p_tex_bits; \
2705  d->i_count = s->i_count; \
2706  d->misc_bits = s->misc_bits; \
2707  \
2708  d->c.mb_intra = s->c.mb_intra; \
2709  d->c.mb_skipped = s->c.mb_skipped; \
2710  d->c.mv_type = s->c.mv_type; \
2711  d->c.mv_dir = s->c.mv_dir; \
2712  d->pb = s->pb; \
2713  if (data_partitioning) { \
2714  d->pb2 = s->pb2; \
2715  d->tex_pb = s->tex_pb; \
2716  } \
2717  d->block = s->block; \
2718  for (int i = 0; i < 8; i++) \
2719  d->c.block_last_index[i] = s->c.block_last_index[i]; \
2720  d->c.interlaced_dct = s->c.interlaced_dct; \
2721  d->c.qscale = s->c.qscale; \
2722  \
2723  d->esc3_level_length = s->esc3_level_length; \
2724 }
2725 
/* backup/save copy encoder state into an MBBackup; reset/store copy it
 * back from an MBBackup into the MPVEncContext. */
2726 COPY_CONTEXT(backup, save, MBBackup, MPVEncContext)
2727 COPY_CONTEXT(reset, store, MPVEncContext, MBBackup)
2728 
/* Encode the current macroblock with the coding mode the caller set up,
 * writing into one of two ping-pong bit buffers, and keep the trial if
 * its score (bits, or full rate-distortion cost when mb_decision is
 * FF_MB_DECISION_RD) beats *dmin.  On a win the state is saved into
 * 'best' and *next_block is flipped so the losing buffer is reused. */
2729 static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best,
 2730  PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
 2731  int *dmin, int *next_block, int motion_x, int motion_y)
 2732 {
 2733  int score;
 2734  uint8_t *dest_backup[3];
 2735 
 /* restore the pre-trial encoder state saved by the caller */
 2736  reset_context_before_encode(s, backup);
 2737 
 2738  s->block = s->blocks[*next_block];
 2739  s->pb = pb[*next_block];
 2740  if (s->data_partitioning) {
 2741  s->pb2 = pb2 [*next_block];
 2742  s->tex_pb= tex_pb[*next_block];
 2743  }
 2744 
 /* the second buffer must not clobber the reconstruction of the
  * currently best trial, so redirect output to scratch memory */
 2745  if(*next_block){
 2746  memcpy(dest_backup, s->c.dest, sizeof(s->c.dest));
 2747  s->c.dest[0] = s->c.sc.rd_scratchpad;
 2748  s->c.dest[1] = s->c.sc.rd_scratchpad + 16*s->c.linesize;
 2749  s->c.dest[2] = s->c.sc.rd_scratchpad + 16*s->c.linesize + 8;
 2750  av_assert0(s->c.linesize >= 32); //FIXME
 2751  }
 2752 
 2753  encode_mb(s, motion_x, motion_y);
 2754 
 /* rate: bits produced by this trial (all partitions) */
 2755  score= put_bits_count(&s->pb);
 2756  if (s->data_partitioning) {
 2757  score+= put_bits_count(&s->pb2);
 2758  score+= put_bits_count(&s->tex_pb);
 2759  }
 2760 
 /* full RD mode: weigh the bits with lambda2 and add the SSE of the
  * reconstructed macroblock */
 2761  if (s->c.avctx->mb_decision == FF_MB_DECISION_RD) {
 2762  mpv_reconstruct_mb(s, s->block);
 2763 
 2764  score *= s->lambda2;
 2765  score += sse_mb(s) << FF_LAMBDA_SHIFT;
 2766  }
 2767 
 2768  if(*next_block){
 2769  memcpy(s->c.dest, dest_backup, sizeof(s->c.dest));
 2770  }
 2771 
 /* new best trial: remember its score and state, flip ping-pong index */
 2772  if(score<*dmin){
 2773  *dmin= score;
 2774  *next_block^=1;
 2775 
 2776  save_context_after_encode(best, s, s->data_partitioning);
 2777  }
 2778 }
2779 
2780 static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
2781 {
2782  const uint32_t *sq = ff_square_tab + 256;
2783  int acc=0;
2784  int x,y;
2785 
2786  if(w==16 && h==16)
2787  return s->sse_cmp[0](NULL, src1, src2, stride, 16);
2788  else if(w==8 && h==8)
2789  return s->sse_cmp[1](NULL, src1, src2, stride, 8);
2790 
2791  for(y=0; y<h; y++){
2792  for(x=0; x<w; x++){
2793  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2794  }
2795  }
2796 
2797  av_assert2(acc>=0);
2798 
2799  return acc;
2800 }
2801 
2802 static int sse_mb(MPVEncContext *const s)
2803 {
2804  int w= 16;
2805  int h= 16;
2806  int chroma_mb_w = w >> s->c.chroma_x_shift;
2807  int chroma_mb_h = h >> s->c.chroma_y_shift;
2808 
2809  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
2810  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
2811 
2812  if(w==16 && h==16)
2813  return s->n_sse_cmp[0](s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2814  s->c.dest[0], s->c.linesize, 16) +
2815  s->n_sse_cmp[1](s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2816  s->c.dest[1], s->c.uvlinesize, chroma_mb_h) +
2817  s->n_sse_cmp[1](s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2818  s->c.dest[2], s->c.uvlinesize, chroma_mb_h);
2819  else
2820  return sse(s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2821  s->c.dest[0], w, h, s->c.linesize) +
2822  sse(s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2823  s->c.dest[1], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize) +
2824  sse(s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2825  s->c.dest[2], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize);
2826 }
2827 
2829  MPVEncContext *const s = *(void**)arg;
2830 
2831 
2832  s->me.pre_pass = 1;
2833  s->me.dia_size = s->c.avctx->pre_dia_size;
2834  s->c.first_slice_line = 1;
2835  for (s->c.mb_y = s->c.end_mb_y - 1; s->c.mb_y >= s->c.start_mb_y; s->c.mb_y--) {
2836  for (s->c.mb_x = s->c.mb_width - 1; s->c.mb_x >=0 ; s->c.mb_x--)
2837  ff_pre_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
2838  s->c.first_slice_line = 0;
2839  }
2840 
2841  s->me.pre_pass = 0;
2842 
2843  return 0;
2844 }
2845 
2847  MPVEncContext *const s = *(void**)arg;
2848 
2849  s->me.dia_size = s->c.avctx->dia_size;
2850  s->c.first_slice_line = 1;
2851  for (s->c.mb_y = s->c.start_mb_y; s->c.mb_y < s->c.end_mb_y; s->c.mb_y++) {
2852  s->c.mb_x = 0; //for block init below
2853  ff_init_block_index(&s->c);
2854  for (s->c.mb_x = 0; s->c.mb_x < s->c.mb_width; s->c.mb_x++) {
2855  s->c.block_index[0] += 2;
2856  s->c.block_index[1] += 2;
2857  s->c.block_index[2] += 2;
2858  s->c.block_index[3] += 2;
2859 
2860  /* compute motion vector & mb_type and store in context */
2861  if (s->c.pict_type == AV_PICTURE_TYPE_B)
2862  ff_estimate_b_frame_motion(s, s->c.mb_x, s->c.mb_y);
2863  else
2864  ff_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
2865  }
2866  s->c.first_slice_line = 0;
2867  }
2868  return 0;
2869 }
2870 
2871 static int mb_var_thread(AVCodecContext *c, void *arg){
2872  MPVEncContext *const s = *(void**)arg;
2873 
2874  for (int mb_y = s->c.start_mb_y; mb_y < s->c.end_mb_y; mb_y++) {
2875  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
2876  int xx = mb_x * 16;
2877  int yy = mb_y * 16;
2878  const uint8_t *pix = s->new_pic->data[0] + (yy * s->c.linesize) + xx;
2879  int varc;
2880  int sum = s->mpvencdsp.pix_sum(pix, s->c.linesize);
2881 
2882  varc = (s->mpvencdsp.pix_norm1(pix, s->c.linesize) -
2883  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2884 
2885  s->mb_var [s->c.mb_stride * mb_y + mb_x] = varc;
2886  s->mb_mean[s->c.mb_stride * mb_y + mb_x] = (sum+128)>>8;
2887  s->me.mb_var_sum_temp += varc;
2888  }
2889  }
2890  return 0;
2891 }
2892 
/* Terminate the current slice: apply the codec-specific stuffing or
 * trailer (several of those calls are on lines elided in this
 * extraction), byte-align the bitstream, and in two-pass mode account
 * the alignment bits as misc_bits. */
2893 static void write_slice_end(MPVEncContext *const s)
 2894 {
 2895  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4) {
 2896  if (s->partitioned_frame)
 2898 
 2899  ff_mpeg4_stuffing(&s->pb);
 2900  } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
 2901  s->c.out_format == FMT_MJPEG) {
 2903  } else if (CONFIG_SPEEDHQ_ENCODER && s->c.out_format == FMT_SPEEDHQ) {
 2905  }
 2906 
 /* byte-align the slice */
 2907  flush_put_bits(&s->pb);
 2908 
 /* pass 1: count the padding bits written above as misc bits */
 2909  if ((s->c.avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
 2910  s->misc_bits+= get_bits_diff(s);
 2911 }
2912 
2913 static void write_mb_info(MPVEncContext *const s)
2914 {
2915  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2916  int offset = put_bits_count(&s->pb);
2917  int mba = s->c.mb_x + s->c.mb_width * (s->c.mb_y % s->gob_index);
2918  int gobn = s->c.mb_y / s->gob_index;
2919  int pred_x, pred_y;
2920  if (CONFIG_H263_ENCODER)
2921  ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
2922  bytestream_put_le32(&ptr, offset);
2923  bytestream_put_byte(&ptr, s->c.qscale);
2924  bytestream_put_byte(&ptr, gobn);
2925  bytestream_put_le16(&ptr, mba);
2926  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2927  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2928  /* 4MV not implemented */
2929  bytestream_put_byte(&ptr, 0); /* hmv2 */
2930  bytestream_put_byte(&ptr, 0); /* vmv2 */
2931 }
2932 
/* Maintain the macroblock-info side data: once at least s->mb_info
 * bytes of bitstream have been written since the last slot, reserve a
 * new 12-byte slot; with startcode set only the bookkeeping is updated,
 * otherwise the slot is filled via write_mb_info(). */
2933 static void update_mb_info(MPVEncContext *const s, int startcode)
 2934 {
 2935  if (!s->mb_info)
 2936  return;
 /* enough bitstream since the previous slot: reserve the next one */
 2937  if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
 2938  s->mb_info_size += 12;
 2939  s->prev_mb_info = s->last_mb_info;
 2940  }
 2941  if (startcode) {
 2942  s->prev_mb_info = put_bytes_count(&s->pb, 0);
 2943  /* This might have incremented mb_info_size above, and we return without
 2944  * actually writing any info into that slot yet. But in that case,
 2945  * this function will be called again (with startcode == 0) right after
 2946  * the start code has been written, and the mb info is written then. */
 2947  return;
 2948  }
 2949 
 2950  s->last_mb_info = put_bytes_count(&s->pb, 0);
 /* first slot of the frame may not have been reserved above yet */
 2951  if (!s->mb_info_size)
 2952  s->mb_info_size += 12;
 2953  write_mb_info(s);
 2954 }
2955 
/* Grow the bitstream output buffer when fewer than 'threshold' bytes of
 * space remain.  Growing is only possible when a single slice context
 * writes directly into avctx's internal byte_buffer; the PutBitContext
 * and ptr_lastgob are rebased onto the new buffer.  Returns 0 on
 * success, AVERROR(ENOMEM) on allocation failure/overflow, or
 * AVERROR(EINVAL) if the space is still insufficient afterwards. */
2956 int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
 2957 {
 2958  if (put_bytes_left(&s->pb, 0) < threshold
 2959  && s->c.slice_context_count == 1
 2960  && s->pb.buf == s->c.avctx->internal->byte_buffer) {
 /* keep ptr_lastgob as an offset so it can be rebased below */
 2961  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
 2962 
 2963  uint8_t *new_buffer = NULL;
 2964  int new_buffer_size = 0;
 2965 
 /* guard against int overflow of the grown buffer size */
 2966  if ((s->c.avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
 2967  av_log(s->c.avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
 2968  return AVERROR(ENOMEM);
 2969  }
 2970 
 2971  emms_c();
 2972 
 2973  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
 2974  s->c.avctx->internal->byte_buffer_size + size_increase);
 2975  if (!new_buffer)
 2976  return AVERROR(ENOMEM);
 2977 
 /* move the already-written bytes and switch all users to the new buffer */
 2978  memcpy(new_buffer, s->c.avctx->internal->byte_buffer, s->c.avctx->internal->byte_buffer_size);
 2979  av_free(s->c.avctx->internal->byte_buffer);
 2980  s->c.avctx->internal->byte_buffer = new_buffer;
 2981  s->c.avctx->internal->byte_buffer_size = new_buffer_size;
 2982  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
 2983  s->ptr_lastgob = s->pb.buf + lastgob_pos;
 2984  }
 /* still not enough room: reallocation skipped or insufficient */
 2985  if (put_bytes_left(&s->pb, 0) < threshold)
 2986  return AVERROR(EINVAL);
 2987  return 0;
 2988 }
2989 
2990 static int encode_thread(AVCodecContext *c, void *arg){
2991  MPVEncContext *const s = *(void**)arg;
2992  int chr_h = 16 >> s->c.chroma_y_shift;
2993  int i;
2994  MBBackup best_s = { 0 }, backup_s;
2995  uint8_t bit_buf[2][MAX_MB_BYTES];
2996  // + 2 because ff_copy_bits() overreads
2997  uint8_t bit_buf2[2][MAX_PB2_MB_SIZE + 2];
2998  uint8_t bit_buf_tex[2][MAX_AC_TEX_MB_SIZE + 2];
2999  PutBitContext pb[2], pb2[2], tex_pb[2];
3000 
3001  for(i=0; i<2; i++){
3002  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
3003  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_PB2_MB_SIZE);
3004  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_AC_TEX_MB_SIZE);
3005  }
3006 
3007  s->last_bits= put_bits_count(&s->pb);
3008  s->mv_bits=0;
3009  s->misc_bits=0;
3010  s->i_tex_bits=0;
3011  s->p_tex_bits=0;
3012  s->i_count=0;
3013 
3014  for(i=0; i<3; i++){
3015  /* init last dc values */
3016  /* note: quant matrix value (8) is implied here */
3017  s->c.last_dc[i] = 128 << s->c.intra_dc_precision;
3018 
3019  s->encoding_error[i] = 0;
3020  }
3021  if (s->c.codec_id == AV_CODEC_ID_AMV) {
3022  s->c.last_dc[0] = 128 * 8 / 13;
3023  s->c.last_dc[1] = 128 * 8 / 14;
3024  s->c.last_dc[2] = 128 * 8 / 14;
3025 #if CONFIG_MPEG4_ENCODER
3026  } else if (s->partitioned_frame) {
3027  av_assert1(s->c.codec_id == AV_CODEC_ID_MPEG4);
3029 #endif
3030  }
3031  s->mb_skip_run = 0;
3032  memset(s->c.last_mv, 0, sizeof(s->c.last_mv));
3033 
3034  s->last_mv_dir = 0;
3035 
3036  s->c.resync_mb_x = 0;
3037  s->c.resync_mb_y = 0;
3038  s->c.first_slice_line = 1;
3039  s->ptr_lastgob = s->pb.buf;
3040  for (int mb_y_order = s->c.start_mb_y; mb_y_order < s->c.end_mb_y; mb_y_order++) {
3041  int mb_y;
3042  if (CONFIG_SPEEDHQ_ENCODER && s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
3043  int first_in_slice;
3044  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->c.mb_height, &first_in_slice);
3045  if (first_in_slice && mb_y_order != s->c.start_mb_y)
3047  s->c.last_dc[0] = s->c.last_dc[1] = s->c.last_dc[2] = 1024 << s->c.intra_dc_precision;
3048  } else {
3049  mb_y = mb_y_order;
3050  }
3051  s->c.mb_x = 0;
3052  s->c.mb_y = mb_y;
3053 
3054  ff_set_qscale(&s->c, s->c.qscale);
3055  ff_init_block_index(&s->c);
3056 
3057  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
3058  int mb_type, xy;
3059 // int d;
3060  int dmin= INT_MAX;
3061  int dir;
3062  int size_increase = s->c.avctx->internal->byte_buffer_size/4
3063  + s->c.mb_width*MAX_MB_BYTES;
3064 
3066  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
3067  av_log(s->c.avctx, AV_LOG_ERROR, "encoded frame too large\n");
3068  return -1;
3069  }
3070  if (s->data_partitioning) {
3071  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
3072  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
3073  av_log(s->c.avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3074  return -1;
3075  }
3076  }
3077 
3078  s->c.mb_x = mb_x;
3079  s->c.mb_y = mb_y; // moved into loop, can get changed by H.261
3080  ff_update_block_index(&s->c, 8, 0, s->c.chroma_x_shift);
3081 
3082  if (CONFIG_H261_ENCODER && s->c.codec_id == AV_CODEC_ID_H261)
3084  xy = s->c.mb_y * s->c.mb_stride + s->c.mb_x;
3085  mb_type = s->mb_type[xy];
3086 
3087  /* write gob / video packet header */
3088  if(s->rtp_mode){
3089  int current_packet_size, is_gob_start;
3090 
3091  current_packet_size = put_bytes_count(&s->pb, 1)
3092  - (s->ptr_lastgob - s->pb.buf);
3093 
3094  is_gob_start = s->rtp_payload_size &&
3095  current_packet_size >= s->rtp_payload_size &&
3096  mb_y + mb_x > 0;
3097 
3098  if (s->c.start_mb_y == mb_y && mb_y > 0 && mb_x == 0) is_gob_start = 1;
3099 
3100  switch (s->c.codec_id) {
3101  case AV_CODEC_ID_H263:
3102  case AV_CODEC_ID_H263P:
3103  if (!s->h263_slice_structured)
3104  if (s->c.mb_x || s->c.mb_y % s->gob_index) is_gob_start = 0;
3105  break;
3107  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3109  if (s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO && s->c.mb_y >= 175 ||
3110  s->mb_skip_run)
3111  is_gob_start=0;
3112  break;
3113  case AV_CODEC_ID_MJPEG:
3114  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3115  break;
3116  }
3117 
3118  if(is_gob_start){
3119  if (s->c.start_mb_y != mb_y || mb_x != 0) {
3120  write_slice_end(s);
3121 
3122  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 && s->partitioned_frame)
3124  }
3125 
3126  av_assert2((put_bits_count(&s->pb)&7) == 0);
3127  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3128 
3129  if (s->error_rate && s->c.resync_mb_x + s->c.resync_mb_y > 0) {
3130  int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->c.mb_x + s->c.mb_y;
3131  int d = 100 / s->error_rate;
3132  if(r % d == 0){
3133  current_packet_size=0;
3134  s->pb.buf_ptr= s->ptr_lastgob;
3135  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3136  }
3137  }
3138 
3139  switch (s->c.codec_id) {
3140  case AV_CODEC_ID_MPEG4:
3141  if (CONFIG_MPEG4_ENCODER) {
3145  }
3146  break;
3149  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3152  }
3153  break;
3154 #if CONFIG_H263P_ENCODER
3155  case AV_CODEC_ID_H263P:
3156  if (s->c.dc_val)
3158  // fallthrough
3159 #endif
3160  case AV_CODEC_ID_H263:
3161  if (CONFIG_H263_ENCODER) {
3162  update_mb_info(s, 1);
3164  }
3165  break;
3166  }
3167 
3168  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS1) {
3169  int bits= put_bits_count(&s->pb);
3170  s->misc_bits+= bits - s->last_bits;
3171  s->last_bits= bits;
3172  }
3173 
3174  s->ptr_lastgob += current_packet_size;
3175  s->c.first_slice_line = 1;
3176  s->c.resync_mb_x = mb_x;
3177  s->c.resync_mb_y = mb_y;
3178  }
3179  }
3180 
3181  if (s->c.resync_mb_x == s->c.mb_x &&
3182  s->c.resync_mb_y+1 == s->c.mb_y)
3183  s->c.first_slice_line = 0;
3184 
3185  s->c.mb_skipped = 0;
3186  s->dquant=0; //only for QP_RD
3187 
3188  update_mb_info(s, 0);
3189 
3190  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3191  int next_block=0;
3192  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3193 
3194  backup_context_before_encode(&backup_s, s);
3195  backup_s.pb= s->pb;
3196  if (s->data_partitioning) {
3197  backup_s.pb2= s->pb2;
3198  backup_s.tex_pb= s->tex_pb;
3199  }
3200 
3201  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3202  s->c.mv_dir = MV_DIR_FORWARD;
3203  s->c.mv_type = MV_TYPE_16X16;
3204  s->c.mb_intra = 0;
3205  s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3206  s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3207  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3208  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3209  }
3210  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3211  s->c.mv_dir = MV_DIR_FORWARD;
3212  s->c.mv_type = MV_TYPE_FIELD;
3213  s->c.mb_intra = 0;
3214  for(i=0; i<2; i++){
3215  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3216  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3217  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3218  }
3219  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3220  &dmin, &next_block, 0, 0);
3221  }
3222  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3223  s->c.mv_dir = MV_DIR_FORWARD;
3224  s->c.mv_type = MV_TYPE_16X16;
3225  s->c.mb_intra = 0;
3226  s->c.mv[0][0][0] = 0;
3227  s->c.mv[0][0][1] = 0;
3228  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3229  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3230  }
3231  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3232  s->c.mv_dir = MV_DIR_FORWARD;
3233  s->c.mv_type = MV_TYPE_8X8;
3234  s->c.mb_intra = 0;
3235  for(i=0; i<4; i++){
3236  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3237  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3238  }
3239  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3240  &dmin, &next_block, 0, 0);
3241  }
3242  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3243  s->c.mv_dir = MV_DIR_FORWARD;
3244  s->c.mv_type = MV_TYPE_16X16;
3245  s->c.mb_intra = 0;
3246  s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3247  s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3248  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3249  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3250  }
3251  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3252  s->c.mv_dir = MV_DIR_BACKWARD;
3253  s->c.mv_type = MV_TYPE_16X16;
3254  s->c.mb_intra = 0;
3255  s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3256  s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3257  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3258  &dmin, &next_block, s->c.mv[1][0][0], s->c.mv[1][0][1]);
3259  }
3260  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3261  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3262  s->c.mv_type = MV_TYPE_16X16;
3263  s->c.mb_intra = 0;
3264  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3265  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3266  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3267  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3268  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3269  &dmin, &next_block, 0, 0);
3270  }
3271  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3272  s->c.mv_dir = MV_DIR_FORWARD;
3273  s->c.mv_type = MV_TYPE_FIELD;
3274  s->c.mb_intra = 0;
3275  for(i=0; i<2; i++){
3276  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3277  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3278  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3279  }
3280  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3281  &dmin, &next_block, 0, 0);
3282  }
3283  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3284  s->c.mv_dir = MV_DIR_BACKWARD;
3285  s->c.mv_type = MV_TYPE_FIELD;
3286  s->c.mb_intra = 0;
3287  for(i=0; i<2; i++){
3288  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3289  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3290  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3291  }
3292  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3293  &dmin, &next_block, 0, 0);
3294  }
3295  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3296  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3297  s->c.mv_type = MV_TYPE_FIELD;
3298  s->c.mb_intra = 0;
3299  for(dir=0; dir<2; dir++){
3300  for(i=0; i<2; i++){
3301  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3302  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3303  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3304  }
3305  }
3306  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3307  &dmin, &next_block, 0, 0);
3308  }
3309  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3310  s->c.mv_dir = 0;
3311  s->c.mv_type = MV_TYPE_16X16;
3312  s->c.mb_intra = 1;
3313  s->c.mv[0][0][0] = 0;
3314  s->c.mv[0][0][1] = 0;
3315  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3316  &dmin, &next_block, 0, 0);
3317  s->c.mbintra_table[xy] = 1;
3318  }
3319 
3320  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3321  if (best_s.c.mv_type == MV_TYPE_16X16) { //FIXME move 4mv after QPRD
3322  const int last_qp = backup_s.c.qscale;
3323  int qpi, qp, dc[6];
3324  int16_t ac[6][16];
3325  const int mvdir = (best_s.c.mv_dir & MV_DIR_BACKWARD) ? 1 : 0;
3326  static const int dquant_tab[4]={-1,1,-2,2};
3327  int storecoefs = s->c.mb_intra && s->c.dc_val;
3328 
3329  av_assert2(backup_s.dquant == 0);
3330 
3331  //FIXME intra
3332  s->c.mv_dir = best_s.c.mv_dir;
3333  s->c.mv_type = MV_TYPE_16X16;
3334  s->c.mb_intra = best_s.c.mb_intra;
3335  s->c.mv[0][0][0] = best_s.c.mv[0][0][0];
3336  s->c.mv[0][0][1] = best_s.c.mv[0][0][1];
3337  s->c.mv[1][0][0] = best_s.c.mv[1][0][0];
3338  s->c.mv[1][0][1] = best_s.c.mv[1][0][1];
3339 
3340  qpi = s->c.pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3341  for(; qpi<4; qpi++){
3342  int dquant= dquant_tab[qpi];
3343  qp= last_qp + dquant;
3344  if (qp < s->c.avctx->qmin || qp > s->c.avctx->qmax)
3345  continue;
3346  backup_s.dquant= dquant;
3347  if(storecoefs){
3348  for(i=0; i<6; i++){
3349  dc[i] = s->c.dc_val[s->c.block_index[i]];
3350  memcpy(ac[i], s->c.ac_val[s->c.block_index[i]], sizeof(*s->c.ac_val));
3351  }
3352  }
3353 
3354  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3355  &dmin, &next_block, s->c.mv[mvdir][0][0], s->c.mv[mvdir][0][1]);
3356  if (best_s.c.qscale != qp) {
3357  if(storecoefs){
3358  for(i=0; i<6; i++){
3359  s->c.dc_val[s->c.block_index[i]] = dc[i];
3360  memcpy(s->c.ac_val[s->c.block_index[i]], ac[i], sizeof(*s->c.ac_val));
3361  }
3362  }
3363  }
3364  }
3365  }
3366  }
3367  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3368  int mx= s->b_direct_mv_table[xy][0];
3369  int my= s->b_direct_mv_table[xy][1];
3370 
3371  backup_s.dquant = 0;
3372  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3373  s->c.mb_intra = 0;
3374  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3375  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3376  &dmin, &next_block, mx, my);
3377  }
3378  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3379  backup_s.dquant = 0;
3380  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3381  s->c.mb_intra = 0;
3382  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3383  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3384  &dmin, &next_block, 0, 0);
3385  }
3386  if (!best_s.c.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3387  int coded=0;
3388  for(i=0; i<6; i++)
3389  coded |= s->c.block_last_index[i];
3390  if(coded){
3391  int mx,my;
3392  memcpy(s->c.mv, best_s.c.mv, sizeof(s->c.mv));
3393  if (CONFIG_MPEG4_ENCODER && best_s.c.mv_dir & MV_DIRECT) {
3394  mx=my=0; //FIXME find the one we actually used
3395  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3396  } else if (best_s.c.mv_dir & MV_DIR_BACKWARD) {
3397  mx = s->c.mv[1][0][0];
3398  my = s->c.mv[1][0][1];
3399  }else{
3400  mx = s->c.mv[0][0][0];
3401  my = s->c.mv[0][0][1];
3402  }
3403 
3404  s->c.mv_dir = best_s.c.mv_dir;
3405  s->c.mv_type = best_s.c.mv_type;
3406  s->c.mb_intra = 0;
3407 /* s->c.mv[0][0][0] = best_s.mv[0][0][0];
3408  s->c.mv[0][0][1] = best_s.mv[0][0][1];
3409  s->c.mv[1][0][0] = best_s.mv[1][0][0];
3410  s->c.mv[1][0][1] = best_s.mv[1][0][1];*/
3411  backup_s.dquant= 0;
3412  s->skipdct=1;
3413  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3414  &dmin, &next_block, mx, my);
3415  s->skipdct=0;
3416  }
3417  }
3418 
3419  store_context_after_encode(s, &best_s, s->data_partitioning);
3420 
3421  pb_bits_count= put_bits_count(&s->pb);
3422  flush_put_bits(&s->pb);
3423  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3424  s->pb= backup_s.pb;
3425 
3426  if (s->data_partitioning) {
3427  pb2_bits_count= put_bits_count(&s->pb2);
3428  flush_put_bits(&s->pb2);
3429  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3430  s->pb2= backup_s.pb2;
3431 
3432  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3433  flush_put_bits(&s->tex_pb);
3434  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3435  s->tex_pb= backup_s.tex_pb;
3436  }
3437  s->last_bits= put_bits_count(&s->pb);
3438 
3439  if (CONFIG_H263_ENCODER &&
3440  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3442 
3443  if(next_block==0){ //FIXME 16 vs linesize16
3444  s->c.hdsp.put_pixels_tab[0][0](s->c.dest[0], s->c.sc.rd_scratchpad , s->c.linesize ,16);
3445  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[1], s->c.sc.rd_scratchpad + 16*s->c.linesize , s->c.uvlinesize, 8);
3446  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[2], s->c.sc.rd_scratchpad + 16*s->c.linesize + 8, s->c.uvlinesize, 8);
3447  }
3448 
3449  if (s->c.avctx->mb_decision == FF_MB_DECISION_BITS)
3450  mpv_reconstruct_mb(s, s->block);
3451  } else {
3452  int motion_x = 0, motion_y = 0;
3453  s->c.mv_type = MV_TYPE_16X16;
3454  // only one MB-Type possible
3455 
3456  switch(mb_type){
3458  s->c.mv_dir = 0;
3459  s->c.mb_intra = 1;
3460  motion_x= s->c.mv[0][0][0] = 0;
3461  motion_y= s->c.mv[0][0][1] = 0;
3462  s->c.mbintra_table[xy] = 1;
3463  break;
3465  s->c.mv_dir = MV_DIR_FORWARD;
3466  s->c.mb_intra = 0;
3467  motion_x= s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3468  motion_y= s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3469  break;
3471  s->c.mv_dir = MV_DIR_FORWARD;
3472  s->c.mv_type = MV_TYPE_FIELD;
3473  s->c.mb_intra = 0;
3474  for(i=0; i<2; i++){
3475  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3476  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3477  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3478  }
3479  break;
3481  s->c.mv_dir = MV_DIR_FORWARD;
3482  s->c.mv_type = MV_TYPE_8X8;
3483  s->c.mb_intra = 0;
3484  for(i=0; i<4; i++){
3485  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3486  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3487  }
3488  break;
3490  if (CONFIG_MPEG4_ENCODER) {
3492  s->c.mb_intra = 0;
3493  motion_x=s->b_direct_mv_table[xy][0];
3494  motion_y=s->b_direct_mv_table[xy][1];
3495  ff_mpeg4_set_direct_mv(&s->c, motion_x, motion_y);
3496  }
3497  break;
3499  if (CONFIG_MPEG4_ENCODER) {
3501  s->c.mb_intra = 0;
3502  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3503  }
3504  break;
3506  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3507  s->c.mb_intra = 0;
3508  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3509  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3510  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3511  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3512  break;
3514  s->c.mv_dir = MV_DIR_BACKWARD;
3515  s->c.mb_intra = 0;
3516  motion_x= s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3517  motion_y= s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3518  break;
3520  s->c.mv_dir = MV_DIR_FORWARD;
3521  s->c.mb_intra = 0;
3522  motion_x= s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3523  motion_y= s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3524  break;
3526  s->c.mv_dir = MV_DIR_FORWARD;
3527  s->c.mv_type = MV_TYPE_FIELD;
3528  s->c.mb_intra = 0;
3529  for(i=0; i<2; i++){
3530  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3531  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3532  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3533  }
3534  break;
3536  s->c.mv_dir = MV_DIR_BACKWARD;
3537  s->c.mv_type = MV_TYPE_FIELD;
3538  s->c.mb_intra = 0;
3539  for(i=0; i<2; i++){
3540  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3541  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3542  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3543  }
3544  break;
3546  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3547  s->c.mv_type = MV_TYPE_FIELD;
3548  s->c.mb_intra = 0;
3549  for(dir=0; dir<2; dir++){
3550  for(i=0; i<2; i++){
3551  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3552  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3553  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3554  }
3555  }
3556  break;
3557  default:
3558  av_unreachable("There is a case for every CANDIDATE_MB_TYPE_* "
3559  "except CANDIDATE_MB_TYPE_SKIPPED which is never "
3560  "the only candidate (always coupled with INTER) "
3561  "so that it never reaches this switch");
3562  }
3563 
3564  encode_mb(s, motion_x, motion_y);
3565 
3566  // RAL: Update last macroblock type
3567  s->last_mv_dir = s->c.mv_dir;
3568 
3569  if (CONFIG_H263_ENCODER &&
3570  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3572 
3573  mpv_reconstruct_mb(s, s->block);
3574  }
3575 
3576  s->c.cur_pic.qscale_table[xy] = s->c.qscale;
3577 
3578  /* clean the MV table in IPS frames for direct mode in B-frames */
3579  if (s->c.mb_intra /* && I,P,S_TYPE */) {
3580  s->p_mv_table[xy][0]=0;
3581  s->p_mv_table[xy][1]=0;
3582 #if CONFIG_H263_ENCODER
3583  } else if (s->c.h263_pred || s->c.h263_aic) {
3585 #endif
3586  }
3587 
3588  if (s->c.avctx->flags & AV_CODEC_FLAG_PSNR) {
3589  int w= 16;
3590  int h= 16;
3591 
3592  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
3593  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
3594 
3595  s->encoding_error[0] += sse(
3596  s, s->new_pic->data[0] + s->c.mb_x*16 + s->c.mb_y*s->c.linesize*16,
3597  s->c.dest[0], w, h, s->c.linesize);
3598  s->encoding_error[1] += sse(
3599  s, s->new_pic->data[1] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3600  s->c.dest[1], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3601  s->encoding_error[2] += sse(
3602  s, s->new_pic->data[2] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3603  s->c.dest[2], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3604  }
3605  if (s->loop_filter) {
3606  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263)
3607  ff_h263_loop_filter(&s->c);
3608  }
3609  ff_dlog(s->c.avctx, "MB %d %d bits\n",
3610  s->c.mb_x + s->c.mb_y * s->c.mb_stride, put_bits_count(&s->pb));
3611  }
3612  }
3613 
3614 #if CONFIG_MSMPEG4ENC
3615  //not beautiful here but we must write it before flushing so it has to be here
3616  if (s->c.msmpeg4_version != MSMP4_UNUSED && s->c.msmpeg4_version < MSMP4_WMV1 &&
3617  s->c.pict_type == AV_PICTURE_TYPE_I)
3619 #endif
3620 
3621  write_slice_end(s);
3622 
3623  return 0;
3624 }
3625 
/* Helpers for folding per-slice encoder statistics into the main context:
 * ADD() accumulates src's field into dst's; MERGE() additionally resets the
 * source field to 0 so a later merge cannot count it twice (see the two
 * merge_context_* functions below, which use both). */
#define ADD(field) dst->field += src->field;
#define MERGE(field) dst->field += src->field; src->field=0
{
    /* NOTE(review): the signature line was dropped by the source extraction;
     * from the call site (merge_context_after_me(s, s->c.enc_contexts[i]))
     * this is merge_context_after_me(dst, src) — confirm against upstream.
     * Accumulates the motion-estimation statistics gathered by one slice
     * context into the main context after the ME pass. */
    ADD(me.scene_change_score);
    ADD(me.mc_mb_var_sum_temp);
    ADD(me.mb_var_sum_temp);
}
3634 
{
    /* NOTE(review): the signature line was dropped by the source extraction;
     * from the call site (merge_context_after_encode(s, s->c.enc_contexts[i]))
     * this is merge_context_after_encode(dst, src) — confirm against upstream.
     * Folds a slice context's bit-usage statistics and encoded bitstream into
     * the main context after the encode pass. */
    int i;

    MERGE(dct_count[0]); //note, the other dct vars are not part of the context
    MERGE(dct_count[1]);
    ADD(mv_bits);
    ADD(i_tex_bits);
    ADD(p_tex_bits);
    ADD(i_count);
    ADD(misc_bits);
    ADD(encoding_error[0]);
    ADD(encoding_error[1]);
    ADD(encoding_error[2]);

    /* Merge the DCT-noise-reduction error accumulators only when the feature
     * is active (dst->dct_error_sum allocated). */
    if (dst->dct_error_sum) {
        for(i=0; i<64; i++){
            MERGE(dct_error_sum[0][i]);
            MERGE(dct_error_sum[1][i]);
        }
    }

    /* Both slice bitstreams must be byte-aligned before concatenation. */
    av_assert1(put_bits_count(&src->pb) % 8 ==0);
    av_assert1(put_bits_count(&dst->pb) % 8 ==0);
    ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
    flush_put_bits(&dst->pb);
}
3662 
/**
 * Pick the quantiser/lambda for the current picture.
 *
 * Uses a pre-queued lambda (m->next_lambda) if one is pending, otherwise asks
 * the rate controller unless the qscale is fixed. With adaptive quantisation
 * it also initialises the per-MB qscale table.
 *
 * @param dry_run nonzero when only probing (the pending next_lambda is then
 *                not consumed)
 * @return 0 on success, -1 if the rate controller returned a negative quality
 */
static int estimate_qp(MPVMainEncContext *const m, int dry_run)
{
    MPVEncContext *const s = &m->s;

    if (m->next_lambda){
        s->c.cur_pic.ptr->f->quality = m->next_lambda;
        if(!dry_run) m->next_lambda= 0;
    } else if (!m->fixed_qscale) {
        int quality = ff_rate_estimate_qscale(m, dry_run);
        s->c.cur_pic.ptr->f->quality = quality;
        if (s->c.cur_pic.ptr->f->quality < 0)
            return -1;
    }

    if(s->adaptive_quant){
        init_qscale_tab(s);

        /* NOTE(review): the statement inside each if(CONFIG_*_ENCODER) below
         * was dropped by the source extraction (per-codec qscale-table
         * clean-up calls); as written the break binds to the if() — restore
         * from upstream before relying on this text. */
        switch (s->c.codec_id) {
        case AV_CODEC_ID_MPEG4:
            if (CONFIG_MPEG4_ENCODER)
            break;
        case AV_CODEC_ID_H263:
        case AV_CODEC_ID_H263P:
        case AV_CODEC_ID_FLV1:
            if (CONFIG_H263_ENCODER)
            break;
        }

        s->lambda = s->lambda_table[0];
        //FIXME broken
    }else
        s->lambda = s->c.cur_pic.ptr->f->quality;
    update_qscale(m);
    return 0;
}
3700 
/* must be called before writing the header */
{
    /* NOTE(review): the signature line was dropped by the source extraction;
     * this is the frame-distance setup helper (sets pp_time/pb_time used for
     * B-frame temporal prediction) — confirm name against upstream.
     * Derives the current picture time from its pts, then updates the
     * P-to-P distance (pp_time) for reference frames or the P-to-B distance
     * (pb_time) for B-frames. */
    av_assert1(s->c.cur_pic.ptr->f->pts != AV_NOPTS_VALUE);
    s->c.time = s->c.cur_pic.ptr->f->pts * s->c.avctx->time_base.num;

    if (s->c.pict_type == AV_PICTURE_TYPE_B) {
        s->c.pb_time = s->c.pp_time - (s->c.last_non_b_time - s->c.time);
        av_assert1(s->c.pb_time > 0 && s->c.pb_time < s->c.pp_time);
    }else{
        s->c.pp_time = s->c.time - s->c.last_non_b_time;
        s->c.last_non_b_time = s->c.time;
        av_assert1(s->picture_number == 0 || s->c.pp_time > 0);
    }
}
3716 
/**
 * Encode one complete picture: set up per-slice bit writers, run motion
 * estimation and macroblock-type decisions across all slice contexts, pick
 * f_code/b_code, write the picture header and run the slice encode threads,
 * then merge the slice results back into the main context.
 *
 * @param m   main encoder context
 * @param pkt output packet whose buffer is partitioned among the slices
 * @return 0 on success, a negative error code otherwise
 *
 * NOTE(review): several single-line statements in this function were dropped
 * by the source extraction (each gap is flagged below); restore them from
 * upstream before treating this text as buildable.
 */
static int encode_picture(MPVMainEncContext *const m, const AVPacket *pkt)
{
    MPVEncContext *const s = &m->s;
    int i, ret;
    int bits;
    int context_count = s->c.slice_context_count;

    /* we need to initialize some time vars before we can encode B-frames */
    // RAL: Condition added for MPEG1VIDEO
    /* NOTE(review): the bodies of the next two if()s were dropped by the
     * extraction (frame-distance / MPEG-4 time setup calls); as written each
     * if() wrongly binds to the following statement. */
    if (s->c.out_format == FMT_MPEG1 || (s->c.h263_pred && s->c.msmpeg4_version == MSMP4_UNUSED))
    if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4)

//     s->lambda = s->c.cur_pic.ptr->quality; //FIXME qscale / ... stuff for ME rate distortion

    /* Rounding control: MSMPEG4 v3+ I-frames use no_rounding; other reference
     * frames toggle it each frame when flipflop rounding is enabled. */
    if (s->c.pict_type == AV_PICTURE_TYPE_I) {
        s->c.no_rounding = s->c.msmpeg4_version >= MSMP4_V3;
    } else if (s->c.pict_type != AV_PICTURE_TYPE_B) {
        s->c.no_rounding ^= s->flipflop_rounding;
    }

    /* In 2-pass mode take qp from the stats; otherwise, without a fixed
     * qscale, reuse the last lambda of the matching picture type. */
    if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
        ret = estimate_qp(m, 1);
        if (ret < 0)
            return ret;
        ff_get_2pass_fcode(m);
    } else if (!(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE)) {
        if (s->c.pict_type == AV_PICTURE_TYPE_B)
            s->lambda = m->last_lambda_for[s->c.pict_type];
        else
            s->lambda = m->last_lambda_for[m->last_non_b_pict_type];
        update_qscale(m);
    }

    s->c.mb_intra = 0; //for the rate distortion & bit compare functions
    /* Partition the packet buffer among slice contexts proportionally to the
     * MB rows each slice covers, and initialise their bit writers. */
    for (int i = 0; i < context_count; i++) {
        MPVEncContext *const slice = s->c.enc_contexts[i];
        int h = s->c.mb_height;
        uint8_t *start = pkt->data + (int64_t)pkt->size * slice->c.start_mb_y / h;
        uint8_t *end   = pkt->data + (int64_t)pkt->size * slice->c.end_mb_y   / h;

        init_put_bits(&slice->pb, start, end - start);

        if (i) {
            ret = ff_update_duplicate_context(&slice->c, &s->c);
            if (ret < 0)
                return ret;
            slice->lambda  = s->lambda;
            slice->lambda2 = s->lambda2;
        }
        slice->me.temp = slice->me.scratchpad = slice->c.sc.scratchpad_buf;
        ff_me_init_pic(slice);
    }

    /* Estimate motion for every MB */
    if (s->c.pict_type != AV_PICTURE_TYPE_I) {
        s->lambda  = (s->lambda  * m->me_penalty_compensation + 128) >> 8;
        s->lambda2 = (s->lambda2 * (int64_t) m->me_penalty_compensation + 128) >> 8;
        if (s->c.pict_type != AV_PICTURE_TYPE_B) {
            if ((m->me_pre && m->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
                m->me_pre == 2) {
                s->c.avctx->execute(s->c.avctx, pre_estimate_motion_thread,
                                    &s->c.enc_contexts[0], NULL,
                                    context_count, sizeof(void*));
            }
        }

        s->c.avctx->execute(s->c.avctx, estimate_motion_thread, &s->c.enc_contexts[0],
                            NULL, context_count, sizeof(void*));
    }else /* if (s->c.pict_type == AV_PICTURE_TYPE_I) */{
        /* I-Frame */
        for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
            s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;

        if (!m->fixed_qscale) {
            /* finding spatial complexity for I-frame rate control */
            s->c.avctx->execute(s->c.avctx, mb_var_thread, &s->c.enc_contexts[0],
                                NULL, context_count, sizeof(void*));
        }
    }
    for(i=1; i<context_count; i++){
        merge_context_after_me(s, s->c.enc_contexts[i]);
    }
    m->mc_mb_var_sum = s->me.mc_mb_var_sum_temp;
    m->mb_var_sum    = s->me.mb_var_sum_temp;
    emms_c();

    /* Scene-change detection: promote a P-frame to I when ME reported a
     * large enough scene-change score. */
    if (s->me.scene_change_score > m->scenechange_threshold &&
        s->c.pict_type == AV_PICTURE_TYPE_P) {
        s->c.pict_type = AV_PICTURE_TYPE_I;
        for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
            s->mb_type[i] = CANDIDATE_MB_TYPE_INTRA;
        if (s->c.msmpeg4_version >= MSMP4_V3)
            s->c.no_rounding = 1;
        ff_dlog(s->c.avctx, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
                m->mb_var_sum, m->mc_mb_var_sum);
    }

    /* Choose f_code/b_code from the collected MVs and clamp out-of-range
     * vectors (skipped in H.263+ unrestricted-MV mode). */
    if (!s->umvplus) {
        if (s->c.pict_type == AV_PICTURE_TYPE_P || s->c.pict_type == AV_PICTURE_TYPE_S) {
            s->f_code = ff_get_best_fcode(m, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);

            if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
                int a,b;
                a = ff_get_best_fcode(m, s->c.p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
                b = ff_get_best_fcode(m, s->c.p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
                s->f_code = FFMAX3(s->f_code, a, b);
            }

            /* NOTE(review): one statement was dropped here by the extraction
             * (a long-MV fixup call preceding the one below). */
            ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
            if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
                int j;
                for(i=0; i<2; i++){
                    for(j=0; j<2; j++)
                        ff_fix_long_mvs(s, s->p_field_select_table[i], j,
                                        s->c.p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
                }
            }
        } else if (s->c.pict_type == AV_PICTURE_TYPE_B) {
            int a, b;

            a = ff_get_best_fcode(m, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
            b = ff_get_best_fcode(m, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
            s->f_code = FFMAX(a, b);

            a = ff_get_best_fcode(m, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
            b = ff_get_best_fcode(m, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
            s->b_code = FFMAX(a, b);

            ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
            ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
            ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
            ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
            if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
                int dir, j;
                for(dir=0; dir<2; dir++){
                    for(i=0; i<2; i++){
                        for(j=0; j<2; j++){
                            /* NOTE(review): the declaration of 'type' used
                             * below was dropped here by the extraction. */
                            ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
                                            s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
                        }
                    }
                }
            }
        }
    }

    ret = estimate_qp(m, 0);
    if (ret < 0)
        return ret;

    if (s->c.qscale < 3 && s->max_qcoeff <= 128 &&
        s->c.pict_type == AV_PICTURE_TYPE_I &&
        !(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE))
        s->c.qscale = 3; //reduce clipping problems

    if (s->c.out_format == FMT_MJPEG) {
        /* NOTE(review): the first line of this call was dropped by the
         * extraction; only its trailing argument list survives below. */
        (7 + s->c.qscale) / s->c.qscale, 65535);
        if (ret < 0)
            return ret;

        if (s->c.codec_id != AV_CODEC_ID_AMV) {
            const uint16_t *  luma_matrix = ff_mpeg1_default_intra_matrix;
            const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;

            if (s->c.avctx->intra_matrix) {
                chroma_matrix =
                luma_matrix = s->c.avctx->intra_matrix;
            }
            if (s->c.avctx->chroma_intra_matrix)
                chroma_matrix = s->c.avctx->chroma_intra_matrix;

            /* for mjpeg, we do include qscale in the matrix */
            for (int i = 1; i < 64; i++) {
                int j = s->c.idsp.idct_permutation[i];

                s->c.chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->c.qscale) >> 3);
                s->c.       intra_matrix[j] = av_clip_uint8((  luma_matrix[i] * s->c.qscale) >> 3);
            }
            s->c.y_dc_scale_table =
            s->c.c_dc_scale_table = ff_mpeg12_dc_scale_table[s->c.intra_dc_precision];
            s->c.chroma_intra_matrix[0] =
            s->c.intra_matrix[0] = ff_mpeg12_dc_scale_table[s->c.intra_dc_precision][8];
        } else {
            /* AMV: fixed DC scales and the SP5X 5-quant tables. */
            static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
            static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
            for (int i = 1; i < 64; i++) {
                int j = s->c.idsp.idct_permutation[ff_zigzag_direct[i]];

                s->c.intra_matrix[j]        = sp5x_qscale_five_quant_table[0][i];
                s->c.chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
            }
            s->c.y_dc_scale_table = y;
            s->c.c_dc_scale_table = c;
            s->c.intra_matrix[0]        = 13;
            s->c.chroma_intra_matrix[0] = 14;
        }
        ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
                          s->c.intra_matrix, s->intra_quant_bias, 8, 8, 1);
        ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
                          s->c.chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
        s->c.qscale = 8;
    }

    if (s->c.pict_type == AV_PICTURE_TYPE_I) {
        s->c.cur_pic.ptr->f->flags |= AV_FRAME_FLAG_KEY;
    } else {
        s->c.cur_pic.ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
    }
    s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;

    if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
        m->picture_in_gop_number = 0;

    /* Write the picture header and account its bits separately. */
    s->c.mb_x = s->c.mb_y = 0;
    s->last_bits= put_bits_count(&s->pb);
    ret = m->encode_picture_header(m);
    if (ret < 0)
        return ret;
    bits= put_bits_count(&s->pb);
    m->header_bits = bits - s->last_bits;

    /* Run the per-slice encode threads, then merge their output back. */
    for(i=1; i<context_count; i++){
        update_duplicate_context_after_me(s->c.enc_contexts[i], s);
    }
    s->c.avctx->execute(s->c.avctx, encode_thread, &s->c.enc_contexts[0],
                        NULL, context_count, sizeof(void*));
    for(i=1; i<context_count; i++){
        if (s->pb.buf_end == s->c.enc_contexts[i]->pb.buf)
            set_put_bits_buffer_size(&s->pb, FFMIN(s->c.enc_contexts[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
        merge_context_after_encode(s, s->c.enc_contexts[i]);
    }
    emms_c();
    return 0;
}
3957 
3958 static void denoise_dct_c(MPVEncContext *const s, int16_t *block)
3959 {
3960  const int intra = s->c.mb_intra;
3961  int i;
3962 
3963  s->dct_count[intra]++;
3964 
3965  for(i=0; i<64; i++){
3966  int level= block[i];
3967 
3968  if(level){
3969  if(level>0){
3970  s->dct_error_sum[intra][i] += level;
3971  level -= s->dct_offset[intra][i];
3972  if(level<0) level=0;
3973  }else{
3974  s->dct_error_sum[intra][i] -= level;
3975  level += s->dct_offset[intra][i];
3976  if(level>0) level=0;
3977  }
3978  block[i]= level;
3979  }
3980  }
3981 }
3982 
3984  int16_t *block, int n,
3985  int qscale, int *overflow){
3986  const int *qmat;
3987  const uint16_t *matrix;
3988  const uint8_t *scantable;
3989  const uint8_t *perm_scantable;
3990  int max=0;
3991  unsigned int threshold1, threshold2;
3992  int bias=0;
3993  int run_tab[65];
3994  int level_tab[65];
3995  int score_tab[65];
3996  int survivor[65];
3997  int survivor_count;
3998  int last_run=0;
3999  int last_level=0;
4000  int last_score= 0;
4001  int last_i;
4002  int coeff[2][64];
4003  int coeff_count[64];
4004  int qmul, qadd, start_i, last_non_zero, i, dc;
4005  const int esc_length= s->ac_esc_length;
4006  const uint8_t *length, *last_length;
4007  const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4008  int mpeg2_qscale;
4009 
4010  s->fdsp.fdct(block);
4011 
4012  if(s->dct_error_sum)
4013  s->denoise_dct(s, block);
4014  qmul= qscale*16;
4015  qadd= ((qscale-1)|1)*8;
4016 
4017  if (s->c.q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4018  else mpeg2_qscale = qscale << 1;
4019 
4020  if (s->c.mb_intra) {
4021  int q;
4022  scantable = s->c.intra_scantable.scantable;
4023  perm_scantable = s->c.intra_scantable.permutated;
4024  if (!s->c.h263_aic) {
4025  if (n < 4)
4026  q = s->c.y_dc_scale;
4027  else
4028  q = s->c.c_dc_scale;
4029  q = q << 3;
4030  } else{
4031  /* For AIC we skip quant/dequant of INTRADC */
4032  q = 1 << 3;
4033  qadd=0;
4034  }
4035 
4036  /* note: block[0] is assumed to be positive */
4037  block[0] = (block[0] + (q >> 1)) / q;
4038  start_i = 1;
4039  last_non_zero = 0;
4040  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4041  matrix = n < 4 ? s->c.intra_matrix : s->c.chroma_intra_matrix;
4042  if (s->mpeg_quant || s->c.out_format == FMT_MPEG1 || s->c.out_format == FMT_MJPEG)
4043  bias= 1<<(QMAT_SHIFT-1);
4044 
4045  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4046  length = s->intra_chroma_ac_vlc_length;
4047  last_length= s->intra_chroma_ac_vlc_last_length;
4048  } else {
4049  length = s->intra_ac_vlc_length;
4050  last_length= s->intra_ac_vlc_last_length;
4051  }
4052  } else {
4053  scantable = s->c.inter_scantable.scantable;
4054  perm_scantable = s->c.inter_scantable.permutated;
4055  start_i = 0;
4056  last_non_zero = -1;
4057  qmat = s->q_inter_matrix[qscale];
4058  matrix = s->c.inter_matrix;
4059  length = s->inter_ac_vlc_length;
4060  last_length= s->inter_ac_vlc_last_length;
4061  }
4062  last_i= start_i;
4063 
4064  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4065  threshold2= (threshold1<<1);
4066 
4067  for(i=63; i>=start_i; i--) {
4068  const int j = scantable[i];
4069  int64_t level = (int64_t)block[j] * qmat[j];
4070 
4071  if(((uint64_t)(level+threshold1))>threshold2){
4072  last_non_zero = i;
4073  break;
4074  }
4075  }
4076 
4077  for(i=start_i; i<=last_non_zero; i++) {
4078  const int j = scantable[i];
4079  int64_t level = (int64_t)block[j] * qmat[j];
4080 
4081 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4082 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4083  if(((uint64_t)(level+threshold1))>threshold2){
4084  if(level>0){
4085  level= (bias + level)>>QMAT_SHIFT;
4086  coeff[0][i]= level;
4087  coeff[1][i]= level-1;
4088 // coeff[2][k]= level-2;
4089  }else{
4090  level= (bias - level)>>QMAT_SHIFT;
4091  coeff[0][i]= -level;
4092  coeff[1][i]= -level+1;
4093 // coeff[2][k]= -level+2;
4094  }
4095  coeff_count[i]= FFMIN(level, 2);
4096  av_assert2(coeff_count[i]);
4097  max |=level;
4098  }else{
4099  coeff[0][i]= (level>>31)|1;
4100  coeff_count[i]= 1;
4101  }
4102  }
4103 
4104  *overflow= s->max_qcoeff < max; //overflow might have happened
4105 
4106  if(last_non_zero < start_i){
4107  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4108  return last_non_zero;
4109  }
4110 
4111  score_tab[start_i]= 0;
4112  survivor[0]= start_i;
4113  survivor_count= 1;
4114 
4115  for(i=start_i; i<=last_non_zero; i++){
4116  int level_index, j, zero_distortion;
4117  int dct_coeff= FFABS(block[ scantable[i] ]);
4118  int best_score=256*256*256*120;
4119 
4120  if (s->fdsp.fdct == ff_fdct_ifast)
4121  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4122  zero_distortion= dct_coeff*dct_coeff;
4123 
4124  for(level_index=0; level_index < coeff_count[i]; level_index++){
4125  int distortion;
4126  int level= coeff[level_index][i];
4127  const int alevel= FFABS(level);
4128  int unquant_coeff;
4129 
4130  av_assert2(level);
4131 
4132  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4133  unquant_coeff= alevel*qmul + qadd;
4134  } else if (s->c.out_format == FMT_MJPEG) {
4135  j = s->c.idsp.idct_permutation[scantable[i]];
4136  unquant_coeff = alevel * matrix[j] * 8;
4137  }else{ // MPEG-1
4138  j = s->c.idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4139  if (s->c.mb_intra) {
4140  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4141  unquant_coeff = (unquant_coeff - 1) | 1;
4142  }else{
4143  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4144  unquant_coeff = (unquant_coeff - 1) | 1;
4145  }
4146  unquant_coeff<<= 3;
4147  }
4148 
4149  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4150  level+=64;
4151  if((level&(~127)) == 0){
4152  for(j=survivor_count-1; j>=0; j--){
4153  int run= i - survivor[j];
4154  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4155  score += score_tab[i-run];
4156 
4157  if(score < best_score){
4158  best_score= score;
4159  run_tab[i+1]= run;
4160  level_tab[i+1]= level-64;
4161  }
4162  }
4163 
4164  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4165  for(j=survivor_count-1; j>=0; j--){
4166  int run= i - survivor[j];
4167  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4168  score += score_tab[i-run];
4169  if(score < last_score){
4170  last_score= score;
4171  last_run= run;
4172  last_level= level-64;
4173  last_i= i+1;
4174  }
4175  }
4176  }
4177  }else{
4178  distortion += esc_length*lambda;
4179  for(j=survivor_count-1; j>=0; j--){
4180  int run= i - survivor[j];
4181  int score= distortion + score_tab[i-run];
4182 
4183  if(score < best_score){
4184  best_score= score;
4185  run_tab[i+1]= run;
4186  level_tab[i+1]= level-64;
4187  }
4188  }
4189 
4190  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4191  for(j=survivor_count-1; j>=0; j--){
4192  int run= i - survivor[j];
4193  int score= distortion + score_tab[i-run];
4194  if(score < last_score){
4195  last_score= score;
4196  last_run= run;
4197  last_level= level-64;
4198  last_i= i+1;
4199  }
4200  }
4201  }
4202  }
4203  }
4204 
4205  score_tab[i+1]= best_score;
4206 
4207  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4208  if(last_non_zero <= 27){
4209  for(; survivor_count; survivor_count--){
4210  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4211  break;
4212  }
4213  }else{
4214  for(; survivor_count; survivor_count--){
4215  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4216  break;
4217  }
4218  }
4219 
4220  survivor[ survivor_count++ ]= i+1;
4221  }
4222 
4223  if (s->c.out_format != FMT_H263 && s->c.out_format != FMT_H261) {
4224  last_score= 256*256*256*120;
4225  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4226  int score= score_tab[i];
4227  if (i)
4228  score += lambda * 2; // FIXME more exact?
4229 
4230  if(score < last_score){
4231  last_score= score;
4232  last_i= i;
4233  last_level= level_tab[i];
4234  last_run= run_tab[i];
4235  }
4236  }
4237  }
4238 
4239  s->coded_score[n] = last_score;
4240 
4241  dc= FFABS(block[0]);
4242  last_non_zero= last_i - 1;
4243  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4244 
4245  if(last_non_zero < start_i)
4246  return last_non_zero;
4247 
4248  if(last_non_zero == 0 && start_i == 0){
4249  int best_level= 0;
4250  int best_score= dc * dc;
4251 
4252  for(i=0; i<coeff_count[0]; i++){
4253  int level= coeff[i][0];
4254  int alevel= FFABS(level);
4255  int unquant_coeff, score, distortion;
4256 
4257  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4258  unquant_coeff= (alevel*qmul + qadd)>>3;
4259  } else{ // MPEG-1
4260  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4261  unquant_coeff = (unquant_coeff - 1) | 1;
4262  }
4263  unquant_coeff = (unquant_coeff + 4) >> 3;
4264  unquant_coeff<<= 3 + 3;
4265 
4266  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4267  level+=64;
4268  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4269  else score= distortion + esc_length*lambda;
4270 
4271  if(score < best_score){
4272  best_score= score;
4273  best_level= level - 64;
4274  }
4275  }
4276  block[0]= best_level;
4277  s->coded_score[n] = best_score - dc*dc;
4278  if(best_level == 0) return -1;
4279  else return last_non_zero;
4280  }
4281 
4282  i= last_i;
4283  av_assert2(last_level);
4284 
4285  block[ perm_scantable[last_non_zero] ]= last_level;
4286  i -= last_run + 1;
4287 
4288  for(; i>start_i; i -= run_tab[i] + 1){
4289  block[ perm_scantable[i-1] ]= level_tab[i];
4290  }
4291 
4292  return last_non_zero;
4293 }
4294 
/* 64 precomputed 8x8 DCT basis functions, stored in IDCT-permuted index
 * order; filled lazily by build_basis() on first use (see dct_quantize_refine). */
static DECLARE_ALIGNED(16, int16_t, basis)[64][64];
4296 
4297 static void build_basis(uint8_t *perm){
4298  int i, j, x, y;
4299  emms_c();
4300  for(i=0; i<8; i++){
4301  for(j=0; j<8; j++){
4302  for(y=0; y<8; y++){
4303  for(x=0; x<8; x++){
4304  double s= 0.25*(1<<BASIS_SHIFT);
4305  int index= 8*i + j;
4306  int perm_index= perm[index];
4307  if(i==0) s*= sqrt(0.5);
4308  if(j==0) s*= sqrt(0.5);
4309  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4310  }
4311  }
4312  }
4313  }
4314 }
4315 
/**
 * Refine an already-quantized 8x8 block by rate-distortion search
 * (quantizer noise shaping).  Starting from the current block[], it
 * repeatedly tries changing single quantized coefficients by +-1,
 * scoring each candidate as the change in weighted pixel-domain
 * distortion (try_8x8basis() on the reconstruction residual rem[])
 * plus lambda times the change in VLC bit cost, and applies the best
 * improving change until none remains.
 *
 * @param s      encoder context (DSP functions, VLC length tables, lambda2)
 * @param block  quantized coefficients, refined in place
 * @param weight per-coefficient weights; rescaled in place to the range 16..63
 * @param orig   reference block in the spatial domain (RECON_SHIFT precision)
 * @param n      block index; n < 4 selects the luma DC scale for intra blocks
 * @param qscale quantizer scale
 * @return index (in scan order) of the new last nonzero coefficient
 */
static int dct_quantize_refine(MPVEncContext *const s, //FIXME breaks denoise?
                        int16_t *block, int16_t *weight, int16_t *orig,
                        int n, int qscale){
    DECLARE_ALIGNED(16, int16_t, rem)[64];
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const uint8_t *length;
    const uint8_t *last_length;
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->c.mb_intra is true

    /* lazily build the DCT basis tables on first use */
    if(basis[0][0] == 0)
        build_basis(s->c.idsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->c.mb_intra) {
        scantable = s->c.intra_scantable.scantable;
        perm_scantable = s->c.intra_scantable.permutated;
        if (!s->c.h263_aic) {
            if (n < 4)
                q = s->c.y_dc_scale;
            else
                q = s->c.c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
//        if (s->mpeg_quant || s->c.out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        /* intra chroma may use its own AC VLC length tables */
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable = s->c.inter_scantable.scantable;
        perm_scantable = s->c.inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->c.block_last_index[n];

    /* rem[] starts as (DC reconstruction + rounding) - original; the
     * dequantized AC contributions are added below, after which rem[]
     * holds the current reconstruction error in the spatial domain. */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    /* remap the input weights into 16..63 and accumulate their energy,
     * which scales the RD lambda below */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda = sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    /* build the run-length table of the current nonzero coefficients and
     * fold their dequantized basis contributions into rem[] */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    /* greedy refinement: find the single best +-1 change, apply it,
     * and repeat until no change improves the score */
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            /* DCT of the weighted residual; used below to reject candidate
             * signs for currently-zero coefficients that agree with the
             * residual gradient */
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        /* intra blocks: also try changing the DC coefficient */
        if(start_i){
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->c.mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                /* keep the dequantized DC within 0..2047 */
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        /* try +-1 on each AC coefficient, scoring bit-cost + distortion */
        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--; /* run2 counts down to the next nonzero coefficient */
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                /* below noise-shaping level 2, never grow a coefficient's
                 * magnitude */
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* nonzero -> nonzero: bit-cost change of re-coding
                         * the same (run, level) with the new level */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* zero -> +-1: a new coefficient splits the run */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            /* skip candidates whose sign matches the
                             * residual gradient (they would increase error) */
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            /* levels outside -64..63 are coded via escape;
                             * the UNI_AC tables do not apply */
                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            /* the new coefficient becomes the last one; the
                             * previous last is re-coded as a non-last code */
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* +-1 -> zero: removing the coefficient merges runs */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        /* removing the last coefficient: the previous
                         * nonzero one becomes the new last */
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            /* apply the winning change, update last_non_zero, rebuild the
             * run-length table and fold the change into rem[] */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break; /* no improving change found: done */
        }
    }

    return last_non_zero;
}
4631 
/**
 * Permute an 8x8 block of coefficients in place.
 * @param block       the block to permute
 * @param permutation target index for each natural-order position
 * @param scantable   scan order, used only to restrict the work to the
 *                    coefficients actually in use; the block is NOT
 *                    (inverse) permutated to scantable order!
 * @param last        last nonzero coefficient in scantable order; used to
 *                    speed the permutation up
 */
void ff_block_permute(int16_t *block, const uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t stash[64];

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* lift every in-use coefficient out of the block ... */
    for (int i = 0; i <= last; i++) {
        const int pos = scantable[i];
        stash[pos] = block[pos];
        block[pos] = 0;
    }

    /* ... and drop each one at its permuted position */
    for (int i = 0; i <= last; i++) {
        const int pos = scantable[i];
        block[permutation[pos]] = stash[pos];
    }
}
4667 
4668 static int dct_quantize_c(MPVEncContext *const s,
4669  int16_t *block, int n,
4670  int qscale, int *overflow)
4671 {
4672  int i, last_non_zero, q, start_i;
4673  const int *qmat;
4674  const uint8_t *scantable;
4675  int bias;
4676  int max=0;
4677  unsigned int threshold1, threshold2;
4678 
4679  s->fdsp.fdct(block);
4680 
4681  if(s->dct_error_sum)
4682  s->denoise_dct(s, block);
4683 
4684  if (s->c.mb_intra) {
4685  scantable = s->c.intra_scantable.scantable;
4686  if (!s->c.h263_aic) {
4687  if (n < 4)
4688  q = s->c.y_dc_scale;
4689  else
4690  q = s->c.c_dc_scale;
4691  q = q << 3;
4692  } else
4693  /* For AIC we skip quant/dequant of INTRADC */
4694  q = 1 << 3;
4695 
4696  /* note: block[0] is assumed to be positive */
4697  block[0] = (block[0] + (q >> 1)) / q;
4698  start_i = 1;
4699  last_non_zero = 0;
4700  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4701  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4702  } else {
4703  scantable = s->c.inter_scantable.scantable;
4704  start_i = 0;
4705  last_non_zero = -1;
4706  qmat = s->q_inter_matrix[qscale];
4707  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4708  }
4709  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4710  threshold2= (threshold1<<1);
4711  for(i=63;i>=start_i;i--) {
4712  const int j = scantable[i];
4713  int64_t level = (int64_t)block[j] * qmat[j];
4714 
4715  if(((uint64_t)(level+threshold1))>threshold2){
4716  last_non_zero = i;
4717  break;
4718  }else{
4719  block[j]=0;
4720  }
4721  }
4722  for(i=start_i; i<=last_non_zero; i++) {
4723  const int j = scantable[i];
4724  int64_t level = (int64_t)block[j] * qmat[j];
4725 
4726 // if( bias+level >= (1<<QMAT_SHIFT)
4727 // || bias-level >= (1<<QMAT_SHIFT)){
4728  if(((uint64_t)(level+threshold1))>threshold2){
4729  if(level>0){
4730  level= (bias + level)>>QMAT_SHIFT;
4731  block[j]= level;
4732  }else{
4733  level= (bias - level)>>QMAT_SHIFT;
4734  block[j]= -level;
4735  }
4736  max |=level;
4737  }else{
4738  block[j]=0;
4739  }
4740  }
4741  *overflow= s->max_qcoeff < max; //overflow might have happened
4742 
4743  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4744  if (s->c.idsp.perm_type != FF_IDCT_PERM_NONE)
4745  ff_block_permute(block, s->c.idsp.idct_permutation,
4746  scantable, last_non_zero);
4747 
4748  return last_non_zero;
4749 }
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
encode_frame
static int encode_frame(AVCodecContext *c, const AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1501
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3983
put_dct
static void put_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1157
MPV_MAX_PLANES
#define MPV_MAX_PLANES
Definition: mpegpicture.h:31
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MPVEncContext *const s, int type)
Definition: motion_est.c:1661
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:378
FF_MATRIX_TYPE_INTRA
#define FF_MATRIX_TYPE_INTRA
Check if the elements of codec context matrices (intra_matrix, inter_matrix or chroma_intra_matrix) a...
Definition: encode.h:106
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:83
ff_encode_reordered_opaque
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
Definition: encode.c:219
mpegvideo_unquantize.h
MPVMainEncContext::me_pre
int me_pre
prepass for motion estimation
Definition: mpegvideoenc.h:264
ff_fix_long_mvs
void ff_fix_long_mvs(MPVEncContext *const s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1710
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:432
MPVMainEncContext::fcode_tab
const uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideoenc.h:239
MPVMainEncContext::fixed_qscale
int fixed_qscale
fixed qscale if non zero
Definition: mpegvideoenc.h:257
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegvideoenc.h:298
encode_mb_hq
static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2729
MPVMainEncContext::frame_skip_cmp_fn
me_cmp_func frame_skip_cmp_fn
Definition: mpegvideoenc.h:246
MPVMainEncContext::bit_rate
int64_t bit_rate
Definition: mpegvideoenc.h:251
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MPVEncContext *const s, int n, int threshold)
Definition: mpegvideo_enc.c:2171
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:175
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
h263data.h
init_unquantize
static av_cold void init_unquantize(MPVEncContext *const s2, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:315
ff_mpv_enc_class
const AVClass ff_mpv_enc_class
Definition: mpegvideo_enc.c:103
encode_mb
static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2632
level
uint8_t level
Definition: svq3.c:208
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1493
av_clip
#define av_clip
Definition: common.h:100
MPVEncContext
Definition: mpegvideoenc.h:46
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:523
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:225
ALIGN
#define ALIGN
Definition: hashtable.c:32
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:821
init_qscale_tab
static void init_qscale_tab(MPVEncContext *const s)
init s->c.cur_pic.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:243
ff_mpv_init_duplicate_contexts
av_cold int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:118
update_noise_reduction
static void update_noise_reduction(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1902
MBBackup::mv_bits
int mv_bits
Definition: mpegvideo_enc.c:2654
mem_internal.h
MPVMainEncContext::dct_error_sum_base
char * dct_error_sum_base
backs dct_error_sum
Definition: mpegvideoenc.h:270
ff_me_init
av_cold int ff_me_init(MotionEstContext *c, AVCodecContext *avctx, const MECmpContext *mecc, int mpvenc)
Definition: motion_est.c:309
MBBackup::misc_bits
int misc_bits
Definition: mpegvideo_enc.c:2654
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:206
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
BUF_BITS
#define BUF_BITS
Definition: put_bits.h:47
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1277
set_frame_distances
static void set_frame_distances(MPVEncContext *const s)
Definition: mpegvideo_enc.c:3702
thread.h
frame_start
static void frame_start(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1924
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
ff_speedhq_end_slice
void ff_speedhq_end_slice(MPVEncContext *const s)
Definition: speedhqenc.c:118
MBBackup::block_last_index
int block_last_index[8]
Definition: mpegvideo_enc.c:2650
estimate_qp
static int estimate_qp(MPVMainEncContext *const m, int dry_run)
Definition: mpegvideo_enc.c:3663
ff_msmpeg4_encode_init
av_cold void ff_msmpeg4_encode_init(MPVMainEncContext *const m)
Definition: msmpeg4enc.c:673
matrix
Definition: vc1dsp.c:43
src1
const pixel * src1
Definition: h264pred_template.c:420
MPVEncContext::c
MpegEncContext c
the common base context
Definition: mpegvideoenc.h:47
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:213
MBBackup::last_mv
int last_mv[2][2][2]
Definition: mpegvideo_enc.c:2645
MPVMainEncContext::total_bits
int64_t total_bits
Definition: mpegvideoenc.h:252
mpegvideoenc.h
int64_t
long long int64_t
Definition: coverity.c:34
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
ff_dct_encode_init
av_cold void ff_dct_encode_init(MPVEncContext *const s)
Definition: mpegvideo_enc.c:300
MPVMainEncContext::noise_reduction
int noise_reduction
Definition: mpegvideoenc.h:230
COPY
#define COPY(a)
ff_me_init_pic
void ff_me_init_pic(MPVEncContext *const s)
Definition: motion_est.c:371
h263enc.h
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4295
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:948
estimate_best_b_count
static int estimate_best_b_count(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1522
MPVMainEncContext::last_lambda_for
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideoenc.h:258
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:165
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2846
ff_clean_h263_qscales
void ff_clean_h263_qscales(MPVEncContext *s)
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:808
out_size
int out_size
Definition: movenc.c:56
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:173
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
sse
static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2780
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegvideoenc.h:291
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:158
MPVMainEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideoenc.h:242
MPVUnquantDSPContext::dct_unquantize_mpeg1_intra
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:35
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:224
INTERLACED_DCT
#define INTERLACED_DCT(s)
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:529
AVFrame::width
int width
Definition: frame.h:499
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:191
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
MBBackup::last_bits
int last_bits
Definition: mpegvideo_enc.c:2654
AVPacket::data
uint8_t * data
Definition: packet.h:588
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
Definition: packet.c:379
AVOption
AVOption.
Definition: opt.h:429
encode.h
b
#define b
Definition: input.c:42
put_bytes_count
static int put_bytes_count(const PutBitContext *s, int round_up)
Definition: put_bits.h:110
MPVEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideoenc.h:52
data
const char data[16]
Definition: mxf.c:149
MPVMainEncContext::dts_delta
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
Definition: mpegvideoenc.h:216
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
MpegEncContext::MSMP4_UNUSED
@ MSMP4_UNUSED
Definition: mpegvideo.h:236
write_slice_end
static void write_slice_end(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2893
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
MpegEncContext::dest
uint8_t * dest[3]
Definition: mpegvideo.h:199
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:491
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:606
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideoenc.h:308
max
#define max(a, b)
Definition: cuda_runtime.h:33
ff_mpeg12_dc_scale_table
const uint8_t ff_mpeg12_dc_scale_table[4][32]
Definition: mpegvideodata.c:33
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:81
mathematics.h
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
sqr
static double sqr(double in)
Definition: af_afwtdn.c:872
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:306
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2828
get_visual_weight
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2261
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:224
COPY_CONTEXT
#define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE)
Definition: mpegvideo_enc.c:2661
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:936
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:54
MPVMainEncContext::gop_size
int gop_size
Definition: mpegvideoenc.h:203
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1241
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:331
MPVMainEncContext::mb_var_sum
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegvideoenc.h:266
mpegutils.h
pix
enum AVPixelFormat pix
Definition: ohcodec.c:55
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:217
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:575
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:643
AVCodecContext::mb_cmp
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:862
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:74
MPVMainEncContext::encode_picture_header
int(* encode_picture_header)(struct MPVMainEncContext *m)
Definition: mpegvideoenc.h:248
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegvideoenc.h:302
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:172
MECmpContext::sum_abs_dctelem
int(* sum_abs_dctelem)(const int16_t *block)
Definition: me_cmp.h:51
MBBackup::c
struct MBBackup::@212 c
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
update_mb_info
static void update_mb_info(MPVEncContext *const s, int startcode)
Definition: mpegvideo_enc.c:2933
MBBackup::i_tex_bits
int i_tex_bits
Definition: mpegvideo_enc.c:2654
MPVMainEncContext::coded_picture_number
int coded_picture_number
used to set pic->coded_picture_number
Definition: mpegvideoenc.h:207
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
set_bframe_chain_length
static int set_bframe_chain_length(MPVMainEncContext *const m)
Determines whether an input picture is discarded or not and if not determines the length of the next ...
Definition: mpegvideo_enc.c:1675
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:377
mpv_reconstruct_mb
static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
Performs dequantization and IDCT (if necessary)
Definition: mpegvideo_enc.c:1177
MBBackup::tex_pb
PutBitContext tex_pb
Definition: mpegvideo_enc.c:2658
mpeg4videoenc.h
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:878
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1941
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:336
sp5x.h
MBBackup::mb_skip_run
int mb_skip_run
Definition: mpegvideo_enc.c:2653
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegvideo.h:56
init_slice_buffers
static av_cold int init_slice_buffers(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:504
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:57
FDCTDSPContext
Definition: fdctdsp.h:28
MPVMainEncContext::b_sensitivity
int b_sensitivity
Definition: mpegvideoenc.h:225
faandct.h
Floating point AAN DCT.
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: packet.c:197
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:53
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:843
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:440
mpeg12enc.h
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:182
MBBackup::interlaced_dct
int interlaced_dct
Definition: mpegvideo_enc.c:2651
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:46
ff_vbv_update
int ff_vbv_update(MPVMainEncContext *m, int frame_size)
Definition: ratecontrol.c:722
MpegEncContext::chroma_y_shift
int chroma_y_shift
Definition: mpegvideo.h:268
fail
#define fail()
Definition: checkasm.h:206
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegvideo.h:57
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:103
ff_h263_encode_init
void ff_h263_encode_init(MPVMainEncContext *m)
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:961
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegvideoenc.h:293
MPVUnquantDSPContext::dct_unquantize_h263_intra
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:43
perm
perm
Definition: f_perms.c:75
MAX_THREADS
#define MAX_THREADS
Definition: frame_thread_encoder.c:37
weight
const h264_weight_func weight
Definition: h264dsp_init.c:33
MPVMainEncContext::input_picture
MPVPicture * input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in display order
Definition: mpegvideoenc.h:209
MpegEncContext::MSMP4_WMV2
@ MSMP4_WMV2
Definition: mpegvideo.h:241
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1209
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:314
pts
static int64_t pts
Definition: transcode_aac.c:644
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:311
get_intra_count
static int get_intra_count(MPVEncContext *const s, const uint8_t *src, const uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1259
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1287
sse_mb
static int sse_mb(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2802
ff_encode_add_stats_side_data
int ff_encode_add_stats_side_data(AVPacket *pkt, int quality, const int64_t error[], int error_count, enum AVPictureType pict_type)
Definition: encode.c:918
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:67
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:298
ff_sqrt
#define ff_sqrt
Definition: mathops.h:217
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
ff_mpeg1_encode_init
static void ff_mpeg1_encode_init(MPVEncContext *s)
Definition: mpeg12enc.h:33
init_matrices
static av_cold int init_matrices(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:378
AVRational::num
int num
Numerator.
Definition: rational.h:59
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:145
refstruct.h
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:310
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegvideoenc.h:295
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegvideoenc.h:300
MPVMainEncContext::frame_skip_factor
int frame_skip_factor
Definition: mpegvideoenc.h:243
skip_check
static int skip_check(MPVMainEncContext *const m, const MPVPicture *p, const MPVPicture *ref)
Definition: mpegvideo_enc.c:1459
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
MPVMainEncContext::stuffing_bits
int stuffing_bits
bits used for stuffing
Definition: mpegvideoenc.h:255
MPVMainEncContext::picture_in_gop_number
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideoenc.h:205
RateControlContext
rate control context.
Definition: ratecontrol.h:60
RateControlContext::num_entries
int num_entries
number of RateControlEntries
Definition: ratecontrol.h:61
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MPVEncContext *s, int mb_line)
MAX_MV
#define MAX_MV
Definition: motion_est.h:37
MPVPicture::shared
int shared
Definition: mpegpicture.h:87
MPVPicture::coded_picture_number
int coded_picture_number
Definition: mpegpicture.h:90
me_cmp_func
int(* me_cmp_func)(MPVEncContext *c, const uint8_t *blk1, const uint8_t *blk2, ptrdiff_t stride, int h)
Definition: me_cmp.h:45
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:95
MpegEncContext::ac_val
int16_t(* ac_val)[16]
used for H.263 AIC, MPEG-4 AC prediction
Definition: mpegvideo.h:145
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:119
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:63
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4297
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:697
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
MPVMainEncContext::tmp_frames
AVFrame * tmp_frames[MPVENC_MAX_B_FRAMES+2]
temporary frames used by b_frame_strategy = 2
Definition: mpegvideoenc.h:223
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:35
get_sae
static int get_sae(const uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1245
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MPVMainEncContext *const m)
Definition: rv10enc.c:34
s
#define s(width, name)
Definition: cbs_vp9.c:198
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:122
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:265
ff_mpvenc_dct_init_mips
av_cold void ff_mpvenc_dct_init_mips(MPVEncContext *s)
Definition: mpegvideoenc_init_mips.c:26
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
MPVMainEncContext::brd_scale
int brd_scale
Definition: mpegvideoenc.h:226
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MBBackup::esc3_level_length
int esc3_level_length
Definition: mpegvideo_enc.c:2656
MPVMainEncContext::reordered_input_picture
MPVPicture * reordered_input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in coded order
Definition: mpegvideoenc.h:210
MPVMainEncContext::intra_only
int intra_only
if true, only intra pictures are generated
Definition: mpegvideoenc.h:202
MPVMainEncContext::mc_mb_var_sum
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegvideoenc.h:267
merge_context_after_me
static void merge_context_after_me(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3628
g
const char * g
Definition: vf_curves.c:128
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:835
MPVMainEncContext::rc_context
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideoenc.h:261
MPVUnquantDSPContext::dct_unquantize_mpeg2_intra
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:39
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
ff_mpeg1_dc_scale_table
static const uint8_t *const ff_mpeg1_dc_scale_table
Definition: mpegvideodata.h:32
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:130
MPVEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideoenc.h:50
MPVMainEncContext::header_bits
int header_bits
Definition: mpegvideoenc.h:254
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1553
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
RateControlEntry::new_pict_type
int new_pict_type
Definition: ratecontrol.h:51
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:227
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:66
MPVMainEncContext::max_b_frames
int max_b_frames
max number of B-frames
Definition: mpegvideoenc.h:204
MAX_AC_TEX_MB_SIZE
@ MAX_AC_TEX_MB_SIZE
Definition: mpeg4videoenc.h:39
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1067
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MPVEncContext *const s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:270
rv10enc.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1270
ff_block_permute
void ff_block_permute(int16_t *block, const uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4643
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1505
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:282
PutBitContext
Definition: put_bits.h:50
ff_speedhq_mb_y_order_to_mb
static int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.h:41
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegvideoenc.h:296
MBBackup::mv_dir
int mv_dir
Definition: mpegvideo_enc.c:2646
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:57
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:829
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2871
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:55
arg
const char * arg
Definition: jacosubdec.c:67
mpv_encode_init_static
static av_cold void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:271
ff_mpv_common_end
av_cold void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:447
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1255
MECmpContext
Definition: me_cmp.h:50
MpegEncContext::field_select
int field_select[2][2]
Definition: mpegvideo.h:186
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegvideoenc.h:301
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
MPVEncContext::block
int16_t(* block)[64]
points into blocks below
Definition: mpegvideoenc.h:114
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:118
NULL
#define NULL
Definition: coverity.c:32
MPVEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideoenc.h:127
MPVMainEncContext::lmin
int lmin
Definition: mpegvideoenc.h:233
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:599
run
uint8_t run
Definition: svq3.c:207
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:194
bias
static int bias(int x, int c)
Definition: vqcdec.c:115
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:96
me
#define me
Definition: vf_colormatrix.c:102
aandcttab.h
ff_mpv_common_defaults
av_cold void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:190
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
av_unreachable
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NBDEBUG.
Definition: avassert.h:108
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
Definition: ratecontrol.c:912
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegvideoenc.h:297
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:466
MECmpContext::sad
me_cmp_func sad[6]
Definition: me_cmp.h:53
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:481
MPVPicture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:89
EDGE_WIDTH
#define EDGE_WIDTH
Definition: diracdec.c:47
MpegEncContext::MSMP4_WMV1
@ MSMP4_WMV1
Definition: mpegvideo.h:240
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:115
MpegEncContext::inter_matrix
uint16_t inter_matrix[64]
Definition: mpegvideo.h:206
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:241
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
MPVEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideoenc.h:53
me_cmp_init
static av_cold int me_cmp_init(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:335
select_input_picture
static int select_input_picture(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1806
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:524
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:279
mathops.h
dct_error
static int dct_error(const struct algo *dct, int test, int is_idct, int speed, const int bits)
Definition: dct.c:188
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:327
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3627
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:868
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:1121
MBBackup::qscale
int qscale
Definition: mpegvideo_enc.c:2649
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:937
qpeldsp.h
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2956
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MPVEncContext *const s)
Definition: h261enc.c:120
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:144
ff_mpv_unquantize_init
#define ff_mpv_unquantize_init(s, bitexact, q_scale_type)
Definition: mpegvideo_unquantize.h:50
add_dequant_dct
static void add_dequant_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1164
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1305
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:69
mpeg12codecs.h
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1326
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Average and put pixel Widths can be 16, 8, 4 or 2.
Definition: hpeldsp.h:39
MBBackup::block
int16_t(* block)[64]
Definition: mpegvideo_enc.c:2657
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MPVEncContext *const dst, const MPVEncContext *const src)
Definition: mpegvideo_enc.c:255
MPVMainEncContext
Definition: mpegvideoenc.h:199
AVOnce
#define AVOnce
Definition: thread.h:202
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
MPVPicture::reference
int reference
Definition: mpegpicture.h:86
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:176
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:815
load_input_picture
static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1318
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:436
ff_set_mpeg4_time
void ff_set_mpeg4_time(MPVEncContext *const s)
Definition: mpeg4videoenc.c:843
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:535
ff_encode_alloc_frame
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
Definition: encode.c:837
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1380
ff_h263_clean_intra_table_entries
static void ff_h263_clean_intra_table_entries(MpegEncContext *s, int xy)
Definition: h263.h:47
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1312
MPVMainEncContext::last_pict_type
int last_pict_type
Definition: mpegvideoenc.h:259
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:225
f
f
Definition: af_crystalizer.c:122
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:519
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:81
MotionEstContext::temp
uint8_t * temp
Definition: motion_est.h:57
clip_coeffs
static void clip_coeffs(const MPVEncContext *const s, int16_t block[], int last_index)
Definition: mpegvideo_enc.c:2227
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
AVPacket::size
int size
Definition: packet.h:589
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1005
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:155
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:44
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:104
MPVMainEncContext::vbv_delay_pos
int vbv_delay_pos
offset of vbv_delay in the bitstream
Definition: mpegvideoenc.h:237
MECmpContext::sse
me_cmp_func sse[6]
Definition: me_cmp.h:54
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
MBBackup::mv_type
int mv_type
Definition: mpegvideo_enc.c:2646
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:204
AVFrame::quality
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:549
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:337
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:125
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:28
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegvideoenc.h:305
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:334
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegvideoenc.h:290
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
mpv_generic_options
static const AVOption mpv_generic_options[]
Definition: mpegvideo_enc.c:97
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
MPVMainEncContext::frame_bits
int frame_bits
bits used for the current frame
Definition: mpegvideoenc.h:253
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:95
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideoenc.h:310
encode_picture
static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt)
Definition: mpegvideo_enc.c:3717
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:514
AVCPBProperties::min_bitrate
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: defs.h:292
MECmpContext::nsse
me_cmp_func nsse[6]
Definition: me_cmp.h:62
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
ff_set_cmp
av_cold int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int type, int mpvenc)
Fill the function pointer array cmp[6] with me_cmp_funcs from c based upon type.
Definition: me_cmp.c:443
MPVEncContext::me
MotionEstContext me
Definition: mpegvideoenc.h:78
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:587
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:294
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: defs.h:60
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:345
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
Definition: mpegvideo_enc.c:377
MPVMainEncContext::input_picture_number
int input_picture_number
used to set pic->display_picture_number
Definition: mpegvideoenc.h:206
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MPVEncContext *s)
MPVUnquantDSPContext::dct_unquantize_mpeg2_inter
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:41
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:178
MAX_PB2_MB_SIZE
@ MAX_PB2_MB_SIZE
Definition: mpeg4videoenc.h:38
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:594
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:63
AVCPBProperties::avg_bitrate
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: defs.h:297
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:96
ScratchpadContext::scratchpad_buf
uint8_t * scratchpad_buf
the other *_scratchpad point into this buffer
Definition: mpegpicture.h:38
MPVMainEncContext::me_penalty_compensation
int me_penalty_compensation
Definition: mpegvideoenc.h:263
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:286
denoise_dct_c
static void denoise_dct_c(MPVEncContext *const s, int16_t *block)
Definition: mpegvideo_enc.c:3958
M_PI
#define M_PI
Definition: mathematics.h:67
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegvideoenc.h:303
MBBackup
Definition: mpegvideo_enc.c:2642
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegvideoenc.h:292
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
ff_mjpeg_add_icc_profile_size
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
Definition: mjpegenc_common.c:137
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:266
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: defs.h:312
emms.h
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
get_bits_diff
static int get_bits_diff(MPVEncContext *s)
Definition: mpegvideoenc.h:409
MBBackup::last_dc
int last_dc[3]
Definition: mpegvideo_enc.c:2647
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:104
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:142
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:90
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:57
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:264
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
MBBackup::mv
int mv[2][4][2]
Definition: mpegvideo_enc.c:2644
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:581
MPVUnquantDSPContext::dct_unquantize_h263_inter
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:45
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:90
ff_rv20_encode_picture_header
int ff_rv20_encode_picture_header(MPVMainEncContext *m)
Definition: rv20enc.c:37
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2990
MPVMainEncContext::mv_table_base
int16_t(* mv_table_base)[2]
Definition: mpegvideoenc.h:271
MBBackup::pb2
PutBitContext pb2
Definition: mpegvideo_enc.c:2658
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:25
internal.h
FF_MATRIX_TYPE_CHROMA_INTRA
#define FF_MATRIX_TYPE_CHROMA_INTRA
Definition: encode.h:108
ff_h263_update_mb
void ff_h263_update_mb(MPVEncContext *s)
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:971
src2
const pixel * src2
Definition: h264pred_template.c:421
MPVEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideoenc.h:129
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:57
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:287
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:194
ff_rate_control_init
av_cold int ff_rate_control_init(MPVMainEncContext *const m)
Definition: ratecontrol.c:497
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:63
MPVENC_MAX_B_FRAMES
#define MPVENC_MAX_B_FRAMES
Definition: mpegvideoenc.h:44
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
mpv_encode_defaults
static av_cold void mpv_encode_defaults(MPVMainEncContext *const m)
Set the given MPVEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:280
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:523
MPVMainEncContext::next_lambda
int next_lambda
next lambda used for retrying to encode a frame
Definition: mpegvideoenc.h:256
MpegEncContext::sc
ScratchpadContext sc
Definition: mpegvideo.h:153
AV_STRINGIFY
#define AV_STRINGIFY(s)
Definition: macros.h:66
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:879
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
MPVMainEncContext::border_masking
float border_masking
Definition: mpegvideoenc.h:232
ff_write_pass1_stats
void ff_write_pass1_stats(MPVMainEncContext *const m)
Definition: ratecontrol.c:38
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MPVEncContext *const s)
Definition: msmpeg4enc.c:285
ff_square_tab
const EXTERN uint32_t ff_square_tab[512]
Definition: mathops.h:35
MPVMainEncContext::last_non_b_pict_type
int last_non_b_pict_type
used for MPEG-4 gmc B-frames & ratecontrol
Definition: mpegvideoenc.h:260
MpegEncContext::MSMP4_V3
@ MSMP4_V3
Definition: mpegvideo.h:239
AVCodecContext::height
int height
Definition: avcodec.h:592
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:490
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
prepare_picture
static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
Allocates new buffers for an AVFrame and copies the properties from another AVFrame.
Definition: mpegvideo_enc.c:1286
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:63
ff_get_2pass_fcode
void ff_get_2pass_fcode(MPVMainEncContext *const m)
Definition: ratecontrol.c:900
frame_end
static void frame_end(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1864
idctdsp.h
MPVPicture::b_frame_score
int b_frame_score
Definition: mpegpicture.h:84
encode_mb_internal
static av_always_inline void encode_mb_internal(MPVEncContext *const s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
Definition: mpegvideo_enc.c:2285
avcodec.h
init_buffers
static av_cold int init_buffers(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:446
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, int bits_per_raw_sample)
Definition: pixblockdsp.c:87
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:332
ret
ret
Definition: filter_design.txt:187
ff_h263_mpeg4_reset_dc
void ff_h263_mpeg4_reset_dc(MPVEncContext *s)
MPVMainEncContext::vbv_ignore_qmax
int vbv_ignore_qmax
Definition: mpegvideoenc.h:234
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:303
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1357
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:207
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:97
ff_convert_matrix
void ff_convert_matrix(MPVEncContext *const s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:110
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
MPVMainEncContext::reordered_pts
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there's a delay
Definition: mpegvideoenc.h:220
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
MotionEstContext::scratchpad
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
Definition: motion_est.h:55
mpeg12data.h
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
MpegEncContext::chroma_x_shift
int chroma_x_shift
Definition: mpegvideo.h:267
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:836
MPVMainEncContext::frame_skip_cmp
int frame_skip_cmp
Definition: mpegvideoenc.h:245
MBBackup::dquant
int dquant
Definition: mpegvideo_enc.c:2655
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AVFrame::height
int height
Definition: frame.h:499
MBBackup::mb_skipped
int mb_skipped
Definition: mpegvideo_enc.c:2648
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:71
h261enc.h
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:402
MPVMainEncContext::lmax
int lmax
Definition: mpegvideoenc.h:233
ADD
#define ADD(field)
Definition: mpegvideo_enc.c:3626
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:280
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: packet.c:231
mpeg4video.h
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1234
AVRational::den
int den
Denominator.
Definition: rational.h:60
MPVUnquantDSPContext::dct_unquantize_mpeg1_inter
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:37
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MPVEncContext *const s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:238
MBBackup::i_count
int i_count
Definition: mpegvideo_enc.c:2654
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:822
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
temp
else temp
Definition: vf_mcdeint.c:271
ff_mpv_pic_check_linesize
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
Definition: mpegpicture.c:181
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:861
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
MPVMainEncContext::frame_skip_exp
int frame_skip_exp
Definition: mpegvideoenc.h:244
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:84
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:939
ff_mpv_replace_picture
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
Definition: mpegpicture.c:121
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:892
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegvideoenc.h:287
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
msmpeg4enc.h
mem.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:769
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
dct_quantize_refine
static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4316
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:29
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:558
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1284
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1300
merge_context_after_encode
static void merge_context_after_encode(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3635
MPVMainEncContext::b_frame_strategy
int b_frame_strategy
Definition: mpegvideoenc.h:224
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:273
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:171
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1021
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:938
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AVPacket
This structure stores compressed data.
Definition: packet.h:565
mpeg4videodata.h
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:957
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:253
MPVMainEncContext::scenechange_threshold
int scenechange_threshold
Definition: mpegvideoenc.h:228
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MPVEncContext *s)
Definition: mpegvideoenc.c:121
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:592
bytestream.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:472
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:461
MBBackup::mb_intra
int mb_intra
Definition: mpegvideo_enc.c:2648
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:68
MPVUnquantDSPContext
Definition: mpegvideo_unquantize.h:34
h
h
Definition: vp9dsp_template.c:2070
MPVMainEncContext::user_specified_pts
int64_t user_specified_pts
last non-zero pts from user-supplied AVFrame
Definition: mpegvideoenc.h:212
ff_encode_add_cpb_side_data
AVCPBProperties * ff_encode_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: encode.c:887
dct_quantize_c
static int dct_quantize_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4668
stride
#define stride
Definition: h264pred_template.c:536
MBBackup::pb
PutBitContext pb
Definition: mpegvideo_enc.c:2658
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:89
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:226
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideoenc.h:309
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:109
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:281
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
ff_mpv_alloc_pic_accessories
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture...
Definition: mpegpicture.c:237
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:63
update_qscale
static void update_qscale(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:197
RateControlContext::entry
RateControlEntry * entry
Definition: ratecontrol.h:62
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:61
MPVMainEncContext::s
MPVEncContext s
The main slicecontext.
Definition: mpegvideoenc.h:200
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:616
write_mb_info
static void write_mb_info(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2913
MpegEncContext::dc_val
int16_t * dc_val
used for H.263 AIC/MPEG-4 DC prediction and ER
Definition: mpegvideo.h:144
ff_mpv_alloc_pic_pool
av_cold AVRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
Definition: mpegpicture.c:90
src
#define src
Definition: vp8dsp.c:248
MBBackup::p_tex_bits
int p_tex_bits
Definition: mpegvideo_enc.c:2654
pixblockdsp.h
MpegEncContext::MSMP4_V2
@ MSMP4_V2
Definition: mpegvideo.h:238
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:968
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:290
ff_check_codec_matrices
int ff_check_codec_matrices(AVCodecContext *avctx, unsigned types, uint16_t min, uint16_t max)
Definition: encode.c:943
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:263
FF_MATRIX_TYPE_INTER
#define FF_MATRIX_TYPE_INTER
Definition: encode.h:107
h263.h
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(RateControlContext *rcc)
Definition: ratecontrol.c:711
ff_get_best_fcode
int ff_get_best_fcode(MPVMainEncContext *const m, const int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1605
intmath.h
MPVEncContext::mpeg_quant
int mpeg_quant
Definition: mpegvideoenc.h:167