FFmpeg
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include "config_components.h"
35 
36 #include <assert.h>
37 #include <stdint.h>
38 
39 #include "libavutil/attributes.h"
40 #include "libavutil/emms.h"
41 #include "libavutil/internal.h"
42 #include "libavutil/intmath.h"
43 #include "libavutil/mathematics.h"
44 #include "libavutil/mem.h"
45 #include "libavutil/mem_internal.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/thread.h"
48 #include "avcodec.h"
49 #include "encode.h"
50 #include "idctdsp.h"
51 #include "mpeg12data.h"
52 #include "mpeg12enc.h"
53 #include "mpegvideo.h"
54 #include "mpegvideodata.h"
55 #include "mpegvideoenc.h"
56 #include "h261enc.h"
57 #include "h263.h"
58 #include "h263data.h"
59 #include "h263enc.h"
60 #include "mjpegenc_common.h"
61 #include "mathops.h"
62 #include "mpegutils.h"
63 #include "mpegvideo_unquantize.h"
64 #include "mjpegenc.h"
65 #include "speedhqenc.h"
66 #include "msmpeg4enc.h"
67 #include "pixblockdsp.h"
68 #include "qpeldsp.h"
69 #include "faandct.h"
70 #include "aandcttab.h"
71 #include "mpeg4video.h"
72 #include "mpeg4videodata.h"
73 #include "mpeg4videoenc.h"
74 #include "internal.h"
75 #include "bytestream.h"
76 #include "rv20enc.h"
77 #include "libavutil/refstruct.h"
78 #include <limits.h>
79 #include "sp5x.h"
80 
81 #define QUANT_BIAS_SHIFT 8
82 
83 #define QMAT_SHIFT_MMX 16
84 #define QMAT_SHIFT 21
85 
86 static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt);
87 static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
88 static int sse_mb(MPVEncContext *const s);
89 static int dct_quantize_c(MPVEncContext *const s,
90  int16_t *block, int n,
91  int qscale, int *overflow);
92 static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow);
93 
94 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
95 
96 static const AVOption mpv_generic_options[] = {
99  { NULL },
100 };
101 
103  .class_name = "generic mpegvideo encoder",
104  .item_name = av_default_item_name,
105  .option = mpv_generic_options,
106  .version = LIBAVUTIL_VERSION_INT,
107 };
108 
109 void ff_convert_matrix(MPVEncContext *const s, int (*qmat)[64],
110  uint16_t (*qmat16)[2][64],
111  const uint16_t *quant_matrix,
112  int bias, int qmin, int qmax, int intra)
113 {
114  FDCTDSPContext *fdsp = &s->fdsp;
115  int qscale;
116  int shift = 0;
117 
118  for (qscale = qmin; qscale <= qmax; qscale++) {
119  int i;
120  int qscale2;
121 
122  if (s->c.q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
123  else qscale2 = qscale << 1;
124 
125  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
126 #if CONFIG_FAANDCT
127  fdsp->fdct == ff_faandct ||
128 #endif /* CONFIG_FAANDCT */
130  for (i = 0; i < 64; i++) {
131  const int j = s->c.idsp.idct_permutation[i];
132  int64_t den = (int64_t) qscale2 * quant_matrix[j];
133  /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
134  * Assume x = qscale2 * quant_matrix[j]
135  * 1 <= x <= 28560
136  * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
137  * 4194304 >= (1 << 22) / (x) >= 146 */
138 
139  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
140  }
141  } else if (fdsp->fdct == ff_fdct_ifast) {
142  for (i = 0; i < 64; i++) {
143  const int j = s->c.idsp.idct_permutation[i];
144  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
145  /* 1247 * 1 * 1 <= ff_aanscales[i] * qscale2 * quant_matrix[j] <= 31521 * 112 * 255
146  * Assume x = ff_aanscales[i] * qscale2 * quant_matrix[j]
147  * 1247 <= x <= 900239760
148  * (1 << 36) / 1247 >= (1 << 36) / (x) >= (1 << 36) / 900239760
149  * 55107840 >= (1 << 36) / (x) >= 76 */
150 
151  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
152  }
153  } else {
154  for (i = 0; i < 64; i++) {
155  const int j = s->c.idsp.idct_permutation[i];
156  int64_t den = (int64_t) qscale2 * quant_matrix[j];
157  /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
158  * Assume x = qscale2 * quant_matrix[j]
159  * 1 <= x <= 28560
160  * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
161  * 4194304 >= (1 << 22) / (x) >= 146
162  *
163  * 1 <= x <= 28560
164  * (1 << 17) / 1 >= (1 << 17) / (x) >= (1 << 17) / 28560
165  * 131072 >= (1 << 17) / (x) >= 4 */
166 
167  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
168  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
169 
170  if (qmat16[qscale][0][i] == 0 ||
171  qmat16[qscale][0][i] == 128 * 256)
172  qmat16[qscale][0][i] = 128 * 256 - 1;
173  qmat16[qscale][1][i] =
174  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
175  qmat16[qscale][0][i]);
176  }
177  }
178 
179  for (i = intra; i < 64; i++) {
180  int64_t max = 8191;
181  if (fdsp->fdct == ff_fdct_ifast) {
182  max = (8191LL * ff_aanscales[i]) >> 14;
183  }
184  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
185  shift++;
186  }
187  }
188  }
189  if (shift) {
190  av_log(s->c.avctx, AV_LOG_INFO,
191  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
192  QMAT_SHIFT - shift);
193  }
194 }
195 
196 static inline void update_qscale(MPVMainEncContext *const m)
197 {
198  MPVEncContext *const s = &m->s;
199 
200  if (s->c.q_scale_type == 1 && 0) {
201  int i;
202  int bestdiff=INT_MAX;
203  int best = 1;
204 
205  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
206  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
207  if (ff_mpeg2_non_linear_qscale[i] < s->c.avctx->qmin ||
208  (ff_mpeg2_non_linear_qscale[i] > s->c.avctx->qmax && !m->vbv_ignore_qmax))
209  continue;
210  if (diff < bestdiff) {
211  bestdiff = diff;
212  best = i;
213  }
214  }
215  s->c.qscale = best;
216  } else {
217  s->c.qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
218  (FF_LAMBDA_SHIFT + 7);
219  s->c.qscale = av_clip(s->c.qscale, s->c.avctx->qmin, m->vbv_ignore_qmax ? 31 : s->c.avctx->qmax);
220  }
221 
222  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
224 }
225 
227 {
228  int i;
229 
230  if (matrix) {
231  put_bits(pb, 1, 1);
232  for (i = 0; i < 64; i++) {
234  }
235  } else
236  put_bits(pb, 1, 0);
237 }
238 
239 /**
240  * init s->c.cur_pic.qscale_table from s->lambda_table
241  */
242 static void init_qscale_tab(MPVEncContext *const s)
243 {
244  int8_t *const qscale_table = s->c.cur_pic.qscale_table;
245 
246  for (int i = 0; i < s->c.mb_num; i++) {
247  unsigned int lam = s->lambda_table[s->c.mb_index2xy[i]];
248  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
249  qscale_table[s->c.mb_index2xy[i]] = av_clip(qp, s->c.avctx->qmin,
250  s->c.avctx->qmax);
251  }
252 }
253 
255  const MPVEncContext *const src)
256 {
257 #define COPY(a) dst->a = src->a
258  COPY(c.pict_type);
259  COPY(f_code);
260  COPY(b_code);
261  COPY(c.qscale);
262  COPY(lambda);
263  COPY(lambda2);
264  COPY(c.frame_pred_frame_dct); // FIXME don't set in encode_header
265  COPY(c.progressive_frame); // FIXME don't set in encode_header
266  COPY(partitioned_frame); // FIXME don't set in encode_header
267 #undef COPY
268 }
269 
271 {
272  for (int i = -16; i < 16; i++)
273  default_fcode_tab[i + MAX_MV] = 1;
274 }
275 
276 /**
277  * Set the given MPVEncContext to defaults for encoding.
278  */
280 {
281  MPVEncContext *const s = &m->s;
282  static AVOnce init_static_once = AV_ONCE_INIT;
283 
285 
286  s->f_code = 1;
287  s->b_code = 1;
288 
289  if (!m->fcode_tab) {
291  ff_thread_once(&init_static_once, mpv_encode_init_static);
292  }
293  if (!s->c.y_dc_scale_table) {
294  s->c.y_dc_scale_table =
295  s->c.c_dc_scale_table = ff_mpeg1_dc_scale_table;
296  }
297 }
298 
300 {
301  s->dct_quantize = dct_quantize_c;
302 
303 #if ARCH_X86
305 #endif
306 
307  if (s->c.avctx->trellis)
308  s->dct_quantize = dct_quantize_trellis_c;
309 }
310 
311 static av_cold void init_unquantize(MPVEncContext *const s2, AVCodecContext *avctx)
312 {
313  MpegEncContext *const s = &s2->c;
314  MPVUnquantDSPContext unquant_dsp_ctx;
315 
316  ff_mpv_unquantize_init(&unquant_dsp_ctx,
317  avctx->flags & AV_CODEC_FLAG_BITEXACT, s->q_scale_type);
318 
319  if (s2->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
320  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg2_intra;
321  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg2_inter;
322  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
323  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_h263_intra;
324  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_h263_inter;
325  } else {
326  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg1_intra;
327  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg1_inter;
328  }
329 }
330 
332 {
333  MPVEncContext *const s = &m->s;
334  MECmpContext mecc;
335  me_cmp_func me_cmp[6];
336  int ret;
337 
338  ff_me_cmp_init(&mecc, avctx);
339  ret = ff_me_init(&s->me, avctx, &mecc, 1);
340  if (ret < 0)
341  return ret;
342  ret = ff_set_cmp(&mecc, me_cmp, m->frame_skip_cmp, 1);
343  if (ret < 0)
344  return ret;
345  m->frame_skip_cmp_fn = me_cmp[1];
346  if (avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
347  ret = ff_set_cmp(&mecc, me_cmp, avctx->ildct_cmp, 1);
348  if (ret < 0)
349  return ret;
350  if (!me_cmp[0] || !me_cmp[4])
351  return AVERROR(EINVAL);
352  s->ildct_cmp[0] = me_cmp[0];
353  s->ildct_cmp[1] = me_cmp[4];
354  }
355 
356  s->sum_abs_dctelem = mecc.sum_abs_dctelem;
357 
358  s->sse_cmp[0] = mecc.sse[0];
359  s->sse_cmp[1] = mecc.sse[1];
360  s->sad_cmp[0] = mecc.sad[0];
361  s->sad_cmp[1] = mecc.sad[1];
362  if (avctx->mb_cmp == FF_CMP_NSSE) {
363  s->n_sse_cmp[0] = mecc.nsse[0];
364  s->n_sse_cmp[1] = mecc.nsse[1];
365  } else {
366  s->n_sse_cmp[0] = mecc.sse[0];
367  s->n_sse_cmp[1] = mecc.sse[1];
368  }
369 
370  return 0;
371 }
372 
373 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
375 {
376  MPVEncContext *const s = &m->s;
377  const int nb_matrices = 1 + (s->c.out_format == FMT_MJPEG) + !m->intra_only;
378  const uint16_t *intra_matrix, *inter_matrix;
379  int ret;
380 
381  if (!ALLOCZ_ARRAYS(s->q_intra_matrix, 32, nb_matrices) ||
382  !ALLOCZ_ARRAYS(s->q_intra_matrix16, 32, nb_matrices))
383  return AVERROR(ENOMEM);
384 
385  if (s->c.out_format == FMT_MJPEG) {
386  s->q_chroma_intra_matrix = s->q_intra_matrix + 32;
387  s->q_chroma_intra_matrix16 = s->q_intra_matrix16 + 32;
388  // No need to set q_inter_matrix
390  // intra_matrix, chroma_intra_matrix will be set later for MJPEG.
391  return 0;
392  } else {
393  s->q_chroma_intra_matrix = s->q_intra_matrix;
394  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
395  }
396  if (!m->intra_only) {
397  s->q_inter_matrix = s->q_intra_matrix + 32;
398  s->q_inter_matrix16 = s->q_intra_matrix16 + 32;
399  }
400 
401  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 &&
402  s->mpeg_quant) {
403  intra_matrix = ff_mpeg4_default_intra_matrix;
404  inter_matrix = ff_mpeg4_default_non_intra_matrix;
405  } else if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
406  intra_matrix =
407  inter_matrix = ff_mpeg1_default_non_intra_matrix;
408  } else {
409  /* MPEG-1/2, SpeedHQ */
410  intra_matrix = ff_mpeg1_default_intra_matrix;
411  inter_matrix = ff_mpeg1_default_non_intra_matrix;
412  }
413  if (avctx->intra_matrix)
414  intra_matrix = avctx->intra_matrix;
415  if (avctx->inter_matrix)
416  inter_matrix = avctx->inter_matrix;
417 
418  /* init q matrix */
419  for (int i = 0; i < 64; i++) {
420  int j = s->c.idsp.idct_permutation[i];
421 
422  s->c.intra_matrix[j] = s->c.chroma_intra_matrix[j] = intra_matrix[i];
423  s->c.inter_matrix[j] = inter_matrix[i];
424  }
425 
426  /* precompute matrix */
428  if (ret < 0)
429  return ret;
430 
431  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
432  s->c.intra_matrix, s->intra_quant_bias, avctx->qmin,
433  31, 1);
434  if (s->q_inter_matrix)
435  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
436  s->c.inter_matrix, s->inter_quant_bias, avctx->qmin,
437  31, 0);
438 
439  return 0;
440 }
441 
443 {
444  MPVEncContext *const s = &m->s;
445  int has_b_frames = !!m->max_b_frames;
446  int16_t (*mv_table)[2];
447 
448  /* Allocate MB type table */
449  unsigned mb_array_size = s->c.mb_stride * s->c.mb_height;
450  s->mb_type = av_calloc(mb_array_size, 3 * sizeof(*s->mb_type) + sizeof(*s->mb_mean));
451  if (!s->mb_type)
452  return AVERROR(ENOMEM);
453  s->mc_mb_var = s->mb_type + mb_array_size;
454  s->mb_var = s->mc_mb_var + mb_array_size;
455  s->mb_mean = (uint8_t*)(s->mb_var + mb_array_size);
456 
457  if (!FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size))
458  return AVERROR(ENOMEM);
459 
460  unsigned mv_table_size = (s->c.mb_height + 2) * s->c.mb_stride + 1;
461  unsigned nb_mv_tables = 1 + 5 * has_b_frames;
462  if (s->c.codec_id == AV_CODEC_ID_MPEG4 ||
463  (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
464  nb_mv_tables += 8 * has_b_frames;
465  s->p_field_select_table[0] = av_calloc(mv_table_size, 2 * (2 + 4 * has_b_frames));
466  if (!s->p_field_select_table[0])
467  return AVERROR(ENOMEM);
468  s->p_field_select_table[1] = s->p_field_select_table[0] + 2 * mv_table_size;
469  }
470 
471  mv_table = av_calloc(mv_table_size, nb_mv_tables * sizeof(*mv_table));
472  if (!mv_table)
473  return AVERROR(ENOMEM);
474  m->mv_table_base = mv_table;
475  mv_table += s->c.mb_stride + 1;
476 
477  s->p_mv_table = mv_table;
478  if (has_b_frames) {
479  s->b_forw_mv_table = mv_table += mv_table_size;
480  s->b_back_mv_table = mv_table += mv_table_size;
481  s->b_bidir_forw_mv_table = mv_table += mv_table_size;
482  s->b_bidir_back_mv_table = mv_table += mv_table_size;
483  s->b_direct_mv_table = mv_table += mv_table_size;
484 
485  if (s->p_field_select_table[1]) { // MPEG-4 or INTERLACED_ME above
486  uint8_t *field_select = s->p_field_select_table[1];
487  for (int j = 0; j < 2; j++) {
488  for (int k = 0; k < 2; k++) {
489  for (int l = 0; l < 2; l++)
490  s->b_field_mv_table[j][k][l] = mv_table += mv_table_size;
491  s->b_field_select_table[j][k] = field_select += 2 * mv_table_size;
492  }
493  }
494  }
495  }
496 
497  return 0;
498 }
499 
501 {
502  MPVEncContext *const s = &m->s;
503  // Align the following per-thread buffers to avoid false sharing.
504  enum {
505 #ifndef _MSC_VER
506  /// The number is supposed to match/exceed the cache-line size.
507  ALIGN = FFMAX(128, _Alignof(max_align_t)),
508 #else
509  ALIGN = 128,
510 #endif
511  DCT_ERROR_SIZE = FFALIGN(2 * sizeof(*s->dct_error_sum), ALIGN),
512  };
513  static_assert(DCT_ERROR_SIZE * MAX_THREADS + ALIGN - 1 <= SIZE_MAX,
514  "Need checks for potential overflow.");
515  unsigned nb_slices = s->c.slice_context_count;
516  char *dct_error = NULL;
517 
518  if (m->noise_reduction) {
519  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
520  return AVERROR(ENOMEM);
521  dct_error = av_mallocz(ALIGN - 1 + nb_slices * DCT_ERROR_SIZE);
522  if (!dct_error)
523  return AVERROR(ENOMEM);
525  dct_error += FFALIGN((uintptr_t)dct_error, ALIGN) - (uintptr_t)dct_error;
526  }
527 
528  const int y_size = s->c.b8_stride * (2 * s->c.mb_height + 1);
529  const int c_size = s->c.mb_stride * (s->c.mb_height + 1);
530  const int yc_size = y_size + 2 * c_size;
531  ptrdiff_t offset = 0;
532 
533  for (unsigned i = 0; i < nb_slices; ++i) {
534  MPVEncContext *const s2 = s->c.enc_contexts[i];
535 
536  s2->block = s2->blocks[0];
537 
538  if (dct_error) {
539  s2->dct_offset = s->dct_offset;
540  s2->dct_error_sum = (void*)dct_error;
541  dct_error += DCT_ERROR_SIZE;
542  }
543 
544  if (s2->c.ac_val) {
545  s2->c.dc_val += offset + i;
546  s2->c.ac_val += offset;
547  offset += yc_size;
548  }
549  }
550  return 0;
551 }
552 
553 /* init video encoder */
555 {
556  MPVMainEncContext *const m = avctx->priv_data;
557  MPVEncContext *const s = &m->s;
558  AVCPBProperties *cpb_props;
559  int gcd, ret;
560 
562 
563  switch (avctx->pix_fmt) {
564  case AV_PIX_FMT_YUVJ444P:
565  case AV_PIX_FMT_YUV444P:
566  s->c.chroma_format = CHROMA_444;
567  break;
568  case AV_PIX_FMT_YUVJ422P:
569  case AV_PIX_FMT_YUV422P:
570  s->c.chroma_format = CHROMA_422;
571  break;
572  default:
573  av_unreachable("Already checked via CODEC_PIXFMTS");
574  case AV_PIX_FMT_YUVJ420P:
575  case AV_PIX_FMT_YUV420P:
576  s->c.chroma_format = CHROMA_420;
577  break;
578  }
579 
580  avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
581 
582  m->bit_rate = avctx->bit_rate;
583  s->c.width = avctx->width;
584  s->c.height = avctx->height;
585  if (avctx->gop_size > 600 &&
587  av_log(avctx, AV_LOG_WARNING,
588  "keyframe interval too large!, reducing it from %d to %d\n",
589  avctx->gop_size, 600);
590  avctx->gop_size = 600;
591  }
592  m->gop_size = avctx->gop_size;
593  s->c.avctx = avctx;
594  if (avctx->max_b_frames > MPVENC_MAX_B_FRAMES) {
595  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
596  "is " AV_STRINGIFY(MPVENC_MAX_B_FRAMES) ".\n");
598  } else if (avctx->max_b_frames < 0) {
599  av_log(avctx, AV_LOG_ERROR,
600  "max b frames must be 0 or positive for mpegvideo based encoders\n");
601  return AVERROR(EINVAL);
602  }
603  m->max_b_frames = avctx->max_b_frames;
604  s->c.codec_id = avctx->codec->id;
605  if (m->max_b_frames && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
606  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
607  return AVERROR(EINVAL);
608  }
609 
610  s->c.quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
611  s->rtp_mode = !!s->rtp_payload_size;
613 
614  if (m->gop_size <= 1) {
615  m->intra_only = 1;
616  m->gop_size = 12;
617  } else {
618  m->intra_only = 0;
619  }
620 
621  /* Fixed QSCALE */
622  m->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
623 
624  s->adaptive_quant = (avctx->lumi_masking ||
625  avctx->dark_masking ||
626  avctx->temporal_cplx_masking ||
627  avctx->spatial_cplx_masking ||
628  avctx->p_masking ||
629  m->border_masking ||
630  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
631  !m->fixed_qscale;
632 
633  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
634 
635  if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
636  switch(avctx->codec_id) {
639  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
640  break;
641  case AV_CODEC_ID_MPEG4:
645  if (avctx->rc_max_rate >= 15000000) {
646  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
647  } else if(avctx->rc_max_rate >= 2000000) {
648  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
649  } else if(avctx->rc_max_rate >= 384000) {
650  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
651  } else
652  avctx->rc_buffer_size = 40;
653  avctx->rc_buffer_size *= 16384;
654  break;
655  }
656  if (avctx->rc_buffer_size) {
657  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
658  }
659  }
660 
661  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
662  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
663  return AVERROR(EINVAL);
664  }
665 
666  if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
667  av_log(avctx, AV_LOG_INFO,
668  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
669  }
670 
671  if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
672  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
673  return AVERROR(EINVAL);
674  }
675 
676  if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
677  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
678  return AVERROR(EINVAL);
679  }
680 
681  if (avctx->rc_max_rate &&
682  avctx->rc_max_rate == avctx->bit_rate &&
683  avctx->rc_max_rate != avctx->rc_min_rate) {
684  av_log(avctx, AV_LOG_INFO,
685  "impossible bitrate constraints, this will fail\n");
686  }
687 
688  if (avctx->rc_buffer_size &&
689  avctx->bit_rate * (int64_t)avctx->time_base.num >
690  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
691  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
692  return AVERROR(EINVAL);
693  }
694 
695  if (!m->fixed_qscale &&
696  avctx->bit_rate * av_q2d(avctx->time_base) >
697  avctx->bit_rate_tolerance) {
698  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
699  av_log(avctx, AV_LOG_WARNING,
700  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
701  if (nbt <= INT_MAX) {
702  avctx->bit_rate_tolerance = nbt;
703  } else
704  avctx->bit_rate_tolerance = INT_MAX;
705  }
706 
707  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->c.codec_id != AV_CODEC_ID_MPEG4 &&
708  s->c.codec_id != AV_CODEC_ID_H263 && s->c.codec_id != AV_CODEC_ID_H263P &&
709  s->c.codec_id != AV_CODEC_ID_FLV1) {
710  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
711  return AVERROR(EINVAL);
712  }
713 
714  if (s->c.obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
715  av_log(avctx, AV_LOG_ERROR,
716  "OBMC is only supported with simple mb decision\n");
717  return AVERROR(EINVAL);
718  }
719 
720  if (s->c.quarter_sample && s->c.codec_id != AV_CODEC_ID_MPEG4) {
721  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
722  return AVERROR(EINVAL);
723  }
724 
725  if ((s->c.codec_id == AV_CODEC_ID_MPEG4 ||
726  s->c.codec_id == AV_CODEC_ID_H263 ||
727  s->c.codec_id == AV_CODEC_ID_H263P) &&
728  (avctx->sample_aspect_ratio.num > 255 ||
729  avctx->sample_aspect_ratio.den > 255)) {
730  av_log(avctx, AV_LOG_WARNING,
731  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
734  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
735  }
736 
737  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
738  s->c.codec_id == AV_CODEC_ID_H263P) &&
739  (avctx->width > 2048 ||
740  avctx->height > 1152 )) {
741  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
742  return AVERROR(EINVAL);
743  }
744  if (s->c.codec_id == AV_CODEC_ID_FLV1 &&
745  (avctx->width > 65535 ||
746  avctx->height > 65535 )) {
747  av_log(avctx, AV_LOG_ERROR, "FLV does not support resolutions above 16bit\n");
748  return AVERROR(EINVAL);
749  }
750  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
751  s->c.codec_id == AV_CODEC_ID_H263P ||
752  s->c.codec_id == AV_CODEC_ID_RV20) &&
753  ((avctx->width &3) ||
754  (avctx->height&3) )) {
755  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
756  return AVERROR(EINVAL);
757  }
758 
759  if ((s->c.codec_id == AV_CODEC_ID_WMV1 ||
760  s->c.codec_id == AV_CODEC_ID_WMV2) &&
761  avctx->width & 1) {
762  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
763  return AVERROR(EINVAL);
764  }
765 
767  s->c.codec_id != AV_CODEC_ID_MPEG4 && s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO) {
768  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
769  return AVERROR(EINVAL);
770  }
771 
772  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
773  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
774  return AVERROR(EINVAL);
775  }
776 
777  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
778  avctx->mb_decision != FF_MB_DECISION_RD) {
779  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=rd\n");
780  return AVERROR(EINVAL);
781  }
782 
783  if (m->scenechange_threshold < 1000000000 &&
784  (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
785  av_log(avctx, AV_LOG_ERROR,
786  "closed gop with scene change detection are not supported yet, "
787  "set threshold to 1000000000\n");
788  return AVERROR_PATCHWELCOME;
789  }
790 
791  if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
792  if (s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO &&
794  av_log(avctx, AV_LOG_ERROR,
795  "low delay forcing is only available for mpeg2, "
796  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
797  return AVERROR(EINVAL);
798  }
799  if (m->max_b_frames != 0) {
800  av_log(avctx, AV_LOG_ERROR,
801  "B-frames cannot be used with low delay\n");
802  return AVERROR(EINVAL);
803  }
804  }
805 
806  if (avctx->slices > 1 &&
808  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
809  return AVERROR(EINVAL);
810  }
811 
812  if (m->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
813  av_log(avctx, AV_LOG_INFO,
814  "notice: b_frame_strategy only affects the first pass\n");
815  m->b_frame_strategy = 0;
816  }
817 
818  gcd = av_gcd(avctx->time_base.den, avctx->time_base.num);
819  if (gcd > 1) {
820  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
821  avctx->time_base.den /= gcd;
822  avctx->time_base.num /= gcd;
823  //return -1;
824  }
825 
826  if (s->mpeg_quant || s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO || s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO || s->c.codec_id == AV_CODEC_ID_MJPEG || s->c.codec_id == AV_CODEC_ID_AMV || s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
827  // (a + x * 3 / 8) / x
828  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
829  s->inter_quant_bias = 0;
830  } else {
831  s->intra_quant_bias = 0;
832  // (a - x / 4) / x
833  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
834  }
835 
836  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
837  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
838  return AVERROR(EINVAL);
839  }
840 
841  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
842 
843  switch (avctx->codec->id) {
844 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
846  s->rtp_mode = 1;
849  s->c.out_format = FMT_MPEG1;
850  s->c.low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
851  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
852  break;
853 #endif
854 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
855  case AV_CODEC_ID_MJPEG:
856  case AV_CODEC_ID_AMV:
857  s->c.out_format = FMT_MJPEG;
858  m->intra_only = 1; /* force intra only for jpeg */
859  avctx->delay = 0;
860  s->c.low_delay = 1;
861  break;
862 #endif
863  case AV_CODEC_ID_SPEEDHQ:
864  s->c.out_format = FMT_SPEEDHQ;
865  m->intra_only = 1; /* force intra only for SHQ */
866  avctx->delay = 0;
867  s->c.low_delay = 1;
868  break;
869  case AV_CODEC_ID_H261:
870  s->c.out_format = FMT_H261;
871  avctx->delay = 0;
872  s->c.low_delay = 1;
873  s->rtp_mode = 0; /* Sliced encoding not supported */
874  break;
875  case AV_CODEC_ID_H263:
876  if (!CONFIG_H263_ENCODER)
879  s->c.width, s->c.height) == 8) {
880  av_log(avctx, AV_LOG_ERROR,
881  "The specified picture size of %dx%d is not valid for "
882  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
883  "352x288, 704x576, and 1408x1152. "
884  "Try H.263+.\n", s->c.width, s->c.height);
885  return AVERROR(EINVAL);
886  }
887  s->c.out_format = FMT_H263;
888  avctx->delay = 0;
889  s->c.low_delay = 1;
890  break;
891  case AV_CODEC_ID_H263P:
892  s->c.out_format = FMT_H263;
893  /* Fx */
894  s->c.h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
895  s->modified_quant = s->c.h263_aic;
896  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
897  s->me.unrestricted_mv = s->c.obmc || s->loop_filter || s->umvplus;
898  s->flipflop_rounding = 1;
899 
900  /* /Fx */
901  /* These are just to be sure */
902  avctx->delay = 0;
903  s->c.low_delay = 1;
904  break;
905  case AV_CODEC_ID_FLV1:
906  s->c.out_format = FMT_H263;
907  s->me.unrestricted_mv = 1;
908  s->rtp_mode = 0; /* don't allow GOB */
909  avctx->delay = 0;
910  s->c.low_delay = 1;
911  break;
912 #if CONFIG_RV10_ENCODER
913  case AV_CODEC_ID_RV10:
914  s->c.out_format = FMT_H263;
915  avctx->delay = 0;
916  s->c.low_delay = 1;
917  break;
918 #endif
919 #if CONFIG_RV20_ENCODER
920  case AV_CODEC_ID_RV20:
922  s->c.out_format = FMT_H263;
923  avctx->delay = 0;
924  s->c.low_delay = 1;
925  s->modified_quant = 1;
926  // Set here to force allocation of dc_val;
927  // will be set later on a per-frame basis.
928  s->c.h263_aic = 1;
929  s->loop_filter = 1;
930  s->me.unrestricted_mv = 0;
931  break;
932 #endif
933  case AV_CODEC_ID_MPEG4:
934  s->c.out_format = FMT_H263;
935  s->c.h263_pred = 1;
936  s->me.unrestricted_mv = 1;
937  s->flipflop_rounding = 1;
938  s->c.low_delay = m->max_b_frames ? 0 : 1;
939  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
940  break;
942  s->c.out_format = FMT_H263;
943  s->c.h263_pred = 1;
944  s->me.unrestricted_mv = 1;
945  s->c.msmpeg4_version = MSMP4_V2;
946  avctx->delay = 0;
947  s->c.low_delay = 1;
948  break;
950  s->c.out_format = FMT_H263;
951  s->c.h263_pred = 1;
952  s->me.unrestricted_mv = 1;
953  s->c.msmpeg4_version = MSMP4_V3;
954  s->flipflop_rounding = 1;
955  avctx->delay = 0;
956  s->c.low_delay = 1;
957  break;
958  case AV_CODEC_ID_WMV1:
959  s->c.out_format = FMT_H263;
960  s->c.h263_pred = 1;
961  s->me.unrestricted_mv = 1;
962  s->c.msmpeg4_version = MSMP4_WMV1;
963  s->flipflop_rounding = 1;
964  avctx->delay = 0;
965  s->c.low_delay = 1;
966  break;
967  case AV_CODEC_ID_WMV2:
968  s->c.out_format = FMT_H263;
969  s->c.h263_pred = 1;
970  s->me.unrestricted_mv = 1;
971  s->c.msmpeg4_version = MSMP4_WMV2;
972  s->flipflop_rounding = 1;
973  avctx->delay = 0;
974  s->c.low_delay = 1;
975  break;
976  default:
977  av_unreachable("List contains all codecs using ff_mpv_encode_init()");
978  }
979 
980  avctx->has_b_frames = !s->c.low_delay;
981 
982  s->c.encoding = 1;
983 
984  s->c.progressive_frame =
985  s->c.progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
987  s->c.alternate_scan);
988 
989  if (avctx->flags & AV_CODEC_FLAG_PSNR || avctx->mb_decision == FF_MB_DECISION_RD ||
991  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
992  (1 << AV_PICTURE_TYPE_P) |
993  (1 << AV_PICTURE_TYPE_B);
994  } else if (!m->intra_only) {
995  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
996  (1 << AV_PICTURE_TYPE_P);
997  } else {
998  s->frame_reconstruction_bitfield = 0;
999  }
1000 
1001  if (m->lmin > m->lmax) {
1002  av_log(avctx, AV_LOG_WARNING, "Clipping lmin value to %d\n", m->lmax);
1003  m->lmin = m->lmax;
1004  }
1005 
1006  /* ff_mpv_init_duplicate_contexts() will copy (memdup) the contents of the
1007  * main slice to the slice contexts, so we initialize various fields of it
1008  * before calling ff_mpv_init_duplicate_contexts(). */
1009  s->parent = m;
1010  ff_mpv_idct_init(&s->c);
1011  init_unquantize(s, avctx);
1012  ff_fdctdsp_init(&s->fdsp, avctx);
1013  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
1014  ff_pixblockdsp_init(&s->pdsp, 8);
1015  ret = me_cmp_init(m, avctx);
1016  if (ret < 0)
1017  return ret;
1018 
1019  if (!(avctx->stats_out = av_mallocz(256)) ||
1020  !(s->new_pic = av_frame_alloc()) ||
1021  !(s->c.picture_pool = ff_mpv_alloc_pic_pool(0)))
1022  return AVERROR(ENOMEM);
1023 
1024  ret = init_matrices(m, avctx);
1025  if (ret < 0)
1026  return ret;
1027 
1029 
1030  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263) {
1032 #if CONFIG_MSMPEG4ENC
1033  if (s->c.msmpeg4_version != MSMP4_UNUSED)
1035 #endif
1036  }
1037 
1038  s->c.slice_ctx_size = sizeof(*s);
1039  ret = ff_mpv_common_init(&s->c);
1040  if (ret < 0)
1041  return ret;
1042  ret = init_buffers(m);
1043  if (ret < 0)
1044  return ret;
1045  if (s->c.slice_context_count > 1) {
1046  s->rtp_mode = 1;
1047  if (avctx->codec_id == AV_CODEC_ID_H263P)
1048  s->h263_slice_structured = 1;
1049  }
1051  if (ret < 0)
1052  return ret;
1053 
1054  ret = init_slice_buffers(m);
1055  if (ret < 0)
1056  return ret;
1057 
1059  if (ret < 0)
1060  return ret;
1061 
1062  if (m->b_frame_strategy == 2) {
1063  for (int i = 0; i < m->max_b_frames + 2; i++) {
1064  m->tmp_frames[i] = av_frame_alloc();
1065  if (!m->tmp_frames[i])
1066  return AVERROR(ENOMEM);
1067 
1069  m->tmp_frames[i]->width = s->c.width >> m->brd_scale;
1070  m->tmp_frames[i]->height = s->c.height >> m->brd_scale;
1071 
1072  ret = av_frame_get_buffer(m->tmp_frames[i], 0);
1073  if (ret < 0)
1074  return ret;
1075  }
1076  }
1077 
1078  cpb_props = ff_encode_add_cpb_side_data(avctx);
1079  if (!cpb_props)
1080  return AVERROR(ENOMEM);
1081  cpb_props->max_bitrate = avctx->rc_max_rate;
1082  cpb_props->min_bitrate = avctx->rc_min_rate;
1083  cpb_props->avg_bitrate = avctx->bit_rate;
1084  cpb_props->buffer_size = avctx->rc_buffer_size;
1085 
1086  return 0;
1087 }
1088 
{
    MPVMainEncContext *const m = avctx->priv_data;
    MPVEncContext *const s = &m->s;

    /* Encoder teardown: releases everything allocated during init/encoding.
     * NOTE(review): the function signature and a few statements are elided
     * in this excerpt. */
    ff_mpv_common_end(&s->c);
    av_refstruct_pool_uninit(&s->c.picture_pool);

    for (int i = 0; i < MPVENC_MAX_B_FRAMES + 1; i++) {
        /* NOTE(review): loop body elided in this excerpt */
    }
    /* Free the downscaled frames used by b_frame_strategy == 2. */
    for (int i = 0; i < FF_ARRAY_ELEMS(m->tmp_frames); i++)
        av_frame_free(&m->tmp_frames[i]);

    av_frame_free(&s->new_pic);

    av_freep(&avctx->stats_out);

    /* Motion-estimation and rate-control side tables. */
    av_freep(&m->mv_table_base);
    av_freep(&s->p_field_select_table[0]);

    av_freep(&s->mb_type);
    av_freep(&s->lambda_table);

    /* Quantization matrices and noise-reduction offsets. */
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->dct_offset);

    return 0;
}
1123 
/* put block[] to dest[] */
static inline void put_dct(MPVEncContext *const s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    /* Dequantize the intra block in place with the given qscale, then
     * write the IDCT result straight to dest (overwrite, no accumulation). */
    s->c.dct_unquantize_intra(&s->c, block, i, qscale);
    s->c.idsp.idct_put(dest, line_size, block);
}
1131 
1132 static inline void add_dequant_dct(MPVEncContext *const s,
1133  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1134 {
1135  if (s->c.block_last_index[i] >= 0) {
1136  s->c.dct_unquantize_inter(&s->c, block, i, qscale);
1137 
1138  s->c.idsp.idct_add(dest, line_size, block);
1139  }
1140 }
1141 
/**
 * Performs dequantization and IDCT (if necessary)
 */
static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
{
    if (s->c.avctx->debug & FF_DEBUG_DCT_COEFF) {
        /* print DCT coefficients */
        av_log(s->c.avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->c.mb_x, s->c.mb_y);
        for (int i = 0; i < 6; i++) {
            for (int j = 0; j < 64; j++) {
                /* Dump in raster order by undoing the IDCT permutation. */
                av_log(s->c.avctx, AV_LOG_DEBUG, "%5d",
                       block[i][s->c.idsp.idct_permutation[j]]);
            }
            av_log(s->c.avctx, AV_LOG_DEBUG, "\n");
        }
    }

    /* Reconstruct only the picture types selected during init in
     * frame_reconstruction_bitfield (e.g. reference frames, or all
     * frames when PSNR/full-RD was requested). */
    if ((1 << s->c.pict_type) & s->frame_reconstruction_bitfield) {
        uint8_t *dest_y = s->c.dest[0], *dest_cb = s->c.dest[1], *dest_cr = s->c.dest[2];
        int dct_linesize, dct_offset;
        const int linesize   = s->c.cur_pic.linesize[0];
        const int uvlinesize = s->c.cur_pic.linesize[1];
        const int block_size = 8;

        /* With interlaced DCT the two luma block rows interleave by field:
         * double the stride and put the lower pair one line down; otherwise
         * the lower pair starts block_size lines down. */
        dct_linesize = linesize << s->c.interlaced_dct;
        dct_offset   = s->c.interlaced_dct ? linesize : linesize * block_size;

        if (!s->c.mb_intra) {
            /* No MC, as that was already done otherwise */
            add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->c.qscale);
            add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->c.qscale);
            add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->c.qscale);
            add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);

            if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
                if (s->c.chroma_y_shift) {
                    /* 4:2:0 — one 8x8 block per chroma plane. */
                    add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
                    add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
                } else {
                    /* Chroma not vertically subsampled — two blocks per plane;
                     * halve the luma-derived stride/offset for chroma. */
                    dct_linesize >>= 1;
                    dct_offset   >>= 1;
                    add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->c.chroma_qscale);
                    add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->c.chroma_qscale);
                    add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
                    add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
                }
            }
        } else {
            /* dct only in intra block */
            put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->c.qscale);
            put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->c.qscale);
            put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->c.qscale);
            put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);

            if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
                if (s->c.chroma_y_shift) {
                    put_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
                    put_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
                } else {
                    dct_offset   >>= 1;
                    dct_linesize >>= 1;
                    put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->c.chroma_qscale);
                    put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->c.chroma_qscale);
                    put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
                    put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
                }
            }
        }
    }
}
1212 
/**
 * Sum of absolute errors of a 16x16 block against the constant value
 * ref (typically the block mean).
 */
static int get_sae(const uint8_t *src, int ref, int stride)
{
    int sum = 0;

    for (int row = 0; row < 16; row++) {
        for (int col = 0; col < 16; col++) {
            int diff = src[col + row * stride] - ref;
            sum += diff < 0 ? -diff : diff;
        }
    }

    return sum;
}
1226 
1227 static int get_intra_count(MPVEncContext *const s, const uint8_t *src,
1228  const uint8_t *ref, int stride)
1229 {
1230  int x, y, w, h;
1231  int acc = 0;
1232 
1233  w = s->c.width & ~15;
1234  h = s->c.height & ~15;
1235 
1236  for (y = 0; y < h; y += 16) {
1237  for (x = 0; x < w; x += 16) {
1238  int offset = x + y * stride;
1239  int sad = s->sad_cmp[0](NULL, src + offset, ref + offset,
1240  stride, 16);
1241  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1242  int sae = get_sae(src + offset, mean, stride);
1243 
1244  acc += sae + 500 < sad;
1245  }
1246  }
1247  return acc;
1248 }
1249 
1250 /**
1251  * Allocates new buffers for an AVFrame and copies the properties
1252  * from another AVFrame.
1253  */
1254 static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
1255 {
1256  AVCodecContext *avctx = s->c.avctx;
1257  int ret;
1258 
1259  f->width = avctx->width + 2 * EDGE_WIDTH;
1260  f->height = avctx->height + 2 * EDGE_WIDTH;
1261 
1262  ret = ff_encode_alloc_frame(avctx, f);
1263  if (ret < 0)
1264  return ret;
1265 
1266  ret = ff_mpv_pic_check_linesize(avctx, f, &s->c.linesize, &s->c.uvlinesize);
1267  if (ret < 0)
1268  return ret;
1269 
1270  for (int i = 0; f->data[i]; i++) {
1271  int offset = (EDGE_WIDTH >> (i ? s->c.chroma_y_shift : 0)) *
1272  f->linesize[i] +
1273  (EDGE_WIDTH >> (i ? s->c.chroma_x_shift : 0));
1274  f->data[i] += offset;
1275  }
1276  f->width = avctx->width;
1277  f->height = avctx->height;
1278 
1279  ret = av_frame_copy_props(f, props_frame);
1280  if (ret < 0)
1281  return ret;
1282 
1283  return 0;
1284 }
1285 
/* Accepts one user-supplied frame (or NULL when flushing), timestamps it,
 * copies or references it into an encoder-owned MPVPicture, and appends it
 * to the input_picture FIFO at the position given by the encoding delay. */
static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
{
    MPVEncContext *const s = &m->s;
    MPVPicture *pic = NULL;
    int64_t pts;
    int display_picture_number = 0, ret;
    int encoding_delay = m->max_b_frames ? m->max_b_frames
                                         : (s->c.low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    av_assert1(!m->input_picture[0]);

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = m->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (m->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = m->user_specified_pts;

                /* pts must be strictly monotonically increasing. */
                if (pts <= last) {
                    av_log(s->c.avctx, AV_LOG_ERROR,
                           "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                           pts, last);
                    return AVERROR(EINVAL);
                }

                if (!s->c.low_delay && display_picture_number == 1)
                    m->dts_delta = pts - last;
            }
            m->user_specified_pts = pts;
        } else {
            /* No pts from the user: extrapolate from the previous one, or
             * fall back to the display picture number. */
            if (m->user_specified_pts != AV_NOPTS_VALUE) {
                m->user_specified_pts =
                pts = m->user_specified_pts + 1;
                av_log(s->c.avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* The input can only be referenced directly (zero-copy) when its
         * strides, dimensions and alignment match what the encoder expects. */
        if (pic_arg->linesize[0] != s->c.linesize ||
            pic_arg->linesize[1] != s->c.uvlinesize ||
            pic_arg->linesize[2] != s->c.uvlinesize)
            direct = 0;
        if ((s->c.width & 15) || (s->c.height & 15))
            direct = 0;
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
            direct = 0;
        if (s->c.linesize & (STRIDE_ALIGN-1))
            direct = 0;

        ff_dlog(s->c.avctx, "%d %d %td %td\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->c.linesize, s->c.uvlinesize);

        pic = av_refstruct_pool_get(s->c.picture_pool);
        if (!pic)
            return AVERROR(ENOMEM);

        if (direct) {
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                goto fail;
            pic->shared = 1;
        } else {
            /* Copy path: allocate an edged frame and copy plane by plane. */
            ret = prepare_picture(s, pic->f, pic_arg);
            if (ret < 0)
                goto fail;

            for (int i = 0; i < 3; i++) {
                ptrdiff_t src_stride = pic_arg->linesize[i];
                ptrdiff_t dst_stride = i ? s->c.uvlinesize : s->c.linesize;
                int h_shift = i ? s->c.chroma_x_shift : 0;
                int v_shift = i ? s->c.chroma_y_shift : 0;
                int w = AV_CEIL_RSHIFT(s->c.width , h_shift);
                int h = AV_CEIL_RSHIFT(s->c.height, v_shift);
                const uint8_t *src = pic_arg->data[i];
                uint8_t *dst = pic->f->data[i];
                int vpad = 16;

                /* Interlaced MPEG-2 needs extra bottom padding when the
                 * 32-aligned height leaves more than 16 lines of slack. */
                if (   s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO
                    && !s->c.progressive_sequence
                    && FFALIGN(s->c.height, 32) - s->c.height > 16)
                    vpad = 32;

                if (!s->c.avctx->rc_buffer_size)
                    dst += INPLACE_OFFSET;

                if (src_stride == dst_stride)
                    memcpy(dst, src, src_stride * h - src_stride + w);
                else {
                    int h2 = h;
                    uint8_t *dst2 = dst;
                    while (h2--) {
                        memcpy(dst2, src, w);
                        dst2 += dst_stride;
                        src += src_stride;
                    }
                }
                /* Pad non-macroblock-aligned pictures at the bottom edge. */
                if ((s->c.width & 15) || (s->c.height & (vpad-1))) {
                    s->mpvencdsp.draw_edges(dst, dst_stride,
                                            w, h,
                                            16 >> h_shift,
                                            vpad >> v_shift,
                                            EDGE_BOTTOM);
                }
            }
        }

        pic->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg
    } else if (!m->reordered_input_picture[1]) {
        /* Flushing: When the above check is true, the encoder is about to run
         * out of frames to encode. Check if there are input_pictures left;
         * if so, ensure m->input_picture[0] contains the first picture.
         * A flush_offset != 1 will only happen if we did not receive enough
         * input frames. */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (m->input_picture[flush_offset])
                break;

        encoding_delay -= flush_offset - 1;
    }

    /* shift buffer entries */
    for (int i = flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
        m->input_picture[i - flush_offset] = m->input_picture[i];
    for (int i = MPVENC_MAX_B_FRAMES + 1 - flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
        m->input_picture[i] = NULL;

    m->input_picture[encoding_delay] = pic;

    return 0;
fail:
    av_refstruct_unref(&pic);
    return ret;
}
1425 
1426 static int skip_check(MPVMainEncContext *const m,
1427  const MPVPicture *p, const MPVPicture *ref)
1428 {
1429  MPVEncContext *const s = &m->s;
1430  int score = 0;
1431  int64_t score64 = 0;
1432 
1433  for (int plane = 0; plane < 3; plane++) {
1434  const int stride = p->f->linesize[plane];
1435  const int bw = plane ? 1 : 2;
1436  for (int y = 0; y < s->c.mb_height * bw; y++) {
1437  for (int x = 0; x < s->c.mb_width * bw; x++) {
1438  int off = p->shared ? 0 : 16;
1439  const uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1440  const uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1441  int v = m->frame_skip_cmp_fn(s, dptr, rptr, stride, 8);
1442 
1443  switch (FFABS(m->frame_skip_exp)) {
1444  case 0: score = FFMAX(score, v); break;
1445  case 1: score += FFABS(v); break;
1446  case 2: score64 += v * (int64_t)v; break;
1447  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1448  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1449  }
1450  }
1451  }
1452  }
1453  emms_c();
1454 
1455  if (score)
1456  score64 = score;
1457  if (m->frame_skip_exp < 0)
1458  score64 = pow(score64 / (double)(s->c.mb_width * s->c.mb_height),
1459  -1.0/m->frame_skip_exp);
1460 
1461  if (score64 < m->frame_skip_threshold)
1462  return 1;
1463  if (score64 < ((m->frame_skip_factor * (int64_t) s->lambda) >> 8))
1464  return 1;
1465  return 0;
1466 }
1467 
{
    /* Drains an encoder, summing the sizes of all produced packets.
     * NOTE(review): the signature and the actual send/receive calls are
     * elided in this excerpt; only the accumulation logic is visible. */
    int ret;
    int size = 0;

    if (ret < 0)
        return ret;

    do {
        if (ret >= 0) {
            /* Accumulate total coded size across all packets. */
            size += pkt->size;
        } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            return ret; /* real error; EAGAIN/EOF merely end the loop */
    } while (ret >= 0);

    return size;
}
1488 
{
    /* Estimates the best number of consecutive B-frames by encoding
     * downscaled candidate sequences and comparing rate-distortion costs.
     * NOTE(review): the signature and several statements are elided in
     * this excerpt. */
    MPVEncContext *const s = &m->s;
    AVPacket *pkt;
    const int scale = m->brd_scale;
    int width  = s->c.width  >> scale;
    int height = s->c.height >> scale;
    int out_size, p_lambda, b_lambda, lambda2;
    int64_t best_rd  = INT64_MAX;
    int best_b_count = -1;
    int ret = 0;

    av_assert0(scale >= 0 && scale <= 3);

    pkt = av_packet_alloc();
    if (!pkt)
        return AVERROR(ENOMEM);

    p_lambda = m->last_lambda_for[AV_PICTURE_TYPE_P];
    //p_lambda * FFABS(s->c.avctx->b_quant_factor) + s->c.avctx->b_quant_offset;
    b_lambda = m->last_lambda_for[AV_PICTURE_TYPE_B];
    if (!b_lambda) // FIXME we should do this somewhere else
        b_lambda = p_lambda;
    /* NOTE(review): continuation of this expression is elided in this excerpt */
    lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>

    /* Downscale the candidate input pictures (and the current reference at
     * slot 0) into tmp_frames[]. */
    for (int i = 0; i < m->max_b_frames + 2; i++) {
        const MPVPicture *pre_input_ptr = i ? m->input_picture[i - 1] :
                                              s->c.next_pic.ptr;

        if (pre_input_ptr) {
            const uint8_t *data[4];
            memcpy(data, pre_input_ptr->f->data, sizeof(data));

            if (!pre_input_ptr->shared && i) {
                data[0] += INPLACE_OFFSET;
                data[1] += INPLACE_OFFSET;
                data[2] += INPLACE_OFFSET;
            }

            s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[0],
                                       m->tmp_frames[i]->linesize[0],
                                       data[0],
                                       pre_input_ptr->f->linesize[0],
                                       width, height);
            s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[1],
                                       m->tmp_frames[i]->linesize[1],
                                       data[1],
                                       pre_input_ptr->f->linesize[1],
                                       width >> 1, height >> 1);
            s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[2],
                                       m->tmp_frames[i]->linesize[2],
                                       data[2],
                                       pre_input_ptr->f->linesize[2],
                                       width >> 1, height >> 1);
        }
    }

    /* Try each candidate B-frame run length j and keep the cheapest. */
    for (int j = 0; j < m->max_b_frames + 1; j++) {
        AVCodecContext *c;
        int64_t rd = 0;

        if (!m->input_picture[j])
            break;

        /* NOTE(review): context allocation call elided in this excerpt */
        if (!c) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        c->width        = width;
        c->height       = height;
        c->flags       |= s->c.avctx->flags & AV_CODEC_FLAG_QPEL;
        c->mb_decision  = s->c.avctx->mb_decision;
        c->me_cmp       = s->c.avctx->me_cmp;
        c->mb_cmp       = s->c.avctx->mb_cmp;
        c->me_sub_cmp   = s->c.avctx->me_sub_cmp;
        c->pix_fmt      = AV_PIX_FMT_YUV420P;
        c->time_base    = s->c.avctx->time_base;
        c->max_b_frames = m->max_b_frames;

        ret = avcodec_open2(c, s->c.avctx->codec, NULL);
        if (ret < 0)
            goto fail;


        m->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;

        out_size = encode_frame(c, m->tmp_frames[0], pkt);
        if (out_size < 0) {
            ret = out_size;
            goto fail;
        }

        //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;

        for (int i = 0; i < m->max_b_frames + 1; i++) {
            /* P-frame at the end of each run of j B-frames, or at the tail. */
            int is_p = i % (j + 1) == j || i == m->max_b_frames;

            /* NOTE(review): the pict_type assignment's right-hand side is
             * partly elided in this excerpt */
            m->tmp_frames[i + 1]->pict_type = is_p ?
            m->tmp_frames[i + 1]->quality   = is_p ? p_lambda : b_lambda;

            out_size = encode_frame(c, m->tmp_frames[i + 1], pkt);
            if (out_size < 0) {
                ret = out_size;
                goto fail;
            }

            rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
        }

        /* get the delayed frames */
        if (out_size < 0) {
            ret = out_size;
            goto fail;
        }
        rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);

        /* Add the distortion accumulated by the scratch encoder. */
        rd += c->error[0] + c->error[1] + c->error[2];

        if (rd < best_rd) {
            best_rd = rd;
            best_b_count = j;
        }

fail:
        if (ret < 0) {
            best_b_count = ret;
            break;
        }
    }

    av_packet_free(&pkt);

    return best_b_count;
}
1632 
/**
 * Determines whether an input picture is discarded or not
 * and if not determines the length of the next chain of B frames
 * and moves these pictures (including the P frame) into
 * reordered_input_picture.
 * input_picture[0] is always NULL when exiting this function, even on error;
 * reordered_input_picture[0] is always NULL when exiting this function on error.
 */
{
    /* NOTE(review): the function signature and several statements are
     * elided in this excerpt. */
    MPVEncContext *const s = &m->s;

    /* Either nothing to do or can't do anything */
    if (m->reordered_input_picture[0] || !m->input_picture[0])
        return 0;

    /* set next picture type & ordering */
    if (m->frame_skip_threshold || m->frame_skip_factor) {
        if (m->picture_in_gop_number < m->gop_size &&
            s->c.next_pic.ptr &&
            skip_check(m, m->input_picture[0], s->c.next_pic.ptr)) {
            // FIXME check that the gop check above is +-1 correct

            /* Frame is dropped: account zero bits toward the VBV model. */
            ff_vbv_update(m, 0);

            return 0;
        }
    }

    if (/* m->picture_in_gop_number >= m->gop_size || */
        !s->c.next_pic.ptr || m->intra_only) {
        /* No reference available (or intra-only): emit immediately. */
        m->reordered_input_picture[0] = m->input_picture[0];
        m->input_picture[0] = NULL;
        m->coded_picture_number++;
    } else {
        int b_frames = 0;

        /* In two-pass mode, take the picture types decided in pass 1. */
        if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
            for (int i = 0; i < m->max_b_frames + 1; i++) {
                int pict_num = m->input_picture[0]->display_picture_number + i;

                if (pict_num >= m->rc_context.num_entries)
                    break;
                if (!m->input_picture[i]) {
                    m->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
                    break;
                }

                m->input_picture[i]->f->pict_type =
                    m->rc_context.entry[pict_num].new_pict_type;
            }
        }

        if (m->b_frame_strategy == 0) {
            /* Fixed strategy: as many B-frames as are available. */
            b_frames = m->max_b_frames;
            while (b_frames && !m->input_picture[b_frames])
                b_frames--;
        } else if (m->b_frame_strategy == 1) {
            /* Heuristic: score each candidate by its intra-block count.
             * NOTE(review): the scoring call is partly elided in this
             * excerpt. */
            for (int i = 1; i < m->max_b_frames + 1; i++) {
                if (m->input_picture[i] &&
                    m->input_picture[i]->b_frame_score == 0) {
                        m->input_picture[i    ]->f->data[0],
                        m->input_picture[i - 1]->f->data[0],
                        s->c.linesize) + 1;
                }
            }
            for (int i = 0;; i++) {
                if (i >= m->max_b_frames + 1 ||
                    !m->input_picture[i] ||
                    m->input_picture[i]->b_frame_score - 1 >
                        s->c.mb_num / m->b_sensitivity) {
                    b_frames = FFMAX(0, i - 1);
                    break;
                }
            }

            /* reset scores */
            for (int i = 0; i < b_frames + 1; i++)
                m->input_picture[i]->b_frame_score = 0;
        } else if (m->b_frame_strategy == 2) {
            /* Full search: trial-encode downscaled candidates. */
            b_frames = estimate_best_b_count(m);
            if (b_frames < 0) {
                return b_frames;
            }
        }

        /* A user-forced non-B type inside the run shortens it. */
        for (int i = b_frames - 1; i >= 0; i--) {
            int type = m->input_picture[i]->f->pict_type;
            if (type && type != AV_PICTURE_TYPE_B)
                b_frames = i;
        }
        if (m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
            b_frames == m->max_b_frames) {
            av_log(s->c.avctx, AV_LOG_ERROR,
                   "warning, too many B-frames in a row\n");
        }

        /* GOP boundary handling: force an I-frame or trim the B run. */
        if (m->picture_in_gop_number + b_frames >= m->gop_size) {
            if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
                m->gop_size > m->picture_in_gop_number) {
                b_frames = m->gop_size - m->picture_in_gop_number - 1;
            } else {
                if (s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
                    b_frames = 0;
                m->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
            }
        }

        if ((s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
            m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
            b_frames--;

        /* Move the anchor first, then the B-frames, into coded order. */
        m->reordered_input_picture[0] = m->input_picture[b_frames];
        m->input_picture[b_frames] = NULL;
        m->coded_picture_number++;
        for (int i = 0; i < b_frames; i++) {
            m->reordered_input_picture[i + 1] = m->input_picture[i];
            m->input_picture[i] = NULL;
            /* NOTE(review): right-hand side elided in this excerpt */
            m->reordered_input_picture[i + 1]->f->pict_type =
            m->coded_picture_number++;
        }
    }

    return 0;
}
1769 
{
    /* Selects the next picture to be encoded: establishes the reordered
     * picture as cur_pic and sets up new_pic as the frame to feed to the
     * bitstream encoder.
     * NOTE(review): the signature and a few statements are elided in this
     * excerpt. */
    MPVEncContext *const s = &m->s;
    int ret;

    /* NOTE(review): loop body elided in this excerpt */
    for (int i = 1; i <= MPVENC_MAX_B_FRAMES; i++)

    av_assert1(!m->input_picture[0]);
    if (ret < 0)
        return ret;

    av_frame_unref(s->new_pic);

    if (m->reordered_input_picture[0]) {

        if (m->reordered_input_picture[0]->shared || s->c.avctx->rc_buffer_size) {
            // input is a shared pix, so we can't modify it -> allocate a new
            // one & ensure that the shared one is reusable
            av_frame_move_ref(s->new_pic, m->reordered_input_picture[0]->f);

            ret = prepare_picture(s, m->reordered_input_picture[0]->f, s->new_pic);
            if (ret < 0)
                goto fail;
        } else {
            // input is not a shared pix -> reuse buffer for current_pix
            ret = av_frame_ref(s->new_pic, m->reordered_input_picture[0]->f);
            if (ret < 0)
                goto fail;
            for (int i = 0; i < MPV_MAX_PLANES; i++)
                s->new_pic->data[i] += INPLACE_OFFSET;
        }
        /* Promote the reordered picture to the current coding picture and
         * attach the per-picture scratch/accessory buffers. */
        s->c.cur_pic.ptr = m->reordered_input_picture[0];
        m->reordered_input_picture[0] = NULL;
        av_assert1(s->c.mb_width  == s->c.buffer_pools.alloc_mb_width);
        av_assert1(s->c.mb_height == s->c.buffer_pools.alloc_mb_height);
        av_assert1(s->c.mb_stride == s->c.buffer_pools.alloc_mb_stride);
        ret = ff_mpv_alloc_pic_accessories(s->c.avctx, &s->c.cur_pic,
                                           &s->c.sc, &s->c.buffer_pools, s->c.mb_height);
        if (ret < 0) {
            ff_mpv_unref_picture(&s->c.cur_pic);
            return ret;
        }
        s->picture_number = s->c.cur_pic.ptr->display_picture_number;

    }
    return 0;
fail:
    return ret;
}
1827 
/* Per-frame post-encoding work: pad the reconstructed reference frame and
 * remember per-type statistics for the next frame's decisions. */
static void frame_end(MPVMainEncContext *const m)
{
    MPVEncContext *const s = &m->s;

    /* Replicate edge pixels around the reconstructed picture so that
     * motion vectors may point outside it (unrestricted MVs).
     * NOTE(review): the luma edge-width arguments are elided in this
     * excerpt. */
    if (s->me.unrestricted_mv &&
        s->c.cur_pic.reference &&
        !m->intra_only) {
        int hshift = s->c.chroma_x_shift;
        int vshift = s->c.chroma_y_shift;
        s->mpvencdsp.draw_edges(s->c.cur_pic.data[0],
                                s->c.cur_pic.linesize[0],
                                s->c.h_edge_pos, s->c.v_edge_pos,
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->c.cur_pic.data[1],
                                s->c.cur_pic.linesize[1],
                                s->c.h_edge_pos >> hshift,
                                s->c.v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->c.cur_pic.data[2],
                                s->c.cur_pic.linesize[2],
                                s->c.h_edge_pos >> hshift,
                                s->c.v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
    }

    /* Remember this frame's type and lambda for future rate decisions. */
    m->last_pict_type                 = s->c.pict_type;
    m->last_lambda_for[s->c.pict_type] = s->c.cur_pic.ptr->f->quality;
    if (s->c.pict_type != AV_PICTURE_TYPE_B)
        m->last_non_b_pict_type = s->c.pict_type;
}
1863 
{
    /* Recompute the per-coefficient noise-reduction DCT offsets from the
     * accumulated error statistics, separately for intra and inter blocks.
     * NOTE(review): the function signature is elided in this excerpt. */
    MPVEncContext *const s = &m->s;
    int intra, i;

    for (intra = 0; intra < 2; intra++) {
        /* Halve the accumulators once they grow large so that the
         * statistics keep adapting to recent content. */
        if (s->dct_count[intra] > (1 << 16)) {
            for (i = 0; i < 64; i++) {
                s->dct_error_sum[intra][i] >>= 1;
            }
            s->dct_count[intra] >>= 1;
        }

        /* Offset is proportional to noise_reduction scaled by how noisy
         * each coefficient has been (rounded division). */
        for (i = 0; i < 64; i++) {
            s->dct_offset[intra][i] = (m->noise_reduction *
                                       s->dct_count[intra] +
                                       s->dct_error_sum[intra][i] / 2) /
                                      (s->dct_error_sum[intra][i] + 1);
        }
    }
}
1885 
/* Per-frame pre-encoding setup: rotates the reference pictures and
 * refreshes the noise-reduction state. */
static void frame_start(MPVMainEncContext *const m)
{
    MPVEncContext *const s = &m->s;

    s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;

    /* Non-B frames become references: rotate last <- next <- current. */
    if (s->c.pict_type != AV_PICTURE_TYPE_B) {
        ff_mpv_replace_picture(&s->c.last_pic, &s->c.next_pic);
        ff_mpv_replace_picture(&s->c.next_pic, &s->c.cur_pic);
    }

    av_assert2(!!m->noise_reduction == !!s->dct_error_sum);
    if (s->dct_error_sum) {
        /* NOTE(review): call elided in this excerpt */
    }
}
1902 
                          const AVFrame *pic_arg, int *got_packet)
{
    /* Top-level per-frame entry point: queues the input frame, encodes the
     * next frame in coded order (with VBV-driven re-encoding if needed),
     * applies bit stuffing and vbv_delay patching, and fills pkt.
     * NOTE(review): the first line of the signature and several statements
     * are elided in this excerpt. */
    MPVMainEncContext *const m = avctx->priv_data;
    MPVEncContext *const s = &m->s;
    int stuffing_count, ret;
    int context_count = s->c.slice_context_count;

    ff_mpv_unref_picture(&s->c.cur_pic);

    m->vbv_ignore_qmax = 0;

    m->picture_in_gop_number++;

    ret = load_input_picture(m, pic_arg);
    if (ret < 0)
        return ret;

    /* NOTE(review): the call that sets ret here is elided in this excerpt */
    if (ret < 0)
        return ret;

    /* output? */
    if (s->new_pic->data[0]) {
        int growing_buffer = context_count == 1 && !s->data_partitioning;
        size_t pkt_size = 10000 + s->c.mb_width * s->c.mb_height *
                                  (growing_buffer ? 64 : (MAX_MB_BYTES + 100));
        if (CONFIG_MJPEG_ENCODER && avctx->codec_id == AV_CODEC_ID_MJPEG) {
            ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_pic, &pkt_size);
            if (ret < 0)
                return ret;
        }
        if ((ret = ff_alloc_packet(avctx, pkt, pkt_size)) < 0)
            return ret;
        if (s->mb_info) {
            /* Side data with per-macroblock positions (H.263 RFC 4629).
             * NOTE(review): the side-data type argument is elided in this
             * excerpt. */
            s->mb_info_ptr = av_packet_new_side_data(pkt,
                                 s->c.mb_width*s->c.mb_height*12);
            if (!s->mb_info_ptr)
                return AVERROR(ENOMEM);
            s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
        }

        s->c.pict_type = s->new_pic->pict_type;
        frame_start(m);
vbv_retry:
        ret = encode_picture(m, pkt);
        if (growing_buffer) {
            av_assert0(s->pb.buf == avctx->internal->byte_buffer);
            pkt->data = s->pb.buf;
            pkt->size = avctx->internal->byte_buffer_size;
        }
        if (ret < 0)
            return -1;

        frame_end(m);

        /* NOTE(review): MJPEG-specific call elided in this excerpt */
        if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->c.out_format == FMT_MJPEG)

        if (avctx->rc_buffer_size) {
            RateControlContext *rcc = &m->rc_context;
            int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
            int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
            int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;

            /* Frame too large for the VBV buffer: raise lambda and retry. */
            if (put_bits_count(&s->pb) > max_size &&
                s->lambda < m->lmax) {
                m->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
                                       (s->c.qscale + 1) / s->c.qscale);
                if (s->adaptive_quant) {
                    for (int i = 0; i < s->c.mb_height * s->c.mb_stride; i++)
                        s->lambda_table[i] =
                            FFMAX(s->lambda_table[i] + min_step,
                                  s->lambda_table[i] * (s->c.qscale + 1) /
                                  s->c.qscale);
                }
                s->c.mb_skipped = 0;        // done in frame_start()
                // done in encode_picture() so we must undo it
                if (s->c.pict_type == AV_PICTURE_TYPE_P) {
                    s->c.no_rounding ^= s->flipflop_rounding;
                }
                if (s->c.pict_type != AV_PICTURE_TYPE_B) {
                    s->c.time_base = s->c.last_time_base;
                    s->c.last_non_b_time = s->c.time - s->c.pp_time;
                }
                m->vbv_ignore_qmax = 1;
                av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
                goto vbv_retry;
            }

            av_assert0(avctx->rc_max_rate);
        }

        /* NOTE(review): pass-1 stats call elided in this excerpt */
        if (avctx->flags & AV_CODEC_FLAG_PASS1)

        for (int i = 0; i < MPV_MAX_PLANES; i++)
            avctx->error[i] += s->encoding_error[i];
        ff_encode_add_stats_side_data(pkt, s->c.cur_pic.ptr->f->quality,
                                      s->encoding_error,
                                      (avctx->flags&AV_CODEC_FLAG_PSNR) ? MPV_MAX_PLANES : 0,
                                      s->c.pict_type);

        if (avctx->flags & AV_CODEC_FLAG_PASS1)
            assert(put_bits_count(&s->pb) == m->header_bits + s->mv_bits +
                                             s->misc_bits + s->i_tex_bits +
                                             s->p_tex_bits);
        flush_put_bits(&s->pb);
        m->frame_bits = put_bits_count(&s->pb);

        /* Bit stuffing requested by the VBV model to avoid underflow. */
        stuffing_count = ff_vbv_update(m, m->frame_bits);
        m->stuffing_bits = 8*stuffing_count;
        if (stuffing_count) {
            if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
                av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
                return -1;
            }

            /* NOTE(review): the first case labels of this switch are elided
             * in this excerpt. */
            switch (s->c.codec_id) {
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0);
                }
                break;
            case AV_CODEC_ID_MPEG4:
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0xFF);
                }
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
                m->stuffing_bits = 0;
            }
            flush_put_bits(&s->pb);
            m->frame_bits = put_bits_count(&s->pb);
        }

        /* update MPEG-1/2 vbv_delay for CBR */
        if (avctx->rc_max_rate &&
            avctx->rc_min_rate == avctx->rc_max_rate &&
            s->c.out_format == FMT_MPEG1 &&
            90000LL * (avctx->rc_buffer_size - 1) <=
                avctx->rc_max_rate * 0xFFFFLL) {
            AVCPBProperties *props;
            size_t props_size;

            int vbv_delay, min_delay;
            double inbits  = avctx->rc_max_rate *
                             av_q2d(avctx->time_base);
            int    minbits = m->frame_bits - 8 *
                             (m->vbv_delay_pos - 1);
            double bits    = m->rc_context.buffer_index + minbits - inbits;
            uint8_t *const vbv_delay_ptr = s->pb.buf + m->vbv_delay_pos;

            if (bits < 0)
                av_log(avctx, AV_LOG_ERROR,
                       "Internal error, negative bits\n");

            av_assert1(s->c.repeat_first_field == 0);

            vbv_delay = bits * 90000 / avctx->rc_max_rate;
            min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
                        avctx->rc_max_rate;

            vbv_delay = FFMAX(vbv_delay, min_delay);

            av_assert0(vbv_delay < 0xFFFF);

            /* Patch the 16-bit vbv_delay field into the already-written
             * picture header (spread over three bytes). */
            vbv_delay_ptr[0] &= 0xF8;
            vbv_delay_ptr[0] |= vbv_delay >> 13;
            vbv_delay_ptr[1]  = vbv_delay >> 5;
            vbv_delay_ptr[2] &= 0x07;
            vbv_delay_ptr[2] |= vbv_delay << 3;

            props = av_cpb_properties_alloc(&props_size);
            if (!props)
                return AVERROR(ENOMEM);
            props->vbv_delay = vbv_delay * 300;

            /* NOTE(review): the side-data attach call is partly elided in
             * this excerpt. */
                                          (uint8_t*)props, props_size);
            if (ret < 0) {
                av_freep(&props);
                return ret;
            }
        }
        m->total_bits += m->frame_bits;

        /* Timestamps: for delayed (B-frame) streams dts lags pts by one
         * non-B picture; otherwise dts == pts. */
        pkt->pts = s->c.cur_pic.ptr->f->pts;
        pkt->duration = s->c.cur_pic.ptr->f->duration;
        if (!s->c.low_delay && s->c.pict_type != AV_PICTURE_TYPE_B) {
            if (!s->c.cur_pic.ptr->coded_picture_number)
                pkt->dts = pkt->pts - m->dts_delta;
            else
                pkt->dts = m->reordered_pts;
            m->reordered_pts = pkt->pts;
        } else
            pkt->dts = pkt->pts;

        // the no-delay case is handled in generic code
        if (avctx->codec->capabilities & AV_CODEC_CAP_DELAY) {
            ret = ff_encode_reordered_opaque(avctx, pkt, s->c.cur_pic.ptr->f);
            if (ret < 0)
                return ret;
        }

        /* NOTE(review): the bodies of the two ifs below are elided in this
         * excerpt. */
        if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
        if (s->mb_info)
    } else {
        m->frame_bits = 0;
    }

    ff_mpv_unref_picture(&s->c.cur_pic);

    av_assert1((m->frame_bits & 7) == 0);

    pkt->size = m->frame_bits / 8;
    *got_packet = !!pkt->size;
    return 0;
}
2131 
                                         int n, int threshold)
{
    /* Cost table indexed by the zero-run length preceding a +-1 level:
     * short runs are comparatively cheap to code, so they score higher. */
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    };
    int score = 0;
    int run = 0;
    int i;
    int16_t *block = s->block[n];
    const int last_index = s->c.block_last_index[n];
    int skip_dc;

    /* A negative threshold requests that the DC coefficient be considered
     * for elimination too; the sign merely encodes that flag. */
    if (threshold < 0) {
        skip_dc = 0;
        threshold = -threshold;
    } else
        skip_dc = 1;

    /* Are all we could set to zero already zero? */
    if (last_index <= skip_dc - 1)
        return;

    /* Score the +-1 coefficients; abort as soon as any coefficient with
     * magnitude > 1 is seen (such a block is always kept). */
    for (i = 0; i <= last_index; i++) {
        const int j = s->c.intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (level == 1) {
            if (skip_dc && i == 0)
                continue;
            score += tab[run];
            run = 0;
        } else if (level > 1) {
            return;
        } else {
            run++;
        }
    }
    if (score >= threshold)
        return;
    /* Block judged too cheap to be worth coding: zero its coefficients
     * (starting after DC when skip_dc is set). */
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->c.intra_scantable.permutated[i];
        block[j] = 0;
    }
    /* Update last_index: 0 if a DC coefficient survives, -1 if empty. */
    if (block[0])
        s->c.block_last_index[n] = 0;
    else
        s->c.block_last_index[n] = -1;
}
2187 
2188 static inline void clip_coeffs(const MPVEncContext *const s, int16_t block[],
2189  int last_index)
2190 {
2191  int i;
2192  const int maxlevel = s->max_qcoeff;
2193  const int minlevel = s->min_qcoeff;
2194  int overflow = 0;
2195 
2196  if (s->c.mb_intra) {
2197  i = 1; // skip clipping of intra dc
2198  } else
2199  i = 0;
2200 
2201  for (; i <= last_index; i++) {
2202  const int j = s->c.intra_scantable.permutated[i];
2203  int level = block[j];
2204 
2205  if (level > maxlevel) {
2206  level = maxlevel;
2207  overflow++;
2208  } else if (level < minlevel) {
2209  level = minlevel;
2210  overflow++;
2211  }
2212 
2213  block[j] = level;
2214  }
2215 
2216  if (overflow && s->c.avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2217  av_log(s->c.avctx, AV_LOG_INFO,
2218  "warning, clipping %d dct coefficients to %d..%d\n",
2219  overflow, minlevel, maxlevel);
2220 }
2221 
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
{
    // FIXME optimize
    for (int y = 0; y < 8; y++) {
        for (int x = 0; x < 8; x++) {
            int sum = 0, sqr = 0, count = 0;
            const int y_lo = FFMAX(y - 1, 0), y_hi = FFMIN(8, y + 2);
            const int x_lo = FFMAX(x - 1, 0), x_hi = FFMIN(8, x + 2);

            /* Gather first and second moments over the 3x3 neighbourhood,
             * clipped at the 8x8 block borders. */
            for (int yy = y_lo; yy < y_hi; yy++) {
                for (int xx = x_lo; xx < x_hi; xx++) {
                    const int v = ptr[xx + yy * stride];
                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }
            /* Local standard-deviation based weight. */
            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2245 
2247  int motion_x, int motion_y,
2248  int mb_block_height,
2249  int mb_block_width,
2250  int mb_block_count,
2251  int chroma_x_shift,
2252  int chroma_y_shift,
2253  int chroma_format)
2254 {
2255 /* Interlaced DCT is only possible with MPEG-2 and MPEG-4
2256  * and neither of these encoders currently supports 444. */
2257 #define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
2258  (s)->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
2259  DECLARE_ALIGNED(16, int16_t, weight)[12][64];
2260  int16_t orig[12][64];
2261  const int mb_x = s->c.mb_x;
2262  const int mb_y = s->c.mb_y;
2263  int i;
2264  int skip_dct[12];
2265  int dct_offset = s->c.linesize * 8; // default for progressive frames
2266  int uv_dct_offset = s->c.uvlinesize * 8;
2267  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2268  ptrdiff_t wrap_y, wrap_c;
2269 
2270  for (i = 0; i < mb_block_count; i++)
2271  skip_dct[i] = s->skipdct;
2272 
2273  if (s->adaptive_quant) {
2274  const int last_qp = s->c.qscale;
2275  const int mb_xy = mb_x + mb_y * s->c.mb_stride;
2276 
2277  s->lambda = s->lambda_table[mb_xy];
2278  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
2280 
2281  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2282  s->dquant = s->c.cur_pic.qscale_table[mb_xy] - last_qp;
2283 
2284  if (s->c.out_format == FMT_H263) {
2285  s->dquant = av_clip(s->dquant, -2, 2);
2286 
2287  if (s->c.codec_id == AV_CODEC_ID_MPEG4) {
2288  if (!s->c.mb_intra) {
2289  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
2290  if (s->dquant & 1 || s->c.mv_dir & MV_DIRECT)
2291  s->dquant = 0;
2292  }
2293  if (s->c.mv_type == MV_TYPE_8X8)
2294  s->dquant = 0;
2295  }
2296  }
2297  }
2298  }
2299  ff_set_qscale(&s->c, last_qp + s->dquant);
2300  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2301  ff_set_qscale(&s->c, s->c.qscale + s->dquant);
2302 
2303  wrap_y = s->c.linesize;
2304  wrap_c = s->c.uvlinesize;
2305  ptr_y = s->new_pic->data[0] +
2306  (mb_y * 16 * wrap_y) + mb_x * 16;
2307  ptr_cb = s->new_pic->data[1] +
2308  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2309  ptr_cr = s->new_pic->data[2] +
2310  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2311 
2312  if ((mb_x * 16 + 16 > s->c.width || mb_y * 16 + 16 > s->c.height) &&
2313  s->c.codec_id != AV_CODEC_ID_AMV) {
2314  uint8_t *ebuf = s->c.sc.edge_emu_buffer + 38 * wrap_y;
2315  int cw = (s->c.width + chroma_x_shift) >> chroma_x_shift;
2316  int ch = (s->c.height + chroma_y_shift) >> chroma_y_shift;
2317  s->c.vdsp.emulated_edge_mc(ebuf, ptr_y,
2318  wrap_y, wrap_y,
2319  16, 16, mb_x * 16, mb_y * 16,
2320  s->c.width, s->c.height);
2321  ptr_y = ebuf;
2322  s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2323  wrap_c, wrap_c,
2324  mb_block_width, mb_block_height,
2325  mb_x * mb_block_width, mb_y * mb_block_height,
2326  cw, ch);
2327  ptr_cb = ebuf + 16 * wrap_y;
2328  s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2329  wrap_c, wrap_c,
2330  mb_block_width, mb_block_height,
2331  mb_x * mb_block_width, mb_y * mb_block_height,
2332  cw, ch);
2333  ptr_cr = ebuf + 16 * wrap_y + 16;
2334  }
2335 
2336  if (s->c.mb_intra) {
2337  if (INTERLACED_DCT(s)) {
2338  int progressive_score, interlaced_score;
2339 
2340  s->c.interlaced_dct = 0;
2341  progressive_score = s->ildct_cmp[1](s, ptr_y, NULL, wrap_y, 8) +
2342  s->ildct_cmp[1](s, ptr_y + wrap_y * 8,
2343  NULL, wrap_y, 8) - 400;
2344 
2345  if (progressive_score > 0) {
2346  interlaced_score = s->ildct_cmp[1](s, ptr_y,
2347  NULL, wrap_y * 2, 8) +
2348  s->ildct_cmp[1](s, ptr_y + wrap_y,
2349  NULL, wrap_y * 2, 8);
2350  if (progressive_score > interlaced_score) {
2351  s->c.interlaced_dct = 1;
2352 
2353  dct_offset = wrap_y;
2354  uv_dct_offset = wrap_c;
2355  wrap_y <<= 1;
2356  if (chroma_format == CHROMA_422 ||
2357  chroma_format == CHROMA_444)
2358  wrap_c <<= 1;
2359  }
2360  }
2361  }
2362 
2363  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2364  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2365  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2366  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2367 
2368  if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
2369  skip_dct[4] = 1;
2370  skip_dct[5] = 1;
2371  } else {
2372  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2373  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2374  if (chroma_format == CHROMA_422) {
2375  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2376  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2377  } else if (chroma_format == CHROMA_444) {
2378  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2379  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2380  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2381  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2382  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2383  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2384  }
2385  }
2386  } else {
2387  op_pixels_func (*op_pix)[4];
2388  qpel_mc_func (*op_qpix)[16];
2389  uint8_t *dest_y, *dest_cb, *dest_cr;
2390 
2391  dest_y = s->c.dest[0];
2392  dest_cb = s->c.dest[1];
2393  dest_cr = s->c.dest[2];
2394 
2395  if ((!s->c.no_rounding) || s->c.pict_type == AV_PICTURE_TYPE_B) {
2396  op_pix = s->c.hdsp.put_pixels_tab;
2397  op_qpix = s->c.qdsp.put_qpel_pixels_tab;
2398  } else {
2399  op_pix = s->c.hdsp.put_no_rnd_pixels_tab;
2400  op_qpix = s->c.qdsp.put_no_rnd_qpel_pixels_tab;
2401  }
2402 
2403  if (s->c.mv_dir & MV_DIR_FORWARD) {
2404  ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 0,
2405  s->c.last_pic.data,
2406  op_pix, op_qpix);
2407  op_pix = s->c.hdsp.avg_pixels_tab;
2408  op_qpix = s->c.qdsp.avg_qpel_pixels_tab;
2409  }
2410  if (s->c.mv_dir & MV_DIR_BACKWARD) {
2411  ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 1,
2412  s->c.next_pic.data,
2413  op_pix, op_qpix);
2414  }
2415 
2416  if (INTERLACED_DCT(s)) {
2417  int progressive_score, interlaced_score;
2418 
2419  s->c.interlaced_dct = 0;
2420  progressive_score = s->ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2421  s->ildct_cmp[0](s, dest_y + wrap_y * 8,
2422  ptr_y + wrap_y * 8,
2423  wrap_y, 8) - 400;
2424 
2425  if (s->c.avctx->ildct_cmp == FF_CMP_VSSE)
2426  progressive_score -= 400;
2427 
2428  if (progressive_score > 0) {
2429  interlaced_score = s->ildct_cmp[0](s, dest_y, ptr_y,
2430  wrap_y * 2, 8) +
2431  s->ildct_cmp[0](s, dest_y + wrap_y,
2432  ptr_y + wrap_y,
2433  wrap_y * 2, 8);
2434 
2435  if (progressive_score > interlaced_score) {
2436  s->c.interlaced_dct = 1;
2437 
2438  dct_offset = wrap_y;
2439  uv_dct_offset = wrap_c;
2440  wrap_y <<= 1;
2441  if (chroma_format == CHROMA_422)
2442  wrap_c <<= 1;
2443  }
2444  }
2445  }
2446 
2447  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2448  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2449  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2450  dest_y + dct_offset, wrap_y);
2451  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2452  dest_y + dct_offset + 8, wrap_y);
2453 
2454  if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
2455  skip_dct[4] = 1;
2456  skip_dct[5] = 1;
2457  } else {
2458  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2459  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2460  if (!chroma_y_shift) { /* 422 */
2461  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2462  dest_cb + uv_dct_offset, wrap_c);
2463  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2464  dest_cr + uv_dct_offset, wrap_c);
2465  }
2466  }
2467  /* pre quantization */
2468  if (s->mc_mb_var[s->c.mb_stride * mb_y + mb_x] < 2 * s->c.qscale * s->c.qscale) {
2469  // FIXME optimize
2470  if (s->sad_cmp[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->c.qscale)
2471  skip_dct[0] = 1;
2472  if (s->sad_cmp[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->c.qscale)
2473  skip_dct[1] = 1;
2474  if (s->sad_cmp[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2475  wrap_y, 8) < 20 * s->c.qscale)
2476  skip_dct[2] = 1;
2477  if (s->sad_cmp[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2478  wrap_y, 8) < 20 * s->c.qscale)
2479  skip_dct[3] = 1;
2480  if (s->sad_cmp[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->c.qscale)
2481  skip_dct[4] = 1;
2482  if (s->sad_cmp[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->c.qscale)
2483  skip_dct[5] = 1;
2484  if (!chroma_y_shift) { /* 422 */
2485  if (s->sad_cmp[1](NULL, ptr_cb + uv_dct_offset,
2486  dest_cb + uv_dct_offset,
2487  wrap_c, 8) < 20 * s->c.qscale)
2488  skip_dct[6] = 1;
2489  if (s->sad_cmp[1](NULL, ptr_cr + uv_dct_offset,
2490  dest_cr + uv_dct_offset,
2491  wrap_c, 8) < 20 * s->c.qscale)
2492  skip_dct[7] = 1;
2493  }
2494  }
2495  }
2496 
2497  if (s->quantizer_noise_shaping) {
2498  if (!skip_dct[0])
2499  get_visual_weight(weight[0], ptr_y , wrap_y);
2500  if (!skip_dct[1])
2501  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2502  if (!skip_dct[2])
2503  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2504  if (!skip_dct[3])
2505  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2506  if (!skip_dct[4])
2507  get_visual_weight(weight[4], ptr_cb , wrap_c);
2508  if (!skip_dct[5])
2509  get_visual_weight(weight[5], ptr_cr , wrap_c);
2510  if (!chroma_y_shift) { /* 422 */
2511  if (!skip_dct[6])
2512  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2513  wrap_c);
2514  if (!skip_dct[7])
2515  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2516  wrap_c);
2517  }
2518  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2519  }
2520 
2521  /* DCT & quantize */
2522  av_assert2(s->c.out_format != FMT_MJPEG || s->c.qscale == 8);
2523  {
2524  for (i = 0; i < mb_block_count; i++) {
2525  if (!skip_dct[i]) {
2526  int overflow;
2527  s->c.block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->c.qscale, &overflow);
2528  // FIXME we could decide to change to quantizer instead of
2529  // clipping
2530  // JS: I don't think that would be a good idea it could lower
2531  // quality instead of improve it. Just INTRADC clipping
2532  // deserves changes in quantizer
2533  if (overflow)
2534  clip_coeffs(s, s->block[i], s->c.block_last_index[i]);
2535  } else
2536  s->c.block_last_index[i] = -1;
2537  }
2538  if (s->quantizer_noise_shaping) {
2539  for (i = 0; i < mb_block_count; i++) {
2540  if (!skip_dct[i]) {
2541  s->c.block_last_index[i] =
2542  dct_quantize_refine(s, s->block[i], weight[i],
2543  orig[i], i, s->c.qscale);
2544  }
2545  }
2546  }
2547 
2548  if (s->luma_elim_threshold && !s->c.mb_intra)
2549  for (i = 0; i < 4; i++)
2550  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2551  if (s->chroma_elim_threshold && !s->c.mb_intra)
2552  for (i = 4; i < mb_block_count; i++)
2553  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2554 
2555  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2556  for (i = 0; i < mb_block_count; i++) {
2557  if (s->c.block_last_index[i] == -1)
2558  s->coded_score[i] = INT_MAX / 256;
2559  }
2560  }
2561  }
2562 
2563  if ((s->c.avctx->flags & AV_CODEC_FLAG_GRAY) && s->c.mb_intra) {
2564  s->c.block_last_index[4] =
2565  s->c.block_last_index[5] = 0;
2566  s->block[4][0] =
2567  s->block[5][0] = (1024 + s->c.c_dc_scale / 2) / s->c.c_dc_scale;
2568  if (!chroma_y_shift) { /* 422 / 444 */
2569  for (i=6; i<12; i++) {
2570  s->c.block_last_index[i] = 0;
2571  s->block[i][0] = s->block[4][0];
2572  }
2573  }
2574  }
2575 
2576  // non c quantize code returns incorrect block_last_index FIXME
2577  if (s->c.alternate_scan && s->dct_quantize != dct_quantize_c) {
2578  for (i = 0; i < mb_block_count; i++) {
2579  int j;
2580  if (s->c.block_last_index[i] > 0) {
2581  for (j = 63; j > 0; j--) {
2582  if (s->block[i][s->c.intra_scantable.permutated[j]])
2583  break;
2584  }
2585  s->c.block_last_index[i] = j;
2586  }
2587  }
2588  }
2589 
2590  s->encode_mb(s, s->block, motion_x, motion_y);
2591 }
2592 
2593 static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
2594 {
2595  if (s->c.chroma_format == CHROMA_420)
2596  encode_mb_internal(s, motion_x, motion_y, 8, 8, 6, 1, 1, CHROMA_420);
2597  else if (s->c.chroma_format == CHROMA_422)
2598  encode_mb_internal(s, motion_x, motion_y, 16, 8, 8, 1, 0, CHROMA_422);
2599  else
2600  encode_mb_internal(s, motion_x, motion_y, 16, 16, 12, 0, 0, CHROMA_444);
2601 }
2602 
2603 typedef struct MBBackup {
2604  struct {
2605  int mv[2][4][2];
2606  int last_mv[2][2][2];
2609  int qscale;
2612  } c;
2614  int last_dc[3];
2616  int dquant;
2618  int16_t (*block)[64];
2620 } MBBackup;
2621 
/* COPY_CONTEXT generates a pair of helpers used by the RD macroblock trial
 * loop: a "before" function that snapshots the coder state encode_mb() will
 * mutate, and an "after" function that captures the resulting state so the
 * cheapest trial can later be restored.  BEFORE/AFTER name the generated
 * functions; DST_TYPE/SRC_TYPE let the same body copy
 * MPVEncContext <-> MBBackup in either direction. */
#define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE) \
static inline void BEFORE ##_context_before_encode(DST_TYPE *const d, \
                                                   const SRC_TYPE *const s) \
{ \
    /* FIXME is memcpy faster than a loop? */ \
    memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
 \
    /* MPEG-1 */ \
    d->mb_skip_run = s->mb_skip_run; \
    for (int i = 0; i < 3; i++) \
        d->last_dc[i] = s->last_dc[i]; \
 \
    /* statistics */ \
    d->mv_bits    = s->mv_bits; \
    d->i_tex_bits = s->i_tex_bits; \
    d->p_tex_bits = s->p_tex_bits; \
    d->i_count    = s->i_count; \
    d->misc_bits  = s->misc_bits; \
    d->last_bits  = 0; \
 \
    d->c.mb_skipped = 0; \
    d->c.qscale     = s->c.qscale; \
    d->dquant       = s->dquant; \
 \
    d->esc3_level_length = s->esc3_level_length; \
} \
 \
static inline void AFTER ## _context_after_encode(DST_TYPE *const d, \
                                                  const SRC_TYPE *const s, \
                                                  int data_partitioning) \
{ \
    /* FIXME is memcpy faster than a loop? */ \
    memcpy(d->c.mv, s->c.mv, 2*4*2*sizeof(int)); \
    memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
 \
    /* MPEG-1 */ \
    d->mb_skip_run = s->mb_skip_run; \
    for (int i = 0; i < 3; i++) \
        d->last_dc[i] = s->last_dc[i]; \
 \
    /* statistics */ \
    d->mv_bits    = s->mv_bits; \
    d->i_tex_bits = s->i_tex_bits; \
    d->p_tex_bits = s->p_tex_bits; \
    d->i_count    = s->i_count; \
    d->misc_bits  = s->misc_bits; \
 \
    d->c.mb_intra   = s->c.mb_intra; \
    d->c.mb_skipped = s->c.mb_skipped; \
    d->c.mv_type    = s->c.mv_type; \
    d->c.mv_dir     = s->c.mv_dir; \
    d->pb           = s->pb; \
    if (data_partitioning) { \
        d->pb2    = s->pb2; \
        d->tex_pb = s->tex_pb; \
    } \
    d->block = s->block; \
    for (int i = 0; i < 8; i++) \
        d->c.block_last_index[i] = s->c.block_last_index[i]; \
    d->c.interlaced_dct = s->c.interlaced_dct; \
    d->c.qscale         = s->c.qscale; \
 \
    d->esc3_level_length = s->esc3_level_length; \
}

/* backup/save: MPVEncContext -> MBBackup snapshot;
 * reset/store: MBBackup -> MPVEncContext restore. */
COPY_CONTEXT(backup, save, MBBackup, MPVEncContext)
COPY_CONTEXT(reset, store, MPVEncContext, MBBackup)
2689 
/**
 * Try one candidate coding of the current macroblock and keep it if it is
 * the cheapest so far.
 *
 * Coder state is first restored from *backup; the output is written into the
 * double-buffered bitstream/block set selected by *next_block.  The score is
 * the number of bits produced; with full RD macroblock decision it becomes
 * bits * lambda2 + SSE distortion.  On a new best, *dmin is lowered,
 * *next_block is flipped (so the winning buffer is preserved) and the
 * resulting coder state is saved into *best.
 */
static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best,
                         PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
                         int *dmin, int *next_block, int motion_x, int motion_y)
{
    int score;
    uint8_t *dest_backup[3];

    reset_context_before_encode(s, backup);

    s->block = s->blocks[*next_block];
    s->pb = pb[*next_block];
    if (s->data_partitioning) {
        s->pb2 = pb2 [*next_block];
        s->tex_pb= tex_pb[*next_block];
    }

    if(*next_block){
        /* Redirect reconstruction into the scratchpad so the real
         * destination keeps the currently-best reconstruction. */
        memcpy(dest_backup, s->c.dest, sizeof(s->c.dest));
        s->c.dest[0] = s->c.sc.rd_scratchpad;
        s->c.dest[1] = s->c.sc.rd_scratchpad + 16*s->c.linesize;
        s->c.dest[2] = s->c.sc.rd_scratchpad + 16*s->c.linesize + 8;
        av_assert0(s->c.linesize >= 32); //FIXME
    }

    encode_mb(s, motion_x, motion_y);

    score= put_bits_count(&s->pb);
    if (s->data_partitioning) {
        score+= put_bits_count(&s->pb2);
        score+= put_bits_count(&s->tex_pb);
    }

    if (s->c.avctx->mb_decision == FF_MB_DECISION_RD) {
        mpv_reconstruct_mb(s, s->block);

        /* rate-distortion cost: bits * lambda2 + SSE */
        score *= s->lambda2;
        score += sse_mb(s) << FF_LAMBDA_SHIFT;
    }

    if(*next_block){
        memcpy(s->c.dest, dest_backup, sizeof(s->c.dest));
    }

    if(score<*dmin){
        *dmin= score;
        *next_block^=1;

        save_context_after_encode(best, s, s->data_partitioning);
    }
}
2740 
2741 static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
2742 {
2743  const uint32_t *sq = ff_square_tab + 256;
2744  int acc=0;
2745  int x,y;
2746 
2747  if(w==16 && h==16)
2748  return s->sse_cmp[0](NULL, src1, src2, stride, 16);
2749  else if(w==8 && h==8)
2750  return s->sse_cmp[1](NULL, src1, src2, stride, 8);
2751 
2752  for(y=0; y<h; y++){
2753  for(x=0; x<w; x++){
2754  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2755  }
2756  }
2757 
2758  av_assert2(acc>=0);
2759 
2760  return acc;
2761 }
2762 
2763 static int sse_mb(MPVEncContext *const s)
2764 {
2765  int w= 16;
2766  int h= 16;
2767  int chroma_mb_w = w >> s->c.chroma_x_shift;
2768  int chroma_mb_h = h >> s->c.chroma_y_shift;
2769 
2770  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
2771  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
2772 
2773  if(w==16 && h==16)
2774  return s->n_sse_cmp[0](s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2775  s->c.dest[0], s->c.linesize, 16) +
2776  s->n_sse_cmp[1](s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2777  s->c.dest[1], s->c.uvlinesize, chroma_mb_h) +
2778  s->n_sse_cmp[1](s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2779  s->c.dest[2], s->c.uvlinesize, chroma_mb_h);
2780  else
2781  return sse(s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2782  s->c.dest[0], w, h, s->c.linesize) +
2783  sse(s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2784  s->c.dest[1], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize) +
2785  sse(s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2786  s->c.dest[2], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize);
2787 }
2788 
    MPVEncContext *const s = *(void**)arg;

    /* Motion-estimation pre-pass for this thread's slice rows, walked in
     * reverse raster order using the (separately configured) pre-pass
     * diamond size. */
    s->me.pre_pass = 1;
    s->me.dia_size = s->c.avctx->pre_dia_size;
    s->c.first_slice_line = 1;
    for (s->c.mb_y = s->c.end_mb_y - 1; s->c.mb_y >= s->c.start_mb_y; s->c.mb_y--) {
        for (s->c.mb_x = s->c.mb_width - 1; s->c.mb_x >=0 ; s->c.mb_x--)
            ff_pre_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
        s->c.first_slice_line = 0;
    }

    s->me.pre_pass = 0;

    return 0;
}
2806 
    MPVEncContext *const s = *(void**)arg;

    /* Main motion-estimation pass over this thread's slice rows, one
     * macroblock at a time in raster order. */
    s->me.dia_size = s->c.avctx->dia_size;
    s->c.first_slice_line = 1;
    for (s->c.mb_y = s->c.start_mb_y; s->c.mb_y < s->c.end_mb_y; s->c.mb_y++) {
        s->c.mb_x = 0; //for block init below
        ff_init_block_index(&s->c);
        for (s->c.mb_x = 0; s->c.mb_x < s->c.mb_width; s->c.mb_x++) {
            /* advance the four luma block indices to the next MB column */
            s->c.block_index[0] += 2;
            s->c.block_index[1] += 2;
            s->c.block_index[2] += 2;
            s->c.block_index[3] += 2;

            /* compute motion vector & mb_type and store in context */
            if (s->c.pict_type == AV_PICTURE_TYPE_B)
                ff_estimate_b_frame_motion(s, s->c.mb_x, s->c.mb_y);
            else
                ff_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
        }
        s->c.first_slice_line = 0;
    }
    return 0;
}
2831 
2832 static int mb_var_thread(AVCodecContext *c, void *arg){
2833  MPVEncContext *const s = *(void**)arg;
2834 
2835  for (int mb_y = s->c.start_mb_y; mb_y < s->c.end_mb_y; mb_y++) {
2836  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
2837  int xx = mb_x * 16;
2838  int yy = mb_y * 16;
2839  const uint8_t *pix = s->new_pic->data[0] + (yy * s->c.linesize) + xx;
2840  int varc;
2841  int sum = s->mpvencdsp.pix_sum(pix, s->c.linesize);
2842 
2843  varc = (s->mpvencdsp.pix_norm1(pix, s->c.linesize) -
2844  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2845 
2846  s->mb_var [s->c.mb_stride * mb_y + mb_x] = varc;
2847  s->mb_mean[s->c.mb_stride * mb_y + mb_x] = (sum+128)>>8;
2848  s->me.mb_var_sum_temp += varc;
2849  }
2850  }
2851  return 0;
2852 }
2853 
2854 static void write_slice_end(MPVEncContext *const s)
2855 {
2856  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4) {
2857  if (s->partitioned_frame)
2859 
2860  ff_mpeg4_stuffing(&s->pb);
2861  } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2862  s->c.out_format == FMT_MJPEG) {
2864  } else if (CONFIG_SPEEDHQ_ENCODER && s->c.out_format == FMT_SPEEDHQ) {
2866  }
2867 
2868  flush_put_bits(&s->pb);
2869 
2870  if ((s->c.avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2871  s->misc_bits+= get_bits_diff(s);
2872 }
2873 
/**
 * Append one 12-byte macroblock-info record to the side-data area:
 * bit offset of the MB in the bitstream, quantizer, GOB number, MB address
 * within the GOB and the H.263 motion-vector predictor.
 * NOTE(review): presumably this is the H.263 mb_info packet side data
 * consumed by RTP payloaders — confirm against the callers of s->mb_info.
 */
static void write_mb_info(MPVEncContext *const s)
{
    uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
    int offset = put_bits_count(&s->pb);
    int mba = s->c.mb_x + s->c.mb_width * (s->c.mb_y % s->gob_index);
    int gobn = s->c.mb_y / s->gob_index;
    int pred_x, pred_y;
    if (CONFIG_H263_ENCODER)
        ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
    bytestream_put_le32(&ptr, offset);
    bytestream_put_byte(&ptr, s->c.qscale);
    bytestream_put_byte(&ptr, gobn);
    bytestream_put_le16(&ptr, mba);
    bytestream_put_byte(&ptr, pred_x); /* hmv1 */
    bytestream_put_byte(&ptr, pred_y); /* vmv1 */
    /* 4MV not implemented */
    bytestream_put_byte(&ptr, 0); /* hmv2 */
    bytestream_put_byte(&ptr, 0); /* vmv2 */
}
2893 
/**
 * Maintain the mb_info side-data records: once at least s->mb_info bytes of
 * bitstream have been produced since the previous record, reserve room for a
 * new 12-byte record, then (re)write the record for the current macroblock.
 */
static void update_mb_info(MPVEncContext *const s)
{
    if (!s->mb_info)
        return;
    /* crossed another s->mb_info-byte boundary since the last record? */
    if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
        s->mb_info_size += 12;
        s->prev_mb_info = s->last_mb_info;
    }

    s->last_mb_info = put_bytes_count(&s->pb, 0);
    /* always reserve room for at least one record */
    if (!s->mb_info_size)
        s->mb_info_size += 12;
    write_mb_info(s);
}
2908 
/**
 * Grow the shared bitstream output buffer when fewer than @p threshold bytes
 * remain, enlarging it by at least @p size_increase bytes.
 *
 * Reallocation only happens when there is a single slice context and the
 * PutBitContext writes directly into the AVCodecContext's internal byte
 * buffer; the PutBitContext and s->ptr_lastgob are rebased onto the new
 * buffer afterwards.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure or if the new
 *         size would overflow what the bit writer can address,
 *         AVERROR(EINVAL) if less than threshold bytes remain afterwards.
 */
int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
{
    if (put_bytes_left(&s->pb, 0) < threshold
        && s->c.slice_context_count == 1
        && s->pb.buf == s->c.avctx->internal->byte_buffer) {
        int lastgob_pos = s->ptr_lastgob - s->pb.buf;

        uint8_t *new_buffer = NULL;
        int new_buffer_size = 0;

        /* PutBitContext counts bits in an int, so refuse sizes near INT_MAX/8 bytes */
        if ((s->c.avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
            av_log(s->c.avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
            return AVERROR(ENOMEM);
        }

        emms_c();

        av_fast_padded_malloc(&new_buffer, &new_buffer_size,
                              s->c.avctx->internal->byte_buffer_size + size_increase);
        if (!new_buffer)
            return AVERROR(ENOMEM);

        memcpy(new_buffer, s->c.avctx->internal->byte_buffer, s->c.avctx->internal->byte_buffer_size);
        av_free(s->c.avctx->internal->byte_buffer);
        s->c.avctx->internal->byte_buffer      = new_buffer;
        s->c.avctx->internal->byte_buffer_size = new_buffer_size;
        rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
        s->ptr_lastgob = s->pb.buf + lastgob_pos;
    }
    if (put_bytes_left(&s->pb, 0) < threshold)
        return AVERROR(EINVAL);
    return 0;
}
2942 
2943 static int encode_thread(AVCodecContext *c, void *arg){
2944  MPVEncContext *const s = *(void**)arg;
2945  int chr_h = 16 >> s->c.chroma_y_shift;
2946  int i;
2947  MBBackup best_s = { 0 }, backup_s;
2948  uint8_t bit_buf[2][MAX_MB_BYTES];
2949  // + 2 because ff_copy_bits() overreads
2950  uint8_t bit_buf2[2][MAX_PB2_MB_SIZE + 2];
2951  uint8_t bit_buf_tex[2][MAX_AC_TEX_MB_SIZE + 2];
2952  PutBitContext pb[2], pb2[2], tex_pb[2];
2953 
2954  for(i=0; i<2; i++){
2955  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2956  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_PB2_MB_SIZE);
2957  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_AC_TEX_MB_SIZE);
2958  }
2959 
2960  s->last_bits= put_bits_count(&s->pb);
2961  s->mv_bits=0;
2962  s->misc_bits=0;
2963  s->i_tex_bits=0;
2964  s->p_tex_bits=0;
2965  s->i_count=0;
2966 
2967  for(i=0; i<3; i++){
2968  /* init last dc values */
2969  /* note: quant matrix value (8) is implied here */
2970  s->last_dc[i] = 128 << s->c.intra_dc_precision;
2971 
2972  s->encoding_error[i] = 0;
2973  }
2974  if (s->c.codec_id == AV_CODEC_ID_AMV) {
2975  s->last_dc[0] = 128 * 8 / 13;
2976  s->last_dc[1] = 128 * 8 / 14;
2977  s->last_dc[2] = 128 * 8 / 14;
2978 #if CONFIG_MPEG4_ENCODER
2979  } else if (s->partitioned_frame) {
2980  av_assert1(s->c.codec_id == AV_CODEC_ID_MPEG4);
2982 #endif
2983  }
2984  s->mb_skip_run = 0;
2985  memset(s->c.last_mv, 0, sizeof(s->c.last_mv));
2986 
2987  s->last_mv_dir = 0;
2988 
2989  s->c.resync_mb_x = 0;
2990  s->c.resync_mb_y = 0;
2991  s->c.first_slice_line = 1;
2992  s->ptr_lastgob = s->pb.buf;
2993  for (int mb_y_order = s->c.start_mb_y; mb_y_order < s->c.end_mb_y; mb_y_order++) {
2994  int mb_y;
2995  if (CONFIG_SPEEDHQ_ENCODER && s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
2996  int first_in_slice;
2997  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->c.mb_height, &first_in_slice);
2998  if (first_in_slice && mb_y_order != s->c.start_mb_y)
3000  s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024;
3001  } else {
3002  mb_y = mb_y_order;
3003  }
3004  s->c.mb_x = 0;
3005  s->c.mb_y = mb_y;
3006 
3007  ff_set_qscale(&s->c, s->c.qscale);
3008  ff_init_block_index(&s->c);
3009 
3010  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
3011  int mb_type, xy;
3012 // int d;
3013  int dmin= INT_MAX;
3014  int dir;
3015  int size_increase = s->c.avctx->internal->byte_buffer_size/4
3016  + s->c.mb_width*MAX_MB_BYTES;
3017 
3019  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
3020  av_log(s->c.avctx, AV_LOG_ERROR, "encoded frame too large\n");
3021  return -1;
3022  }
3023  if (s->data_partitioning) {
3024  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
3025  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
3026  av_log(s->c.avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3027  return -1;
3028  }
3029  }
3030 
3031  s->c.mb_x = mb_x;
3032  s->c.mb_y = mb_y; // moved into loop, can get changed by H.261
3033  ff_update_block_index(&s->c, 8, 0, s->c.chroma_x_shift);
3034 
3035  if (CONFIG_H261_ENCODER && s->c.codec_id == AV_CODEC_ID_H261)
3037  xy = s->c.mb_y * s->c.mb_stride + s->c.mb_x;
3038  mb_type = s->mb_type[xy];
3039 
3040  /* write gob / video packet header */
3041  if(s->rtp_mode){
3042  int current_packet_size, is_gob_start;
3043 
3044  current_packet_size = put_bytes_count(&s->pb, 1)
3045  - (s->ptr_lastgob - s->pb.buf);
3046 
3047  is_gob_start = s->rtp_payload_size &&
3048  current_packet_size >= s->rtp_payload_size &&
3049  mb_y + mb_x > 0;
3050 
3051  if (s->c.start_mb_y == mb_y && mb_y > 0 && mb_x == 0) is_gob_start = 1;
3052 
3053  switch (s->c.codec_id) {
3054  case AV_CODEC_ID_H263:
3055  case AV_CODEC_ID_H263P:
3056  if (!s->h263_slice_structured)
3057  if (s->c.mb_x || s->c.mb_y % s->gob_index) is_gob_start = 0;
3058  break;
3060  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3062  if (s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO && s->c.mb_y >= 175 ||
3063  s->mb_skip_run)
3064  is_gob_start=0;
3065  break;
3066  case AV_CODEC_ID_MJPEG:
3067  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3068  break;
3069  }
3070 
3071  if(is_gob_start){
3072  if (s->c.start_mb_y != mb_y || mb_x != 0) {
3073  write_slice_end(s);
3074 
3075  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 && s->partitioned_frame)
3077  }
3078 
3079  av_assert2((put_bits_count(&s->pb)&7) == 0);
3080  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3081 
3082  if (s->error_rate && s->c.resync_mb_x + s->c.resync_mb_y > 0) {
3083  int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->c.mb_x + s->c.mb_y;
3084  int d = 100 / s->error_rate;
3085  if(r % d == 0){
3086  current_packet_size=0;
3087  s->pb.buf_ptr= s->ptr_lastgob;
3088  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3089  }
3090  }
3091 
3092  switch (s->c.codec_id) {
3093  case AV_CODEC_ID_MPEG4:
3094  if (CONFIG_MPEG4_ENCODER) {
3098  }
3099  break;
3102  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3105  }
3106  break;
3107 #if CONFIG_H263P_ENCODER
3108  case AV_CODEC_ID_H263P:
3109  if (s->c.dc_val)
3112 #endif
3113  case AV_CODEC_ID_H263:
3114  if (CONFIG_H263_ENCODER) {
3115  if (s->mb_info && put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info)
3116  s->mb_info_size += 12;
3117 
3119  s->prev_mb_info = put_bits_count(&s->pb)/8;
3120  }
3121  break;
3122  }
3123 
3124  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS1) {
3125  int bits= put_bits_count(&s->pb);
3126  s->misc_bits+= bits - s->last_bits;
3127  s->last_bits= bits;
3128  }
3129 
3130  s->ptr_lastgob += current_packet_size;
3131  s->c.first_slice_line = 1;
3132  s->c.resync_mb_x = mb_x;
3133  s->c.resync_mb_y = mb_y;
3134  }
3135  }
3136 
3137  if (s->c.resync_mb_x == s->c.mb_x &&
3138  s->c.resync_mb_y+1 == s->c.mb_y)
3139  s->c.first_slice_line = 0;
3140 
3141  s->c.mb_skipped = 0;
3142  s->dquant=0; //only for QP_RD
3143 
3144  update_mb_info(s);
3145 
3146  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3147  int next_block=0;
3148  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3149 
3150  backup_context_before_encode(&backup_s, s);
3151  backup_s.pb= s->pb;
3152  if (s->data_partitioning) {
3153  backup_s.pb2= s->pb2;
3154  backup_s.tex_pb= s->tex_pb;
3155  }
3156 
3157  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3158  s->c.mv_dir = MV_DIR_FORWARD;
3159  s->c.mv_type = MV_TYPE_16X16;
3160  s->c.mb_intra = 0;
3161  s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3162  s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3163  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3164  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3165  }
3166  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3167  s->c.mv_dir = MV_DIR_FORWARD;
3168  s->c.mv_type = MV_TYPE_FIELD;
3169  s->c.mb_intra = 0;
3170  for(i=0; i<2; i++){
3171  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3172  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3173  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3174  }
3175  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3176  &dmin, &next_block, 0, 0);
3177  }
3178  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3179  s->c.mv_dir = MV_DIR_FORWARD;
3180  s->c.mv_type = MV_TYPE_16X16;
3181  s->c.mb_intra = 0;
3182  s->c.mv[0][0][0] = 0;
3183  s->c.mv[0][0][1] = 0;
3184  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3185  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3186  }
3187  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3188  s->c.mv_dir = MV_DIR_FORWARD;
3189  s->c.mv_type = MV_TYPE_8X8;
3190  s->c.mb_intra = 0;
3191  for(i=0; i<4; i++){
3192  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3193  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3194  }
3195  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3196  &dmin, &next_block, 0, 0);
3197  }
3198  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3199  s->c.mv_dir = MV_DIR_FORWARD;
3200  s->c.mv_type = MV_TYPE_16X16;
3201  s->c.mb_intra = 0;
3202  s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3203  s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3204  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3205  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3206  }
3207  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3208  s->c.mv_dir = MV_DIR_BACKWARD;
3209  s->c.mv_type = MV_TYPE_16X16;
3210  s->c.mb_intra = 0;
3211  s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3212  s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3213  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3214  &dmin, &next_block, s->c.mv[1][0][0], s->c.mv[1][0][1]);
3215  }
3216  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3217  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3218  s->c.mv_type = MV_TYPE_16X16;
3219  s->c.mb_intra = 0;
3220  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3221  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3222  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3223  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3224  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3225  &dmin, &next_block, 0, 0);
3226  }
3227  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3228  s->c.mv_dir = MV_DIR_FORWARD;
3229  s->c.mv_type = MV_TYPE_FIELD;
3230  s->c.mb_intra = 0;
3231  for(i=0; i<2; i++){
3232  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3233  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3234  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3235  }
3236  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3237  &dmin, &next_block, 0, 0);
3238  }
3239  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3240  s->c.mv_dir = MV_DIR_BACKWARD;
3241  s->c.mv_type = MV_TYPE_FIELD;
3242  s->c.mb_intra = 0;
3243  for(i=0; i<2; i++){
3244  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3245  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3246  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3247  }
3248  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3249  &dmin, &next_block, 0, 0);
3250  }
3251  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3252  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3253  s->c.mv_type = MV_TYPE_FIELD;
3254  s->c.mb_intra = 0;
3255  for(dir=0; dir<2; dir++){
3256  for(i=0; i<2; i++){
3257  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3258  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3259  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3260  }
3261  }
3262  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3263  &dmin, &next_block, 0, 0);
3264  }
3265  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3266  s->c.mv_dir = 0;
3267  s->c.mv_type = MV_TYPE_16X16;
3268  s->c.mb_intra = 1;
3269  s->c.mv[0][0][0] = 0;
3270  s->c.mv[0][0][1] = 0;
3271  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3272  &dmin, &next_block, 0, 0);
3273  s->c.mbintra_table[xy] = 1;
3274  }
3275 
3276  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3277  if (best_s.c.mv_type == MV_TYPE_16X16) { //FIXME move 4mv after QPRD
3278  const int last_qp = backup_s.c.qscale;
3279  int qpi, qp, dc[6];
3280  int16_t ac[6][16];
3281  const int mvdir = (best_s.c.mv_dir & MV_DIR_BACKWARD) ? 1 : 0;
3282  static const int dquant_tab[4]={-1,1,-2,2};
3283  int storecoefs = s->c.mb_intra && s->c.dc_val;
3284 
3285  av_assert2(backup_s.dquant == 0);
3286 
3287  //FIXME intra
3288  s->c.mv_dir = best_s.c.mv_dir;
3289  s->c.mv_type = MV_TYPE_16X16;
3290  s->c.mb_intra = best_s.c.mb_intra;
3291  s->c.mv[0][0][0] = best_s.c.mv[0][0][0];
3292  s->c.mv[0][0][1] = best_s.c.mv[0][0][1];
3293  s->c.mv[1][0][0] = best_s.c.mv[1][0][0];
3294  s->c.mv[1][0][1] = best_s.c.mv[1][0][1];
3295 
3296  qpi = s->c.pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3297  for(; qpi<4; qpi++){
3298  int dquant= dquant_tab[qpi];
3299  qp= last_qp + dquant;
3300  if (qp < s->c.avctx->qmin || qp > s->c.avctx->qmax)
3301  continue;
3302  backup_s.dquant= dquant;
3303  if(storecoefs){
3304  for(i=0; i<6; i++){
3305  dc[i] = s->c.dc_val[s->c.block_index[i]];
3306  memcpy(ac[i], s->c.ac_val[s->c.block_index[i]], sizeof(*s->c.ac_val));
3307  }
3308  }
3309 
3310  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3311  &dmin, &next_block, s->c.mv[mvdir][0][0], s->c.mv[mvdir][0][1]);
3312  if (best_s.c.qscale != qp) {
3313  if(storecoefs){
3314  for(i=0; i<6; i++){
3315  s->c.dc_val[s->c.block_index[i]] = dc[i];
3316  memcpy(s->c.ac_val[s->c.block_index[i]], ac[i], sizeof(*s->c.ac_val));
3317  }
3318  }
3319  }
3320  }
3321  }
3322  }
3323  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3324  int mx= s->b_direct_mv_table[xy][0];
3325  int my= s->b_direct_mv_table[xy][1];
3326 
3327  backup_s.dquant = 0;
3328  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3329  s->c.mb_intra = 0;
3330  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3331  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3332  &dmin, &next_block, mx, my);
3333  }
3334  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3335  backup_s.dquant = 0;
3336  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3337  s->c.mb_intra = 0;
3338  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3339  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3340  &dmin, &next_block, 0, 0);
3341  }
3342  if (!best_s.c.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3343  int coded=0;
3344  for(i=0; i<6; i++)
3345  coded |= s->c.block_last_index[i];
3346  if(coded){
3347  int mx,my;
3348  memcpy(s->c.mv, best_s.c.mv, sizeof(s->c.mv));
3349  if (CONFIG_MPEG4_ENCODER && best_s.c.mv_dir & MV_DIRECT) {
3350  mx=my=0; //FIXME find the one we actually used
3351  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3352  } else if (best_s.c.mv_dir & MV_DIR_BACKWARD) {
3353  mx = s->c.mv[1][0][0];
3354  my = s->c.mv[1][0][1];
3355  }else{
3356  mx = s->c.mv[0][0][0];
3357  my = s->c.mv[0][0][1];
3358  }
3359 
3360  s->c.mv_dir = best_s.c.mv_dir;
3361  s->c.mv_type = best_s.c.mv_type;
3362  s->c.mb_intra = 0;
3363 /* s->c.mv[0][0][0] = best_s.mv[0][0][0];
3364  s->c.mv[0][0][1] = best_s.mv[0][0][1];
3365  s->c.mv[1][0][0] = best_s.mv[1][0][0];
3366  s->c.mv[1][0][1] = best_s.mv[1][0][1];*/
3367  backup_s.dquant= 0;
3368  s->skipdct=1;
3369  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3370  &dmin, &next_block, mx, my);
3371  s->skipdct=0;
3372  }
3373  }
3374 
3375  store_context_after_encode(s, &best_s, s->data_partitioning);
3376 
3377  pb_bits_count= put_bits_count(&s->pb);
3378  flush_put_bits(&s->pb);
3379  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3380  s->pb= backup_s.pb;
3381 
3382  if (s->data_partitioning) {
3383  pb2_bits_count= put_bits_count(&s->pb2);
3384  flush_put_bits(&s->pb2);
3385  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3386  s->pb2= backup_s.pb2;
3387 
3388  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3389  flush_put_bits(&s->tex_pb);
3390  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3391  s->tex_pb= backup_s.tex_pb;
3392  }
3393  s->last_bits= put_bits_count(&s->pb);
3394 
3395  if (CONFIG_H263_ENCODER &&
3396  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3398 
3399  if(next_block==0){ //FIXME 16 vs linesize16
3400  s->c.hdsp.put_pixels_tab[0][0](s->c.dest[0], s->c.sc.rd_scratchpad , s->c.linesize ,16);
3401  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[1], s->c.sc.rd_scratchpad + 16*s->c.linesize , s->c.uvlinesize, 8);
3402  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[2], s->c.sc.rd_scratchpad + 16*s->c.linesize + 8, s->c.uvlinesize, 8);
3403  }
3404 
3405  if (s->c.avctx->mb_decision == FF_MB_DECISION_BITS)
3406  mpv_reconstruct_mb(s, s->block);
3407  } else {
3408  int motion_x = 0, motion_y = 0;
3409  s->c.mv_type = MV_TYPE_16X16;
3410  // only one MB-Type possible
3411 
3412  switch(mb_type){
3414  s->c.mv_dir = 0;
3415  s->c.mb_intra = 1;
3416  motion_x= s->c.mv[0][0][0] = 0;
3417  motion_y= s->c.mv[0][0][1] = 0;
3418  s->c.mbintra_table[xy] = 1;
3419  break;
3421  s->c.mv_dir = MV_DIR_FORWARD;
3422  s->c.mb_intra = 0;
3423  motion_x= s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3424  motion_y= s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3425  break;
3427  s->c.mv_dir = MV_DIR_FORWARD;
3428  s->c.mv_type = MV_TYPE_FIELD;
3429  s->c.mb_intra = 0;
3430  for(i=0; i<2; i++){
3431  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3432  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3433  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3434  }
3435  break;
3437  s->c.mv_dir = MV_DIR_FORWARD;
3438  s->c.mv_type = MV_TYPE_8X8;
3439  s->c.mb_intra = 0;
3440  for(i=0; i<4; i++){
3441  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3442  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3443  }
3444  break;
3446  if (CONFIG_MPEG4_ENCODER) {
3448  s->c.mb_intra = 0;
3449  motion_x=s->b_direct_mv_table[xy][0];
3450  motion_y=s->b_direct_mv_table[xy][1];
3451  ff_mpeg4_set_direct_mv(&s->c, motion_x, motion_y);
3452  }
3453  break;
3455  if (CONFIG_MPEG4_ENCODER) {
3457  s->c.mb_intra = 0;
3458  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3459  }
3460  break;
3462  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3463  s->c.mb_intra = 0;
3464  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3465  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3466  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3467  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3468  break;
3470  s->c.mv_dir = MV_DIR_BACKWARD;
3471  s->c.mb_intra = 0;
3472  motion_x= s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3473  motion_y= s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3474  break;
3476  s->c.mv_dir = MV_DIR_FORWARD;
3477  s->c.mb_intra = 0;
3478  motion_x= s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3479  motion_y= s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3480  break;
3482  s->c.mv_dir = MV_DIR_FORWARD;
3483  s->c.mv_type = MV_TYPE_FIELD;
3484  s->c.mb_intra = 0;
3485  for(i=0; i<2; i++){
3486  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3487  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3488  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3489  }
3490  break;
3492  s->c.mv_dir = MV_DIR_BACKWARD;
3493  s->c.mv_type = MV_TYPE_FIELD;
3494  s->c.mb_intra = 0;
3495  for(i=0; i<2; i++){
3496  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3497  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3498  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3499  }
3500  break;
3502  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3503  s->c.mv_type = MV_TYPE_FIELD;
3504  s->c.mb_intra = 0;
3505  for(dir=0; dir<2; dir++){
3506  for(i=0; i<2; i++){
3507  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3508  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3509  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3510  }
3511  }
3512  break;
3513  default:
3514  av_unreachable("There is a case for every CANDIDATE_MB_TYPE_* "
3515  "except CANDIDATE_MB_TYPE_SKIPPED which is never "
3516  "the only candidate (always coupled with INTER) "
3517  "so that it never reaches this switch");
3518  }
3519 
3520  encode_mb(s, motion_x, motion_y);
3521 
3522  // RAL: Update last macroblock type
3523  s->last_mv_dir = s->c.mv_dir;
3524 
3525  if (CONFIG_H263_ENCODER &&
3526  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3528 
3529  mpv_reconstruct_mb(s, s->block);
3530  }
3531 
3532  s->c.cur_pic.qscale_table[xy] = s->c.qscale;
3533 
3534  /* clean the MV table in IPS frames for direct mode in B-frames */
3535  if (s->c.mb_intra /* && I,P,S_TYPE */) {
3536  s->p_mv_table[xy][0]=0;
3537  s->p_mv_table[xy][1]=0;
3538 #if CONFIG_H263_ENCODER
3539  } else if (s->c.h263_pred || s->c.h263_aic) {
3541 #endif
3542  }
3543 
3544  if (s->c.avctx->flags & AV_CODEC_FLAG_PSNR) {
3545  int w= 16;
3546  int h= 16;
3547 
3548  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
3549  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
3550 
3551  s->encoding_error[0] += sse(
3552  s, s->new_pic->data[0] + s->c.mb_x*16 + s->c.mb_y*s->c.linesize*16,
3553  s->c.dest[0], w, h, s->c.linesize);
3554  s->encoding_error[1] += sse(
3555  s, s->new_pic->data[1] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3556  s->c.dest[1], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3557  s->encoding_error[2] += sse(
3558  s, s->new_pic->data[2] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3559  s->c.dest[2], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3560  }
3561  if (s->loop_filter) {
3562  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263)
3563  ff_h263_loop_filter(&s->c);
3564  }
3565  ff_dlog(s->c.avctx, "MB %d %d bits\n",
3566  s->c.mb_x + s->c.mb_y * s->c.mb_stride, put_bits_count(&s->pb));
3567  }
3568  }
3569 
3570 #if CONFIG_MSMPEG4ENC
3571  //not beautiful here but we must write it before flushing so it has to be here
3572  if (s->c.msmpeg4_version != MSMP4_UNUSED && s->c.msmpeg4_version < MSMP4_WMV1 &&
3573  s->c.pict_type == AV_PICTURE_TYPE_I)
3575 #endif
3576 
3577  write_slice_end(s);
3578 
3579  return 0;
3580 }
3581 
3582 #define ADD(field) dst->field += src->field;
3583 #define MERGE(field) dst->field += src->field; src->field=0
/* NOTE(review): the signature line (doxygen 3584) was lost in extraction;
 * presumably merge_context_after_me(dst, src) — the ADD/MERGE macros above
 * expand to `dst->field += src->field` — TODO confirm against upstream.
 * Accumulates per-slice motion-estimation statistics (scene-change score
 * and macroblock variance sums) from a slice context into the main one. */
3585 {
3586  ADD(me.scene_change_score);
3587  ADD(me.mc_mb_var_sum_temp);
3588  ADD(me.mb_var_sum_temp);
3589 }
3590 
/* NOTE(review): the signature line (doxygen 3591) was lost in extraction;
 * presumably merge_context_after_encode(dst, src) — the ADD/MERGE macros
 * above reference `dst` and `src` — TODO confirm against upstream.
 * Folds a slice context's bit/error statistics into the main context and
 * appends the slice's bitstream to the main PutBitContext. */
3592 {
3593  int i;
3594 
3595  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3596  MERGE(dct_count[1]);
3597  ADD(mv_bits);
3598  ADD(i_tex_bits);
3599  ADD(p_tex_bits);
3600  ADD(i_count);
3601  ADD(misc_bits);
3602  ADD(encoding_error[0]);
3603  ADD(encoding_error[1]);
3604  ADD(encoding_error[2]);
3605 
3606  if (dst->dct_error_sum) {
3607  for(i=0; i<64; i++){
3608  MERGE(dct_error_sum[0][i]);
3609  MERGE(dct_error_sum[1][i]);
3610  }
3611  }
3612 
 /* Both bitstreams must be byte-aligned before concatenation, otherwise
  * ff_copy_bits() below would splice mid-byte. */
3613  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3614  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3615  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3616  flush_put_bits(&dst->pb);
3617 }
3618 
/**
 * Pick the quality/lambda for the current picture: either a previously
 * scheduled lambda (m->next_lambda) or one returned by the rate
 * controller when qscale is not fixed.  With adaptive quantization the
 * per-MB qscale table is initialized as well.
 *
 * @param m       main encoder context
 * @param dry_run if nonzero, scheduled state (next_lambda) is not consumed
 * @return 0 on success, -1 if the rate controller produced a negative quality
 */
3619 static int estimate_qp(MPVMainEncContext *const m, int dry_run)
3620 {
3621  MPVEncContext *const s = &m->s;
3622 
3623  if (m->next_lambda){
3624  s->c.cur_pic.ptr->f->quality = m->next_lambda;
3625  if(!dry_run) m->next_lambda= 0;
3626  } else if (!m->fixed_qscale) {
3627  int quality = ff_rate_estimate_qscale(m, dry_run);
3628  s->c.cur_pic.ptr->f->quality = quality;
3629  if (s->c.cur_pic.ptr->f->quality < 0)
3630  return -1;
3631  }
3632 
3633  if(s->adaptive_quant){
3634  init_qscale_tab(s);
3635 
 /* NOTE(review): the codec-specific calls inside the cases (doxygen
  * lines 3639 and 3645) were lost in extraction — presumably the
  * MPEG-4/H.263 per-MB qscale cleanup helpers; confirm upstream. */
3636  switch (s->c.codec_id) {
3637  case AV_CODEC_ID_MPEG4:
3638  if (CONFIG_MPEG4_ENCODER)
3640  break;
3641  case AV_CODEC_ID_H263:
3642  case AV_CODEC_ID_H263P:
3643  case AV_CODEC_ID_FLV1:
3644  if (CONFIG_H263_ENCODER)
3646  break;
3647  }
3648 
3649  s->lambda = s->lambda_table[0];
3650  //FIXME broken
3651  }else
3652  s->lambda = s->c.cur_pic.ptr->f->quality;
3653  update_qscale(m);
3654  return 0;
3655 }
3656 
3657 /* must be called before writing the header */
/* NOTE(review): the signature line (doxygen 3658) was lost in extraction;
 * this body updates the temporal distances used for B-frame prediction:
 * pp_time (distance between the two surrounding non-B pictures) and
 * pb_time (distance from the previous non-B picture to this B picture),
 * derived from the current frame's pts. */
3659 {
3660  av_assert1(s->c.cur_pic.ptr->f->pts != AV_NOPTS_VALUE);
3661  s->c.time = s->c.cur_pic.ptr->f->pts * s->c.avctx->time_base.num;
3662 
3663  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
 /* B picture: offset within the enclosing reference interval */
3664  s->c.pb_time = s->c.pp_time - (s->c.last_non_b_time - s->c.time);
3665  av_assert1(s->c.pb_time > 0 && s->c.pb_time < s->c.pp_time);
3666  }else{
 /* non-B picture: advance the reference-to-reference distance */
3667  s->c.pp_time = s->c.time - s->c.last_non_b_time;
3668  s->c.last_non_b_time = s->c.time;
3669  av_assert1(s->picture_number == 0 || s->c.pp_time > 0);
3670  }
3671 }
3672 
/**
 * Encode one complete picture into the packet buffer.
 *
 * Pipeline, as visible below: update timing state, pick an initial
 * lambda, set up one PutBitContext per slice context, run motion
 * estimation (or variance analysis for I-frames) across all slice
 * threads, merge per-slice ME statistics, optionally promote a P-frame
 * to I on scene change, derive f_code/b_code and clamp long MVs,
 * finalize the quantizer (and MJPEG matrices), write the picture
 * header, then run encode_thread over all slices and merge the
 * resulting bitstreams.
 *
 * NOTE(review): several single lines were lost in extraction (doxygen
 * 3683, 3685, 3783, 3813–3814, 3834) — see inline notes; confirm
 * against upstream before relying on this listing.
 *
 * @param m   main encoder context
 * @param pkt output packet whose data buffer is partitioned among slices
 * @return 0 on success, a negative error code on failure
 */
3673 static int encode_picture(MPVMainEncContext *const m, const AVPacket *pkt)
3674 {
3675  MPVEncContext *const s = &m->s;
3676  int i, ret;
3677  int bits;
3678  int context_count = s->c.slice_context_count;
3679 
3680  /* we need to initialize some time vars before we can encode B-frames */
3681  // RAL: Condition added for MPEG1VIDEO
 /* NOTE(review): the bodies of these two ifs (doxygen 3683 and 3685)
  * were lost in extraction — presumably the frame-distance / MPEG-4
  * time updates; confirm upstream. */
3682  if (s->c.out_format == FMT_MPEG1 || (s->c.h263_pred && s->c.msmpeg4_version == MSMP4_UNUSED))
3684  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4)
3686 
3687 // s->lambda = s->c.cur_pic.ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3688 
3689  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
3690  s->c.no_rounding = s->c.msmpeg4_version >= MSMP4_V3;
3691  } else if (s->c.pict_type != AV_PICTURE_TYPE_B) {
3692  s->c.no_rounding ^= s->flipflop_rounding;
3693  }
3694 
3695  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
3696  ret = estimate_qp(m, 1);
3697  if (ret < 0)
3698  return ret;
3699  ff_get_2pass_fcode(m);
3700  } else if (!(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE)) {
 /* no explicit qscale: reuse the lambda of the previous picture of
  * the same kind */
3701  if (s->c.pict_type == AV_PICTURE_TYPE_B)
3702  s->lambda = m->last_lambda_for[s->c.pict_type];
3703  else
3704  s->lambda = m->last_lambda_for[m->last_non_b_pict_type];
3705  update_qscale(m);
3706  }
3707 
3708  s->c.mb_intra = 0; //for the rate distortion & bit compare functions
 /* Partition the packet buffer proportionally among slice contexts and
  * initialize their bit writers. */
3709  for (int i = 0; i < context_count; i++) {
3710  MPVEncContext *const slice = s->c.enc_contexts[i];
3711  int h = s->c.mb_height;
3712  uint8_t *start = pkt->data + (int64_t)pkt->size * slice->c.start_mb_y / h;
3713  uint8_t *end = pkt->data + (int64_t)pkt->size * slice->c. end_mb_y / h;
3714 
3715  init_put_bits(&slice->pb, start, end - start);
3716 
3717  if (i) {
3718  ret = ff_update_duplicate_context(&slice->c, &s->c);
3719  if (ret < 0)
3720  return ret;
3721  slice->lambda = s->lambda;
3722  slice->lambda2 = s->lambda2;
3723  }
3724  slice->me.temp = slice->me.scratchpad = slice->c.sc.scratchpad_buf;
3725  ff_me_init_pic(slice);
3726  }
3727 
3728  /* Estimate motion for every MB */
3729  if (s->c.pict_type != AV_PICTURE_TYPE_I) {
3730  s->lambda = (s->lambda * m->me_penalty_compensation + 128) >> 8;
3731  s->lambda2 = (s->lambda2 * (int64_t) m->me_penalty_compensation + 128) >> 8;
3732  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
3733  if ((m->me_pre && m->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3734  m->me_pre == 2) {
3735  s->c.avctx->execute(s->c.avctx, pre_estimate_motion_thread,
3736  &s->c.enc_contexts[0], NULL,
3737  context_count, sizeof(void*));
3738  }
3739  }
3740 
3741  s->c.avctx->execute(s->c.avctx, estimate_motion_thread, &s->c.enc_contexts[0],
3742  NULL, context_count, sizeof(void*));
3743  }else /* if (s->c.pict_type == AV_PICTURE_TYPE_I) */{
3744  /* I-Frame */
3745  for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
3746  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3747 
3748  if (!m->fixed_qscale) {
3749  /* finding spatial complexity for I-frame rate control */
3750  s->c.avctx->execute(s->c.avctx, mb_var_thread, &s->c.enc_contexts[0],
3751  NULL, context_count, sizeof(void*));
3752  }
3753  }
3754  for(i=1; i<context_count; i++){
3755  merge_context_after_me(s, s->c.enc_contexts[i]);
3756  }
3757  m->mc_mb_var_sum = s->me.mc_mb_var_sum_temp;
3758  m->mb_var_sum = s->me. mb_var_sum_temp;
3759  emms_c();
3760 
 /* Scene change: re-encode this P picture as an I picture. */
3761  if (s->me.scene_change_score > m->scenechange_threshold &&
3762  s->c.pict_type == AV_PICTURE_TYPE_P) {
3763  s->c.pict_type = AV_PICTURE_TYPE_I;
3764  for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
3765  s->mb_type[i] = CANDIDATE_MB_TYPE_INTRA;
3766  if (s->c.msmpeg4_version >= MSMP4_V3)
3767  s->c.no_rounding = 1;
3768  ff_dlog(s->c.avctx, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3769  m->mb_var_sum, m->mc_mb_var_sum);
3770  }
3771 
 /* Derive MV range codes (f_code/b_code) and clamp out-of-range MVs. */
3772  if (!s->umvplus) {
3773  if (s->c.pict_type == AV_PICTURE_TYPE_P || s->c.pict_type == AV_PICTURE_TYPE_S) {
3774  s->f_code = ff_get_best_fcode(m, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3775 
3776  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3777  int a,b;
3778  a = ff_get_best_fcode(m, s->c.p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3779  b = ff_get_best_fcode(m, s->c.p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3780  s->f_code = FFMAX3(s->f_code, a, b);
3781  }
3782 
 /* NOTE(review): doxygen line 3783 lost in extraction here. */
3784  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3785  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3786  int j;
3787  for(i=0; i<2; i++){
3788  for(j=0; j<2; j++)
3789  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3790  s->c.p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3791  }
3792  }
3793  } else if (s->c.pict_type == AV_PICTURE_TYPE_B) {
3794  int a, b;
3795 
3796  a = ff_get_best_fcode(m, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3797  b = ff_get_best_fcode(m, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3798  s->f_code = FFMAX(a, b);
3799 
3800  a = ff_get_best_fcode(m, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3801  b = ff_get_best_fcode(m, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3802  s->b_code = FFMAX(a, b);
3803 
3804  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3805  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3806  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3807  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3808  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3809  int dir, j;
3810  for(dir=0; dir<2; dir++){
3811  for(i=0; i<2; i++){
3812  for(j=0; j<2; j++){
 /* NOTE(review): doxygen lines 3813–3814 lost in extraction —
  * presumably the declaration of `type` used below; confirm. */
3815  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3816  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3817  }
3818  }
3819  }
3820  }
3821  }
3822  }
3823 
3824  ret = estimate_qp(m, 0);
3825  if (ret < 0)
3826  return ret;
3827 
3828  if (s->c.qscale < 3 && s->max_qcoeff <= 128 &&
3829  s->c.pict_type == AV_PICTURE_TYPE_I &&
3830  !(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE))
3831  s->c.qscale = 3; //reduce clipping problems
3832 
3833  if (s->c.out_format == FMT_MJPEG) {
 /* NOTE(review): the call opening this statement (doxygen 3834) was
  * lost in extraction; only its argument continuation remains. */
3835  (7 + s->c.qscale) / s->c.qscale, 65535);
3836  if (ret < 0)
3837  return ret;
3838 
3839  if (s->c.codec_id != AV_CODEC_ID_AMV) {
3840  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3841  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3842 
3843  if (s->c.avctx->intra_matrix) {
3844  chroma_matrix =
3845  luma_matrix = s->c.avctx->intra_matrix;
3846  }
3847  if (s->c.avctx->chroma_intra_matrix)
3848  chroma_matrix = s->c.avctx->chroma_intra_matrix;
3849 
3850  /* for mjpeg, we do include qscale in the matrix */
3851  for (int i = 1; i < 64; i++) {
3852  int j = s->c.idsp.idct_permutation[i];
3853 
3854  s->c.chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->c.qscale) >> 3);
3855  s->c. intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->c.qscale) >> 3);
3856  }
3857  s->c.y_dc_scale_table =
3858  s->c.c_dc_scale_table = ff_mpeg12_dc_scale_table[0];
3859  s->c.chroma_intra_matrix[0] = s->c.intra_matrix[0] = 8;
3860  } else {
3861  static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3862  static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3863  for (int i = 1; i < 64; i++) {
3864  int j = s->c.idsp.idct_permutation[ff_zigzag_direct[i]];
3865 
3866  s->c.intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3867  s->c.chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3868  }
3869  s->c.y_dc_scale_table = y;
3870  s->c.c_dc_scale_table = c;
3871  s->c.intra_matrix[0] = 13;
3872  s->c.chroma_intra_matrix[0] = 14;
3873  }
3874  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3875  s->c.intra_matrix, s->intra_quant_bias, 8, 8, 1);
3876  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3877  s->c.chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3878  s->c.qscale = 8;
3879  }
3880 
3881  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
3882  s->c.cur_pic.ptr->f->flags |= AV_FRAME_FLAG_KEY;
3883  } else {
3884  s->c.cur_pic.ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
3885  }
3886  s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;
3887 
3888  if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
3889  m->picture_in_gop_number = 0;
3890 
 /* Write the picture header and account its bits separately. */
3891  s->c.mb_x = s->c.mb_y = 0;
3892  s->last_bits= put_bits_count(&s->pb);
3893  ret = m->encode_picture_header(m);
3894  if (ret < 0)
3895  return ret;
3896  bits= put_bits_count(&s->pb);
3897  m->header_bits = bits - s->last_bits;
3898 
3899  for(i=1; i<context_count; i++){
3900  update_duplicate_context_after_me(s->c.enc_contexts[i], s);
3901  }
3902  s->c.avctx->execute(s->c.avctx, encode_thread, &s->c.enc_contexts[0],
3903  NULL, context_count, sizeof(void*));
3904  for(i=1; i<context_count; i++){
3905  if (s->pb.buf_end == s->c.enc_contexts[i]->pb.buf)
3906  set_put_bits_buffer_size(&s->pb, FFMIN(s->c.enc_contexts[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3907  merge_context_after_encode(s, s->c.enc_contexts[i]);
3908  }
3909  emms_c();
3910  return 0;
3911 }
3912 
3913 static inline void denoise_dct(MPVEncContext *const s, int16_t block[])
3914 {
3915  if (!s->dct_error_sum)
3916  return;
3917 
3918  const int intra = s->c.mb_intra;
3919  s->dct_count[intra]++;
3920  s->mpvencdsp.denoise_dct(block, s->dct_error_sum[intra], s->dct_offset[intra]);
3921 }
3922 
3924  int16_t *block, int n,
3925  int qscale, int *overflow){
3926  const int *qmat;
3927  const uint16_t *matrix;
3928  const uint8_t *scantable;
3929  const uint8_t *perm_scantable;
3930  int max=0;
3931  unsigned int threshold1, threshold2;
3932  int bias=0;
3933  int run_tab[65];
3934  int level_tab[65];
3935  int score_tab[65];
3936  int survivor[65];
3937  int survivor_count;
3938  int last_run=0;
3939  int last_level=0;
3940  int last_score= 0;
3941  int last_i;
3942  int coeff[2][64];
3943  int coeff_count[64];
3944  int qmul, qadd, start_i, last_non_zero, i, dc;
3945  const int esc_length= s->ac_esc_length;
3946  const uint8_t *length, *last_length;
3947  const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3948  int mpeg2_qscale;
3949 
3950  s->fdsp.fdct(block);
3951 
3952  denoise_dct(s, block);
3953 
3954  qmul= qscale*16;
3955  qadd= ((qscale-1)|1)*8;
3956 
3957  if (s->c.q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3958  else mpeg2_qscale = qscale << 1;
3959 
3960  if (s->c.mb_intra) {
3961  int q;
3962  scantable = s->c.intra_scantable.scantable;
3963  perm_scantable = s->c.intra_scantable.permutated;
3964  if (!s->c.h263_aic) {
3965  if (n < 4)
3966  q = s->c.y_dc_scale;
3967  else
3968  q = s->c.c_dc_scale;
3969  q = q << 3;
3970  } else{
3971  /* For AIC we skip quant/dequant of INTRADC */
3972  q = 1 << 3;
3973  qadd=0;
3974  }
3975 
3976  /* note: block[0] is assumed to be positive */
3977  block[0] = (block[0] + (q >> 1)) / q;
3978  start_i = 1;
3979  last_non_zero = 0;
3980  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3981  matrix = n < 4 ? s->c.intra_matrix : s->c.chroma_intra_matrix;
3982  if (s->mpeg_quant || s->c.out_format == FMT_MPEG1 || s->c.out_format == FMT_MJPEG)
3983  bias= 1<<(QMAT_SHIFT-1);
3984 
3985  if (n > 3 && s->intra_chroma_ac_vlc_length) {
3986  length = s->intra_chroma_ac_vlc_length;
3987  last_length= s->intra_chroma_ac_vlc_last_length;
3988  } else {
3989  length = s->intra_ac_vlc_length;
3990  last_length= s->intra_ac_vlc_last_length;
3991  }
3992  } else {
3993  scantable = s->c.inter_scantable.scantable;
3994  perm_scantable = s->c.inter_scantable.permutated;
3995  start_i = 0;
3996  last_non_zero = -1;
3997  qmat = s->q_inter_matrix[qscale];
3998  matrix = s->c.inter_matrix;
3999  length = s->inter_ac_vlc_length;
4000  last_length= s->inter_ac_vlc_last_length;
4001  }
4002  last_i= start_i;
4003 
4004  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4005  threshold2= (threshold1<<1);
4006 
4007  for(i=63; i>=start_i; i--) {
4008  const int j = scantable[i];
4009  int64_t level = (int64_t)block[j] * qmat[j];
4010 
4011  if(((uint64_t)(level+threshold1))>threshold2){
4012  last_non_zero = i;
4013  break;
4014  }
4015  }
4016 
4017  for(i=start_i; i<=last_non_zero; i++) {
4018  const int j = scantable[i];
4019  int64_t level = (int64_t)block[j] * qmat[j];
4020 
4021 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4022 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4023  if(((uint64_t)(level+threshold1))>threshold2){
4024  if(level>0){
4025  level= (bias + level)>>QMAT_SHIFT;
4026  coeff[0][i]= level;
4027  coeff[1][i]= level-1;
4028 // coeff[2][k]= level-2;
4029  }else{
4030  level= (bias - level)>>QMAT_SHIFT;
4031  coeff[0][i]= -level;
4032  coeff[1][i]= -level+1;
4033 // coeff[2][k]= -level+2;
4034  }
4035  coeff_count[i]= FFMIN(level, 2);
4036  av_assert2(coeff_count[i]);
4037  max |=level;
4038  }else{
4039  coeff[0][i]= (level>>31)|1;
4040  coeff_count[i]= 1;
4041  }
4042  }
4043 
4044  *overflow= s->max_qcoeff < max; //overflow might have happened
4045 
4046  if(last_non_zero < start_i){
4047  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4048  return last_non_zero;
4049  }
4050 
4051  score_tab[start_i]= 0;
4052  survivor[0]= start_i;
4053  survivor_count= 1;
4054 
4055  for(i=start_i; i<=last_non_zero; i++){
4056  int level_index, j, zero_distortion;
4057  int dct_coeff= FFABS(block[ scantable[i] ]);
4058  int best_score=256*256*256*120;
4059 
4060  if (s->fdsp.fdct == ff_fdct_ifast)
4061  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4062  zero_distortion= dct_coeff*dct_coeff;
4063 
4064  for(level_index=0; level_index < coeff_count[i]; level_index++){
4065  int distortion;
4066  int level= coeff[level_index][i];
4067  const int alevel= FFABS(level);
4068  int unquant_coeff;
4069 
4070  av_assert2(level);
4071 
4072  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4073  unquant_coeff= alevel*qmul + qadd;
4074  } else if (s->c.out_format == FMT_MJPEG) {
4075  j = s->c.idsp.idct_permutation[scantable[i]];
4076  unquant_coeff = alevel * matrix[j] * 8;
4077  }else{ // MPEG-1
4078  j = s->c.idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4079  if (s->c.mb_intra) {
4080  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4081  unquant_coeff = (unquant_coeff - 1) | 1;
4082  }else{
4083  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4084  unquant_coeff = (unquant_coeff - 1) | 1;
4085  }
4086  unquant_coeff<<= 3;
4087  }
4088 
4089  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4090  level+=64;
4091  if((level&(~127)) == 0){
4092  for(j=survivor_count-1; j>=0; j--){
4093  int run= i - survivor[j];
4094  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4095  score += score_tab[i-run];
4096 
4097  if(score < best_score){
4098  best_score= score;
4099  run_tab[i+1]= run;
4100  level_tab[i+1]= level-64;
4101  }
4102  }
4103 
4104  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4105  for(j=survivor_count-1; j>=0; j--){
4106  int run= i - survivor[j];
4107  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4108  score += score_tab[i-run];
4109  if(score < last_score){
4110  last_score= score;
4111  last_run= run;
4112  last_level= level-64;
4113  last_i= i+1;
4114  }
4115  }
4116  }
4117  }else{
4118  distortion += esc_length*lambda;
4119  for(j=survivor_count-1; j>=0; j--){
4120  int run= i - survivor[j];
4121  int score= distortion + score_tab[i-run];
4122 
4123  if(score < best_score){
4124  best_score= score;
4125  run_tab[i+1]= run;
4126  level_tab[i+1]= level-64;
4127  }
4128  }
4129 
4130  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4131  for(j=survivor_count-1; j>=0; j--){
4132  int run= i - survivor[j];
4133  int score= distortion + score_tab[i-run];
4134  if(score < last_score){
4135  last_score= score;
4136  last_run= run;
4137  last_level= level-64;
4138  last_i= i+1;
4139  }
4140  }
4141  }
4142  }
4143  }
4144 
4145  score_tab[i+1]= best_score;
4146 
4147  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4148  if(last_non_zero <= 27){
4149  for(; survivor_count; survivor_count--){
4150  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4151  break;
4152  }
4153  }else{
4154  for(; survivor_count; survivor_count--){
4155  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4156  break;
4157  }
4158  }
4159 
4160  survivor[ survivor_count++ ]= i+1;
4161  }
4162 
4163  if (s->c.out_format != FMT_H263 && s->c.out_format != FMT_H261) {
4164  last_score= 256*256*256*120;
4165  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4166  int score= score_tab[i];
4167  if (i)
4168  score += lambda * 2; // FIXME more exact?
4169 
4170  if(score < last_score){
4171  last_score= score;
4172  last_i= i;
4173  last_level= level_tab[i];
4174  last_run= run_tab[i];
4175  }
4176  }
4177  }
4178 
4179  s->coded_score[n] = last_score;
4180 
4181  dc= FFABS(block[0]);
4182  last_non_zero= last_i - 1;
4183  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4184 
4185  if(last_non_zero < start_i)
4186  return last_non_zero;
4187 
4188  if(last_non_zero == 0 && start_i == 0){
4189  int best_level= 0;
4190  int best_score= dc * dc;
4191 
4192  for(i=0; i<coeff_count[0]; i++){
4193  int level= coeff[i][0];
4194  int alevel= FFABS(level);
4195  int unquant_coeff, score, distortion;
4196 
4197  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4198  unquant_coeff= (alevel*qmul + qadd)>>3;
4199  } else{ // MPEG-1
4200  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4201  unquant_coeff = (unquant_coeff - 1) | 1;
4202  }
4203  unquant_coeff = (unquant_coeff + 4) >> 3;
4204  unquant_coeff<<= 3 + 3;
4205 
4206  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4207  level+=64;
4208  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4209  else score= distortion + esc_length*lambda;
4210 
4211  if(score < best_score){
4212  best_score= score;
4213  best_level= level - 64;
4214  }
4215  }
4216  block[0]= best_level;
4217  s->coded_score[n] = best_score - dc*dc;
4218  if(best_level == 0) return -1;
4219  else return last_non_zero;
4220  }
4221 
4222  i= last_i;
4223  av_assert2(last_level);
4224 
4225  block[ perm_scantable[last_non_zero] ]= last_level;
4226  i -= last_run + 1;
4227 
4228  for(; i>start_i; i -= run_tab[i] + 1){
4229  block[ perm_scantable[i-1] ]= level_tab[i];
4230  }
4231 
4232  return last_non_zero;
4233 }
4234 
4235 static DECLARE_ALIGNED(16, int16_t, basis)[64][64];
4236 
4237 static void build_basis(uint8_t *perm){
4238  int i, j, x, y;
4239  emms_c();
4240  for(i=0; i<8; i++){
4241  for(j=0; j<8; j++){
4242  for(y=0; y<8; y++){
4243  for(x=0; x<8; x++){
4244  double s= 0.25*(1<<BASIS_SHIFT);
4245  int index= 8*i + j;
4246  int perm_index= perm[index];
4247  if(i==0) s*= sqrt(0.5);
4248  if(j==0) s*= sqrt(0.5);
4249  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4250  }
4251  }
4252  }
4253  }
4254 }
4255 
/**
 * Iteratively refine an already-quantized 8x8 block (quantizer noise
 * shaping): coefficients are tweaked by +-1 as long as a change lowers the
 * combined cost of lambda-weighted VLC bits and the reconstruction error
 * measured against orig[] via the DCT basis table.
 *
 * @param s      encoder context; scantables, VLC length tables and
 *               block_last_index are read from it
 * @param block  quantized coefficients, updated in place
 * @param weight per-coefficient perceptual weights; remapped in place to the
 *               16..63 range consumed by try_8x8basis()
 * @param orig   pixel-domain reference the reconstruction is compared with
 * @param n      block index (< 4: luma, otherwise chroma)
 * @param qscale quantizer scale for this block
 * @return index of the last non-zero coefficient after refinement
 */
static int dct_quantize_refine(MPVEncContext *const s, //FIXME breaks denoise?
                        int16_t *block, int16_t *weight, int16_t *orig,
                        int n, int qscale){
    DECLARE_ALIGNED(16, int16_t, rem)[64];  // pixel-domain residual, RECON_SHIFT fixed point
    LOCAL_ALIGNED_16(int16_t, d1, [64]);    // DCT of the weighted residual (gradient analysis)
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
// unsigned int threshold1, threshold2;
// int bias=0;
    int run_tab[65];                        // zero-run lengths preceding each nonzero coefficient
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const uint8_t *length;                  // VLC bit lengths for non-final coefficients
    const uint8_t *last_length;             // VLC bit lengths for the last coefficient
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->c.mb_intra is true

    /* The DCT basis table is built lazily on first use. */
    if(basis[0][0] == 0)
        build_basis(s->c.idsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->c.mb_intra) {
        scantable = s->c.intra_scantable.scantable;
        perm_scantable = s->c.intra_scantable.permutated;
        if (!s->c.h263_aic) {
            if (n < 4)
                q = s->c.y_dc_scale;
            else
                q = s->c.c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;                        // DC is handled separately for intra blocks
//        if (s->mpeg_quant || s->c.out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable = s->c.inter_scantable.scantable;
        perm_scantable = s->c.inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->c.block_last_index[n];

    /* Initialize the residual with the (rounded) negated source; the
     * dequantized coefficients are then accumulated onto it below. */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    /* Remap weights into the 16..63 range and accumulate sum of squares
     * for the lambda scaling below. */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda = sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    /* Build the initial run-length table and accumulate the dequantized
     * coefficients onto the residual. */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    /* Greedy refinement: repeat until no single +-1 change improves the
     * rate-distortion score. */
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        /* Gradient analysis: DCT of the weighted residual is used below to
         * reject changes whose sign agrees with the residual gradient. */
        if(analyze_gradient){
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        /* Intra only (start_i != 0): try changing the DC coefficient. */
        if(start_i){
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->c.mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        /* Try changing every AC coefficient by +-1, scoring the VLC bit
         * delta plus the distortion delta from try_8x8basis(). */
        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* nonzero -> nonzero: only the code length of this
                         * coefficient changes */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* zero -> +-1: a run is split in two (or the block
                         * is extended past the old last coefficient) */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* +-1 -> zero: two runs merge (or the block shrinks) */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            /* Apply the best change, then refresh last_non_zero, the
             * run-length table and the residual. */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;
        }
    }

    return last_non_zero;
}
4571 
/**
 * Permute an 8x8 block of coefficients in place.
 * @param block       the coefficients to permute
 * @param permutation maps each raster position to its permuted position
 * @param last        last non-zero coefficient in scantable order, used to
 *                    speed the permutation up
 * @param scantable   the scantable in use; only used to speed the
 *                    permutation up, the block is not (inverse) permutated
 *                    to scantable order!
 */
void ff_block_permute(int16_t *block, const uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t saved[64];

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* First pass: stash and clear every coefficient that may move. */
    for (int i = 0; i <= last; i++) {
        const int pos = scantable[i];
        saved[pos] = block[pos];
        block[pos] = 0;
    }

    /* Second pass: scatter the stashed values to their permuted slots. */
    for (int i = 0; i <= last; i++) {
        const int pos = scantable[i];
        block[permutation[pos]] = saved[pos];
    }
}
4607 
4608 static int dct_quantize_c(MPVEncContext *const s,
4609  int16_t *block, int n,
4610  int qscale, int *overflow)
4611 {
4612  int i, last_non_zero, q, start_i;
4613  const int *qmat;
4614  const uint8_t *scantable;
4615  int bias;
4616  int max=0;
4617  unsigned int threshold1, threshold2;
4618 
4619  s->fdsp.fdct(block);
4620 
4621  denoise_dct(s, block);
4622 
4623  if (s->c.mb_intra) {
4624  scantable = s->c.intra_scantable.scantable;
4625  if (!s->c.h263_aic) {
4626  if (n < 4)
4627  q = s->c.y_dc_scale;
4628  else
4629  q = s->c.c_dc_scale;
4630  q = q << 3;
4631  } else
4632  /* For AIC we skip quant/dequant of INTRADC */
4633  q = 1 << 3;
4634 
4635  /* note: block[0] is assumed to be positive */
4636  block[0] = (block[0] + (q >> 1)) / q;
4637  start_i = 1;
4638  last_non_zero = 0;
4639  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4640  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4641  } else {
4642  scantable = s->c.inter_scantable.scantable;
4643  start_i = 0;
4644  last_non_zero = -1;
4645  qmat = s->q_inter_matrix[qscale];
4646  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4647  }
4648  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4649  threshold2= (threshold1<<1);
4650  for(i=63;i>=start_i;i--) {
4651  const int j = scantable[i];
4652  int64_t level = (int64_t)block[j] * qmat[j];
4653 
4654  if(((uint64_t)(level+threshold1))>threshold2){
4655  last_non_zero = i;
4656  break;
4657  }else{
4658  block[j]=0;
4659  }
4660  }
4661  for(i=start_i; i<=last_non_zero; i++) {
4662  const int j = scantable[i];
4663  int64_t level = (int64_t)block[j] * qmat[j];
4664 
4665 // if( bias+level >= (1<<QMAT_SHIFT)
4666 // || bias-level >= (1<<QMAT_SHIFT)){
4667  if(((uint64_t)(level+threshold1))>threshold2){
4668  if(level>0){
4669  level= (bias + level)>>QMAT_SHIFT;
4670  block[j]= level;
4671  }else{
4672  level= (bias - level)>>QMAT_SHIFT;
4673  block[j]= -level;
4674  }
4675  max |=level;
4676  }else{
4677  block[j]=0;
4678  }
4679  }
4680  *overflow= s->max_qcoeff < max; //overflow might have happened
4681 
4682  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4683  if (s->c.idsp.perm_type != FF_IDCT_PERM_NONE)
4684  ff_block_permute(block, s->c.idsp.idct_permutation,
4685  scantable, last_non_zero);
4686 
4687  return last_non_zero;
4688 }
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
encode_frame
static int encode_frame(AVCodecContext *c, const AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1468
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3923
put_dct
static void put_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1125
MPV_MAX_PLANES
#define MPV_MAX_PLANES
Definition: mpegpicture.h:31
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MPVEncContext *const s, int type)
Definition: motion_est.c:1655
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:359
FF_MATRIX_TYPE_INTRA
#define FF_MATRIX_TYPE_INTRA
Check if the elements of codec context matrices (intra_matrix, inter_matrix or chroma_intra_matrix) a...
Definition: encode.h:118
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:83
ff_encode_reordered_opaque
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
Definition: encode.c:219
mpegvideo_unquantize.h
MPVMainEncContext::me_pre
int me_pre
prepass for motion estimation
Definition: mpegvideoenc.h:267
ff_fix_long_mvs
void ff_fix_long_mvs(MPVEncContext *const s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1704
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:433
MPVMainEncContext::fcode_tab
const uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideoenc.h:242
MPVMainEncContext::fixed_qscale
int fixed_qscale
fixed qscale if non zero
Definition: mpegvideoenc.h:260
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegvideoenc.h:301
encode_mb_hq
static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2690
MPVMainEncContext::frame_skip_cmp_fn
me_cmp_func frame_skip_cmp_fn
Definition: mpegvideoenc.h:249
MPVMainEncContext::bit_rate
int64_t bit_rate
Definition: mpegvideoenc.h:254
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MPVEncContext *const s, int n, int threshold)
Definition: mpegvideo_enc.c:2132
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:172
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
h263data.h
init_unquantize
static av_cold void init_unquantize(MPVEncContext *const s2, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:311
ff_mpv_enc_class
const AVClass ff_mpv_enc_class
Definition: mpegvideo_enc.c:102
encode_mb
static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2593
level
uint8_t level
Definition: svq3.c:208
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1487
av_clip
#define av_clip
Definition: common.h:100
MPVEncContext
Definition: mpegvideoenc.h:46
MPVEncContext::b_code
int b_code
backward MV resolution for B-frames
Definition: mpegvideoenc.h:81
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:524
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:225
ALIGN
#define ALIGN
Definition: hashtable.c:32
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:823
init_qscale_tab
static void init_qscale_tab(MPVEncContext *const s)
init s->c.cur_pic.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:242
ff_mpv_init_duplicate_contexts
av_cold int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:99
update_noise_reduction
static void update_noise_reduction(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1864
MBBackup::mv_bits
int mv_bits
Definition: mpegvideo_enc.c:2615
MPVUnquantDSPContext::dct_unquantize_mpeg2_intra
void(* dct_unquantize_mpeg2_intra)(const MPVContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:39
mem_internal.h
MPVMainEncContext::dct_error_sum_base
char * dct_error_sum_base
backs dct_error_sum
Definition: mpegvideoenc.h:273
ff_me_init
av_cold int ff_me_init(MotionEstContext *c, AVCodecContext *avctx, const MECmpContext *mecc, int mpvenc)
Definition: motion_est.c:309
MBBackup::misc_bits
int misc_bits
Definition: mpegvideo_enc.c:2615
MpegEncContext::dc_val
int16_t * dc_val
used for H.263 AIC/MPEG-4 DC prediction and ER
Definition: mpegvideo.h:141
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:206
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
BUF_BITS
#define BUF_BITS
Definition: put_bits.h:47
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1289
set_frame_distances
static void set_frame_distances(MPVEncContext *const s)
Definition: mpegvideo_enc.c:3658
thread.h
frame_start
static void frame_start(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1886
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
ff_speedhq_end_slice
void ff_speedhq_end_slice(MPVEncContext *const s)
Definition: speedhqenc.c:118
MBBackup::block_last_index
int block_last_index[8]
Definition: mpegvideo_enc.c:2610
estimate_qp
static int estimate_qp(MPVMainEncContext *const m, int dry_run)
Definition: mpegvideo_enc.c:3619
ff_msmpeg4_encode_init
av_cold void ff_msmpeg4_encode_init(MPVMainEncContext *const m)
Definition: msmpeg4enc.c:673
matrix
Definition: vc1dsp.c:43
src1
const pixel * src1
Definition: h264pred_template.c:420
MPVEncContext::c
MpegEncContext c
the common base context
Definition: mpegvideoenc.h:47
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:213
MBBackup::last_mv
int last_mv[2][2][2]
Definition: mpegvideo_enc.c:2606
MPVMainEncContext::total_bits
int64_t total_bits
Definition: mpegvideoenc.h:255
mpegvideoenc.h
int64_t
long long int64_t
Definition: coverity.c:34
MBBackup::c
struct MBBackup::@224 c
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
ff_dct_encode_init
av_cold void ff_dct_encode_init(MPVEncContext *const s)
Definition: mpegvideo_enc.c:299
MPVMainEncContext::noise_reduction
int noise_reduction
Definition: mpegvideoenc.h:233
COPY
#define COPY(a)
ff_me_init_pic
void ff_me_init_pic(MPVEncContext *const s)
Definition: motion_est.c:371
h263enc.h
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4235
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:956
estimate_best_b_count
static int estimate_best_b_count(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1489
MPVMainEncContext::last_lambda_for
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideoenc.h:261
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:165
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2807
ff_clean_h263_qscales
void ff_clean_h263_qscales(MPVEncContext *s)
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:816
out_size
static int out_size
Definition: movenc.c:56
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:170
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
sse
static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2741
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegvideoenc.h:294
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:139
MPVMainEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideoenc.h:245
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:434
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:154
INTERLACED_DCT
#define INTERLACED_DCT(s)
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:536
AVFrame::width
int width
Definition: frame.h:506
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:191
internal.h
MBBackup::last_bits
int last_bits
Definition: mpegvideo_enc.c:2615
AVPacket::data
uint8_t * data
Definition: packet.h:595
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
Definition: packet.c:380
AVOption
AVOption.
Definition: opt.h:429
encode.h
b
#define b
Definition: input.c:43
put_bytes_count
static int put_bytes_count(const PutBitContext *s, int round_up)
Definition: put_bits.h:110
MPVEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideoenc.h:52
data
const char data[16]
Definition: mxf.c:149
MPVMainEncContext::dts_delta
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
Definition: mpegvideoenc.h:219
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
write_slice_end
static void write_slice_end(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2854
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:472
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:613
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideoenc.h:311
max
#define max(a, b)
Definition: cuda_runtime.h:33
ff_mpeg12_dc_scale_table
const uint8_t ff_mpeg12_dc_scale_table[4][32]
Definition: mpegvideodata.c:33
mpegvideo.h
mathematics.h
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
sqr
static double sqr(double in)
Definition: af_afwtdn.c:872
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:306
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2789
get_visual_weight
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2222
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:224
COPY_CONTEXT
#define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE)
Definition: mpegvideo_enc.c:2622
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:944
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:56
MPVMainEncContext::gop_size
int gop_size
Definition: mpegvideoenc.h:206
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1253
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:331
MPVMainEncContext::mb_var_sum
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegvideoenc.h:269
mpegutils.h
pix
enum AVPixelFormat pix
Definition: ohcodec.c:55
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:217
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:583
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:650
AVCodecContext::mb_cmp
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:870
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:74
MPVMainEncContext::encode_picture_header
int(* encode_picture_header)(struct MPVMainEncContext *m)
Definition: mpegvideoenc.h:251
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegvideoenc.h:305
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:455
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:169
MECmpContext::sum_abs_dctelem
int(* sum_abs_dctelem)(const int16_t *block)
Definition: me_cmp.h:51
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
MBBackup::i_tex_bits
int i_tex_bits
Definition: mpegvideo_enc.c:2615
MPVMainEncContext::coded_picture_number
int coded_picture_number
used to set pic->coded_picture_number
Definition: mpegvideoenc.h:210
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
set_bframe_chain_length
static int set_bframe_chain_length(MPVMainEncContext *const m)
Determines whether an input picture is discarded or not and if not determines the length of the next ...
Definition: mpegvideo_enc.c:1641
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:380
mpv_reconstruct_mb
static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
Performs dequantization and IDCT (if necessary)
Definition: mpegvideo_enc.c:1145
MBBackup::tex_pb
PutBitContext tex_pb
Definition: mpegvideo_enc.c:2619
mpeg4videoenc.h
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:886
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1903
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:339
sp5x.h
MBBackup::mb_skip_run
int mb_skip_run
Definition: mpegvideo_enc.c:2613
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegvideo.h:58
init_slice_buffers
static av_cold int init_slice_buffers(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:500
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:57
FDCTDSPContext
Definition: fdctdsp.h:28
MPVMainEncContext::b_sensitivity
int b_sensitivity
Definition: mpegvideoenc.h:228
faandct.h
Floating point AAN DCT.
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: packet.c:197
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:55
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:843
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:448
mpeg12enc.h
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:182
MBBackup::interlaced_dct
int interlaced_dct
Definition: mpegvideo_enc.c:2611
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:46
ff_vbv_update
int ff_vbv_update(MPVMainEncContext *m, int frame_size)
Definition: ratecontrol.c:718
fail
#define fail()
Definition: checkasm.h:224
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegvideo.h:59
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
ff_h263_encode_init
void ff_h263_encode_init(MPVMainEncContext *m)
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:961
MPVUnquantDSPContext::dct_unquantize_mpeg2_inter
void(* dct_unquantize_mpeg2_inter)(const MPVContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:41
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:496
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegvideoenc.h:296
perm
perm
Definition: f_perms.c:75
MAX_THREADS
#define MAX_THREADS
Definition: frame_thread_encoder.c:37
weight
const h264_weight_func weight
Definition: h264dsp_init.c:33
MPVMainEncContext::input_picture
MPVPicture * input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in display order
Definition: mpegvideoenc.h:212
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1221
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:314
pts
static int64_t pts
Definition: transcode_aac.c:644
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:314
get_intra_count
static int get_intra_count(MPVEncContext *const s, const uint8_t *src, const uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1227
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1287
sse_mb
static int sse_mb(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2763
ff_encode_add_stats_side_data
int ff_encode_add_stats_side_data(AVPacket *pkt, int quality, const int64_t error[], int error_count, enum AVPictureType pict_type)
Definition: encode.c:919
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:67
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:298
ff_sqrt
#define ff_sqrt
Definition: mathops.h:220
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
init_matrices
static av_cold int init_matrices(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:374
AVRational::num
int num
Numerator.
Definition: rational.h:59
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:145
refstruct.h
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:310
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegvideoenc.h:298
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegvideoenc.h:303
MPVMainEncContext::frame_skip_factor
int frame_skip_factor
Definition: mpegvideoenc.h:246
skip_check
static int skip_check(MPVMainEncContext *const m, const MPVPicture *p, const MPVPicture *ref)
Definition: mpegvideo_enc.c:1426
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
MPVMainEncContext::stuffing_bits
int stuffing_bits
bits used for stuffing
Definition: mpegvideoenc.h:258
MPVMainEncContext::picture_in_gop_number
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideoenc.h:208
RateControlContext
rate control context.
Definition: ratecontrol.h:60
RateControlContext::num_entries
int num_entries
number of RateControlEntries
Definition: ratecontrol.h:61
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:119
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MPVEncContext *s, int mb_line)
MAX_MV
#define MAX_MV
Definition: motion_est.h:37
MPVPicture::shared
int shared
Definition: mpegpicture.h:87
MPVPicture::coded_picture_number
int coded_picture_number
Definition: mpegpicture.h:90
me_cmp_func
int(* me_cmp_func)(MPVEncContext *c, const uint8_t *blk1, const uint8_t *blk2, ptrdiff_t stride, int h)
Definition: me_cmp.h:45
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:649
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:94
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:119
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:89
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4237
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:705
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
MPVMainEncContext::tmp_frames
AVFrame * tmp_frames[MPVENC_MAX_B_FRAMES+2]
temporary frames used by b_frame_strategy = 2
Definition: mpegvideoenc.h:226
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:35
get_sae
static int get_sae(const uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1213
s
#define s(width, name)
Definition: cbs_vp9.c:198
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:122
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:265
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
MPVMainEncContext::brd_scale
int brd_scale
Definition: mpegvideoenc.h:229
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MBBackup::esc3_level_length
int esc3_level_length
Definition: mpegvideo_enc.c:2617
MPVMainEncContext::reordered_input_picture
MPVPicture * reordered_input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in coded order
Definition: mpegvideoenc.h:213
MPVMainEncContext::intra_only
int intra_only
if true, only intra pictures are generated
Definition: mpegvideoenc.h:205
MPVMainEncContext::mc_mb_var_sum
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegvideoenc.h:270
merge_context_after_me
static void merge_context_after_me(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3584
g
const char * g
Definition: vf_curves.c:128
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:835
MPVMainEncContext::rc_context
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideoenc.h:264
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
ff_mpeg1_dc_scale_table
static const uint8_t *const ff_mpeg1_dc_scale_table
Definition: mpegvideodata.h:32
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:130
MPVEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideoenc.h:50
MPVMainEncContext::header_bits
int header_bits
Definition: mpegvideoenc.h:257
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1565
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:107
RateControlEntry::new_pict_type
int new_pict_type
Definition: ratecontrol.h:51
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:226
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:66
MPVMainEncContext::max_b_frames
int max_b_frames
max number of B-frames
Definition: mpegvideoenc.h:207
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1063
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MPVEncContext *const s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:270
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1282
ff_block_permute
void ff_block_permute(int16_t *block, const uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4583
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1517
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:282
av_fallthrough
#define av_fallthrough
Definition: attributes.h:67
PutBitContext
Definition: put_bits.h:50
ff_speedhq_mb_y_order_to_mb
static int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.h:41
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegvideoenc.h:299
MBBackup::mv_dir
int mv_dir
Definition: mpegvideo_enc.c:2607
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:449
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:57
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:837
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2832
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:57
arg
const char * arg
Definition: jacosubdec.c:65
mpv_encode_init_static
static av_cold void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:270
ff_mpv_common_end
av_cold void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:428
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1267
MAX_PB2_MB_SIZE
@ MAX_PB2_MB_SIZE
Definition: mpeg4videoenc.h:38
MECmpContext
Definition: me_cmp.h:50
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegvideoenc.h:304
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
MPVEncContext::block
int16_t(* block)[64]
points into blocks below
Definition: mpegvideoenc.h:114
NULL
#define NULL
Definition: coverity.c:32
MPVEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideoenc.h:126
MPVMainEncContext::lmin
int lmin
Definition: mpegvideoenc.h:236
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:599
run
uint8_t run
Definition: svq3.c:207
bias
static int bias(int x, int c)
Definition: vqcdec.c:115
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:81
me
#define me
Definition: vf_colormatrix.c:102
aandcttab.h
ff_mpv_common_defaults
av_cold void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:171
MPVUnquantDSPContext::dct_unquantize_mpeg1_intra
void(* dct_unquantize_mpeg1_intra)(const MPVContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:35
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
av_unreachable
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NBDEBUG.
Definition: avassert.h:116
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
Definition: ratecontrol.c:908
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegvideoenc.h:300
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:474
MECmpContext::sad
me_cmp_func sad[6]
Definition: me_cmp.h:53
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:489
MPVPicture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:89
EDGE_WIDTH
#define EDGE_WIDTH
Definition: diracdec.c:47
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:115
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:242
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
MPVEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideoenc.h:53
me_cmp_init
static av_cold int me_cmp_init(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:331
select_input_picture
static int select_input_picture(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1770
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:505
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:279
mathops.h
dct_error
static int dct_error(const struct algo *dct, int test, int is_idct, int speed, const int bits)
Definition: dct.c:188
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:327
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3583
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:876
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:1089
MBBackup::qscale
int qscale
Definition: mpegvideo_enc.c:2609
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:945
qpeldsp.h
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2909
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MPVEncContext *const s)
Definition: h261enc.c:118
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:144
ff_mpv_unquantize_init
#define ff_mpv_unquantize_init(s, bitexact, q_scale_type)
Definition: mpegvideo_unquantize.h:50
add_dequant_dct
static void add_dequant_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1132
MpegEncContext::ac_val
int16_t(* ac_val)[16]
used for H.263 AIC, MPEG-4 AC prediction
Definition: mpegvideo.h:142
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1317
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:69
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1326
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Average and put pixel Widths can be 16, 8, 4 or 2.
Definition: hpeldsp.h:39
MBBackup::block
int16_t(* block)[64]
Definition: mpegvideo_enc.c:2618
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MPVEncContext *const dst, const MPVEncContext *const src)
Definition: mpegvideo_enc.c:254
MPVMainEncContext
Definition: mpegvideoenc.h:202
AVOnce
#define AVOnce
Definition: thread.h:202
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
MPVPicture::reference
int reference
Definition: mpegpicture.h:86
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:173
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:823
load_input_picture
static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1286
MPVUnquantDSPContext::dct_unquantize_h263_inter
void(* dct_unquantize_h263_inter)(const MPVContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:45
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:436
ff_set_mpeg4_time
void ff_set_mpeg4_time(MPVEncContext *const s)
Definition: mpeg4videoenc.c:843
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:543
ff_encode_alloc_frame
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
Definition: encode.c:838
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1392
ff_h263_clean_intra_table_entries
static void ff_h263_clean_intra_table_entries(MpegEncContext *s, int xy)
Definition: h263.h:47
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1324
MPVMainEncContext::last_pict_type
int last_pict_type
Definition: mpegvideoenc.h:262
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:225
f
f
Definition: af_crystalizer.c:122
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:526
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:81
MotionEstContext::temp
uint8_t * temp
Definition: motion_est.h:57
clip_coeffs
static void clip_coeffs(const MPVEncContext *const s, int16_t block[], int last_index)
Definition: mpegvideo_enc.c:2188
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
AVPacket::size
int size
Definition: packet.h:596
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1017
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:44
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
MPVUnquantDSPContext::dct_unquantize_mpeg1_inter
void(* dct_unquantize_mpeg1_inter)(const MPVContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:37
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:104
MPVMainEncContext::vbv_delay_pos
int vbv_delay_pos
offset of vbv_delay in the bitstream
Definition: mpegvideoenc.h:240
MECmpContext::sse
me_cmp_func sse[6]
Definition: me_cmp.h:54
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
MBBackup::mv_type
int mv_type
Definition: mpegvideo_enc.c:2607
AVFrame::quality
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:556
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:335
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:28
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegvideoenc.h:308
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:334
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegvideoenc.h:293
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
mpv_generic_options
static const AVOption mpv_generic_options[]
Definition: mpegvideo_enc.c:96
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
MAX_AC_TEX_MB_SIZE
@ MAX_AC_TEX_MB_SIZE
Definition: mpeg4videoenc.h:39
MPVMainEncContext::frame_bits
int frame_bits
bits used for the current frame
Definition: mpegvideoenc.h:256
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:95
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideoenc.h:313
encode_picture
static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt)
Definition: mpegvideo_enc.c:3673
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:521
AVCPBProperties::min_bitrate
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: defs.h:292
MECmpContext::nsse
me_cmp_func nsse[6]
Definition: me_cmp.h:62
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
ff_set_cmp
av_cold int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int type, int mpvenc)
Fill the function pointer array cmp[6] with me_cmp_funcs from c based upon type.
Definition: me_cmp.c:443
MPVEncContext::me
MotionEstContext me
Definition: mpegvideoenc.h:78
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:594
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:294
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: defs.h:60
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:345
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
Definition: mpegvideo_enc.c:373
MPVMainEncContext::input_picture_number
int input_picture_number
used to set pic->display_picture_number
Definition: mpegvideoenc.h:209
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MPVEncContext *s)
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:175
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:601
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:63
AVCPBProperties::avg_bitrate
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: defs.h:297
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:96
ScratchpadContext::scratchpad_buf
uint8_t * scratchpad_buf
the other *_scratchpad point into this buffer
Definition: mpegpicture.h:38
MPVMainEncContext::me_penalty_compensation
int me_penalty_compensation
Definition: mpegvideoenc.h:266
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:289
M_PI
#define M_PI
Definition: mathematics.h:67
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegvideoenc.h:306
MBBackup
Definition: mpegvideo_enc.c:2603
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegvideoenc.h:295
ff_rv20_encode_picture_header
int ff_rv20_encode_picture_header(MPVMainEncContext *const m)
Definition: rv20enc.c:37
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
ff_mjpeg_add_icc_profile_size
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
Definition: mjpegenc_common.c:137
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:266
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: defs.h:312
emms.h
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
get_bits_diff
static int get_bits_diff(MPVEncContext *s)
Definition: mpegvideoenc.h:411
MPVUnquantDSPContext::dct_unquantize_h263_intra
void(* dct_unquantize_h263_intra)(const MPVContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:43
MBBackup::last_dc
int last_dc[3]
Definition: mpegvideo_enc.c:2614
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:142
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:90
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:57
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:264
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
MBBackup::mv
int mv[2][4][2]
Definition: mpegvideo_enc.c:2605
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:588
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:90
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2943
MPVEncContext::f_code
int f_code
forward MV resolution
Definition: mpegvideoenc.h:80
MPVMainEncContext::mv_table_base
int16_t(* mv_table_base)[2]
Definition: mpegvideoenc.h:274
MBBackup::pb2
PutBitContext pb2
Definition: mpegvideo_enc.c:2619
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:25
internal.h
FF_MATRIX_TYPE_CHROMA_INTRA
#define FF_MATRIX_TYPE_CHROMA_INTRA
Definition: encode.h:120
ff_h263_update_mb
void ff_h263_update_mb(MPVEncContext *s)
MPVEncContext::partitioned_frame
int partitioned_frame
is current frame partitioned
Definition: mpegvideoenc.h:166
src2
const pixel * src2
Definition: h264pred_template.c:421
MPVEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideoenc.h:128
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:287
ff_rate_control_init
av_cold int ff_rate_control_init(MPVMainEncContext *const m)
Definition: ratecontrol.c:496
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:76
update_mb_info
static void update_mb_info(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2894
MPVENC_MAX_B_FRAMES
#define MPVENC_MAX_B_FRAMES
Definition: mpegvideoenc.h:44
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
mpv_encode_defaults
static av_cold void mpv_encode_defaults(MPVMainEncContext *const m)
Set the given MPVEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:279
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:523
MPVMainEncContext::next_lambda
int next_lambda
next lambda used for retrying to encode a frame
Definition: mpegvideoenc.h:259
AV_STRINGIFY
#define AV_STRINGIFY(s)
Definition: macros.h:66
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:887
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
MPVMainEncContext::border_masking
float border_masking
Definition: mpegvideoenc.h:235
ff_write_pass1_stats
void ff_write_pass1_stats(MPVMainEncContext *const m)
Definition: ratecontrol.c:37
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MPVEncContext *const s)
Definition: msmpeg4enc.c:285
ff_square_tab
const EXTERN uint32_t ff_square_tab[512]
Definition: mathops.h:35
MPVMainEncContext::last_non_b_pict_type
int last_non_b_pict_type
used for MPEG-4 gmc B-frames & ratecontrol
Definition: mpegvideoenc.h:263
AVCodecContext::height
int height
Definition: avcodec.h:600
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:491
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
prepare_picture
static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
Allocates new buffers for an AVFrame and copies the properties from another AVFrame.
Definition: mpegvideo_enc.c:1254
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:63
ff_get_2pass_fcode
void ff_get_2pass_fcode(MPVMainEncContext *const m)
Definition: ratecontrol.c:896
frame_end
static void frame_end(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1828
idctdsp.h
MPVPicture::b_frame_score
int b_frame_score
Definition: mpegpicture.h:84
encode_mb_internal
static av_always_inline void encode_mb_internal(MPVEncContext *const s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
Definition: mpegvideo_enc.c:2246
avcodec.h
init_buffers
static av_cold int init_buffers(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:442
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, int bits_per_raw_sample)
Definition: pixblockdsp.c:87
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:332
ret
ret
Definition: filter_design.txt:187
ff_h263_mpeg4_reset_dc
void ff_h263_mpeg4_reset_dc(MPVEncContext *s)
MPVMainEncContext::vbv_ignore_qmax
int vbv_ignore_qmax
Definition: mpegvideoenc.h:237
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:303
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1369
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:207
MPVEncContext::fdsp
FDCTDSPContext fdsp
Definition: mpegvideoenc.h:75
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:97
ff_convert_matrix
void ff_convert_matrix(MPVEncContext *const s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:109
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
MPVMainEncContext::reordered_pts
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there's a delay
Definition: mpegvideoenc.h:223
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
MotionEstContext::scratchpad
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
Definition: motion_est.h:55
mpeg12data.h
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:844
MPVMainEncContext::frame_skip_cmp
int frame_skip_cmp
Definition: mpegvideoenc.h:248
MBBackup::dquant
int dquant
Definition: mpegvideo_enc.c:2616
AVCodecContext
main external API structure.
Definition: avcodec.h:439
AVFrame::height
int height
Definition: frame.h:506
MBBackup::mb_skipped
int mb_skipped
Definition: mpegvideo_enc.c:2608
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:71
h261enc.h
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:402
MPVMainEncContext::lmax
int lmax
Definition: mpegvideoenc.h:236
ADD
#define ADD(field)
Definition: mpegvideo_enc.c:3582
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:280
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: packet.c:231
mpeg4video.h
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1246
AVRational::den
int den
Denominator.
Definition: rational.h:60
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MPVEncContext *const s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:238
MBBackup::i_count
int i_count
Definition: mpegvideo_enc.c:2615
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:830
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
temp
else temp
Definition: vf_mcdeint.c:271
ff_mpv_pic_check_linesize
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
Definition: mpegpicture.c:181
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:861
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
MPVMainEncContext::frame_skip_exp
int frame_skip_exp
Definition: mpegvideoenc.h:247
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:84
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:947
ff_mpv_replace_picture
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
Definition: mpegpicture.c:121
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:888
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
ff_mpeg1_clean_buffers
static void ff_mpeg1_clean_buffers(MPVEncContext *s)
Definition: mpeg12enc.h:29
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegvideoenc.h:290
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
msmpeg4enc.h
mem.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:777
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
denoise_dct
static void denoise_dct(MPVEncContext *const s, int16_t block[])
Definition: mpegvideo_enc.c:3913
dct_quantize_refine
static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4256
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:29
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:554
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1296
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1300
merge_context_after_encode
static void merge_context_after_encode(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3591
w
uint8_t w
Definition: llvidencdsp.c:39
MPVMainEncContext::b_frame_strategy
int b_frame_strategy
Definition: mpegvideoenc.h:227
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:278
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:168
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1033
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:946
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
AVPacket
This structure stores compressed data.
Definition: packet.h:572
mpeg4videodata.h
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:965
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:276
MPVMainEncContext::scenechange_threshold
int scenechange_threshold
Definition: mpegvideoenc.h:231
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MPVEncContext *s)
Definition: mpegvideoenc.c:56
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:600
bytestream.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:479
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:461
MBBackup::mb_intra
int mb_intra
Definition: mpegvideo_enc.c:2608
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:68
MPVUnquantDSPContext
Definition: mpegvideo_unquantize.h:34
h
h
Definition: vp9dsp_template.c:2070
MPVMainEncContext::user_specified_pts
int64_t user_specified_pts
last non-zero pts from user-supplied AVFrame
Definition: mpegvideoenc.h:215
ff_encode_add_cpb_side_data
AVCPBProperties * ff_encode_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: encode.c:888
dct_quantize_c
static int dct_quantize_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4608
stride
#define stride
Definition: h264pred_template.c:536
MpegEncContext::sc
ScratchpadContext sc
Definition: mpegvideo.h:150
pkt
static AVPacket * pkt
Definition: demux_decode.c:55
MBBackup::pb
PutBitContext pb
Definition: mpegvideo_enc.c:2619
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:89
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:226
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideoenc.h:312
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
rv20enc.h
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:281
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
ff_mpv_alloc_pic_accessories
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture...
Definition: mpegpicture.c:237
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:67
update_qscale
static void update_qscale(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:196
RateControlContext::entry
RateControlEntry * entry
Definition: ratecontrol.h:62
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:61
MPVMainEncContext::s
MPVEncContext s
The main slicecontext.
Definition: mpegvideoenc.h:203
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:624
write_mb_info
static void write_mb_info(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2874
ff_mpv_alloc_pic_pool
av_cold AVRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
Definition: mpegpicture.c:90
src
#define src
Definition: vp8dsp.c:248
MBBackup::p_tex_bits
int p_tex_bits
Definition: mpegvideo_enc.c:2615
pixblockdsp.h
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:968
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:290
ff_check_codec_matrices
int ff_check_codec_matrices(AVCodecContext *avctx, unsigned types, uint16_t min, uint16_t max)
Definition: encode.c:944
FF_MATRIX_TYPE_INTER
#define FF_MATRIX_TYPE_INTER
Definition: encode.h:119
h263.h
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(RateControlContext *rcc)
Definition: ratecontrol.c:709
ff_get_best_fcode
int ff_get_best_fcode(MPVMainEncContext *const m, const int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1599
intmath.h
MPVEncContext::mpeg_quant
int mpeg_quant
Definition: mpegvideoenc.h:167