FFmpeg
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include "config_components.h"
35 
36 #include <assert.h>
37 #include <stdint.h>
38 
39 #include "libavutil/emms.h"
40 #include "libavutil/internal.h"
41 #include "libavutil/intmath.h"
42 #include "libavutil/mathematics.h"
43 #include "libavutil/mem.h"
44 #include "libavutil/mem_internal.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/thread.h"
47 #include "avcodec.h"
48 #include "encode.h"
49 #include "idctdsp.h"
50 #include "mpeg12codecs.h"
51 #include "mpeg12data.h"
52 #include "mpeg12enc.h"
53 #include "mpegvideo.h"
54 #include "mpegvideodata.h"
55 #include "mpegvideoenc.h"
56 #include "h261enc.h"
57 #include "h263.h"
58 #include "h263data.h"
59 #include "h263enc.h"
60 #include "mjpegenc_common.h"
61 #include "mathops.h"
62 #include "mpegutils.h"
63 #include "mpegvideo_unquantize.h"
64 #include "mjpegenc.h"
65 #include "speedhqenc.h"
66 #include "msmpeg4enc.h"
67 #include "pixblockdsp.h"
68 #include "qpeldsp.h"
69 #include "faandct.h"
70 #include "aandcttab.h"
71 #include "mpeg4video.h"
72 #include "mpeg4videodata.h"
73 #include "mpeg4videoenc.h"
74 #include "internal.h"
75 #include "bytestream.h"
76 #include "rv10enc.h"
77 #include "packet_internal.h"
78 #include "libavutil/refstruct.h"
79 #include <limits.h>
80 #include "sp5x.h"
81 
82 #define QUANT_BIAS_SHIFT 8
83 
84 #define QMAT_SHIFT_MMX 16
85 #define QMAT_SHIFT 21
86 
87 static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt);
88 static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
89 static int sse_mb(MPVEncContext *const s);
90 static void denoise_dct_c(MPVEncContext *const s, int16_t *block);
91 static int dct_quantize_c(MPVEncContext *const s,
92  int16_t *block, int n,
93  int qscale, int *overflow);
94 static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow);
95 
96 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
97 
98 static const AVOption mpv_generic_options[] = {
101  { NULL },
102 };
103 
105  .class_name = "generic mpegvideo encoder",
106  .item_name = av_default_item_name,
107  .option = mpv_generic_options,
108  .version = LIBAVUTIL_VERSION_INT,
109 };
110 
/**
 * Build per-qscale quantization multiplier tables from a quantization matrix.
 *
 * For every qscale in [qmin, qmax] this stores fixed-point reciprocals of
 * (qscale2 * quant_matrix[j]) into qmat (and, for the generic DCT path, also
 * into the 16-bit qmat16 tables together with a per-coefficient bias), so the
 * quantizer can later use multiplications/shifts instead of divisions.
 *
 * @param s            encoder context; provides the FDCT in use, the IDCT
 *                     coefficient permutation and the q_scale_type flag
 * @param qmat         output: 32-bit multipliers, indexed [qscale][coef]
 * @param qmat16       output: 16-bit multipliers and rounding bias,
 *                     indexed [qscale][0 = multiplier, 1 = bias][coef];
 *                     only written on the generic (non-islow/non-ifast) path
 * @param quant_matrix input quantization matrix in permuted coefficient order
 * @param bias         rounding bias in QUANT_BIAS_SHIFT fixed-point units
 * @param qmin         first qscale to compute (inclusive)
 * @param qmax         last qscale to compute (inclusive)
 * @param intra        1 to skip the DC coefficient in the overflow check
 *                     (intra DC is quantized separately), 0 to include it
 */
void ff_convert_matrix(MPVEncContext *const s, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
{
    FDCTDSPContext *fdsp = &s->fdsp;
    int qscale;
    int shift = 0;

    for (qscale = qmin; qscale <= qmax; qscale++) {
        int i;
        int qscale2;

        /* Map the qscale index to the effective scale factor:
         * MPEG-2 non-linear table when q_scale_type is set, else linear 2*qscale. */
        if (s->c.q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
        else                   qscale2 = qscale << 1;

        /* Select the divisor formula matching the FDCT implementation in use:
         * the "islow"/faan FDCTs produce unscaled coefficients, while ifast
         * leaves an extra ff_aanscales[] factor that must be folded in here. */
        if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
#if CONFIG_FAANDCT
            fdsp->fdct == ff_faandct ||
#endif /* CONFIG_FAANDCT */
            fdsp->fdct == ff_jpeg_fdct_islow_10) {
            for (i = 0; i < 64; i++) {
                /* quant_matrix is addressed through the IDCT permutation so the
                 * table ends up in the coefficient order the quantizer expects */
                const int j = s->c.idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
                 * Assume x = qscale2 * quant_matrix[j]
                 * 1 <= x <= 28560
                 * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
                 * 4194304 >= (1 << 22) / (x) >= 146 */

                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
            }
        } else if (fdsp->fdct == ff_fdct_ifast) {
            for (i = 0; i < 64; i++) {
                const int j = s->c.idsp.idct_permutation[i];
                /* fold the AAN post-scale factor into the divisor */
                int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
                /* 1247 * 1 * 1 <= ff_aanscales[i] * qscale2 * quant_matrix[j] <= 31521 * 112 * 255
                 * Assume x = ff_aanscales[i] * qscale2 * quant_matrix[j]
                 * 1247 <= x <= 900239760
                 * (1 << 36) / 1247 >= (1 << 36) / (x) >= (1 << 36) / 900239760
                 * 55107840 >= (1 << 36) / (x) >= 76 */

                qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
            }
        } else {
            for (i = 0; i < 64; i++) {
                const int j = s->c.idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
                 * Assume x = qscale2 * quant_matrix[j]
                 * 1 <= x <= 28560
                 * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
                 * 4194304 >= (1 << 22) / (x) >= 146
                 *
                 * 1 <= x <= 28560
                 * (1 << 17) / 1 >= (1 << 17) / (x) >= (1 << 17) / 28560
                 * 131072 >= (1 << 17) / (x) >= 4 */

                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
                qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;

                /* keep the 16-bit multiplier in (0, 128*256): 0 would zero the
                 * coefficient and 128*256 does not fit the 16-bit table */
                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                /* precompute the rounding bias scaled to the 16-bit multiplier */
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
                                qmat16[qscale][0][i]);
            }
        }

        /* Determine how many extra shift bits would be needed to keep
         * max_coefficient * qmat within INT_MAX; intra==1 skips the DC term. */
        for (i = intra; i < 64; i++) {
            int64_t max = 8191;
            if (fdsp->fdct == ff_fdct_ifast) {
                max = (8191LL * ff_aanscales[i]) >> 14;
            }
            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
                shift++;
            }
        }
    }
    /* shift > 0 means some products can overflow 32 bits; warn but continue */
    if (shift) {
        av_log(s->c.avctx, AV_LOG_INFO,
               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
               QMAT_SHIFT - shift);
    }
}
197 
198 static inline void update_qscale(MPVMainEncContext *const m)
199 {
200  MPVEncContext *const s = &m->s;
201 
202  if (s->c.q_scale_type == 1 && 0) {
203  int i;
204  int bestdiff=INT_MAX;
205  int best = 1;
206 
207  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
208  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
209  if (ff_mpeg2_non_linear_qscale[i] < s->c.avctx->qmin ||
210  (ff_mpeg2_non_linear_qscale[i] > s->c.avctx->qmax && !m->vbv_ignore_qmax))
211  continue;
212  if (diff < bestdiff) {
213  bestdiff = diff;
214  best = i;
215  }
216  }
217  s->c.qscale = best;
218  } else {
219  s->c.qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
220  (FF_LAMBDA_SHIFT + 7);
221  s->c.qscale = av_clip(s->c.qscale, s->c.avctx->qmin, m->vbv_ignore_qmax ? 31 : s->c.avctx->qmax);
222  }
223 
224  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
226 }
227 
229 {
230  int i;
231 
232  if (matrix) {
233  put_bits(pb, 1, 1);
234  for (i = 0; i < 64; i++) {
235  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
236  }
237  } else
238  put_bits(pb, 1, 0);
239 }
240 
241 /**
242  * init s->c.cur_pic.qscale_table from s->lambda_table
243  */
244 static void init_qscale_tab(MPVEncContext *const s)
245 {
246  int8_t *const qscale_table = s->c.cur_pic.qscale_table;
247 
248  for (int i = 0; i < s->c.mb_num; i++) {
249  unsigned int lam = s->lambda_table[s->c.mb_index2xy[i]];
250  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
251  qscale_table[s->c.mb_index2xy[i]] = av_clip(qp, s->c.avctx->qmin,
252  s->c.avctx->qmax);
253  }
254 }
255 
257  const MPVEncContext *const src)
258 {
259 #define COPY(a) dst->a = src->a
260  COPY(c.pict_type);
261  COPY(c.f_code);
262  COPY(c.b_code);
263  COPY(c.qscale);
264  COPY(lambda);
265  COPY(lambda2);
266  COPY(c.frame_pred_frame_dct); // FIXME don't set in encode_header
267  COPY(c.progressive_frame); // FIXME don't set in encode_header
268  COPY(c.partitioned_frame); // FIXME don't set in encode_header
269 #undef COPY
270 }
271 
273 {
274  for (int i = -16; i < 16; i++)
275  default_fcode_tab[i + MAX_MV] = 1;
276 }
277 
278 /**
279  * Set the given MPVEncContext to defaults for encoding.
280  */
282 {
283  MPVEncContext *const s = &m->s;
284  static AVOnce init_static_once = AV_ONCE_INIT;
285 
287 
288  s->c.f_code = 1;
289  s->c.b_code = 1;
290 
291  if (!m->fcode_tab) {
293  ff_thread_once(&init_static_once, mpv_encode_init_static);
294  }
295  if (!s->c.y_dc_scale_table) {
296  s->c.y_dc_scale_table =
297  s->c.c_dc_scale_table = ff_mpeg1_dc_scale_table;
298  }
299 }
300 
302 {
303  s->dct_quantize = dct_quantize_c;
304  s->denoise_dct = denoise_dct_c;
305 
306 #if ARCH_MIPS
308 #elif ARCH_X86
310 #endif
311 
312  if (s->c.avctx->trellis)
313  s->dct_quantize = dct_quantize_trellis_c;
314 }
315 
317 {
318  MPVUnquantDSPContext unquant_dsp_ctx;
319 
320  ff_mpv_unquantize_init(&unquant_dsp_ctx,
321  avctx->flags & AV_CODEC_FLAG_BITEXACT, s->q_scale_type);
322 
323  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
324  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg2_intra;
325  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg2_inter;
326  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
327  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_h263_intra;
328  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_h263_inter;
329  } else {
330  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg1_intra;
331  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg1_inter;
332  }
333 }
334 
336 {
337  MPVEncContext *const s = &m->s;
338  MECmpContext mecc;
339  me_cmp_func me_cmp[6];
340  int ret;
341 
342  ff_me_cmp_init(&mecc, avctx);
343  ret = ff_me_init(&s->me, avctx, &mecc, 1);
344  if (ret < 0)
345  return ret;
346  ret = ff_set_cmp(&mecc, me_cmp, m->frame_skip_cmp, 1);
347  if (ret < 0)
348  return ret;
349  m->frame_skip_cmp_fn = me_cmp[1];
351  ret = ff_set_cmp(&mecc, me_cmp, avctx->ildct_cmp, 1);
352  if (ret < 0)
353  return ret;
354  if (!me_cmp[0] || !me_cmp[4])
355  return AVERROR(EINVAL);
356  s->ildct_cmp[0] = me_cmp[0];
357  s->ildct_cmp[1] = me_cmp[4];
358  }
359 
360  s->sum_abs_dctelem = mecc.sum_abs_dctelem;
361 
362  s->sse_cmp[0] = mecc.sse[0];
363  s->sse_cmp[1] = mecc.sse[1];
364  s->sad_cmp[0] = mecc.sad[0];
365  s->sad_cmp[1] = mecc.sad[1];
366  if (avctx->mb_cmp == FF_CMP_NSSE) {
367  s->n_sse_cmp[0] = mecc.nsse[0];
368  s->n_sse_cmp[1] = mecc.nsse[1];
369  } else {
370  s->n_sse_cmp[0] = mecc.sse[0];
371  s->n_sse_cmp[1] = mecc.sse[1];
372  }
373 
374  return 0;
375 }
376 
377 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
379 {
380  MPVEncContext *const s = &m->s;
381  const int nb_matrices = 1 + (s->c.out_format == FMT_MJPEG) + !m->intra_only;
382  const uint16_t *intra_matrix, *inter_matrix;
383  int ret;
384 
385  if (!ALLOCZ_ARRAYS(s->q_intra_matrix, 32, nb_matrices) ||
386  !ALLOCZ_ARRAYS(s->q_intra_matrix16, 32, nb_matrices))
387  return AVERROR(ENOMEM);
388 
389  if (s->c.out_format == FMT_MJPEG) {
390  s->q_chroma_intra_matrix = s->q_intra_matrix + 32;
391  s->q_chroma_intra_matrix16 = s->q_intra_matrix16 + 32;
392  // No need to set q_inter_matrix
394  // intra_matrix, chroma_intra_matrix will be set later for MJPEG.
395  return 0;
396  } else {
397  s->q_chroma_intra_matrix = s->q_intra_matrix;
398  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
399  }
400  if (!m->intra_only) {
401  s->q_inter_matrix = s->q_intra_matrix + 32;
402  s->q_inter_matrix16 = s->q_intra_matrix16 + 32;
403  }
404 
405  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 &&
406  s->c.mpeg_quant) {
409  } else if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
410  intra_matrix =
412  } else {
413  /* MPEG-1/2, SpeedHQ */
416  }
417  if (avctx->intra_matrix)
419  if (avctx->inter_matrix)
421 
422  /* init q matrix */
423  for (int i = 0; i < 64; i++) {
424  int j = s->c.idsp.idct_permutation[i];
425 
426  s->c.intra_matrix[j] = s->c.chroma_intra_matrix[j] = intra_matrix[i];
427  s->c.inter_matrix[j] = inter_matrix[i];
428  }
429 
430  /* precompute matrix */
432  if (ret < 0)
433  return ret;
434 
435  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
436  s->c.intra_matrix, s->intra_quant_bias, avctx->qmin,
437  31, 1);
438  if (s->q_inter_matrix)
439  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
440  s->c.inter_matrix, s->inter_quant_bias, avctx->qmin,
441  31, 0);
442 
443  return 0;
444 }
445 
447 {
448  MPVEncContext *const s = &m->s;
449  // Align the following per-thread buffers to avoid false sharing.
450  enum {
451 #ifndef _MSC_VER
452  /// The number is supposed to match/exceed the cache-line size.
453  ALIGN = FFMAX(128, _Alignof(max_align_t)),
454 #else
455  ALIGN = 128,
456 #endif
457  DCT_ERROR_SIZE = FFALIGN(2 * sizeof(*s->dct_error_sum), ALIGN),
458  };
459  static_assert(DCT_ERROR_SIZE * MAX_THREADS + ALIGN - 1 <= SIZE_MAX,
460  "Need checks for potential overflow.");
461  unsigned nb_slices = s->c.slice_context_count, mv_table_size, mb_array_size;
462  char *dct_error = NULL;
463  int has_b_frames = !!m->max_b_frames, nb_mv_tables = 1 + 5 * has_b_frames;
464  int16_t (*mv_table)[2];
465 
466  if (m->noise_reduction) {
467  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
468  return AVERROR(ENOMEM);
469  dct_error = av_mallocz(ALIGN - 1 + nb_slices * DCT_ERROR_SIZE);
470  if (!dct_error)
471  return AVERROR(ENOMEM);
473  dct_error += FFALIGN((uintptr_t)dct_error, ALIGN) - (uintptr_t)dct_error;
474  }
475 
476  /* Allocate MB type table */
477  mb_array_size = s->c.mb_stride * s->c.mb_height;
478  s->mb_type = av_calloc(mb_array_size, 3 * sizeof(*s->mb_type) + sizeof(*s->mb_mean));
479  if (!s->mb_type)
480  return AVERROR(ENOMEM);
481  if (!FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size))
482  return AVERROR(ENOMEM);
483 
484  mv_table_size = (s->c.mb_height + 2) * s->c.mb_stride + 1;
485  if (s->c.codec_id == AV_CODEC_ID_MPEG4 ||
486  (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
487  nb_mv_tables += 8 * has_b_frames;
488  if (!ALLOCZ_ARRAYS(s->p_field_select_table[0], 2 * (2 + 4 * has_b_frames), mv_table_size))
489  return AVERROR(ENOMEM);
490  }
491 
492  mv_table = av_calloc(mv_table_size, nb_mv_tables * sizeof(*mv_table));
493  if (!mv_table)
494  return AVERROR(ENOMEM);
495  m->mv_table_base = mv_table;
496  mv_table += s->c.mb_stride + 1;
497 
498  for (unsigned i = 0; i < nb_slices; ++i) {
499  MPVEncContext *const s2 = s->c.enc_contexts[i];
500  int16_t (*tmp_mv_table)[2] = mv_table;
501 
502  if (dct_error) {
503  s2->dct_offset = s->dct_offset;
504  s2->dct_error_sum = (void*)dct_error;
505  dct_error += DCT_ERROR_SIZE;
506  }
507 
508  s2->mb_type = s->mb_type;
509  s2->mc_mb_var = s2->mb_type + mb_array_size;
510  s2->mb_var = s2->mc_mb_var + mb_array_size;
511  s2->mb_mean = (uint8_t*)(s2->mb_var + mb_array_size);
512  s2->lambda_table = s->lambda_table;
513 
514  s2->p_mv_table = tmp_mv_table;
515  if (has_b_frames) {
516  s2->b_forw_mv_table = tmp_mv_table += mv_table_size;
517  s2->b_back_mv_table = tmp_mv_table += mv_table_size;
518  s2->b_bidir_forw_mv_table = tmp_mv_table += mv_table_size;
519  s2->b_bidir_back_mv_table = tmp_mv_table += mv_table_size;
520  s2->b_direct_mv_table = tmp_mv_table += mv_table_size;
521  }
522 
523  if (s->p_field_select_table[0]) { // MPEG-4 or INTERLACED_ME above
524  uint8_t *field_select = s->p_field_select_table[0];
526  s2->p_field_select_table[1] = field_select += 2 * mv_table_size;
527 
528  if (has_b_frames) {
529  for (int j = 0; j < 2; j++) {
530  for (int k = 0; k < 2; k++) {
531  for (int l = 0; l < 2; l++)
532  s2->b_field_mv_table[j][k][l] = tmp_mv_table += mv_table_size;
533  s2->b_field_select_table[j][k] = field_select += 2 * mv_table_size;
534  }
535  }
536  }
537  }
538  }
539 
540  return 0;
541 }
542 
543 /* init video encoder */
545 {
546  MPVMainEncContext *const m = avctx->priv_data;
547  MPVEncContext *const s = &m->s;
548  AVCPBProperties *cpb_props;
549  int gcd, ret;
550 
552 
553  switch (avctx->pix_fmt) {
554  case AV_PIX_FMT_YUVJ444P:
555  case AV_PIX_FMT_YUV444P:
556  s->c.chroma_format = CHROMA_444;
557  break;
558  case AV_PIX_FMT_YUVJ422P:
559  case AV_PIX_FMT_YUV422P:
560  s->c.chroma_format = CHROMA_422;
561  break;
562  case AV_PIX_FMT_YUVJ420P:
563  case AV_PIX_FMT_YUV420P:
564  default:
565  s->c.chroma_format = CHROMA_420;
566  break;
567  }
568 
570 
571  m->bit_rate = avctx->bit_rate;
572  s->c.width = avctx->width;
573  s->c.height = avctx->height;
574  if (avctx->gop_size > 600 &&
577  "keyframe interval too large!, reducing it from %d to %d\n",
578  avctx->gop_size, 600);
579  avctx->gop_size = 600;
580  }
581  m->gop_size = avctx->gop_size;
582  s->c.avctx = avctx;
584  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
585  "is " AV_STRINGIFY(MPVENC_MAX_B_FRAMES) ".\n");
587  } else if (avctx->max_b_frames < 0) {
589  "max b frames must be 0 or positive for mpegvideo based encoders\n");
590  return AVERROR(EINVAL);
591  }
593  s->c.codec_id = avctx->codec->id;
595  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
596  return AVERROR(EINVAL);
597  }
598 
599  s->c.quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
600  s->rtp_mode = !!s->rtp_payload_size;
601  s->c.intra_dc_precision = avctx->intra_dc_precision;
602 
603  // workaround some differences between how applications specify dc precision
604  if (s->c.intra_dc_precision < 0) {
605  s->c.intra_dc_precision += 8;
606  } else if (s->c.intra_dc_precision >= 8)
607  s->c.intra_dc_precision -= 8;
608 
609  if (s->c.intra_dc_precision < 0) {
611  "intra dc precision must be positive, note some applications use"
612  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
613  return AVERROR(EINVAL);
614  }
615 
616  if (s->c.intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
617  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
618  return AVERROR(EINVAL);
619  }
621 
622  if (m->gop_size <= 1) {
623  m->intra_only = 1;
624  m->gop_size = 12;
625  } else {
626  m->intra_only = 0;
627  }
628 
629  /* Fixed QSCALE */
631 
632  s->adaptive_quant = (avctx->lumi_masking ||
633  avctx->dark_masking ||
636  avctx->p_masking ||
637  m->border_masking ||
638  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
639  !m->fixed_qscale;
640 
641  s->c.loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
642 
644  switch(avctx->codec_id) {
647  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
648  break;
649  case AV_CODEC_ID_MPEG4:
653  if (avctx->rc_max_rate >= 15000000) {
654  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
655  } else if(avctx->rc_max_rate >= 2000000) {
656  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
657  } else if(avctx->rc_max_rate >= 384000) {
658  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
659  } else
660  avctx->rc_buffer_size = 40;
661  avctx->rc_buffer_size *= 16384;
662  break;
663  }
664  if (avctx->rc_buffer_size) {
665  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
666  }
667  }
668 
669  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
670  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
671  return AVERROR(EINVAL);
672  }
673 
676  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
677  }
678 
680  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
681  return AVERROR(EINVAL);
682  }
683 
685  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
686  return AVERROR(EINVAL);
687  }
688 
689  if (avctx->rc_max_rate &&
693  "impossible bitrate constraints, this will fail\n");
694  }
695 
696  if (avctx->rc_buffer_size &&
699  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
700  return AVERROR(EINVAL);
701  }
702 
703  if (!m->fixed_qscale &&
706  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
708  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
709  if (nbt <= INT_MAX) {
710  avctx->bit_rate_tolerance = nbt;
711  } else
712  avctx->bit_rate_tolerance = INT_MAX;
713  }
714 
715  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->c.codec_id != AV_CODEC_ID_MPEG4 &&
716  s->c.codec_id != AV_CODEC_ID_H263 && s->c.codec_id != AV_CODEC_ID_H263P &&
717  s->c.codec_id != AV_CODEC_ID_FLV1) {
718  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
719  return AVERROR(EINVAL);
720  }
721 
722  if (s->c.obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
724  "OBMC is only supported with simple mb decision\n");
725  return AVERROR(EINVAL);
726  }
727 
728  if (s->c.quarter_sample && s->c.codec_id != AV_CODEC_ID_MPEG4) {
729  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
730  return AVERROR(EINVAL);
731  }
732 
733  if ((s->c.codec_id == AV_CODEC_ID_MPEG4 ||
734  s->c.codec_id == AV_CODEC_ID_H263 ||
735  s->c.codec_id == AV_CODEC_ID_H263P) &&
736  (avctx->sample_aspect_ratio.num > 255 ||
737  avctx->sample_aspect_ratio.den > 255)) {
739  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
743  }
744 
745  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
746  s->c.codec_id == AV_CODEC_ID_H263P) &&
747  (avctx->width > 2048 ||
748  avctx->height > 1152 )) {
749  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
750  return AVERROR(EINVAL);
751  }
752  if (s->c.codec_id == AV_CODEC_ID_FLV1 &&
753  (avctx->width > 65535 ||
754  avctx->height > 65535 )) {
755  av_log(avctx, AV_LOG_ERROR, "FLV does not support resolutions above 16bit\n");
756  return AVERROR(EINVAL);
757  }
758  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
759  s->c.codec_id == AV_CODEC_ID_H263P ||
760  s->c.codec_id == AV_CODEC_ID_RV20) &&
761  ((avctx->width &3) ||
762  (avctx->height&3) )) {
763  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
764  return AVERROR(EINVAL);
765  }
766 
767  if (s->c.codec_id == AV_CODEC_ID_RV10 &&
768  (avctx->width &15 ||
769  avctx->height&15 )) {
770  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
771  return AVERROR(EINVAL);
772  }
773 
774  if ((s->c.codec_id == AV_CODEC_ID_WMV1 ||
775  s->c.codec_id == AV_CODEC_ID_WMV2) &&
776  avctx->width & 1) {
777  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
778  return AVERROR(EINVAL);
779  }
780 
782  s->c.codec_id != AV_CODEC_ID_MPEG4 && s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO) {
783  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
784  return AVERROR(EINVAL);
785  }
786 
787  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
788  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
789  return AVERROR(EINVAL);
790  }
791 
792  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
794  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=rd\n");
795  return AVERROR(EINVAL);
796  }
797 
798  if (m->scenechange_threshold < 1000000000 &&
801  "closed gop with scene change detection are not supported yet, "
802  "set threshold to 1000000000\n");
803  return AVERROR_PATCHWELCOME;
804  }
805 
807  if (s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO &&
810  "low delay forcing is only available for mpeg2, "
811  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
812  return AVERROR(EINVAL);
813  }
814  if (m->max_b_frames != 0) {
816  "B-frames cannot be used with low delay\n");
817  return AVERROR(EINVAL);
818  }
819  }
820 
821  if (avctx->slices > 1 &&
823  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
824  return AVERROR(EINVAL);
825  }
826 
829  "notice: b_frame_strategy only affects the first pass\n");
830  m->b_frame_strategy = 0;
831  }
832 
834  if (gcd > 1) {
835  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
836  avctx->time_base.den /= gcd;
837  avctx->time_base.num /= gcd;
838  //return -1;
839  }
840 
841  if (s->c.mpeg_quant || s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO || s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO || s->c.codec_id == AV_CODEC_ID_MJPEG || s->c.codec_id == AV_CODEC_ID_AMV || s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
842  // (a + x * 3 / 8) / x
843  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
844  s->inter_quant_bias = 0;
845  } else {
846  s->intra_quant_bias = 0;
847  // (a - x / 4) / x
848  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
849  }
850 
851  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
852  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
853  return AVERROR(EINVAL);
854  }
855 
856  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
857 
858  switch (avctx->codec->id) {
859 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
861  s->rtp_mode = 1;
862  /* fallthrough */
864  s->c.out_format = FMT_MPEG1;
865  s->c.low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
866  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
868  break;
869 #endif
870 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
871  case AV_CODEC_ID_MJPEG:
872  case AV_CODEC_ID_AMV:
873  s->c.out_format = FMT_MJPEG;
874  m->intra_only = 1; /* force intra only for jpeg */
875  avctx->delay = 0;
876  s->c.low_delay = 1;
877  break;
878 #endif
879  case AV_CODEC_ID_SPEEDHQ:
880  s->c.out_format = FMT_SPEEDHQ;
881  m->intra_only = 1; /* force intra only for SHQ */
882  avctx->delay = 0;
883  s->c.low_delay = 1;
884  break;
885  case AV_CODEC_ID_H261:
886  s->c.out_format = FMT_H261;
887  avctx->delay = 0;
888  s->c.low_delay = 1;
889  s->rtp_mode = 0; /* Sliced encoding not supported */
890  break;
891  case AV_CODEC_ID_H263:
892  if (!CONFIG_H263_ENCODER)
895  s->c.width, s->c.height) == 8) {
897  "The specified picture size of %dx%d is not valid for "
898  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
899  "352x288, 704x576, and 1408x1152. "
900  "Try H.263+.\n", s->c.width, s->c.height);
901  return AVERROR(EINVAL);
902  }
903  s->c.out_format = FMT_H263;
904  avctx->delay = 0;
905  s->c.low_delay = 1;
906  break;
907  case AV_CODEC_ID_H263P:
908  s->c.out_format = FMT_H263;
909  s->c.h263_plus = 1;
910  /* Fx */
911  s->c.h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
912  s->c.modified_quant = s->c.h263_aic;
913  s->c.loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
914  s->c.unrestricted_mv = s->c.obmc || s->c.loop_filter || s->c.umvplus;
915  s->c.flipflop_rounding = 1;
916 
917  /* /Fx */
918  /* These are just to be sure */
919  avctx->delay = 0;
920  s->c.low_delay = 1;
921  break;
922  case AV_CODEC_ID_FLV1:
923  s->c.out_format = FMT_H263;
924  s->c.h263_flv = 2; /* format = 1; 11-bit codes */
925  s->c.unrestricted_mv = 1;
926  s->rtp_mode = 0; /* don't allow GOB */
927  avctx->delay = 0;
928  s->c.low_delay = 1;
929  break;
930 #if CONFIG_RV10_ENCODER
931  case AV_CODEC_ID_RV10:
933  s->c.out_format = FMT_H263;
934  avctx->delay = 0;
935  s->c.low_delay = 1;
936  break;
937 #endif
938 #if CONFIG_RV20_ENCODER
939  case AV_CODEC_ID_RV20:
941  s->c.out_format = FMT_H263;
942  avctx->delay = 0;
943  s->c.low_delay = 1;
944  s->c.modified_quant = 1;
945  s->c.h263_aic = 1;
946  s->c.h263_plus = 1;
947  s->c.loop_filter = 1;
948  s->c.unrestricted_mv = 0;
949  break;
950 #endif
951  case AV_CODEC_ID_MPEG4:
952  s->c.out_format = FMT_H263;
953  s->c.h263_pred = 1;
954  s->c.unrestricted_mv = 1;
955  s->c.flipflop_rounding = 1;
956  s->c.low_delay = m->max_b_frames ? 0 : 1;
957  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
958  break;
960  s->c.out_format = FMT_H263;
961  s->c.h263_pred = 1;
962  s->c.unrestricted_mv = 1;
963  s->c.msmpeg4_version = MSMP4_V2;
964  avctx->delay = 0;
965  s->c.low_delay = 1;
966  break;
968  s->c.out_format = FMT_H263;
969  s->c.h263_pred = 1;
970  s->c.unrestricted_mv = 1;
971  s->c.msmpeg4_version = MSMP4_V3;
972  s->c.flipflop_rounding = 1;
973  avctx->delay = 0;
974  s->c.low_delay = 1;
975  break;
976  case AV_CODEC_ID_WMV1:
977  s->c.out_format = FMT_H263;
978  s->c.h263_pred = 1;
979  s->c.unrestricted_mv = 1;
980  s->c.msmpeg4_version = MSMP4_WMV1;
981  s->c.flipflop_rounding = 1;
982  avctx->delay = 0;
983  s->c.low_delay = 1;
984  break;
985  case AV_CODEC_ID_WMV2:
986  s->c.out_format = FMT_H263;
987  s->c.h263_pred = 1;
988  s->c.unrestricted_mv = 1;
989  s->c.msmpeg4_version = MSMP4_WMV2;
990  s->c.flipflop_rounding = 1;
991  avctx->delay = 0;
992  s->c.low_delay = 1;
993  break;
994  default:
995  return AVERROR(EINVAL);
996  }
997 
998  avctx->has_b_frames = !s->c.low_delay;
999 
1000  s->c.encoding = 1;
1001 
1002  s->c.progressive_frame =
1003  s->c.progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
1005  s->c.alternate_scan);
1006 
1009  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
1010  (1 << AV_PICTURE_TYPE_P) |
1011  (1 << AV_PICTURE_TYPE_B);
1012  } else if (!m->intra_only) {
1013  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
1014  (1 << AV_PICTURE_TYPE_P);
1015  } else {
1016  s->frame_reconstruction_bitfield = 0;
1017  }
1018 
1019  if (m->lmin > m->lmax) {
1020  av_log(avctx, AV_LOG_WARNING, "Clipping lmin value to %d\n", m->lmax);
1021  m->lmin = m->lmax;
1022  }
1023 
1024  /* ff_mpv_common_init() will copy (memdup) the contents of the main slice
1025  * to the slice contexts, so we initialize various fields of it
1026  * before calling ff_mpv_common_init(). */
1027  ff_mpv_idct_init(&s->c);
1028  init_unquantize(&s->c, avctx);
1029  ff_fdctdsp_init(&s->fdsp, avctx);
1030  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
1031  ff_pixblockdsp_init(&s->pdsp, avctx);
1032  ret = me_cmp_init(m, avctx);
1033  if (ret < 0)
1034  return ret;
1035 
1036  if (!(avctx->stats_out = av_mallocz(256)) ||
1037  !(s->new_pic = av_frame_alloc()) ||
1038  !(s->c.picture_pool = ff_mpv_alloc_pic_pool(0)))
1039  return AVERROR(ENOMEM);
1040 
1041  ret = init_matrices(m, avctx);
1042  if (ret < 0)
1043  return ret;
1044 
1046 
1047  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263) {
1049 #if CONFIG_MSMPEG4ENC
1050  if (s->c.msmpeg4_version != MSMP4_UNUSED)
1052 #endif
1053  }
1054 
1055  s->c.slice_ctx_size = sizeof(*s);
1056  ret = ff_mpv_common_init(&s->c);
1057  if (ret < 0)
1058  return ret;
1059 
1060  if (s->c.slice_context_count > 1) {
1061  for (int i = 0; i < s->c.slice_context_count; ++i) {
1062  s->c.enc_contexts[i]->rtp_mode = 1;
1063 
1065  s->c.enc_contexts[i]->c.h263_slice_structured = 1;
1066  }
1067  }
1068 
1069  ret = init_buffers(m, avctx);
1070  if (ret < 0)
1071  return ret;
1072 
1074  if (ret < 0)
1075  return ret;
1076 
1077  if (m->b_frame_strategy == 2) {
1078  for (int i = 0; i < m->max_b_frames + 2; i++) {
1079  m->tmp_frames[i] = av_frame_alloc();
1080  if (!m->tmp_frames[i])
1081  return AVERROR(ENOMEM);
1082 
1084  m->tmp_frames[i]->width = s->c.width >> m->brd_scale;
1085  m->tmp_frames[i]->height = s->c.height >> m->brd_scale;
1086 
1087  ret = av_frame_get_buffer(m->tmp_frames[i], 0);
1088  if (ret < 0)
1089  return ret;
1090  }
1091  }
1092 
1093  cpb_props = ff_encode_add_cpb_side_data(avctx);
1094  if (!cpb_props)
1095  return AVERROR(ENOMEM);
1096  cpb_props->max_bitrate = avctx->rc_max_rate;
1097  cpb_props->min_bitrate = avctx->rc_min_rate;
1098  cpb_props->avg_bitrate = avctx->bit_rate;
1099  cpb_props->buffer_size = avctx->rc_buffer_size;
1100 
1101  return 0;
1102 }
1103 
1105 {
1106  MPVMainEncContext *const m = avctx->priv_data;
1107  MPVEncContext *const s = &m->s;
1108 
1110 
1111  ff_mpv_common_end(&s->c);
1112  av_refstruct_pool_uninit(&s->c.picture_pool);
1113 
1114  for (int i = 0; i < MPVENC_MAX_B_FRAMES + 1; i++) {
1117  }
1118  for (int i = 0; i < FF_ARRAY_ELEMS(m->tmp_frames); i++)
1119  av_frame_free(&m->tmp_frames[i]);
1120 
1121  av_frame_free(&s->new_pic);
1122 
1124 
1125  av_freep(&m->mv_table_base);
1126  av_freep(&s->p_field_select_table[0]);
1128 
1129  av_freep(&s->mb_type);
1130  av_freep(&s->lambda_table);
1131 
1132  av_freep(&s->q_intra_matrix);
1133  av_freep(&s->q_intra_matrix16);
1134  av_freep(&s->dct_offset);
1135 
1136  return 0;
1137 }
1138 
/* put block[] to dest[] */
/**
 * Dequantize an intra block in place and overwrite dest[] with its
 * inverse transform (idct_put); no prediction is added.
 *
 * @param s         encoder context providing the unquantize and IDCT hooks
 * @param block     quantized coefficients, dequantized in place
 * @param i         block index within the macroblock
 * @param dest      destination pixels
 * @param line_size stride of dest in bytes
 * @param qscale    quantizer scale used for dequantization
 */
static inline void put_dct(MPVEncContext *const s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->c.dct_unquantize_intra(&s->c, block, i, qscale);
    s->c.idsp.idct_put(dest, line_size, block);
}
1146 
/**
 * Dequantize an inter block in place and add its inverse transform on top
 * of dest[] (idct_add), i.e. on top of the prediction already written there.
 * All-zero blocks (block_last_index < 0) are skipped entirely.
 *
 * @param s         encoder context providing the unquantize and IDCT hooks
 * @param block     quantized coefficients, dequantized in place
 * @param i         block index within the macroblock
 * @param dest      destination pixels the residual is added to
 * @param line_size stride of dest in bytes
 * @param qscale    quantizer scale used for dequantization
 */
static inline void add_dequant_dct(MPVEncContext *const s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    if (s->c.block_last_index[i] >= 0) {
        s->c.dct_unquantize_inter(&s->c, block, i, qscale);

        s->c.idsp.idct_add(dest, line_size, block);
    }
}
1156 
/**
 * Performs dequantization and IDCT (if necessary)
 *
 * Reconstructs the current macroblock into s->c.dest[] so it can serve as a
 * reference for later pictures. Reconstruction is skipped for picture types
 * not selected in frame_reconstruction_bitfield. Intra blocks overwrite the
 * destination (put_dct); inter blocks add the residual on top of the
 * motion-compensated prediction (add_dequant_dct).
 */
static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
{
    if (s->c.avctx->debug & FF_DEBUG_DCT_COEFF) {
        /* print DCT coefficients */
        av_log(s->c.avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->c.mb_x, s->c.mb_y);
        for (int i = 0; i < 6; i++) {
            for (int j = 0; j < 64; j++) {
                /* dump in raster order by going through the IDCT permutation */
                av_log(s->c.avctx, AV_LOG_DEBUG, "%5d",
                       block[i][s->c.idsp.idct_permutation[j]]);
            }
            av_log(s->c.avctx, AV_LOG_DEBUG, "\n");
        }
    }

    if ((1 << s->c.pict_type) & s->frame_reconstruction_bitfield) {
        uint8_t *dest_y = s->c.dest[0], *dest_cb = s->c.dest[1], *dest_cr = s->c.dest[2];
        int dct_linesize, dct_offset;
        const int linesize   = s->c.cur_pic.linesize[0];
        const int uvlinesize = s->c.cur_pic.linesize[1];
        const int block_size = 8;

        /* For interlaced DCT the two fields are interleaved: blocks use a
         * doubled stride and the "lower" blocks start one line down instead
         * of eight lines down. */
        dct_linesize = linesize << s->c.interlaced_dct;
        dct_offset   = s->c.interlaced_dct ? linesize : linesize * block_size;

        if (!s->c.mb_intra) {
            /* No MC, as that was already done otherwise */
            add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->c.qscale);
            add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->c.qscale);
            add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->c.qscale);
            add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);

            if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
                if (s->c.chroma_y_shift) {
                    /* 4:2:0 — one 8x8 block per chroma plane */
                    add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
                    add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
                } else {
                    /* 4:2:2 — two vertically stacked 8x8 blocks per chroma plane */
                    dct_linesize >>= 1;
                    dct_offset   >>= 1;
                    add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->c.chroma_qscale);
                    add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->c.chroma_qscale);
                    add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
                    add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
                }
            }
        } else {
            /* dct only in intra block */
            put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->c.qscale);
            put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->c.qscale);
            put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->c.qscale);
            put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);

            if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
                if (s->c.chroma_y_shift) {
                    put_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
                    put_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
                } else {
                    dct_offset   >>= 1;
                    dct_linesize >>= 1;
                    put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->c.chroma_qscale);
                    put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->c.chroma_qscale);
                    put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
                    put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
                }
            }
        }
    }
}
1227 
/**
 * Sum of absolute differences between the pixels of a 16x16 block
 * and a single constant value.
 *
 * @param src    top-left pixel of the 16x16 block
 * @param ref    value each pixel is compared against
 * @param stride distance in bytes between vertically adjacent pixels
 * @return accumulated absolute error over the 256 pixels
 */
static int get_sae(const uint8_t *src, int ref, int stride)
{
    int sum = 0;

    for (int row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;
        for (int col = 0; col < 16; col++) {
            int diff = line[col] - ref;
            sum += diff < 0 ? -diff : diff;
        }
    }

    return sum;
}
1241 
1242 static int get_intra_count(MPVEncContext *const s, const uint8_t *src,
1243  const uint8_t *ref, int stride)
1244 {
1245  int x, y, w, h;
1246  int acc = 0;
1247 
1248  w = s->c.width & ~15;
1249  h = s->c.height & ~15;
1250 
1251  for (y = 0; y < h; y += 16) {
1252  for (x = 0; x < w; x += 16) {
1253  int offset = x + y * stride;
1254  int sad = s->sad_cmp[0](NULL, src + offset, ref + offset,
1255  stride, 16);
1256  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1257  int sae = get_sae(src + offset, mean, stride);
1258 
1259  acc += sae + 500 < sad;
1260  }
1261  }
1262  return acc;
1263 }
1264 
/**
 * Allocates new buffers for an AVFrame and copies the properties
 * from another AVFrame.
 *
 * The frame is allocated EDGE_WIDTH pixels larger on every side; the data
 * pointers are then advanced past the border so f->data[] points at the
 * visible picture and f->width/f->height are restored to the coded size.
 */
static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
{
    AVCodecContext *avctx = s->c.avctx;
    int ret;

    /* temporarily enlarge so the allocation includes the edge borders */
    f->width  = avctx->width  + 2 * EDGE_WIDTH;
    f->height = avctx->height + 2 * EDGE_WIDTH;

    /* NOTE(review): the call that allocates the buffers and sets 'ret'
     * appears to be missing from this copy of the file ('ret' would be
     * read uninitialized below) — confirm against upstream. */
    if (ret < 0)
        return ret;

    ret = ff_mpv_pic_check_linesize(avctx, f, &s->c.linesize, &s->c.uvlinesize);
    if (ret < 0)
        return ret;

    /* skip the top-left border so data[] points at the visible picture;
     * chroma planes use the subsampled edge width */
    for (int i = 0; f->data[i]; i++) {
        int offset = (EDGE_WIDTH >> (i ? s->c.chroma_y_shift : 0)) *
                     f->linesize[i] +
                     (EDGE_WIDTH >> (i ? s->c.chroma_x_shift : 0));
        f->data[i] += offset;
    }
    f->width  = avctx->width;
    f->height = avctx->height;

    ret = av_frame_copy_props(f, props_frame);
    if (ret < 0)
        return ret;

    return 0;
}
1300 
/**
 * Accept one user-supplied frame (or NULL on flush) into the encoder's
 * reorder buffer.
 *
 * Validates/derives the pts, then either references the user frame
 * directly (when strides and alignment match) or copies it into a pooled
 * picture with padded edges. On flush (pic_arg == NULL) the remaining
 * buffered pictures are shifted down so input_picture[0] is populated.
 *
 * @param m       main encoder context
 * @param pic_arg next input frame, or NULL to signal end of stream
 * @return 0 on success, a negative AVERROR code on failure
 */
static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
{
    MPVEncContext *const s = &m->s;
    MPVPicture *pic = NULL;
    int64_t pts;
    int display_picture_number = 0, ret;
    int encoding_delay = m->max_b_frames ? m->max_b_frames
                                         : (s->c.low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    av_assert1(!m->input_picture[0]);

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = m->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (m->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = m->user_specified_pts;

                /* pts must be strictly monotonic */
                if (pts <= last) {
                    av_log(s->c.avctx, AV_LOG_ERROR,
                           "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                           pts, last);
                    return AVERROR(EINVAL);
                }

                if (!s->c.low_delay && display_picture_number == 1)
                    m->dts_delta = pts - last;
            }
            m->user_specified_pts = pts;
        } else {
            /* no pts supplied: extrapolate from the previous one, or fall
             * back to the display picture number */
            if (m->user_specified_pts != AV_NOPTS_VALUE) {
                m->user_specified_pts =
                pts = m->user_specified_pts + 1;
                av_log(s->c.avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* the input frame can only be referenced directly when its layout
         * exactly matches what the encoder expects */
        if (pic_arg->linesize[0] != s->c.linesize ||
            pic_arg->linesize[1] != s->c.uvlinesize ||
            pic_arg->linesize[2] != s->c.uvlinesize)
            direct = 0;
        if ((s->c.width & 15) || (s->c.height & 15))
            direct = 0;
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
            direct = 0;
        if (s->c.linesize & (STRIDE_ALIGN-1))
            direct = 0;

        ff_dlog(s->c.avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->c.linesize, s->c.uvlinesize);

        pic = av_refstruct_pool_get(s->c.picture_pool);
        if (!pic)
            return AVERROR(ENOMEM);

        if (direct) {
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                goto fail;
            pic->shared = 1;
        } else {
            /* copy plane by plane into freshly allocated, edge-padded buffers */
            ret = prepare_picture(s, pic->f, pic_arg);
            if (ret < 0)
                goto fail;

            for (int i = 0; i < 3; i++) {
                ptrdiff_t src_stride = pic_arg->linesize[i];
                ptrdiff_t dst_stride = i ? s->c.uvlinesize : s->c.linesize;
                int h_shift = i ? s->c.chroma_x_shift : 0;
                int v_shift = i ? s->c.chroma_y_shift : 0;
                int w = AV_CEIL_RSHIFT(s->c.width , h_shift);
                int h = AV_CEIL_RSHIFT(s->c.height, v_shift);
                const uint8_t *src = pic_arg->data[i];
                uint8_t *dst = pic->f->data[i];
                int vpad = 16;

                /* interlaced MPEG-2 needs 32-line padding when the coded
                 * height is more than 16 lines short of a 32 multiple */
                if (   s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO
                    && !s->c.progressive_sequence
                    && FFALIGN(s->c.height, 32) - s->c.height > 16)
                    vpad = 32;

                if (!s->c.avctx->rc_buffer_size)
                    dst += INPLACE_OFFSET;

                if (src_stride == dst_stride)
                    memcpy(dst, src, src_stride * h - src_stride + w);
                else {
                    int h2 = h;
                    uint8_t *dst2 = dst;
                    while (h2--) {
                        memcpy(dst2, src, w);
                        dst2 += dst_stride;
                        src += src_stride;
                    }
                }
                /* replicate the last pixels into the bottom/right padding
                 * when the picture size is not a multiple of the MB size */
                if ((s->c.width & 15) || (s->c.height & (vpad-1))) {
                    s->mpvencdsp.draw_edges(dst, dst_stride,
                                            w, h,
                                            16 >> h_shift,
                                            vpad >> v_shift,
                                            EDGE_BOTTOM);
                }
            }
            emms_c();
        }

        pic->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg
    } else if (!m->reordered_input_picture[1]) {
        /* Flushing: When the above check is true, the encoder is about to run
         * out of frames to encode. Check if there are input_pictures left;
         * if so, ensure m->input_picture[0] contains the first picture.
         * A flush_offset != 1 will only happen if we did not receive enough
         * input frames. */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (m->input_picture[flush_offset])
                break;

        encoding_delay -= flush_offset - 1;
    }

    /* shift buffer entries */
    for (int i = flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
        m->input_picture[i - flush_offset] = m->input_picture[i];
    for (int i = MPVENC_MAX_B_FRAMES + 1 - flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
        m->input_picture[i] = NULL;

    m->input_picture[encoding_delay] = pic;

    return 0;
fail:
    av_refstruct_unref(&pic);
    return ret;
}
1441 
1442 static int skip_check(MPVMainEncContext *const m,
1443  const MPVPicture *p, const MPVPicture *ref)
1444 {
1445  MPVEncContext *const s = &m->s;
1446  int score = 0;
1447  int64_t score64 = 0;
1448 
1449  for (int plane = 0; plane < 3; plane++) {
1450  const int stride = p->f->linesize[plane];
1451  const int bw = plane ? 1 : 2;
1452  for (int y = 0; y < s->c.mb_height * bw; y++) {
1453  for (int x = 0; x < s->c.mb_width * bw; x++) {
1454  int off = p->shared ? 0 : 16;
1455  const uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1456  const uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1457  int v = m->frame_skip_cmp_fn(s, dptr, rptr, stride, 8);
1458 
1459  switch (FFABS(m->frame_skip_exp)) {
1460  case 0: score = FFMAX(score, v); break;
1461  case 1: score += FFABS(v); break;
1462  case 2: score64 += v * (int64_t)v; break;
1463  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1464  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1465  }
1466  }
1467  }
1468  }
1469  emms_c();
1470 
1471  if (score)
1472  score64 = score;
1473  if (m->frame_skip_exp < 0)
1474  score64 = pow(score64 / (double)(s->c.mb_width * s->c.mb_height),
1475  -1.0/m->frame_skip_exp);
1476 
1477  if (score64 < m->frame_skip_threshold)
1478  return 1;
1479  if (score64 < ((m->frame_skip_factor * (int64_t) s->lambda) >> 8))
1480  return 1;
1481  return 0;
1482 }
1483 
1485 {
1486  int ret;
1487  int size = 0;
1488 
1490  if (ret < 0)
1491  return ret;
1492 
1493  do {
1495  if (ret >= 0) {
1496  size += pkt->size;
1498  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1499  return ret;
1500  } while (ret >= 0);
1501 
1502  return size;
1503 }
1504 
1506 {
1507  MPVEncContext *const s = &m->s;
1508  AVPacket *pkt;
1509  const int scale = m->brd_scale;
1510  int width = s->c.width >> scale;
1511  int height = s->c.height >> scale;
1512  int out_size, p_lambda, b_lambda, lambda2;
1513  int64_t best_rd = INT64_MAX;
1514  int best_b_count = -1;
1515  int ret = 0;
1516 
1517  av_assert0(scale >= 0 && scale <= 3);
1518 
1519  pkt = av_packet_alloc();
1520  if (!pkt)
1521  return AVERROR(ENOMEM);
1522 
1523  //emms_c();
1524  p_lambda = m->last_lambda_for[AV_PICTURE_TYPE_P];
1525  //p_lambda * FFABS(s->c.avctx->b_quant_factor) + s->c.avctx->b_quant_offset;
1526  b_lambda = m->last_lambda_for[AV_PICTURE_TYPE_B];
1527  if (!b_lambda) // FIXME we should do this somewhere else
1528  b_lambda = p_lambda;
1529  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1531 
1532  for (int i = 0; i < m->max_b_frames + 2; i++) {
1533  const MPVPicture *pre_input_ptr = i ? m->input_picture[i - 1] :
1534  s->c.next_pic.ptr;
1535 
1536  if (pre_input_ptr) {
1537  const uint8_t *data[4];
1538  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1539 
1540  if (!pre_input_ptr->shared && i) {
1541  data[0] += INPLACE_OFFSET;
1542  data[1] += INPLACE_OFFSET;
1543  data[2] += INPLACE_OFFSET;
1544  }
1545 
1546  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[0],
1547  m->tmp_frames[i]->linesize[0],
1548  data[0],
1549  pre_input_ptr->f->linesize[0],
1550  width, height);
1551  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[1],
1552  m->tmp_frames[i]->linesize[1],
1553  data[1],
1554  pre_input_ptr->f->linesize[1],
1555  width >> 1, height >> 1);
1556  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[2],
1557  m->tmp_frames[i]->linesize[2],
1558  data[2],
1559  pre_input_ptr->f->linesize[2],
1560  width >> 1, height >> 1);
1561  }
1562  }
1563 
1564  for (int j = 0; j < m->max_b_frames + 1; j++) {
1565  AVCodecContext *c;
1566  int64_t rd = 0;
1567 
1568  if (!m->input_picture[j])
1569  break;
1570 
1572  if (!c) {
1573  ret = AVERROR(ENOMEM);
1574  goto fail;
1575  }
1576 
1577  c->width = width;
1578  c->height = height;
1580  c->flags |= s->c.avctx->flags & AV_CODEC_FLAG_QPEL;
1581  c->mb_decision = s->c.avctx->mb_decision;
1582  c->me_cmp = s->c.avctx->me_cmp;
1583  c->mb_cmp = s->c.avctx->mb_cmp;
1584  c->me_sub_cmp = s->c.avctx->me_sub_cmp;
1585  c->pix_fmt = AV_PIX_FMT_YUV420P;
1586  c->time_base = s->c.avctx->time_base;
1587  c->max_b_frames = m->max_b_frames;
1588 
1589  ret = avcodec_open2(c, s->c.avctx->codec, NULL);
1590  if (ret < 0)
1591  goto fail;
1592 
1593 
1595  m->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1596 
1597  out_size = encode_frame(c, m->tmp_frames[0], pkt);
1598  if (out_size < 0) {
1599  ret = out_size;
1600  goto fail;
1601  }
1602 
1603  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1604 
1605  for (int i = 0; i < m->max_b_frames + 1; i++) {
1606  int is_p = i % (j + 1) == j || i == m->max_b_frames;
1607 
1608  m->tmp_frames[i + 1]->pict_type = is_p ?
1610  m->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1611 
1612  out_size = encode_frame(c, m->tmp_frames[i + 1], pkt);
1613  if (out_size < 0) {
1614  ret = out_size;
1615  goto fail;
1616  }
1617 
1618  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1619  }
1620 
1621  /* get the delayed frames */
1623  if (out_size < 0) {
1624  ret = out_size;
1625  goto fail;
1626  }
1627  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1628 
1629  rd += c->error[0] + c->error[1] + c->error[2];
1630 
1631  if (rd < best_rd) {
1632  best_rd = rd;
1633  best_b_count = j;
1634  }
1635 
1636 fail:
1639  if (ret < 0) {
1640  best_b_count = ret;
1641  break;
1642  }
1643  }
1644 
1645  av_packet_free(&pkt);
1646 
1647  return best_b_count;
1648 }
1649 
1650 /**
1651  * Determines whether an input picture is discarded or not
1652  * and if not determines the length of the next chain of B frames
1653  * and moves these pictures (including the P frame) into
1654  * reordered_input_picture.
1655  * input_picture[0] is always NULL when exiting this function, even on error;
1656  * reordered_input_picture[0] is always NULL when exiting this function on error.
1657  */
1659 {
1660  MPVEncContext *const s = &m->s;
1661 
1662  /* Either nothing to do or can't do anything */
1663  if (m->reordered_input_picture[0] || !m->input_picture[0])
1664  return 0;
1665 
1666  /* set next picture type & ordering */
1667  if (m->frame_skip_threshold || m->frame_skip_factor) {
1668  if (m->picture_in_gop_number < m->gop_size &&
1669  s->c.next_pic.ptr &&
1670  skip_check(m, m->input_picture[0], s->c.next_pic.ptr)) {
1671  // FIXME check that the gop check above is +-1 correct
1673 
1674  ff_vbv_update(m, 0);
1675 
1676  return 0;
1677  }
1678  }
1679 
1680  if (/* m->picture_in_gop_number >= m->gop_size || */
1681  !s->c.next_pic.ptr || m->intra_only) {
1682  m->reordered_input_picture[0] = m->input_picture[0];
1683  m->input_picture[0] = NULL;
1686  m->coded_picture_number++;
1687  } else {
1688  int b_frames = 0;
1689 
1690  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
1691  for (int i = 0; i < m->max_b_frames + 1; i++) {
1692  int pict_num = m->input_picture[0]->display_picture_number + i;
1693 
1694  if (pict_num >= m->rc_context.num_entries)
1695  break;
1696  if (!m->input_picture[i]) {
1697  m->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1698  break;
1699  }
1700 
1701  m->input_picture[i]->f->pict_type =
1702  m->rc_context.entry[pict_num].new_pict_type;
1703  }
1704  }
1705 
1706  if (m->b_frame_strategy == 0) {
1707  b_frames = m->max_b_frames;
1708  while (b_frames && !m->input_picture[b_frames])
1709  b_frames--;
1710  } else if (m->b_frame_strategy == 1) {
1711  for (int i = 1; i < m->max_b_frames + 1; i++) {
1712  if (m->input_picture[i] &&
1713  m->input_picture[i]->b_frame_score == 0) {
1716  m->input_picture[i ]->f->data[0],
1717  m->input_picture[i - 1]->f->data[0],
1718  s->c.linesize) + 1;
1719  }
1720  }
1721  for (int i = 0;; i++) {
1722  if (i >= m->max_b_frames + 1 ||
1723  !m->input_picture[i] ||
1724  m->input_picture[i]->b_frame_score - 1 >
1725  s->c.mb_num / m->b_sensitivity) {
1726  b_frames = FFMAX(0, i - 1);
1727  break;
1728  }
1729  }
1730 
1731  /* reset scores */
1732  for (int i = 0; i < b_frames + 1; i++)
1733  m->input_picture[i]->b_frame_score = 0;
1734  } else if (m->b_frame_strategy == 2) {
1735  b_frames = estimate_best_b_count(m);
1736  if (b_frames < 0) {
1738  return b_frames;
1739  }
1740  }
1741 
1742  emms_c();
1743 
1744  for (int i = b_frames - 1; i >= 0; i--) {
1745  int type = m->input_picture[i]->f->pict_type;
1746  if (type && type != AV_PICTURE_TYPE_B)
1747  b_frames = i;
1748  }
1749  if (m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1750  b_frames == m->max_b_frames) {
1751  av_log(s->c.avctx, AV_LOG_ERROR,
1752  "warning, too many B-frames in a row\n");
1753  }
1754 
1755  if (m->picture_in_gop_number + b_frames >= m->gop_size) {
1756  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1757  m->gop_size > m->picture_in_gop_number) {
1758  b_frames = m->gop_size - m->picture_in_gop_number - 1;
1759  } else {
1760  if (s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1761  b_frames = 0;
1762  m->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1763  }
1764  }
1765 
1766  if ((s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1767  m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1768  b_frames--;
1769 
1770  m->reordered_input_picture[0] = m->input_picture[b_frames];
1771  m->input_picture[b_frames] = NULL;
1775  m->coded_picture_number++;
1776  for (int i = 0; i < b_frames; i++) {
1777  m->reordered_input_picture[i + 1] = m->input_picture[i];
1778  m->input_picture[i] = NULL;
1779  m->reordered_input_picture[i + 1]->f->pict_type =
1782  m->coded_picture_number++;
1783  }
1784  }
1785 
1786  return 0;
1787 }
1788 
1790 {
1791  MPVEncContext *const s = &m->s;
1792  int ret;
1793 
1795 
1796  for (int i = 1; i <= MPVENC_MAX_B_FRAMES; i++)
1799 
1801  av_assert1(!m->input_picture[0]);
1802  if (ret < 0)
1803  return ret;
1804 
1805  av_frame_unref(s->new_pic);
1806 
1807  if (m->reordered_input_picture[0]) {
1810 
1811  if (m->reordered_input_picture[0]->shared || s->c.avctx->rc_buffer_size) {
1812  // input is a shared pix, so we can't modify it -> allocate a new
1813  // one & ensure that the shared one is reuseable
1814  av_frame_move_ref(s->new_pic, m->reordered_input_picture[0]->f);
1815 
1816  ret = prepare_picture(s, m->reordered_input_picture[0]->f, s->new_pic);
1817  if (ret < 0)
1818  goto fail;
1819  } else {
1820  // input is not a shared pix -> reuse buffer for current_pix
1821  ret = av_frame_ref(s->new_pic, m->reordered_input_picture[0]->f);
1822  if (ret < 0)
1823  goto fail;
1824  for (int i = 0; i < MPV_MAX_PLANES; i++) {
1825  if (s->new_pic->data[i])
1826  s->new_pic->data[i] += INPLACE_OFFSET;
1827  }
1828  }
1829  s->c.cur_pic.ptr = m->reordered_input_picture[0];
1830  m->reordered_input_picture[0] = NULL;
1831  av_assert1(s->c.mb_width == s->c.buffer_pools.alloc_mb_width);
1832  av_assert1(s->c.mb_height == s->c.buffer_pools.alloc_mb_height);
1833  av_assert1(s->c.mb_stride == s->c.buffer_pools.alloc_mb_stride);
1834  ret = ff_mpv_alloc_pic_accessories(s->c.avctx, &s->c.cur_pic,
1835  &s->c.sc, &s->c.buffer_pools, s->c.mb_height);
1836  if (ret < 0) {
1837  ff_mpv_unref_picture(&s->c.cur_pic);
1838  return ret;
1839  }
1840  s->c.picture_number = s->c.cur_pic.ptr->display_picture_number;
1841 
1842  }
1843  return 0;
1844 fail:
1846  return ret;
1847 }
1848 
/**
 * Post-encode housekeeping for the just-coded picture: pad the borders of
 * the reconstructed reference picture and record the picture type and
 * lambda for later rate-control/reordering decisions.
 */
static void frame_end(MPVMainEncContext *const m)
{
    MPVEncContext *const s = &m->s;

    /* pad edges of reference pictures so unrestricted MVs can read
     * outside the picture; B frames are never references */
    if (s->c.unrestricted_mv &&
        s->c.cur_pic.reference &&
        !m->intra_only) {
        int hshift = s->c.chroma_x_shift;
        int vshift = s->c.chroma_y_shift;
        /* NOTE(review): this luma call appears to be missing its edge-width
         * argument line in this copy of the file — confirm against upstream. */
        s->mpvencdsp.draw_edges(s->c.cur_pic.data[0],
                                s->c.cur_pic.linesize[0],
                                s->c.h_edge_pos, s->c.v_edge_pos,
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->c.cur_pic.data[1],
                                s->c.cur_pic.linesize[1],
                                s->c.h_edge_pos >> hshift,
                                s->c.v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->c.cur_pic.data[2],
                                s->c.cur_pic.linesize[2],
                                s->c.h_edge_pos >> hshift,
                                s->c.v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    /* remember type and quality of this picture for future decisions */
    m->last_pict_type                 = s->c.pict_type;
    m->last_lambda_for[s->c.pict_type] = s->c.cur_pic.ptr->f->quality;
    if (s->c.pict_type != AV_PICTURE_TYPE_B)
        m->last_non_b_pict_type = s->c.pict_type;
}
1886 
1888 {
1889  MPVEncContext *const s = &m->s;
1890  int intra, i;
1891 
1892  for (intra = 0; intra < 2; intra++) {
1893  if (s->dct_count[intra] > (1 << 16)) {
1894  for (i = 0; i < 64; i++) {
1895  s->dct_error_sum[intra][i] >>= 1;
1896  }
1897  s->dct_count[intra] >>= 1;
1898  }
1899 
1900  for (i = 0; i < 64; i++) {
1901  s->dct_offset[intra][i] = (m->noise_reduction *
1902  s->dct_count[intra] +
1903  s->dct_error_sum[intra][i] / 2) /
1904  (s->dct_error_sum[intra][i] + 1);
1905  }
1906  }
1907 }
1908 
/**
 * Per-picture setup before encoding: propagate the picture type to the
 * frame and rotate the last/next reference pictures for non-B frames.
 */
static void frame_start(MPVMainEncContext *const m)
{
    MPVEncContext *const s = &m->s;

    s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;

    /* non-B pictures become the new forward reference */
    if (s->c.pict_type != AV_PICTURE_TYPE_B) {
        ff_mpv_replace_picture(&s->c.last_pic, &s->c.next_pic);
        ff_mpv_replace_picture(&s->c.next_pic, &s->c.cur_pic);
    }

    av_assert2(!!m->noise_reduction == !!s->dct_error_sum);
    if (s->dct_error_sum) {
        /* NOTE(review): the statement inside this branch (presumably the
         * noise-reduction update call) is missing from this copy of the
         * file — confirm against upstream. */
    }
}
1925 
1927  const AVFrame *pic_arg, int *got_packet)
1928 {
1929  MPVMainEncContext *const m = avctx->priv_data;
1930  MPVEncContext *const s = &m->s;
1931  int stuffing_count, ret;
1932  int context_count = s->c.slice_context_count;
1933 
1934  ff_mpv_unref_picture(&s->c.cur_pic);
1935 
1936  m->vbv_ignore_qmax = 0;
1937 
1938  m->picture_in_gop_number++;
1939 
1940  ret = load_input_picture(m, pic_arg);
1941  if (ret < 0)
1942  return ret;
1943 
1945  if (ret < 0)
1946  return ret;
1947 
1948  /* output? */
1949  if (s->new_pic->data[0]) {
1950  int growing_buffer = context_count == 1 && !s->c.data_partitioning;
1951  size_t pkt_size = 10000 + s->c.mb_width * s->c.mb_height *
1952  (growing_buffer ? 64 : (MAX_MB_BYTES + 100));
1953  if (CONFIG_MJPEG_ENCODER && avctx->codec_id == AV_CODEC_ID_MJPEG) {
1954  ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_pic, &pkt_size);
1955  if (ret < 0)
1956  return ret;
1957  }
1958  if ((ret = ff_alloc_packet(avctx, pkt, pkt_size)) < 0)
1959  return ret;
1961  if (s->mb_info) {
1962  s->mb_info_ptr = av_packet_new_side_data(pkt,
1964  s->c.mb_width*s->c.mb_height*12);
1965  if (!s->mb_info_ptr)
1966  return AVERROR(ENOMEM);
1967  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1968  }
1969 
1970  s->c.pict_type = s->new_pic->pict_type;
1971  //emms_c();
1972  frame_start(m);
1973 vbv_retry:
1974  ret = encode_picture(m, pkt);
1975  if (growing_buffer) {
1976  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1977  pkt->data = s->pb.buf;
1979  }
1980  if (ret < 0)
1981  return -1;
1982 
1983  frame_end(m);
1984 
1985  if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->c.out_format == FMT_MJPEG)
1987 
1988  if (avctx->rc_buffer_size) {
1989  RateControlContext *rcc = &m->rc_context;
1990  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1991  int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1992  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1993 
1994  if (put_bits_count(&s->pb) > max_size &&
1995  s->lambda < m->lmax) {
1996  m->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1997  (s->c.qscale + 1) / s->c.qscale);
1998  if (s->adaptive_quant) {
1999  for (int i = 0; i < s->c.mb_height * s->c.mb_stride; i++)
2000  s->lambda_table[i] =
2001  FFMAX(s->lambda_table[i] + min_step,
2002  s->lambda_table[i] * (s->c.qscale + 1) /
2003  s->c.qscale);
2004  }
2005  s->c.mb_skipped = 0; // done in frame_start()
2006  // done in encode_picture() so we must undo it
2007  if (s->c.pict_type == AV_PICTURE_TYPE_P) {
2008  s->c.no_rounding ^= s->c.flipflop_rounding;
2009  }
2010  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
2011  s->c.time_base = s->c.last_time_base;
2012  s->c.last_non_b_time = s->c.time - s->c.pp_time;
2013  }
2014  m->vbv_ignore_qmax = 1;
2015  av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
2016  goto vbv_retry;
2017  }
2018 
2020  }
2021 
2024 
2025  for (int i = 0; i < MPV_MAX_PLANES; i++)
2026  avctx->error[i] += s->encoding_error[i];
2027  ff_side_data_set_encoder_stats(pkt, s->c.cur_pic.ptr->f->quality,
2028  s->encoding_error,
2030  s->c.pict_type);
2031 
2033  assert(put_bits_count(&s->pb) == m->header_bits + s->mv_bits +
2034  s->misc_bits + s->i_tex_bits +
2035  s->p_tex_bits);
2036  flush_put_bits(&s->pb);
2037  m->frame_bits = put_bits_count(&s->pb);
2038 
2039  stuffing_count = ff_vbv_update(m, m->frame_bits);
2040  m->stuffing_bits = 8*stuffing_count;
2041  if (stuffing_count) {
2042  if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
2043  av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
2044  return -1;
2045  }
2046 
2047  switch (s->c.codec_id) {
2050  while (stuffing_count--) {
2051  put_bits(&s->pb, 8, 0);
2052  }
2053  break;
2054  case AV_CODEC_ID_MPEG4:
2055  put_bits(&s->pb, 16, 0);
2056  put_bits(&s->pb, 16, 0x1C3);
2057  stuffing_count -= 4;
2058  while (stuffing_count--) {
2059  put_bits(&s->pb, 8, 0xFF);
2060  }
2061  break;
2062  default:
2063  av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2064  m->stuffing_bits = 0;
2065  }
2066  flush_put_bits(&s->pb);
2067  m->frame_bits = put_bits_count(&s->pb);
2068  }
2069 
2070  /* update MPEG-1/2 vbv_delay for CBR */
2071  if (avctx->rc_max_rate &&
2073  s->c.out_format == FMT_MPEG1 &&
2074  90000LL * (avctx->rc_buffer_size - 1) <=
2075  avctx->rc_max_rate * 0xFFFFLL) {
2076  AVCPBProperties *props;
2077  size_t props_size;
2078 
2079  int vbv_delay, min_delay;
2080  double inbits = avctx->rc_max_rate *
2082  int minbits = m->frame_bits - 8 *
2083  (m->vbv_delay_pos - 1);
2084  double bits = m->rc_context.buffer_index + minbits - inbits;
2085  uint8_t *const vbv_delay_ptr = s->pb.buf + m->vbv_delay_pos;
2086 
2087  if (bits < 0)
2089  "Internal error, negative bits\n");
2090 
2091  av_assert1(s->c.repeat_first_field == 0);
2092 
2093  vbv_delay = bits * 90000 / avctx->rc_max_rate;
2094  min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2095  avctx->rc_max_rate;
2096 
2097  vbv_delay = FFMAX(vbv_delay, min_delay);
2098 
2099  av_assert0(vbv_delay < 0xFFFF);
2100 
2101  vbv_delay_ptr[0] &= 0xF8;
2102  vbv_delay_ptr[0] |= vbv_delay >> 13;
2103  vbv_delay_ptr[1] = vbv_delay >> 5;
2104  vbv_delay_ptr[2] &= 0x07;
2105  vbv_delay_ptr[2] |= vbv_delay << 3;
2106 
2107  props = av_cpb_properties_alloc(&props_size);
2108  if (!props)
2109  return AVERROR(ENOMEM);
2110  props->vbv_delay = vbv_delay * 300;
2111 
2113  (uint8_t*)props, props_size);
2114  if (ret < 0) {
2115  av_freep(&props);
2116  return ret;
2117  }
2118  }
2119  m->total_bits += m->frame_bits;
2120 
2121  pkt->pts = s->c.cur_pic.ptr->f->pts;
2122  pkt->duration = s->c.cur_pic.ptr->f->duration;
2123  if (!s->c.low_delay && s->c.pict_type != AV_PICTURE_TYPE_B) {
2124  if (!s->c.cur_pic.ptr->coded_picture_number)
2125  pkt->dts = pkt->pts - m->dts_delta;
2126  else
2127  pkt->dts = m->reordered_pts;
2128  m->reordered_pts = pkt->pts;
2129  } else
2130  pkt->dts = pkt->pts;
2131 
2132  // the no-delay case is handled in generic code
2134  ret = ff_encode_reordered_opaque(avctx, pkt, s->c.cur_pic.ptr->f);
2135  if (ret < 0)
2136  return ret;
2137  }
2138 
2139  if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
2141  if (s->mb_info)
2143  } else {
2144  m->frame_bits = 0;
2145  }
2146 
2147  ff_mpv_unref_picture(&s->c.cur_pic);
2148 
2149  av_assert1((m->frame_bits & 7) == 0);
2150 
2151  pkt->size = m->frame_bits / 8;
2152  *got_packet = !!pkt->size;
2153  return 0;
2154 }
2155 
                                         int n, int threshold)
{
    /* Per-run score: isolated ±1 coefficients early in the scan are cheap
     * to drop; a coefficient preceded by a long zero run (tab value 0)
     * contributes nothing, making elimination more likely. */
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    };
    int score = 0;
    int run = 0;
    int i;
    int16_t *block = s->c.block[n];
    const int last_index = s->c.block_last_index[n];
    int skip_dc;

    /* a negative threshold requests that the DC coefficient also be
     * considered for elimination */
    if (threshold < 0) {
        skip_dc = 0;
        threshold = -threshold;
    } else
        skip_dc = 1;

    /* Are all we could set to zero already zero? */
    if (last_index <= skip_dc - 1)
        return;

    /* score the block; any coefficient with |level| > 1 makes it
     * ineligible for elimination */
    for (i = 0; i <= last_index; i++) {
        const int j = s->c.intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (level == 1) {
            if (skip_dc && i == 0)
                continue;
            score += tab[run];
            run = 0;
        } else if (level > 1) {
            return;
        } else {
            run++;
        }
    }
    if (score >= threshold)
        return;
    /* cheap enough: zero everything (except possibly the DC) */
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->c.intra_scantable.permutated[i];
        block[j] = 0;
    }
    if (block[0])
        s->c.block_last_index[n] = 0;
    else
        s->c.block_last_index[n] = -1;
}
2211 
2212 static inline void clip_coeffs(const MPVEncContext *const s, int16_t block[],
2213  int last_index)
2214 {
2215  int i;
2216  const int maxlevel = s->max_qcoeff;
2217  const int minlevel = s->min_qcoeff;
2218  int overflow = 0;
2219 
2220  if (s->c.mb_intra) {
2221  i = 1; // skip clipping of intra dc
2222  } else
2223  i = 0;
2224 
2225  for (; i <= last_index; i++) {
2226  const int j = s->c.intra_scantable.permutated[i];
2227  int level = block[j];
2228 
2229  if (level > maxlevel) {
2230  level = maxlevel;
2231  overflow++;
2232  } else if (level < minlevel) {
2233  level = minlevel;
2234  overflow++;
2235  }
2236 
2237  block[j] = level;
2238  }
2239 
2240  if (overflow && s->c.avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2241  av_log(s->c.avctx, AV_LOG_INFO,
2242  "warning, clipping %d dct coefficients to %d..%d\n",
2243  overflow, minlevel, maxlevel);
2244 }
2245 
2246 static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
2247 {
2248  int x, y;
2249  // FIXME optimize
2250  for (y = 0; y < 8; y++) {
2251  for (x = 0; x < 8; x++) {
2252  int x2, y2;
2253  int sum = 0;
2254  int sqr = 0;
2255  int count = 0;
2256 
2257  for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2258  for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2259  int v = ptr[x2 + y2 * stride];
2260  sum += v;
2261  sqr += v * v;
2262  count++;
2263  }
2264  }
2265  weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2266  }
2267  }
2268 }
2269 
                                           int motion_x, int motion_y,
                                           int mb_block_height,
                                           int mb_block_width,
                                           int mb_block_count,
                                           int chroma_x_shift,
                                           int chroma_y_shift,
                                           int chroma_format)
{
    /* Core per-macroblock encode: sets up quantizer deltas, fetches source
     * pixels (with edge emulation at picture borders), decides progressive
     * vs. interlaced DCT, runs DCT + quantization with optional coefficient
     * elimination / noise shaping, then calls the codec's encode_mb. */
/* Interlaced DCT is only possible with MPEG-2 and MPEG-4
 * and neither of these encoders currently supports 444. */
#define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
                           (s)->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
    int16_t weight[12][64];
    int16_t orig[12][64];
    const int mb_x = s->c.mb_x;
    const int mb_y = s->c.mb_y;
    int i;
    int skip_dct[12];
    int dct_offset = s->c.linesize * 8; // default for progressive frames
    int uv_dct_offset = s->c.uvlinesize * 8;
    const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    ptrdiff_t wrap_y, wrap_c;

    for (i = 0; i < mb_block_count; i++)
        skip_dct[i] = s->skipdct;

    if (s->adaptive_quant) {
        /* per-MB quantizer: take lambda from the rate-control table and
         * derive the qscale delta against the previous MB */
        const int last_qp = s->c.qscale;
        const int mb_xy = mb_x + mb_y * s->c.mb_stride;

        s->lambda = s->lambda_table[mb_xy];
        /* NOTE(review): the right-hand shift amount was lost in this copy —
         * presumably FF_LAMBDA_SHIFT; verify against upstream */
        s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>

        if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
            s->dquant = s->c.cur_pic.qscale_table[mb_xy] - last_qp;

            if (s->c.out_format == FMT_H263) {
                s->dquant = av_clip(s->dquant, -2, 2);

                if (s->c.codec_id == AV_CODEC_ID_MPEG4) {
                    if (!s->c.mb_intra) {
                        if (s->c.pict_type == AV_PICTURE_TYPE_B) {
                            if (s->dquant & 1 || s->c.mv_dir & MV_DIRECT)
                                s->dquant = 0;
                        }
                        if (s->c.mv_type == MV_TYPE_8X8)
                            s->dquant = 0;
                    }
                }
            }
        }
        ff_set_qscale(&s->c, last_qp + s->dquant);
    } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
        ff_set_qscale(&s->c, s->c.qscale + s->dquant);

    /* source pixel pointers for luma and both chroma planes */
    wrap_y = s->c.linesize;
    wrap_c = s->c.uvlinesize;
    ptr_y = s->new_pic->data[0] +
            (mb_y * 16 * wrap_y) + mb_x * 16;
    ptr_cb = s->new_pic->data[1] +
             (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
    ptr_cr = s->new_pic->data[2] +
             (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;

    /* picture-border MB: replicate edges into a scratch buffer so the
     * DCT always reads a full 16x16 / chroma block */
    if ((mb_x * 16 + 16 > s->c.width || mb_y * 16 + 16 > s->c.height) &&
        s->c.codec_id != AV_CODEC_ID_AMV) {
        uint8_t *ebuf = s->c.sc.edge_emu_buffer + 38 * wrap_y;
        int cw = (s->c.width + chroma_x_shift) >> chroma_x_shift;
        int ch = (s->c.height + chroma_y_shift) >> chroma_y_shift;
        s->c.vdsp.emulated_edge_mc(ebuf, ptr_y,
                                   wrap_y, wrap_y,
                                   16, 16, mb_x * 16, mb_y * 16,
                                   s->c.width, s->c.height);
        ptr_y = ebuf;
        s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
                                   wrap_c, wrap_c,
                                   mb_block_width, mb_block_height,
                                   mb_x * mb_block_width, mb_y * mb_block_height,
                                   cw, ch);
        ptr_cb = ebuf + 16 * wrap_y;
        s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
                                   wrap_c, wrap_c,
                                   mb_block_width, mb_block_height,
                                   mb_x * mb_block_width, mb_y * mb_block_height,
                                   cw, ch);
        ptr_cr = ebuf + 16 * wrap_y + 16;
    }

    if (s->c.mb_intra) {
        if (INTERLACED_DCT(s)) {
            /* compare field-vs-frame DCT cost on the source pixels */
            int progressive_score, interlaced_score;

            s->c.interlaced_dct = 0;
            progressive_score = s->ildct_cmp[1](s, ptr_y, NULL, wrap_y, 8) +
                                s->ildct_cmp[1](s, ptr_y + wrap_y * 8,
                                                NULL, wrap_y, 8) - 400;

            if (progressive_score > 0) {
                interlaced_score = s->ildct_cmp[1](s, ptr_y,
                                                   NULL, wrap_y * 2, 8) +
                                   s->ildct_cmp[1](s, ptr_y + wrap_y,
                                                   NULL, wrap_y * 2, 8);
                if (progressive_score > interlaced_score) {
                    s->c.interlaced_dct = 1;

                    dct_offset = wrap_y;
                    uv_dct_offset = wrap_c;
                    wrap_y <<= 1;
                    /* NOTE(review): the second operand of this condition was
                     * lost in this copy — presumably chroma_format ==
                     * CHROMA_444; verify against upstream */
                    if (chroma_format == CHROMA_422 ||
                        wrap_c <<= 1;
                }
            }
        }

        s->pdsp.get_pixels(s->c.block[0], ptr_y, wrap_y);
        s->pdsp.get_pixels(s->c.block[1], ptr_y + 8, wrap_y);
        s->pdsp.get_pixels(s->c.block[2], ptr_y + dct_offset, wrap_y);
        s->pdsp.get_pixels(s->c.block[3], ptr_y + dct_offset + 8, wrap_y);

        if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
            skip_dct[4] = 1;
            skip_dct[5] = 1;
        } else {
            s->pdsp.get_pixels(s->c.block[4], ptr_cb, wrap_c);
            s->pdsp.get_pixels(s->c.block[5], ptr_cr, wrap_c);
            if (chroma_format == CHROMA_422) {
                s->pdsp.get_pixels(s->c.block[6], ptr_cb + uv_dct_offset, wrap_c);
                s->pdsp.get_pixels(s->c.block[7], ptr_cr + uv_dct_offset, wrap_c);
            } else if (chroma_format == CHROMA_444) {
                s->pdsp.get_pixels(s->c.block[ 6], ptr_cb + 8, wrap_c);
                s->pdsp.get_pixels(s->c.block[ 7], ptr_cr + 8, wrap_c);
                s->pdsp.get_pixels(s->c.block[ 8], ptr_cb + uv_dct_offset, wrap_c);
                s->pdsp.get_pixels(s->c.block[ 9], ptr_cr + uv_dct_offset, wrap_c);
                s->pdsp.get_pixels(s->c.block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
                s->pdsp.get_pixels(s->c.block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
            }
        }
    } else {
        /* inter MB: run motion compensation into s->c.dest, then DCT the
         * residual between source and prediction */
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        uint8_t *dest_y, *dest_cb, *dest_cr;

        dest_y = s->c.dest[0];
        dest_cb = s->c.dest[1];
        dest_cr = s->c.dest[2];

        if ((!s->c.no_rounding) || s->c.pict_type == AV_PICTURE_TYPE_B) {
            op_pix = s->c.hdsp.put_pixels_tab;
            op_qpix = s->c.qdsp.put_qpel_pixels_tab;
        } else {
            op_pix = s->c.hdsp.put_no_rnd_pixels_tab;
            op_qpix = s->c.qdsp.put_no_rnd_qpel_pixels_tab;
        }

        if (s->c.mv_dir & MV_DIR_FORWARD) {
            ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 0,
                          s->c.last_pic.data,
                          op_pix, op_qpix);
            /* second direction averages on top of the first prediction */
            op_pix = s->c.hdsp.avg_pixels_tab;
            op_qpix = s->c.qdsp.avg_qpel_pixels_tab;
        }
        if (s->c.mv_dir & MV_DIR_BACKWARD) {
            ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 1,
                          s->c.next_pic.data,
                          op_pix, op_qpix);
        }

        if (INTERLACED_DCT(s)) {
            int progressive_score, interlaced_score;

            s->c.interlaced_dct = 0;
            progressive_score = s->ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
                                s->ildct_cmp[0](s, dest_y + wrap_y * 8,
                                                ptr_y + wrap_y * 8,
                                                wrap_y, 8) - 400;

            if (s->c.avctx->ildct_cmp == FF_CMP_VSSE)
                progressive_score -= 400;

            if (progressive_score > 0) {
                interlaced_score = s->ildct_cmp[0](s, dest_y, ptr_y,
                                                   wrap_y * 2, 8) +
                                   s->ildct_cmp[0](s, dest_y + wrap_y,
                                                   ptr_y + wrap_y,
                                                   wrap_y * 2, 8);

                if (progressive_score > interlaced_score) {
                    s->c.interlaced_dct = 1;

                    dct_offset = wrap_y;
                    uv_dct_offset = wrap_c;
                    wrap_y <<= 1;
                    if (chroma_format == CHROMA_422)
                        wrap_c <<= 1;
                }
            }
        }

        s->pdsp.diff_pixels(s->c.block[0], ptr_y, dest_y, wrap_y);
        s->pdsp.diff_pixels(s->c.block[1], ptr_y + 8, dest_y + 8, wrap_y);
        s->pdsp.diff_pixels(s->c.block[2], ptr_y + dct_offset,
                            dest_y + dct_offset, wrap_y);
        s->pdsp.diff_pixels(s->c.block[3], ptr_y + dct_offset + 8,
                            dest_y + dct_offset + 8, wrap_y);

        if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
            skip_dct[4] = 1;
            skip_dct[5] = 1;
        } else {
            s->pdsp.diff_pixels(s->c.block[4], ptr_cb, dest_cb, wrap_c);
            s->pdsp.diff_pixels(s->c.block[5], ptr_cr, dest_cr, wrap_c);
            if (!chroma_y_shift) { /* 422 */
                s->pdsp.diff_pixels(s->c.block[6], ptr_cb + uv_dct_offset,
                                    dest_cb + uv_dct_offset, wrap_c);
                s->pdsp.diff_pixels(s->c.block[7], ptr_cr + uv_dct_offset,
                                    dest_cr + uv_dct_offset, wrap_c);
            }
        }
        /* pre quantization: skip DCT of blocks whose residual SAD is far
         * below the quantization threshold anyway */
        if (s->mc_mb_var[s->c.mb_stride * mb_y + mb_x] < 2 * s->c.qscale * s->c.qscale) {
            // FIXME optimize
            if (s->sad_cmp[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->c.qscale)
                skip_dct[0] = 1;
            if (s->sad_cmp[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->c.qscale)
                skip_dct[1] = 1;
            if (s->sad_cmp[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
                              wrap_y, 8) < 20 * s->c.qscale)
                skip_dct[2] = 1;
            if (s->sad_cmp[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
                              wrap_y, 8) < 20 * s->c.qscale)
                skip_dct[3] = 1;
            if (s->sad_cmp[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->c.qscale)
                skip_dct[4] = 1;
            if (s->sad_cmp[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->c.qscale)
                skip_dct[5] = 1;
            if (!chroma_y_shift) { /* 422 */
                if (s->sad_cmp[1](NULL, ptr_cb + uv_dct_offset,
                                  dest_cb + uv_dct_offset,
                                  wrap_c, 8) < 20 * s->c.qscale)
                    skip_dct[6] = 1;
                if (s->sad_cmp[1](NULL, ptr_cr + uv_dct_offset,
                                  dest_cr + uv_dct_offset,
                                  wrap_c, 8) < 20 * s->c.qscale)
                    skip_dct[7] = 1;
            }
        }
    }

    if (s->quantizer_noise_shaping) {
        /* compute per-pixel visual weights and keep the original blocks so
         * dct_quantize_refine() below can minimize weighted error */
        if (!skip_dct[0])
            get_visual_weight(weight[0], ptr_y , wrap_y);
        if (!skip_dct[1])
            get_visual_weight(weight[1], ptr_y + 8, wrap_y);
        if (!skip_dct[2])
            get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
        if (!skip_dct[3])
            get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
        if (!skip_dct[4])
            get_visual_weight(weight[4], ptr_cb , wrap_c);
        if (!skip_dct[5])
            get_visual_weight(weight[5], ptr_cr , wrap_c);
        if (!chroma_y_shift) { /* 422 */
            if (!skip_dct[6])
                get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
                                  wrap_c);
            if (!skip_dct[7])
                get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
                                  wrap_c);
        }
        memcpy(orig[0], s->c.block[0], sizeof(int16_t) * 64 * mb_block_count);
    }

    /* DCT & quantize */
    av_assert2(s->c.out_format != FMT_MJPEG || s->c.qscale == 8);
    {
        for (i = 0; i < mb_block_count; i++) {
            if (!skip_dct[i]) {
                int overflow;
                s->c.block_last_index[i] = s->dct_quantize(s, s->c.block[i], i, s->c.qscale, &overflow);
                // FIXME we could decide to change to quantizer instead of
                // clipping
                // JS: I don't think that would be a good idea it could lower
                //     quality instead of improve it. Just INTRADC clipping
                //     deserves changes in quantizer
                if (overflow)
                    clip_coeffs(s, s->c.block[i], s->c.block_last_index[i]);
            } else
                s->c.block_last_index[i] = -1;
        }
        if (s->quantizer_noise_shaping) {
            for (i = 0; i < mb_block_count; i++) {
                if (!skip_dct[i]) {
                    s->c.block_last_index[i] =
                        dct_quantize_refine(s, s->c.block[i], weight[i],
                                            orig[i], i, s->c.qscale);
                }
            }
        }

        if (s->luma_elim_threshold && !s->c.mb_intra)
            for (i = 0; i < 4; i++)
                dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
        if (s->chroma_elim_threshold && !s->c.mb_intra)
            for (i = 4; i < mb_block_count; i++)
                dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);

        if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
            for (i = 0; i < mb_block_count; i++) {
                if (s->c.block_last_index[i] == -1)
                    s->coded_score[i] = INT_MAX / 256;
            }
        }
    }

    /* gray-only encode: force chroma blocks to a flat mid-gray DC */
    if ((s->c.avctx->flags & AV_CODEC_FLAG_GRAY) && s->c.mb_intra) {
        s->c.block_last_index[4] =
        s->c.block_last_index[5] = 0;
        s->c.block[4][0] =
        s->c.block[5][0] = (1024 + s->c.c_dc_scale / 2) / s->c.c_dc_scale;
        if (!chroma_y_shift) { /* 422 / 444 */
            for (i=6; i<12; i++) {
                s->c.block_last_index[i] = 0;
                s->c.block[i][0] = s->c.block[4][0];
            }
        }
    }

    // non c quantize code returns incorrect block_last_index FIXME
    if (s->c.alternate_scan && s->dct_quantize != dct_quantize_c) {
        for (i = 0; i < mb_block_count; i++) {
            int j;
            if (s->c.block_last_index[i] > 0) {
                for (j = 63; j > 0; j--) {
                    if (s->c.block[i][s->c.intra_scantable.permutated[j]])
                        break;
                }
                s->c.block_last_index[i] = j;
            }
        }
    }

    s->encode_mb(s, s->c.block, motion_x, motion_y);
}
2616 
2617 static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
2618 {
2619  if (s->c.chroma_format == CHROMA_420)
2620  encode_mb_internal(s, motion_x, motion_y, 8, 8, 6, 1, 1, CHROMA_420);
2621  else if (s->c.chroma_format == CHROMA_422)
2622  encode_mb_internal(s, motion_x, motion_y, 16, 8, 8, 1, 0, CHROMA_422);
2623  else
2624  encode_mb_internal(s, motion_x, motion_y, 16, 16, 12, 0, 0, CHROMA_444);
2625 }
2626 
/* Snapshot of the encoder state that encode_mb() mutates, used by the
 * high-quality mb-type decision (encode_mb_hq) to restore/compare encoding
 * attempts. NOTE(review): several members were lost in this copy of the
 * file (the struct is copied via the COPY_CONTEXT helpers below, which
 * reference more fields than are visible here) — verify against upstream. */
typedef struct MBBackup {
    struct {
        int mv[2][4][2];           /* motion vectors per direction/block */
        int last_mv[2][2][2];      /* MV predictors */
        int last_dc[3];            /* DC predictors (MPEG-1 style) */
        int qscale;
        int16_t (*block)[64];      /* pointer to the coefficient blocks */
    } c;
    int dquant;
} MBBackup;
2644 
/* Generate paired state-copy helpers for the RD macroblock-type search:
 * BEFORE##_context_before_encode() saves the predictors/statistics that an
 * encode attempt will clobber, AFTER##_context_after_encode() captures the
 * full result of an attempt (including the chosen bit contexts and blocks).
 * Instantiated twice below so state can flow MPVEncContext -> MBBackup
 * ("backup"/"save") and MBBackup -> MPVEncContext ("reset"/"store"). */
#define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE) \
static inline void BEFORE ##_context_before_encode(DST_TYPE *const d, \
                                                   const SRC_TYPE *const s) \
{ \
    /* FIXME is memcpy faster than a loop? */ \
    memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
 \
    /* MPEG-1 */ \
    d->c.mb_skip_run = s->c.mb_skip_run; \
    for (int i = 0; i < 3; i++) \
        d->c.last_dc[i] = s->c.last_dc[i]; \
 \
    /* statistics */ \
    d->mv_bits = s->mv_bits; \
    d->i_tex_bits = s->i_tex_bits; \
    d->p_tex_bits = s->p_tex_bits; \
    d->i_count = s->i_count; \
    d->misc_bits = s->misc_bits; \
    d->last_bits = 0; \
 \
    d->c.mb_skipped = 0; \
    d->c.qscale = s->c.qscale; \
    d->dquant = s->dquant; \
 \
    d->esc3_level_length = s->esc3_level_length; \
} \
 \
static inline void AFTER ## _context_after_encode(DST_TYPE *const d, \
                                                  const SRC_TYPE *const s, \
                                                  int data_partitioning) \
{ \
    /* FIXME is memcpy faster than a loop? */ \
    memcpy(d->c.mv, s->c.mv, 2*4*2*sizeof(int)); \
    memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
 \
    /* MPEG-1 */ \
    d->c.mb_skip_run = s->c.mb_skip_run; \
    for (int i = 0; i < 3; i++) \
        d->c.last_dc[i] = s->c.last_dc[i]; \
 \
    /* statistics */ \
    d->mv_bits = s->mv_bits; \
    d->i_tex_bits = s->i_tex_bits; \
    d->p_tex_bits = s->p_tex_bits; \
    d->i_count = s->i_count; \
    d->misc_bits = s->misc_bits; \
 \
    d->c.mb_intra = s->c.mb_intra; \
    d->c.mb_skipped = s->c.mb_skipped; \
    d->c.mv_type = s->c.mv_type; \
    d->c.mv_dir = s->c.mv_dir; \
    d->pb = s->pb; \
    if (data_partitioning) { \
        d->pb2 = s->pb2; \
        d->tex_pb = s->tex_pb; \
    } \
    d->c.block = s->c.block; \
    for (int i = 0; i < 8; i++) \
        d->c.block_last_index[i] = s->c.block_last_index[i]; \
    d->c.interlaced_dct = s->c.interlaced_dct; \
    d->c.qscale = s->c.qscale; \
 \
    d->esc3_level_length = s->esc3_level_length; \
}

/* backup/save: encoder -> MBBackup; reset/store: MBBackup -> encoder */
COPY_CONTEXT(backup, save, MBBackup, MPVEncContext)
COPY_CONTEXT(reset, store, MPVEncContext, MBBackup)
2712 
2713 static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best,
2714  PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2715  int *dmin, int *next_block, int motion_x, int motion_y)
2716 {
2717  int score;
2718  uint8_t *dest_backup[3];
2719 
2720  reset_context_before_encode(s, backup);
2721 
2722  s->c.block = s->c.blocks[*next_block];
2723  s->pb = pb[*next_block];
2724  if (s->c.data_partitioning) {
2725  s->pb2 = pb2 [*next_block];
2726  s->tex_pb= tex_pb[*next_block];
2727  }
2728 
2729  if(*next_block){
2730  memcpy(dest_backup, s->c.dest, sizeof(s->c.dest));
2731  s->c.dest[0] = s->c.sc.rd_scratchpad;
2732  s->c.dest[1] = s->c.sc.rd_scratchpad + 16*s->c.linesize;
2733  s->c.dest[2] = s->c.sc.rd_scratchpad + 16*s->c.linesize + 8;
2734  av_assert0(s->c.linesize >= 32); //FIXME
2735  }
2736 
2737  encode_mb(s, motion_x, motion_y);
2738 
2739  score= put_bits_count(&s->pb);
2740  if (s->c.data_partitioning) {
2741  score+= put_bits_count(&s->pb2);
2742  score+= put_bits_count(&s->tex_pb);
2743  }
2744 
2745  if (s->c.avctx->mb_decision == FF_MB_DECISION_RD) {
2746  mpv_reconstruct_mb(s, s->c.block);
2747 
2748  score *= s->lambda2;
2749  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2750  }
2751 
2752  if(*next_block){
2753  memcpy(s->c.dest, dest_backup, sizeof(s->c.dest));
2754  }
2755 
2756  if(score<*dmin){
2757  *dmin= score;
2758  *next_block^=1;
2759 
2760  save_context_after_encode(best, s, s->c.data_partitioning);
2761  }
2762 }
2763 
2764 static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
2765 {
2766  const uint32_t *sq = ff_square_tab + 256;
2767  int acc=0;
2768  int x,y;
2769 
2770  if(w==16 && h==16)
2771  return s->sse_cmp[0](NULL, src1, src2, stride, 16);
2772  else if(w==8 && h==8)
2773  return s->sse_cmp[1](NULL, src1, src2, stride, 8);
2774 
2775  for(y=0; y<h; y++){
2776  for(x=0; x<w; x++){
2777  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2778  }
2779  }
2780 
2781  av_assert2(acc>=0);
2782 
2783  return acc;
2784 }
2785 
2786 static int sse_mb(MPVEncContext *const s)
2787 {
2788  int w= 16;
2789  int h= 16;
2790  int chroma_mb_w = w >> s->c.chroma_x_shift;
2791  int chroma_mb_h = h >> s->c.chroma_y_shift;
2792 
2793  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
2794  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
2795 
2796  if(w==16 && h==16)
2797  return s->n_sse_cmp[0](s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2798  s->c.dest[0], s->c.linesize, 16) +
2799  s->n_sse_cmp[1](s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2800  s->c.dest[1], s->c.uvlinesize, chroma_mb_h) +
2801  s->n_sse_cmp[1](s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2802  s->c.dest[2], s->c.uvlinesize, chroma_mb_h);
2803  else
2804  return sse(s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2805  s->c.dest[0], w, h, s->c.linesize) +
2806  sse(s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2807  s->c.dest[1], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize) +
2808  sse(s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2809  s->c.dest[2], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize);
2810 }
2811 
2813  MPVEncContext *const s = *(void**)arg;
2814 
2815 
2816  s->me.pre_pass = 1;
2817  s->me.dia_size = s->c.avctx->pre_dia_size;
2818  s->c.first_slice_line = 1;
2819  for (s->c.mb_y = s->c.end_mb_y - 1; s->c.mb_y >= s->c.start_mb_y; s->c.mb_y--) {
2820  for (s->c.mb_x = s->c.mb_width - 1; s->c.mb_x >=0 ; s->c.mb_x--)
2821  ff_pre_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
2822  s->c.first_slice_line = 0;
2823  }
2824 
2825  s->me.pre_pass = 0;
2826 
2827  return 0;
2828 }
2829 
2831  MPVEncContext *const s = *(void**)arg;
2832 
2833  s->me.dia_size = s->c.avctx->dia_size;
2834  s->c.first_slice_line = 1;
2835  for (s->c.mb_y = s->c.start_mb_y; s->c.mb_y < s->c.end_mb_y; s->c.mb_y++) {
2836  s->c.mb_x = 0; //for block init below
2837  ff_init_block_index(&s->c);
2838  for (s->c.mb_x = 0; s->c.mb_x < s->c.mb_width; s->c.mb_x++) {
2839  s->c.block_index[0] += 2;
2840  s->c.block_index[1] += 2;
2841  s->c.block_index[2] += 2;
2842  s->c.block_index[3] += 2;
2843 
2844  /* compute motion vector & mb_type and store in context */
2845  if (s->c.pict_type == AV_PICTURE_TYPE_B)
2846  ff_estimate_b_frame_motion(s, s->c.mb_x, s->c.mb_y);
2847  else
2848  ff_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
2849  }
2850  s->c.first_slice_line = 0;
2851  }
2852  return 0;
2853 }
2854 
2855 static int mb_var_thread(AVCodecContext *c, void *arg){
2856  MPVEncContext *const s = *(void**)arg;
2857 
2858  for (int mb_y = s->c.start_mb_y; mb_y < s->c.end_mb_y; mb_y++) {
2859  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
2860  int xx = mb_x * 16;
2861  int yy = mb_y * 16;
2862  const uint8_t *pix = s->new_pic->data[0] + (yy * s->c.linesize) + xx;
2863  int varc;
2864  int sum = s->mpvencdsp.pix_sum(pix, s->c.linesize);
2865 
2866  varc = (s->mpvencdsp.pix_norm1(pix, s->c.linesize) -
2867  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2868 
2869  s->mb_var [s->c.mb_stride * mb_y + mb_x] = varc;
2870  s->mb_mean[s->c.mb_stride * mb_y + mb_x] = (sum+128)>>8;
2871  s->me.mb_var_sum_temp += varc;
2872  }
2873  }
2874  return 0;
2875 }
2876 
/* Finish the current slice: emit codec-specific trailer/stuffing, flush the
 * bit writer to a byte boundary, and account the padding bits in pass-1
 * statistics. NOTE(review): the bodies of the MJPEG/SpeedHQ branches and
 * the partitioned-frame call look empty in this copy of the file — the
 * original calls were presumably lost; verify against upstream. */
static void write_slice_end(MPVEncContext *const s)
{
    if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4) {
        if (s->c.partitioned_frame)

        ff_mpeg4_stuffing(&s->pb);
    } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
               s->c.out_format == FMT_MJPEG) {
    } else if (CONFIG_SPEEDHQ_ENCODER && s->c.out_format == FMT_SPEEDHQ) {
    }

    /* pad to a byte boundary */
    flush_put_bits(&s->pb);

    if ((s->c.avctx->flags & AV_CODEC_FLAG_PASS1) && !s->c.partitioned_frame)
        s->misc_bits+= get_bits_diff(s);
}
2896 
2897 static void write_mb_info(MPVEncContext *const s)
2898 {
2899  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2900  int offset = put_bits_count(&s->pb);
2901  int mba = s->c.mb_x + s->c.mb_width * (s->c.mb_y % s->c.gob_index);
2902  int gobn = s->c.mb_y / s->c.gob_index;
2903  int pred_x, pred_y;
2904  if (CONFIG_H263_ENCODER)
2905  ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
2906  bytestream_put_le32(&ptr, offset);
2907  bytestream_put_byte(&ptr, s->c.qscale);
2908  bytestream_put_byte(&ptr, gobn);
2909  bytestream_put_le16(&ptr, mba);
2910  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2911  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2912  /* 4MV not implemented */
2913  bytestream_put_byte(&ptr, 0); /* hmv2 */
2914  bytestream_put_byte(&ptr, 0); /* vmv2 */
2915 }
2916 
2917 static void update_mb_info(MPVEncContext *const s, int startcode)
2918 {
2919  if (!s->mb_info)
2920  return;
2921  if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2922  s->mb_info_size += 12;
2923  s->prev_mb_info = s->last_mb_info;
2924  }
2925  if (startcode) {
2926  s->prev_mb_info = put_bytes_count(&s->pb, 0);
2927  /* This might have incremented mb_info_size above, and we return without
2928  * actually writing any info into that slot yet. But in that case,
2929  * this will be called again at the start of the after writing the
2930  * start code, actually writing the mb info. */
2931  return;
2932  }
2933 
2934  s->last_mb_info = put_bytes_count(&s->pb, 0);
2935  if (!s->mb_info_size)
2936  s->mb_info_size += 12;
2937  write_mb_info(s);
2938 }
2939 
2940 int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
2941 {
2942  if (put_bytes_left(&s->pb, 0) < threshold
2943  && s->c.slice_context_count == 1
2944  && s->pb.buf == s->c.avctx->internal->byte_buffer) {
2945  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2946 
2947  uint8_t *new_buffer = NULL;
2948  int new_buffer_size = 0;
2949 
2950  if ((s->c.avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2951  av_log(s->c.avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2952  return AVERROR(ENOMEM);
2953  }
2954 
2955  emms_c();
2956 
2957  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2958  s->c.avctx->internal->byte_buffer_size + size_increase);
2959  if (!new_buffer)
2960  return AVERROR(ENOMEM);
2961 
2962  memcpy(new_buffer, s->c.avctx->internal->byte_buffer, s->c.avctx->internal->byte_buffer_size);
2963  av_free(s->c.avctx->internal->byte_buffer);
2964  s->c.avctx->internal->byte_buffer = new_buffer;
2965  s->c.avctx->internal->byte_buffer_size = new_buffer_size;
2966  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2967  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2968  }
2969  if (put_bytes_left(&s->pb, 0) < threshold)
2970  return AVERROR(EINVAL);
2971  return 0;
2972 }
2973 
2974 static int encode_thread(AVCodecContext *c, void *arg){
2975  MPVEncContext *const s = *(void**)arg;
2976  int chr_h = 16 >> s->c.chroma_y_shift;
2977  int i;
2978  MBBackup best_s = { 0 }, backup_s;
2979  uint8_t bit_buf[2][MAX_MB_BYTES];
2980  uint8_t bit_buf2[2][MAX_MB_BYTES];
2981  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2982  PutBitContext pb[2], pb2[2], tex_pb[2];
2983 
2984  for(i=0; i<2; i++){
2985  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2986  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2987  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2988  }
2989 
2990  s->last_bits= put_bits_count(&s->pb);
2991  s->mv_bits=0;
2992  s->misc_bits=0;
2993  s->i_tex_bits=0;
2994  s->p_tex_bits=0;
2995  s->i_count=0;
2996 
2997  for(i=0; i<3; i++){
2998  /* init last dc values */
2999  /* note: quant matrix value (8) is implied here */
3000  s->c.last_dc[i] = 128 << s->c.intra_dc_precision;
3001 
3002  s->encoding_error[i] = 0;
3003  }
3004  if (s->c.codec_id == AV_CODEC_ID_AMV) {
3005  s->c.last_dc[0] = 128 * 8 / 13;
3006  s->c.last_dc[1] = 128 * 8 / 14;
3007  s->c.last_dc[2] = 128 * 8 / 14;
3008  }
3009  s->c.mb_skip_run = 0;
3010  memset(s->c.last_mv, 0, sizeof(s->c.last_mv));
3011 
3012  s->last_mv_dir = 0;
3013 
3014  switch (s->c.codec_id) {
3015  case AV_CODEC_ID_H263:
3016  case AV_CODEC_ID_H263P:
3017  case AV_CODEC_ID_FLV1:
3018  if (CONFIG_H263_ENCODER)
3019  s->c.gob_index = H263_GOB_HEIGHT(s->c.height);
3020  break;
3021  case AV_CODEC_ID_MPEG4:
3022  if (CONFIG_MPEG4_ENCODER && s->c.partitioned_frame)
3024  break;
3025  }
3026 
3027  s->c.resync_mb_x = 0;
3028  s->c.resync_mb_y = 0;
3029  s->c.first_slice_line = 1;
3030  s->ptr_lastgob = s->pb.buf;
3031  for (int mb_y_order = s->c.start_mb_y; mb_y_order < s->c.end_mb_y; mb_y_order++) {
3032  int mb_y;
3033  if (CONFIG_SPEEDHQ_ENCODER && s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
3034  int first_in_slice;
3035  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->c.mb_height, &first_in_slice);
3036  if (first_in_slice && mb_y_order != s->c.start_mb_y)
3038  s->c.last_dc[0] = s->c.last_dc[1] = s->c.last_dc[2] = 1024 << s->c.intra_dc_precision;
3039  } else {
3040  mb_y = mb_y_order;
3041  }
3042  s->c.mb_x = 0;
3043  s->c.mb_y = mb_y;
3044 
3045  ff_set_qscale(&s->c, s->c.qscale);
3046  ff_init_block_index(&s->c);
3047 
3048  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
3049  int mb_type, xy;
3050 // int d;
3051  int dmin= INT_MAX;
3052  int dir;
3053  int size_increase = s->c.avctx->internal->byte_buffer_size/4
3054  + s->c.mb_width*MAX_MB_BYTES;
3055 
3057  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
3058  av_log(s->c.avctx, AV_LOG_ERROR, "encoded frame too large\n");
3059  return -1;
3060  }
3061  if (s->c.data_partitioning) {
3062  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
3063  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
3064  av_log(s->c.avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3065  return -1;
3066  }
3067  }
3068 
3069  s->c.mb_x = mb_x;
3070  s->c.mb_y = mb_y; // moved into loop, can get changed by H.261
3071  ff_update_block_index(&s->c, 8, 0, s->c.chroma_x_shift);
3072 
3073  if (CONFIG_H261_ENCODER && s->c.codec_id == AV_CODEC_ID_H261)
3075  xy = s->c.mb_y * s->c.mb_stride + s->c.mb_x;
3076  mb_type = s->mb_type[xy];
3077 
3078  /* write gob / video packet header */
3079  if(s->rtp_mode){
3080  int current_packet_size, is_gob_start;
3081 
3082  current_packet_size = put_bytes_count(&s->pb, 1)
3083  - (s->ptr_lastgob - s->pb.buf);
3084 
3085  is_gob_start = s->rtp_payload_size &&
3086  current_packet_size >= s->rtp_payload_size &&
3087  mb_y + mb_x > 0;
3088 
3089  if (s->c.start_mb_y == mb_y && mb_y > 0 && mb_x == 0) is_gob_start = 1;
3090 
3091  switch (s->c.codec_id) {
3092  case AV_CODEC_ID_H263:
3093  case AV_CODEC_ID_H263P:
3094  if (!s->c.h263_slice_structured)
3095  if (s->c.mb_x || s->c.mb_y % s->c.gob_index) is_gob_start = 0;
3096  break;
3098  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3100  if (s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO && s->c.mb_y >= 175 ||
3101  s->c.mb_skip_run)
3102  is_gob_start=0;
3103  break;
3104  case AV_CODEC_ID_MJPEG:
3105  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3106  break;
3107  }
3108 
3109  if(is_gob_start){
3110  if (s->c.start_mb_y != mb_y || mb_x != 0) {
3111  write_slice_end(s);
3112 
3113  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 && s->c.partitioned_frame)
3115  }
3116 
3117  av_assert2((put_bits_count(&s->pb)&7) == 0);
3118  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3119 
3120  if (s->error_rate && s->c.resync_mb_x + s->c.resync_mb_y > 0) {
3121  int r = put_bytes_count(&s->pb, 0) + s->c.picture_number + 16 + s->c.mb_x + s->c.mb_y;
3122  int d = 100 / s->error_rate;
3123  if(r % d == 0){
3124  current_packet_size=0;
3125  s->pb.buf_ptr= s->ptr_lastgob;
3126  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3127  }
3128  }
3129 
3130  switch (s->c.codec_id) {
3131  case AV_CODEC_ID_MPEG4:
3132  if (CONFIG_MPEG4_ENCODER) {
3135  }
3136  break;
3139  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3142  }
3143  break;
3144  case AV_CODEC_ID_H263:
3145  case AV_CODEC_ID_H263P:
3146  if (CONFIG_H263_ENCODER) {
3147  update_mb_info(s, 1);
3149  }
3150  break;
3151  }
3152 
3153  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS1) {
3154  int bits= put_bits_count(&s->pb);
3155  s->misc_bits+= bits - s->last_bits;
3156  s->last_bits= bits;
3157  }
3158 
3159  s->ptr_lastgob += current_packet_size;
3160  s->c.first_slice_line = 1;
3161  s->c.resync_mb_x = mb_x;
3162  s->c.resync_mb_y = mb_y;
3163  }
3164  }
3165 
3166  if (s->c.resync_mb_x == s->c.mb_x &&
3167  s->c.resync_mb_y+1 == s->c.mb_y)
3168  s->c.first_slice_line = 0;
3169 
3170  s->c.mb_skipped = 0;
3171  s->dquant=0; //only for QP_RD
3172 
3173  update_mb_info(s, 0);
3174 
3175  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3176  int next_block=0;
3177  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3178 
3179  backup_context_before_encode(&backup_s, s);
3180  backup_s.pb= s->pb;
3181  if (s->c.data_partitioning) {
3182  backup_s.pb2= s->pb2;
3183  backup_s.tex_pb= s->tex_pb;
3184  }
3185 
3186  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3187  s->c.mv_dir = MV_DIR_FORWARD;
3188  s->c.mv_type = MV_TYPE_16X16;
3189  s->c.mb_intra = 0;
3190  s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3191  s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3192  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3193  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3194  }
3195  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3196  s->c.mv_dir = MV_DIR_FORWARD;
3197  s->c.mv_type = MV_TYPE_FIELD;
3198  s->c.mb_intra = 0;
3199  for(i=0; i<2; i++){
3200  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3201  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3202  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3203  }
3204  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3205  &dmin, &next_block, 0, 0);
3206  }
3207  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3208  s->c.mv_dir = MV_DIR_FORWARD;
3209  s->c.mv_type = MV_TYPE_16X16;
3210  s->c.mb_intra = 0;
3211  s->c.mv[0][0][0] = 0;
3212  s->c.mv[0][0][1] = 0;
3213  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3214  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3215  }
3216  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3217  s->c.mv_dir = MV_DIR_FORWARD;
3218  s->c.mv_type = MV_TYPE_8X8;
3219  s->c.mb_intra = 0;
3220  for(i=0; i<4; i++){
3221  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3222  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3223  }
3224  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3225  &dmin, &next_block, 0, 0);
3226  }
3227  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3228  s->c.mv_dir = MV_DIR_FORWARD;
3229  s->c.mv_type = MV_TYPE_16X16;
3230  s->c.mb_intra = 0;
3231  s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3232  s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3233  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3234  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3235  }
3236  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3237  s->c.mv_dir = MV_DIR_BACKWARD;
3238  s->c.mv_type = MV_TYPE_16X16;
3239  s->c.mb_intra = 0;
3240  s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3241  s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3242  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3243  &dmin, &next_block, s->c.mv[1][0][0], s->c.mv[1][0][1]);
3244  }
3245  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3246  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3247  s->c.mv_type = MV_TYPE_16X16;
3248  s->c.mb_intra = 0;
3249  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3250  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3251  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3252  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3253  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3254  &dmin, &next_block, 0, 0);
3255  }
3256  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3257  s->c.mv_dir = MV_DIR_FORWARD;
3258  s->c.mv_type = MV_TYPE_FIELD;
3259  s->c.mb_intra = 0;
3260  for(i=0; i<2; i++){
3261  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3262  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3263  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3264  }
3265  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3266  &dmin, &next_block, 0, 0);
3267  }
3268  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3269  s->c.mv_dir = MV_DIR_BACKWARD;
3270  s->c.mv_type = MV_TYPE_FIELD;
3271  s->c.mb_intra = 0;
3272  for(i=0; i<2; i++){
3273  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3274  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3275  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3276  }
3277  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3278  &dmin, &next_block, 0, 0);
3279  }
3280  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3281  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3282  s->c.mv_type = MV_TYPE_FIELD;
3283  s->c.mb_intra = 0;
3284  for(dir=0; dir<2; dir++){
3285  for(i=0; i<2; i++){
3286  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3287  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3288  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3289  }
3290  }
3291  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3292  &dmin, &next_block, 0, 0);
3293  }
3294  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3295  s->c.mv_dir = 0;
3296  s->c.mv_type = MV_TYPE_16X16;
3297  s->c.mb_intra = 1;
3298  s->c.mv[0][0][0] = 0;
3299  s->c.mv[0][0][1] = 0;
3300  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3301  &dmin, &next_block, 0, 0);
3302  s->c.mbintra_table[xy] = 1;
3303  }
3304 
3305  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3306  if (best_s.c.mv_type == MV_TYPE_16X16) { //FIXME move 4mv after QPRD
3307  const int last_qp = backup_s.c.qscale;
3308  int qpi, qp, dc[6];
3309  int16_t ac[6][16];
3310  const int mvdir = (best_s.c.mv_dir & MV_DIR_BACKWARD) ? 1 : 0;
3311  static const int dquant_tab[4]={-1,1,-2,2};
3312  int storecoefs = s->c.mb_intra && s->c.dc_val[0];
3313 
3314  av_assert2(backup_s.dquant == 0);
3315 
3316  //FIXME intra
3317  s->c.mv_dir = best_s.c.mv_dir;
3318  s->c.mv_type = MV_TYPE_16X16;
3319  s->c.mb_intra = best_s.c.mb_intra;
3320  s->c.mv[0][0][0] = best_s.c.mv[0][0][0];
3321  s->c.mv[0][0][1] = best_s.c.mv[0][0][1];
3322  s->c.mv[1][0][0] = best_s.c.mv[1][0][0];
3323  s->c.mv[1][0][1] = best_s.c.mv[1][0][1];
3324 
3325  qpi = s->c.pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3326  for(; qpi<4; qpi++){
3327  int dquant= dquant_tab[qpi];
3328  qp= last_qp + dquant;
3329  if (qp < s->c.avctx->qmin || qp > s->c.avctx->qmax)
3330  continue;
3331  backup_s.dquant= dquant;
3332  if(storecoefs){
3333  for(i=0; i<6; i++){
3334  dc[i] = s->c.dc_val[0][s->c.block_index[i]];
3335  memcpy(ac[i], s->c.ac_val[0][s->c.block_index[i]], sizeof(int16_t)*16);
3336  }
3337  }
3338 
3339  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3340  &dmin, &next_block, s->c.mv[mvdir][0][0], s->c.mv[mvdir][0][1]);
3341  if (best_s.c.qscale != qp) {
3342  if(storecoefs){
3343  for(i=0; i<6; i++){
3344  s->c.dc_val[0][s->c.block_index[i]] = dc[i];
3345  memcpy(s->c.ac_val[0][s->c.block_index[i]], ac[i], sizeof(int16_t)*16);
3346  }
3347  }
3348  }
3349  }
3350  }
3351  }
3352  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3353  int mx= s->b_direct_mv_table[xy][0];
3354  int my= s->b_direct_mv_table[xy][1];
3355 
3356  backup_s.dquant = 0;
3357  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3358  s->c.mb_intra = 0;
3359  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3360  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3361  &dmin, &next_block, mx, my);
3362  }
3363  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3364  backup_s.dquant = 0;
3365  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3366  s->c.mb_intra = 0;
3367  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3368  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3369  &dmin, &next_block, 0, 0);
3370  }
3371  if (!best_s.c.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3372  int coded=0;
3373  for(i=0; i<6; i++)
3374  coded |= s->c.block_last_index[i];
3375  if(coded){
3376  int mx,my;
3377  memcpy(s->c.mv, best_s.c.mv, sizeof(s->c.mv));
3378  if (CONFIG_MPEG4_ENCODER && best_s.c.mv_dir & MV_DIRECT) {
3379  mx=my=0; //FIXME find the one we actually used
3380  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3381  } else if (best_s.c.mv_dir & MV_DIR_BACKWARD) {
3382  mx = s->c.mv[1][0][0];
3383  my = s->c.mv[1][0][1];
3384  }else{
3385  mx = s->c.mv[0][0][0];
3386  my = s->c.mv[0][0][1];
3387  }
3388 
3389  s->c.mv_dir = best_s.c.mv_dir;
3390  s->c.mv_type = best_s.c.mv_type;
3391  s->c.mb_intra = 0;
3392 /* s->c.mv[0][0][0] = best_s.mv[0][0][0];
3393  s->c.mv[0][0][1] = best_s.mv[0][0][1];
3394  s->c.mv[1][0][0] = best_s.mv[1][0][0];
3395  s->c.mv[1][0][1] = best_s.mv[1][0][1];*/
3396  backup_s.dquant= 0;
3397  s->skipdct=1;
3398  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3399  &dmin, &next_block, mx, my);
3400  s->skipdct=0;
3401  }
3402  }
3403 
3404  store_context_after_encode(s, &best_s, s->c.data_partitioning);
3405 
3406  pb_bits_count= put_bits_count(&s->pb);
3407  flush_put_bits(&s->pb);
3408  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3409  s->pb= backup_s.pb;
3410 
3411  if (s->c.data_partitioning) {
3412  pb2_bits_count= put_bits_count(&s->pb2);
3413  flush_put_bits(&s->pb2);
3414  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3415  s->pb2= backup_s.pb2;
3416 
3417  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3418  flush_put_bits(&s->tex_pb);
3419  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3420  s->tex_pb= backup_s.tex_pb;
3421  }
3422  s->last_bits= put_bits_count(&s->pb);
3423 
3424  if (CONFIG_H263_ENCODER &&
3425  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3427 
3428  if(next_block==0){ //FIXME 16 vs linesize16
3429  s->c.hdsp.put_pixels_tab[0][0](s->c.dest[0], s->c.sc.rd_scratchpad , s->c.linesize ,16);
3430  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[1], s->c.sc.rd_scratchpad + 16*s->c.linesize , s->c.uvlinesize, 8);
3431  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[2], s->c.sc.rd_scratchpad + 16*s->c.linesize + 8, s->c.uvlinesize, 8);
3432  }
3433 
3434  if (s->c.avctx->mb_decision == FF_MB_DECISION_BITS)
3435  mpv_reconstruct_mb(s, s->c.block);
3436  } else {
3437  int motion_x = 0, motion_y = 0;
3438  s->c.mv_type = MV_TYPE_16X16;
3439  // only one MB-Type possible
3440 
3441  switch(mb_type){
3443  s->c.mv_dir = 0;
3444  s->c.mb_intra = 1;
3445  motion_x= s->c.mv[0][0][0] = 0;
3446  motion_y= s->c.mv[0][0][1] = 0;
3447  s->c.mbintra_table[xy] = 1;
3448  break;
3450  s->c.mv_dir = MV_DIR_FORWARD;
3451  s->c.mb_intra = 0;
3452  motion_x= s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3453  motion_y= s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3454  break;
3456  s->c.mv_dir = MV_DIR_FORWARD;
3457  s->c.mv_type = MV_TYPE_FIELD;
3458  s->c.mb_intra = 0;
3459  for(i=0; i<2; i++){
3460  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3461  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3462  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3463  }
3464  break;
3466  s->c.mv_dir = MV_DIR_FORWARD;
3467  s->c.mv_type = MV_TYPE_8X8;
3468  s->c.mb_intra = 0;
3469  for(i=0; i<4; i++){
3470  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3471  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3472  }
3473  break;
3475  if (CONFIG_MPEG4_ENCODER) {
3477  s->c.mb_intra = 0;
3478  motion_x=s->b_direct_mv_table[xy][0];
3479  motion_y=s->b_direct_mv_table[xy][1];
3480  ff_mpeg4_set_direct_mv(&s->c, motion_x, motion_y);
3481  }
3482  break;
3484  if (CONFIG_MPEG4_ENCODER) {
3486  s->c.mb_intra = 0;
3487  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3488  }
3489  break;
3491  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3492  s->c.mb_intra = 0;
3493  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3494  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3495  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3496  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3497  break;
3499  s->c.mv_dir = MV_DIR_BACKWARD;
3500  s->c.mb_intra = 0;
3501  motion_x= s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3502  motion_y= s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3503  break;
3505  s->c.mv_dir = MV_DIR_FORWARD;
3506  s->c.mb_intra = 0;
3507  motion_x= s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3508  motion_y= s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3509  break;
3511  s->c.mv_dir = MV_DIR_FORWARD;
3512  s->c.mv_type = MV_TYPE_FIELD;
3513  s->c.mb_intra = 0;
3514  for(i=0; i<2; i++){
3515  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3516  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3517  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3518  }
3519  break;
3521  s->c.mv_dir = MV_DIR_BACKWARD;
3522  s->c.mv_type = MV_TYPE_FIELD;
3523  s->c.mb_intra = 0;
3524  for(i=0; i<2; i++){
3525  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3526  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3527  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3528  }
3529  break;
3531  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3532  s->c.mv_type = MV_TYPE_FIELD;
3533  s->c.mb_intra = 0;
3534  for(dir=0; dir<2; dir++){
3535  for(i=0; i<2; i++){
3536  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3537  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3538  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3539  }
3540  }
3541  break;
3542  default:
3543  av_log(s->c.avctx, AV_LOG_ERROR, "illegal MB type\n");
3544  }
3545 
3546  encode_mb(s, motion_x, motion_y);
3547 
3548  // RAL: Update last macroblock type
3549  s->last_mv_dir = s->c.mv_dir;
3550 
3551  if (CONFIG_H263_ENCODER &&
3552  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3554 
3555  mpv_reconstruct_mb(s, s->c.block);
3556  }
3557 
3558  s->c.cur_pic.qscale_table[xy] = s->c.qscale;
3559 
3560  /* clean the MV table in IPS frames for direct mode in B-frames */
3561  if (s->c.mb_intra /* && I,P,S_TYPE */) {
3562  s->p_mv_table[xy][0]=0;
3563  s->p_mv_table[xy][1]=0;
3564  } else if ((s->c.h263_pred || s->c.h263_aic) && s->c.mbintra_table[xy])
3566 
3567  if (s->c.avctx->flags & AV_CODEC_FLAG_PSNR) {
3568  int w= 16;
3569  int h= 16;
3570 
3571  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
3572  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
3573 
3574  s->encoding_error[0] += sse(
3575  s, s->new_pic->data[0] + s->c.mb_x*16 + s->c.mb_y*s->c.linesize*16,
3576  s->c.dest[0], w, h, s->c.linesize);
3577  s->encoding_error[1] += sse(
3578  s, s->new_pic->data[1] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3579  s->c.dest[1], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3580  s->encoding_error[2] += sse(
3581  s, s->new_pic->data[2] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3582  s->c.dest[2], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3583  }
3584  if (s->c.loop_filter) {
3585  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263)
3586  ff_h263_loop_filter(&s->c);
3587  }
3588  ff_dlog(s->c.avctx, "MB %d %d bits\n",
3589  s->c.mb_x + s->c.mb_y * s->c.mb_stride, put_bits_count(&s->pb));
3590  }
3591  }
3592 
3593 #if CONFIG_MSMPEG4ENC
3594  //not beautiful here but we must write it before flushing so it has to be here
3595  if (s->c.msmpeg4_version != MSMP4_UNUSED && s->c.msmpeg4_version < MSMP4_WMV1 &&
3596  s->c.pict_type == AV_PICTURE_TYPE_I)
3598 #endif
3599 
3600  write_slice_end(s);
3601 
3602  return 0;
3603 }
3604 
3605 #define ADD(field) dst->field += src->field;
/* MERGE accumulates like ADD but also zeroes the source field, so merging the
 * same slice context twice cannot double-count a statistic. */
3606 #define MERGE(field) dst->field += src->field; src->field=0
/* NOTE(review): the signature line (doc line 3607) is missing from this
 * extract; judging from the call site `merge_context_after_me(s, ...)` below,
 * it is presumably `static void merge_context_after_me(MPVEncContext *dst,
 * MPVEncContext *src)` — confirm against the full source.
 * Folds per-slice motion-estimation statistics into the main context. */
3608 {
3609  ADD(me.scene_change_score);
3610  ADD(me.mc_mb_var_sum_temp);
3611  ADD(me.mb_var_sum_temp);
3612 }
3613 
/* NOTE(review): the signature line (doc line 3614) is missing from this
 * extract; from the call site `merge_context_after_encode(s, ...)` it is
 * presumably `static void merge_context_after_encode(MPVEncContext *dst,
 * MPVEncContext *src)` — confirm against the full source.
 * Accumulates per-slice bit/statistics counters into the main context and
 * appends the slice's byte-aligned bitstream onto the main bit writer. */
3615 {
3616  int i;
3617 
3618  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3619  MERGE(dct_count[1]);
3620  ADD(mv_bits);
3621  ADD(i_tex_bits);
3622  ADD(p_tex_bits);
3623  ADD(i_count);
3624  ADD(misc_bits);
3625  ADD(encoding_error[0]);
3626  ADD(encoding_error[1]);
3627  ADD(encoding_error[2]);
3628 
/* Merge the 64-coefficient noise-shaping error accumulators (intra/inter)
 * only when denoising is active (dct_error_sum allocated). */
3629  if (dst->dct_error_sum) {
3630  for(i=0; i<64; i++){
3631  MERGE(dct_error_sum[0][i]);
3632  MERGE(dct_error_sum[1][i]);
3633  }
3634  }
3635 
/* Both writers must be byte-aligned before the slice bitstream is copied. */
3636  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3637  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3638  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3639  flush_put_bits(&dst->pb);
3640 }
3641 
/**
 * Pick the quantizer/lambda for the current picture.
 *
 * Priority: an explicit next_lambda (from a previous pass) wins; otherwise,
 * unless fixed_qscale is set, the rate controller estimates a qscale.
 * With adaptive quantization a per-MB qscale table is initialized and the
 * codec-specific fixup is applied.
 *
 * @param dry_run if nonzero, next_lambda is preserved for the real run
 * @return 0 on success, -1 if the rate controller returned a negative quality
 */
3642 static int estimate_qp(MPVMainEncContext *const m, int dry_run)
3643 {
3644  MPVEncContext *const s = &m->s;
3645 
3646  if (m->next_lambda){
3647  s->c.cur_pic.ptr->f->quality = m->next_lambda;
3648  if(!dry_run) m->next_lambda= 0;
3649  } else if (!m->fixed_qscale) {
3650  int quality = ff_rate_estimate_qscale(m, dry_run);
3651  s->c.cur_pic.ptr->f->quality = quality;
3652  if (s->c.cur_pic.ptr->f->quality < 0)
3653  return -1;
3654  }
3655 
3656  if(s->adaptive_quant){
3657  init_qscale_tab(s);
3658 
3659  switch (s->c.codec_id) {
3660  case AV_CODEC_ID_MPEG4:
3661  if (CONFIG_MPEG4_ENCODER)
/* NOTE(review): the call on doc line 3662 is missing from this extract —
 * presumably the MPEG-4 qscale-table cleanup; confirm against full source. */
3663  break;
3664  case AV_CODEC_ID_H263:
3665  case AV_CODEC_ID_H263P:
3666  case AV_CODEC_ID_FLV1:
3667  if (CONFIG_H263_ENCODER)
/* NOTE(review): the call on doc line 3668 is missing from this extract —
 * presumably the H.263 qscale-table cleanup; confirm against full source. */
3669  break;
3670  }
3671 
3672  s->lambda = s->lambda_table[0];
3673  //FIXME broken
3674  }else
3675  s->lambda = s->c.cur_pic.ptr->f->quality;
3676  update_qscale(m);
3677  return 0;
3678 }
3679 
3680 /* must be called before writing the header */
/* NOTE(review): the signature line (doc line 3681) is missing from this
 * extract; presumably `static void set_frame_distances(MPVEncContext *const s)`
 * — confirm against the full source.
 * Derives the temporal distances used for B-frame prediction: pb_time
 * (distance from the previous non-B frame to this B frame) and pp_time
 * (distance between the two surrounding non-B frames). */
3682 {
3683  av_assert1(s->c.cur_pic.ptr->f->pts != AV_NOPTS_VALUE);
3684  s->c.time = s->c.cur_pic.ptr->f->pts * s->c.avctx->time_base.num;
3685 
3686  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
3687  s->c.pb_time = s->c.pp_time - (s->c.last_non_b_time - s->c.time);
3688  av_assert1(s->c.pb_time > 0 && s->c.pb_time < s->c.pp_time);
3689  }else{
/* Non-B frame: advance the reference times for the next GOP segment. */
3690  s->c.pp_time = s->c.time - s->c.last_non_b_time;
3691  s->c.last_non_b_time = s->c.time;
3692  av_assert1(s->c.picture_number == 0 || s->c.pp_time > 0);
3693  }
3694 }
3695 
/**
 * Encode one picture into the packet buffer.
 *
 * Sets up per-slice bit writers over disjoint regions of pkt, runs motion
 * estimation (or spatial-complexity analysis for I-frames) across all slice
 * contexts, applies scene-change detection, selects f_code/b_code, estimates
 * the quantizer, writes the picture header and finally runs the per-slice
 * encode threads, merging their output back into the main context.
 *
 * @return 0 on success, a negative error code on failure
 */
3696 static int encode_picture(MPVMainEncContext *const m, const AVPacket *pkt)
3697 {
3698  MPVEncContext *const s = &m->s;
3699  int i, ret;
3700  int bits;
3701  int context_count = s->c.slice_context_count;
3702 
3703  /* we need to initialize some time vars before we can encode B-frames */
3704  // RAL: Condition added for MPEG1VIDEO
3705  if (s->c.out_format == FMT_MPEG1 || (s->c.h263_pred && s->c.msmpeg4_version == MSMP4_UNUSED))
/* NOTE(review): the call on doc line 3706 is missing from this extract —
 * presumably the frame-distance setup defined above; confirm. */
3707  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4)
/* NOTE(review): doc lines 3708-3709 missing — presumably the MPEG-4
 * time-base update call; confirm against the full source. */
3710 // s->lambda = s->c.cur_pic.ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3711 
/* Rounding control: MSMPEG4v3+ I-frames force no_rounding; P-frames
 * alternate it when flipflop_rounding is enabled. */
3712  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
3713  s->c.no_rounding = s->c.msmpeg4_version >= MSMP4_V3;
3714  } else if (s->c.pict_type != AV_PICTURE_TYPE_B) {
3715  s->c.no_rounding ^= s->c.flipflop_rounding;
3716  }
3717 
3718  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
3719  ret = estimate_qp(m, 1);
3720  if (ret < 0)
3721  return ret;
3722  ff_get_2pass_fcode(m);
3723  } else if (!(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3724  if (s->c.pict_type == AV_PICTURE_TYPE_B)
3725  s->lambda = m->last_lambda_for[s->c.pict_type];
3726  else
3727  s->lambda = m->last_lambda_for[m->last_non_b_pict_type];
3728  update_qscale(m);
3729  }
3730 
/* Split the packet buffer proportionally by MB rows between the slice
 * contexts and give each its own bit writer. */
3731  s->c.mb_intra = 0; //for the rate distortion & bit compare functions
3732  for (int i = 0; i < context_count; i++) {
3733  MPVEncContext *const slice = s->c.enc_contexts[i];
3734  int h = s->c.mb_height;
3735  uint8_t *start = pkt->data + (int64_t)pkt->size * slice->c.start_mb_y / h;
3736  uint8_t *end = pkt->data + (int64_t)pkt->size * slice->c. end_mb_y / h;
3737 
3738  init_put_bits(&slice->pb, start, end - start);
3739 
3740  if (i) {
3741  ret = ff_update_duplicate_context(&slice->c, &s->c);
3742  if (ret < 0)
3743  return ret;
3744  slice->lambda = s->lambda;
3745  slice->lambda2 = s->lambda2;
3746  }
3747  slice->me.temp = slice->me.scratchpad = slice->c.sc.scratchpad_buf;
3748  ff_me_init_pic(slice);
3749  }
3750 
3751  /* Estimate motion for every MB */
3752  if (s->c.pict_type != AV_PICTURE_TYPE_I) {
3753  s->lambda = (s->lambda * m->me_penalty_compensation + 128) >> 8;
3754  s->lambda2 = (s->lambda2 * (int64_t) m->me_penalty_compensation + 128) >> 8;
3755  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
3756  if ((m->me_pre && m->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3757  m->me_pre == 2) {
3758  s->c.avctx->execute(s->c.avctx, pre_estimate_motion_thread,
3759  &s->c.enc_contexts[0], NULL,
3760  context_count, sizeof(void*));
3761  }
3762  }
3763 
3764  s->c.avctx->execute(s->c.avctx, estimate_motion_thread, &s->c.enc_contexts[0],
3765  NULL, context_count, sizeof(void*));
3766  }else /* if (s->c.pict_type == AV_PICTURE_TYPE_I) */{
3767  /* I-Frame */
3768  for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
3769  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3770 
3771  if (!m->fixed_qscale) {
3772  /* finding spatial complexity for I-frame rate control */
3773  s->c.avctx->execute(s->c.avctx, mb_var_thread, &s->c.enc_contexts[0],
3774  NULL, context_count, sizeof(void*));
3775  }
3776  }
3777  for(i=1; i<context_count; i++){
3778  merge_context_after_me(s, s->c.enc_contexts[i]);
3779  }
3780  m->mc_mb_var_sum = s->me.mc_mb_var_sum_temp;
3781  m->mb_var_sum = s->me. mb_var_sum_temp;
3782  emms_c();
3783 
/* Scene-change detection: promote a P-frame to I when the ME score
 * exceeds the threshold, and mark every MB intra. */
3784  if (s->me.scene_change_score > m->scenechange_threshold &&
3785  s->c.pict_type == AV_PICTURE_TYPE_P) {
3786  s->c.pict_type = AV_PICTURE_TYPE_I;
3787  for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
3788  s->mb_type[i] = CANDIDATE_MB_TYPE_INTRA;
3789  if (s->c.msmpeg4_version >= MSMP4_V3)
3790  s->c.no_rounding = 1;
3791  ff_dlog(s->c.avctx, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3792  m->mb_var_sum, m->mc_mb_var_sum);
3793  }
3794 
/* Choose f_code/b_code from the MV tables and clip over-range vectors
 * (not applicable in H.263+ unrestricted MV mode). */
3795  if (!s->c.umvplus) {
3796  if (s->c.pict_type == AV_PICTURE_TYPE_P || s->c.pict_type == AV_PICTURE_TYPE_S) {
3797  s->c.f_code = ff_get_best_fcode(m, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3798 
3799  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3800  int a,b;
3801  a = ff_get_best_fcode(m, s->c.p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3802  b = ff_get_best_fcode(m, s->c.p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3803  s->c.f_code = FFMAX3(s->c.f_code, a, b);
3804  }
3805 
/* NOTE(review): doc line 3806 is missing from this extract — presumably
 * a preparatory call before the long-MV fixups below; confirm. */
3807  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->c.f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3808  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3809  int j;
3810  for(i=0; i<2; i++){
3811  for(j=0; j<2; j++)
3812  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3813  s->c.p_field_mv_table[i][j], s->c.f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3814  }
3815  }
3816  } else if (s->c.pict_type == AV_PICTURE_TYPE_B) {
3817  int a, b;
3818 
3819  a = ff_get_best_fcode(m, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3820  b = ff_get_best_fcode(m, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3821  s->c.f_code = FFMAX(a, b);
3822 
3823  a = ff_get_best_fcode(m, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3824  b = ff_get_best_fcode(m, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3825  s->c.b_code = FFMAX(a, b);
3826 
3827  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->c.f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3828  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->c.b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3829  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->c.f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3830  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->c.b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3831  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3832  int dir, j;
3833  for(dir=0; dir<2; dir++){
3834  for(i=0; i<2; i++){
3835  for(j=0; j<2; j++){
/* NOTE(review): doc lines 3836-3837 are missing from this extract —
 * presumably the declaration of `type` used below (per-direction
 * candidate MB type); confirm against the full source. */
3838  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3839  s->b_field_mv_table[dir][i][j], dir ? s->c.b_code : s->c.f_code, type, 1);
3840  }
3841  }
3842  }
3843  }
3844  }
3845  }
3846 
3847  ret = estimate_qp(m, 0);
3848  if (ret < 0)
3849  return ret;
3850 
3851  if (s->c.qscale < 3 && s->max_qcoeff <= 128 &&
3852  s->c.pict_type == AV_PICTURE_TYPE_I &&
3853  !(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE))
3854  s->c.qscale = 3; //reduce clipping problems
3855 
3856  if (s->c.out_format == FMT_MJPEG) {
/* NOTE(review): doc line 3857 is missing from this extract — the call whose
 * argument list continues on the next line; confirm against full source. */
3858  (7 + s->c.qscale) / s->c.qscale, 65535);
3859  if (ret < 0)
3860  return ret;
3861 
3862  if (s->c.codec_id != AV_CODEC_ID_AMV) {
3863  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3864  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3865 
3866  if (s->c.avctx->intra_matrix) {
3867  chroma_matrix =
3868  luma_matrix = s->c.avctx->intra_matrix;
3869  }
3870  if (s->c.avctx->chroma_intra_matrix)
3871  chroma_matrix = s->c.avctx->chroma_intra_matrix;
3872 
3873  /* for mjpeg, we do include qscale in the matrix */
3874  for (int i = 1; i < 64; i++) {
3875  int j = s->c.idsp.idct_permutation[i];
3876 
3877  s->c.chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->c.qscale) >> 3);
3878  s->c. intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->c.qscale) >> 3);
3879  }
3880  s->c.y_dc_scale_table =
3881  s->c.c_dc_scale_table = ff_mpeg12_dc_scale_table[s->c.intra_dc_precision];
3882  s->c.chroma_intra_matrix[0] =
3883  s->c.intra_matrix[0] = ff_mpeg12_dc_scale_table[s->c.intra_dc_precision][8];
3884  } else {
/* AMV path: fixed DC scale tables and the SP5X five-quant matrices. */
3885  static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3886  static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3887  for (int i = 1; i < 64; i++) {
3888  int j = s->c.idsp.idct_permutation[ff_zigzag_direct[i]];
3889 
3890  s->c.intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3891  s->c.chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3892  }
3893  s->c.y_dc_scale_table = y;
3894  s->c.c_dc_scale_table = c;
3895  s->c.intra_matrix[0] = 13;
3896  s->c.chroma_intra_matrix[0] = 14;
3897  }
3898  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3899  s->c.intra_matrix, s->intra_quant_bias, 8, 8, 1);
3900  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3901  s->c.chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3902  s->c.qscale = 8;
3903  }
3904 
3905  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
3906  s->c.cur_pic.ptr->f->flags |= AV_FRAME_FLAG_KEY;
3907  } else {
3908  s->c.cur_pic.ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
3909  }
3910  s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;
3911 
3912  if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
3913  m->picture_in_gop_number = 0;
3914 
/* Write the picture header and account its bits separately. */
3915  s->c.mb_x = s->c.mb_y = 0;
3916  s->last_bits= put_bits_count(&s->pb);
3917  ret = m->encode_picture_header(m);
3918  if (ret < 0)
3919  return ret;
3920  bits= put_bits_count(&s->pb);
3921  m->header_bits = bits - s->last_bits;
3922 
3923  for(i=1; i<context_count; i++){
3924  update_duplicate_context_after_me(s->c.enc_contexts[i], s);
3925  }
3926  s->c.avctx->execute(s->c.avctx, encode_thread, &s->c.enc_contexts[0],
3927  NULL, context_count, sizeof(void*));
3928  for(i=1; i<context_count; i++){
/* If a slice buffer is adjacent to the main buffer, extend the main
 * writer's window before merging the slice bitstream. */
3929  if (s->pb.buf_end == s->c.enc_contexts[i]->pb.buf)
3930  set_put_bits_buffer_size(&s->pb, FFMIN(s->c.enc_contexts[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3931  merge_context_after_encode(s, s->c.enc_contexts[i]);
3932  }
3933  emms_c();
3934  return 0;
3935 }
3936 
3937 static void denoise_dct_c(MPVEncContext *const s, int16_t *block)
3938 {
3939  const int intra = s->c.mb_intra;
3940  int i;
3941 
3942  s->dct_count[intra]++;
3943 
3944  for(i=0; i<64; i++){
3945  int level= block[i];
3946 
3947  if(level){
3948  if(level>0){
3949  s->dct_error_sum[intra][i] += level;
3950  level -= s->dct_offset[intra][i];
3951  if(level<0) level=0;
3952  }else{
3953  s->dct_error_sum[intra][i] -= level;
3954  level += s->dct_offset[intra][i];
3955  if(level>0) level=0;
3956  }
3957  block[i]= level;
3958  }
3959  }
3960 }
3961 
3963  int16_t *block, int n,
3964  int qscale, int *overflow){
3965  const int *qmat;
3966  const uint16_t *matrix;
3967  const uint8_t *scantable;
3968  const uint8_t *perm_scantable;
3969  int max=0;
3970  unsigned int threshold1, threshold2;
3971  int bias=0;
3972  int run_tab[65];
3973  int level_tab[65];
3974  int score_tab[65];
3975  int survivor[65];
3976  int survivor_count;
3977  int last_run=0;
3978  int last_level=0;
3979  int last_score= 0;
3980  int last_i;
3981  int coeff[2][64];
3982  int coeff_count[64];
3983  int qmul, qadd, start_i, last_non_zero, i, dc;
3984  const int esc_length= s->ac_esc_length;
3985  const uint8_t *length, *last_length;
3986  const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3987  int mpeg2_qscale;
3988 
3989  s->fdsp.fdct(block);
3990 
3991  if(s->dct_error_sum)
3992  s->denoise_dct(s, block);
3993  qmul= qscale*16;
3994  qadd= ((qscale-1)|1)*8;
3995 
3996  if (s->c.q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3997  else mpeg2_qscale = qscale << 1;
3998 
3999  if (s->c.mb_intra) {
4000  int q;
4001  scantable = s->c.intra_scantable.scantable;
4002  perm_scantable = s->c.intra_scantable.permutated;
4003  if (!s->c.h263_aic) {
4004  if (n < 4)
4005  q = s->c.y_dc_scale;
4006  else
4007  q = s->c.c_dc_scale;
4008  q = q << 3;
4009  } else{
4010  /* For AIC we skip quant/dequant of INTRADC */
4011  q = 1 << 3;
4012  qadd=0;
4013  }
4014 
4015  /* note: block[0] is assumed to be positive */
4016  block[0] = (block[0] + (q >> 1)) / q;
4017  start_i = 1;
4018  last_non_zero = 0;
4019  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4020  matrix = n < 4 ? s->c.intra_matrix : s->c.chroma_intra_matrix;
4021  if (s->c.mpeg_quant || s->c.out_format == FMT_MPEG1 || s->c.out_format == FMT_MJPEG)
4022  bias= 1<<(QMAT_SHIFT-1);
4023 
4024  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4025  length = s->intra_chroma_ac_vlc_length;
4026  last_length= s->intra_chroma_ac_vlc_last_length;
4027  } else {
4028  length = s->intra_ac_vlc_length;
4029  last_length= s->intra_ac_vlc_last_length;
4030  }
4031  } else {
4032  scantable = s->c.inter_scantable.scantable;
4033  perm_scantable = s->c.inter_scantable.permutated;
4034  start_i = 0;
4035  last_non_zero = -1;
4036  qmat = s->q_inter_matrix[qscale];
4037  matrix = s->c.inter_matrix;
4038  length = s->inter_ac_vlc_length;
4039  last_length= s->inter_ac_vlc_last_length;
4040  }
4041  last_i= start_i;
4042 
4043  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4044  threshold2= (threshold1<<1);
4045 
4046  for(i=63; i>=start_i; i--) {
4047  const int j = scantable[i];
4048  int64_t level = (int64_t)block[j] * qmat[j];
4049 
4050  if(((uint64_t)(level+threshold1))>threshold2){
4051  last_non_zero = i;
4052  break;
4053  }
4054  }
4055 
4056  for(i=start_i; i<=last_non_zero; i++) {
4057  const int j = scantable[i];
4058  int64_t level = (int64_t)block[j] * qmat[j];
4059 
4060 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4061 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4062  if(((uint64_t)(level+threshold1))>threshold2){
4063  if(level>0){
4064  level= (bias + level)>>QMAT_SHIFT;
4065  coeff[0][i]= level;
4066  coeff[1][i]= level-1;
4067 // coeff[2][k]= level-2;
4068  }else{
4069  level= (bias - level)>>QMAT_SHIFT;
4070  coeff[0][i]= -level;
4071  coeff[1][i]= -level+1;
4072 // coeff[2][k]= -level+2;
4073  }
4074  coeff_count[i]= FFMIN(level, 2);
4075  av_assert2(coeff_count[i]);
4076  max |=level;
4077  }else{
4078  coeff[0][i]= (level>>31)|1;
4079  coeff_count[i]= 1;
4080  }
4081  }
4082 
4083  *overflow= s->max_qcoeff < max; //overflow might have happened
4084 
4085  if(last_non_zero < start_i){
4086  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4087  return last_non_zero;
4088  }
4089 
4090  score_tab[start_i]= 0;
4091  survivor[0]= start_i;
4092  survivor_count= 1;
4093 
4094  for(i=start_i; i<=last_non_zero; i++){
4095  int level_index, j, zero_distortion;
4096  int dct_coeff= FFABS(block[ scantable[i] ]);
4097  int best_score=256*256*256*120;
4098 
4099  if (s->fdsp.fdct == ff_fdct_ifast)
4100  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4101  zero_distortion= dct_coeff*dct_coeff;
4102 
4103  for(level_index=0; level_index < coeff_count[i]; level_index++){
4104  int distortion;
4105  int level= coeff[level_index][i];
4106  const int alevel= FFABS(level);
4107  int unquant_coeff;
4108 
4109  av_assert2(level);
4110 
4111  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4112  unquant_coeff= alevel*qmul + qadd;
4113  } else if (s->c.out_format == FMT_MJPEG) {
4114  j = s->c.idsp.idct_permutation[scantable[i]];
4115  unquant_coeff = alevel * matrix[j] * 8;
4116  }else{ // MPEG-1
4117  j = s->c.idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4118  if (s->c.mb_intra) {
4119  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4120  unquant_coeff = (unquant_coeff - 1) | 1;
4121  }else{
4122  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4123  unquant_coeff = (unquant_coeff - 1) | 1;
4124  }
4125  unquant_coeff<<= 3;
4126  }
4127 
4128  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4129  level+=64;
4130  if((level&(~127)) == 0){
4131  for(j=survivor_count-1; j>=0; j--){
4132  int run= i - survivor[j];
4133  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4134  score += score_tab[i-run];
4135 
4136  if(score < best_score){
4137  best_score= score;
4138  run_tab[i+1]= run;
4139  level_tab[i+1]= level-64;
4140  }
4141  }
4142 
4143  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4144  for(j=survivor_count-1; j>=0; j--){
4145  int run= i - survivor[j];
4146  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4147  score += score_tab[i-run];
4148  if(score < last_score){
4149  last_score= score;
4150  last_run= run;
4151  last_level= level-64;
4152  last_i= i+1;
4153  }
4154  }
4155  }
4156  }else{
4157  distortion += esc_length*lambda;
4158  for(j=survivor_count-1; j>=0; j--){
4159  int run= i - survivor[j];
4160  int score= distortion + score_tab[i-run];
4161 
4162  if(score < best_score){
4163  best_score= score;
4164  run_tab[i+1]= run;
4165  level_tab[i+1]= level-64;
4166  }
4167  }
4168 
4169  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4170  for(j=survivor_count-1; j>=0; j--){
4171  int run= i - survivor[j];
4172  int score= distortion + score_tab[i-run];
4173  if(score < last_score){
4174  last_score= score;
4175  last_run= run;
4176  last_level= level-64;
4177  last_i= i+1;
4178  }
4179  }
4180  }
4181  }
4182  }
4183 
4184  score_tab[i+1]= best_score;
4185 
4186  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4187  if(last_non_zero <= 27){
4188  for(; survivor_count; survivor_count--){
4189  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4190  break;
4191  }
4192  }else{
4193  for(; survivor_count; survivor_count--){
4194  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4195  break;
4196  }
4197  }
4198 
4199  survivor[ survivor_count++ ]= i+1;
4200  }
4201 
4202  if (s->c.out_format != FMT_H263 && s->c.out_format != FMT_H261) {
4203  last_score= 256*256*256*120;
4204  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4205  int score= score_tab[i];
4206  if (i)
4207  score += lambda * 2; // FIXME more exact?
4208 
4209  if(score < last_score){
4210  last_score= score;
4211  last_i= i;
4212  last_level= level_tab[i];
4213  last_run= run_tab[i];
4214  }
4215  }
4216  }
4217 
4218  s->coded_score[n] = last_score;
4219 
4220  dc= FFABS(block[0]);
4221  last_non_zero= last_i - 1;
4222  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4223 
4224  if(last_non_zero < start_i)
4225  return last_non_zero;
4226 
4227  if(last_non_zero == 0 && start_i == 0){
4228  int best_level= 0;
4229  int best_score= dc * dc;
4230 
4231  for(i=0; i<coeff_count[0]; i++){
4232  int level= coeff[i][0];
4233  int alevel= FFABS(level);
4234  int unquant_coeff, score, distortion;
4235 
4236  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4237  unquant_coeff= (alevel*qmul + qadd)>>3;
4238  } else{ // MPEG-1
4239  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4240  unquant_coeff = (unquant_coeff - 1) | 1;
4241  }
4242  unquant_coeff = (unquant_coeff + 4) >> 3;
4243  unquant_coeff<<= 3 + 3;
4244 
4245  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4246  level+=64;
4247  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4248  else score= distortion + esc_length*lambda;
4249 
4250  if(score < best_score){
4251  best_score= score;
4252  best_level= level - 64;
4253  }
4254  }
4255  block[0]= best_level;
4256  s->coded_score[n] = best_score - dc*dc;
4257  if(best_level == 0) return -1;
4258  else return last_non_zero;
4259  }
4260 
4261  i= last_i;
4262  av_assert2(last_level);
4263 
4264  block[ perm_scantable[last_non_zero] ]= last_level;
4265  i -= last_run + 1;
4266 
4267  for(; i>start_i; i -= run_tab[i] + 1){
4268  block[ perm_scantable[i-1] ]= level_tab[i];
4269  }
4270 
4271  return last_non_zero;
4272 }
4273 
/* 16-bit DCT basis functions, indexed as [coefficient][pixel].
 * Filled lazily by build_basis() — dct_quantize_refine() calls it
 * on first use when basis[0][0] is still 0. */
static int16_t basis[64][64];
4275 
4276 static void build_basis(uint8_t *perm){
4277  int i, j, x, y;
4278  emms_c();
4279  for(i=0; i<8; i++){
4280  for(j=0; j<8; j++){
4281  for(y=0; y<8; y++){
4282  for(x=0; x<8; x++){
4283  double s= 0.25*(1<<BASIS_SHIFT);
4284  int index= 8*i + j;
4285  int perm_index= perm[index];
4286  if(i==0) s*= sqrt(0.5);
4287  if(j==0) s*= sqrt(0.5);
4288  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4289  }
4290  }
4291  }
4292  }
4293 }
4294 
/**
 * Rate-distortion refinement of an already quantized 8x8 block
 * ("quantizer noise shaping"): repeatedly try changing single
 * coefficients by +/-1 and keep any change that lowers the combined
 * distortion + lambda * bit-cost score, until no single change helps.
 *
 * @param s      encoder context (scantables, VLC length tables, lambda2,
 *               DSP callbacks are read from it)
 * @param block  quantized coefficients, modified in place
 * @param weight perceptual weights; remapped in place to the 16..63 range
 * @param orig   reference pixels/coefficients the distortion is measured
 *               against
 * @param n      block index (selects luma vs. chroma scale/VLC tables)
 * @param qscale quantizer scale
 * @return index of the (possibly new) last non-zero coefficient in scan order
 */
static int dct_quantize_refine(MPVEncContext *const s, //FIXME breaks denoise?
                        int16_t *block, int16_t *weight, int16_t *orig,
                        int n, int qscale){
    int16_t rem[64];
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const uint8_t *length;
    const uint8_t *last_length;
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->c.mb_intra is true

    // Lazily build the DCT basis table on first use.
    if(basis[0][0] == 0)
        build_basis(s->c.idsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->c.mb_intra) {
        scantable = s->c.intra_scantable.scantable;
        perm_scantable = s->c.intra_scantable.permutated;
        if (!s->c.h263_aic) {
            if (n < 4)
                q = s->c.y_dc_scale;
            else
                q = s->c.c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
//        if (s->c.mpeg_quant || s->c.out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable = s->c.inter_scantable.scantable;
        perm_scantable = s->c.inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->c.block_last_index[n];

    // rem[] holds the (scaled) reconstruction error; dequantized
    // coefficients are accumulated into it via add_8x8basis() below.
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    // Remap the perceptual weights into the 16..63 range and derive the
    // rate-distortion lambda from their energy and s->lambda2.
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda = sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    // Build the run-length table of the current non-zero coefficients and
    // add their dequantized values into the error accumulator.
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    // Greedy refinement: per iteration, find the single +/-1 coefficient
    // change with the best score; apply it and repeat until none improves.
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            // DCT of the weighted residual; used below to skip introducing
            // a coefficient whose sign matches the residual gradient's.
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        // Intra blocks: also try +/-1 on the DC coefficient (index 0).
        if(start_i){
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->c.mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        // Magnitude change of an existing coefficient:
                        // score the VLC length difference.
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=   last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        // Introducing a new +/-1 coefficient: it splits the
                        // current run, so account for the changed run codes.
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            // next_level outside 0..127 cannot be indexed in
                            // the uni VLC table; treat it as escape (0).
                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=   length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                         - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    // Removing a +/-1 coefficient: the surrounding runs merge.
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=   last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                     - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            // Apply the winning change, update last_non_zero, rebuild the
            // run-length table, and fold the change into the error term.
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;
        }
    }

    return last_non_zero;
}
4610 
/**
 * Permute an 8x8 block according to permutation.
 * @param block the block which will be permuted according to
 * the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to
 * speed the permutation up
 * @param scantable the used scantable, this is only used to speed the
 * permutation up, the block is not (inverse) permutated
 * to scantable order!
 */
void ff_block_permute(int16_t *block, const uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t tmp[64];

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* Gather the coefficients in scan order and clear their slots first,
     * so overlapping source/destination positions cannot clobber data. */
    for (int i = 0; i <= last; i++) {
        const int pos = scantable[i];
        tmp[i]     = block[pos];
        block[pos] = 0;
    }

    /* Scatter them back through the permutation. */
    for (int i = 0; i <= last; i++)
        block[permutation[scantable[i]]] = tmp[i];
}
4646 
4647 static int dct_quantize_c(MPVEncContext *const s,
4648  int16_t *block, int n,
4649  int qscale, int *overflow)
4650 {
4651  int i, last_non_zero, q, start_i;
4652  const int *qmat;
4653  const uint8_t *scantable;
4654  int bias;
4655  int max=0;
4656  unsigned int threshold1, threshold2;
4657 
4658  s->fdsp.fdct(block);
4659 
4660  if(s->dct_error_sum)
4661  s->denoise_dct(s, block);
4662 
4663  if (s->c.mb_intra) {
4664  scantable = s->c.intra_scantable.scantable;
4665  if (!s->c.h263_aic) {
4666  if (n < 4)
4667  q = s->c.y_dc_scale;
4668  else
4669  q = s->c.c_dc_scale;
4670  q = q << 3;
4671  } else
4672  /* For AIC we skip quant/dequant of INTRADC */
4673  q = 1 << 3;
4674 
4675  /* note: block[0] is assumed to be positive */
4676  block[0] = (block[0] + (q >> 1)) / q;
4677  start_i = 1;
4678  last_non_zero = 0;
4679  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4680  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4681  } else {
4682  scantable = s->c.inter_scantable.scantable;
4683  start_i = 0;
4684  last_non_zero = -1;
4685  qmat = s->q_inter_matrix[qscale];
4686  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4687  }
4688  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4689  threshold2= (threshold1<<1);
4690  for(i=63;i>=start_i;i--) {
4691  const int j = scantable[i];
4692  int64_t level = (int64_t)block[j] * qmat[j];
4693 
4694  if(((uint64_t)(level+threshold1))>threshold2){
4695  last_non_zero = i;
4696  break;
4697  }else{
4698  block[j]=0;
4699  }
4700  }
4701  for(i=start_i; i<=last_non_zero; i++) {
4702  const int j = scantable[i];
4703  int64_t level = (int64_t)block[j] * qmat[j];
4704 
4705 // if( bias+level >= (1<<QMAT_SHIFT)
4706 // || bias-level >= (1<<QMAT_SHIFT)){
4707  if(((uint64_t)(level+threshold1))>threshold2){
4708  if(level>0){
4709  level= (bias + level)>>QMAT_SHIFT;
4710  block[j]= level;
4711  }else{
4712  level= (bias - level)>>QMAT_SHIFT;
4713  block[j]= -level;
4714  }
4715  max |=level;
4716  }else{
4717  block[j]=0;
4718  }
4719  }
4720  *overflow= s->max_qcoeff < max; //overflow might have happened
4721 
4722  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4723  if (s->c.idsp.perm_type != FF_IDCT_PERM_NONE)
4724  ff_block_permute(block, s->c.idsp.idct_permutation,
4725  scantable, last_non_zero);
4726 
4727  return last_non_zero;
4728 }
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
encode_frame
static int encode_frame(AVCodecContext *c, const AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1484
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3962
put_dct
static void put_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1140
MPV_MAX_PLANES
#define MPV_MAX_PLANES
Definition: mpegpicture.h:31
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MPVEncContext *const s, int type)
Definition: motion_est.c:1661
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:422
FF_MATRIX_TYPE_INTRA
#define FF_MATRIX_TYPE_INTRA
Check if the elements of codec context matrices (intra_matrix, inter_matrix or chroma_intra_matrix) a...
Definition: encode.h:103
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:84
ff_encode_reordered_opaque
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
Definition: encode.c:220
mpegvideo_unquantize.h
MPVMainEncContext::me_pre
int me_pre
prepass for motion estimation
Definition: mpegvideoenc.h:237
ff_fix_long_mvs
void ff_fix_long_mvs(MPVEncContext *const s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1710
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:430
MPVMainEncContext::fcode_tab
const uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideoenc.h:212
MPVMainEncContext::fixed_qscale
int fixed_qscale
fixed qscale if non zero
Definition: mpegvideoenc.h:230
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegvideoenc.h:271
encode_mb_hq
static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2713
MPVMainEncContext::frame_skip_cmp_fn
me_cmp_func frame_skip_cmp_fn
Definition: mpegvideoenc.h:219
MPVMainEncContext::bit_rate
int64_t bit_rate
Definition: mpegvideoenc.h:224
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MPVEncContext *const s, int n, int threshold)
Definition: mpegvideo_enc.c:2156
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:192
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
h263data.h
ff_mpv_enc_class
const AVClass ff_mpv_enc_class
Definition: mpegvideo_enc.c:104
encode_mb
static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2617
level
uint8_t level
Definition: svq3.c:205
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1493
av_clip
#define av_clip
Definition: common.h:100
MPVEncContext
Definition: mpegvideoenc.h:45
init_unquantize
static av_cold void init_unquantize(MpegEncContext *const s, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:316
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:526
H263_GOB_HEIGHT
#define H263_GOB_HEIGHT(h)
Definition: h263.h:28
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:226
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:820
init_qscale_tab
static void init_qscale_tab(MPVEncContext *const s)
init s->c.cur_pic.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:244
update_noise_reduction
static void update_noise_reduction(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1887
MBBackup::mv_bits
int mv_bits
Definition: mpegvideo_enc.c:2639
mem_internal.h
MPVMainEncContext::dct_error_sum_base
char * dct_error_sum_base
backs dct_error_sum
Definition: mpegvideoenc.h:243
ff_me_init
av_cold int ff_me_init(MotionEstContext *c, AVCodecContext *avctx, const MECmpContext *mecc, int mpvenc)
Definition: motion_est.c:309
MBBackup::misc_bits
int misc_bits
Definition: mpegvideo_enc.c:2639
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:205
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
BUF_BITS
#define BUF_BITS
Definition: put_bits.h:47
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1277
set_frame_distances
static void set_frame_distances(MPVEncContext *const s)
Definition: mpegvideo_enc.c:3681
thread.h
frame_start
static void frame_start(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1909
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
ff_speedhq_end_slice
void ff_speedhq_end_slice(MPVEncContext *const s)
Definition: speedhqenc.c:113
MBBackup::block_last_index
int block_last_index[8]
Definition: mpegvideo_enc.c:2635
estimate_qp
static int estimate_qp(MPVMainEncContext *const m, int dry_run)
Definition: mpegvideo_enc.c:3642
ff_msmpeg4_encode_init
av_cold void ff_msmpeg4_encode_init(MPVMainEncContext *const m)
Definition: msmpeg4enc.c:672
matrix
Definition: vc1dsp.c:43
src1
const pixel * src1
Definition: h264pred_template.c:420
MPVEncContext::c
MpegEncContext c
the common base context
Definition: mpegvideoenc.h:46
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:213
MBBackup::last_mv
int last_mv[2][2][2]
Definition: mpegvideo_enc.c:2630
MPVMainEncContext::total_bits
int64_t total_bits
Definition: mpegvideoenc.h:225
mpegvideoenc.h
int64_t
long long int64_t
Definition: coverity.c:34
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
ff_dct_encode_init
av_cold void ff_dct_encode_init(MPVEncContext *const s)
Definition: mpegvideo_enc.c:301
MPVMainEncContext::noise_reduction
int noise_reduction
Definition: mpegvideoenc.h:203
COPY
#define COPY(a)
ff_me_init_pic
void ff_me_init_pic(MPVEncContext *const s)
Definition: motion_est.c:371
h263enc.h
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4274
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:948
estimate_best_b_count
static int estimate_best_b_count(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1505
MPVMainEncContext::last_lambda_for
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideoenc.h:231
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:164
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2830
ff_clean_h263_qscales
void ff_clean_h263_qscales(MPVEncContext *s)
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:808
out_size
int out_size
Definition: movenc.c:56
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:190
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:63
sse
static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2764
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegvideoenc.h:264
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:206
MPVMainEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideoenc.h:215
MPVUnquantDSPContext::dct_unquantize_mpeg1_intra
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:35
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:410
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:223
INTERLACED_DCT
#define INTERLACED_DCT(s)
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:512
AVFrame::width
int width
Definition: frame.h:482
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:191
w
uint8_t w
Definition: llviddspenc.c:38
qmat16
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.h:356
internal.h
MBBackup::last_bits
int last_bits
Definition: mpegvideo_enc.c:2639
AVPacket::data
uint8_t * data
Definition: packet.h:535
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
Definition: packet.c:377
AVOption
AVOption.
Definition: opt.h:429
encode.h
b
#define b
Definition: input.c:42
put_bytes_count
static int put_bytes_count(const PutBitContext *s, int round_up)
Definition: put_bits.h:100
MPVEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideoenc.h:51
data
const char data[16]
Definition: mxf.c:149
MPVMainEncContext::dts_delta
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
Definition: mpegvideoenc.h:189
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
write_slice_end
static void write_slice_end(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2877
ff_clean_intra_table_entries
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac for the current non-intra MB.
Definition: mpegvideo.c:528
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:225
MPVEncContext::b_field_select_table
uint8_t *[2][2] b_field_select_table
allocated jointly with p_field_select_table
Definition: mpegvideoenc.h:85
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:88
MPVEncContext::mb_type
uint16_t * mb_type
Table for candidate MB types.
Definition: mpegvideoenc.h:87
MpegEncContext::dest
uint8_t * dest[3]
Definition: mpegvideo.h:217
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:552
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:553
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideoenc.h:281
max
#define max(a, b)
Definition: cuda_runtime.h:33
ff_mpeg12_dc_scale_table
const uint8_t ff_mpeg12_dc_scale_table[4][32]
Definition: mpegvideodata.c:33
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:82
mathematics.h
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
sqr
static double sqr(double in)
Definition: af_afwtdn.c:872
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:306
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2812
MPVEncContext::lambda_table
int * lambda_table
Definition: mpegvideoenc.h:53
get_visual_weight
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2246
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:225
COPY_CONTEXT
#define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE)
Definition: mpegvideo_enc.c:2645
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:936
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:55
MPVMainEncContext::gop_size
int gop_size
Definition: mpegvideoenc.h:176
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1241
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:331
MPVMainEncContext::mb_var_sum
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegvideoenc.h:239
mpegutils.h
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:217
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:575
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:590
AVCodecContext::mb_cmp
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:862
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:75
MPVMainEncContext::encode_picture_header
int(* encode_picture_header)(struct MPVMainEncContext *m)
Definition: mpegvideoenc.h:221
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegvideoenc.h:275
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:431
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:189
MECmpContext::sum_abs_dctelem
int(* sum_abs_dctelem)(const int16_t *block)
Definition: me_cmp.h:56
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
update_mb_info
static void update_mb_info(MPVEncContext *const s, int startcode)
Definition: mpegvideo_enc.c:2917
MBBackup::i_tex_bits
int i_tex_bits
Definition: mpegvideo_enc.c:2639
MPVMainEncContext::coded_picture_number
int coded_picture_number
used to set pic->coded_picture_number
Definition: mpegvideoenc.h:180
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
set_bframe_chain_length
static int set_bframe_chain_length(MPVMainEncContext *const m)
Determines whether an input picture is discarded or not and if not determines the length of the next ...
Definition: mpegvideo_enc.c:1658
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:350
mpv_reconstruct_mb
static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
Performs dequantization and IDCT (if necessary)
Definition: mpegvideo_enc.c:1160
MBBackup::tex_pb
PutBitContext tex_pb
Definition: mpegvideo_enc.c:2642
mpeg4videoenc.h
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:878
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1926
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:309
sp5x.h
MBBackup::mb_skip_run
int mb_skip_run
Definition: mpegvideo_enc.c:2633
ALIGN
#define ALIGN(a)
aligns the bitstream to the given power of two
Definition: rtjpeg.c:30
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegvideo.h:57
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:53
FDCTDSPContext
Definition: fdctdsp.h:28
MPVMainEncContext::b_sensitivity
int b_sensitivity
Definition: mpegvideoenc.h:198
faandct.h
Floating point AAN DCT.
MPVEncContext::mb_mean
uint8_t * mb_mean
Table for MB luminance.
Definition: mpegvideoenc.h:90
MPVEncContext::b_bidir_forw_mv_table
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame.
Definition: mpegvideoenc.h:80
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: packet.c:198
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:54
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:829
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:440
mpeg12enc.h
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:179
MBBackup::interlaced_dct
int interlaced_dct
Definition: mpegvideo_enc.c:2636
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:46
ff_vbv_update
int ff_vbv_update(MPVMainEncContext *m, int frame_size)
Definition: ratecontrol.c:722
MpegEncContext::chroma_y_shift
int chroma_y_shift
Definition: mpegvideo.h:313
fail
#define fail()
Definition: checkasm.h:194
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegvideo.h:58
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:114
ff_h263_encode_init
void ff_h263_encode_init(MPVMainEncContext *m)
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:996
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegvideoenc.h:266
MPVUnquantDSPContext::dct_unquantize_h263_intra
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:43
perm
perm
Definition: f_perms.c:75
MAX_THREADS
#define MAX_THREADS
Definition: frame_thread_encoder.c:37
weight
const h264_weight_func weight
Definition: h264dsp_init.c:33
MPVMainEncContext::input_picture
MPVPicture * input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in display order
Definition: mpegvideoenc.h:182
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1209
MPVEncContext::mb_var
uint16_t * mb_var
Table for MB variances.
Definition: mpegvideoenc.h:88
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:314
pts
static int64_t pts
Definition: transcode_aac.c:644
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:284
get_intra_count
static int get_intra_count(MPVEncContext *const s, const uint8_t *src, const uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1242
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1353
sse_mb
static int sse_mb(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2786
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:67
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:298
ff_sqrt
#define ff_sqrt
Definition: mathops.h:216
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
ff_mpeg1_encode_init
static void ff_mpeg1_encode_init(MPVEncContext *s)
Definition: mpeg12enc.h:33
init_matrices
static av_cold int init_matrices(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:378
AVRational::num
int num
Numerator.
Definition: rational.h:59
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:135
refstruct.h
MpegEncContext::MSMP4_WMV2
@ MSMP4_WMV2
Definition: mpegvideo.h:283
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:310
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegvideoenc.h:268
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegvideoenc.h:273
MPVMainEncContext::frame_skip_factor
int frame_skip_factor
Definition: mpegvideoenc.h:216
skip_check
static int skip_check(MPVMainEncContext *const m, const MPVPicture *p, const MPVPicture *ref)
Definition: mpegvideo_enc.c:1442
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:51
MPVMainEncContext::stuffing_bits
int stuffing_bits
bits used for stuffing
Definition: mpegvideoenc.h:228
MPVMainEncContext::picture_in_gop_number
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideoenc.h:178
RateControlContext
rate control context.
Definition: ratecontrol.h:60
RateControlContext::num_entries
int num_entries
number of RateControlEntries
Definition: ratecontrol.h:61
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MPVEncContext *s, int mb_line)
MAX_MV
#define MAX_MV
Definition: motion_est.h:37
MPVPicture::shared
int shared
Definition: mpegpicture.h:87
MPVPicture::coded_picture_number
int coded_picture_number
Definition: mpegpicture.h:90
me_cmp_func
int(* me_cmp_func)(MPVEncContext *c, const uint8_t *blk1, const uint8_t *blk2, ptrdiff_t stride, int h)
Definition: me_cmp.h:50
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:625
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:96
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:129
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:63
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4276
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:697
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
MPVMainEncContext::tmp_frames
AVFrame * tmp_frames[MPVENC_MAX_B_FRAMES+2]
temporary frames used by b_frame_strategy = 2
Definition: mpegvideoenc.h:196
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:35
get_sae
static int get_sae(const uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1228
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MPVMainEncContext *const m)
Definition: rv10enc.c:34
s
#define s(width, name)
Definition: cbs_vp9.c:198
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:112
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:310
ff_mpvenc_dct_init_mips
av_cold void ff_mpvenc_dct_init_mips(MPVEncContext *s)
Definition: mpegvideoenc_init_mips.c:26
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
MPVMainEncContext::brd_scale
int brd_scale
Definition: mpegvideoenc.h:199
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MBBackup::esc3_level_length
int esc3_level_length
Definition: mpegvideo_enc.c:2641
MPVMainEncContext::reordered_input_picture
MPVPicture * reordered_input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in coded order
Definition: mpegvideoenc.h:183
MPVMainEncContext::intra_only
int intra_only
if true, only intra pictures are generated
Definition: mpegvideoenc.h:175
MPVMainEncContext::mc_mb_var_sum
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegvideoenc.h:240
merge_context_after_me
static void merge_context_after_me(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3607
g
const char * g
Definition: vf_curves.c:128
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:884
MPVMainEncContext::rc_context
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideoenc.h:234
MPVUnquantDSPContext::dct_unquantize_mpeg2_intra
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:39
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
ff_mpeg1_dc_scale_table
static const uint8_t *const ff_mpeg1_dc_scale_table
Definition: mpegvideodata.h:32
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:130
MPVEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideoenc.h:49
MpegEncContext::MSMP4_V3
@ MSMP4_V3
Definition: mpegvideo.h:281
MPVMainEncContext::header_bits
int header_bits
Definition: mpegvideoenc.h:227
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1553
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
RateControlEntry::new_pict_type
int new_pict_type
Definition: ratecontrol.h:51
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:228
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:66
MPVMainEncContext::max_b_frames
int max_b_frames
max number of B-frames
Definition: mpegvideoenc.h:177
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1067
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MPVEncContext *const s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:238
rv10enc.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1270
ff_block_permute
void ff_block_permute(int16_t *block, const uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4622
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1505
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:271
PutBitContext
Definition: put_bits.h:50
ff_speedhq_mb_y_order_to_mb
static int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.h:41
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegvideoenc.h:269
MBBackup::mv_dir
int mv_dir
Definition: mpegvideo_enc.c:2631
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:53
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:829
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2855
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:56
arg
const char * arg
Definition: jacosubdec.c:67
mpv_encode_init_static
static av_cold void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:272
ff_mpv_common_end
av_cold void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:509
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1255
MECmpContext
Definition: me_cmp.h:55
MpegEncContext::field_select
int field_select[2][2]
Definition: mpegvideo.h:203
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegvideoenc.h:274
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:75
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
NULL
#define NULL
Definition: coverity.c:32
MPVEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideoenc.h:119
MPVMainEncContext::lmin
int lmin
Definition: mpegvideoenc.h:206
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:601
run
uint8_t run
Definition: svq3.c:204
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:211
bias
static int bias(int x, int c)
Definition: vqcdec.c:115
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:95
me
#define me
Definition: vf_colormatrix.c:102
aandcttab.h
ff_mpv_common_defaults
av_cold void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:230
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
Definition: ratecontrol.c:912
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegvideoenc.h:270
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:466
MECmpContext::sad
me_cmp_func sad[6]
Definition: me_cmp.h:58
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:481
MPVPicture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:89
EDGE_WIDTH
#define EDGE_WIDTH
Definition: diracdec.c:47
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:115
MpegEncContext::inter_matrix
uint16_t inter_matrix[64]
Definition: mpegvideo.h:224
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:239
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
MPVEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideoenc.h:52
me_cmp_init
static av_cold int me_cmp_init(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:335
select_input_picture
static int select_input_picture(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1789
init_buffers
static av_cold int init_buffers(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:446
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:585
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:279
mathops.h
dct_error
static int dct_error(const struct algo *dct, int test, int is_idct, int speed, const int bits)
Definition: dct.c:183
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:327
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3606
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:868
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:1104
MBBackup::qscale
int qscale
Definition: mpegvideo_enc.c:2634
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:937
qpeldsp.h
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2940
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MPVEncContext *const s)
Definition: h261enc.c:119
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:143
ff_mpv_unquantize_init
#define ff_mpv_unquantize_init(s, bitexact, q_scale_type)
Definition: mpegvideo_unquantize.h:50
add_dequant_dct
static void add_dequant_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1147
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1305
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:69
mpeg12codecs.h
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1392
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
MBBackup::block
int16_t(* block)[64]
Definition: mpegvideo_enc.c:2637
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MPVEncContext *const dst, const MPVEncContext *const src)
Definition: mpegvideo_enc.c:256
MPVMainEncContext
Definition: mpegvideoenc.h:172
AVOnce
#define AVOnce
Definition: thread.h:202
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
MPVPicture::reference
int reference
Definition: mpegpicture.h:86
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:193
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:815
load_input_picture
static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1301
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:426
ff_set_mpeg4_time
void ff_set_mpeg4_time(MPVEncContext *const s)
Definition: mpeg4videoenc.c:892
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:535
ff_encode_alloc_frame
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
Definition: encode.c:818
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1380
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1312
MPVMainEncContext::last_pict_type
int last_pict_type
Definition: mpegvideoenc.h:232
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:225
f
f
Definition: af_crystalizer.c:122
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:502
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:82
MotionEstContext::temp
uint8_t * temp
Definition: motion_est.h:57
clip_coeffs
static void clip_coeffs(const MPVEncContext *const s, int16_t block[], int last_index)
Definition: mpegvideo_enc.c:2212
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
AVPacket::size
int size
Definition: packet.h:536
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1005
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:166
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:57
height
#define height
Definition: dsp.h:85
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:276
MPVMainEncContext::vbv_delay_pos
int vbv_delay_pos
offset of vbv_delay in the bitstream
Definition: mpegvideoenc.h:210
MECmpContext::sse
me_cmp_func sse[6]
Definition: me_cmp.h:59
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
MBBackup::mv_type
int mv_type
Definition: mpegvideo_enc.c:2631
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:222
AVFrame::quality
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:532
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:390
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:125
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:28
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegvideoenc.h:278
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:334
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegvideoenc.h:263
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
mpv_generic_options
static const AVOption mpv_generic_options[]
Definition: mpegvideo_enc.c:98
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
MPVMainEncContext::frame_bits
int frame_bits
bits used for the current frame
Definition: mpegvideoenc.h:226
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:95
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideoenc.h:283
encode_picture
static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt)
Definition: mpegvideo_enc.c:3696
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:497
AVCPBProperties::min_bitrate
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: defs.h:281
MECmpContext::nsse
me_cmp_func nsse[6]
Definition: me_cmp.h:67
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
ff_set_cmp
av_cold int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int type, int mpvenc)
Fill the function pointer array cmp[6] with me_cmp_funcs from c based upon type.
Definition: me_cmp.c:478
MPVEncContext::me
MotionEstContext me
Definition: mpegvideoenc.h:75
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:534
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:294
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: defs.h:60
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:345
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
Definition: mpegvideo_enc.c:377
MPVMainEncContext::input_picture_number
int input_picture_number
used to set pic->display_picture_number
Definition: mpegvideoenc.h:179
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MPVEncContext *s)
MPVUnquantDSPContext::dct_unquantize_mpeg2_inter
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:41
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:195
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:541
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:64
AVCPBProperties::avg_bitrate
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: defs.h:286
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:96
ScratchpadContext::scratchpad_buf
uint8_t * scratchpad_buf
the other *_scratchpad point into this buffer
Definition: mpegpicture.h:38
MPVMainEncContext::me_penalty_compensation
int me_penalty_compensation
Definition: mpegvideoenc.h:236
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:259
denoise_dct_c
static void denoise_dct_c(MPVEncContext *const s, int16_t *block)
Definition: mpegvideo_enc.c:3937
MPVEncContext::p_field_select_table
uint8_t *[2] p_field_select_table
Only the first element is allocated.
Definition: mpegvideoenc.h:84
M_PI
#define M_PI
Definition: mathematics.h:67
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegvideoenc.h:276
MBBackup
Definition: mpegvideo_enc.c:2627
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:220
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegvideoenc.h:265
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
MpegEncContext::MSMP4_V2
@ MSMP4_V2
Definition: mpegvideo.h:280
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
ff_mjpeg_add_icc_profile_size
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
Definition: mjpegenc_common.c:137
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:311
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: defs.h:301
MPVEncContext::b_field_mv_table
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced B-frame.
Definition: mpegvideoenc.h:83
emms.h
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
get_bits_diff
static int get_bits_diff(MPVEncContext *s)
Definition: mpegvideoenc.h:382
MBBackup::last_dc
int last_dc[3]
Definition: mpegvideo_enc.c:2632
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:115
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:142
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:90
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:57
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:309
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
MBBackup::mv
int mv[2][4][2]
Definition: mpegvideo_enc.c:2629
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:528
MPVUnquantDSPContext::dct_unquantize_h263_inter
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:45
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:80
ff_rv20_encode_picture_header
int ff_rv20_encode_picture_header(MPVMainEncContext *m)
Definition: rv20enc.c:37
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2974
MPVMainEncContext::mv_table_base
int16_t(* mv_table_base)[2]
Definition: mpegvideoenc.h:244
MBBackup::pb2
PutBitContext pb2
Definition: mpegvideo_enc.c:2642
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:25
internal.h
ff_square_tab
const uint32_t ff_square_tab[512]
Definition: me_cmp.c:37
FF_MATRIX_TYPE_CHROMA_INTRA
#define FF_MATRIX_TYPE_CHROMA_INTRA
Definition: encode.h:105
ff_h263_update_mb
void ff_h263_update_mb(MPVEncContext *s)
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:971
src2
const pixel * src2
Definition: h264pred_template.c:421
MPVEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideoenc.h:121
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:276
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:211
ff_rate_control_init
av_cold int ff_rate_control_init(MPVMainEncContext *const m)
Definition: ratecontrol.c:497
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:49
MPVENC_MAX_B_FRAMES
#define MPVENC_MAX_B_FRAMES
Definition: mpegvideoenc.h:43
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
mpv_encode_defaults
static av_cold void mpv_encode_defaults(MPVMainEncContext *const m)
Set the given MPVEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:281
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:525
MPVMainEncContext::next_lambda
int next_lambda
next lambda used for retrying to encode a frame
Definition: mpegvideoenc.h:229
MpegEncContext::sc
ScratchpadContext sc
Definition: mpegvideo.h:164
AV_STRINGIFY
#define AV_STRINGIFY(s)
Definition: macros.h:66
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:879
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:498
MpegEncContext::MSMP4_WMV1
@ MSMP4_WMV1
Definition: mpegvideo.h:282
MPVEncContext::b_bidir_back_mv_table
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame.
Definition: mpegvideoenc.h:81
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
MPVMainEncContext::border_masking
float border_masking
Definition: mpegvideoenc.h:205
MPVEncContext::mc_mb_var
uint16_t * mc_mb_var
Table for motion compensated MB variances.
Definition: mpegvideoenc.h:89
ff_write_pass1_stats
void ff_write_pass1_stats(MPVMainEncContext *const m)
Definition: ratecontrol.c:38
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MPVEncContext *const s)
Definition: msmpeg4enc.c:284
MPVMainEncContext::last_non_b_pict_type
int last_non_b_pict_type
used for MPEG-4 gmc B-frames & ratecontrol
Definition: mpegvideoenc.h:233
AVCodecContext::height
int height
Definition: avcodec.h:592
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:493
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
prepare_picture
static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
Allocates new buffers for an AVFrame and copies the properties from another AVFrame.
Definition: mpegvideo_enc.c:1269
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:63
ff_get_2pass_fcode
void ff_get_2pass_fcode(MPVMainEncContext *const m)
Definition: ratecontrol.c:900
frame_end
static void frame_end(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1849
idctdsp.h
MPVPicture::b_frame_score
int b_frame_score
Definition: mpegpicture.h:84
encode_mb_internal
static av_always_inline void encode_mb_internal(MPVEncContext *const s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
Definition: mpegvideo_enc.c:2270
avcodec.h
stride
#define stride
Definition: h264pred_template.c:536
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
vshift
static int vshift(enum AVPixelFormat fmt, int plane)
Definition: graph.c:99
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:332
ret
ret
Definition: filter_design.txt:187
MPVMainEncContext::vbv_ignore_qmax
int vbv_ignore_qmax
Definition: mpegvideoenc.h:207
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:80
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:292
MpegEncContext::MSMP4_UNUSED
@ MSMP4_UNUSED
Definition: mpegvideo.h:278
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1357
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:207
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:94
MPVEncContext::b_direct_mv_table
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode B-frame.
Definition: mpegvideoenc.h:82
ff_convert_matrix
void ff_convert_matrix(MPVEncContext *const s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:111
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
MPVMainEncContext::reordered_pts
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there's a delay
Definition: mpegvideoenc.h:193
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
MotionEstContext::scratchpad
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
Definition: motion_est.h:55
mpeg12data.h
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
MpegEncContext::chroma_x_shift
int chroma_x_shift
Definition: mpegvideo.h:312
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:836
MPVMainEncContext::frame_skip_cmp
int frame_skip_cmp
Definition: mpegvideoenc.h:218
MBBackup::dquant
int dquant
Definition: mpegvideo_enc.c:2640
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AVFrame::height
int height
Definition: frame.h:482
MBBackup::mb_skipped
int mb_skipped
Definition: mpegvideo_enc.c:2633
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:71
h261enc.h
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:392
MPVMainEncContext::lmax
int lmax
Definition: mpegvideoenc.h:206
ADD
#define ADD(field)
Definition: mpegvideo_enc.c:3605
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: packet.c:232
mpeg4video.h
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1234
AVRational::den
int den
Denominator.
Definition: rational.h:60
MPVUnquantDSPContext::dct_unquantize_mpeg1_inter
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:37
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MPVEncContext *const s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:238
MBBackup::i_count
int i_count
Definition: mpegvideo_enc.c:2639
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:822
MPVEncContext::p_mv_table
int16_t(* p_mv_table)[2]
MV table (1MV per MB) P-frame.
Definition: mpegvideoenc.h:77
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
temp
else temp
Definition: vf_mcdeint.c:263
ff_mpv_pic_check_linesize
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
Definition: mpegpicture.c:181
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:861
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
MPVMainEncContext::frame_skip_exp
int frame_skip_exp
Definition: mpegvideoenc.h:217
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:85
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:939
ff_mpv_replace_picture
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
Definition: mpegpicture.c:121
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:892
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegvideoenc.h:260
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
msmpeg4enc.h
mem.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:769
packet_internal.h
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
dct_quantize_refine
static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4295
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:29
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:544
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1284
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1366
merge_context_after_encode
static void merge_context_after_encode(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3614
MPVEncContext::b_forw_mv_table
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode B-frame.
Definition: mpegvideoenc.h:78
MBBackup::c
struct MBBackup::@189 c
MPVMainEncContext::b_frame_strategy
int b_frame_strategy
Definition: mpegvideoenc.h:197
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:291
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:188
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1021
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:938
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AVPacket
This structure stores compressed data.
Definition: packet.h:512
mpeg4videodata.h
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:957
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:253
MPVMainEncContext::scenechange_threshold
int scenechange_threshold
Definition: mpegvideoenc.h:201
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MPVEncContext *s)
Definition: mpegvideoenc.c:122
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:592
bytestream.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:455
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:460
ff_side_data_set_encoder_stats
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: packet.c:610
MBBackup::mb_intra
int mb_intra
Definition: mpegvideo_enc.c:2633
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:68
MPVUnquantDSPContext
Definition: mpegvideo_unquantize.h:34
h
h
Definition: vp9dsp_template.c:2070
MPVMainEncContext::user_specified_pts
int64_t user_specified_pts
last non-zero pts from user-supplied AVFrame
Definition: mpegvideoenc.h:185
ff_encode_add_cpb_side_data
AVCPBProperties * ff_encode_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: encode.c:880
dct_quantize_c
static int dct_quantize_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4647
MBBackup::pb
PutBitContext pb
Definition: mpegvideo_enc.c:2642
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:85
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideoenc.h:282
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:120
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:282
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
ff_mpv_alloc_pic_accessories
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture...
Definition: mpegpicture.c:237
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:64
update_qscale
static void update_qscale(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:198
RateControlContext::entry
RateControlEntry * entry
Definition: ratecontrol.h:62
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:62
MPVMainEncContext::s
MPVEncContext s
The main slicecontext.
Definition: mpegvideoenc.h:173
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:616
write_mb_info
static void write_mb_info(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2897
ff_mpv_alloc_pic_pool
av_cold AVRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
Definition: mpegpicture.c:90
src
#define src
Definition: vp8dsp.c:248
MBBackup::p_tex_bits
int p_tex_bits
Definition: mpegvideo_enc.c:2639
pixblockdsp.h
MPVEncContext::b_back_mv_table
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode B-frame.
Definition: mpegvideoenc.h:79
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:954
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:290
ff_check_codec_matrices
int ff_check_codec_matrices(AVCodecContext *avctx, unsigned types, uint16_t min, uint16_t max)
Definition: encode.c:911
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:308
FF_MATRIX_TYPE_INTER
#define FF_MATRIX_TYPE_INTER
Definition: encode.h:104
h263.h
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(RateControlContext *rcc)
Definition: ratecontrol.c:711
ff_get_best_fcode
int ff_get_best_fcode(MPVMainEncContext *const m, const int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1605
intmath.h