libsvtav1.c
/*
 * Scalable Video Technology for AV1 encoder library plugin
 *
 * Copyright (c) 2018 Intel Corporation
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>
#include <EbSvtAv1ErrorCodes.h>
#include <EbSvtAv1Enc.h>
#include <EbSvtAv1Metadata.h>

#include "libavutil/common.h"
#include "libavutil/frame.h"
#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mastering_display_metadata.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avassert.h"

#include "codec_internal.h"
#include "dovi_rpu.h"
#include "encode.h"
#include "packet_internal.h"
#include "avcodec.h"
#include "profiles.h"

typedef enum eos_status {
    EOS_NOT_REACHED = 0,
    EOS_SENT,
    EOS_RECEIVED,
}EOS_STATUS;

typedef struct SvtContext {
    const AVClass *class;

    EbSvtAv1EncConfiguration enc_params;
    EbComponentType *svt_handle;

    EbBufferHeaderType *in_buf;
    int raw_size;
    int max_tu_size;

    AVFrame *frame;

    AVBufferPool *pool;

    EOS_STATUS eos_flag;

    DOVIContext dovi;

    // User options.
    AVDictionary *svtav1_opts;
    int enc_mode;
    int crf;
    int qp;
} SvtContext;

static const struct {
    EbErrorType eb_err;
    int av_err;
    const char *desc;
} svt_errors[] = {
    { EB_ErrorNone,                   0,                 "success"                     },
    { EB_ErrorInsufficientResources,  AVERROR(ENOMEM),   "insufficient resources"      },
    { EB_ErrorUndefined,              AVERROR(EINVAL),   "undefined error"             },
    { EB_ErrorInvalidComponent,       AVERROR(EINVAL),   "invalid component"           },
    { EB_ErrorBadParameter,           AVERROR(EINVAL),   "bad parameter"               },
    { EB_ErrorDestroyThreadFailed,    AVERROR_EXTERNAL,  "failed to destroy thread"    },
    { EB_ErrorSemaphoreUnresponsive,  AVERROR_EXTERNAL,  "semaphore unresponsive"      },
    { EB_ErrorDestroySemaphoreFailed, AVERROR_EXTERNAL,  "failed to destroy semaphore" },
    { EB_ErrorCreateMutexFailed,      AVERROR_EXTERNAL,  "failed to create mutex"      },
    { EB_ErrorMutexUnresponsive,      AVERROR_EXTERNAL,  "mutex unresponsive"          },
    { EB_ErrorDestroyMutexFailed,     AVERROR_EXTERNAL,  "failed to destroy mutex"     },
    { EB_NoErrorEmptyQueue,           AVERROR(EAGAIN),   "empty queue"                 },
};

static int svt_map_error(EbErrorType eb_err, const char **desc)
{
    int i;

    av_assert0(desc);
    for (i = 0; i < FF_ARRAY_ELEMS(svt_errors); i++) {
        if (svt_errors[i].eb_err == eb_err) {
            *desc = svt_errors[i].desc;
            return svt_errors[i].av_err;
        }
    }
    *desc = "unknown error";
    return AVERROR_UNKNOWN;
}

static int svt_print_error(void *log_ctx, EbErrorType err,
                           const char *error_string)
{
    const char *desc;
    int ret = svt_map_error(err, &desc);

    av_log(log_ctx, AV_LOG_ERROR, "%s: %s (0x%x)\n", error_string, desc, err);

    return ret;
}
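
/*
 * Usage pattern for the two helpers above: callers log and translate an
 * SVT-AV1 status in one step, e.g.
 *
 *     if (svt_ret != EB_ErrorNone)
 *         return svt_print_error(avctx, svt_ret, "Error initializing encoder");
 *
 * so the library error code is printed once and the corresponding AVERROR
 * value is propagated up the libavcodec call chain.
 */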

static int alloc_buffer(EbSvtAv1EncConfiguration *config, SvtContext *svt_enc)
{
    const size_t luma_size = config->source_width * config->source_height *
        (config->encoder_bit_depth > 8 ? 2 : 1);

    EbSvtIOFormat *in_data;

    svt_enc->raw_size = luma_size * 3 / 2;

    // allocate buffer for in and out
    svt_enc->in_buf = av_mallocz(sizeof(*svt_enc->in_buf));
    if (!svt_enc->in_buf)
        return AVERROR(ENOMEM);

    svt_enc->in_buf->p_buffer = av_mallocz(sizeof(*in_data));
    if (!svt_enc->in_buf->p_buffer)
        return AVERROR(ENOMEM);

    svt_enc->in_buf->size = sizeof(*svt_enc->in_buf);

    return 0;

}
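
/*
 * alloc_buffer() only allocates the EbBufferHeaderType wrapper plus an
 * EbSvtIOFormat descriptor; no pixel storage is reserved here. The plane
 * pointers of each incoming AVFrame are attached to the descriptor later in
 * read_in_data(), so picture data reaches SVT-AV1 without an extra copy.
 * raw_size (1.5x the luma plane, i.e. one 4:2:0 frame) is only used as a
 * sanity bound when sizing the output buffer pool in get_output_ref().
 */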

static void handle_mdcv(struct EbSvtAv1MasteringDisplayInfo *dst,
                        const AVMasteringDisplayMetadata *mdcv)
{
    if (mdcv->has_primaries) {
        const struct EbSvtAv1ChromaPoints *const points[] = {
            &dst->r,
            &dst->g,
            &dst->b,
        };

        for (int i = 0; i < 3; i++) {
            const struct EbSvtAv1ChromaPoints *dst = points[i];
            const AVRational *src = mdcv->display_primaries[i];

            AV_WB16(&dst->x,
                    av_rescale_q(1, src[0], (AVRational){ 1, (1 << 16) }));
            AV_WB16(&dst->y,
                    av_rescale_q(1, src[1], (AVRational){ 1, (1 << 16) }));
        }

        AV_WB16(&dst->white_point.x,
                av_rescale_q(1, mdcv->white_point[0],
                             (AVRational){ 1, (1 << 16) }));
        AV_WB16(&dst->white_point.y,
                av_rescale_q(1, mdcv->white_point[1],
                             (AVRational){ 1, (1 << 16) }));
    }

    if (mdcv->has_luminance) {
        AV_WB32(&dst->max_luma,
                av_rescale_q(1, mdcv->max_luminance,
                             (AVRational){ 1, (1 << 8) }));
        AV_WB32(&dst->min_luma,
                av_rescale_q(1, mdcv->min_luminance,
                             (AVRational){ 1, (1 << 14) }));
    }
}
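
/*
 * The av_rescale_q(1, v, (AVRational){ 1, 1 << n }) calls above turn a
 * rational value v into a fixed-point integer with n fractional bits, which
 * is how AV1 metadata carries mastering-display values (16 fractional bits
 * for chromaticity coordinates, 8 for max luminance, 14 for min luminance).
 * As a worked example, a white point x of 3127/10000 becomes
 * 3127 * 65536 / 10000, rounded to 20493.
 */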

static void handle_side_data(AVCodecContext *avctx,
                             EbSvtAv1EncConfiguration *param)
{
    const AVFrameSideData *cll_sd =
        av_frame_side_data_get(avctx->decoded_side_data,
            avctx->nb_decoded_side_data, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
    const AVFrameSideData *mdcv_sd =
        av_frame_side_data_get(avctx->decoded_side_data,
            avctx->nb_decoded_side_data,
            AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);

    if (cll_sd) {
        const AVContentLightMetadata *cll =
            (AVContentLightMetadata *)cll_sd->data;

        AV_WB16(&param->content_light_level.max_cll, cll->MaxCLL);
        AV_WB16(&param->content_light_level.max_fall, cll->MaxFALL);
    }

    if (mdcv_sd) {
        handle_mdcv(&param->mastering_display,
                    (AVMasteringDisplayMetadata *)mdcv_sd->data);
    }
}

static int config_enc_params(EbSvtAv1EncConfiguration *param,
                             AVCodecContext *avctx)
{
    SvtContext *svt_enc = avctx->priv_data;
    const AVPixFmtDescriptor *desc;
    const AVDictionaryEntry av_unused *en = NULL;

    // Update param from options
    if (svt_enc->enc_mode >= -1)
        param->enc_mode = svt_enc->enc_mode;

    if (avctx->bit_rate) {
        param->target_bit_rate = avctx->bit_rate;
        if (avctx->rc_max_rate != avctx->bit_rate)
            param->rate_control_mode = 1;
        else
            param->rate_control_mode = 2;

        param->max_qp_allowed = avctx->qmax;
        param->min_qp_allowed = avctx->qmin;
    }
    param->max_bit_rate = avctx->rc_max_rate;
    if ((avctx->bit_rate > 0 || avctx->rc_max_rate > 0) && avctx->rc_buffer_size)
        param->maximum_buffer_size_ms =
            avctx->rc_buffer_size * 1000LL /
            FFMAX(avctx->bit_rate, avctx->rc_max_rate);

    if (svt_enc->crf > 0) {
        param->qp = svt_enc->crf;
        param->rate_control_mode = 0;
    } else if (svt_enc->qp > 0) {
        param->qp = svt_enc->qp;
        param->rate_control_mode = 0;
        param->enable_adaptive_quantization = 0;
    }

    desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    param->color_primaries = avctx->color_primaries;
    param->matrix_coefficients = (desc->flags & AV_PIX_FMT_FLAG_RGB) ?
        AVCOL_SPC_RGB : avctx->colorspace;
    param->transfer_characteristics = avctx->color_trc;

    if (avctx->color_range != AVCOL_RANGE_UNSPECIFIED)
        param->color_range = avctx->color_range == AVCOL_RANGE_JPEG;
    else
        param->color_range = !!(desc->flags & AV_PIX_FMT_FLAG_RGB);

#if SVT_AV1_CHECK_VERSION(1, 0, 0)
    if (avctx->chroma_sample_location != AVCHROMA_LOC_UNSPECIFIED) {
        const char *name =
            av_chroma_location_name(avctx->chroma_sample_location);

        switch (avctx->chroma_sample_location) {
        case AVCHROMA_LOC_LEFT:
            param->chroma_sample_position = EB_CSP_VERTICAL;
            break;
        case AVCHROMA_LOC_TOPLEFT:
            param->chroma_sample_position = EB_CSP_COLOCATED;
            break;
        default:
            if (!name)
                break;

            av_log(avctx, AV_LOG_WARNING,
                   "Specified chroma sample location %s is unsupported "
                   "on the AV1 bit stream level. Usage of a container that "
                   "allows passing this information - such as Matroska - "
                   "is recommended.\n",
                   name);
            break;
        }
    }
#endif

    if (avctx->profile != AV_PROFILE_UNKNOWN)
        param->profile = avctx->profile;

    if (avctx->level != AV_LEVEL_UNKNOWN)
        param->level = avctx->level;

    // gop_size == 1 case is handled when encoding each frame by setting
    // pic_type to EB_AV1_KEY_PICTURE. For gop_size > 1, set the
    // intra_period_length. Even though setting intra_period_length to 0 should
    // work in this case, it does not.
    // See: https://gitlab.com/AOMediaCodec/SVT-AV1/-/issues/2076
    if (avctx->gop_size > 1)
        param->intra_period_length = avctx->gop_size - 1;

#if SVT_AV1_CHECK_VERSION(1, 1, 0)
    // In order for SVT-AV1 to force keyframes by setting pic_type to
    // EB_AV1_KEY_PICTURE on any frame, force_key_frames has to be set. Note
    // that this does not force all frames to be keyframes (it only forces a
    // keyframe when pic_type is set to EB_AV1_KEY_PICTURE). As of now, SVT-AV1
    // does not support arbitrary keyframe requests by setting pic_type to
    // EB_AV1_KEY_PICTURE, so it is done only when gop_size == 1.
    // FIXME: When SVT-AV1 supports arbitrary keyframe requests, this code needs
    // to be updated to set force_key_frames accordingly.
    if (avctx->gop_size == 1)
        param->force_key_frames = 1;
#endif

    if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
        param->frame_rate_numerator = avctx->framerate.num;
        param->frame_rate_denominator = avctx->framerate.den;
    } else {
        param->frame_rate_numerator = avctx->time_base.den;
FF_DISABLE_DEPRECATION_WARNINGS
        param->frame_rate_denominator = avctx->time_base.num
#if FF_API_TICKS_PER_FRAME
            * avctx->ticks_per_frame
#endif
            ;
FF_ENABLE_DEPRECATION_WARNINGS
    }

    /* 2 = IDR, closed GOP, 1 = CRA, open GOP */
    param->intra_refresh_type = avctx->flags & AV_CODEC_FLAG_CLOSED_GOP ? 2 : 1;

    handle_side_data(avctx, param);

#if SVT_AV1_CHECK_VERSION(0, 9, 1)
    while ((en = av_dict_iterate(svt_enc->svtav1_opts, en))) {
        EbErrorType ret = svt_av1_enc_parse_parameter(param, en->key, en->value);
        if (ret != EB_ErrorNone) {
            int level = (avctx->err_recognition & AV_EF_EXPLODE) ? AV_LOG_ERROR : AV_LOG_WARNING;
            av_log(avctx, level, "Error parsing option %s: %s.\n", en->key, en->value);
            if (avctx->err_recognition & AV_EF_EXPLODE)
                return AVERROR(EINVAL);
        }
    }
#else
    if (av_dict_count(svt_enc->svtav1_opts)) {
        int level = (avctx->err_recognition & AV_EF_EXPLODE) ? AV_LOG_ERROR : AV_LOG_WARNING;
        av_log(avctx, level, "svt-params needs libavcodec to be compiled with SVT-AV1 "
               "headers >= 0.9.1.\n");
        if (avctx->err_recognition & AV_EF_EXPLODE)
            return AVERROR(ENOSYS);
    }
#endif

    param->source_width = avctx->width;
    param->source_height = avctx->height;

    param->encoder_bit_depth = desc->comp[0].depth;

    if (desc->log2_chroma_w == 1 && desc->log2_chroma_h == 1)
        param->encoder_color_format = EB_YUV420;
    else if (desc->log2_chroma_w == 1 && desc->log2_chroma_h == 0)
        param->encoder_color_format = EB_YUV422;
    else if (!desc->log2_chroma_w && !desc->log2_chroma_h)
        param->encoder_color_format = EB_YUV444;
    else {
        av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format\n");
        return AVERROR(EINVAL);
    }

    if ((param->encoder_color_format == EB_YUV422 || param->encoder_bit_depth > 10)
         && param->profile != AV_PROFILE_AV1_PROFESSIONAL) {
        av_log(avctx, AV_LOG_WARNING, "Forcing Professional profile\n");
        param->profile = AV_PROFILE_AV1_PROFESSIONAL;
    } else if (param->encoder_color_format == EB_YUV444 && param->profile != AV_PROFILE_AV1_HIGH) {
        av_log(avctx, AV_LOG_WARNING, "Forcing High profile\n");
        param->profile = AV_PROFILE_AV1_HIGH;
    }

    avctx->bit_rate = param->rate_control_mode > 0 ?
                      param->target_bit_rate : 0;
    avctx->rc_max_rate = param->max_bit_rate;
    avctx->rc_buffer_size = param->maximum_buffer_size_ms *
                            FFMAX(avctx->bit_rate, avctx->rc_max_rate) / 1000LL;

    if (avctx->bit_rate || avctx->rc_max_rate || avctx->rc_buffer_size) {
        AVCPBProperties *cpb_props = ff_encode_add_cpb_side_data(avctx);
        if (!cpb_props)
            return AVERROR(ENOMEM);

        cpb_props->buffer_size = avctx->rc_buffer_size;
        cpb_props->max_bitrate = avctx->rc_max_rate;
        cpb_props->avg_bitrate = avctx->bit_rate;
    }

    return 0;
}
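
/*
 * Mapping sketch (informational): with the option handling above, typical
 * invocations might look like
 *
 *     ffmpeg -i in.mkv -c:v libsvtav1 -preset 6 -crf 32 out.mkv
 *     ffmpeg -i in.mkv -c:v libsvtav1 -b:v 2M -maxrate 3M -bufsize 6M out.mkv
 *
 * A positive -crf selects constant-quality mode (rate_control_mode 0), while
 * setting a bit rate picks rate_control_mode 1 or 2 depending on whether
 * -maxrate differs from -b:v. Keys given via -svtav1-params are handed
 * verbatim to svt_av1_enc_parse_parameter(), so their names and ranges are
 * defined by the linked SVT-AV1 library rather than by this wrapper.
 */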

static int read_in_data(EbSvtAv1EncConfiguration *param, const AVFrame *frame,
                        EbBufferHeaderType *header_ptr)
{
    EbSvtIOFormat *in_data = (EbSvtIOFormat *)header_ptr->p_buffer;
    ptrdiff_t linesizes[4];
    size_t sizes[4];
    int bytes_shift = param->encoder_bit_depth > 8 ? 1 : 0;
    int ret, frame_size;

    for (int i = 0; i < 4; i++)
        linesizes[i] = frame->linesize[i];

    ret = av_image_fill_plane_sizes(sizes, frame->format, frame->height,
                                    linesizes);
    if (ret < 0)
        return ret;

    frame_size = 0;
    for (int i = 0; i < 4; i++) {
        if (sizes[i] > INT_MAX - frame_size)
            return AVERROR(EINVAL);
        frame_size += sizes[i];
    }

    in_data->luma = frame->data[0];
    in_data->cb   = frame->data[1];
    in_data->cr   = frame->data[2];

    in_data->y_stride  = AV_CEIL_RSHIFT(frame->linesize[0], bytes_shift);
    in_data->cb_stride = AV_CEIL_RSHIFT(frame->linesize[1], bytes_shift);
    in_data->cr_stride = AV_CEIL_RSHIFT(frame->linesize[2], bytes_shift);

    header_ptr->n_filled_len = frame_size;
    svt_metadata_array_free(&header_ptr->metadata);

    return 0;
}
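
/*
 * Stride handling in read_in_data(): AVFrame linesize[] is measured in
 * bytes, while the EbSvtIOFormat strides are expressed in samples, hence
 * the right shift by one when the bit depth is above 8 (two bytes per
 * sample). The plane pointers are passed through without copying the pixel
 * data; n_filled_len only records the total payload size for the library.
 */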

static av_cold int eb_enc_init(AVCodecContext *avctx)
{
    SvtContext *svt_enc = avctx->priv_data;
    EbErrorType svt_ret;
    int ret;

    svt_enc->eos_flag = EOS_NOT_REACHED;

    svt_ret = svt_av1_enc_init_handle(&svt_enc->svt_handle, svt_enc, &svt_enc->enc_params);
    if (svt_ret != EB_ErrorNone) {
        return svt_print_error(avctx, svt_ret, "Error initializing encoder handle");
    }

    ret = config_enc_params(&svt_enc->enc_params, avctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error configuring encoder parameters\n");
        return ret;
    }

    svt_ret = svt_av1_enc_set_parameter(svt_enc->svt_handle, &svt_enc->enc_params);
    if (svt_ret != EB_ErrorNone) {
        return svt_print_error(avctx, svt_ret, "Error setting encoder parameters");
    }

    svt_ret = svt_av1_enc_init(svt_enc->svt_handle);
    if (svt_ret != EB_ErrorNone) {
        return svt_print_error(avctx, svt_ret, "Error initializing encoder");
    }

    svt_enc->dovi.logctx = avctx;
    ret = ff_dovi_configure(&svt_enc->dovi, avctx);
    if (ret < 0)
        return ret;

    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        EbBufferHeaderType *headerPtr = NULL;

        svt_ret = svt_av1_enc_stream_header(svt_enc->svt_handle, &headerPtr);
        if (svt_ret != EB_ErrorNone) {
            return svt_print_error(avctx, svt_ret, "Error building stream header");
        }

        avctx->extradata_size = headerPtr->n_filled_len;
        avctx->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
        if (!avctx->extradata) {
            av_log(avctx, AV_LOG_ERROR,
                   "Cannot allocate AV1 header of size %d.\n", avctx->extradata_size);
            return AVERROR(ENOMEM);
        }

        memcpy(avctx->extradata, headerPtr->p_buffer, avctx->extradata_size);

        svt_ret = svt_av1_enc_stream_header_release(headerPtr);
        if (svt_ret != EB_ErrorNone) {
            return svt_print_error(avctx, svt_ret, "Error freeing stream header");
        }
    }

    svt_enc->frame = av_frame_alloc();
    if (!svt_enc->frame)
        return AVERROR(ENOMEM);

    return alloc_buffer(&svt_enc->enc_params, svt_enc);
}
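
/*
 * Rough initialization order used above (as far as the SVT-AV1 API is
 * concerned): svt_av1_enc_init_handle() creates the handle and fills
 * enc_params with library defaults, config_enc_params() overlays the user's
 * AVCodecContext settings, svt_av1_enc_set_parameter() hands them back to
 * the library, and svt_av1_enc_init() brings the encoder itself up. With
 * AV_CODEC_FLAG_GLOBAL_HEADER the stream header returned by
 * svt_av1_enc_stream_header() is copied into avctx->extradata so muxers can
 * store it out-of-band.
 */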

static int eb_send_frame(AVCodecContext *avctx, const AVFrame *frame)
{
    SvtContext *svt_enc = avctx->priv_data;
    EbBufferHeaderType *headerPtr = svt_enc->in_buf;
    AVFrameSideData *sd;
    EbErrorType svt_ret;
    int ret;

    if (!frame) {
        EbBufferHeaderType headerPtrLast;

        if (svt_enc->eos_flag == EOS_SENT)
            return 0;

        memset(&headerPtrLast, 0, sizeof(headerPtrLast));
        headerPtrLast.pic_type = EB_AV1_INVALID_PICTURE;
        headerPtrLast.flags = EB_BUFFERFLAG_EOS;

        svt_av1_enc_send_picture(svt_enc->svt_handle, &headerPtrLast);
        svt_enc->eos_flag = EOS_SENT;
        return 0;
    }

    ret = read_in_data(&svt_enc->enc_params, frame, headerPtr);
    if (ret < 0)
        return ret;

    headerPtr->flags         = 0;
    headerPtr->p_app_private = NULL;
    headerPtr->pts           = frame->pts;

    switch (frame->pict_type) {
    case AV_PICTURE_TYPE_I:
        headerPtr->pic_type = EB_AV1_KEY_PICTURE;
        break;
    default:
        // Actually means auto, or default.
        headerPtr->pic_type = EB_AV1_INVALID_PICTURE;
        break;
    }

    if (avctx->gop_size == 1)
        headerPtr->pic_type = EB_AV1_KEY_PICTURE;

    sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DOVI_METADATA);
    if (svt_enc->dovi.cfg.dv_profile && sd) {
        const AVDOVIMetadata *metadata = (const AVDOVIMetadata *)sd->data;
        uint8_t *t35;
        int size;
        if ((ret = ff_dovi_rpu_generate(&svt_enc->dovi, metadata, FF_DOVI_WRAP_T35,
                                        &t35, &size)) < 0)
            return ret;
        ret = svt_add_metadata(headerPtr, EB_AV1_METADATA_TYPE_ITUT_T35, t35, size);
        av_free(t35);
        if (ret < 0)
            return AVERROR(ENOMEM);
    } else if (svt_enc->dovi.cfg.dv_profile) {
        av_log(avctx, AV_LOG_ERROR, "Dolby Vision enabled, but received frame "
               "without AV_FRAME_DATA_DOVI_METADATA\n");
        return AVERROR_INVALIDDATA;
    }

    svt_ret = svt_av1_enc_send_picture(svt_enc->svt_handle, headerPtr);
    if (svt_ret != EB_ErrorNone)
        return svt_print_error(avctx, svt_ret, "Error sending a frame to encoder");

    return 0;
}
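
/*
 * Flushing: a NULL frame from the caller is translated into an empty buffer
 * with EB_BUFFERFLAG_EOS set and is sent exactly once (eos_flag guards
 * against repeats). Regular frames reuse the single pre-allocated in_buf
 * header; pic_type stays EB_AV1_INVALID_PICTURE ("let the encoder decide")
 * unless the caller forces an I frame or gop_size == 1 requests all-intra
 * output.
 */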

static AVBufferRef *get_output_ref(AVCodecContext *avctx, SvtContext *svt_enc, int filled_len)
{
    if (filled_len > svt_enc->max_tu_size) {
        const int max_frames = 8;
        int max_tu_size;

        if (filled_len > svt_enc->raw_size * max_frames) {
            av_log(avctx, AV_LOG_ERROR, "TU size > %d raw frame size.\n", max_frames);
            return NULL;
        }

        max_tu_size = 1 << av_ceil_log2(filled_len);
        av_buffer_pool_uninit(&svt_enc->pool);
        svt_enc->pool = av_buffer_pool_init(max_tu_size + AV_INPUT_BUFFER_PADDING_SIZE, NULL);
        if (!svt_enc->pool)
            return NULL;

        svt_enc->max_tu_size = max_tu_size;
    }
    av_assert0(svt_enc->pool);

    return av_buffer_pool_get(svt_enc->pool);
}
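
/*
 * Output packets are served from an AVBufferPool that grows on demand: when
 * a temporal unit exceeds the current max_tu_size, the pool is re-created
 * with buffers of the next power of two plus AV_INPUT_BUFFER_PADDING_SIZE.
 * For example, a 100000-byte TU grows max_tu_size to 131072. TUs larger
 * than eight raw frames are rejected as an error.
 */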

static int eb_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    SvtContext *svt_enc = avctx->priv_data;
    EbBufferHeaderType *headerPtr;
    AVFrame *frame = svt_enc->frame;
    EbErrorType svt_ret;
    AVBufferRef *ref;
    int ret = 0, pict_type;

    if (svt_enc->eos_flag == EOS_RECEIVED)
        return AVERROR_EOF;

    ret = ff_encode_get_frame(avctx, frame);
    if (ret < 0 && ret != AVERROR_EOF)
        return ret;
    if (ret == AVERROR_EOF)
        frame = NULL;

    ret = eb_send_frame(avctx, frame);
    if (ret < 0)
        return ret;
    av_frame_unref(svt_enc->frame);

    svt_ret = svt_av1_enc_get_packet(svt_enc->svt_handle, &headerPtr, svt_enc->eos_flag);
    if (svt_ret == EB_NoErrorEmptyQueue)
        return AVERROR(EAGAIN);
    else if (svt_ret != EB_ErrorNone)
        return svt_print_error(avctx, svt_ret, "Error getting an output packet from encoder");

#if SVT_AV1_CHECK_VERSION(2, 0, 0)
    if (headerPtr->flags & EB_BUFFERFLAG_EOS) {
        svt_enc->eos_flag = EOS_RECEIVED;
        svt_av1_enc_release_out_buffer(&headerPtr);
        return AVERROR_EOF;
    }
#endif

    ref = get_output_ref(avctx, svt_enc, headerPtr->n_filled_len);
    if (!ref) {
        av_log(avctx, AV_LOG_ERROR, "Failed to allocate output packet.\n");
        svt_av1_enc_release_out_buffer(&headerPtr);
        return AVERROR(ENOMEM);
    }
    pkt->buf = ref;
    pkt->data = ref->data;

    memcpy(pkt->data, headerPtr->p_buffer, headerPtr->n_filled_len);
    memset(pkt->data + headerPtr->n_filled_len, 0, AV_INPUT_BUFFER_PADDING_SIZE);

    pkt->size = headerPtr->n_filled_len;
    pkt->pts  = headerPtr->pts;
    pkt->dts  = headerPtr->dts;

    switch (headerPtr->pic_type) {
    case EB_AV1_KEY_PICTURE:
        pkt->flags |= AV_PKT_FLAG_KEY;
        // fall-through
    case EB_AV1_INTRA_ONLY_PICTURE:
        pict_type = AV_PICTURE_TYPE_I;
        break;
    case EB_AV1_INVALID_PICTURE:
        pict_type = AV_PICTURE_TYPE_NONE;
        break;
    default:
        pict_type = AV_PICTURE_TYPE_P;
        break;
    }

    if (headerPtr->pic_type == EB_AV1_NON_REF_PICTURE)
        pkt->flags |= AV_PKT_FLAG_DISPOSABLE;

#if !(SVT_AV1_CHECK_VERSION(2, 0, 0))
    if (headerPtr->flags & EB_BUFFERFLAG_EOS)
        svt_enc->eos_flag = EOS_RECEIVED;
#endif

    ff_side_data_set_encoder_stats(pkt, headerPtr->qp * FF_QP2LAMBDA, NULL, 0, pict_type);

    svt_av1_enc_release_out_buffer(&headerPtr);

    return 0;
}
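
/*
 * eb_receive_packet() drives the encoder in libavcodec's receive_packet
 * model: it pulls the next input frame itself via ff_encode_get_frame(),
 * forwards it (or the EOS signal) through eb_send_frame(), then polls
 * svt_av1_enc_get_packet(). EB_NoErrorEmptyQueue maps to AVERROR(EAGAIN),
 * i.e. "no output yet, feed more input", and an output buffer flagged
 * EB_BUFFERFLAG_EOS ends the stream with AVERROR_EOF.
 */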

static av_cold int eb_enc_close(AVCodecContext *avctx)
{
    SvtContext *svt_enc = avctx->priv_data;

    if (svt_enc->svt_handle) {
        svt_av1_enc_deinit(svt_enc->svt_handle);
        svt_av1_enc_deinit_handle(svt_enc->svt_handle);
    }
    if (svt_enc->in_buf) {
        av_free(svt_enc->in_buf->p_buffer);
        svt_metadata_array_free(&svt_enc->in_buf->metadata);
        av_freep(&svt_enc->in_buf);
    }

    av_buffer_pool_uninit(&svt_enc->pool);
    av_frame_free(&svt_enc->frame);
    ff_dovi_ctx_unref(&svt_enc->dovi);

    return 0;
}

#define OFFSET(x) offsetof(SvtContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "preset", "Encoding preset",
      OFFSET(enc_mode), AV_OPT_TYPE_INT, { .i64 = -2 }, -2, MAX_ENC_PRESET, VE },

    FF_AV1_PROFILE_OPTS

#define LEVEL(name, value) name, NULL, 0, AV_OPT_TYPE_CONST, \
      { .i64 = value }, 0, 0, VE, .unit = "avctx.level"
        { LEVEL("2.0", 20) },
        { LEVEL("2.1", 21) },
        { LEVEL("2.2", 22) },
        { LEVEL("2.3", 23) },
        { LEVEL("3.0", 30) },
        { LEVEL("3.1", 31) },
        { LEVEL("3.2", 32) },
        { LEVEL("3.3", 33) },
        { LEVEL("4.0", 40) },
        { LEVEL("4.1", 41) },
        { LEVEL("4.2", 42) },
        { LEVEL("4.3", 43) },
        { LEVEL("5.0", 50) },
        { LEVEL("5.1", 51) },
        { LEVEL("5.2", 52) },
        { LEVEL("5.3", 53) },
        { LEVEL("6.0", 60) },
        { LEVEL("6.1", 61) },
        { LEVEL("6.2", 62) },
        { LEVEL("6.3", 63) },
        { LEVEL("7.0", 70) },
        { LEVEL("7.1", 71) },
        { LEVEL("7.2", 72) },
        { LEVEL("7.3", 73) },
#undef LEVEL

    { "crf", "Constant Rate Factor value", OFFSET(crf),
      AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 63, VE },
    { "qp", "Initial Quantizer level value", OFFSET(qp),
      AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 63, VE },
    { "svtav1-params", "Set the SVT-AV1 configuration using a :-separated list of key=value parameters", OFFSET(svtav1_opts), AV_OPT_TYPE_DICT, { 0 }, 0, 0, VE },

    { "dolbyvision", "Enable Dolby Vision RPU coding", OFFSET(dovi.enable), AV_OPT_TYPE_BOOL, {.i64 = FF_DOVI_AUTOMATIC }, -1, 1, VE, .unit = "dovi" },
        { "auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_DOVI_AUTOMATIC}, .flags = VE, .unit = "dovi" },

    {NULL},
};

static const AVClass class = {
    .class_name = "libsvtav1",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const FFCodecDefault eb_enc_defaults[] = {
    { "b",     "0"     },
    { "flags", "+cgop" },
    { "g",     "-1"    },
    { "qmin",  "1"     },
    { "qmax",  "63"    },
    { NULL },
};
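
/*
 * The defaults above intentionally differ from the generic libavcodec ones:
 * "b" = 0 means no bit rate is imposed on the library unless the user sets
 * one, "+cgop" requests closed GOPs (intra_refresh_type 2), "g" = -1 leaves
 * the keyframe interval at the library default, and qmin/qmax of 1/63 bound
 * the quantizer only when a bit-rate mode is in use.
 */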

const FFCodec ff_libsvtav1_encoder = {
    .p.name         = "libsvtav1",
    CODEC_LONG_NAME("SVT-AV1(Scalable Video Technology for AV1) encoder"),
    .priv_data_size = sizeof(SvtContext),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_AV1,
    .init           = eb_enc_init,
    FF_CODEC_RECEIVE_PACKET_CB(eb_receive_packet),
    .close          = eb_enc_close,
    .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_OTHER_THREADS,
    .caps_internal  = FF_CODEC_CAP_NOT_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_AUTO_THREADS,
    .p.pix_fmts     = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P,
                                                    AV_PIX_FMT_YUV420P10,
                                                    AV_PIX_FMT_NONE },
    .color_ranges   = AVCOL_RANGE_MPEG | AVCOL_RANGE_JPEG,
    .p.priv_class   = &class,
    .defaults       = eb_enc_defaults,
    .p.wrapper_name = "libsvtav1",
};
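
/*
 * Quick usage sketch (assuming an FFmpeg build configured with
 * --enable-libsvtav1):
 *
 *     ffmpeg -i input.mp4 -c:v libsvtav1 -preset 6 -crf 30 \
 *            -svtav1-params film-grain=8 output.mkv
 *
 * Only 4:2:0 input is accepted (yuv420p and its 10-bit variant, per
 * p.pix_fmts above); other chroma subsamplings need to be converted first,
 * e.g. with -vf format=yuv420p10le. The "film-grain" key is parsed by
 * SVT-AV1's own parameter parser and is shown purely as an illustration.
 */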