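/* FFmpeg DNxHD/DNxHR encoder (libavcodec/dnxhdenc.c): abridged source excerpts.
 * The function names in the comments below are inferred from context and from the
 * symbols referenced by this listing; elided statements are marked with ellipses. */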
#define DNX10BIT_QMAT_SHIFT 18
#define RC_VARIANCE 1 // use variance or ssd for fast rc
#define LAMBDA_FRAC_BITS 10

#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
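/* Entries from the encoder's AVOption table (options[]): the nitris_compat and ibias
 * options, followed by the tails of the per-profile named constants (DNxHD plus the
 * DNxHR variants); the offset/type fields of each entry are elided in this excerpt. */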
    { "nitris_compat", "encode with Avid Nitris compatibility", /* ... */
    { "ibias", "intra quant bias", /* ... */
      { .i64 = 0 }, INT_MIN, INT_MAX, VE },
    /* ... */ 0, 0, VE, .unit = "profile" },
    /* ... */ 0, 0, VE, .unit = "profile" },
    /* ... */ 0, 0, VE, .unit = "profile" },
    /* ... */ 0, 0, VE, .unit = "profile" },
    /* ... */ 0, 0, VE, .unit = "profile" },
    /* ... */ 0, 0, VE, .unit = "profile" },
                                          const uint8_t *pixels,

    for (i = 0; i < 4; i++) {
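/* dnxhd_10bit_get_pixels_8x4_sym(): copies four source rows into the 8x8 block so the
 * rows are mirrored top/bottom (source row 0 lands in block rows 0 and 7, row 1 in
 * rows 1 and 6, and so on). */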
                                           const uint8_t *pixels,

    memcpy(block + 0 * 8, pixels + 0 * line_size, 8 * sizeof(*block));
    memcpy(block + 7 * 8, pixels + 0 * line_size, 8 * sizeof(*block));
    memcpy(block + 1 * 8, pixels + 1 * line_size, 8 * sizeof(*block));
    memcpy(block + 6 * 8, pixels + 1 * line_size, 8 * sizeof(*block));
    memcpy(block + 2 * 8, pixels + 2 * line_size, 8 * sizeof(*block));
    memcpy(block + 5 * 8, pixels + 2 * line_size, 8 * sizeof(*block));
    memcpy(block + 3 * 8, pixels + 3 * line_size, 8 * sizeof(*block));
    memcpy(block + 4 * 8, pixels + 3 * line_size, 8 * sizeof(*block));
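/* dnxhd_10bit_dct_quantize_444(): selects the luma or chroma quant matrix, derives
 * threshold1/threshold2 from the intra quant bias, then thresholds coefficients in
 * scan order and returns the last non-zero index. */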
    int i, j, level, last_non_zero, start_i;
    const uint8_t *scantable = ctx->c.intra_scantable.scantable;
    unsigned int threshold1, threshold2;

    qmat = n < 4 ? ctx->q_intra_matrix[qscale]
                 : ctx->q_chroma_intra_matrix[qscale];
    bias       = ctx->intra_quant_bias * (1 << (16 - 8));
    threshold1 = (1 << 16) - bias - 1;
    threshold2 = (threshold1 << 1);

    for (i = 63; i >= start_i; i--) {
        if (((unsigned)(level + threshold1)) > threshold2) {

    for (i = start_i; i <= last_non_zero; i++) {
        if (((unsigned)(level + threshold1)) > threshold2) {

    /* ... */ scantable, last_non_zero);

    return last_non_zero;
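/* dnxhd_10bit_dct_quantize(): non-4:4:4 counterpart of the function above; walks
 * coefficients 1..63 in scan order and returns the last non-zero index. */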
    const uint8_t *scantable = ctx->c.intra_scantable.scantable;
    const int *qmat = n < 4 ? ctx->q_intra_matrix[qscale]
                            : ctx->q_chroma_intra_matrix[qscale];
    int last_non_zero = 0;

    for (i = 1; i < 64; ++i) {
        int j = scantable[i];

    /* ... */ scantable, last_non_zero);

    return last_non_zero;
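/* dnxhd_init_vlc(): builds the per-(level, run) AC code/bit tables and the run-length
 * code tables from the CID table. */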
    int max_level = 1 << (ctx->bit_depth + 2);

    ctx->vlc_codes = ctx->orig_vlc_codes + max_level * 2;
    ctx->vlc_bits  = ctx->orig_vlc_bits  + max_level * 2;

        offset = (alevel - 1) >> 6;

        for (j = 0; j < 257; j++) {
            if (ctx->cid_table->ac_info[2*j+0] >> 1 == alevel &&
                (!run || (ctx->cid_table->ac_info[2*j+1] & 2) && run)) {

                /* ... */ (ctx->cid_table->ac_codes[j] << 1) | (sign & 1);
                ctx->vlc_bits[index] = ctx->cid_table->ac_bits[j] + 1;

                ctx->vlc_codes[index] = ctx->cid_table->ac_codes[j];

        ctx->vlc_bits[index] += ctx->cid_table->index_bits;

    for (i = 0; i < 62; i++) {
        int run = ctx->cid_table->run[i];

        ctx->run_codes[run] = ctx->cid_table->run_codes[i];
        ctx->run_bits[run]  = ctx->cid_table->run_bits[i];
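/* dnxhd_init_qmat(): builds the per-qscale quant matrices from the CID weight tables.
 * The 8-bit path appears to go through ff_convert_matrix() (only the trailing arguments
 * of those calls are shown), while the later loops look like the 10-bit path that
 * derives the matrices directly from the weight tables. */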
    uint16_t weight_matrix[64] = { 1, };
    const uint8_t *luma_weight_table   = ctx->cid_table->luma_weight;
    const uint8_t *chroma_weight_table = ctx->cid_table->chroma_weight;

    if (ctx->bit_depth == 8) {
        for (int i = 1; i < 64; i++) {
            weight_matrix[j] = ctx->cid_table->luma_weight[i];

        /* ... */ weight_matrix, ctx->intra_quant_bias, 1,
                  ctx->m.c.avctx->qmax, 1);

        for (int i = 1; i < 64; i++) {
            weight_matrix[j] = ctx->cid_table->chroma_weight[i];

        /* ... */ weight_matrix, ctx->intra_quant_bias, 1,
                  ctx->m.c.avctx->qmax, 1);

        for (int qscale = 1; qscale <= ctx->m.c.avctx->qmax; qscale++) {
            for (int i = 0; i < 64; i++) {
                ctx->qmatrix_l[qscale][i]      <<= 2;
                ctx->qmatrix_c[qscale][i]      <<= 2;
                ctx->qmatrix_l16[qscale][0][i] <<= 2;
                ctx->qmatrix_l16[qscale][1][i] <<= 2;
                ctx->qmatrix_c16[qscale][0][i] <<= 2;
                ctx->qmatrix_c16[qscale][1][i] <<= 2;

    for (int qscale = 1; qscale <= ctx->m.c.avctx->qmax; qscale++) {
        for (int i = 1; i < 64; i++) {

            /* ... */ (qscale * luma_weight_table[i]);

            /* ... */ (qscale * chroma_weight_table[i]);

    ctx->m.q_chroma_intra_matrix16 = ctx->qmatrix_c16;
    ctx->m.q_chroma_intra_matrix   = ctx->qmatrix_c;
    ctx->m.q_intra_matrix16        = ctx->qmatrix_l16;
    ctx->m.q_intra_matrix          = ctx->qmatrix_l;
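/* dnxhd_init_rc(): the frame bit budget is the coding unit size minus the header area,
 * the 4-byte trailer, and any Nitris padding, converted to bits. */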
    ctx->frame_bits = (ctx->coding_unit_size -
                       ctx->data_offset - 4 - ctx->min_padding) * 8;
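/* dnxhd_encode_init(): validates the pixel format and dimensions for the selected
 * profile, configures macroblock geometry and block sizes, sets the header data offset,
 * and enables Avid Nitris padding when requested. */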
374 "pixel format is incompatible with DNxHD profile\n");
380 "pixel format is incompatible with DNxHR HQX profile\n");
388 "pixel format is incompatible with DNxHR LB/SQ/HQ profile\n");
397 "video parameters incompatible with DNxHD. Valid DNxHD profiles:\n");
403 if (
ctx->cid >= 1270 &&
ctx->cid <= 1274)
408 "Input dimensions too small, input must be at least 256x120\n");
416 ctx->m.c.mb_intra = 1;
417 ctx->m.c.h263_aic = 1;
434 ctx->block_width_l2 = 4;
435 }
else if (
ctx->bit_depth == 10) {
438 ctx->block_width_l2 = 4;
441 ctx->block_width_l2 = 3;
451 ctx->m.c.mb_height /= 2;
456 "Interlaced encoding is not supported for DNxHR profiles.\n");
460 ctx->m.c.mb_num =
ctx->m.c.mb_height *
ctx->m.c.mb_width;
466 ctx->coding_unit_size =
ctx->frame_size;
468 ctx->frame_size =
ctx->cid_table->frame_size;
469 ctx->coding_unit_size =
ctx->cid_table->coding_unit_size;
472 if (
ctx->m.c.mb_height > 68)
473 ctx->data_offset = 0x170 + (
ctx->m.c.mb_height << 2);
475 ctx->data_offset = 0x280;
483 if (
ctx->nitris_compat)
484 ctx->min_padding = 1600;
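/* dnxhd_write_header(): zeroes the header area and fills in the frame header bytes
 * (field/interlace flag, bit-depth marker, and related fields). */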
    memset(buf, 0, ctx->data_offset);

    if (ctx->cid >= 1270 && ctx->cid <= 1274)

    buf[5] = ctx->interlaced ? ctx->cur_field + 2 : 0x01;

    buf[0x21] = ctx->bit_depth == 10 ? 0x58 : 0x38;
    buf[0x22] = 0x88 + (ctx->interlaced << 2);
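/* dnxhd_encode_dc(): emits the DC size code and the DC difference bits in a single
 * put_bits() call. */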
    put_bits(pb, ctx->cid_table->dc_bits[nbits] + nbits,
             (ctx->cid_table->dc_codes[nbits] << nbits) +
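/* dnxhd_encode_block(): writes the AC coefficients; rlevel packs the signed level with
 * a "run follows" flag, and a run code is emitted whenever run_level is non-zero. */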
                                               int16_t *block, int last_index, int n)
    int last_non_zero = 0;

    for (i = 1; i <= last_index; i++) {
        j = ctx->m.c.intra_scantable.permutated[i];

            int run_level = i - last_non_zero - 1;
            int rlevel    = slevel * (1 << 1) | !!run_level;

            /* ... */ ctx->run_codes[run_level]);
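/* dnxhd_unquantize_c(): reconstructs coefficient values using the luma or chroma weight
 * table, selected by the block index n, with bit-depth-dependent scaling. */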
                                           int qscale, int last_index)
    const uint8_t *weight_matrix;

        weight_matrix = ((n % 6) < 2) ? ctx->cid_table->luma_weight
                                      : ctx->cid_table->chroma_weight;

        weight_matrix = (n & 2) ? ctx->cid_table->chroma_weight
                                : ctx->cid_table->luma_weight;

    for (i = 1; i <= last_index; i++) {
        int j = ctx->m.c.intra_scantable.permutated[i];

            level = (1 - 2 * level) * qscale * weight_matrix[i];
            if (ctx->bit_depth == 10) {
                if (weight_matrix[i] != 8)

                if (weight_matrix[i] != 32)

            level = (2 * level + 1) * qscale * weight_matrix[i];
            if (ctx->bit_depth == 10) {
                if (weight_matrix[i] != 8)

                if (weight_matrix[i] != 32)
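/* dnxhd_ssd_block(): sums squared differences over the 64 coefficients of a
 * quantized/unquantized block pair. */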
    for (i = 0; i < 64; i++)
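/* dnxhd_calc_ac_bits(): counts the bits needed for the AC coefficients of one block
 * using the VLC and run-length bit tables. */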
    int last_non_zero = 0;

    for (i = 1; i <= last_index; i++) {
        j = ctx->m.c.intra_scantable.permutated[i];

            int run_level = i - last_non_zero - 1;

            /* ... */ !!run_level] + ctx->run_bits[run_level];
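/* dnxhd_get_blocks(): gathers the source pixels of one macroblock into ctx->blocks[].
 * Partial macroblocks at the right/bottom edge are first expanded into the edge buffers
 * (the elided calls appear to be vdsp emulated_edge_mc()), and the last macroblock row
 * of 1080-line content gets the symmetric 8x4 fetch when encoding interlaced input. */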
    const int bs = ctx->block_width_l2;
    const int bw = 1 << bs;
    int dct_y_offset  = ctx->dct_y_offset;
    int dct_uv_offset = ctx->dct_uv_offset;
    int linesize      = ctx->m.c.linesize;
    int uvlinesize    = ctx->m.c.uvlinesize;
    const uint8_t *ptr_y = ctx->thread[0]->src[0] +
        ((mb_y << 4) * ctx->m.c.linesize) + (mb_x << bs + 1);
    const uint8_t *ptr_u = ctx->thread[0]->src[1] +
        ((mb_y << 4) * ctx->m.c.uvlinesize) + (mb_x << bs + ctx->is_444);
    const uint8_t *ptr_v = ctx->thread[0]->src[2] +
        ((mb_y << 4) * ctx->m.c.uvlinesize) + (mb_x << bs + ctx->is_444);

    /* ... */ (mb_y << 4) + 16 > ctx->m.c.avctx->height)) {
        int y_w  = ctx->m.c.avctx->width  - (mb_x << 4);
        int y_h  = ctx->m.c.avctx->height - (mb_y << 4);
        int uv_w = (y_w + 1) / 2;

        /* ... */ linesize, ctx->m.c.linesize,
        /* ... */ uvlinesize, ctx->m.c.uvlinesize,
        /* ... */ uvlinesize, ctx->m.c.uvlinesize,

        dct_y_offset  = bw * linesize;
        dct_uv_offset = bw * uvlinesize;
        ptr_y = &ctx->edge_buf_y[0];
        ptr_u = &ctx->edge_buf_uv[0][0];
        ptr_v = &ctx->edge_buf_uv[1][0];

    /* ... */ (mb_y << 4) + 16 > ctx->m.c.avctx->height)) {
        int y_w  = ctx->m.c.avctx->width  - (mb_x << 4);
        int y_h  = ctx->m.c.avctx->height - (mb_y << 4);
        int uv_w = ctx->is_444 ? y_w : (y_w + 1) / 2;

        uvlinesize = 16 + 16 * ctx->is_444;

        /* ... */ linesize, ctx->m.c.linesize,
        /* ... */ uvlinesize, ctx->m.c.uvlinesize,
        /* ... */ uvlinesize, ctx->m.c.uvlinesize,

        dct_y_offset  = bw * linesize / 2;
        dct_uv_offset = bw * uvlinesize / 2;
        ptr_y = &ctx->edge_buf_y[0];
        ptr_u = &ctx->edge_buf_uv[0][0];
        ptr_v = &ctx->edge_buf_uv[1][0];

    if (mb_y + 1 == ctx->m.c.mb_height && ctx->m.c.avctx->height == 1080) {
        if (ctx->interlaced) {
            ctx->get_pixels_8x4_sym(ctx->blocks[4],
                                    ptr_y + dct_y_offset,
            ctx->get_pixels_8x4_sym(ctx->blocks[5],
                                    ptr_y + dct_y_offset + bw,
            ctx->get_pixels_8x4_sym(ctx->blocks[6],
                                    ptr_u + dct_uv_offset,
            ctx->get_pixels_8x4_sym(ctx->blocks[7],
                                    ptr_v + dct_uv_offset,

            ctx->m.c.bdsp.clear_block(ctx->blocks[4]);
            ctx->m.c.bdsp.clear_block(ctx->blocks[5]);
            ctx->m.c.bdsp.clear_block(ctx->blocks[6]);
            ctx->m.c.bdsp.clear_block(ctx->blocks[7]);

        /* ... */ ptr_y + dct_y_offset, linesize);
        /* ... */ ptr_y + dct_y_offset + bw, linesize);
        /* ... */ ptr_u + dct_uv_offset, uvlinesize);
        /* ... */ ptr_v + dct_uv_offset, uvlinesize);

        pdsp->get_pixels(ctx->blocks[6],  ptr_y + dct_y_offset,      linesize);
        pdsp->get_pixels(ctx->blocks[7],  ptr_y + dct_y_offset + bw, linesize);

        pdsp->get_pixels(ctx->blocks[8],  ptr_u + dct_uv_offset,      uvlinesize);
        pdsp->get_pixels(ctx->blocks[9],  ptr_u + dct_uv_offset + bw, uvlinesize);

        pdsp->get_pixels(ctx->blocks[10], ptr_v + dct_uv_offset,      uvlinesize);
        pdsp->get_pixels(ctx->blocks[11], ptr_v + dct_uv_offset + bw, uvlinesize);
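/* dnxhd_switch_matrix(): maps a block index to its component (luma or one of the two
 * chroma planes) via this lookup table. */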
    const static uint8_t component[8] = { 0, 0, 1, 2, 0, 0, 1, 2 };
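/* dnxhd_calc_bits_thread(): per-slice rate-control pass; for every macroblock it
 * quantizes each block at the current qscale, accumulates DC and AC bit counts, and
 * stores the resulting SSD and bit cost in ctx->mb_rc[]. */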
                                   int jobnr, int threadnr)
    int qscale = ctx->qscale;

    ctx = ctx->thread[threadnr];

    ctx->m.c.last_dc[0] =
    ctx->m.c.last_dc[1] =
    ctx->m.c.last_dc[2] = 1 << (ctx->bit_depth + 2);

    for (int mb_x = 0; mb_x < ctx->m.c.mb_width; mb_x++) {
        unsigned mb = mb_y * ctx->m.c.mb_width + mb_x;

        for (i = 0; i < 8 + 4 * ctx->is_444; i++) {
            int16_t *src_block = ctx->blocks[i];

            memcpy(block, src_block, 64 * sizeof(*block));

            /* ... */ ctx->is_444 ? 4 * (n > 0) : 4 & (2*i),

            dc_bits += ctx->cid_table->dc_bits[nbits] + nbits;

        ctx->mb_rc[(qscale * ctx->m.c.mb_num) + mb].ssd  = ssd;
        ctx->mb_rc[(qscale * ctx->m.c.mb_num) + mb].bits = ac_bits + dc_bits + 12 +
            (1 + ctx->is_444) * 8 * ctx->vlc_bits[0];
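/* dnxhd_encode_thread(): per-slice final encoding pass; re-quantizes each block at the
 * per-macroblock qscale chosen by rate control and writes the slice bitstream. */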
                               int jobnr, int threadnr)
    ctx = ctx->thread[threadnr];
    /* ... */ ctx->slice_size[jobnr]);

    ctx->m.c.last_dc[0] =
    ctx->m.c.last_dc[1] =
    ctx->m.c.last_dc[2] = 1 << (ctx->bit_depth + 2);
    for (int mb_x = 0; mb_x < ctx->m.c.mb_width; mb_x++) {
        unsigned mb = mb_y * ctx->m.c.mb_width + mb_x;
        int qscale  = ctx->mb_qscale[mb];

        for (i = 0; i < 8 + 4 * ctx->is_444; i++) {

            int last_index = ctx->m.dct_quantize(&ctx->m, block,
                ctx->is_444 ? (((i >> 1) % 3) < 1 ? 0 : 4) : 4 & (2*i),
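/* dnxhd_setup_threads_slices(): sums the per-macroblock bit counts into per-row slice
 * sizes, rounds each slice up to a 32-bit boundary, and converts it to bytes. */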
    for (int mb_y = 0, offset = 0; mb_y < ctx->m.c.mb_height; mb_y++) {

        ctx->slice_size[mb_y] = 0;
        for (int mb_x = 0; mb_x < ctx->m.c.mb_width; mb_x++) {
            unsigned mb = mb_y * ctx->m.c.mb_width + mb_x;
            ctx->slice_size[mb_y] += ctx->mb_bits[mb];

        ctx->slice_size[mb_y]   = (ctx->slice_size[mb_y] + 31U) & ~31U;
        ctx->slice_size[mb_y] >>= 3;
        thread_size = ctx->slice_size[mb_y];
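/* dnxhd_mb_var_thread(): computes a per-macroblock activity measure for the fast rate
 * control; the 8-bit path uses the pix_sum/pix_norm1 DSP helpers, the 10-bit path
 * accumulates directly over the 16-bit samples, and partial last rows are handled
 * separately. */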
                                int jobnr, int threadnr)
    int mb_y = jobnr, x, y;
    int partial_last_row = (mb_y == ctx->m.c.mb_height - 1) &&

    ctx = ctx->thread[threadnr];
    if (ctx->bit_depth == 8) {
        const uint8_t *pix = ctx->thread[0]->src[0] + ((mb_y << 4) * ctx->m.c.linesize);
        for (int mb_x = 0; mb_x < ctx->m.c.mb_width; ++mb_x, pix += 16) {
            unsigned mb = mb_y * ctx->m.c.mb_width + mb_x;

            sum  = ctx->m.mpvencdsp.pix_sum(pix, ctx->m.c.linesize);
            varc = ctx->m.mpvencdsp.pix_norm1(pix, ctx->m.c.linesize);

            for (y = 0; y < bh; y++) {
                for (x = 0; x < bw; x++) {
                    uint8_t val = pix[x + y * ctx->m.c.linesize];

            varc = (varc - (((unsigned) sum * sum) >> 8) + 128) >> 8;

            ctx->mb_cmp[mb].value = varc;

        const int linesize = ctx->m.c.linesize >> 1;
        for (int mb_x = 0; mb_x < ctx->m.c.mb_width; ++mb_x) {
            const uint16_t *pix = (const uint16_t *)ctx->thread[0]->src[0] +
                                  ((mb_y << 4) * linesize) + (mb_x << 4);
            unsigned mb = mb_y * ctx->m.c.mb_width + mb_x;

            for (i = 0; i < bh; ++i) {
                for (j = 0; j < bw; ++j) {
                    const int sample = (unsigned) pix[j] >> 6;
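/* dnxhd_encode_rdo(): rate-distortion mode; binary-searches lambda, picking for each
 * macroblock the qscale that minimizes bits * lambda plus a distortion term (elided
 * here), until the frame fits ctx->frame_bits. */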
    int lambda, up_step, down_step;
    int last_lower = INT_MAX, last_higher = 0;

    lambda = ctx->lambda;

        if (lambda == last_higher) {

        for (int y = 0; y < ctx->m.c.mb_height; y++) {
            for (int x = 0; x < ctx->m.c.mb_width; x++) {
                unsigned min = UINT_MAX;

                int mb = y * ctx->m.c.mb_width + x;

                    int i = (q * ctx->m.c.mb_num) + mb;
                    unsigned score = ctx->mb_rc[i].bits * lambda +

                ctx->mb_qscale[mb] = qscale;
                ctx->mb_bits[mb]   = ctx->mb_rc[rc].bits;

        if (bits < ctx->frame_bits) {
            last_lower = FFMIN(lambda, last_lower);
            if (last_higher != 0)
                lambda = (lambda + last_higher) >> 1;
            else
                lambda -= down_step;

            lambda = FFMAX(1, lambda);
            if (lambda == last_lower)

            last_higher = FFMAX(lambda, last_higher);
            if (last_lower != INT_MAX)
                lambda = (lambda + last_lower) >> 1;
            else if ((int64_t)lambda + up_step > INT_MAX)

    ctx->lambda = lambda;
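/* dnxhd_find_qscale(): binary-searches a single frame-wide qscale whose total bit count
 * fits ctx->frame_bits, widening the step while the search has not bracketed a value. */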
    int last_higher = 0;
    int last_lower  = INT_MAX;

    qscale = ctx->qscale;

        ctx->qscale = qscale;

        for (int y = 0; y < ctx->m.c.mb_height; y++) {
            for (int x = 0; x < ctx->m.c.mb_width; x++)

        if (bits < ctx->frame_bits) {

            if (last_higher == qscale - 1) {
                qscale = last_higher;

            last_lower = FFMIN(qscale, last_lower);
            if (last_higher != 0)
                qscale = (qscale + last_higher) >> 1;
            else
                qscale -= down_step++;

            if (last_lower == qscale + 1)

            last_higher = FFMAX(qscale, last_higher);
            if (last_lower != INT_MAX)
                qscale = (qscale + last_lower) >> 1;
            else
                qscale += up_step++;

        if (qscale >= ctx->m.c.avctx->qmax)

    ctx->qscale = qscale;
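/* radix_count()/radix_sort_pass()/radix_sort(): byte-wise radix sort of the RCCMPEntry
 * array, apparently used by the fast path below to rank macroblocks by their value. */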
#define BUCKET_BITS 8
#define RADIX_PASSES 4
#define NBUCKETS (1 << BUCKET_BITS)

        int v = data[i].value;

            buckets[j][i] = offset -= buckets[j][i];

        int pos = buckets[v]++;
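/* dnxhd_encode_fast(): variance-based mode; starts from a frame-wide qscale, then bumps
 * the qscale of the macroblocks with roughly the smallest quality loss per saved bit
 * (the sorted mb_cmp values) until the frame fits the bit budget. */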
    for (int y = 0; y < ctx->m.c.mb_height; y++) {
        for (int x = 0; x < ctx->m.c.mb_width; x++) {
            int mb = y * ctx->m.c.mb_width + x;
            int rc = (ctx->qscale * ctx->m.c.mb_num) + mb;

            ctx->mb_bits[mb] = ctx->mb_rc[rc].bits;
            max_bits        += ctx->mb_rc[rc].bits;

            delta_bits = ctx->mb_rc[rc].bits -
                         ctx->mb_rc[rc + ctx->m.c.mb_num].bits;

            ctx->mb_cmp[mb].value =
                delta_bits ? ((ctx->mb_rc[rc].ssd -
                               ctx->mb_rc[rc + ctx->m.c.mb_num].ssd) * 100) /

        for (int x = 0; x < ctx->m.c.mb_num && max_bits > ctx->frame_bits; x++) {
            int mb = ctx->mb_cmp[x].mb;
            int rc = (ctx->qscale * ctx->m.c.mb_num) + mb;

            max_bits -= ctx->mb_rc[rc].bits -
                        ctx->mb_rc[rc + ctx->m.c.mb_num].bits;
            if (ctx->mb_qscale[mb] < 255)
                ctx->mb_qscale[mb]++;
            ctx->mb_bits[mb] = ctx->mb_rc[rc + ctx->m.c.mb_num].bits;

    if (max_bits > ctx->frame_bits)
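/* dnxhd_load_picture(): stores per-thread line sizes and DCT row offsets, doubling the
 * strides when encoding interlaced fields. */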
    for (int i = 0; i < ctx->m.c.avctx->thread_count; i++) {
        ctx->thread[i]->m.c.linesize   = frame->linesize[0] << ctx->interlaced;
        ctx->thread[i]->m.c.uvlinesize = frame->linesize[1] << ctx->interlaced;
        ctx->thread[i]->dct_y_offset   = ctx->m.c.linesize   * 8;
        ctx->thread[i]->dct_uv_offset  = ctx->m.c.uvlinesize * 8;
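/* dnxhd_encode_picture(): top-level encode callback; writes the header, runs rate
 * control and the slice threads for each coding unit, and for interlaced input loops
 * back (goto encode_coding_unit) to encode the second field. */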
    for (i = 0; i < 3; i++) {

        if (ctx->interlaced && ctx->cur_field)

    /* ... */ "picture could not fit ratecontrol constraints, increase qmax\n");

    for (i = 0; i < ctx->m.c.mb_height; i++) {

    /* ... */ ctx->coding_unit_size - 4 - offset - ctx->data_offset);

    ctx->cur_field ^= 1;
    buf += ctx->coding_unit_size;
    goto encode_coding_unit;

    if (ctx->thread[1]) {