FFmpeg
h264_slice.c
Go to the documentation of this file.
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * H.264 / AVC / MPEG-4 part10 codec.
25  * @author Michael Niedermayer <michaelni@gmx.at>
26  */
27 
28 #include "config_components.h"
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/pixdesc.h"
32 #include "libavutil/timecode.h"
33 #include "decode.h"
34 #include "cabac.h"
35 #include "cabac_functions.h"
36 #include "error_resilience.h"
37 #include "avcodec.h"
38 #include "h264.h"
39 #include "h264dec.h"
40 #include "h264data.h"
41 #include "h264chroma.h"
42 #include "h264_ps.h"
43 #include "golomb.h"
44 #include "mathops.h"
45 #include "mpegutils.h"
46 #include "rectangle.h"
47 #include "refstruct.h"
48 #include "thread.h"
49 #include "threadframe.h"
50 
/* 4x4 coefficient scan order used for field (interlaced) macroblocks.
 * Entries encode x + y*4 positions inside the 4x4 block.
 * Sized [16+1]: one trailing slot beyond the 16 used entries --
 * presumably overread padding for the scan-table consumers; TODO confirm. */
51 static const uint8_t field_scan[16+1] = {
52  0 + 0 * 4, 0 + 1 * 4, 1 + 0 * 4, 0 + 2 * 4,
53  0 + 3 * 4, 1 + 1 * 4, 1 + 2 * 4, 1 + 3 * 4,
54  2 + 0 * 4, 2 + 1 * 4, 2 + 2 * 4, 2 + 3 * 4,
55  3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4,
56 };
57 
/* 8x8 coefficient scan order for field macroblocks (CABAC path).
 * Entries encode x + y*8 positions inside the 8x8 block; [64+1] leaves
 * one extra padding slot, matching the other scan tables in this file. */
58 static const uint8_t field_scan8x8[64+1] = {
59  0 + 0 * 8, 0 + 1 * 8, 0 + 2 * 8, 1 + 0 * 8,
60  1 + 1 * 8, 0 + 3 * 8, 0 + 4 * 8, 1 + 2 * 8,
61  2 + 0 * 8, 1 + 3 * 8, 0 + 5 * 8, 0 + 6 * 8,
62  0 + 7 * 8, 1 + 4 * 8, 2 + 1 * 8, 3 + 0 * 8,
63  2 + 2 * 8, 1 + 5 * 8, 1 + 6 * 8, 1 + 7 * 8,
64  2 + 3 * 8, 3 + 1 * 8, 4 + 0 * 8, 3 + 2 * 8,
65  2 + 4 * 8, 2 + 5 * 8, 2 + 6 * 8, 2 + 7 * 8,
66  3 + 3 * 8, 4 + 1 * 8, 5 + 0 * 8, 4 + 2 * 8,
67  3 + 4 * 8, 3 + 5 * 8, 3 + 6 * 8, 3 + 7 * 8,
68  4 + 3 * 8, 5 + 1 * 8, 6 + 0 * 8, 5 + 2 * 8,
69  4 + 4 * 8, 4 + 5 * 8, 4 + 6 * 8, 4 + 7 * 8,
70  5 + 3 * 8, 6 + 1 * 8, 6 + 2 * 8, 5 + 4 * 8,
71  5 + 5 * 8, 5 + 6 * 8, 5 + 7 * 8, 6 + 3 * 8,
72  7 + 0 * 8, 7 + 1 * 8, 6 + 4 * 8, 6 + 5 * 8,
73  6 + 6 * 8, 6 + 7 * 8, 7 + 2 * 8, 7 + 3 * 8,
74  7 + 4 * 8, 7 + 5 * 8, 7 + 6 * 8, 7 + 7 * 8,
75 };
76 
/* 8x8 field scan reordered for the CAVLC decoder: same positions as
 * field_scan8x8, permuted so that index i here corresponds to
 * field_scan8x8[(i/4) + 16*(i%4)] (cf. the zigzag CAVLC table below). */
77 static const uint8_t field_scan8x8_cavlc[64+1] = {
78  0 + 0 * 8, 1 + 1 * 8, 2 + 0 * 8, 0 + 7 * 8,
79  2 + 2 * 8, 2 + 3 * 8, 2 + 4 * 8, 3 + 3 * 8,
80  3 + 4 * 8, 4 + 3 * 8, 4 + 4 * 8, 5 + 3 * 8,
81  5 + 5 * 8, 7 + 0 * 8, 6 + 6 * 8, 7 + 4 * 8,
82  0 + 1 * 8, 0 + 3 * 8, 1 + 3 * 8, 1 + 4 * 8,
83  1 + 5 * 8, 3 + 1 * 8, 2 + 5 * 8, 4 + 1 * 8,
84  3 + 5 * 8, 5 + 1 * 8, 4 + 5 * 8, 6 + 1 * 8,
85  5 + 6 * 8, 7 + 1 * 8, 6 + 7 * 8, 7 + 5 * 8,
86  0 + 2 * 8, 0 + 4 * 8, 0 + 5 * 8, 2 + 1 * 8,
87  1 + 6 * 8, 4 + 0 * 8, 2 + 6 * 8, 5 + 0 * 8,
88  3 + 6 * 8, 6 + 0 * 8, 4 + 6 * 8, 6 + 2 * 8,
89  5 + 7 * 8, 6 + 4 * 8, 7 + 2 * 8, 7 + 6 * 8,
90  1 + 0 * 8, 1 + 2 * 8, 0 + 6 * 8, 3 + 0 * 8,
91  1 + 7 * 8, 3 + 2 * 8, 2 + 7 * 8, 4 + 2 * 8,
92  3 + 7 * 8, 5 + 2 * 8, 4 + 7 * 8, 5 + 4 * 8,
93  6 + 3 * 8, 6 + 5 * 8, 7 + 3 * 8, 7 + 7 * 8,
94 };
95 
/* Progressive 8x8 zigzag scan reordered for the CAVLC decoder.
 * The permutation below interleaves the four 16-coefficient groups. */
96 // zigzag_scan8x8_cavlc[i] = zigzag_scan8x8[(i/4) + 16*(i%4)]
97 static const uint8_t zigzag_scan8x8_cavlc[64+1] = {
98  0 + 0 * 8, 1 + 1 * 8, 1 + 2 * 8, 2 + 2 * 8,
99  4 + 1 * 8, 0 + 5 * 8, 3 + 3 * 8, 7 + 0 * 8,
100  3 + 4 * 8, 1 + 7 * 8, 5 + 3 * 8, 6 + 3 * 8,
101  2 + 7 * 8, 6 + 4 * 8, 5 + 6 * 8, 7 + 5 * 8,
102  1 + 0 * 8, 2 + 0 * 8, 0 + 3 * 8, 3 + 1 * 8,
103  3 + 2 * 8, 0 + 6 * 8, 4 + 2 * 8, 6 + 1 * 8,
104  2 + 5 * 8, 2 + 6 * 8, 6 + 2 * 8, 5 + 4 * 8,
105  3 + 7 * 8, 7 + 3 * 8, 4 + 7 * 8, 7 + 6 * 8,
106  0 + 1 * 8, 3 + 0 * 8, 0 + 4 * 8, 4 + 0 * 8,
107  2 + 3 * 8, 1 + 5 * 8, 5 + 1 * 8, 5 + 2 * 8,
108  1 + 6 * 8, 3 + 5 * 8, 7 + 1 * 8, 4 + 5 * 8,
109  4 + 6 * 8, 7 + 4 * 8, 5 + 7 * 8, 6 + 7 * 8,
110  0 + 2 * 8, 2 + 1 * 8, 1 + 3 * 8, 5 + 0 * 8,
111  1 + 4 * 8, 2 + 4 * 8, 6 + 0 * 8, 4 + 3 * 8,
112  0 + 7 * 8, 4 + 4 * 8, 7 + 2 * 8, 3 + 6 * 8,
113  5 + 5 * 8, 6 + 5 * 8, 6 + 6 * 8, 7 + 7 * 8,
114 };
115 
116 static void release_unused_pictures(H264Context *h, int remove_current)
117 {
118  int i;
119 
120  /* release non reference frames */
121  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
122  if (h->DPB[i].f->buf[0] && !h->DPB[i].reference &&
123  (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
124  ff_h264_unref_picture(&h->DPB[i]);
125  }
126  }
127 }
128 
/* Allocate per-slice-context scratch buffers (bipred scratchpad, edge
 * emulation buffer, top-border backups) sized from the current linesize.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure.
 * NOTE(review): this dump lost several original lines (139, 141, 146-147,
 * 151-152) -- presumably the av_fast_malloc() calls for top_borders[0]/[1]
 * and the matching av_freep()/allocated-size resets of the scratchpad and
 * edge buffer on the failure path; confirm against upstream. */
129 static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
130 {
131  const H264Context *h = sl->h264;
132  int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
133 
134  av_fast_malloc(&sl->bipred_scratchpad, &sl->bipred_scratchpad_allocated, 16 * 6 * alloc_size);
135  // edge emu needs blocksize + filter length - 1
136  // (= 21x21 for H.264)
137  av_fast_malloc(&sl->edge_emu_buffer, &sl->edge_emu_buffer_allocated, alloc_size * 2 * 21);
138 
140  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
142  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
143 
144  if (!sl->bipred_scratchpad || !sl->edge_emu_buffer ||
145  !sl->top_borders[0] || !sl->top_borders[1]) {
148  av_freep(&sl->top_borders[0]);
149  av_freep(&sl->top_borders[1]);
150 
153  sl->top_borders_allocated[0] = 0;
154  sl->top_borders_allocated[1] = 0;
155  return AVERROR(ENOMEM);
156  }
157 
158  return 0;
159 }
160 
/* Create the per-frame metadata buffer pools (qscale table, mb types,
 * motion vectors, reference indices). Returns 0 or AVERROR(ENOMEM);
 * on failure all pools are torn down again so the context stays clean.
 * NOTE(review): the signature line (161) and the second argument of the
 * first av_buffer_pool_init() call (169) were lost in this dump --
 * presumably `static int init_table_pools(H264Context *h)` and
 * `av_buffer_allocz`; confirm against upstream. */
162 {
163  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
164  const int mb_array_size = h->mb_stride * h->mb_height;
165  const int b4_stride = h->mb_width * 4 + 1;
166  const int b4_array_size = b4_stride * h->mb_height * 4;
167 
168  h->qscale_table_pool = av_buffer_pool_init(big_mb_num + h->mb_stride,
170  h->mb_type_pool = av_buffer_pool_init((big_mb_num + h->mb_stride) *
171  sizeof(uint32_t), av_buffer_allocz);
172  h->motion_val_pool = av_buffer_pool_init(2 * (b4_array_size + 4) *
173  sizeof(int16_t), av_buffer_allocz);
174  h->ref_index_pool = av_buffer_pool_init(4 * mb_array_size, av_buffer_allocz);
175 
176  if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
177  !h->ref_index_pool) {
178  av_buffer_pool_uninit(&h->qscale_table_pool);
179  av_buffer_pool_uninit(&h->mb_type_pool);
180  av_buffer_pool_uninit(&h->motion_val_pool);
181  av_buffer_pool_uninit(&h->ref_index_pool);
182  return AVERROR(ENOMEM);
183  }
184 
185  return 0;
186 }
187 
/* Allocate frame buffers and all per-picture metadata for a DPB slot:
 * the frame itself (via the thread-aware getter), an optional film-grain
 * frame, decode-error flags, qscale/mb-type tables and per-list motion
 * vector / ref-index buffers. Returns 0, a negative error, or
 * AVERROR(ENOMEM); the fail path unrefs the partially built picture.
 * NOTE(review): this dump lost lines 188 (presumably the signature
 * `static int alloc_picture(H264Context *h, H264Picture *pic)`), 209
 * (presumably the hwaccel private-data allocation whose result feeds the
 * following `if (ret < 0)`), 222 (presumably the
 * av_pix_fmt_get_chroma_sub_sample() call for the gray-mode memset),
 * 234 (presumably `ret = init_table_pools(h);`) and 265 (presumably
 * `ff_h264_unref_picture(pic);`); confirm against upstream. */
189 {
190  int i, ret = 0;
191 
192  av_assert0(!pic->f->data[0]);
193 
194  pic->tf.f = pic->f;
195  ret = ff_thread_get_ext_buffer(h->avctx, &pic->tf,
196  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
197  if (ret < 0)
198  goto fail;
199 
200  if (pic->needs_fg) {
201  pic->f_grain->format = pic->f->format;
202  pic->f_grain->width = pic->f->width;
203  pic->f_grain->height = pic->f->height;
204  ret = ff_thread_get_buffer(h->avctx, pic->f_grain, 0);
205  if (ret < 0)
206  goto fail;
207  }
208 
210  if (ret < 0)
211  goto fail;
212 
213  if (h->decode_error_flags_pool) {
214  pic->decode_error_flags = av_buffer_pool_get(h->decode_error_flags_pool);
215  if (!pic->decode_error_flags)
216  goto fail;
218  }
219 
220  if (CONFIG_GRAY && !h->avctx->hwaccel && h->flags & AV_CODEC_FLAG_GRAY && pic->f->data[2]) {
221  int h_chroma_shift, v_chroma_shift;
223  &h_chroma_shift, &v_chroma_shift);
224 
225  for(i=0; i<AV_CEIL_RSHIFT(pic->f->height, v_chroma_shift); i++) {
226  memset(pic->f->data[1] + pic->f->linesize[1]*i,
227  0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
228  memset(pic->f->data[2] + pic->f->linesize[2]*i,
229  0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
230  }
231  }
232 
233  if (!h->qscale_table_pool) {
235  if (ret < 0)
236  goto fail;
237  }
238 
239  pic->qscale_table_buf = av_buffer_pool_get(h->qscale_table_pool);
240  pic->mb_type_buf = av_buffer_pool_get(h->mb_type_pool);
241  if (!pic->qscale_table_buf || !pic->mb_type_buf)
242  goto fail;
243 
244  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
245  pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1;
246 
247  for (i = 0; i < 2; i++) {
248  pic->motion_val_buf[i] = av_buffer_pool_get(h->motion_val_pool);
249  pic->ref_index_buf[i] = av_buffer_pool_get(h->ref_index_pool);
250  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
251  goto fail;
252 
253  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
254  pic->ref_index[i] = pic->ref_index_buf[i]->data;
255  }
256 
257  pic->pps = ff_refstruct_ref_c(h->ps.pps);
258 
259  pic->mb_width = h->mb_width;
260  pic->mb_height = h->mb_height;
261  pic->mb_stride = h->mb_stride;
262 
263  return 0;
264 fail:
266  return (ret < 0) ? ret : AVERROR(ENOMEM);
267 }
268 
/* Return the index of the first DPB slot with no allocated frame, or
 * AVERROR_INVALIDDATA if every slot is in use.
 * NOTE(review): the signature line (269) was lost in this dump --
 * presumably `static int find_unused_picture(H264Context *h)`. */
270 {
271  int i;
272 
273  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
274  if (!h->DPB[i].f->buf[0])
275  return i;
276  }
277  return AVERROR_INVALIDDATA;
278 }
279 
280 
/* IN_RANGE: true when pointer a lies inside the object starting at b that
 * spans `size` elements of b's pointee type (half-open interval). */
281 #define IN_RANGE(a, b, size) (((void*)(a) >= (void*)(b)) && ((void*)(a) < (void*)((b) + (size))))
282 
/* REBASE_PICTURE: translate a picture pointer from old_ctx's DPB into the
 * slot with the same index in new_ctx's DPB; anything outside the DPB
 * (including NULL) maps to NULL. Used when copying contexts between
 * frame threads. */
283 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
284  (((pic) && (pic) >= (old_ctx)->DPB && \
285  (pic) < (old_ctx)->DPB + H264_MAX_PICTURE_COUNT) ? \
286  &(new_ctx)->DPB[(pic) - (old_ctx)->DPB] : NULL)
287 
288 static void copy_picture_range(H264Picture **to, H264Picture *const *from, int count,
289  H264Context *new_base, const H264Context *old_base)
290 {
291  int i;
292 
293  for (i = 0; i < count; i++) {
294  av_assert1(!from[i] ||
295  IN_RANGE(from[i], old_base, 1) ||
296  IN_RANGE(from[i], old_base->DPB, H264_MAX_PICTURE_COUNT));
297  to[i] = REBASE_PICTURE(from[i], new_base, old_base);
298  }
299 }
300 
301 static void color_frame(AVFrame *frame, const int c[4])
302 {
304 
306 
307  for (int p = 0; p < desc->nb_components; p++) {
308  uint8_t *dst = frame->data[p];
309  int is_chroma = p == 1 || p == 2;
310  int bytes = is_chroma ? AV_CEIL_RSHIFT(frame->width, desc->log2_chroma_w) : frame->width;
311  int height = is_chroma ? AV_CEIL_RSHIFT(frame->height, desc->log2_chroma_h) : frame->height;
312  if (desc->comp[0].depth >= 9) {
313  ((uint16_t*)dst)[0] = c[p];
314  av_memcpy_backptr(dst + 2, 2, bytes - 2);
315  dst += frame->linesize[p];
316  for (int y = 1; y < height; y++) {
317  memcpy(dst, frame->data[p], 2*bytes);
318  dst += frame->linesize[p];
319  }
320  } else {
321  for (int y = 0; y < height; y++) {
322  memset(dst, c[p], bytes);
323  dst += frame->linesize[p];
324  }
325  }
326  }
327 }
328 
330 
/* Frame-threading context update: copy all decoding state from the source
 * thread's H264Context (h1) into the destination's (h), rebasing every
 * DPB-internal picture pointer via REBASE_PICTURE/copy_picture_range and
 * re-running slice-header init when geometry or bit depth changed.
 * NOTE(review): the first line of the signature (original lines 329-331)
 * was lost in this dump -- presumably
 * `static int update_thread_context(AVCodecContext *dst,`; confirm. */
332  const AVCodecContext *src)
333 {
334  H264Context *h = dst->priv_data, *h1 = src->priv_data;
335  int inited = h->context_initialized, err = 0;
336  int need_reinit = 0;
337  int i, ret;
338 
339  if (dst == src)
340  return 0;
341 
342  if (inited && !h1->ps.sps)
343  return AVERROR_INVALIDDATA;
344 
/* any change in geometry, bit depth or chroma layout forces a re-init */
345  if (inited &&
346  (h->width != h1->width ||
347  h->height != h1->height ||
348  h->mb_width != h1->mb_width ||
349  h->mb_height != h1->mb_height ||
350  !h->ps.sps ||
351  h->ps.sps->bit_depth_luma != h1->ps.sps->bit_depth_luma ||
352  h->ps.sps->chroma_format_idc != h1->ps.sps->chroma_format_idc ||
353  h->ps.sps->vui.matrix_coeffs != h1->ps.sps->vui.matrix_coeffs)) {
354  need_reinit = 1;
355  }
356 
357  /* copy block_offset since frame_start may not be called */
358  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
359 
360  // SPS/PPS
361  for (int i = 0; i < FF_ARRAY_ELEMS(h->ps.sps_list); i++)
362  ff_refstruct_replace(&h->ps.sps_list[i], h1->ps.sps_list[i]);
363  for (int i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++)
364  ff_refstruct_replace(&h->ps.pps_list[i], h1->ps.pps_list[i]);
365 
366  ff_refstruct_replace(&h->ps.pps, h1->ps.pps);
367  h->ps.sps = h1->ps.sps;
368 
369  if (need_reinit || !inited) {
370  h->width = h1->width;
371  h->height = h1->height;
372  h->mb_height = h1->mb_height;
373  h->mb_width = h1->mb_width;
374  h->mb_num = h1->mb_num;
375  h->mb_stride = h1->mb_stride;
376  h->b_stride = h1->b_stride;
377  h->x264_build = h1->x264_build;
378 
379  if (h->context_initialized || h1->context_initialized) {
380  if ((err = h264_slice_header_init(h)) < 0) {
381  av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed");
382  return err;
383  }
384  }
385 
386  /* copy block_offset since frame_start may not be called */
387  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
388  }
389 
390  h->width_from_caller = h1->width_from_caller;
391  h->height_from_caller = h1->height_from_caller;
392  h->coded_picture_number = h1->coded_picture_number;
393  h->first_field = h1->first_field;
394  h->picture_structure = h1->picture_structure;
395  h->mb_aff_frame = h1->mb_aff_frame;
396  h->droppable = h1->droppable;
397 
398  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
399  ret = ff_h264_replace_picture(&h->DPB[i], &h1->DPB[i]);
400  if (ret < 0)
401  return ret;
402  }
403 
404  h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
405  ret = ff_h264_replace_picture(&h->cur_pic, &h1->cur_pic);
406  if (ret < 0)
407  return ret;
408 
409  h->enable_er = h1->enable_er;
410  h->workaround_bugs = h1->workaround_bugs;
411  h->droppable = h1->droppable;
412 
413  // extradata/NAL handling
414  h->is_avc = h1->is_avc;
415  h->nal_length_size = h1->nal_length_size;
416 
417  memcpy(&h->poc, &h1->poc, sizeof(h->poc));
418 
/* raw pointer copies; rebased onto h's DPB by copy_picture_range below */
419  memcpy(h->short_ref, h1->short_ref, sizeof(h->short_ref));
420  memcpy(h->long_ref, h1->long_ref, sizeof(h->long_ref));
421  memcpy(h->delayed_pic, h1->delayed_pic, sizeof(h->delayed_pic));
422  memcpy(h->last_pocs, h1->last_pocs, sizeof(h->last_pocs));
423 
424  h->next_output_pic = h1->next_output_pic;
425  h->next_outputed_poc = h1->next_outputed_poc;
426  h->poc_offset = h1->poc_offset;
427 
428  memcpy(h->mmco, h1->mmco, sizeof(h->mmco));
429  h->nb_mmco = h1->nb_mmco;
430  h->mmco_reset = h1->mmco_reset;
431  h->explicit_ref_marking = h1->explicit_ref_marking;
432  h->long_ref_count = h1->long_ref_count;
433  h->short_ref_count = h1->short_ref_count;
434 
435  copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
436  copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1);
437  copy_picture_range(h->delayed_pic, h1->delayed_pic,
438  FF_ARRAY_ELEMS(h->delayed_pic), h, h1);
439 
440  h->frame_recovered = h1->frame_recovered;
441 
442  ret = ff_h264_sei_ctx_replace(&h->sei, &h1->sei);
443  if (ret < 0)
444  return ret;
445 
446  h->sei.common.unregistered.x264_build = h1->sei.common.unregistered.x264_build;
447  h->sei.common.mastering_display = h1->sei.common.mastering_display;
448  h->sei.common.content_light = h1->sei.common.content_light;
449 
450  if (!h->cur_pic_ptr)
451  return 0;
452 
453  if (!h->droppable) {
/* NOTE(review): original line 454 lost -- presumably
 * `err = ff_h264_execute_ref_pic_marking(h);`; confirm upstream. */
455  h->poc.prev_poc_msb = h->poc.poc_msb;
456  h->poc.prev_poc_lsb = h->poc.poc_lsb;
457  }
458  h->poc.prev_frame_num_offset = h->poc.frame_num_offset;
459  h->poc.prev_frame_num = h->poc.frame_num;
460 
461  h->recovery_frame = h1->recovery_frame;
462 
463  return err;
464 }
465 
/* Lightweight context update for user-facing threading: only the
 * extradata-derived NAL framing settings are carried over.
 * NOTE(review): the first line of the signature (original line 466) was
 * lost -- presumably
 * `static int update_thread_context_for_user(AVCodecContext *dst,`. */
467  const AVCodecContext *src)
468 {
469  H264Context *h = dst->priv_data;
470  const H264Context *h1 = src->priv_data;
471 
472  h->is_avc = h1->is_avc;
473  h->nal_length_size = h1->nal_length_size;
474 
475  return 0;
476 }
477 
/* Begin decoding a new frame: grab a free DPB slot, allocate its buffers,
 * initialize per-picture state (reference status, cropping, film grain,
 * error resilience) and precompute the block_offset tables used by the
 * macroblock decoder. Returns 0 or a negative error code.
 * NOTE(review): this dump lost lines 478 (presumably the signature
 * `static int h264_frame_start(H264Context *h)`), 489 (presumably
 * `release_unused_pictures(h, 1);`), 492 (presumably
 * `i = find_unused_picture(h);`) and 501/503 (presumably the
 * FF_DISABLE/ENABLE_DEPRECATION_WARNINGS pair); confirm upstream. */
479 {
480  H264Picture *pic;
481  int i, ret;
482  const int pixel_shift = h->pixel_shift;
483 
484  if (!ff_thread_can_start_frame(h->avctx)) {
485  av_log(h->avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
486  return -1;
487  }
488 
490  h->cur_pic_ptr = NULL;
491 
493  if (i < 0) {
494  av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n");
495  return i;
496  }
497  pic = &h->DPB[i];
498 
499  pic->reference = h->droppable ? 0 : h->picture_structure;
500 #if FF_API_FRAME_PICTURE_NUMBER
502  pic->f->coded_picture_number = h->coded_picture_number++;
504 #endif
505  pic->field_picture = h->picture_structure != PICT_FRAME;
506  pic->frame_num = h->poc.frame_num;
507  /*
508  * Zero key_frame here; IDR markings per slice in frame or fields are ORed
509  * in later.
510  * See decode_nal_units().
511  */
512  pic->f->flags &= ~AV_FRAME_FLAG_KEY;
513  pic->mmco_reset = 0;
514  pic->recovered = 0;
515  pic->invalid_gap = 0;
516  pic->sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
517 
518  pic->f->pict_type = h->slice_ctx[0].slice_type;
519 
520  pic->f->crop_left = h->crop_left;
521  pic->f->crop_right = h->crop_right;
522  pic->f->crop_top = h->crop_top;
523  pic->f->crop_bottom = h->crop_bottom;
524 
/* film grain is applied by the decoder only when no hwaccel is active and
 * the caller did not ask for the FGC side data to be exported instead */
525  pic->needs_fg = h->sei.common.film_grain_characteristics.present && !h->avctx->hwaccel &&
526  !(h->avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN);
527 
528  if ((ret = alloc_picture(h, pic)) < 0)
529  return ret;
530 
531  h->cur_pic_ptr = pic;
532  ff_h264_unref_picture(&h->cur_pic);
533  if (CONFIG_ERROR_RESILIENCE) {
534  ff_h264_set_erpic(&h->er.cur_pic, NULL);
535  }
536 
537  if ((ret = ff_h264_ref_picture(&h->cur_pic, h->cur_pic_ptr)) < 0)
538  return ret;
539 
540  for (i = 0; i < h->nb_slice_ctx; i++) {
541  h->slice_ctx[i].linesize = h->cur_pic_ptr->f->linesize[0];
542  h->slice_ctx[i].uvlinesize = h->cur_pic_ptr->f->linesize[1];
543  }
544 
545  if (CONFIG_ERROR_RESILIENCE && h->enable_er) {
546  ff_er_frame_start(&h->er);
547  ff_h264_set_erpic(&h->er.last_pic, NULL);
548  ff_h264_set_erpic(&h->er.next_pic, NULL);
549  }
550 
/* luma (0..15 / 48..63) and chroma (16..47 / 48+16..48+47) block offsets,
 * frame and field variants, derived from the scan8 layout */
551  for (i = 0; i < 16; i++) {
552  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
553  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
554  }
555  for (i = 0; i < 16; i++) {
556  h->block_offset[16 + i] =
557  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
558  h->block_offset[48 + 16 + i] =
559  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
560  }
561 
562  /* We mark the current picture as non-reference after allocating it, so
563  * that if we break out due to an error it can be released automatically
564  * in the next ff_mpv_frame_start().
565  */
566  h->cur_pic_ptr->reference = 0;
567 
568  h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;
569 
570  h->next_output_pic = NULL;
571 
572  h->postpone_filter = 0;
573 
574  h->mb_aff_frame = h->ps.sps->mb_aff && (h->picture_structure == PICT_FRAME);
575 
576  if (h->sei.common.unregistered.x264_build >= 0)
577  h->x264_build = h->sei.common.unregistered.x264_build;
578 
579  assert(h->cur_pic_ptr->long_ref == 0);
580 
581  return 0;
582 }
583 
/* Save the bottom row(s) of the current macroblock into the top_borders
 * scratch buffer so the deblocking/intra prediction of the row below can
 * read them after the source rows have been overwritten. Handles MBAFF
 * pairs and the 4:2:0 / 4:2:2 / 4:4:4 chroma layouts at 8 and >8 bit
 * depth (pixel_shift).
 * NOTE(review): the first line of the signature (original line 584) was
 * lost -- presumably `static av_always_inline void backup_mb_border(
 * const H264Context *h, H264SliceContext *sl,`; confirm upstream. */
585  const uint8_t *src_y,
586  const uint8_t *src_cb, const uint8_t *src_cr,
587  int linesize, int uvlinesize,
588  int simple)
589 {
590  uint8_t *top_border;
591  int top_idx = 1;
592  const int pixel_shift = h->pixel_shift;
593  int chroma444 = CHROMA444(h);
594  int chroma422 = CHROMA422(h);
595 
596  src_y -= linesize;
597  src_cb -= uvlinesize;
598  src_cr -= uvlinesize;
599 
600  if (!simple && FRAME_MBAFF(h)) {
601  if (sl->mb_y & 1) {
602  if (!MB_MBAFF(sl)) {
603  top_border = sl->top_borders[0][sl->mb_x];
604  AV_COPY128(top_border, src_y + 15 * linesize);
605  if (pixel_shift)
606  AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
607  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
608  if (chroma444) {
609  if (pixel_shift) {
610  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
611  AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
612  AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
613  AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
614  } else {
615  AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
616  AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
617  }
618  } else if (chroma422) {
619  if (pixel_shift) {
620  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
621  AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
622  } else {
623  AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
624  AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
625  }
626  } else {
627  if (pixel_shift) {
628  AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
629  AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
630  } else {
631  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
632  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
633  }
634  }
635  }
636  }
637  } else if (MB_MBAFF(sl)) {
638  top_idx = 0;
639  } else
640  return;
641  }
642 
643  top_border = sl->top_borders[top_idx][sl->mb_x];
644  /* There are two lines saved, the line above the top macroblock
645  * of a pair, and the line above the bottom macroblock. */
646  AV_COPY128(top_border, src_y + 16 * linesize);
647  if (pixel_shift)
648  AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);
649 
650  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
651  if (chroma444) {
652  if (pixel_shift) {
653  AV_COPY128(top_border + 32, src_cb + 16 * linesize);
654  AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
655  AV_COPY128(top_border + 64, src_cr + 16 * linesize);
656  AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
657  } else {
658  AV_COPY128(top_border + 16, src_cb + 16 * linesize);
659  AV_COPY128(top_border + 32, src_cr + 16 * linesize);
660  }
661  } else if (chroma422) {
662  if (pixel_shift) {
663  AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
664  AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
665  } else {
666  AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
667  AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
668  }
669  } else {
670  if (pixel_shift) {
671  AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
672  AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
673  } else {
674  AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
675  AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
676  }
677  }
678  }
679 }
680 
681 /**
682  * Initialize implicit_weight table.
683  * @param field 0/1 initialize the weight for interlaced MBAFF
684  * -1 initializes the rest
685  */
/* NOTE(review): this dump lost line 686 (presumably the signature
 * `static void implicit_weight_table(const H264Context *h,
 * H264SliceContext *sl, int field)`) and line 720 (presumably
 * `sl->pwt.chroma_log2_weight_denom = 5;`); confirm upstream. */
687 {
688  int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;
689 
690  for (i = 0; i < 2; i++) {
691  sl->pwt.luma_weight_flag[i] = 0;
692  sl->pwt.chroma_weight_flag[i] = 0;
693  }
694 
695  if (field < 0) {
696  if (h->picture_structure == PICT_FRAME) {
697  cur_poc = h->cur_pic_ptr->poc;
698  } else {
699  cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1];
700  }
/* special case: single ref each way with POCs symmetric about the current
 * picture -> implicit weights degenerate to the 32/32 default, skip */
701  if (sl->ref_count[0] == 1 && sl->ref_count[1] == 1 && !FRAME_MBAFF(h) &&
702  sl->ref_list[0][0].poc + (int64_t)sl->ref_list[1][0].poc == 2LL * cur_poc) {
703  sl->pwt.use_weight = 0;
704  sl->pwt.use_weight_chroma = 0;
705  return;
706  }
707  ref_start = 0;
708  ref_count0 = sl->ref_count[0];
709  ref_count1 = sl->ref_count[1];
710  } else {
711  cur_poc = h->cur_pic_ptr->field_poc[field];
712  ref_start = 16;
713  ref_count0 = 16 + 2 * sl->ref_count[0];
714  ref_count1 = 16 + 2 * sl->ref_count[1];
715  }
716 
717  sl->pwt.use_weight = 2;
718  sl->pwt.use_weight_chroma = 2;
719  sl->pwt.luma_log2_weight_denom = 5;
721 
/* per-pair implicit weight derived from POC distances (H.264 8.4.2.3.1) */
722  for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
723  int64_t poc0 = sl->ref_list[0][ref0].poc;
724  for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
725  int w = 32;
726  if (!sl->ref_list[0][ref0].parent->long_ref && !sl->ref_list[1][ref1].parent->long_ref) {
727  int poc1 = sl->ref_list[1][ref1].poc;
728  int td = av_clip_int8(poc1 - poc0);
729  if (td) {
730  int tb = av_clip_int8(cur_poc - poc0);
731  int tx = (16384 + (FFABS(td) >> 1)) / td;
732  int dist_scale_factor = (tb * tx + 32) >> 8;
733  if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
734  w = 64 - dist_scale_factor;
735  }
736  }
737  if (field < 0) {
738  sl->pwt.implicit_weight[ref0][ref1][0] =
739  sl->pwt.implicit_weight[ref0][ref1][1] = w;
740  } else {
741  sl->pwt.implicit_weight[ref0][ref1][field] = w;
742  }
743  }
744  }
745 }
746 
747 /**
748  * initialize scan tables
749  */
/* Builds the per-context scan tables: the DSP layer expects transposed
 * coordinates, and the _q0 copies keep the untransposed originals for the
 * transform-bypass (lossless) path.
 * NOTE(review): line 750 (presumably the signature
 * `static void init_scan_tables(H264Context *h)`) was lost in this dump. */
751 {
752  int i;
753  for (i = 0; i < 16; i++) {
754 #define TRANSPOSE(x) ((x) >> 2) | (((x) << 2) & 0xF)
755  h->zigzag_scan[i] = TRANSPOSE(ff_zigzag_scan[i]);
756  h->field_scan[i] = TRANSPOSE(field_scan[i]);
757 #undef TRANSPOSE
758  }
759  for (i = 0; i < 64; i++) {
760 #define TRANSPOSE(x) ((x) >> 3) | (((x) & 7) << 3)
761  h->zigzag_scan8x8[i] = TRANSPOSE(ff_zigzag_direct[i]);
762  h->zigzag_scan8x8_cavlc[i] = TRANSPOSE(zigzag_scan8x8_cavlc[i]);
763  h->field_scan8x8[i] = TRANSPOSE(field_scan8x8[i]);
764  h->field_scan8x8_cavlc[i] = TRANSPOSE(field_scan8x8_cavlc[i]);
765 #undef TRANSPOSE
766  }
767  if (h->ps.sps->transform_bypass) { // FIXME same ugly
768  memcpy(h->zigzag_scan_q0 , ff_zigzag_scan , sizeof(h->zigzag_scan_q0 ));
769  memcpy(h->zigzag_scan8x8_q0 , ff_zigzag_direct , sizeof(h->zigzag_scan8x8_q0 ));
770  memcpy(h->zigzag_scan8x8_cavlc_q0 , zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
771  memcpy(h->field_scan_q0 , field_scan , sizeof(h->field_scan_q0 ));
772  memcpy(h->field_scan8x8_q0 , field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
773  memcpy(h->field_scan8x8_cavlc_q0 , field_scan8x8_cavlc , sizeof(h->field_scan8x8_cavlc_q0 ));
774  } else {
775  memcpy(h->zigzag_scan_q0 , h->zigzag_scan , sizeof(h->zigzag_scan_q0 ));
776  memcpy(h->zigzag_scan8x8_q0 , h->zigzag_scan8x8 , sizeof(h->zigzag_scan8x8_q0 ));
777  memcpy(h->zigzag_scan8x8_cavlc_q0 , h->zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
778  memcpy(h->field_scan_q0 , h->field_scan , sizeof(h->field_scan_q0 ));
779  memcpy(h->field_scan8x8_q0 , h->field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
780  memcpy(h->field_scan8x8_cavlc_q0 , h->field_scan8x8_cavlc , sizeof(h->field_scan8x8_cavlc_q0 ));
781  }
782 }
783 
/* Build the candidate pixel-format list for the current SPS (bit depth,
 * chroma format, color range/space) with compiled-in hwaccel formats
 * first, then either keep the already-negotiated format or run the
 * caller's get_format() via ff_get_format(). Returns the chosen format
 * or AVERROR_INVALIDDATA for unsupported bit depths. */
784 static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
785 {
786 #define HWACCEL_MAX (CONFIG_H264_DXVA2_HWACCEL + \
787  (CONFIG_H264_D3D11VA_HWACCEL * 2) + \
788  CONFIG_H264_NVDEC_HWACCEL + \
789  CONFIG_H264_VAAPI_HWACCEL + \
790  CONFIG_H264_VIDEOTOOLBOX_HWACCEL + \
791  CONFIG_H264_VDPAU_HWACCEL + \
792  CONFIG_H264_VULKAN_HWACCEL)
793  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
794 
795  switch (h->ps.sps->bit_depth_luma) {
796  case 9:
797  if (CHROMA444(h)) {
798  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
799  *fmt++ = AV_PIX_FMT_GBRP9;
800  } else
801  *fmt++ = AV_PIX_FMT_YUV444P9;
802  } else if (CHROMA422(h))
803  *fmt++ = AV_PIX_FMT_YUV422P9;
804  else
805  *fmt++ = AV_PIX_FMT_YUV420P9;
806  break;
807  case 10:
808 #if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
809  if (h->avctx->colorspace != AVCOL_SPC_RGB)
810  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
811 #endif
812 #if CONFIG_H264_VULKAN_HWACCEL
813  *fmt++ = AV_PIX_FMT_VULKAN;
814 #endif
815  if (CHROMA444(h)) {
816  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
817  *fmt++ = AV_PIX_FMT_GBRP10;
818  } else
819  *fmt++ = AV_PIX_FMT_YUV444P10;
820  } else if (CHROMA422(h))
821  *fmt++ = AV_PIX_FMT_YUV422P10;
822  else {
823 #if CONFIG_H264_VAAPI_HWACCEL
824  // Just add as candidate. Whether VAProfileH264High10 usable or
825  // not is decided by vaapi_decode_make_config() defined in FFmpeg
826  // and vaQueryCodingProfile() defined in libva.
827  *fmt++ = AV_PIX_FMT_VAAPI;
828 #endif
829  *fmt++ = AV_PIX_FMT_YUV420P10;
830  }
831  break;
832  case 12:
833 #if CONFIG_H264_VULKAN_HWACCEL
834  *fmt++ = AV_PIX_FMT_VULKAN;
835 #endif
836  if (CHROMA444(h)) {
837  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
838  *fmt++ = AV_PIX_FMT_GBRP12;
839  } else
840  *fmt++ = AV_PIX_FMT_YUV444P12;
841  } else if (CHROMA422(h))
842  *fmt++ = AV_PIX_FMT_YUV422P12;
843  else
844  *fmt++ = AV_PIX_FMT_YUV420P12;
845  break;
846  case 14:
847  if (CHROMA444(h)) {
848  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
849  *fmt++ = AV_PIX_FMT_GBRP14;
850  } else
851  *fmt++ = AV_PIX_FMT_YUV444P14;
852  } else if (CHROMA422(h))
853  *fmt++ = AV_PIX_FMT_YUV422P14;
854  else
855  *fmt++ = AV_PIX_FMT_YUV420P14;
856  break;
857  case 8:
858 #if CONFIG_H264_VDPAU_HWACCEL
859  *fmt++ = AV_PIX_FMT_VDPAU;
860 #endif
861 #if CONFIG_H264_VULKAN_HWACCEL
862  *fmt++ = AV_PIX_FMT_VULKAN;
863 #endif
864 #if CONFIG_H264_NVDEC_HWACCEL
865  *fmt++ = AV_PIX_FMT_CUDA;
866 #endif
867 #if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
868  if (h->avctx->colorspace != AVCOL_SPC_RGB)
869  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
870 #endif
871  if (CHROMA444(h)) {
872  if (h->avctx->colorspace == AVCOL_SPC_RGB)
873  *fmt++ = AV_PIX_FMT_GBRP;
874  else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
875  *fmt++ = AV_PIX_FMT_YUVJ444P;
876  else
877  *fmt++ = AV_PIX_FMT_YUV444P;
878  } else if (CHROMA422(h)) {
879  if (h->avctx->color_range == AVCOL_RANGE_JPEG)
880  *fmt++ = AV_PIX_FMT_YUVJ422P;
881  else
882  *fmt++ = AV_PIX_FMT_YUV422P;
883  } else {
884 #if CONFIG_H264_DXVA2_HWACCEL
885  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
886 #endif
887 #if CONFIG_H264_D3D11VA_HWACCEL
888  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
889  *fmt++ = AV_PIX_FMT_D3D11;
890 #endif
891 #if CONFIG_H264_VAAPI_HWACCEL
892  *fmt++ = AV_PIX_FMT_VAAPI;
893 #endif
894  if (h->avctx->color_range == AVCOL_RANGE_JPEG)
895  *fmt++ = AV_PIX_FMT_YUVJ420P;
896  else
897  *fmt++ = AV_PIX_FMT_YUV420P;
898  }
899  break;
900  default:
901  av_log(h->avctx, AV_LOG_ERROR,
902  "Unsupported bit depth %d\n", h->ps.sps->bit_depth_luma);
903  return AVERROR_INVALIDDATA;
904  }
905 
906  *fmt = AV_PIX_FMT_NONE;
907 
/* keep the current format if it is still a candidate, unless the caller
 * explicitly asked for the get_format() callback to run again */
908  for (int i = 0; pix_fmts[i] != AV_PIX_FMT_NONE; i++)
909  if (pix_fmts[i] == h->avctx->pix_fmt && !force_callback)
910  return pix_fmts[i];
911  return ff_get_format(h->avctx, pix_fmts);
912 }
913 
914 /* export coded and cropped frame dimensions to AVCodecContext */
/* NOTE(review): line 915 (presumably the signature
 * `static void init_dimensions(H264Context *h)`) was lost in this dump. */
916 {
917  const SPS *sps = h->ps.sps;
918  int cr = sps->crop_right;
919  int cl = sps->crop_left;
920  int ct = sps->crop_top;
921  int cb = sps->crop_bottom;
922  int width = h->width - (cr + cl);
923  int height = h->height - (ct + cb);
924  av_assert0(sps->crop_right + sps->crop_left < (unsigned)h->width);
925  av_assert0(sps->crop_top + sps->crop_bottom < (unsigned)h->height);
926 
927  /* handle container cropping */
/* prefer container-provided dimensions when they are compatible with the
 * SPS cropping (same 16-aligned size, no top/left offset, not larger) */
928  if (h->width_from_caller > 0 && h->height_from_caller > 0 &&
929  !sps->crop_top && !sps->crop_left &&
930  FFALIGN(h->width_from_caller, 16) == FFALIGN(width, 16) &&
931  FFALIGN(h->height_from_caller, 16) == FFALIGN(height, 16) &&
932  h->width_from_caller <= width &&
933  h->height_from_caller <= height) {
934  width = h->width_from_caller;
935  height = h->height_from_caller;
936  cl = 0;
937  ct = 0;
938  cr = h->width - width;
939  cb = h->height - height;
940  } else {
941  h->width_from_caller = 0;
942  h->height_from_caller = 0;
943  }
944 
945  h->avctx->coded_width = h->width;
946  h->avctx->coded_height = h->height;
947  h->avctx->width = width;
948  h->avctx->height = height;
949  h->crop_right = cr;
950  h->crop_left = cl;
951  h->crop_top = ct;
952  h->crop_bottom = cb;
953 }
954 
/* One-time (re)initialization of the decoder context from the active SPS:
 * SAR, framerate, chroma shifts, table allocation, bit-depth-dependent
 * DSP function pointers, and slice-context setup. Returns 0 or a negative
 * error; on failure context_initialized is cleared.
 * NOTE(review): this dump lost lines 955 (presumably the signature
 * `static int h264_slice_header_init(H264Context *h)`), 961 and 994
 * (presumably `ret = AVERROR_INVALIDDATA;` plus a log line), 977
 * (unknown), 982-983 (presumably `ret = ff_h264_alloc_tables(h);` and
 * the opening of the following error check), 1024 (presumably
 * `ff_h264_slice_context_init(h, sl);`) and 1032 (presumably a cleanup
 * call such as `ff_h264_free_tables(h);`); confirm upstream. */
956 {
957  const SPS *sps = h->ps.sps;
958  int i, ret;
959 
960  if (!sps) {
962  goto fail;
963  }
964 
965  ff_set_sar(h->avctx, sps->vui.sar);
966  av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt,
967  &h->chroma_x_shift, &h->chroma_y_shift);
968 
969  if (sps->timing_info_present_flag) {
970  int64_t den = sps->time_scale;
/* old x264 builds wrote half the correct time_scale; compensate */
971  if (h->x264_build < 44U)
972  den *= 2;
973  av_reduce(&h->avctx->framerate.den, &h->avctx->framerate.num,
974  sps->num_units_in_tick * 2, den, 1 << 30);
975  }
976 
978 
979  h->first_field = 0;
980  h->prev_interlaced_frame = 1;
981 
984  if (ret < 0) {
985  av_log(h->avctx, AV_LOG_ERROR, "Could not allocate memory\n");
986  goto fail;
987  }
988 
/* only 8..14 bit, excluding the odd depths 11 and 13, are supported */
989  if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
990  sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13
991  ) {
992  av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
993  sps->bit_depth_luma);
995  goto fail;
996  }
997 
998  h->cur_bit_depth_luma =
999  h->avctx->bits_per_raw_sample = sps->bit_depth_luma;
1000  h->cur_chroma_format_idc = sps->chroma_format_idc;
1001  h->pixel_shift = sps->bit_depth_luma > 8;
1002  h->chroma_format_idc = sps->chroma_format_idc;
1003  h->bit_depth_luma = sps->bit_depth_luma;
1004 
1005  ff_h264dsp_init(&h->h264dsp, sps->bit_depth_luma,
1006  sps->chroma_format_idc);
1007  ff_h264chroma_init(&h->h264chroma, sps->bit_depth_chroma);
1008  ff_h264qpel_init(&h->h264qpel, sps->bit_depth_luma);
1009  ff_h264_pred_init(&h->hpc, AV_CODEC_ID_H264, sps->bit_depth_luma,
1010  sps->chroma_format_idc);
1011  ff_videodsp_init(&h->vdsp, sps->bit_depth_luma);
1012 
1013  if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) {
1014  ff_h264_slice_context_init(h, &h->slice_ctx[0]);
1015  } else {
1016  for (i = 0; i < h->nb_slice_ctx; i++) {
1017  H264SliceContext *sl = &h->slice_ctx[i];
1018 
1019  sl->h264 = h;
1020  sl->intra4x4_pred_mode = h->intra4x4_pred_mode + i * 8 * 2 * h->mb_stride;
1021  sl->mvd_table[0] = h->mvd_table[0] + i * 8 * 2 * h->mb_stride;
1022  sl->mvd_table[1] = h->mvd_table[1] + i * 8 * 2 * h->mb_stride;
1023 
1025  }
1026  }
1027 
1028  h->context_initialized = 1;
1029 
1030  return 0;
1031 fail:
1033  h->context_initialized = 0;
1034  return ret;
1035 }
1036 
/* Normalize a pixel format for comparison purposes: map deprecated full-range
 * "J" variants onto their generic counterparts, pass everything else through.
 * NOTE(review): the signature line (original line 1037) and the YUVJ* case
 * labels with their return statements (original lines 1040-1042) were lost in
 * this extract. */
1038 {
1039  switch (a) {
1043  default:
1044  return a;
1045  }
1046 }
1047 
/**
 * Activate the parameter sets referenced by a slice header and reinitialize
 * the decoder context when they imply a different geometry or format.
 *
 * @param h           decoder context to update
 * @param sl          slice context whose pps_id selects the active PPS
 * @param first_slice nonzero for the first slice of an access unit; only then
 *                    may the active PPS be replaced or a reinit be triggered
 * @return 0 on success, a negative AVERROR code on failure
 */
1048 static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
1049 {
1050  const SPS *sps;
 /* needs_reinit: soft reinit due to SPS/colorspace change;
  * must_reinit: hard reinit because the published geometry/format no longer
  * matches the active SPS. Either one forces the reinit path below. */
1051  int needs_reinit = 0, must_reinit, ret;
1052 
1053  if (first_slice)
1054  ff_refstruct_replace(&h->ps.pps, h->ps.pps_list[sl->pps_id]);
1055 
1056  if (h->ps.sps != h->ps.pps->sps) {
1057  h->ps.sps = h->ps.pps->sps;
1058 
1059  if (h->mb_width != h->ps.sps->mb_width ||
1060  h->mb_height != h->ps.sps->mb_height ||
1061  h->cur_bit_depth_luma != h->ps.sps->bit_depth_luma ||
1062  h->cur_chroma_format_idc != h->ps.sps->chroma_format_idc
1063  )
1064  needs_reinit = 1;
1065 
1066  if (h->bit_depth_luma != h->ps.sps->bit_depth_luma ||
1067  h->chroma_format_idc != h->ps.sps->chroma_format_idc)
1068  needs_reinit = 1;
1069  }
1070  sps = h->ps.sps;
1071 
1072  must_reinit = (h->context_initialized &&
1073  ( 16*sps->mb_width != h->avctx->coded_width
1074  || 16*sps->mb_height != h->avctx->coded_height
1075  || h->cur_bit_depth_luma != sps->bit_depth_luma
1076  || h->cur_chroma_format_idc != sps->chroma_format_idc
1077  || h->mb_width != sps->mb_width
1078  || h->mb_height != sps->mb_height
1079  ));
 /* Compare pixel formats modulo the J/non-J (full-range) distinction so a
  * pure range change does not force a full reinit. */
1080  if (h->avctx->pix_fmt == AV_PIX_FMT_NONE
1081  || (non_j_pixfmt(h->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h, 0))))
1082  must_reinit = 1;
1083 
1084  if (first_slice && av_cmp_q(sps->vui.sar, h->avctx->sample_aspect_ratio))
1085  must_reinit = 1;
1086 
1087  if (!h->setup_finished) {
1088  h->avctx->profile = ff_h264_get_profile(sps);
1089  h->avctx->level = sps->level_idc;
1090  h->avctx->refs = sps->ref_frame_count;
1091 
1092  h->mb_width = sps->mb_width;
1093  h->mb_height = sps->mb_height;
1094  h->mb_num = h->mb_width * h->mb_height;
1095  h->mb_stride = h->mb_width + 1;
1096 
1097  h->b_stride = h->mb_width * 4;
1098 
1099  h->chroma_y_shift = sps->chroma_format_idc <= 1; // 400 uses yuv420p
1100 
1101  h->width = 16 * h->mb_width;
1102  h->height = 16 * h->mb_height;
1103 
1104  init_dimensions(h);
1105 
1106  if (sps->vui.video_signal_type_present_flag) {
1107  h->avctx->color_range = sps->vui.video_full_range_flag > 0 ? AVCOL_RANGE_JPEG
1108  : AVCOL_RANGE_MPEG;
1109  if (sps->vui.colour_description_present_flag) {
1110  if (h->avctx->colorspace != sps->vui.matrix_coeffs)
1111  needs_reinit = 1;
1112  h->avctx->color_primaries = sps->vui.colour_primaries;
1113  h->avctx->color_trc = sps->vui.transfer_characteristics;
1114  h->avctx->colorspace = sps->vui.matrix_coeffs;
1115  }
1116  }
1117 
 /* An alternative-transfer SEI overrides the VUI transfer characteristic. */
1118  if (h->sei.common.alternative_transfer.present &&
1119  av_color_transfer_name(h->sei.common.alternative_transfer.preferred_transfer_characteristics) &&
1120  h->sei.common.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
1121  h->avctx->color_trc = h->sei.common.alternative_transfer.preferred_transfer_characteristics;
1122  }
1123  }
1124  h->avctx->chroma_sample_location = sps->vui.chroma_location;
1125 
1126  if (!h->context_initialized || must_reinit || needs_reinit) {
1127  int flush_changes = h->context_initialized;
1128  h->context_initialized = 0;
 /* A mid-picture geometry change is invalid: only the first slice context
  * may trigger a reinit. */
1129  if (sl != h->slice_ctx) {
1130  av_log(h->avctx, AV_LOG_ERROR,
1131  "changing width %d -> %d / height %d -> %d on "
1132  "slice %d\n",
1133  h->width, h->avctx->coded_width,
1134  h->height, h->avctx->coded_height,
1135  h->current_slice + 1);
1136  return AVERROR_INVALIDDATA;
1137  }
1138 
1139  av_assert1(first_slice);
1140 
1141  if (flush_changes)
 /* NOTE(review): the flush call guarded by this condition (original line
  * 1142) was lost in this extract. */
1143 
1144  if ((ret = get_pixel_format(h, 1)) < 0)
1145  return ret;
1146  h->avctx->pix_fmt = ret;
1147 
1148  av_log(h->avctx, AV_LOG_VERBOSE, "Reinit context to %dx%d, "
1149  "pix_fmt: %s\n", h->width, h->height, av_get_pix_fmt_name(h->avctx->pix_fmt));
1150 
1151  if ((ret = h264_slice_header_init(h)) < 0) {
1152  av_log(h->avctx, AV_LOG_ERROR,
1153  "h264_slice_header_init() failed\n");
1154  return ret;
1155  }
1156  }
1157 
1158  return 0;
1159 }
1160 
/* Export per-frame properties to the output AVFrame: interlacing and field
 * order (from picture timing SEI when present, otherwise from the decoding
 * process), repeat_pict for telecine hints, common SEI side data, and SMPTE
 * timecode side data / metadata.
 * Returns 0 on success, a negative error code on failure.
 * NOTE(review): the signature line (original line 1161), the pic_struct case
 * labels in the switch below, and the side-data allocation call (original
 * lines 1258-1259) were lost in this extract. */
1162 {
1163  const SPS *sps = h->ps.sps;
1164  H264Picture *cur = h->cur_pic_ptr;
1165  AVFrame *out = cur->f;
1166  int interlaced_frame = 0, top_field_first = 0;
1167  int ret;
1168 
1169  out->flags &= ~AV_FRAME_FLAG_INTERLACED;
1170  out->repeat_pict = 0;
1171 
1172  /* Signal interlacing information externally. */
1173  /* Prioritize picture timing SEI information over used
1174  * decoding process if it exists. */
1175  if (h->sei.picture_timing.present) {
1176  int ret = ff_h264_sei_process_picture_timing(&h->sei.picture_timing, sps,
1177  h->avctx);
1178  if (ret < 0) {
1179  av_log(h->avctx, AV_LOG_ERROR, "Error processing a picture timing SEI\n");
1180  if (h->avctx->err_recognition & AV_EF_EXPLODE)
1181  return ret;
 /* Non-EXPLODE mode: drop the bad SEI and fall back to the
  * decoding-process-derived interlacing below. */
1182  h->sei.picture_timing.present = 0;
1183  }
1184  }
1185 
1186  if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
1187  const H264SEIPictureTiming *pt = &h->sei.picture_timing;
 /* NOTE(review): the H264_SEI_PIC_STRUCT_* case labels of this switch
  * were lost in this extract; only the case bodies remain. */
1188  switch (pt->pic_struct) {
1190  break;
1193  interlaced_frame = 1;
1194  break;
1198  interlaced_frame = 1;
1199  else
1200  // try to flag soft telecine progressive
1201  interlaced_frame = !!h->prev_interlaced_frame;
1202  break;
1205  /* Signal the possibility of telecined film externally
1206  * (pic_struct 5,6). From these hints, let the applications
1207  * decide if they apply deinterlacing. */
1208  out->repeat_pict = 1;
1209  break;
1211  out->repeat_pict = 2;
1212  break;
1214  out->repeat_pict = 4;
1215  break;
1216  }
1217 
 /* ct_type, when present for frame/field pic_structs, overrides the
  * interlacing decision (bit 1 set => interlaced). */
1218  if ((pt->ct_type & 3) &&
1219  pt->pic_struct <= H264_SEI_PIC_STRUCT_BOTTOM_TOP)
1220  interlaced_frame = ((pt->ct_type & (1 << 1)) != 0);
1221  } else {
1222  /* Derive interlacing flag from used decoding process. */
1223  interlaced_frame = !!FIELD_OR_MBAFF_PICTURE(h);
1224  }
1225  h->prev_interlaced_frame = interlaced_frame;
1226 
1227  if (cur->field_poc[0] != cur->field_poc[1]) {
1228  /* Derive top_field_first from field pocs. */
1229  top_field_first = (cur->field_poc[0] < cur->field_poc[1]);
1230  } else {
1231  if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
1232  /* Use picture timing SEI information. Even if it is a
1233  * information of a past frame, better than nothing. */
1234  if (h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM ||
1235  h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
1236  top_field_first = 1;
1237  } else if (interlaced_frame) {
1238  /* Default to top field first when pic_struct_present_flag
1239  * is not set but interlaced frame detected */
1240  top_field_first = 1;
1241  } // else
1242  /* Most likely progressive */
1243  }
1244 
1245  out->flags |= (AV_FRAME_FLAG_INTERLACED * interlaced_frame) |
1246  (AV_FRAME_FLAG_TOP_FIELD_FIRST * top_field_first);
1247 
1248  ret = ff_h2645_sei_to_frame(out, &h->sei.common, AV_CODEC_ID_H264, h->avctx,
1249  &sps->vui, sps->bit_depth_luma, sps->bit_depth_chroma,
1250  cur->poc + (unsigned)(h->poc_offset << 5));
1251  if (ret < 0)
1252  return ret;
1253 
1254  if (h->sei.picture_timing.timecode_cnt > 0) {
1255  uint32_t *tc_sd;
1256  char tcbuf[AV_TIMECODE_STR_SIZE];
1257 
 /* NOTE(review): allocation of 'tcside' (original lines 1258-1259,
  * presumably av_frame_new_side_data for S12M timecode) was lost in this
  * extract; only its trailing size argument remains below. */
1260  sizeof(uint32_t)*4);
1261  if (!tcside)
1262  return AVERROR(ENOMEM);
1263 
 /* S12M side-data layout: word 0 = timecode count, words 1..3 = codes. */
1264  tc_sd = (uint32_t*)tcside->data;
1265  tc_sd[0] = h->sei.picture_timing.timecode_cnt;
1266 
1267  for (int i = 0; i < tc_sd[0]; i++) {
1268  int drop = h->sei.picture_timing.timecode[i].dropframe;
1269  int hh = h->sei.picture_timing.timecode[i].hours;
1270  int mm = h->sei.picture_timing.timecode[i].minutes;
1271  int ss = h->sei.picture_timing.timecode[i].seconds;
1272  int ff = h->sei.picture_timing.timecode[i].frame;
1273 
1274  tc_sd[i + 1] = av_timecode_get_smpte(h->avctx->framerate, drop, hh, mm, ss, ff);
1275  av_timecode_make_smpte_tc_string2(tcbuf, h->avctx->framerate, tc_sd[i + 1], 0, 0);
1276  av_dict_set(&out->metadata, "timecode", tcbuf, 0);
1277  }
 /* Consume the SEI timecodes so they are not exported twice. */
1278  h->sei.picture_timing.timecode_cnt = 0;
1279  }
1280 
1281  return 0;
1282 }
1283 
/* Insert the current picture into the delayed-picture (reorder) buffer,
 * estimate the required reordering depth from recent POCs, and pick the next
 * picture to output (h->next_output_pic), handling MMCO resets, keyframes and
 * recovery-point tracking. Always returns 0.
 * NOTE(review): the signature line (original line 1284) and one statement
 * (original line 1328) were lost in this extract. */
1285 {
1286  const SPS *sps = h->ps.sps;
1287  H264Picture *out = h->cur_pic_ptr;
1288  H264Picture *cur = h->cur_pic_ptr;
1289  int i, pics, out_of_order, out_idx;
1290 
1291  cur->mmco_reset = h->mmco_reset;
1292  h->mmco_reset = 0;
1293 
1294  if (sps->bitstream_restriction_flag ||
1295  h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
1296  h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
1297  }
1298 
 /* Insert cur->poc into the sorted last_pocs[] window; 'i' ends up as the
  * insertion position, from which the out-of-order depth is derived. */
1299  for (i = 0; 1; i++) {
1300  if(i == H264_MAX_DPB_FRAMES || cur->poc < h->last_pocs[i]){
1301  if(i)
1302  h->last_pocs[i-1] = cur->poc;
1303  break;
1304  } else if(i) {
1305  h->last_pocs[i-1]= h->last_pocs[i];
1306  }
1307  }
1308  out_of_order = H264_MAX_DPB_FRAMES - i;
1309  if( cur->f->pict_type == AV_PICTURE_TYPE_B
1310  || (h->last_pocs[H264_MAX_DPB_FRAMES-2] > INT_MIN && h->last_pocs[H264_MAX_DPB_FRAMES-1] - (int64_t)h->last_pocs[H264_MAX_DPB_FRAMES-2] > 2))
1311  out_of_order = FFMAX(out_of_order, 1);
1312  if (out_of_order == H264_MAX_DPB_FRAMES) {
 /* POC below the whole window: treat as an implicit MMCO reset. */
1313  av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
1314  for (i = 1; i < H264_MAX_DPB_FRAMES; i++)
1315  h->last_pocs[i] = INT_MIN;
1316  h->last_pocs[0] = cur->poc;
1317  cur->mmco_reset = 1;
1318  } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
1319  int loglevel = h->avctx->frame_num > 1 ? AV_LOG_WARNING : AV_LOG_VERBOSE;
1320  av_log(h->avctx, loglevel, "Increasing reorder buffer to %d\n", out_of_order);
1321  h->avctx->has_b_frames = out_of_order;
1322  }
1323 
1324  pics = 0;
1325  while (h->delayed_pic[pics])
1326  pics++;
1327 
 /* NOTE(review): a statement (original line 1328, likely an assert on
  * 'pics' vs the delayed_pic[] capacity) was lost in this extract. */
1329 
1330  h->delayed_pic[pics++] = cur;
 /* Keep the picture referenced while it sits in the reorder buffer. */
1331  if (cur->reference == 0)
1332  cur->reference = DELAYED_PIC_REF;
1333 
 /* Pick the lowest-POC delayed picture, stopping at a keyframe or MMCO
  * reset (pictures after those must not be output earlier). */
1334  out = h->delayed_pic[0];
1335  out_idx = 0;
1336  for (i = 1; h->delayed_pic[i] &&
1337  !(h->delayed_pic[i]->f->flags & AV_FRAME_FLAG_KEY) &&
1338  !h->delayed_pic[i]->mmco_reset;
1339  i++)
1340  if (h->delayed_pic[i]->poc < out->poc) {
1341  out = h->delayed_pic[i];
1342  out_idx = i;
1343  }
1344  if (h->avctx->has_b_frames == 0 &&
1345  ((h->delayed_pic[0]->f->flags & AV_FRAME_FLAG_KEY) || h->delayed_pic[0]->mmco_reset))
1346  h->next_outputed_poc = INT_MIN;
1347  out_of_order = out->poc < h->next_outputed_poc;
1348 
1349  if (out_of_order || pics > h->avctx->has_b_frames) {
1350  out->reference &= ~DELAYED_PIC_REF;
1351  for (i = out_idx; h->delayed_pic[i]; i++)
1352  h->delayed_pic[i] = h->delayed_pic[i + 1];
1353  }
1354  if (!out_of_order && pics > h->avctx->has_b_frames) {
1355  h->next_output_pic = out;
1356  if (out_idx == 0 && h->delayed_pic[0] && ((h->delayed_pic[0]->f->flags & AV_FRAME_FLAG_KEY) || h->delayed_pic[0]->mmco_reset)) {
1357  h->next_outputed_poc = INT_MIN;
1358  } else
1359  h->next_outputed_poc = out->poc;
1360 
1361  if (out->recovered) {
1362  // We have reached an recovery point and all frames after it in
1363  // display order are "recovered".
1364  h->frame_recovered |= FRAME_RECOVERED_SEI;
1365  }
1366  out->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI);
1367 
1368  if (!out->recovered) {
 /* Suppress unrecovered output unless the user asked to see corrupt
  * or all frames. */
1369  if (!(h->avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) &&
1370  !(h->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL)) {
1371  h->next_output_pic = NULL;
1372  } else {
1373  out->f->flags |= AV_FRAME_FLAG_CORRUPT;
1374  }
1375  }
1376  } else {
1377  av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
1378  }
1379 
1380  return 0;
1381 }
1382 
1383 /* This function is called right after decoding the slice header for a first
1384  * slice in a field (or a frame). It decides whether we are decoding a new frame
1385  * or a second field in a pair and does the necessary setup.
1386  */
 /* NOTE(review): the first signature line (static int h264_field_start(...),
  * original line 1387) was lost in this extract; the continuation line with
  * the remaining parameters follows. Returns 0 on success or a negative
  * AVERROR code. */
1388  const H2645NAL *nal, int first_slice)
1389 {
1390  int i;
1391  const SPS *sps;
1392 
1393  int last_pic_structure, last_pic_droppable, ret;
1394 
1395  ret = h264_init_ps(h, sl, first_slice);
1396  if (ret < 0)
1397  return ret;
1398 
1399  sps = h->ps.sps;
1400 
1401  if (sps && sps->bitstream_restriction_flag &&
1402  h->avctx->has_b_frames < sps->num_reorder_frames) {
1403  h->avctx->has_b_frames = sps->num_reorder_frames;
1404  }
1405 
 /* Remember the previous field's structure/droppability so we can detect
  * complementary field pairs and roll back on invalid combinations. */
1406  last_pic_droppable = h->droppable;
1407  last_pic_structure = h->picture_structure;
1408  h->droppable = (nal->ref_idc == 0);
1409  h->picture_structure = sl->picture_structure;
1410 
1411  h->poc.frame_num = sl->frame_num;
1412  h->poc.poc_lsb = sl->poc_lsb;
1413  h->poc.delta_poc_bottom = sl->delta_poc_bottom;
1414  h->poc.delta_poc[0] = sl->delta_poc[0];
1415  h->poc.delta_poc[1] = sl->delta_poc[1];
1416 
1417  if (nal->type == H264_NAL_IDR_SLICE)
1418  h->poc_offset = sl->idr_pic_id;
1419  else if (h->picture_intra_only)
1420  h->poc_offset = 0;
1421 
1422  /* Shorten frame num gaps so we don't have to allocate reference
1423  * frames just to throw them away */
1424  if (h->poc.frame_num != h->poc.prev_frame_num) {
1425  int unwrap_prev_frame_num = h->poc.prev_frame_num;
1426  int max_frame_num = 1 << sps->log2_max_frame_num;
1427 
1428  if (unwrap_prev_frame_num > h->poc.frame_num)
1429  unwrap_prev_frame_num -= max_frame_num;
1430 
1431  if ((h->poc.frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) {
1432  unwrap_prev_frame_num = (h->poc.frame_num - sps->ref_frame_count) - 1;
1433  if (unwrap_prev_frame_num < 0)
1434  unwrap_prev_frame_num += max_frame_num;
1435 
1436  h->poc.prev_frame_num = unwrap_prev_frame_num;
1437  }
1438  }
1439 
1440  /* See if we have a decoded first field looking for a pair...
1441  * Here, we're using that to see if we should mark previously
1442  * decode frames as "finished".
1443  * We have to do that before the "dummy" in-between frame allocation,
1444  * since that can modify h->cur_pic_ptr. */
1445  if (h->first_field) {
1446  int last_field = last_pic_structure == PICT_BOTTOM_FIELD;
1447  av_assert0(h->cur_pic_ptr);
1448  av_assert0(h->cur_pic_ptr->f->buf[0]);
1449  assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
1450 
1451  /* Mark old field/frame as completed */
1452  if (h->cur_pic_ptr->tf.owner[last_field] == h->avctx) {
1453  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, last_field);
1454  }
1455 
1456  /* figure out if we have a complementary field pair */
1457  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
1458  /* Previous field is unmatched. Don't display it, but let it
1459  * remain for reference if marked as such. */
1460  if (last_pic_structure != PICT_FRAME) {
1461  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1462  last_pic_structure == PICT_TOP_FIELD);
1463  }
1464  } else {
1465  if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
1466  /* This and previous field were reference, but had
1467  * different frame_nums. Consider this field first in
1468  * pair. Throw away previous field except for reference
1469  * purposes. */
1470  if (last_pic_structure != PICT_FRAME) {
1471  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1472  last_pic_structure == PICT_TOP_FIELD);
1473  }
1474  } else {
1475  /* Second field in complementary pair */
1476  if (!((last_pic_structure == PICT_TOP_FIELD &&
1477  h->picture_structure == PICT_BOTTOM_FIELD) ||
1478  (last_pic_structure == PICT_BOTTOM_FIELD &&
1479  h->picture_structure == PICT_TOP_FIELD))) {
1480  av_log(h->avctx, AV_LOG_ERROR,
1481  "Invalid field mode combination %d/%d\n",
1482  last_pic_structure, h->picture_structure);
 /* Roll back to the previous field's state before failing. */
1483  h->picture_structure = last_pic_structure;
1484  h->droppable = last_pic_droppable;
1485  return AVERROR_INVALIDDATA;
1486  } else if (last_pic_droppable != h->droppable) {
1487  avpriv_request_sample(h->avctx,
1488  "Found reference and non-reference fields in the same frame, which");
1489  h->picture_structure = last_pic_structure;
1490  h->droppable = last_pic_droppable;
1491  return AVERROR_PATCHWELCOME;
1492  }
1493  }
1494  }
1495  }
1496 
 /* Conceal frame_num gaps: synthesize dummy frames (one per missing
  * frame_num) so reference list construction stays consistent. */
1497  while (h->poc.frame_num != h->poc.prev_frame_num && !h->first_field &&
1498  h->poc.frame_num != (h->poc.prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
1499  const H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
1500  av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
1501  h->poc.frame_num, h->poc.prev_frame_num);
1502  if (!sps->gaps_in_frame_num_allowed_flag)
1503  for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
1504  h->last_pocs[i] = INT_MIN;
1505  ret = h264_frame_start(h);
1506  if (ret < 0) {
1507  h->first_field = 0;
1508  return ret;
1509  }
1510 
1511  h->poc.prev_frame_num++;
1512  h->poc.prev_frame_num %= 1 << sps->log2_max_frame_num;
1513  h->cur_pic_ptr->frame_num = h->poc.prev_frame_num;
1514  h->cur_pic_ptr->invalid_gap = !sps->gaps_in_frame_num_allowed_flag;
 /* The dummy frame is never actually decoded; report both fields done. */
1515  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
1516  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
1517 
1518  h->explicit_ref_marking = 0;
 /* NOTE(review): the ref-marking call whose result is checked below
  * (original line 1519, presumably ff_h264_execute_ref_pic_marking) was
  * lost in this extract. */
1520  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1521  return ret;
1522  /* Error concealment: If a ref is missing, copy the previous ref
1523  * in its place.
1524  * FIXME: Avoiding a memcpy would be nice, but ref handling makes
1525  * many assumptions about there being no actual duplicates.
1526  * FIXME: This does not copy padding for out-of-frame motion
1527  * vectors. Given we are concealing a lost frame, this probably
1528  * is not noticeable by comparison, but it should be fixed. */
1529  if (h->short_ref_count) {
 /* Mid-gray fill values per plane, used when no previous ref exists. */
1530  int c[4] = {
1531  1<<(h->ps.sps->bit_depth_luma-1),
1532  1<<(h->ps.sps->bit_depth_chroma-1),
1533  1<<(h->ps.sps->bit_depth_chroma-1),
1534  -1
1535  };
1536 
1537  if (prev &&
1538  h->short_ref[0]->f->width == prev->f->width &&
1539  h->short_ref[0]->f->height == prev->f->height &&
1540  h->short_ref[0]->f->format == prev->f->format) {
1541  ff_thread_await_progress(&prev->tf, INT_MAX, 0);
1542  if (prev->field_picture)
1543  ff_thread_await_progress(&prev->tf, INT_MAX, 1);
1544  ff_thread_release_ext_buffer(&h->short_ref[0]->tf);
1545  h->short_ref[0]->tf.f = h->short_ref[0]->f;
1546  ret = ff_thread_ref_frame(&h->short_ref[0]->tf, &prev->tf);
1547  if (ret < 0)
1548  return ret;
1549  h->short_ref[0]->poc = prev->poc + 2U;
1550  ff_thread_report_progress(&h->short_ref[0]->tf, INT_MAX, 0);
1551  if (h->short_ref[0]->field_picture)
1552  ff_thread_report_progress(&h->short_ref[0]->tf, INT_MAX, 1);
1553  } else if (!h->frame_recovered && !h->avctx->hwaccel)
1554  color_frame(h->short_ref[0]->f, c);
1555  h->short_ref[0]->frame_num = h->poc.prev_frame_num;
1556  }
1557  }
1558 
1559  /* See if we have a decoded first field looking for a pair...
1560  * We're using that to see whether to continue decoding in that
1561  * frame, or to allocate a new one. */
1562  if (h->first_field) {
1563  av_assert0(h->cur_pic_ptr);
1564  av_assert0(h->cur_pic_ptr->f->buf[0]);
1565  assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
1566 
1567  /* figure out if we have a complementary field pair */
1568  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
1569  /* Previous field is unmatched. Don't display it, but let it
1570  * remain for reference if marked as such. */
1571  h->missing_fields ++;
1572  h->cur_pic_ptr = NULL;
1573  h->first_field = FIELD_PICTURE(h);
1574  } else {
1575  h->missing_fields = 0;
1576  if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
1577  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1578  h->picture_structure==PICT_BOTTOM_FIELD);
1579  /* This and the previous field had different frame_nums.
1580  * Consider this field first in pair. Throw away previous
1581  * one except for reference purposes. */
1582  h->first_field = 1;
1583  h->cur_pic_ptr = NULL;
1584  } else if (h->cur_pic_ptr->reference & DELAYED_PIC_REF) {
1585  /* This frame was already output, we cannot draw into it
1586  * anymore.
1587  */
1588  h->first_field = 1;
1589  h->cur_pic_ptr = NULL;
1590  } else {
1591  /* Second field in complementary pair */
1592  h->first_field = 0;
1593  }
1594  }
1595  } else {
1596  /* Frame or first field in a potentially complementary pair */
1597  h->first_field = FIELD_PICTURE(h);
1598  }
1599 
1600  if (!FIELD_PICTURE(h) || h->first_field) {
1601  if (h264_frame_start(h) < 0) {
1602  h->first_field = 0;
1603  return AVERROR_INVALIDDATA;
1604  }
1605  } else {
 /* Second field: reuse the first field's picture, take ownership of the
  * corresponding field for progress reporting. */
1606  int field = h->picture_structure == PICT_BOTTOM_FIELD;
 /* NOTE(review): a statement (original line 1607) was lost in this
  * extract. */
1608  h->cur_pic_ptr->tf.owner[field] = h->avctx;
1609  }
1610  /* Some macroblocks can be accessed before they're available in case
1611  * of lost slices, MBAFF or threading. */
1612  if (FIELD_PICTURE(h)) {
1613  for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++)
1614  memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table));
1615  } else {
1616  memset(h->slice_table, -1,
1617  (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
1618  }
1619 
1620  ret = ff_h264_init_poc(h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc,
1621  h->ps.sps, &h->poc, h->picture_structure, nal->ref_idc);
1622  if (ret < 0)
1623  return ret;
1624 
 /* Copy the slice-level MMCO commands to the global context. */
1625  memcpy(h->mmco, sl->mmco, sl->nb_mmco * sizeof(*h->mmco));
1626  h->nb_mmco = sl->nb_mmco;
1627  h->explicit_ref_marking = sl->explicit_ref_marking;
1628 
1629  h->picture_idr = nal->type == H264_NAL_IDR_SLICE;
1630 
1631  if (h->sei.recovery_point.recovery_frame_cnt >= 0) {
1632  const int sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
1633 
1634  if (h->poc.frame_num != sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
1635  h->valid_recovery_point = 1;
1636 
1637  if ( h->recovery_frame < 0
1638  || av_mod_uintp2(h->recovery_frame - h->poc.frame_num, h->ps.sps->log2_max_frame_num) > sei_recovery_frame_cnt) {
1639  h->recovery_frame = av_mod_uintp2(h->poc.frame_num + sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
1640 
1641  if (!h->valid_recovery_point)
1642  h->recovery_frame = h->poc.frame_num;
1643  }
1644  }
1645 
1646  h->cur_pic_ptr->f->flags |= AV_FRAME_FLAG_KEY * !!(nal->type == H264_NAL_IDR_SLICE);
1647 
1648  if (nal->type == H264_NAL_IDR_SLICE ||
1649  (h->recovery_frame == h->poc.frame_num && nal->ref_idc)) {
1650  h->recovery_frame = -1;
1651  h->cur_pic_ptr->recovered = 1;
1652  }
1653  // If we have an IDR, all frames after it in decoded order are
1654  // "recovered".
1655  if (nal->type == H264_NAL_IDR_SLICE)
1656  h->frame_recovered |= FRAME_RECOVERED_IDR;
1657 #if 1
1658  h->cur_pic_ptr->recovered |= h->frame_recovered;
1659 #else
1660  h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);
1661 #endif
1662 
1663  /* Set the frame properties/side data. Only done for the second field in
1664  * field coded frames, since some SEI information is present for each field
1665  * and is merged by the SEI parsing code. */
1666  if (!FIELD_PICTURE(h) || !h->first_field || h->missing_fields > 1) {
 /* NOTE(review): the two checked calls in this branch (original lines
  * 1667 and 1671, presumably h264_export_frame_props and
  * h264_select_output_frame) were lost in this extract. */
1668  if (ret < 0)
1669  return ret;
1670 
1672  if (ret < 0)
1673  return ret;
1674  }
1675 
1676  return 0;
1677 }
1678 
 /* NOTE(review): the first signature line (static int
  * h264_slice_header_parse(...), original line 1679) was lost in this
  * extract; the continuation line with the remaining parameter follows.
  * Parses a slice header from sl->gb into the slice context, validating each
  * syntax element. Returns 0 on success or a negative AVERROR code. */
1680  const H2645NAL *nal)
1681 {
1682  const SPS *sps;
1683  const PPS *pps;
1684  int ret;
1685  unsigned int slice_type, tmp, i;
1686  int field_pic_flag, bottom_field_flag;
1687  int first_slice = sl == h->slice_ctx && !h->current_slice;
1688  int picture_structure;
1689 
1690  if (first_slice)
1691  av_assert0(!h->setup_finished);
1692 
1693  sl->first_mb_addr = get_ue_golomb_long(&sl->gb);
1694 
1695  slice_type = get_ue_golomb_31(&sl->gb);
1696  if (slice_type > 9) {
1697  av_log(h->avctx, AV_LOG_ERROR,
1698  "slice type %d too large at %d\n",
1699  slice_type, sl->first_mb_addr);
1700  return AVERROR_INVALIDDATA;
1701  }
 /* Types 5-9 mean "all slices of the picture have this type"; fold them
  * onto 0-4 and remember the fixed-type property. */
1702  if (slice_type > 4) {
1703  slice_type -= 5;
1704  sl->slice_type_fixed = 1;
1705  } else
1706  sl->slice_type_fixed = 0;
1707 
1708  slice_type = ff_h264_golomb_to_pict_type[slice_type];
1709  sl->slice_type = slice_type;
1710  sl->slice_type_nos = slice_type & 3;
1711 
 /* NOTE(review): the second half of this condition (original line 1713,
  * checking slice_type_nos != AV_PICTURE_TYPE_I) was lost in this
  * extract. */
1712  if (nal->type == H264_NAL_IDR_SLICE &&
1714  av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
1715  return AVERROR_INVALIDDATA;
1716  }
1717 
1718  sl->pps_id = get_ue_golomb(&sl->gb);
1719  if (sl->pps_id >= MAX_PPS_COUNT) {
1720  av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", sl->pps_id);
1721  return AVERROR_INVALIDDATA;
1722  }
1723  if (!h->ps.pps_list[sl->pps_id]) {
1724  av_log(h->avctx, AV_LOG_ERROR,
1725  "non-existing PPS %u referenced\n",
1726  sl->pps_id);
1727  return AVERROR_INVALIDDATA;
1728  }
1729  pps = h->ps.pps_list[sl->pps_id];
1730  sps = pps->sps;
1731 
1732  sl->frame_num = get_bits(&sl->gb, sps->log2_max_frame_num);
 /* All slices of one picture must carry the same frame_num. */
1733  if (!first_slice) {
1734  if (h->poc.frame_num != sl->frame_num) {
1735  av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n",
1736  h->poc.frame_num, sl->frame_num);
1737  return AVERROR_INVALIDDATA;
1738  }
1739  }
1740 
1741  sl->mb_mbaff = 0;
1742 
1743  if (sps->frame_mbs_only_flag) {
1744  picture_structure = PICT_FRAME;
1745  } else {
1746  if (!sps->direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
1747  av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
1748  return -1;
1749  }
1750  field_pic_flag = get_bits1(&sl->gb);
1751  if (field_pic_flag) {
1752  bottom_field_flag = get_bits1(&sl->gb);
1753  picture_structure = PICT_TOP_FIELD + bottom_field_flag;
1754  } else {
1755  picture_structure = PICT_FRAME;
1756  }
1757  }
1758  sl->picture_structure = picture_structure;
1759  sl->mb_field_decoding_flag = picture_structure != PICT_FRAME;
1760 
 /* Picture numbering: fields use 2*frame_num+1 and one extra bit of range. */
1761  if (picture_structure == PICT_FRAME) {
1762  sl->curr_pic_num = sl->frame_num;
1763  sl->max_pic_num = 1 << sps->log2_max_frame_num;
1764  } else {
1765  sl->curr_pic_num = 2 * sl->frame_num + 1;
1766  sl->max_pic_num = 1 << (sps->log2_max_frame_num + 1);
1767  }
1768 
1769  if (nal->type == H264_NAL_IDR_SLICE) {
1770  unsigned idr_pic_id = get_ue_golomb_long(&sl->gb);
1771  if (idr_pic_id < 65536) {
1772  sl->idr_pic_id = idr_pic_id;
1773  } else
1774  av_log(h->avctx, AV_LOG_WARNING, "idr_pic_id is invalid\n");
1775  }
1776 
1777  sl->poc_lsb = 0;
1778  sl->delta_poc_bottom = 0;
1779  if (sps->poc_type == 0) {
1780  sl->poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb);
1781 
1782  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
1783  sl->delta_poc_bottom = get_se_golomb(&sl->gb);
1784  }
1785 
1786  sl->delta_poc[0] = sl->delta_poc[1] = 0;
1787  if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) {
1788  sl->delta_poc[0] = get_se_golomb(&sl->gb);
1789 
1790  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
1791  sl->delta_poc[1] = get_se_golomb(&sl->gb);
1792  }
1793 
1794  sl->redundant_pic_count = 0;
1795  if (pps->redundant_pic_cnt_present)
1796  sl->redundant_pic_count = get_ue_golomb(&sl->gb);
1797 
1798  if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
1799  sl->direct_spatial_mv_pred = get_bits1(&sl->gb);
1800 
 /* NOTE(review): the call whose trailing arguments follow (original line
  * 1801, presumably ff_h264_parse_ref_count) was lost in this extract. */
1802  &sl->gb, pps, sl->slice_type_nos,
1803  picture_structure, h->avctx);
1804  if (ret < 0)
1805  return ret;
1806 
1807  if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
 /* NOTE(review): the ref-list-modification parsing call (original line
  * 1808) was lost in this extract. */
1809  if (ret < 0) {
1810  sl->ref_count[1] = sl->ref_count[0] = 0;
1811  return ret;
1812  }
1813  }
1814 
1815  sl->pwt.use_weight = 0;
1816  for (i = 0; i < 2; i++) {
1817  sl->pwt.luma_weight_flag[i] = 0;
1818  sl->pwt.chroma_weight_flag[i] = 0;
1819  }
 /* NOTE(review): the tail of this condition and the pred-weight-table
  * parsing call (original lines 1822-1823) were lost in this extract. */
1820  if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
1821  (pps->weighted_bipred_idc == 1 &&
1824  sl->slice_type_nos, &sl->pwt,
1825  picture_structure, h->avctx);
1826  if (ret < 0)
1827  return ret;
1828  }
1829 
1830  sl->explicit_ref_marking = 0;
1831  if (nal->ref_idc) {
1832  ret = ff_h264_decode_ref_pic_marking(sl, &sl->gb, nal, h->avctx);
1833  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1834  return AVERROR_INVALIDDATA;
1835  }
1836 
1837  if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) {
1838  tmp = get_ue_golomb_31(&sl->gb);
1839  if (tmp > 2) {
1840  av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp);
1841  return AVERROR_INVALIDDATA;
1842  }
1843  sl->cabac_init_idc = tmp;
1844  }
1845 
1846  sl->last_qscale_diff = 0;
1847  tmp = pps->init_qp + (unsigned)get_se_golomb(&sl->gb);
 /* Valid QP range scales with bit depth: 0..51 at 8 bit, +6 per extra bit. */
1848  if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) {
1849  av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
1850  return AVERROR_INVALIDDATA;
1851  }
1852  sl->qscale = tmp;
1853  sl->chroma_qp[0] = get_chroma_qp(pps, 0, sl->qscale);
1854  sl->chroma_qp[1] = get_chroma_qp(pps, 1, sl->qscale);
1855  // FIXME qscale / qp ... stuff
1856  if (sl->slice_type == AV_PICTURE_TYPE_SP)
1857  get_bits1(&sl->gb); /* sp_for_switch_flag */
 /* NOTE(review): the SI-type half of this condition (original line 1859)
  * was lost in this extract. */
1858  if (sl->slice_type == AV_PICTURE_TYPE_SP ||
1860  get_se_golomb(&sl->gb); /* slice_qs_delta */
1861 
1862  sl->deblocking_filter = 1;
1863  sl->slice_alpha_c0_offset = 0;
1864  sl->slice_beta_offset = 0;
1865  if (pps->deblocking_filter_parameters_present) {
1866  tmp = get_ue_golomb_31(&sl->gb);
1867  if (tmp > 2) {
1868  av_log(h->avctx, AV_LOG_ERROR,
1869  "deblocking_filter_idc %u out of range\n", tmp);
1870  return AVERROR_INVALIDDATA;
1871  }
1872  sl->deblocking_filter = tmp;
 /* Bitstream semantics: 0 = on, 1 = off; internal: 1 = on, 0 = off. */
1873  if (sl->deblocking_filter < 2)
1874  sl->deblocking_filter ^= 1; // 1<->0
1875 
1876  if (sl->deblocking_filter) {
1877  int slice_alpha_c0_offset_div2 = get_se_golomb(&sl->gb);
1878  int slice_beta_offset_div2 = get_se_golomb(&sl->gb);
1879  if (slice_alpha_c0_offset_div2 > 6 ||
1880  slice_alpha_c0_offset_div2 < -6 ||
1881  slice_beta_offset_div2 > 6 ||
1882  slice_beta_offset_div2 < -6) {
1883  av_log(h->avctx, AV_LOG_ERROR,
1884  "deblocking filter parameters %d %d out of range\n",
1885  slice_alpha_c0_offset_div2, slice_beta_offset_div2);
1886  return AVERROR_INVALIDDATA;
1887  }
1888  sl->slice_alpha_c0_offset = slice_alpha_c0_offset_div2 * 2;
1889  sl->slice_beta_offset = slice_beta_offset_div2 * 2;
1890  }
1891  }
1892 
1893  return 0;
1894 }
1895 
1896 /* do all the per-slice initialization needed before we can start decoding the
1897  * actual MBs */
1899  const H2645NAL *nal)
1900 {
1901  int i, j, ret = 0;
1902 
1903  if (h->picture_idr && nal->type != H264_NAL_IDR_SLICE) {
1904  av_log(h->avctx, AV_LOG_ERROR, "Invalid mix of IDR and non-IDR slices\n");
1905  return AVERROR_INVALIDDATA;
1906  }
1907 
1908  av_assert1(h->mb_num == h->mb_width * h->mb_height);
1909  if (sl->first_mb_addr << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num ||
1910  sl->first_mb_addr >= h->mb_num) {
1911  av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
1912  return AVERROR_INVALIDDATA;
1913  }
1914  sl->resync_mb_x = sl->mb_x = sl->first_mb_addr % h->mb_width;
1915  sl->resync_mb_y = sl->mb_y = (sl->first_mb_addr / h->mb_width) <<
1917  if (h->picture_structure == PICT_BOTTOM_FIELD)
1918  sl->resync_mb_y = sl->mb_y = sl->mb_y + 1;
1919  av_assert1(sl->mb_y < h->mb_height);
1920 
1921  ret = ff_h264_build_ref_list(h, sl);
1922  if (ret < 0)
1923  return ret;
1924 
1925  if (h->ps.pps->weighted_bipred_idc == 2 &&
1927  implicit_weight_table(h, sl, -1);
1928  if (FRAME_MBAFF(h)) {
1929  implicit_weight_table(h, sl, 0);
1930  implicit_weight_table(h, sl, 1);
1931  }
1932  }
1933 
1936  if (!h->setup_finished)
1938 
1939  if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
1940  (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY &&
1941  h->nal_unit_type != H264_NAL_IDR_SLICE) ||
1942  (h->avctx->skip_loop_filter >= AVDISCARD_NONINTRA &&
1944  (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR &&
1946  (h->avctx->skip_loop_filter >= AVDISCARD_NONREF &&
1947  nal->ref_idc == 0))
1948  sl->deblocking_filter = 0;
1949 
1950  if (sl->deblocking_filter == 1 && h->nb_slice_ctx > 1) {
1951  if (h->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
1952  /* Cheat slightly for speed:
1953  * Do not bother to deblock across slices. */
1954  sl->deblocking_filter = 2;
1955  } else {
1956  h->postpone_filter = 1;
1957  }
1958  }
1959  sl->qp_thresh = 15 -
1961  FFMAX3(0,
1962  h->ps.pps->chroma_qp_index_offset[0],
1963  h->ps.pps->chroma_qp_index_offset[1]) +
1964  6 * (h->ps.sps->bit_depth_luma - 8);
1965 
1966  sl->slice_num = ++h->current_slice;
1967 
1968  if (sl->slice_num)
1969  h->slice_row[(sl->slice_num-1)&(MAX_SLICES-1)]= sl->resync_mb_y;
1970  if ( h->slice_row[sl->slice_num&(MAX_SLICES-1)] + 3 >= sl->resync_mb_y
1971  && h->slice_row[sl->slice_num&(MAX_SLICES-1)] <= sl->resync_mb_y
1972  && sl->slice_num >= MAX_SLICES) {
1973  //in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case
1974  av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", sl->slice_num, MAX_SLICES);
1975  }
1976 
1977  for (j = 0; j < 2; j++) {
1978  int id_list[16];
1979  int *ref2frm = h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][j];
1980  for (i = 0; i < 16; i++) {
1981  id_list[i] = 60;
1982  if (j < sl->list_count && i < sl->ref_count[j] &&
1983  sl->ref_list[j][i].parent->f->buf[0]) {
1984  int k;
1985  const AVBuffer *buf = sl->ref_list[j][i].parent->f->buf[0]->buffer;
1986  for (k = 0; k < h->short_ref_count; k++)
1987  if (h->short_ref[k]->f->buf[0]->buffer == buf) {
1988  id_list[i] = k;
1989  break;
1990  }
1991  for (k = 0; k < h->long_ref_count; k++)
1992  if (h->long_ref[k] && h->long_ref[k]->f->buf[0]->buffer == buf) {
1993  id_list[i] = h->short_ref_count + k;
1994  break;
1995  }
1996  }
1997  }
1998 
1999  ref2frm[0] =
2000  ref2frm[1] = -1;
2001  for (i = 0; i < 16; i++)
2002  ref2frm[i + 2] = 4 * id_list[i] + (sl->ref_list[j][i].reference & 3);
2003  ref2frm[18 + 0] =
2004  ref2frm[18 + 1] = -1;
2005  for (i = 16; i < 48; i++)
2006  ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
2007  (sl->ref_list[j][i].reference & 3);
2008  }
2009 
2010  if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
2011  av_log(h->avctx, AV_LOG_DEBUG,
2012  "slice:%d %c mb:%d %c%s%s frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
2013  sl->slice_num,
2014  (h->picture_structure == PICT_FRAME ? 'F' : h->picture_structure == PICT_TOP_FIELD ? 'T' : 'B'),
2015  sl->mb_y * h->mb_width + sl->mb_x,
2017  sl->slice_type_fixed ? " fix" : "",
2018  nal->type == H264_NAL_IDR_SLICE ? " IDR" : "",
2019  h->poc.frame_num,
2020  h->cur_pic_ptr->field_poc[0],
2021  h->cur_pic_ptr->field_poc[1],
2022  sl->ref_count[0], sl->ref_count[1],
2023  sl->qscale,
2024  sl->deblocking_filter,
2026  sl->pwt.use_weight,
2027  sl->pwt.use_weight == 1 && sl->pwt.use_weight_chroma ? "c" : "",
2028  sl->slice_type == AV_PICTURE_TYPE_B ? (sl->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
2029  }
2030 
2031  return 0;
2032 }
2033 
2035 {
2036  H264SliceContext *sl = h->slice_ctx + h->nb_slice_ctx_queued;
2037  int first_slice = sl == h->slice_ctx && !h->current_slice;
2038  int ret;
2039 
2040  sl->gb = nal->gb;
2041 
2042  ret = h264_slice_header_parse(h, sl, nal);
2043  if (ret < 0)
2044  return ret;
2045 
2046  // discard redundant pictures
2047  if (sl->redundant_pic_count > 0) {
2048  sl->ref_count[0] = sl->ref_count[1] = 0;
2049  return 0;
2050  }
2051 
2052  if (sl->first_mb_addr == 0 || !h->current_slice) {
2053  if (h->setup_finished) {
2054  av_log(h->avctx, AV_LOG_ERROR, "Too many fields\n");
2055  return AVERROR_INVALIDDATA;
2056  }
2057  }
2058 
2059  if (sl->first_mb_addr == 0) { // FIXME better field boundary detection
2060  if (h->current_slice) {
2061  // this slice starts a new field
2062  // first decode any pending queued slices
2063  if (h->nb_slice_ctx_queued) {
2064  H264SliceContext tmp_ctx;
2065 
2067  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
2068  return ret;
2069 
2070  memcpy(&tmp_ctx, h->slice_ctx, sizeof(tmp_ctx));
2071  memcpy(h->slice_ctx, sl, sizeof(tmp_ctx));
2072  memcpy(sl, &tmp_ctx, sizeof(tmp_ctx));
2073  sl = h->slice_ctx;
2074  }
2075 
2076  if (h->cur_pic_ptr && FIELD_PICTURE(h) && h->first_field) {
2077  ret = ff_h264_field_end(h, h->slice_ctx, 1);
2078  if (ret < 0)
2079  return ret;
2080  } else if (h->cur_pic_ptr && !FIELD_PICTURE(h) && !h->first_field && h->nal_unit_type == H264_NAL_IDR_SLICE) {
2081  av_log(h, AV_LOG_WARNING, "Broken frame packetizing\n");
2082  ret = ff_h264_field_end(h, h->slice_ctx, 1);
2083  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
2084  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
2085  h->cur_pic_ptr = NULL;
2086  if (ret < 0)
2087  return ret;
2088  } else
2089  return AVERROR_INVALIDDATA;
2090  }
2091 
2092  if (!h->first_field) {
2093  if (h->cur_pic_ptr && !h->droppable) {
2094  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
2095  h->picture_structure == PICT_BOTTOM_FIELD);
2096  }
2097  h->cur_pic_ptr = NULL;
2098  }
2099  }
2100 
2101  if (!h->current_slice)
2102  av_assert0(sl == h->slice_ctx);
2103 
2104  if (h->current_slice == 0 && !h->first_field) {
2105  if (
2106  (h->avctx->skip_frame >= AVDISCARD_NONREF && !h->nal_ref_idc) ||
2107  (h->avctx->skip_frame >= AVDISCARD_BIDIR && sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
2108  (h->avctx->skip_frame >= AVDISCARD_NONINTRA && sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
2109  (h->avctx->skip_frame >= AVDISCARD_NONKEY && h->nal_unit_type != H264_NAL_IDR_SLICE && h->sei.recovery_point.recovery_frame_cnt < 0) ||
2110  h->avctx->skip_frame >= AVDISCARD_ALL) {
2111  return 0;
2112  }
2113  }
2114 
2115  if (!first_slice) {
2116  const PPS *pps = h->ps.pps_list[sl->pps_id];
2117 
2118  if (h->ps.pps->sps_id != pps->sps_id ||
2119  h->ps.pps->transform_8x8_mode != pps->transform_8x8_mode /*||
2120  (h->setup_finished && h->ps.pps != pps)*/) {
2121  av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n");
2122  return AVERROR_INVALIDDATA;
2123  }
2124  if (h->ps.sps != pps->sps) {
2125  av_log(h->avctx, AV_LOG_ERROR,
2126  "SPS changed in the middle of the frame\n");
2127  return AVERROR_INVALIDDATA;
2128  }
2129  }
2130 
2131  if (h->current_slice == 0) {
2132  ret = h264_field_start(h, sl, nal, first_slice);
2133  if (ret < 0)
2134  return ret;
2135  } else {
2136  if (h->picture_structure != sl->picture_structure ||
2137  h->droppable != (nal->ref_idc == 0)) {
2138  av_log(h->avctx, AV_LOG_ERROR,
2139  "Changing field mode (%d -> %d) between slices is not allowed\n",
2140  h->picture_structure, sl->picture_structure);
2141  return AVERROR_INVALIDDATA;
2142  } else if (!h->cur_pic_ptr) {
2143  av_log(h->avctx, AV_LOG_ERROR,
2144  "unset cur_pic_ptr on slice %d\n",
2145  h->current_slice + 1);
2146  return AVERROR_INVALIDDATA;
2147  }
2148  }
2149 
2150  ret = h264_slice_init(h, sl, nal);
2151  if (ret < 0)
2152  return ret;
2153 
2154  h->nb_slice_ctx_queued++;
2155 
2156  return 0;
2157 }
2158 
2160 {
2161  switch (sl->slice_type) {
2162  case AV_PICTURE_TYPE_P:
2163  return 0;
2164  case AV_PICTURE_TYPE_B:
2165  return 1;
2166  case AV_PICTURE_TYPE_I:
2167  return 2;
2168  case AV_PICTURE_TYPE_SP:
2169  return 3;
2170  case AV_PICTURE_TYPE_SI:
2171  return 4;
2172  default:
2173  return AVERROR_INVALIDDATA;
2174  }
2175 }
2176 
2178  H264SliceContext *sl,
2179  int mb_type, int top_xy,
2180  const int left_xy[LEFT_MBS],
2181  int top_type,
2182  const int left_type[LEFT_MBS],
2183  int mb_xy, int list)
2184 {
2185  int b_stride = h->b_stride;
2186  int16_t(*mv_dst)[2] = &sl->mv_cache[list][scan8[0]];
2187  int8_t *ref_cache = &sl->ref_cache[list][scan8[0]];
2188  if (IS_INTER(mb_type) || IS_DIRECT(mb_type)) {
2189  if (USES_LIST(top_type, list)) {
2190  const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
2191  const int b8_xy = 4 * top_xy + 2;
2192  const int *ref2frm = &h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2193  AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
2194  ref_cache[0 - 1 * 8] =
2195  ref_cache[1 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 0]];
2196  ref_cache[2 - 1 * 8] =
2197  ref_cache[3 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 1]];
2198  } else {
2199  AV_ZERO128(mv_dst - 1 * 8);
2200  AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2201  }
2202 
2203  if (!IS_INTERLACED(mb_type ^ left_type[LTOP])) {
2204  if (USES_LIST(left_type[LTOP], list)) {
2205  const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
2206  const int b8_xy = 4 * left_xy[LTOP] + 1;
2207  const int *ref2frm = &h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2208  AV_COPY32(mv_dst - 1 + 0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
2209  AV_COPY32(mv_dst - 1 + 8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
2210  AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
2211  AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
2212  ref_cache[-1 + 0] =
2213  ref_cache[-1 + 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
2214  ref_cache[-1 + 16] =
2215  ref_cache[-1 + 24] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
2216  } else {
2217  AV_ZERO32(mv_dst - 1 + 0);
2218  AV_ZERO32(mv_dst - 1 + 8);
2219  AV_ZERO32(mv_dst - 1 + 16);
2220  AV_ZERO32(mv_dst - 1 + 24);
2221  ref_cache[-1 + 0] =
2222  ref_cache[-1 + 8] =
2223  ref_cache[-1 + 16] =
2224  ref_cache[-1 + 24] = LIST_NOT_USED;
2225  }
2226  }
2227  }
2228 
2229  if (!USES_LIST(mb_type, list)) {
2230  fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0, 0), 4);
2231  AV_WN32A(&ref_cache[0 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2232  AV_WN32A(&ref_cache[1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2233  AV_WN32A(&ref_cache[2 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2234  AV_WN32A(&ref_cache[3 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2235  return;
2236  }
2237 
2238  {
2239  const int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
2240  const int *ref2frm = &h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2241  uint32_t ref01 = (pack16to32(ref2frm[ref[0]], ref2frm[ref[1]]) & 0x00FF00FF) * 0x0101;
2242  uint32_t ref23 = (pack16to32(ref2frm[ref[2]], ref2frm[ref[3]]) & 0x00FF00FF) * 0x0101;
2243  AV_WN32A(&ref_cache[0 * 8], ref01);
2244  AV_WN32A(&ref_cache[1 * 8], ref01);
2245  AV_WN32A(&ref_cache[2 * 8], ref23);
2246  AV_WN32A(&ref_cache[3 * 8], ref23);
2247  }
2248 
2249  {
2250  int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * sl->mb_x + 4 * sl->mb_y * b_stride];
2251  AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
2252  AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
2253  AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
2254  AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
2255  }
2256 }
2257 
2258 /**
2259  * @return non zero if the loop filter can be skipped
2260  */
static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
{
    const int mb_xy = sl->mb_xy;
    int top_xy, left_xy[LEFT_MBS];
    int top_type, left_type[LEFT_MBS];
    const uint8_t *nnz;
    uint8_t *nnz_cache;

    /* Address of the MB above; in field mode the vertical neighbour is two
     * rows up, hence the stride shift. */
    top_xy = mb_xy - (h->mb_stride << MB_FIELD(sl));

    left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
    if (FRAME_MBAFF(h)) {
        /* Adjust neighbour addresses when the field/frame coding of the
         * current MB pair differs from its neighbours. */
        const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
        const int curr_mb_field_flag = IS_INTERLACED(mb_type);
        if (sl->mb_y & 1) {
            if (left_mb_field_flag != curr_mb_field_flag)
                left_xy[LTOP] -= h->mb_stride;
        } else {
            if (curr_mb_field_flag)
                top_xy += h->mb_stride &
                          (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
            if (left_mb_field_flag != curr_mb_field_flag)
                left_xy[LBOT] += h->mb_stride;
        }
    }

    sl->top_mb_xy        = top_xy;
    sl->left_mb_xy[LTOP] = left_xy[LTOP];
    sl->left_mb_xy[LBOT] = left_xy[LBOT];
    {
        /* For sufficiently low qp, filtering wouldn't do anything.
         * This is a conservative estimate: could also check beta_offset
         * and more accurate chroma_qp. */
        int qp_thresh = sl->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
        int qp = h->cur_pic.qscale_table[mb_xy];
        if (qp <= qp_thresh &&
            (left_xy[LTOP] < 0 ||
             ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
            (top_xy < 0 ||
             ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
            if (!FRAME_MBAFF(h))
                return 1;
            /* In MBAFF also check the second left/top neighbour. */
            if ((left_xy[LTOP] < 0 ||
                 ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
                (top_xy < h->mb_stride ||
                 ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
                return 1;
        }
    }

    top_type        = h->cur_pic.mb_type[top_xy];
    left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
    left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
    if (sl->deblocking_filter == 2) {
        /* Filter mode 2: do not filter across slice boundaries. */
        if (h->slice_table[top_xy] != sl->slice_num)
            top_type = 0;
        if (h->slice_table[left_xy[LBOT]] != sl->slice_num)
            left_type[LTOP] = left_type[LBOT] = 0;
    } else {
        /* 0xFFFF marks unavailable MBs in the slice table. */
        if (h->slice_table[top_xy] == 0xFFFF)
            top_type = 0;
        if (h->slice_table[left_xy[LBOT]] == 0xFFFF)
            left_type[LTOP] = left_type[LBOT] = 0;
    }
    sl->top_type        = top_type;
    sl->left_type[LTOP] = left_type[LTOP];
    sl->left_type[LBOT] = left_type[LBOT];

    /* Intra MBs always get the strongest filtering; no caches needed. */
    if (IS_INTRA(mb_type))
        return 0;

    fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
                             top_type, left_type, mb_xy, 0);
    if (sl->list_count == 2)
        fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
                                 top_type, left_type, mb_xy, 1);

    /* Copy the non-zero-coefficient counts of the current MB and its
     * neighbours into the cache layout the filter expects. */
    nnz       = h->non_zero_count[mb_xy];
    nnz_cache = sl->non_zero_count_cache;
    AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
    AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
    AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
    AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
    sl->cbp = h->cbp_table[mb_xy];

    if (top_type) {
        nnz = h->non_zero_count[top_xy];
        AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
    }

    if (left_type[LTOP]) {
        nnz = h->non_zero_count[left_xy[LTOP]];
        nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
        nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
        nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
        nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
    }

    /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
     * from what the loop filter needs */
    if (!CABAC(h) && h->ps.pps->transform_8x8_mode) {
        if (IS_8x8DCT(top_type)) {
            nnz_cache[4 + 8 * 0] =
            nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
            nnz_cache[6 + 8 * 0] =
            nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12;
        }
        if (IS_8x8DCT(left_type[LTOP])) {
            nnz_cache[3 + 8 * 1] =
            nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF
        }
        if (IS_8x8DCT(left_type[LBOT])) {
            nnz_cache[3 + 8 * 3] =
            nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF
        }

        if (IS_8x8DCT(mb_type)) {
            nnz_cache[scan8[0]] =
            nnz_cache[scan8[1]] =
            nnz_cache[scan8[2]] =
            nnz_cache[scan8[3]] = (sl->cbp & 0x1000) >> 12;

            nnz_cache[scan8[0 + 4]] =
            nnz_cache[scan8[1 + 4]] =
            nnz_cache[scan8[2 + 4]] =
            nnz_cache[scan8[3 + 4]] = (sl->cbp & 0x2000) >> 12;

            nnz_cache[scan8[0 + 8]] =
            nnz_cache[scan8[1 + 8]] =
            nnz_cache[scan8[2 + 8]] =
            nnz_cache[scan8[3 + 8]] = (sl->cbp & 0x4000) >> 12;

            nnz_cache[scan8[0 + 12]] =
            nnz_cache[scan8[1 + 12]] =
            nnz_cache[scan8[2 + 12]] =
            nnz_cache[scan8[3 + 12]] = (sl->cbp & 0x8000) >> 12;
        }
    }

    return 0;
}
2402 
/* Run the deblocking filter over MB columns [start_x, end_x) of the current
 * MB row (both rows of the pair in MBAFF). No-op while filtering is
 * postponed; restores slice_type/mb position/chroma_qp on exit. */
static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
{
    uint8_t *dest_y, *dest_cb, *dest_cr;
    int linesize, uvlinesize, mb_x, mb_y;
    const int end_mb_y       = sl->mb_y + FRAME_MBAFF(h);
    const int old_slice_type = sl->slice_type;
    const int pixel_shift    = h->pixel_shift;
    const int block_h        = 16 >> h->chroma_y_shift;

    if (h->postpone_filter)
        return;

    if (sl->deblocking_filter) {
        for (mb_x = start_x; mb_x < end_x; mb_x++)
            for (mb_y = end_mb_y - FRAME_MBAFF(h); mb_y <= end_mb_y; mb_y++) {
                int mb_xy, mb_type;
                mb_xy         = sl->mb_xy = mb_x + mb_y * h->mb_stride;
                mb_type       = h->cur_pic.mb_type[mb_xy];

                if (FRAME_MBAFF(h))
                    sl->mb_mbaff               =
                    sl->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);

                sl->mb_x = mb_x;
                sl->mb_y = mb_y;
                /* Compute the plane pointers for this MB. */
                dest_y  = h->cur_pic.f->data[0] +
                          ((mb_x << pixel_shift) + mb_y * sl->linesize) * 16;
                dest_cb = h->cur_pic.f->data[1] +
                          (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
                          mb_y * sl->uvlinesize * block_h;
                dest_cr = h->cur_pic.f->data[2] +
                          (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
                          mb_y * sl->uvlinesize * block_h;
                // FIXME simplify above

                if (MB_FIELD(sl)) {
                    /* Field MB: double the stride, odd rows start one line down. */
                    linesize   = sl->mb_linesize   = sl->linesize   * 2;
                    uvlinesize = sl->mb_uvlinesize = sl->uvlinesize * 2;
                    if (mb_y & 1) { // FIXME move out of this function?
                        dest_y  -= sl->linesize   * 15;
                        dest_cb -= sl->uvlinesize * (block_h - 1);
                        dest_cr -= sl->uvlinesize * (block_h - 1);
                    }
                } else {
                    linesize   = sl->mb_linesize   = sl->linesize;
                    uvlinesize = sl->mb_uvlinesize = sl->uvlinesize;
                }
                backup_mb_border(h, sl, dest_y, dest_cb, dest_cr, linesize,
                                 uvlinesize, 0);
                /* Non-zero return means the filter is a no-op for this MB. */
                if (fill_filter_caches(h, sl, mb_type))
                    continue;
                sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mb_xy]);
                sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mb_xy]);

                if (FRAME_MBAFF(h)) {
                    ff_h264_filter_mb(h, sl, mb_x, mb_y, dest_y, dest_cb, dest_cr,
                                      linesize, uvlinesize);
                } else {
                    ff_h264_filter_mb_fast(h, sl, mb_x, mb_y, dest_y, dest_cb,
                                           dest_cr, linesize, uvlinesize);
                }
            }
    }
    /* Restore per-slice state clobbered above. */
    sl->slice_type  = old_slice_type;
    sl->mb_x         = end_x;
    sl->mb_y         = end_mb_y - FRAME_MBAFF(h);
    sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
    sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
}
2472 
2474 {
2475  const int mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
2476  int mb_type = (h->slice_table[mb_xy - 1] == sl->slice_num) ?
2477  h->cur_pic.mb_type[mb_xy - 1] :
2478  (h->slice_table[mb_xy - h->mb_stride] == sl->slice_num) ?
2479  h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
2480  sl->mb_mbaff = sl->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
2481 }
2482 
2483 /**
2484  * Draw edges and report progress for the last MB row.
2485  */
2487 {
2488  int top = 16 * (sl->mb_y >> FIELD_PICTURE(h));
2489  int pic_height = 16 * h->mb_height >> FIELD_PICTURE(h);
2490  int height = 16 << FRAME_MBAFF(h);
2491  int deblock_border = (16 + 4) << FRAME_MBAFF(h);
2492 
2493  if (sl->deblocking_filter) {
2494  if ((top + height) >= pic_height)
2495  height += deblock_border;
2496  top -= deblock_border;
2497  }
2498 
2499  if (top >= pic_height || (top + height) < 0)
2500  return;
2501 
2502  height = FFMIN(height, pic_height - top);
2503  if (top < 0) {
2504  height = top + height;
2505  top = 0;
2506  }
2507 
2508  ff_h264_draw_horiz_band(h, sl, top, height);
2509 
2510  if (h->droppable || h->er.error_occurred)
2511  return;
2512 
2513  ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
2514  h->picture_structure == PICT_BOTTOM_FIELD);
2515 }
2516 
2518  int startx, int starty,
2519  int endx, int endy, int status)
2520 {
2521  if (!sl->h264->enable_er)
2522  return;
2523 
2524  if (CONFIG_ERROR_RESILIENCE) {
2525  ff_er_add_slice(sl->er, startx, starty, endx, endy, status);
2526  }
2527 }
2528 
2529 static int decode_slice(struct AVCodecContext *avctx, void *arg)
2530 {
2531  H264SliceContext *sl = arg;
2532  const H264Context *h = sl->h264;
2533  int lf_x_start = sl->mb_x;
2534  int orig_deblock = sl->deblocking_filter;
2535  int ret;
2536 
2537  sl->linesize = h->cur_pic_ptr->f->linesize[0];
2538  sl->uvlinesize = h->cur_pic_ptr->f->linesize[1];
2539 
2540  ret = alloc_scratch_buffers(sl, sl->linesize);
2541  if (ret < 0)
2542  return ret;
2543 
2544  sl->mb_skip_run = -1;
2545 
2546  av_assert0(h->block_offset[15] == (4 * ((scan8[15] - scan8[0]) & 7) << h->pixel_shift) + 4 * sl->linesize * ((scan8[15] - scan8[0]) >> 3));
2547 
2548  if (h->postpone_filter)
2549  sl->deblocking_filter = 0;
2550 
2551  sl->is_complex = FRAME_MBAFF(h) || h->picture_structure != PICT_FRAME ||
2552  (CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
2553 
2554  if (!(h->avctx->active_thread_type & FF_THREAD_SLICE) && h->picture_structure == PICT_FRAME && sl->er->error_status_table) {
2555  const int start_i = av_clip(sl->resync_mb_x + sl->resync_mb_y * h->mb_width, 0, h->mb_num - 1);
2556  if (start_i) {
2557  int prev_status = sl->er->error_status_table[sl->er->mb_index2xy[start_i - 1]];
2558  prev_status &= ~ VP_START;
2559  if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
2560  sl->er->error_occurred = 1;
2561  }
2562  }
2563 
2564  if (h->ps.pps->cabac) {
2565  /* realign */
2566  align_get_bits(&sl->gb);
2567 
2568  /* init cabac */
2570  sl->gb.buffer + get_bits_count(&sl->gb) / 8,
2571  (get_bits_left(&sl->gb) + 7) / 8);
2572  if (ret < 0)
2573  return ret;
2574 
2576 
2577  for (;;) {
2578  int ret, eos;
2579  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2580  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2581  sl->next_slice_idx);
2582  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2583  sl->mb_y, ER_MB_ERROR);
2584  return AVERROR_INVALIDDATA;
2585  }
2586 
2587  ret = ff_h264_decode_mb_cabac(h, sl);
2588 
2589  if (ret >= 0)
2590  ff_h264_hl_decode_mb(h, sl);
2591 
2592  // FIXME optimal? or let mb_decode decode 16x32 ?
2593  if (ret >= 0 && FRAME_MBAFF(h)) {
2594  sl->mb_y++;
2595 
2596  ret = ff_h264_decode_mb_cabac(h, sl);
2597 
2598  if (ret >= 0)
2599  ff_h264_hl_decode_mb(h, sl);
2600  sl->mb_y--;
2601  }
2602  eos = get_cabac_terminate(&sl->cabac);
2603 
2604  if ((h->workaround_bugs & FF_BUG_TRUNCATED) &&
2605  sl->cabac.bytestream > sl->cabac.bytestream_end + 2) {
2606  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2607  sl->mb_y, ER_MB_END);
2608  if (sl->mb_x >= lf_x_start)
2609  loop_filter(h, sl, lf_x_start, sl->mb_x + 1);
2610  goto finish;
2611  }
2612  if (sl->cabac.bytestream > sl->cabac.bytestream_end + 2 )
2613  av_log(h->avctx, AV_LOG_DEBUG, "bytestream overread %"PTRDIFF_SPECIFIER"\n", sl->cabac.bytestream_end - sl->cabac.bytestream);
2614  if (ret < 0 || sl->cabac.bytestream > sl->cabac.bytestream_end + 4) {
2615  av_log(h->avctx, AV_LOG_ERROR,
2616  "error while decoding MB %d %d, bytestream %"PTRDIFF_SPECIFIER"\n",
2617  sl->mb_x, sl->mb_y,
2618  sl->cabac.bytestream_end - sl->cabac.bytestream);
2619  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2620  sl->mb_y, ER_MB_ERROR);
2621  return AVERROR_INVALIDDATA;
2622  }
2623 
2624  if (++sl->mb_x >= h->mb_width) {
2625  loop_filter(h, sl, lf_x_start, sl->mb_x);
2626  sl->mb_x = lf_x_start = 0;
2627  decode_finish_row(h, sl);
2628  ++sl->mb_y;
2629  if (FIELD_OR_MBAFF_PICTURE(h)) {
2630  ++sl->mb_y;
2631  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
2633  }
2634  }
2635 
2636  if (eos || sl->mb_y >= h->mb_height) {
2637  ff_tlog(h->avctx, "slice end %d %d\n",
2638  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2639  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2640  sl->mb_y, ER_MB_END);
2641  if (sl->mb_x > lf_x_start)
2642  loop_filter(h, sl, lf_x_start, sl->mb_x);
2643  goto finish;
2644  }
2645  }
2646  } else {
2647  for (;;) {
2648  int ret;
2649 
2650  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2651  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2652  sl->next_slice_idx);
2653  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2654  sl->mb_y, ER_MB_ERROR);
2655  return AVERROR_INVALIDDATA;
2656  }
2657 
2658  ret = ff_h264_decode_mb_cavlc(h, sl);
2659 
2660  if (ret >= 0)
2661  ff_h264_hl_decode_mb(h, sl);
2662 
2663  // FIXME optimal? or let mb_decode decode 16x32 ?
2664  if (ret >= 0 && FRAME_MBAFF(h)) {
2665  sl->mb_y++;
2666  ret = ff_h264_decode_mb_cavlc(h, sl);
2667 
2668  if (ret >= 0)
2669  ff_h264_hl_decode_mb(h, sl);
2670  sl->mb_y--;
2671  }
2672 
2673  if (ret < 0) {
2674  av_log(h->avctx, AV_LOG_ERROR,
2675  "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
2676  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2677  sl->mb_y, ER_MB_ERROR);
2678  return ret;
2679  }
2680 
2681  if (++sl->mb_x >= h->mb_width) {
2682  loop_filter(h, sl, lf_x_start, sl->mb_x);
2683  sl->mb_x = lf_x_start = 0;
2684  decode_finish_row(h, sl);
2685  ++sl->mb_y;
2686  if (FIELD_OR_MBAFF_PICTURE(h)) {
2687  ++sl->mb_y;
2688  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
2690  }
2691  if (sl->mb_y >= h->mb_height) {
2692  ff_tlog(h->avctx, "slice end %d %d\n",
2693  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2694 
2695  if ( get_bits_left(&sl->gb) == 0
2696  || get_bits_left(&sl->gb) > 0 && !(h->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
2697  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2698  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2699 
2700  goto finish;
2701  } else {
2702  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2703  sl->mb_x, sl->mb_y, ER_MB_END);
2704 
2705  return AVERROR_INVALIDDATA;
2706  }
2707  }
2708  }
2709 
2710  if (get_bits_left(&sl->gb) <= 0 && sl->mb_skip_run <= 0) {
2711  ff_tlog(h->avctx, "slice end %d %d\n",
2712  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2713 
2714  if (get_bits_left(&sl->gb) == 0) {
2715  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2716  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2717  if (sl->mb_x > lf_x_start)
2718  loop_filter(h, sl, lf_x_start, sl->mb_x);
2719 
2720  goto finish;
2721  } else {
2722  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2723  sl->mb_y, ER_MB_ERROR);
2724 
2725  return AVERROR_INVALIDDATA;
2726  }
2727  }
2728  }
2729  }
2730 
2731 finish:
2732  sl->deblocking_filter = orig_deblock;
2733  return 0;
2734 }
2735 
2736 /**
2737  * Call decode_slice() for each context.
2738  *
2739  * @param h h264 master context
2740  */
2742 {
2743  AVCodecContext *const avctx = h->avctx;
2744  H264SliceContext *sl;
2745  int context_count = h->nb_slice_ctx_queued;
2746  int ret = 0;
2747  int i, j;
2748 
2749  h->slice_ctx[0].next_slice_idx = INT_MAX;
2750 
2751  if (h->avctx->hwaccel || context_count < 1)
2752  return 0;
2753 
2754  av_assert0(context_count && h->slice_ctx[context_count - 1].mb_y < h->mb_height);
2755 
2756  if (context_count == 1) {
2757 
2758  h->slice_ctx[0].next_slice_idx = h->mb_width * h->mb_height;
2759  h->postpone_filter = 0;
2760 
2761  ret = decode_slice(avctx, &h->slice_ctx[0]);
2762  h->mb_y = h->slice_ctx[0].mb_y;
2763  if (ret < 0)
2764  goto finish;
2765  } else {
2766  av_assert0(context_count > 0);
2767  for (i = 0; i < context_count; i++) {
2768  int next_slice_idx = h->mb_width * h->mb_height;
2769  int slice_idx;
2770 
2771  sl = &h->slice_ctx[i];
2772 
2773  /* make sure none of those slices overlap */
2774  slice_idx = sl->mb_y * h->mb_width + sl->mb_x;
2775  for (j = 0; j < context_count; j++) {
2776  H264SliceContext *sl2 = &h->slice_ctx[j];
2777  int slice_idx2 = sl2->mb_y * h->mb_width + sl2->mb_x;
2778 
2779  if (i == j || slice_idx2 < slice_idx)
2780  continue;
2781  next_slice_idx = FFMIN(next_slice_idx, slice_idx2);
2782  }
2783  sl->next_slice_idx = next_slice_idx;
2784  }
2785 
2786  avctx->execute(avctx, decode_slice, h->slice_ctx,
2787  NULL, context_count, sizeof(h->slice_ctx[0]));
2788 
2789  /* pull back stuff from slices to master context */
2790  sl = &h->slice_ctx[context_count - 1];
2791  h->mb_y = sl->mb_y;
2792 
2793  if (h->postpone_filter) {
2794  h->postpone_filter = 0;
2795 
2796  for (i = 0; i < context_count; i++) {
2797  int y_end, x_end;
2798 
2799  sl = &h->slice_ctx[i];
2800  y_end = FFMIN(sl->mb_y + 1, h->mb_height);
2801  x_end = (sl->mb_y >= h->mb_height) ? h->mb_width : sl->mb_x;
2802 
2803  for (j = sl->resync_mb_y; j < y_end; j += 1 + FIELD_OR_MBAFF_PICTURE(h)) {
2804  sl->mb_y = j;
2805  loop_filter(h, sl, j > sl->resync_mb_y ? 0 : sl->resync_mb_x,
2806  j == y_end - 1 ? x_end : h->mb_width);
2807  }
2808  }
2809  }
2810  }
2811 
2812 finish:
2813  h->nb_slice_ctx_queued = 0;
2814  return ret;
2815 }
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
er_add_slice
static void er_add_slice(H264SliceContext *sl, int startx, int starty, int endx, int endy, int status)
Definition: h264_slice.c:2517
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
ff_h264_filter_mb_fast
void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
Definition: h264_loopfilter.c:416
h264_slice_header_init
static int h264_slice_header_init(H264Context *h)
Definition: h264_slice.c:955
implicit_weight_table
static void implicit_weight_table(const H264Context *h, H264SliceContext *sl, int field)
Initialize implicit_weight table.
Definition: h264_slice.c:686
H264SliceContext::mb_xy
int mb_xy
Definition: h264dec.h:225
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(size_t size, AVBufferRef *(*alloc)(size_t size))
Allocate and initialize a buffer pool.
Definition: buffer.c:280
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:253
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
H264SliceContext::ref_cache
int8_t ref_cache[2][5 *8]
Definition: h264dec.h:293
ff_h264_free_tables
void ff_h264_free_tables(H264Context *h)
Definition: h264dec.c:136
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
av_clip
#define av_clip
Definition: common.h:96
h264_init_ps
static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
Definition: h264_slice.c:1048
H264SliceContext::max_pic_num
int max_pic_num
Definition: h264dec.h:325
H264SliceContext::nb_mmco
int nb_mmco
Definition: h264dec.h:316
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:694
CHROMA422
#define CHROMA422(h)
Definition: h264dec.h:91
FF_BUG_TRUNCATED
#define FF_BUG_TRUNCATED
Definition: avcodec.h:1355
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
cabac.h
H264Picture::poc
int poc
frame POC
Definition: h264dec.h:128
h264_export_frame_props
static int h264_export_frame_props(H264Context *h)
Definition: h264_slice.c:1161
ff_h264_sei_ctx_replace
static int ff_h264_sei_ctx_replace(H264SEIContext *dst, const H264SEIContext *src)
Definition: h264_sei.h:132
H264Picture::f
AVFrame * f
Definition: h264dec.h:107
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1264
out
FILE * out
Definition: movenc.c:54
cb
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:241
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:812
av_clip_int8
#define av_clip_int8
Definition: common.h:105
zigzag_scan8x8_cavlc
static const uint8_t zigzag_scan8x8_cavlc[64+1]
Definition: h264_slice.c:97
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2964
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
ff_h264_ref_picture
int ff_h264_ref_picture(H264Picture *dst, const H264Picture *src)
Definition: h264_picture.c:97
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:940
ff_h2645_sei_to_frame
int ff_h2645_sei_to_frame(AVFrame *frame, H2645SEI *sei, enum AVCodecID codec_id, AVCodecContext *avctx, const H2645VUI *vui, unsigned bit_depth_luma, unsigned bit_depth_chroma, int seed)
Definition: h2645_sei.c:511
H264Picture::ref_index
int8_t * ref_index[2]
Definition: h264dec.h:125
HWACCEL_MAX
#define HWACCEL_MAX
MB_MBAFF
#define MB_MBAFF(h)
Definition: h264dec.h:64
H264SliceContext::mvd_table
uint8_t(*[2] mvd_table)[2]
Definition: h264dec.h:306
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:266
H264_SEI_PIC_STRUCT_TOP_BOTTOM
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM
3: top field, bottom field, in that order
Definition: h264_sei.h:35
color_frame
static void color_frame(AVFrame *frame, const int c[4])
Definition: h264_slice.c:301
H264Picture::pps
const PPS * pps
Definition: h264dec.h:151
AV_FRAME_DATA_S12M_TIMECODE
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:152
av_mod_uintp2
#define av_mod_uintp2
Definition: common.h:123
GetBitContext::size_in_bits
int size_in_bits
Definition: get_bits.h:111
H2645NAL::ref_idc
int ref_idc
H.264 only, nal_ref_idc.
Definition: h2645_parse.h:57
ff_h264_slice_context_init
void ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
Init slice context.
Definition: h264dec.c:260
ERContext::mb_index2xy
int * mb_index2xy
Definition: error_resilience.h:58
predict_field_decoding_flag
static void predict_field_decoding_flag(const H264Context *h, H264SliceContext *sl)
Definition: h264_slice.c:2473
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
pixdesc.h
AVFrame::width
int width
Definition: frame.h:412
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:673
get_ue_golomb
static int get_ue_golomb(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to 8190.
Definition: golomb.h:53
ff_h264_update_thread_context
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:331
alloc_scratch_buffers
static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
Definition: h264_slice.c:129
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:573
FRAME_RECOVERED_IDR
#define FRAME_RECOVERED_IDR
We have seen an IDR, so all the following frames in coded order are correctly decodable.
Definition: h264dec.h:517
decode_finish_row
static void decode_finish_row(const H264Context *h, H264SliceContext *sl)
Draw edges and report progress for the last MB row.
Definition: h264_slice.c:2486
H264SliceContext::ref_count
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264dec.h:261
FF_COMPLIANCE_STRICT
#define FF_COMPLIANCE_STRICT
Strictly conform to all the things in the spec no matter what consequences.
Definition: defs.h:59
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:468
ff_er_frame_start
void ff_er_frame_start(ERContext *s)
Definition: error_resilience.c:787
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
H264Picture::qscale_table
int8_t * qscale_table
Definition: h264dec.h:113
H264SliceContext::left_mb_xy
int left_mb_xy[LEFT_MBS]
Definition: h264dec.h:205
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:247
H264PredWeightTable::use_weight_chroma
int use_weight_chroma
Definition: h264_parse.h:71
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:600
AV_WN32A
#define AV_WN32A(p, v)
Definition: intreadwrite.h:536
ff_h264_update_thread_context_for_user
int ff_h264_update_thread_context_for_user(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:466
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:822
find_unused_picture
static int find_unused_picture(const H264Context *h)
Definition: h264_slice.c:269
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:649
H264Picture::ref_index_buf
AVBufferRef * ref_index_buf[2]
Definition: h264dec.h:124
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_h264_pred_weight_table
int ff_h264_pred_weight_table(GetBitContext *gb, const SPS *sps, const int *ref_count, int slice_type_nos, H264PredWeightTable *pwt, int picture_structure, void *logctx)
Definition: h264_parse.c:29
FRAME_RECOVERED_SEI
#define FRAME_RECOVERED_SEI
Sufficient number of frames have been decoded since a SEI recovery point, so all the following frames...
Definition: h264dec.h:522
H264SliceContext::is_complex
int is_complex
Definition: h264dec.h:232
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:34
ff_h264_decode_ref_pic_list_reordering
int ff_h264_decode_ref_pic_list_reordering(H264SliceContext *sl, void *logctx)
Definition: h264_refs.c:423
mpegutils.h
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:590
H264Picture::invalid_gap
int invalid_gap
Definition: h264dec.h:147
av_timecode_get_smpte
uint32_t av_timecode_get_smpte(AVRational rate, int drop, int hh, int mm, int ss, int ff)
Convert sei info to SMPTE 12M binary representation.
Definition: timecode.c:69
thread.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
ThreadFrame::f
AVFrame * f
Definition: threadframe.h:28
AV_PIX_FMT_VULKAN
@ AV_PIX_FMT_VULKAN
Vulkan hardware images.
Definition: pixfmt.h:376
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1389
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:361
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in FFCodec caps_internal and use ff_thread_get_buffer() to allocate frames. Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
H264SliceContext::mb_x
int mb_x
Definition: h264dec.h:224
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:641
H264Picture::frame_num
int frame_num
frame_num (raw frame_num from slice header)
Definition: h264dec.h:129
H264SliceContext::next_slice_idx
int next_slice_idx
Definition: h264dec.h:230
H264SliceContext
Definition: h264dec.h:171
golomb.h
exp golomb vlc stuff
MB_FIELD
#define MB_FIELD(sl)
Definition: h264dec.h:65
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
ff_h264_filter_mb
void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
Definition: h264_loopfilter.c:716
H264SliceContext::mv_cache
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264dec.h:292
AV_CODEC_FLAG_OUTPUT_CORRUPT
#define AV_CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:228
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:486
finish
static void finish(void)
Definition: movenc.c:342
get_chroma_qp
static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
Get the chroma qp.
Definition: h264dec.h:649
H264Picture::mmco_reset
int mmco_reset
MMCO_RESET set this 1.
Definition: h264dec.h:130
fail
#define fail()
Definition: checkasm.h:138
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:484
timecode.h
h264_select_output_frame
static int h264_select_output_frame(H264Context *h)
Definition: h264_slice.c:1284
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:466
USES_LIST
#define USES_LIST(a, list)
Definition: mpegutils.h:92
CABACContext::bytestream
const uint8_t * bytestream
Definition: cabac.h:45
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2992
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
H264Picture::mb_stride
int mb_stride
Definition: h264dec.h:154
IN_RANGE
#define IN_RANGE(a, b, size)
Definition: h264_slice.c:281
scan8
static const uint8_t scan8[16 *3+3]
Definition: h264_parse.h:40
ff_h264_flush_change
void ff_h264_flush_change(H264Context *h)
Definition: h264dec.c:453
ff_h264qpel_init
av_cold void ff_h264qpel_init(H264QpelContext *c, int bit_depth)
Definition: h264qpel.c:49
ff_h264_sei_process_picture_timing
int ff_h264_sei_process_picture_timing(H264SEIPictureTiming *h, const SPS *sps, void *logctx)
Parse the contents of a picture timing message given an active SPS.
Definition: h264_sei.c:65
h264_frame_start
static int h264_frame_start(H264Context *h)
Definition: h264_slice.c:478
H264SliceContext::deblocking_filter
int deblocking_filter
disable_deblocking_filter_idc with 1 <-> 0
Definition: h264dec.h:187
H264PredWeightTable::luma_log2_weight_denom
int luma_log2_weight_denom
Definition: h264_parse.h:72
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
H264Picture::f_grain
AVFrame * f_grain
Definition: h264dec.h:110
H264SliceContext::picture_structure
int picture_structure
Definition: h264dec.h:234
ff_h264_golomb_to_pict_type
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
release_unused_pictures
static void release_unused_pictures(H264Context *h, int remove_current)
Definition: h264_slice.c:116
H264PredWeightTable::use_weight
int use_weight
Definition: h264_parse.h:70
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
H264SliceContext::direct_spatial_mv_pred
int direct_spatial_mv_pred
Definition: h264dec.h:245
H264SliceContext::slice_num
int slice_num
Definition: h264dec.h:176
pack16to32
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264_parse.h:127
refstruct.h
non_j_pixfmt
static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a)
Definition: h264_slice.c:1037
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:471
ff_h264_init_cabac_states
void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl)
Definition: h264_cabac.c:1262
ff_h264_hl_decode_mb
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
Definition: h264_mb.c:800
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
ff_refstruct_ref_c
const void * ff_refstruct_ref_c(const void *obj)
Analog of ff_refstruct_ref(), but for constant objects.
Definition: refstruct.c:145
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:628
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:589
av_memcpy_backptr
void av_memcpy_backptr(uint8_t *dst, int back, int cnt)
Overlapping memcpy() implementation.
Definition: mem.c:445
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:384
ff_h264_queue_decode_slice
int ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal)
Submit a slice for decoding.
Definition: h264_slice.c:2034
width
#define width
H264Context::DPB
H264Picture DPB[H264_MAX_PICTURE_COUNT]
Definition: h264dec.h:340
ff_hwaccel_frame_priv_alloc
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
Definition: decode.c:1838
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:127
H264PredWeightTable::chroma_log2_weight_denom
int chroma_log2_weight_denom
Definition: h264_parse.h:73
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:51
AV_ZERO32
#define AV_ZERO32(d)
Definition: intreadwrite.h:627
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:421
FIELD_PICTURE
#define FIELD_PICTURE(h)
Definition: h264dec.h:67
ff_h264_execute_ref_pic_marking
int ff_h264_execute_ref_pic_marking(H264Context *h)
Execute the reference picture marking (memory management control operations).
Definition: h264_refs.c:610
ff_h264_decode_ref_pic_marking
int ff_h264_decode_ref_pic_marking(H264SliceContext *sl, GetBitContext *gb, const H2645NAL *nal, void *logctx)
Definition: h264_refs.c:834
from
const char * from
Definition: jacosubdec.c:66
to
const char * to
Definition: webvttdec.c:35
h264_slice_header_parse
static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:1679
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
H264PredWeightTable::chroma_weight_flag
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264_parse.h:75
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
h264data.h
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:465
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:36
decode.h
field_scan8x8_cavlc
static const uint8_t field_scan8x8_cavlc[64+1]
Definition: h264_slice.c:77
H264SliceContext::slice_alpha_c0_offset
int slice_alpha_c0_offset
Definition: h264dec.h:188
IS_INTRA
#define IS_INTRA(x, y)
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
AVFrame::crop_right
size_t crop_right
Definition: frame.h:781
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
backup_mb_border
static av_always_inline void backup_mb_border(const H264Context *h, H264SliceContext *sl, const uint8_t *src_y, const uint8_t *src_cb, const uint8_t *src_cr, int linesize, int uvlinesize, int simple)
Definition: h264_slice.c:584
H264SliceContext::slice_type
int slice_type
Definition: h264dec.h:177
H264SliceContext::resync_mb_x
int resync_mb_x
Definition: h264dec.h:226
H264Picture::sei_recovery_frame_cnt
int sei_recovery_frame_cnt
Definition: h264dec.h:148
AVDISCARD_BIDIR
@ AVDISCARD_BIDIR
discard all bidirectional frames
Definition: defs.h:216
get_se_golomb
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:239
H2645NAL::type
int type
NAL unit type.
Definition: h2645_parse.h:52
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:79
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
frame
static AVFrame * frame
Definition: demux_decode.c:54
H264Context::enable_er
int enable_er
Definition: h264dec.h:545
ff_h264_draw_horiz_band
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
Definition: h264dec.c:101
H264SliceContext::curr_pic_num
int curr_pic_num
Definition: h264dec.h:324
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:871
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:65
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:219
threadframe.h
GetBitContext::buffer
const uint8_t * buffer
Definition: get_bits.h:109
alloc_picture
static int alloc_picture(H264Context *h, H264Picture *pic)
Definition: h264_slice.c:188
H264Picture::motion_val_buf
AVBufferRef * motion_val_buf[2]
Definition: h264dec.h:115
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:140
NULL
#define NULL
Definition: coverity.c:32
AV_COPY128
#define AV_COPY128(d, s)
Definition: intreadwrite.h:607
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
AV_COPY64
#define AV_COPY64(d, s)
Definition: intreadwrite.h:603
H264SliceContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: h264dec.h:277
SPS
Sequence parameter set.
Definition: h264_ps.h:44
H264Ref::parent
const H264Picture * parent
Definition: h264dec.h:168
TRANSPOSE
#define TRANSPOSE(x)
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
ER_MB_ERROR
#define ER_MB_ERROR
Definition: error_resilience.h:37
ff_h264_decode_mb_cabac
int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cabac.c:1920
AV_PICTURE_TYPE_SI
@ AV_PICTURE_TYPE_SI
Switching Intra.
Definition: avutil.h:283
AVFrame::coded_picture_number
attribute_deprecated int coded_picture_number
picture number in bitstream order
Definition: frame.h:474
H264SliceContext::chroma_qp
int chroma_qp[2]
Definition: h264dec.h:182
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:353
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:322
PPS
Picture parameter set.
Definition: h264_ps.h:110
av_fast_mallocz
void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size)
Allocate and clear a buffer, reusing the given one if large enough.
Definition: mem.c:560
ff_thread_release_ext_buffer
void ff_thread_release_ext_buffer(ThreadFrame *f)
Unref a ThreadFrame.
Definition: pthread_frame.c:1012
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:109
mathops.h
list
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
Definition: filter_design.txt:25
IS_INTERLACED
#define IS_INTERLACED(a)
Definition: mpegutils.h:76
H264Picture::mb_height
int mb_height
Definition: h264dec.h:153
MAX_PPS_COUNT
#define MAX_PPS_COUNT
Definition: h264_ps.h:38
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:469
H264SliceContext::qscale
int qscale
Definition: h264dec.h:181
get_pixel_format
static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
Definition: h264_slice.c:784
fill_filter_caches
static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
Definition: h264_slice.c:2261
ERContext::error_occurred
int error_occurred
Definition: error_resilience.h:65
AV_ZERO128
#define AV_ZERO128(d)
Definition: intreadwrite.h:635
init_scan_tables
static void init_scan_tables(H264Context *h)
initialize scan tables
Definition: h264_slice.c:750
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:483
H264SliceContext::top_borders_allocated
int top_borders_allocated[2]
Definition: h264dec.h:281
AV_PICTURE_TYPE_SP
@ AV_PICTURE_TYPE_SP
Switching Predicted.
Definition: avutil.h:284
FIELD_OR_MBAFF_PICTURE
#define FIELD_OR_MBAFF_PICTURE(h)
Definition: h264dec.h:84
H264SliceContext::mb_skip_run
int mb_skip_run
Definition: h264dec.h:231
h264_ps.h
init_dimensions
static void init_dimensions(H264Context *h)
Definition: h264_slice.c:915
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
H264SliceContext::top_type
int top_type
Definition: h264dec.h:208
AVFrame::crop_bottom
size_t crop_bottom
Definition: frame.h:779
H264SliceContext::resync_mb_y
int resync_mb_y
Definition: h264dec.h:227
H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
6: bottom field, top field, bottom field repeated, in that order
Definition: h264_sei.h:38
DELAYED_PIC_REF
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output.
Definition: diracdec.c:67
H264SEIPictureTiming
Definition: h264_sei.h:54
H264SliceContext::cabac
CABACContext cabac
Cabac.
Definition: h264dec.h:311
H264SliceContext::redundant_pic_count
int redundant_pic_count
Definition: h264dec.h:238
AVFrame::crop_left
size_t crop_left
Definition: frame.h:780
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:218
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:442
ff_zigzag_scan
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
H264Picture::reference
int reference
Definition: h264dec.h:145
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:318
CABAC
#define CABAC(h)
Definition: h264_cabac.c:28
LEFT_MBS
#define LEFT_MBS
Definition: h264dec.h:68
pps
static int FUNC() pps(CodedBitstreamContext *ctx, RWContext *rw, H264RawPPS *current)
Definition: cbs_h264_syntax_template.c:404
rectangle.h
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
H264SliceContext::mb_uvlinesize
ptrdiff_t mb_uvlinesize
Definition: h264dec.h:222
VP_START
#define VP_START
< current MB is the first after a resync marker
Definition: error_resilience.h:29
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:473
H264SliceContext::pwt
H264PredWeightTable pwt
Definition: h264dec.h:191
H264Picture::tf
ThreadFrame tf
Definition: h264dec.h:108
H264Picture::mb_type
uint32_t * mb_type
Definition: h264dec.h:119
ff_h264_decode_mb_cavlc
int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cavlc.c:695
H264_SEI_PIC_STRUCT_BOTTOM_TOP
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP
4: bottom field, top field, in that order
Definition: h264_sei.h:36
H264Picture::recovered
int recovered
picture at IDR or recovery point + recovery count
Definition: h264dec.h:146
H2645NAL::gb
GetBitContext gb
Definition: h2645_parse.h:47
H264SliceContext::top_mb_xy
int top_mb_xy
Definition: h264dec.h:203
H264SliceContext::qp_thresh
int qp_thresh
QP threshold to skip loopfilter.
Definition: h264dec.h:183
H2645NAL
Definition: h2645_parse.h:34
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:475
H264SliceContext::top_borders
uint8_t(*[2] top_borders)[(16 *3) *2]
Definition: h264dec.h:278
AVFrameSideData::data
uint8_t * data
Definition: frame.h:248
h264chroma.h
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1544
H264SliceContext::cbp
int cbp
Definition: h264dec.h:249
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:427
H264SliceContext::left_type
int left_type[LEFT_MBS]
Definition: h264dec.h:210
ff_h264_direct_ref_list_init
void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:120
H264SliceContext::mb_y
int mb_y
Definition: h264dec.h:224
H264PredWeightTable::implicit_weight
int implicit_weight[48][48][2]
Definition: h264_parse.h:79
height
#define height
decode_slice
static int decode_slice(struct AVCodecContext *avctx, void *arg)
Definition: h264_slice.c:2529
H264SliceContext::explicit_ref_marking
int explicit_ref_marking
Definition: h264dec.h:317
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
pt
int pt
Definition: rtp.c:35
H264SliceContext::uvlinesize
ptrdiff_t uvlinesize
Definition: h264dec.h:220
AVBufferRef::buffer
AVBuffer * buffer
Definition: buffer.h:83
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:333
H264SliceContext::slice_type_nos
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
Definition: h264dec.h:178
H264SliceContext::delta_poc_bottom
int delta_poc_bottom
Definition: h264dec.h:322
copy_picture_range
static void copy_picture_range(H264Picture **to, H264Picture *const *from, int count, H264Context *new_base, const H264Context *old_base)
Definition: h264_slice.c:288
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
FRAME_MBAFF
#define FRAME_MBAFF(h)
Definition: h264dec.h:66
IS_DIRECT
#define IS_DIRECT(a)
Definition: mpegutils.h:77
H264_SEI_PIC_STRUCT_FRAME
@ H264_SEI_PIC_STRUCT_FRAME
0: frame
Definition: h264_sei.h:32
get_cabac_terminate
static int av_unused get_cabac_terminate(CABACContext *c)
Definition: cabac_functions.h:187
H264_SEI_PIC_STRUCT_FRAME_TRIPLING
@ H264_SEI_PIC_STRUCT_FRAME_TRIPLING
8: frame tripling
Definition: h264_sei.h:40
field_scan
static const uint8_t field_scan[16+1]
Definition: h264_slice.c:51
loop_filter
static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
Definition: h264_slice.c:2403
ff_init_cabac_decoder
int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:162
H264SliceContext::mb_mbaff
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264dec.h:236
field_scan8x8
static const uint8_t field_scan8x8[64+1]
Definition: h264_slice.c:58
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:40
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:302
LIST_NOT_USED
#define LIST_NOT_USED
Definition: h264dec.h:390
H264Picture::field_picture
int field_picture
whether or not picture was encoded in separate fields
Definition: h264dec.h:138
h264dec.h
H264SliceContext::poc_lsb
int poc_lsb
Definition: h264dec.h:321
H264SliceContext::first_mb_addr
unsigned int first_mb_addr
Definition: h264dec.h:228
ff_h264_direct_dist_scale_factor
void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:61
H264Picture::needs_fg
int needs_fg
whether picture needs film grain synthesis (see f_grain)
Definition: h264dec.h:149
AVBuffer
A reference counted buffer type.
Definition: buffer_internal.h:38
H264Context
H264Context.
Definition: h264dec.h:331
H264Picture::decode_error_flags
AVBufferRef * decode_error_flags
Definition: h264dec.h:157
AVDISCARD_NONINTRA
@ AVDISCARD_NONINTRA
discard all non intra frames
Definition: defs.h:217
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
av_timecode_make_smpte_tc_string2
char * av_timecode_make_smpte_tc_string2(char *buf, AVRational rate, uint32_t tcsmpte, int prevent_df, int skip_field)
Get the timecode string from the SMPTE timecode format.
Definition: timecode.c:138
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:376
AV_FRAME_FLAG_CORRUPT
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:624
H264_SEI_PIC_STRUCT_FRAME_DOUBLING
@ H264_SEI_PIC_STRUCT_FRAME_DOUBLING
7: frame doubling
Definition: h264_sei.h:39
H264SliceContext::frame_num
int frame_num
Definition: h264dec.h:319
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:485
ff_h264_execute_decode_slices
int ff_h264_execute_decode_slices(H264Context *h)
Call decode_slice() for each context.
Definition: h264_slice.c:2741
H264SliceContext::mb_linesize
ptrdiff_t mb_linesize
may be equal to s->linesize or s->linesize * 2, for mbaff
Definition: h264dec.h:221
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
cabac_functions.h
tb
#define tb
Definition: regdef.h:68
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:599
ff_h264_replace_picture
int ff_h264_replace_picture(H264Picture *dst, const H264Picture *src)
Definition: h264_picture.c:147
ff_h264_parse_ref_count
int ff_h264_parse_ref_count(int *plist_count, int ref_count[2], GetBitContext *gb, const PPS *pps, int slice_type_nos, int picture_structure, void *logctx)
Definition: h264_parse.c:221
ff_h264_alloc_tables
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264dec.c:181
ff_thread_get_ext_buffer
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:984
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:636
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:656
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:467
H264SliceContext::list_count
unsigned int list_count
Definition: h264dec.h:262
avcodec.h
H264SliceContext::h264
const struct H264Context * h264
Definition: h264dec.h:172
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
ff_h264dsp_init
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:66
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
ret
ret
Definition: filter_design.txt:187
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder/muxer should not do as an error
Definition: defs.h:56
ff_h264_init_poc
int ff_h264_init_poc(int pic_field_poc[2], int *pic_poc, const SPS *sps, H264POCContext *pc, int picture_structure, int nal_ref_idc)
Definition: h264_parse.c:279
ff_h264_get_profile
int ff_h264_get_profile(const SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264_parse.c:531
h264_field_start
static int h264_field_start(H264Context *h, const H264SliceContext *sl, const H2645NAL *nal, int first_slice)
Definition: h264_slice.c:1387
H264SliceContext::last_qscale_diff
int last_qscale_diff
Definition: h264dec.h:184
sps
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
Definition: cbs_h264_syntax_template.c:260
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:561
ff_refstruct_replace
void ff_refstruct_replace(void *dstp, const void *src)
Ensure *dstp refers to the same object as src.
Definition: refstruct.c:156
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:472
U
#define U(x)
Definition: vpx_arith.h:37
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:477
H264SliceContext::pps_id
unsigned int pps_id
Definition: h264dec.h:272
H264SliceContext::linesize
ptrdiff_t linesize
Definition: h264dec.h:220
H264SliceContext::slice_beta_offset
int slice_beta_offset
Definition: h264dec.h:189
AVCodecContext
main external API structure.
Definition: avcodec.h:441
AVFrame::height
int height
Definition: frame.h:412
get_ue_golomb_31
static int get_ue_golomb_31(GetBitContext *gb)
read unsigned exp golomb code, constraint to a max of 31.
Definition: golomb.h:120
MAX_SLICES
#define MAX_SLICES
Definition: dxva2_hevc.c:32
status
ov_status_e status
Definition: dnn_backend_openvino.c:119
ff_h264_build_ref_list
int ff_h264_build_ref_list(H264Context *h, H264SliceContext *sl)
Definition: h264_refs.c:299
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1562
H264SliceContext::bipred_scratchpad
uint8_t * bipred_scratchpad
Definition: h264dec.h:276
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:437
H264Picture::field_poc
int field_poc[2]
top/bottom POC
Definition: h264dec.h:127
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
H264SliceContext::mmco
MMCO mmco[H264_MAX_MMCO_COUNT]
Definition: h264dec.h:315
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
H264Picture::mb_width
int mb_width
Definition: h264dec.h:153
ff_h264_unref_picture
void ff_h264_unref_picture(H264Picture *pic)
Definition: h264_picture.c:39
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:820
H264Picture
Definition: h264dec.h:106
ERContext::error_status_table
uint8_t * error_status_table
Definition: error_resilience.h:66
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
AV_PIX_FMT_FLAG_PLANAR
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:132
h264_slice_init
static int h264_slice_init(H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:1898
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
ff_h264_field_end
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
Definition: h264_picture.c:220
CABACContext::bytestream_end
const uint8_t * bytestream_end
Definition: cabac.h:46
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
init_table_pools
static int init_table_pools(H264Context *h)
Definition: h264_slice.c:161
H264Picture::mb_type_buf
AVBufferRef * mb_type_buf
Definition: h264dec.h:118
H264SliceContext::ref_list
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264dec.h:263
LBOT
#define LBOT
Definition: h264dec.h:70
H264SliceContext::non_zero_count_cache
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264dec.h:287
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
desc
const char * desc
Definition: libsvtav1.c:83
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
H264_NAL_IDR_SLICE
@ H264_NAL_IDR_SLICE
Definition: h264.h:39
IS_INTER
#define IS_INTER(a)
Definition: mpegutils.h:72
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
get_ue_golomb_long
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
Definition: golomb.h:104
H264Context::nal_length_size
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
Definition: h264dec.h:450
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
ER_MB_END
#define ER_MB_END
Definition: error_resilience.h:38
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:246
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
H264SliceContext::er
ERContext * er
Definition: h264dec.h:174
H264_SEI_PIC_STRUCT_BOTTOM_FIELD
@ H264_SEI_PIC_STRUCT_BOTTOM_FIELD
2: bottom field
Definition: h264_sei.h:34
H264Picture::hwaccel_picture_private
void * hwaccel_picture_private
RefStruct reference for hardware accelerator private data.
Definition: h264dec.h:122
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:35
H264SliceContext::idr_pic_id
int idr_pic_id
Definition: h264dec.h:320
fill_filter_caches_inter
static av_always_inline void fill_filter_caches_inter(const H264Context *h, H264SliceContext *sl, int mb_type, int top_xy, const int left_xy[LEFT_MBS], int top_type, const int left_type[LEFT_MBS], int mb_xy, int list)
Definition: h264_slice.c:2177
ff_tlog
#define ff_tlog(ctx,...)
Definition: internal.h:153
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:468
cr
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:242
AVFrame::crop_top
size_t crop_top
Definition: frame.h:778
H264SliceContext::gb
GetBitContext gb
Definition: h264dec.h:173
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:555
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
H264_MAX_DPB_FRAMES
@ H264_MAX_DPB_FRAMES
Definition: h264.h:76
H264SliceContext::intra4x4_pred_mode
int8_t * intra4x4_pred_mode
Definition: h264dec.h:200
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
LTOP
#define LTOP
Definition: h264dec.h:69
h264.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:385
H264SliceContext::edge_emu_buffer_allocated
int edge_emu_buffer_allocated
Definition: h264dec.h:280
REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
Definition: h264_slice.c:283
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
CHROMA444
#define CHROMA444(h)
Definition: h264dec.h:92
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
ff_h264_get_slice_type
int ff_h264_get_slice_type(const H264SliceContext *sl)
Reconstruct bitstream slice_type.
Definition: h264_slice.c:2159
h
h
Definition: vp9dsp_template.c:2038
H264SliceContext::cabac_init_idc
int cabac_init_idc
Definition: h264dec.h:313
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:478
H264PredWeightTable::luma_weight_flag
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264_parse.h:74
H264_MAX_PICTURE_COUNT
#define H264_MAX_PICTURE_COUNT
Definition: h264dec.h:49
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:33
H264SliceContext::bipred_scratchpad_allocated
int bipred_scratchpad_allocated
Definition: h264dec.h:279
atomic_init
#define atomic_init(obj, value)
Definition: stdatomic.h:33
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:215
H264SliceContext::slice_type_fixed
int slice_type_fixed
Definition: h264dec.h:179
H264Ref::poc
int poc
Definition: h264dec.h:165
IS_8x8DCT
#define IS_8x8DCT(a)
Definition: h264dec.h:95
H264Picture::qscale_table_buf
AVBufferRef * qscale_table_buf
Definition: h264dec.h:112
H264_SEI_PIC_STRUCT_TOP_FIELD
@ H264_SEI_PIC_STRUCT_TOP_FIELD
1: top field
Definition: h264_sei.h:33
H264SliceContext::delta_poc
int delta_poc[2]
Definition: h264dec.h:323
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:3319
H264Picture::long_ref
int long_ref
1->long term reference 0->short term reference
Definition: h264dec.h:134
H264Ref::reference
int reference
Definition: h264dec.h:164
H264Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: h264dec.h:116
AV_CODEC_EXPORT_DATA_FILM_GRAIN
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only.
Definition: avcodec.h:416
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:476
H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
5: top field, bottom field, top field repeated, in that order
Definition: h264_sei.h:37
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2884
H264SliceContext::mb_field_decoding_flag
int mb_field_decoding_flag
Definition: h264dec.h:235
ff_h264_set_erpic
void ff_h264_set_erpic(ERPicture *dst, const H264Picture *src)
Definition: h264_picture.c:197
H264Context::is_avc
int is_avc
Used to parse AVC variant of H.264.
Definition: h264dec.h:449