Go to the documentation of this file.
   48         frame->nb_rpl_elems = 0;
 
   59     int x_cb         = x0 >> 
s->ps.sps->log2_ctb_size;
 
   60     int y_cb         = y0 >> 
s->ps.sps->log2_ctb_size;
 
   61     int pic_width_cb = 
s->ps.sps->ctb_width;
 
   62     int ctb_addr_ts  = 
s->ps.pps->ctb_addr_rs_to_ts[y_cb * pic_width_cb + x_cb];
 
   63     return &
ref->rpl_tab[ctb_addr_ts]->refPicList[0];
 
   98         frame->nb_rpl_elems = 
s->pkt.nb_nals;
 
  107         frame->ctb_count = 
s->ps.sps->ctb_width * 
s->ps.sps->ctb_height;
 
  108         for (j = 0; j < 
frame->ctb_count; j++)
 
  153     s->collocated_ref = 
NULL;
 
  155     if (
s->sh.pic_output_flag)
 
  161     ref->sequence = 
s->seq_decode;
 
  162     ref->frame->crop_left   = 
s->ps.sps->output_window.left_offset;
 
  163     ref->frame->crop_right  = 
s->ps.sps->output_window.right_offset;
 
  164     ref->frame->crop_top    = 
s->ps.sps->output_window.top_offset;
 
  165     ref->frame->crop_bottom = 
s->ps.sps->output_window.bottom_offset;
 
  182     if (
IS_IRAP(
s) && 
s->no_rasl_output_flag == 1) {
 
  187                 frame->sequence != 
s->seq_decode) {
 
  188                 if (
s->sh.no_output_of_prior_pics_flag == 1)
 
  197         int min_poc   = INT_MAX;
 
  203                 frame->sequence == 
s->seq_output) {
 
  205                 if (
frame->poc < min_poc || nb_output == 1) {
 
  206                     min_poc = 
frame->poc;
 
  213         if (!
flush && 
s->seq_output == 
s->seq_decode && 
s->ps.sps &&
 
  214             nb_output <= s->ps.sps->temporal_layer[
s->ps.sps->max_sub_layers - 1].num_reorder_pics)
 
  235                    "Output frame with POC %d.\n", 
frame->poc);
 
  239         if (
s->seq_output != 
s->seq_decode)
 
  251     int min_poc = INT_MAX;
 
  257             frame->sequence == 
s->seq_output &&
 
  263     if (
s->ps.sps && dpb >= 
s->ps.sps->temporal_layer[
s->ps.sps->max_sub_layers - 1].max_dec_pic_buffering) {
 
  267                 frame->sequence == 
s->seq_output &&
 
  270                     min_poc = 
frame->poc;
 
  278                 frame->sequence == 
s->seq_output &&
 
  279                 frame->poc <= min_poc) {
 
  291     int ctb_count    = 
frame->ctb_count;
 
  292     int ctb_addr_ts  = 
s->ps.pps->ctb_addr_rs_to_ts[
s->sh.slice_segment_addr];
 
  295     if (
s->slice_idx >= 
frame->nb_rpl_elems)
 
  298     for (
i = ctb_addr_ts; 
i < ctb_count; 
i++)
 
  319           s->rps[
LT_CURR].nb_refs) && !
s->ps.pps->pps_curr_pic_ref_enabled_flag) {
 
  324     for (list_idx = 0; list_idx < nb_list; list_idx++) {
 
  366                 rpl->
ref[
i]        = rpl_tmp.
ref[idx];
 
  371             memcpy(rpl, &rpl_tmp, 
sizeof(*rpl));
 
  376         if (
s->ps.pps->pps_curr_pic_ref_enabled_flag &&
 
  393     int mask = use_msb ? ~0 : (1 << 
s->ps.sps->log2_max_poc_lsb) - 1;
 
  398         if (
ref->frame->buf[0] && 
ref->sequence == 
s->seq_decode) {
 
  399             if ((
ref->poc & 
mask) == poc && (use_msb || 
ref->poc != 
s->poc))
 
  406                "Could not find ref with POC %d\n", poc);
 
  425     if (!
s->avctx->hwaccel) {
 
  426         if (!
s->ps.sps->pixel_shift) {
 
  428                 memset(
frame->frame->
data[
i], 1 << (
s->ps.sps->bit_depth - 1),
 
  432                 for (y = 0; y < (
s->ps.sps->height >> 
s->ps.sps->vshift[
i]); y++) {
 
  434                     AV_WN16(dst, 1 << (
s->ps.sps->bit_depth - 1));
 
  452                              int poc, 
int ref_flag, uint8_t use_msb)
 
  505         if (!short_rps->
used[
i])
 
  507         else if (i < short_rps->num_negative_pics)
 
  519         int poc  = long_rps->
poc[
i];
 
  554     if (
s->ps.pps->pps_curr_pic_ref_enabled_flag)
 
  
The word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, the filter lists the supported formats. For video that means pixel format; for audio that means channel layout and sample format. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection; and when a single format is eventually chosen for a link amongst the remaining list, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions.
 
const RefPicList * ff_hevc_get_ref_list(const HEVCContext *s, const HEVCFrame *ref, int x0, int y0)
 
static HEVCFrame * find_ref_idx(HEVCContext *s, int poc, uint8_t use_msb)
 
@ AV_FRAME_DATA_FILM_GRAIN_PARAMS
Film grain parameters for a frame, described by AVFilmGrainParams.
 
This structure describes decoded (raw) audio or video data.
 
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
 
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
 
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
 
static int add_candidate_ref(HEVCContext *s, RefPicList *list, int poc, int ref_flag, uint8_t use_msb)
 
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
 
#define HEVC_FRAME_FLAG_LONG_REF
 
void ff_hevc_clear_refs(HEVCContext *s)
Mark all frames in DPB as unused for reference.
 
static void mark_ref(HEVCFrame *frame, int flag)
 
int ff_hevc_output_frame(HEVCContext *s, AVFrame *out, int flush)
Find next frame in output order and put a reference to it in frame.
 
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
 
#define FF_ARRAY_ELEMS(a)
 
#define HEVC_SEQUENCE_COUNTER_INVALID
 
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
 
#define HEVC_FRAME_FLAG_BUMPING
 
static const uint16_t mask[17]
 
void av_memcpy_backptr(uint8_t *dst, int back, int cnt)
Overlapping memcpy() implementation.
 
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
 
#define AV_CEIL_RSHIFT(a, b)
 
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
 
static int init_slice_rpl(HEVCContext *s)
 
int ff_hevc_set_new_ref(HEVCContext *s, AVFrame **frame, int poc)
 
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
 
#define HEVC_FRAME_FLAG_SHORT_REF
 
int ff_hevc_slice_rpl(HEVCContext *s)
Construct the reference picture list(s) for the current slice.
 
struct HEVCFrame * ref[HEVC_MAX_REFS]
 
static HEVCFrame * generate_missing_ref(HEVCContext *s, int poc)
 
@ AV_PICTURE_STRUCTURE_BOTTOM_FIELD
coded as bottom field
 
#define HEVC_SEQUENCE_COUNTER_MASK
 
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
 
@ AV_PICTURE_STRUCTURE_TOP_FIELD
coded as top field
 
void ff_thread_release_ext_buffer(ThreadFrame *f)
Unref a ThreadFrame.
 
The word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, the filter lists the supported formats. For video that means pixel format; for audio that means channel layout and sample format. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection; and when a single format is eventually chosen for a link amongst the remaining list, all references to the list are updated.
 
void(* flush)(AVBSFContext *ctx)
 
uint8_t poc_msb_present[32]
 
static void * ff_refstruct_allocz(size_t size)
Equivalent to ff_refstruct_alloc_ext(size, 0, NULL, NULL)
 
int ff_hevc_frame_rps(HEVCContext *s)
Construct the reference picture sets for the current frame.
 
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
 
int isLongTerm[HEVC_MAX_REFS]
 
#define HEVC_FRAME_FLAG_OUTPUT
 
static HEVCFrame * alloc_frame(HEVCContext *s)
 
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
 
#define FF_THREAD_FRAME
Decode more than one frame at once.
 
unsigned int num_negative_pics
 
#define i(width, name, range_min, range_max)
 
static void unref_missing_refs(HEVCContext *s)
 
void ff_hevc_bump_frame(HEVCContext *s)
 
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
 
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
 
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
 
void ff_hevc_unref_frame(HEVCFrame *frame, int flags)
 
int ff_hevc_frame_nb_refs(const HEVCContext *s)
Get the number of candidate references for the current frame.
 
void ff_hevc_flush_dpb(HEVCContext *s)
Drop all frames currently in DPB.
 
static int ref[MAX_W *MAX_W]
 
#define flags(name, subs,...)
 
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
 
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
 
void ff_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
 
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only.
 
void * ff_refstruct_pool_get(FFRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.