if (sps->profile_idc == 0) {
ctx->au_buffer.data_size = 0;
int au_end_found = 0, err;
while (!au_end_found) {
if (!buffer_pkt->data ||
if (!nalu_size || nalu_size > INT_MAX) {
data_size = ctx->au_buffer.data_size + nalu_size;
memcpy(ctx->au_buffer.data + ctx->au_buffer.data_size, in->data, nalu_size);
ctx->au_buffer.data_size = data_size;
in->data += nalu_size;
in->size -= nalu_size;
data_size = ctx->au_buffer.data_size;
ctx->au_buffer.data_size = 0;
buffer_pkt->data = buffer_pkt->buf->data;
buffer_pkt->size = data_size;
memcpy(out->data, ctx->au_buffer.data, data_size);
ctx->au_buffer.data_size = 0;
if (!ctx->in || !ctx->buffer_pkt)
ctx->au_buffer.capacity = 0;
ctx->au_buffer.data_size = 0;
.p.name = "evc_frame_merge",
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
int ff_evc_parse_pps(GetBitContext *gb, EVCParamSets *ps)
static enum AVCodecID evc_frame_merge_codec_ids[]
#define AVERROR_EOF
End of file.
AccessUnitBuffer au_buffer
const FFBitStreamFilter ff_evc_frame_merge_bsf
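
The final fragment in the listing above (.p.name = "evc_frame_merge") is the start of this registration table. As a hedged sketch of how such a table is typically laid out around the internal FFBitStreamFilter type (the exact field set and the EVCFMergeContext name are assumptions, not read from the file), it wires the public AVBitStreamFilter p to the static callbacks declared on this page:

/* Sketch only; requires libavcodec's internal BSF headers. */
const FFBitStreamFilter ff_evc_frame_merge_bsf = {
    .p.name         = "evc_frame_merge",
    .p.codec_ids    = evc_frame_merge_codec_ids,
    .priv_data_size = sizeof(EVCFMergeContext),   /* hypothetical context struct name */
    .init           = evc_frame_merge_init,
    .flush          = evc_frame_merge_flush,
    .close          = evc_frame_merge_close,
    .filter         = evc_frame_merge_filter,
};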
static int evc_frame_merge_init(AVBSFContext *bsf)
void av_packet_free(AVPacket **pkt)
Free the packet; if the packet is reference counted, it will be unreferenced first.
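
av_packet_alloc(), av_packet_unref() and av_packet_free() form the usual ownership cycle for the packets a bitstream filter keeps in its context; a minimal self-contained sketch of that cycle:

#include "libavcodec/packet.h"
#include "libavutil/error.h"

static int packet_lifecycle_sketch(void)
{
    AVPacket *pkt = av_packet_alloc();   /* fields set to default values */
    if (!pkt)
        return AVERROR(ENOMEM);

    /* ... reference some data, consume it ... */

    av_packet_unref(pkt);                /* wipe the contents, keep the AVPacket itself */
    /* pkt can now be reused for the next piece of input */

    av_packet_free(&pkt);                /* unreference if needed, free, and set pkt to NULL */
    return 0;
}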
The bitstream filter state.
static void skip_bits(GetBitContext *s, int n)
EVCParserPPS * pps[EVC_MAX_PPS_COUNT]
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static void evc_frame_merge_close(AVBSFContext *bsf)
void ff_evc_ps_free(EVCParamSets *ps)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int ff_evc_parse_sps(GetBitContext *gb, EVCParamSets *ps)
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
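
init_get_bits8() together with get_bits(), get_bits1(), skip_bits() and skip_bits1() is the bitreader idiom the EVC parsers apply to each NAL unit payload; a small sketch with invented syntax elements:

#include <stdint.h>
#include "get_bits.h"   /* libavcodec internal bitreader */

static int read_header_sketch(const uint8_t *payload, int size)
{
    GetBitContext gb;
    int ret = init_get_bits8(&gb, payload, size);   /* byte-oriented initialization */
    if (ret < 0)
        return ret;

    unsigned id   = get_bits(&gb, 4);   /* hypothetical 4-bit field */
    unsigned flag = get_bits1(&gb);     /* hypothetical single-bit flag */
    skip_bits(&gb, 8);                  /* skip a reserved byte we do not need */

    return flag ? (int)id : 0;
}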
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough; otherwise do nothing.
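
The property the filter relies on is that av_fast_realloc() never frees the old buffer on failure: it returns NULL and sets *size to 0, so the caller must check the return value before overwriting its pointer. The usual idiom:

#include <stdint.h>
#include "libavutil/error.h"
#include "libavutil/mem.h"

static int ensure_capacity(uint8_t **buf, unsigned int *capacity, size_t needed)
{
    void *tmp = av_fast_realloc(*buf, capacity, needed);

    if (!tmp)
        return AVERROR(ENOMEM);   /* *buf is still valid and still owned by the caller */
    *buf = tmp;                   /* may be the same pointer if it was already large enough */
    return 0;
}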
int ff_evc_parse_slice_header(GetBitContext *gb, EVCParserSliceHeader *sh, const EVCParamSets *ps, enum EVCNALUnitType nalu_type)
#define av_assert0(cond)
assert() equivalent that is always enabled.
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
static unsigned int get_bits1(GetBitContext *s)
void(* flush)(AVBSFContext *ctx)
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Set up a new reference to the data described by a given packet.
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
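
av_packet_ref() and av_packet_move_ref() are the two ways buffered packets change hands here: sharing the same reference-counted data versus transferring ownership without copying. A short sketch of the distinction:

#include "libavcodec/packet.h"

static int share_then_hand_off(AVPacket *src, AVPacket *shared, AVPacket *sink)
{
    /* New reference: shared and src now describe the same refcounted buffer. */
    int ret = av_packet_ref(shared, src);
    if (ret < 0)
        return ret;

    /* Ownership transfer: every field of src moves into sink and src is reset,
     * so nothing is copied and nothing can be double-freed. */
    av_packet_move_ref(sink, src);
    return 0;
}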
AVCodecID
Identify the syntax and semantics of the bitstream.
AVBitStreamFilter p
The public AVBitStreamFilter.
static int parse_nal_unit(AVBSFContext *bsf, const uint8_t *buf, int buf_size)
EVCParserSPS * sps[EVC_MAX_SPS_COUNT]
int(* init)(AVBSFContext *ctx)
static int FUNC() pps(CodedBitstreamContext *ctx, RWContext *rw, H264RawPPS *current)
static int end_of_access_unit_found(const EVCParamSets *ps, const EVCParserSliceHeader *sh, const EVCParserPoc *poc, enum EVCNALUnitType nalu_type)
#define AV_NOPTS_VALUE
Undefined timestamp value.
static void skip_bits1(GetBitContext *s)
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
#define EVC_NALU_LENGTH_PREFIX_SIZE
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
static void evc_frame_merge_flush(AVBSFContext *bsf)
void * priv_data
Opaque filter-specific private data.
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding.
static uint32_t evc_read_nal_unit_length(const uint8_t *bits, int bits_size, void *logctx)
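
evc_read_nal_unit_length() reads the EVC_NALU_LENGTH_PREFIX_SIZE-byte length that precedes each NAL unit in the input packet, and the filter loop then steps over prefix and payload. A sketch of that walk, leaving byte-order handling entirely to the helper and assuming the returned length excludes the prefix:

#include <stdint.h>
#include "libavutil/error.h"

/* Sketch of the per-packet walk; evc_read_nal_unit_length() and
 * EVC_NALU_LENGTH_PREFIX_SIZE are the helper and macro shown on this page. */
static int walk_nal_units(const uint8_t *data, int size, void *logctx)
{
    while (size >= EVC_NALU_LENGTH_PREFIX_SIZE) {
        uint32_t nalu_size = evc_read_nal_unit_length(data, size, logctx);

        if (!nalu_size || nalu_size > (uint32_t)(size - EVC_NALU_LENGTH_PREFIX_SIZE))
            return AVERROR_INVALIDDATA;

        data += EVC_NALU_LENGTH_PREFIX_SIZE;
        size -= EVC_NALU_LENGTH_PREFIX_SIZE;

        /* ... hand (data, nalu_size) to the NAL unit parser ... */

        data += nalu_size;
        size -= nalu_size;
    }
    return 0;
}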
int av_buffer_realloc(AVBufferRef **pbuf, size_t size)
Reallocate a given buffer.
This structure stores compressed data.
int ff_bsf_get_packet_ref(AVBSFContext *ctx, AVPacket *pkt)
Called by bitstream filters to get a packet for filtering.
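
ff_bsf_get_packet_ref() is how the filter callback pulls its next input packet; when nothing is queued it yields AVERROR(EAGAIN), which the callback simply propagates until a full access unit can be emitted. A skeletal sketch of that shape (the EVCFMergeContext name and the buffering details are placeholders):

static int merge_filter_sketch(AVBSFContext *bsf, AVPacket *out)
{
    EVCFMergeContext *ctx = bsf->priv_data;     /* hypothetical private context */
    AVPacket *in = ctx->in;
    int err;

    if (!in->size) {
        err = ff_bsf_get_packet_ref(bsf, in);   /* AVERROR(EAGAIN) if no input is queued */
        if (err < 0)
            return err;
    }

    /* ... consume NAL units from in and append them to the AU buffer ... */

    if (!in->size)
        av_packet_unref(in);        /* this input packet is fully consumed */

    /* ... once an access-unit boundary is found, fill out and return 0 ... */
    return AVERROR(EAGAIN);         /* otherwise: need more input for a complete AU */
}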
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static int evc_frame_merge_filter(AVBSFContext *bsf, AVPacket *out)
int ff_evc_derive_poc(const EVCParamSets *ps, const EVCParserSliceHeader *sh, EVCParserPoc *poc, enum EVCNALUnitType nalu_type, int tid)
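
parse_nal_unit() presumably dispatches on the NAL unit type to the parsers listed above and then derives the picture order count consumed by end_of_access_unit_found(). A hedged outline built only from the signatures on this page (the EVC_*_NUT enumerator names are assumed, and error handling is trimmed):

static int parse_nal_unit_sketch(GetBitContext *gb, EVCParamSets *ps,
                                 EVCParserSliceHeader *sh, EVCParserPoc *poc,
                                 enum EVCNALUnitType nalu_type, int tid)
{
    int err;

    switch (nalu_type) {
    case EVC_SPS_NUT:                /* enumerator names assumed */
        return ff_evc_parse_sps(gb, ps);
    case EVC_PPS_NUT:
        return ff_evc_parse_pps(gb, ps);
    default:                         /* treat the rest as slice data for this sketch */
        err = ff_evc_parse_slice_header(gb, sh, ps, nalu_type);
        if (err < 0)
            return err;
        return ff_evc_derive_poc(ps, sh, poc, nalu_type, tid);
    }
}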