Go to the documentation of this file.
45 #include <theora/theoraenc.h>
66 if (packet->bytes < 0) {
67 message =
"ogg_packet has negative size";
68 }
else if (packet->bytes > 0xffff) {
69 message =
"ogg_packet is larger than 65535 bytes";
70 }
else if (newsize < avc_context->extradata_size) {
71 message =
"extradata_size would overflow";
86 memcpy(avc_context->
extradata + (*
offset), packet->packet, packet->bytes);
87 (*offset) += packet->bytes;
93 #ifdef TH_ENCCTL_2PASS_OUT
98 bytes = th_encode_ctl(
h->t_state, TH_ENCCTL_2PASS_OUT, &buf,
sizeof(buf));
105 h->stats_offset + bytes);
109 memcpy(
h->stats +
h->stats_offset, buf, bytes);
110 h->stats_offset += bytes;
114 memcpy(
h->stats, buf, bytes);
131 #ifdef TH_ENCCTL_2PASS_IN
139 h->stats_size = strlen(avctx->
stats_in) * 3/4;
147 while (
h->stats_size -
h->stats_offset > 0) {
148 bytes = th_encode_ctl(
h->t_state, TH_ENCCTL_2PASS_IN,
149 h->stats +
h->stats_offset,
150 h->stats_size -
h->stats_offset);
157 h->stats_offset += bytes;
169 th_comment t_comment;
177 th_info_init(&t_info);
180 t_info.pic_width = avc_context->
width;
181 t_info.pic_height = avc_context->
height;
192 t_info.aspect_numerator = 1;
193 t_info.aspect_denominator = 1;
197 t_info.colorspace = TH_CS_ITU_REC_470M;
199 t_info.colorspace = TH_CS_ITU_REC_470BG;
201 t_info.colorspace = TH_CS_UNSPECIFIED;
204 t_info.pixel_fmt = TH_PF_420;
206 t_info.pixel_fmt = TH_PF_422;
208 t_info.pixel_fmt = TH_PF_444;
224 t_info.target_bitrate = 0;
226 t_info.target_bitrate = avc_context->
bit_rate;
231 h->t_state = th_encode_alloc(&t_info);
237 h->keyframe_mask = (1 << t_info.keyframe_granule_shift) - 1;
239 th_info_clear(&t_info);
241 if (th_encode_ctl(
h->t_state, TH_ENCCTL_SET_KEYFRAME_FREQUENCY_FORCE,
266 th_comment_init(&t_comment);
268 while (th_encode_flushheader(
h->t_state, &t_comment, &o_packet))
272 th_comment_clear(&t_comment);
280 th_ycbcr_buffer t_yuv_buffer;
287 th_encode_packetout(
h->t_state, 1, &o_packet);
295 for (
i = 0;
i < 3;
i++) {
296 t_yuv_buffer[
i].width =
FFALIGN(avc_context->
width, 16) >> (
i &&
h->uv_hshift);
297 t_yuv_buffer[
i].height =
FFALIGN(avc_context->
height, 16) >> (
i &&
h->uv_vshift);
298 t_yuv_buffer[
i].stride =
frame->linesize[
i];
299 t_yuv_buffer[
i].data =
frame->data[
i];
307 result = th_encode_ycbcr_in(
h->t_state, t_yuv_buffer);
312 message =
"differing frame sizes";
315 message =
"encoder is not ready or is finished";
330 result = th_encode_packetout(
h->t_state, 0, &o_packet);
346 memcpy(
pkt->
data, o_packet.packet, o_packet.bytes);
357 if (!(o_packet.granulepos &
h->keyframe_mask))
368 th_encode_free(
h->t_state);
378 .
p.
name =
"libtheora",
395 .p.wrapper_name =
"libtheora",
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
static av_cold int encode_close(AVCodecContext *avc_context)
AVPixelFormat
Pixel format.
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
This structure describes decoded (raw) audio or video data.
#define FF_CODEC_CAP_NOT_INIT_THREADSAFE
The codec is not known to be init-threadsafe (i.e.
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
AVCodec p
The public AVCodec.
int flags
AV_CODEC_FLAG_*.
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
#define FF_CODEC_ENCODE_CB(func)
static int ogg_packet(AVFormatContext *s, int *sid, int *dstart, int *dsize, int64_t *fpos)
find the next Ogg packet
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
int global_quality
Global quality for codecs which cannot change it per frame.
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding ou...
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define CODEC_LONG_NAME(str)
@ AVCOL_PRI_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
and forward the result (frame or status change) to the corresponding input. If nothing is possible
int64_t bit_rate
the average bitrate
static int encode_frame(AVCodecContext *avc_context, AVPacket *pkt, const AVFrame *frame, int *got_packet)
int av_base64_decode(uint8_t *out, const char *in_str, int out_size)
Decode a base64-encoded string.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
char * stats_out
pass1 encoding statistics output buffer
int(* init)(AVBSFContext *ctx)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
int av_reallocp(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory through a pointer to a pointer.
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
#define AVERROR_EXTERNAL
Generic error in an external library.
it's the only field you need to keep assuming you have a context There is some magic you don't need to care about around this just let it vf offset
int flags
A combination of AV_PKT_FLAG values.
#define AV_BASE64_SIZE(x)
Calculate the output size needed to base64-encode x bytes to a null-terminated string.
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
const FFCodec ff_libtheora_encoder
AVCodec struct exposed to libavcodec.
const char * name
Name of the codec implementation.
static int concatenate_packet(unsigned int *offset, AVCodecContext *avc_context, const ogg_packet *packet)
Concatenate an ogg_packet into the extradata.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
@ AVCOL_PRI_BT470M
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter's request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
static av_cold int encode_init(AVCodecContext *avc_context)
main external API structure.
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
static int get_stats(AVCodecContext *avctx, int eos)
char * av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size)
Encode data to base64 and null-terminate.
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
static int submit_stats(AVCodecContext *avctx)
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
static int FUNC() message(CodedBitstreamContext *ctx, RWContext *rw, SEIRawMessage *current)
This structure stores compressed data.
int width
picture width / height.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.