Go to the documentation of this file.
44 #include <theora/theoraenc.h>
65 if (packet->bytes < 0) {
66 message =
"ogg_packet has negative size";
67 }
else if (packet->bytes > 0xffff) {
68 message =
"ogg_packet is larger than 65535 bytes";
69 }
else if (newsize < avc_context->extradata_size) {
70 message =
"extradata_size would overflow";
85 memcpy(avc_context->
extradata + (*
offset), packet->packet, packet->bytes);
86 (*offset) += packet->bytes;
92 #ifdef TH_ENCCTL_2PASS_OUT
97 bytes = th_encode_ctl(
h->t_state, TH_ENCCTL_2PASS_OUT, &buf,
sizeof(buf));
104 h->stats_offset + bytes);
108 memcpy(
h->stats +
h->stats_offset, buf, bytes);
109 h->stats_offset += bytes;
113 memcpy(
h->stats, buf, bytes);
130 #ifdef TH_ENCCTL_2PASS_IN
138 h->stats_size = strlen(avctx->
stats_in) * 3/4;
146 while (
h->stats_size -
h->stats_offset > 0) {
147 bytes = th_encode_ctl(
h->t_state, TH_ENCCTL_2PASS_IN,
148 h->stats +
h->stats_offset,
149 h->stats_size -
h->stats_offset);
156 h->stats_offset += bytes;
168 th_comment t_comment;
176 th_info_init(&t_info);
179 t_info.pic_width = avc_context->
width;
180 t_info.pic_height = avc_context->
height;
191 t_info.aspect_numerator = 1;
192 t_info.aspect_denominator = 1;
196 t_info.colorspace = TH_CS_ITU_REC_470M;
198 t_info.colorspace = TH_CS_ITU_REC_470BG;
200 t_info.colorspace = TH_CS_UNSPECIFIED;
203 t_info.pixel_fmt = TH_PF_420;
205 t_info.pixel_fmt = TH_PF_422;
207 t_info.pixel_fmt = TH_PF_444;
223 t_info.target_bitrate = 0;
225 t_info.target_bitrate = avc_context->
bit_rate;
230 h->t_state = th_encode_alloc(&t_info);
236 h->keyframe_mask = (1 << t_info.keyframe_granule_shift) - 1;
238 th_info_clear(&t_info);
240 if (th_encode_ctl(
h->t_state, TH_ENCCTL_SET_KEYFRAME_FREQUENCY_FORCE,
265 th_comment_init(&t_comment);
267 while (th_encode_flushheader(
h->t_state, &t_comment, &o_packet))
271 th_comment_clear(&t_comment);
279 th_ycbcr_buffer t_yuv_buffer;
286 th_encode_packetout(
h->t_state, 1, &o_packet);
294 for (
i = 0;
i < 3;
i++) {
295 t_yuv_buffer[
i].width =
FFALIGN(avc_context->
width, 16) >> (
i &&
h->uv_hshift);
296 t_yuv_buffer[
i].height =
FFALIGN(avc_context->
height, 16) >> (
i &&
h->uv_vshift);
297 t_yuv_buffer[
i].stride =
frame->linesize[
i];
298 t_yuv_buffer[
i].data =
frame->data[
i];
306 result = th_encode_ycbcr_in(
h->t_state, t_yuv_buffer);
311 message =
"differing frame sizes";
314 message =
"encoder is not ready or is finished";
329 result = th_encode_packetout(
h->t_state, 0, &o_packet);
345 memcpy(
pkt->
data, o_packet.packet, o_packet.bytes);
350 if (!(o_packet.granulepos &
h->keyframe_mask))
361 th_encode_free(
h->t_state);
371 .
p.
name =
"libtheora",
384 .p.wrapper_name =
"libtheora",
static av_cold int encode_close(AVCodecContext *avc_context)
AVPixelFormat
Pixel format.
Filter the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format for each input and each output: the list of supported formats. For video that means pixel format. For audio that means channel layout and sample format; they are references to shared objects. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
This structure describes decoded (raw) audio or video data.
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
AVCodec p
The public AVCodec.
int flags
AV_CODEC_FLAG_*.
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
#define FF_CODEC_ENCODE_CB(func)
static int ogg_packet(AVFormatContext *s, int *sid, int *dstart, int *dsize, int64_t *fpos)
find the next Ogg packet
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
int global_quality
Global quality for codecs which cannot change it per frame.
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
@ AVCOL_PRI_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
and forward the result (frame or status change) to the corresponding input. If nothing is possible,
int64_t bit_rate
the average bitrate
static int encode_frame(AVCodecContext *avc_context, AVPacket *pkt, const AVFrame *frame, int *got_packet)
int av_base64_decode(uint8_t *out, const char *in_str, int out_size)
Decode a base64-encoded string.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
char * stats_out
pass1 encoding statistics output buffer
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
int av_reallocp(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory through a pointer to a pointer.
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
#define AVERROR_EXTERNAL
Generic error in an external library.
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it vf offset
int flags
A combination of AV_PKT_FLAG values.
#define AV_BASE64_SIZE(x)
Calculate the output size needed to base64-encode x bytes to a null-terminated string.
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
const FFCodec ff_libtheora_encoder
AVCodec struct exposed to libavcodec.
const char * name
Name of the codec implementation.
static int concatenate_packet(unsigned int *offset, AVCodecContext *avc_context, const ogg_packet *packet)
Concatenate an ogg_packet into the extradata.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
@ AVCOL_PRI_BT470M
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
these buffered frames must be flushed immediately if a new input produces new frames; the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: For filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed. If the filter should request a frame on one of its inputs, do so repeatedly until at least one frame has been pushed. Return, or at least make progress towards producing a frame.
static av_cold int encode_init(AVCodecContext *avc_context)
main external API structure.
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
static int get_stats(AVCodecContext *avctx, int eos)
char * av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size)
Encode data to base64 and null-terminate.
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
static int submit_stats(AVCodecContext *avctx)
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
static int FUNC() message(CodedBitstreamContext *ctx, RWContext *rw, SEIRawMessage *current)
This structure stores compressed data.
int width
picture width / height.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.