/* excerpt from qoi_decode_frame() */
const uint8_t *buf = avpkt->data;
int ret, buf_size = avpkt->size;
uint8_t index[64][4] = { 0 };
uint8_t px[4] = { 0, 0, 0, 255 };

width    = bytestream2_get_be32(&gb);
height   = bytestream2_get_be32(&gb);
channels = bytestream2_get_byte(&gb);
space    = bytestream2_get_byte(&gb);

for (int n = 0, off_x = 0; n < len; n += channels, off_x++) {

    int chunk = bytestream2_get_byteu(&gb);

    /* QOI_OP_INDEX: copy a previously seen pixel from the 64-entry table */
    memcpy(px, index[chunk], 4);

    /* QOI_OP_DIFF: 2-bit per-channel deltas (biased by 2) packed into the tag byte */
    px[0] += ((chunk >> 4) & 0x03) - 2;
    px[1] += ((chunk >> 2) & 0x03) - 2;
    px[2] += ( chunk       & 0x03) - 2;

    /* QOI_OP_LUMA: 6-bit green delta in the tag, red/blue deltas relative to it in a second byte */
    int b2 = bytestream2_get_byteu(&gb);
    int vg = (chunk & 0x3f) - 32;
    px[0] += vg - 8 + ((b2 >> 4) & 0x0f);
    px[2] += vg - 8 +  (b2       & 0x0f);
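The arithmetic in the excerpt follows the QOI chunk encodings: QOI_OP_DIFF packs 2-bit per-channel deltas (biased by 2) into the tag byte, and QOI_OP_LUMA stores a 6-bit green delta (biased by 32) plus 4-bit red and blue deltas relative to it (biased by 8) in a second byte. A self-contained sketch of those two updates outside libavcodec (apply_diff, apply_luma, and the standalone px state are illustrative names, not decoder internals):

#include <stdint.h>
#include <stdio.h>

/* Apply a QOI_OP_DIFF chunk: 2-bit deltas for r, g, b, each biased by 2. */
static void apply_diff(uint8_t px[4], int tag)
{
    px[0] += ((tag >> 4) & 0x03) - 2;
    px[1] += ((tag >> 2) & 0x03) - 2;
    px[2] += ( tag       & 0x03) - 2;
}

/* Apply a QOI_OP_LUMA chunk: 6-bit green delta (biased by 32) in the tag,
 * 4-bit red/blue deltas relative to the green delta (biased by 8) in b2. */
static void apply_luma(uint8_t px[4], int tag, int b2)
{
    int vg = (tag & 0x3f) - 32;
    px[0] += vg - 8 + ((b2 >> 4) & 0x0f);
    px[1] += vg;
    px[2] += vg - 8 +  (b2       & 0x0f);
}

int main(void)
{
    uint8_t px[4] = { 0, 0, 0, 255 };                          /* QOI starting pixel */
    apply_diff(px, 0x40 | (3 << 4) | (2 << 2) | 0);            /* dr=+1, dg=0, db=-2 */
    apply_luma(px, 0x80 | (32 + 5), ((8 + 2) << 4) | (8 - 3)); /* dg=+5, dr=dg+2, db=dg-3 */
    printf("r=%d g=%d b=%d a=%d\n", px[0], px[1], px[2], px[3]);
    return 0;
}

The uint8_t additions wrap modulo 256, which is exactly the wrap-around behavior the QOI format relies on.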
 
  
Undefined Behavior: In the C language, some operations are undefined, like signed integer overflow, dereferencing freed pointers, accessing outside allocated space, ...
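A minimal illustration of the distinction, not taken from the FFmpeg sources: unsigned arithmetic such as the uint8_t pixel updates above wraps modulo 2^N and is well defined, while the signed and pointer cases below are undefined.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    uint8_t px = 250;
    px += 10;                /* defined: unsigned wrap-around, px == 4 */
    printf("px = %d\n", px);

    int s = INT_MAX;
    /* s + 1;                undefined: signed integer overflow */

    int *q = malloc(sizeof(*q));
    free(q);
    /* *q;                   undefined: dereferencing a freed pointer */
    /* q[1];                 undefined: accessing outside allocated space */

    (void)s;
    return 0;
}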
 
@ AVCOL_TRC_LINEAR
"Linear transfer characteristics"
 
AVFrame
This structure describes decoded (raw) audio or video data.
 
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
 
const FFCodec ff_qoi_decoder
 
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
 
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
 
AVCodec p
The public AVCodec.
 
The pkt_dts and pkt_pts fields in AVFrame will work as usual. Restrictions on codec implementations: codecs whose streams don't reset across frames will not work because their bitstreams cannot be decoded in parallel. The contents of buffers must not be read before ff_thread_await_progress() has been called on them. Move code that changes context variables needed by the next frame, as well as code calling get_buffer(), up to before the decode process starts, and call ff_thread_finish_setup() afterwards. If there are inter-frame dependencies, so the codec calls ff_thread_report/await_progress(), set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec.caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn't called anywhere.
 
int key_frame
1 -> keyframe, 0 -> not
 
static int qoi_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, AVPacket *avpkt)
 
#define FF_CODEC_DECODE_CB(func)
 
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
 
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
 
@ AV_PICTURE_TYPE_I
Intra.
 
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
 
enum AVPictureType pict_type
Picture type of the frame.
 
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
 
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
 
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
 
const char * name
Name of the codec implementation.
 
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
 
#define QOI_COLOR_HASH(px)
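The QOI specification defines the index position of a pixel as (r*3 + g*5 + b*7 + a*11) % 64; a minimal sketch along those lines (the macro name and the placement of the % 64 are illustrative, and the FFmpeg macro may differ in detail):

#include <stdint.h>
#include <stdio.h>

/* Index-table hash from the QOI specification; whether the % 64 lives in the
 * macro or at the call site is an implementation detail of the decoder. */
#define QOI_COLOR_HASH_SKETCH(px) \
    (((px)[0] * 3 + (px)[1] * 5 + (px)[2] * 7 + (px)[3] * 11) % 64)

int main(void)
{
    uint8_t px[4] = { 10, 20, 30, 255 };
    printf("index slot %d\n", QOI_COLOR_HASH_SKETCH(px)); /* (10*3 + 20*5 + 30*7 + 255*11) % 64 == 9 */
    return 0;
}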
 
AVCodecContext
main external API structure.
 
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
 
static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g, uint8_t *dst, unsigned int size)
 
AVPacket
This structure stores compressed data.
 
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
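bytestream2_init() and the bytestream2_get_be32()/bytestream2_get_byte() calls in the listing above read the 14-byte QOI header: the magic "qoif", big-endian 32-bit width and height, an 8-bit channel count, and an 8-bit colorspace flag. A self-contained sketch of the same parse without the FFmpeg bytestream helpers (parse_qoi_header and rd_be32 are illustrative names, not libavcodec functions):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t rd_be32(const uint8_t *p)
{
    return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 | (uint32_t)p[2] << 8 | p[3];
}

/* Parse the 14-byte QOI header; returns 0 on success, -1 on malformed input. */
static int parse_qoi_header(const uint8_t *buf, size_t size,
                            uint32_t *width, uint32_t *height,
                            int *channels, int *colorspace)
{
    if (size < 14 || memcmp(buf, "qoif", 4))
        return -1;
    *width      = rd_be32(buf + 4);
    *height     = rd_be32(buf + 8);
    *channels   = buf[12];          /* 3 = RGB, 4 = RGBA */
    *colorspace = buf[13];          /* 0 = sRGB with linear alpha, 1 = all channels linear */
    if ((*channels != 3 && *channels != 4) || *colorspace > 1)
        return -1;
    return 0;
}

int main(void)
{
    const uint8_t hdr[14] = { 'q','o','i','f', 0,0,0,2, 0,0,0,2, 4, 0 };
    uint32_t w, h;
    int ch, cs;
    if (!parse_qoi_header(hdr, sizeof(hdr), &w, &h, &ch, &cs))
        printf("%ux%u, %d channels, colorspace %d\n", (unsigned)w, (unsigned)h, ch, cs);
    return 0;
}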
 
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which typically indicates the size in bytes of each picture line...
 
#define AVERROR_INVALIDDATA
Invalid data found when processing input.