Go to the documentation of this file.
   44         uint8_t *
d = 
frame->data[0] + *y * 
frame->linesize[0];
 
   45         if (*x + 
run >= 
s->width) {
 
   46             int n = 
s->width - *x;
 
   62                       int *x, 
int *y, 
int *plane, 
int bits_per_plane)
 
   65     int shift = *plane * bits_per_plane;
 
   66     unsigned mask  = ((1
U << bits_per_plane) - 1) << 
shift;
 
   70     int pixels_per_value = 8/bits_per_plane;
 
   76         for (j = 8-bits_per_plane; j >= 0; j -= bits_per_plane) {
 
   79             while (xl == 
s->width) {
 
   85                    if (planel >= 
s->nb_planes)
 
   87                    value <<= bits_per_plane;
 
   88                    mask  <<= bits_per_plane;
 
   91                 if (
s->nb_planes == 1 &&
 
   92                     run*pixels_per_value >= 
s->width &&
 
   93                     pixels_per_value < (
s->width / pixels_per_value * pixels_per_value)
 
   95                     for (; xl < pixels_per_value; xl ++) {
 
   96                         j = (j < bits_per_plane ? 8 : j) - bits_per_plane;
 
  100                     run -= 
s->width / pixels_per_value;
 
  101                     xl = 
s->width / pixels_per_value * pixels_per_value;
 
  114     [0] = { 0, 3,  5,   7 }, 
 
  115     [1] = { 0, 2,  4,   6 }, 
 
  116     [2] = { 0, 3,  4,   7 }, 
 
  117     [3] = { 0, 11, 13, 15 }, 
 
  118     [4] = { 0, 10, 12, 14 }, 
 
  119     [5] = { 0, 11, 12, 15 }, 
 
  127     int bits_per_plane, bpp, etype, esize, npal, pos_after_pal;
 
  135     if (bytestream2_get_le16u(&
s->g) != 0x1234)
 
  138     s->width       = bytestream2_get_le16u(&
s->g);
 
  139     s->height      = bytestream2_get_le16u(&
s->g);
 
  141     tmp            = bytestream2_get_byteu(&
s->g);
 
  142     bits_per_plane = 
tmp & 0xF;
 
  143     s->nb_planes   = (
tmp >> 4) + 1;
 
  144     bpp            = bits_per_plane * 
s->nb_planes;
 
  145     if (bits_per_plane > 8 || bpp < 1 || bpp > 32) {
 
  150     if (bytestream2_peek_byte(&
s->g) == 0xFF || bpp == 1 || bpp == 4 || bpp == 8) {
 
  152         etype = bytestream2_get_le16(&
s->g);
 
  153         esize = bytestream2_get_le16(&
s->g);
 
  184     if (
s->width != avctx->
width || 
s->height != avctx->
height) {
 
  192     memset(
frame->data[0], 0, 
s->height * 
frame->linesize[0]);
 
  194     frame->palette_has_changed = 1;
 
  197     palette = (uint32_t*)
frame->data[1];
 
  198     if (etype == 1 && esize > 1 && bytestream2_peek_byte(&
s->g) < 6) {
 
  199         int idx = bytestream2_get_byte(&
s->g);
 
  201         for (
i = 0; 
i < npal; 
i++)
 
  203     } 
else if (etype == 2) {
 
  204         npal = 
FFMIN(esize, 16);
 
  205         for (
i = 0; 
i < npal; 
i++) {
 
  206             int pal_idx = bytestream2_get_byte(&
s->g);
 
  209     } 
else if (etype == 3) {
 
  210         npal = 
FFMIN(esize, 16);
 
  211         for (
i = 0; 
i < npal; 
i++) {
 
  212             int pal_idx = bytestream2_get_byte(&
s->g);
 
  215     } 
else if (etype == 4 || etype == 5) {
 
  216         npal = 
FFMIN(esize / 3, 256);
 
  217         for (
i = 0; 
i < npal; 
i++) {
 
  218             palette[
i] = bytestream2_get_be24(&
s->g) << 2;
 
  219             palette[
i] |= 0xFFU << 24 | palette[i] >> 6 & 0x30303;
 
  224             palette[0] = 0xFF000000;
 
  225             palette[1] = 0xFFFFFFFF;
 
  226         } 
else if (bpp == 2) {
 
  228             for (
i = 0; 
i < npal; 
i++)
 
  242     if (bytestream2_get_le16(&
s->g)) {
 
  246             int stop_size, marker, 
t1, 
t2;
 
  249             t2        = bytestream2_get_le16(&
s->g);
 
  253             marker    = bytestream2_get_byte(&
s->g);
 
  255             while (plane < s->nb_planes &&
 
  258                 val = bytestream2_get_byte(&
s->g);
 
  260                     run = bytestream2_get_byte(&
s->g);
 
  262                         run = bytestream2_get_le16(&
s->g);
 
  263                     val = bytestream2_get_byte(&
s->g);
 
  266                 if (bits_per_plane == 8) {
 
  276         if (
s->nb_planes - plane > 1)
 
  279         if (plane < s->nb_planes && x < avctx->
width) {
 
  280             int run = (y + 1) * avctx->
width - x;
 
  281             if (bits_per_plane == 8)
 
  
static void picmemset(PicContext *s, AVFrame *frame, unsigned value, int run, int *x, int *y, int *plane, int bits_per_plane)
 
static const uint8_t cga_mode45_index[6][4]
 
const uint32_t ff_cga_palette[16]
 
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
 
This structure describes decoded (raw) audio or video data.
 
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
 
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
 
AVCodec p
The public AVCodec.
 
static double val(void *priv, double ch)
 
static const uint16_t mask[17]
 
void av_memcpy_backptr(uint8_t *dst, int back, int cnt)
Overlapping memcpy() implementation.
 
#define FF_CODEC_DECODE_CB(func)
 
#define CODEC_LONG_NAME(str)
 
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
 
const uint32_t ff_ega_palette[64]
 
@ AV_PICTURE_TYPE_I
Intra.
 
static void picmemset_8bpp(PicContext *s, AVFrame *frame, int value, int run, int *x, int *y)
 
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
 
static av_always_inline int bytestream2_tell(GetByteContext *g)
 
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
 
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
 
static int shift(int a, int b)
 
#define i(width, name, range_min, range_max)
 
it's the only field you need to keep assuming you have a context. There is some magic you don't need to care about around this; just let it be the default value.
 
const char * name
Name of the codec implementation.
 
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
 
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
 
These buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed. If the filter should request a frame on one of its inputs, it should do so repeatedly until at least one frame has been pushed. Return, or at least make progress towards producing a frame.
 
main external API structure.
 
#define avpriv_request_sample(...)
 
This structure stores compressed data.
 
int width
picture width / height.
 
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
 
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
 
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be addressed with a signed int.
 
const FFCodec ff_pictor_decoder
 
static int decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)