Go to the documentation of this file.
   66 #define CACHE_SIZE (1<<(3*NBITS)) 
  109 #define OFFSET(x) offsetof(PaletteUseContext, x) 
  110 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM 
  122     { 
"alpha_threshold", 
"set the alpha threshold for transparency", 
OFFSET(trans_thresh), 
AV_OPT_TYPE_INT, {.i64=128}, 0, 255, 
FLAGS },
 
  148     if (!
in || !inpal || !
out) {
 
  163     return av_clip_uint8( px >> 24                                      ) << 24
 
  164          | av_clip_uint8((px >> 16 & 0xff) + ((er * scale) / (1<<
shift))) << 16
 
  165          | av_clip_uint8((px >>  8 & 0xff) + ((eg * scale) / (1<<
shift))) <<  8
 
  166          | av_clip_uint8((px       & 0xff) + ((eb * scale) / (1<<
shift)));
 
  172     const int dr = 
c1[1] - 
c2[1];
 
  173     const int dg = 
c1[2] - 
c2[2];
 
  174     const int db = 
c1[3] - 
c2[3];
 
  176     if (
c1[0] < trans_thresh && 
c2[0] < trans_thresh) {
 
  178     } 
else if (
c1[0] >= trans_thresh && 
c2[0] >= trans_thresh) {
 
  179         return dr*dr + dg*dg + db*db;
 
  181         return 255*255 + 255*255 + 255*255;
 
  187     int i, pal_id = -1, min_dist = INT_MAX;
 
  190         const uint32_t 
c = palette[
i];
 
  192         if (
c >> 24 >= trans_thresh) { 
 
  194                 palette[
i]>>24 & 0xff,
 
  195                 palette[
i]>>16 & 0xff,
 
  196                 palette[
i]>> 8 & 0xff,
 
  199             const int d = 
diff(palargb, argb, trans_thresh);
 
  218                                   const int trans_thresh,
 
  223     int dx, nearer_kd_id, further_kd_id;
 
  225     const int current_to_target = 
diff(target, current, trans_thresh);
 
  227     if (current_to_target < nearest->dist_sqd) {
 
  229         nearest->
dist_sqd = current_to_target;
 
  233         dx = target[
s] - current[
s];
 
  238         if (nearer_kd_id != -1)
 
  241         if (further_kd_id != -1 && dx*dx < nearest->dist_sqd)
 
  260     int pos = 0, best_node_id = -1, best_dist = INT_MAX, cur_color_id = 0;
 
  266         const struct color_node *kd = &root[cur_color_id];
 
  268         const int current_to_target = 
diff(target, current, trans_thresh);
 
  272         if (current_to_target < best_dist) {
 
  273             best_node_id = cur_color_id;
 
  274             if (!current_to_target)
 
  276             best_dist = current_to_target;
 
  282             const int dx = target[
split] - current[
split];
 
  283             int nearer_kd_id, further_kd_id;
 
  289             if (nearer_kd_id != -1) {
 
  290                 if (further_kd_id != -1) {
 
  301                 cur_color_id = nearer_kd_id;
 
  303             } 
else if (dx*dx < best_dist) {
 
  308                 cur_color_id = further_kd_id;
 
  319         } 
while (node->
dx2 >= best_dist);
 
  330 #define COLORMAP_NEAREST(search, palette, root, target, trans_thresh)                                    \ 
  331     search == COLOR_SEARCH_NNS_ITERATIVE ? colormap_nearest_iterative(root, target, trans_thresh) :      \ 
  332     search == COLOR_SEARCH_NNS_RECURSIVE ? colormap_nearest_recursive(root, target, trans_thresh) :      \ 
  333                                            colormap_nearest_bruteforce(palette, target, trans_thresh) 
  355     if (a < s->trans_thresh && 
s->transparency_index >= 0) {
 
  356         return s->transparency_index;
 
  376                                               uint32_t 
c, 
int *er, 
int *eg, 
int *eb,
 
  387     dstc = 
s->palette[dstx];
 
  388     *er = 
r - (dstc >> 16 & 0xff);
 
  389     *eg = 
g - (dstc >>  8 & 0xff);
 
  390     *eb = 
b - (dstc       & 0xff);
 
  395                                       int x_start, 
int y_start, 
int w, 
int h,
 
  400     const int src_linesize = 
in ->linesize[0] >> 2;
 
  401     const int dst_linesize = 
out->linesize[0];
 
  402     uint32_t *
src = ((uint32_t *)
in ->
data[0]) + y_start*src_linesize;
 
  403     uint8_t  *dst =              
out->data[0]  + y_start*dst_linesize;
 
  408     for (y = y_start; y < 
h; y++) {
 
  409         for (x = x_start; x < 
w; x++) {
 
  413                 const int d = 
s->ordered_dither[(y & 7)<<3 | (x & 7)];
 
  418                 const uint8_t r = av_clip_uint8(r8 + d);
 
  419                 const uint8_t g = av_clip_uint8(g8 + d);
 
  420                 const uint8_t b = av_clip_uint8(b8 + d);
 
  428                 const int right = x < 
w - 1, down = y < 
h - 1;
 
  436                 if (         down) 
src[src_linesize + x    ] = 
dither_color(
src[src_linesize + x    ], er, eg, eb, 3, 3);
 
  437                 if (right && down) 
src[src_linesize + x + 1] = 
dither_color(
src[src_linesize + x + 1], er, eg, eb, 2, 3);
 
  440                 const int right = x < 
w - 1, down = y < 
h - 1, 
left = x > x_start;
 
  449                 if (         down) 
src[src_linesize + x    ] = 
dither_color(
src[src_linesize + x    ], er, eg, eb, 5, 4);
 
  450                 if (right && down) 
src[src_linesize + x + 1] = 
dither_color(
src[src_linesize + x + 1], er, eg, eb, 1, 4);
 
  453                 const int right  = x < 
w - 1, down  = y < 
h - 1, 
left  = x > x_start;
 
  454                 const int right2 = x < 
w - 2,                    left2 = x > x_start + 1;
 
  465                     if (left2)      
src[  src_linesize + x - 2] = 
dither_color(
src[  src_linesize + x - 2], er, eg, eb, 1, 4);
 
  467                     if (1)          
src[  src_linesize + x    ] = 
dither_color(
src[  src_linesize + x    ], er, eg, eb, 3, 4);
 
  468                     if (right)      
src[  src_linesize + x + 1] = 
dither_color(
src[  src_linesize + x + 1], er, eg, eb, 2, 4);
 
  469                     if (right2)     
src[  src_linesize + x + 2] = 
dither_color(
src[  src_linesize + x + 2], er, eg, eb, 1, 4);
 
  473                 const int right = x < 
w - 1, down = y < 
h - 1, 
left = x > x_start;
 
  482                 if (         down) 
src[src_linesize + x    ] = 
dither_color(
src[src_linesize + x    ], er, eg, eb, 1, 2);
 
  505                       int parent_id, 
int node_id,
 
  509     const uint32_t fontcolor = node->
val[1] > 0x50 &&
 
  510                                node->
val[2] > 0x50 &&
 
  511                                node->
val[3] > 0x50 ? 0 : 0xffffff;
 
  512     const int rgb_comp = node->
split - 1;
 
  514                "label=\"%c%02X%c%02X%c%02X%c\" " 
  515                "fillcolor=\"#%02x%02x%02x\" " 
  516                "fontcolor=\"#%06"PRIX32
"\"]\n",
 
  518                "[  "[rgb_comp], node->
val[1],
 
  519                "][ "[rgb_comp], node->
val[2],
 
  520                " ]["[rgb_comp], node->
val[3],
 
  522                node->
val[1], node->
val[2], node->
val[3],
 
  547     av_bprintf(&
buf, 
"    node [style=filled fontsize=10 shape=box]\n");
 
  551     fwrite(
buf.str, 1, 
buf.len, 
f);
 
  562     for (
r = 0; 
r < 256; 
r++) {
 
  563         for (
g = 0; 
g < 256; 
g++) {
 
  564             for (
b = 0; 
b < 256; 
b++) {
 
  566                 const int r1 = 
COLORMAP_NEAREST(search_method, palette, node, argb, trans_thresh);
 
  569                     const uint32_t 
c1 = palette[r1];
 
  570                     const uint32_t 
c2 = palette[r2];
 
  571                     const uint8_t palargb1[] = { 0xff, 
c1>>16 & 0xff, 
c1>> 8 & 0xff, 
c1 & 0xff };
 
  572                     const uint8_t palargb2[] = { 0xff, 
c2>>16 & 0xff, 
c2>> 8 & 0xff, 
c2 & 0xff };
 
  573                     const int d1 = 
diff(palargb1, argb, trans_thresh);
 
  574                     const int d2 = 
diff(palargb2, argb, trans_thresh);
 
  577                                "/!\\ %02X%02X%02X: %d ! %d (%06"PRIX32
" ! %06"PRIX32
") / dist: %d ! %d\n",
 
  578                                r, 
g, 
b, r1, r2, 
c1 & 0xffffff, 
c2 & 0xffffff, d1, d2);
 
  600 #define DECLARE_CMP_FUNC(name, pos)                     \ 
  601 static int cmp_##name(const void *pa, const void *pb)   \ 
  603     const struct color *a = pa;                         \ 
  604     const struct color *b = pb;                         \ 
  605     return   (a->value >> (8 * (3 - (pos))) & 0xff)     \ 
  606            - (b->value >> (8 * (3 - (pos))) & 0xff);    \ 
  617                           const int trans_thresh,
 
  622     unsigned nb_color = 0;
 
  624     struct color tmp_pal[256];
 
  627     ranges.
min[0] = ranges.
min[1] = ranges.
min[2] = 0xff;
 
  628     ranges.
max[0] = ranges.
max[1] = ranges.
max[2] = 0x00;
 
  631         const uint32_t 
c = palette[
i];
 
  637         if (
a < trans_thresh) {
 
  641         if (color_used[
i] || (
a != 0xff) ||
 
  642             r < box->
min[0] || g < box->
min[1] || b < box->
min[2] ||
 
  646         if (
r < ranges.
min[0]) ranges.
min[0] = 
r;
 
  647         if (
g < ranges.
min[1]) ranges.
min[1] = 
g;
 
  648         if (
b < ranges.
min[2]) ranges.
min[2] = 
b;
 
  650         if (
r > ranges.
max[0]) ranges.
max[0] = 
r;
 
  651         if (
g > ranges.
max[1]) ranges.
max[1] = 
g;
 
  652         if (
b > ranges.
max[2]) ranges.
max[2] = 
b;
 
  654         tmp_pal[nb_color].
value  = 
c;
 
  664     wr = ranges.
max[0] - ranges.
min[0];
 
  665     wg = ranges.
max[1] - ranges.
min[1];
 
  666     wb = ranges.
max[2] - ranges.
min[2];
 
  667     if (wr >= wg && wr >= wb) longest = 1;
 
  668     if (wg >= wr && wg >= wb) longest = 2;
 
  669     if (wb >= wr && wb >= wg) longest = 3;
 
  671     *component = longest;
 
  676     return tmp_pal[nb_color >> 1].
pal_id;
 
  682                            const uint32_t *palette,
 
  683                            const int trans_thresh,
 
  687     int component, cur_id;
 
  688     int node_left_id = -1, node_right_id = -1;
 
  691     const int pal_id = 
get_next_color(color_used, palette, trans_thresh, &component, box);
 
  697     cur_id = (*nb_used)++;
 
  700     node->
split = component;
 
  702     node->
val[0] = 
c>>24 & 0xff;
 
  703     node->
val[1] = 
c>>16 & 0xff;
 
  704     node->
val[2] = 
c>> 8 & 0xff;
 
  705     node->
val[3] = 
c     & 0xff;
 
  707     color_used[pal_id] = 1;
 
  711     box1.
max[component-1] = node->
val[component];
 
  712     box2.
min[component-1] = node->
val[component] + 1;
 
  714     node_left_id = 
colormap_insert(
map, color_used, nb_used, palette, trans_thresh, &box1);
 
  716     if (box2.
min[component-1] <= box2.
max[component-1])
 
  717         node_right_id = 
colormap_insert(
map, color_used, nb_used, palette, trans_thresh, &box2);
 
  727     const int c1 = *(
const uint32_t *)
a & 0xffffff;
 
  728     const int c2 = *(
const uint32_t *)
b & 0xffffff;
 
  736     uint32_t last_color = 0;
 
  742     if (
s->transparency_index >= 0) {
 
  744             if ((
s->palette[
i]>>24 & 0xff) == 0) {
 
  745                 s->transparency_index = 
i; 
 
  752         const uint32_t 
c = 
s->palette[
i];
 
  753         if (
i != 0 && 
c == last_color) {
 
  758         if (
c >> 24 < 
s->trans_thresh) {
 
  764     box.
min[0] = box.
min[1] = box.
min[2] = 0x00;
 
  765     box.
max[0] = box.
max[1] = box.
max[2] = 0xff;
 
  772     if (
s->debug_accuracy) {
 
  779                              const AVFrame *in2, 
int frame_count)
 
  782     const uint32_t *palette = 
s->palette;
 
  783     uint32_t *
src1 = (uint32_t *)in1->
data[0];
 
  785     const int src1_linesize = in1->
linesize[0] >> 2;
 
  786     const int src2_linesize = in2->
linesize[0];
 
  788     unsigned mean_err = 0;
 
  791         for (x = 0; x < in1->
width; x++) {
 
  792             const uint32_t 
c1 = 
src1[x];
 
  793             const uint32_t 
c2 = palette[src2[x]];
 
  794             const uint8_t argb1[] = {0xff, 
c1 >> 16 & 0xff, 
c1 >> 8 & 0xff, 
c1 & 0xff};
 
  795             const uint8_t argb2[] = {0xff, 
c2 >> 16 & 0xff, 
c2 >> 8 & 0xff, 
c2 & 0xff};
 
  796             mean_err += 
diff(argb1, argb2, 
s->trans_thresh);
 
  798         src1 += src1_linesize;
 
  799         src2 += src2_linesize;
 
  802     s->total_mean_err += mean_err;
 
  805            mean_err / div, 
s->total_mean_err / (div * frame_count));
 
  811                                   int *xp, 
int *yp, 
int *wp, 
int *hp)
 
  813     int x_start = 0, y_start = 0;
 
  819         int x_end = cur_src->
width  - 1,
 
  820             y_end = cur_src->
height - 1;
 
  821         const uint32_t *prv_srcp = (
const uint32_t *)prv_src->
data[0];
 
  822         const uint32_t *cur_srcp = (
const uint32_t *)cur_src->
data[0];
 
  826         const int prv_src_linesize = prv_src->
linesize[0] >> 2;
 
  827         const int cur_src_linesize = cur_src->
linesize[0] >> 2;
 
  828         const int prv_dst_linesize = prv_dst->
linesize[0];
 
  829         const int cur_dst_linesize = cur_dst->
linesize[0];
 
  832         while (y_start < y_end && !memcmp(prv_srcp + y_start*prv_src_linesize,
 
  833                                           cur_srcp + y_start*cur_src_linesize,
 
  834                                           cur_src->
width * 4)) {
 
  835             memcpy(cur_dstp + y_start*cur_dst_linesize,
 
  836                    prv_dstp + y_start*prv_dst_linesize,
 
  840         while (y_end > y_start && !memcmp(prv_srcp + y_end*prv_src_linesize,
 
  841                                           cur_srcp + y_end*cur_src_linesize,
 
  842                                           cur_src->
width * 4)) {
 
  843             memcpy(cur_dstp + y_end*cur_dst_linesize,
 
  844                    prv_dstp + y_end*prv_dst_linesize,
 
  849         height = y_end + 1 - y_start;
 
  852         while (x_start < x_end) {
 
  854             for (y = y_start; y <= y_end; y++) {
 
  855                 if (prv_srcp[y*prv_src_linesize + x_start] != cur_srcp[y*cur_src_linesize + x_start]) {
 
  864         while (x_end > x_start) {
 
  866             for (y = y_start; y <= y_end; y++) {
 
  867                 if (prv_srcp[y*prv_src_linesize + x_end] != cur_srcp[y*cur_src_linesize + x_end]) {
 
  876         width = x_end + 1 - x_start;
 
  879             for (y = y_start; y <= y_end; y++)
 
  880                 memcpy(cur_dstp + y*cur_dst_linesize,
 
  881                        prv_dstp + y*prv_dst_linesize, x_start);
 
  883         if (x_end != cur_src->
width - 1) {
 
  884             const int copy_len = cur_src->
width - 1 - x_end;
 
  885             for (y = y_start; y <= y_end; y++)
 
  886                 memcpy(cur_dstp + y*cur_dst_linesize + x_end + 1,
 
  887                        prv_dstp + y*prv_dst_linesize + x_end + 1,
 
  912                           s->last_out, 
out, &x, &y, &
w, &
h);
 
  923     ff_dlog(
ctx, 
"%dx%d rect: (%d;%d) -> (%d,%d) [area:%dx%d]\n",
 
  924             w, 
h, x, y, x+
w, y+
h, 
in->width, 
in->height);
 
  933     if (
s->calc_mean_err)
 
  948     s->fs.opt_repeatlast = 1; 
 
  952     outlink->
w = 
ctx->inputs[0]->w;
 
  953     outlink->
h = 
ctx->inputs[0]->h;
 
  967                "Palette input must contain exactly %d pixels. " 
  968                "Specified input has %dx%d=%d pixels\n",
 
  979     const uint32_t *p = (
const uint32_t *)palette_frame->
data[0];
 
  980     const int p_linesize = palette_frame->
linesize[0] >> 2;
 
  982     s->transparency_index = -1;
 
  985         memset(
s->palette, 0, 
sizeof(
s->palette));
 
  986         memset(
s->map, 0, 
sizeof(
s->map));
 
  989         memset(
s->cache, 0, 
sizeof(
s->cache));
 
  993     for (y = 0; y < palette_frame->
height; y++) {
 
  994         for (x = 0; x < palette_frame->
width; x++) {
 
  995             s->palette[
i] = p[x];
 
  996             if (p[x]>>24 < 
s->trans_thresh) {
 
  997                 s->transparency_index = 
i; 
 
 1007         s->palette_loaded = 1;
 
 1022     if (!
master || !second) {
 
 1026     if (!
s->palette_loaded) {
 
 1036 #define DEFINE_SET_FRAME(color_search, name, value)                             \ 
 1037 static int set_frame_##name(PaletteUseContext *s, AVFrame *out, AVFrame *in,    \ 
 1038                             int x_start, int y_start, int w, int h)             \ 
 1040     return set_frame(s, out, in, x_start, y_start, w, h, value, color_search);  \ 
 1043 #define DEFINE_SET_FRAME_COLOR_SEARCH(color_search, color_search_macro)                                 \ 
 1044     DEFINE_SET_FRAME(color_search_macro, color_search##_##none,            DITHERING_NONE)              \ 
 1045     DEFINE_SET_FRAME(color_search_macro, color_search##_##bayer,           DITHERING_BAYER)             \ 
 1046     DEFINE_SET_FRAME(color_search_macro, color_search##_##heckbert,        DITHERING_HECKBERT)          \ 
 1047     DEFINE_SET_FRAME(color_search_macro, color_search##_##floyd_steinberg, DITHERING_FLOYD_STEINBERG)   \ 
 1048     DEFINE_SET_FRAME(color_search_macro, color_search##_##sierra2,         DITHERING_SIERRA2)           \ 
 1049     DEFINE_SET_FRAME(color_search_macro, color_search##_##sierra2_4a,      DITHERING_SIERRA2_4A)        \ 
 1055 #define DITHERING_ENTRIES(color_search) {       \ 
 1056     set_frame_##color_search##_none,            \ 
 1057     set_frame_##color_search##_bayer,           \ 
 1058     set_frame_##color_search##_heckbert,        \ 
 1059     set_frame_##color_search##_floyd_steinberg, \ 
 1060     set_frame_##color_search##_sierra2,         \ 
 1061     set_frame_##color_search##_sierra2_4a,      \ 
 1072     const int q = p ^ (p >> 3);
 
 1073     return   (p & 4) >> 2 | (q & 4) >> 1 \
 
 1074            | (p & 2) << 1 | (q & 2) << 2 \
 
 1075            | (p & 1) << 4 | (q & 1) << 5;
 
 1084     if (!
s->last_in || !
s->last_out) {
 
 1094         const int delta = 1 << (5 - 
s->bayer_scale); 
 
 1143     .
name          = 
"paletteuse",
 
 1152     .priv_class    = &paletteuse_class,
 
  
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
 
int ff_framesync_configure(FFFrameSync *fs)
Configure a frame sync structure.
 
#define AV_BPRINT_SIZE_UNLIMITED
 
static int config_input_palette(AVFilterLink *inlink)
 
AVPixelFormat
Pixel format.
 
static av_always_inline int diff(const uint8_t *c1, const uint8_t *c2, const int trans_thresh)
 
static av_always_inline int get_dst_color_err(PaletteUseContext *s, uint32_t c, int *er, int *eg, int *eb, const enum color_search_method search_method)
 
static void colormap_nearest_node(const struct color_node *map, const int node_pos, const uint8_t *target, const int trans_thresh, struct nearest_color *nearest)
 
static int query_formats(AVFilterContext *ctx)
 
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
 
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
 
void ff_framesync_uninit(FFFrameSync *fs)
Free all memory currently allocated.
 
static void debug_mean_error(PaletteUseContext *s, const AVFrame *in1, const AVFrame *in2, int frame_count)
 
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
 
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
 
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
 
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
 
static av_cold int end(AVCodecContext *avctx)
 
static av_cold int init(AVFilterContext *ctx)
 
int(* set_frame_func)(struct PaletteUseContext *s, AVFrame *out, AVFrame *in, int x_start, int y_start, int width, int height)
 
This structure describes decoded (raw) audio or video data.
 
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
 
static av_cold void uninit(AVFilterContext *ctx)
 
void * av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size, const uint8_t *elem_data)
Add an element of size elem_size to a dynamic array.
 
static av_always_inline uint8_t colormap_nearest_bruteforce(const uint32_t *palette, const uint8_t *argb, const int trans_thresh)
 
AVFilter ff_vf_paletteuse
 
static int disp_tree(const struct color_node *node, const char *fname)
 
const char * name
Filter name.
 
@ EXT_INFINITY
Extend the frame to infinity.
 
A link between two filters.
 
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
 
static av_always_inline uint8_t colormap_nearest_recursive(const struct color_node *node, const uint8_t *rgb, const int trans_thresh)
 
static int debug_accuracy(const struct color_node *node, const uint32_t *palette, const int trans_thresh, const enum color_search_method search_method)
 
static int dither_value(int p)
 
@ COLOR_SEARCH_BRUTEFORCE
 
static int apply_palette(AVFilterLink *inlink, AVFrame *in, AVFrame **outf)
 
struct cache_node cache[CACHE_SIZE]
 
A filter pad used for either input or output.
 
static int colormap_insert(struct color_node *map, uint8_t *color_used, int *nb_used, const uint32_t *palette, const int trans_thresh, const struct color_rect *box)
 
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
 
static av_always_inline uint8_t colormap_nearest_iterative(const struct color_node *root, const uint8_t *target, const int trans_thresh)
 
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
 
static void set_processing_window(enum diff_mode diff_mode, const AVFrame *prv_src, const AVFrame *cur_src, const AVFrame *prv_dst, AVFrame *cur_dst, int *xp, int *yp, int *wp, int *hp)
 
FILE * av_fopen_utf8(const char *path, const char *mode)
Open a file using a UTF-8 filename.
 
#define DEFINE_SET_FRAME_COLOR_SEARCH(color_search, color_search_macro)
 
static int config_output(AVFilterLink *outlink)
 
static const AVFilterPad outputs[]
 
static const set_frame_func set_frame_lut[NB_COLOR_SEARCHES][NB_DITHERING]
 
static int load_apply_palette(FFFrameSync *fs)
 
Describe the class of an AVClass context structure.
 
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
 
#define fs(width, name, subs,...)
 
#define COLORMAP_NEAREST(search, palette, root, target, trans_thresh)
 
static int get_next_color(const uint8_t *color_used, const uint32_t *palette, const int trans_thresh, int *component, const struct color_rect *box)
 
#define DITHERING_ENTRIES(color_search)
 
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
 
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
 
static const cmp_func cmp_funcs[]
 
static void disp_node(AVBPrint *buf, const struct color_node *map, int parent_id, int node_id, int depth)
 
static const AVOption paletteuse_options[]
 
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
 
int ff_framesync_init_dualinput(FFFrameSync *fs, AVFilterContext *parent)
Initialize a frame sync structure for dualinput.
 
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
 
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
 
@ DITHERING_FLOYD_STEINBERG
 
uint32_t palette[AVPALETTE_COUNT]
 
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
 
AVFilterContext * src
source filter
 
static char * split(char *message, char delim)
 
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
 
static av_always_inline int color_get(PaletteUseContext *s, uint32_t color, uint8_t a, uint8_t r, uint8_t g, uint8_t b, const enum color_search_method search_method)
Check if the requested color is in the cache already.
 
#define AV_LOG_INFO
Standard information.
 
#define DECLARE_CMP_FUNC(name, pos)
 
static int activate(AVFilterContext *ctx)
 
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
 
#define i(width, name, range_min, range_max)
 
#define AV_QSORT(p, num, type, cmp)
Quicksort This sort is fast, and fully inplace but not stable and it is possible to construct input t...
 
int w
agreed upon image width
 
static int cmp_pal_entry(const void *a, const void *b)
 
struct cached_color * entries
 
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
 
const char * name
Pad name.
 
static void load_colormap(PaletteUseContext *s)
 
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
 
void av_bprintf(AVBPrint *buf, const char *fmt,...)
 
static av_always_inline int set_frame(PaletteUseContext *s, AVFrame *out, AVFrame *in, int x_start, int y_start, int w, int h, enum dithering_mode dither, const enum color_search_method search_method)
 
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction: ========== 1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames: scaled_mv = (mv * (256 * (current_reference+1) / (mv.reference+1)) + 128) >> 8; 2. the median of the scaled left, top and top-right vectors is used as the motion vector prediction.
 
#define FF_ARRAY_ELEMS(a)
 
int h
agreed upon image height
 
@ COLOR_SEARCH_NNS_ITERATIVE
 
int(* cmp_func)(const void *, const void *)
 
struct color_node map[AVPALETTE_COUNT]
 
@ COLOR_SEARCH_NNS_RECURSIVE
 
AVRational time_base
Define the time base used by the PTS of the frames/samples which will pass through this link.
 
static int shift(int a, int b)
 
const VDPAUPixFmtMap * map
 
static const AVFilterPad paletteuse_outputs[]
 
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
 
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
 
int ff_framesync_activate(FFFrameSync *fs)
Examine the frames in the filter's input and try to produce output.
 
AVFILTER_DEFINE_CLASS(paletteuse)
 
int ff_framesync_dualinput_get_writable(FFFrameSync *fs, AVFrame **f0, AVFrame **f1)
Same as ff_framesync_dualinput_get(), but make sure that f0 is writable.
 
static av_always_inline int dither_color(uint32_t px, int er, int eg, int eb, int scale, int shift)
 
static const AVFilterPad paletteuse_inputs[]
 
static void load_palette(PaletteUseContext *s, const AVFrame *palette_frame)
 
static const uint8_t dither[8][8]