Source listing excerpts from FFmpeg's WMV2 (Windows Media Video 8) decoder, libavcodec/wmv2dec.c. In the excerpts below, w is the WMV2 decoder context (WMV2DecContext), h the H263 decoder context (H263DecContext) with its embedded MPV context h->c, and s the MPV/MpegEncContext parameter of the DSP and motion helpers; elided lines are marked /* ... */.
static void wmv2_mspel8_h_lowpass(uint8_t *dst, const uint8_t *src,
                                  int dstStride, int srcStride, int h)
{
    /* ... same 4-tap kernel as the vertical filter below, applied along
     * each row ... */
    for (int i = 0; i < h; i++) {
        /* ... */
    }
}
static void wmv2_mspel8_v_lowpass(uint8_t *dst, const uint8_t *src,
                                  int dstStride, int srcStride, int w)
{
    /* ... cm points to a clip-to-[0,255] lookup table ... */
    for (int i = 0; i < w; i++) {
        const int src_1 = src[-srcStride];
        const int src0  = src[0];
        const int src1  = src[srcStride];
        const int src2  = src[2 * srcStride];
        const int src3  = src[3 * srcStride];
        const int src4  = src[4 * srcStride];
        const int src5  = src[5 * srcStride];
        const int src6  = src[6 * srcStride];
        const int src7  = src[7 * srcStride];
        const int src8  = src[8 * srcStride];
        const int src9  = src[9 * srcStride];

        dst[0 * dstStride] = cm[(9 * (src0 + src1) - (src_1 + src2) + 8) >> 4];
        dst[1 * dstStride] = cm[(9 * (src1 + src2) - (src0  + src3) + 8) >> 4];
        dst[2 * dstStride] = cm[(9 * (src2 + src3) - (src1  + src4) + 8) >> 4];
        dst[3 * dstStride] = cm[(9 * (src3 + src4) - (src2  + src5) + 8) >> 4];
        dst[4 * dstStride] = cm[(9 * (src4 + src5) - (src3  + src6) + 8) >> 4];
        dst[5 * dstStride] = cm[(9 * (src5 + src6) - (src4  + src7) + 8) >> 4];
        dst[6 * dstStride] = cm[(9 * (src6 + src7) - (src5  + src8) + 8) >> 4];
        dst[7 * dstStride] = cm[(9 * (src7 + src8) - (src6  + src9) + 8) >> 4];
        /* ... */
    }
}
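The vertical filter above (and its horizontal counterpart) uses a 4-tap kernel: each output sample is (9*(b + c) - (a + d) + 8) >> 4 for four consecutive input samples a, b, c, d, i.e. a rounded interpolation between the middle pair with a small negative contribution from the outer pair, clamped to 0..255 by the cm[] table. A standalone sketch of a single tap; the function name and the explicit clamp are illustrative, not part of the decoder:

#include <stdint.h>
#include <stdio.h>

/* One tap of the WMV2 "mspel" lowpass filter: a, b, c, d are four
 * consecutive samples; the result approximates the value halfway
 * between b and c. The explicit clamp stands in for the cm[] table. */
static uint8_t mspel_tap(int a, int b, int c, int d)
{
    int v = (9 * (b + c) - (a + d) + 8) >> 4;
    if (v < 0)   v = 0;
    if (v > 255) v = 255;
    return (uint8_t)v;
}

int main(void)
{
    /* a smooth ramp stays smooth ... */
    printf("%d\n", mspel_tap(10, 20, 30, 40));   /* -> 25 */
    /* ... while an overshooting edge is clamped */
    printf("%d\n", mspel_tap(255, 0, 0, 255));   /* -> 0  */
    return 0;
}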
/* motion compensation of one 16x16 macroblock with the WMV2 "mspel" sub-pel filters */
void ff_mspel_motion(MPVContext *const s, uint8_t *dest_y,
                     uint8_t *dest_cb, uint8_t *dest_cr,
                     uint8_t *const *ref_picture,
                     const op_pixels_func (*pix_op)[4],
                     int motion_x, int motion_y, int h)
{
    /* ... (w, the WMV2 decoder context, is fetched from s in an elided line) */
    uint8_t *ptr;
    int dxy, mx, my, src_x, src_y, v_edge_pos;
    ptrdiff_t offset, linesize, uvlinesize;

    /* luma: the two half-pel MV bits plus the per-MV hshift bit select one
     * of the eight put_mspel_pixels_tab[] filters */
    dxy   = ((motion_y & 1) << 1) | (motion_x & 1);
    dxy   = 2 * dxy + w->hshift;
    src_x = s->mb_x * 16 + (motion_x >> 1);
    src_y = s->mb_y * 16 + (motion_y >> 1);

    v_edge_pos = s->v_edge_pos;
    src_x      = av_clip(src_x, -16, s->width);
    src_y      = av_clip(src_y, -16, s->height);

    if (src_x <= -16 || src_x >= s->width)
        /* ... */;
    if (src_y <= -16 || src_y >= s->height)
        /* ... */;

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;
    ptr        = ref_picture[0] + (src_y * linesize) + src_x;

    if (src_x < 1 || src_y < 1 || src_x + 17 >= s->h_edge_pos ||
        src_y + h + 1 >= v_edge_pos) {
        /* the reference block touches the picture border: copy a 19x19 patch
         * (one extra pixel on each side for the 4-tap filter) into the
         * edge-emulation buffer and filter from there */
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr - 1 - s->linesize,
                                 s->linesize, s->linesize, 19, 19,
                                 src_x - 1, src_y - 1,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr = s->sc.edge_emu_buffer + 1 + s->linesize;
        /* ... */
    }

    w->put_mspel_pixels_tab[dxy](dest_y,                    ptr,                    linesize);
    w->put_mspel_pixels_tab[dxy](dest_y + 8,                ptr + 8,                linesize);
    w->put_mspel_pixels_tab[dxy](dest_y + 8 * linesize,     ptr + 8 * linesize,     linesize);
    w->put_mspel_pixels_tab[dxy](dest_y + 8 + 8 * linesize, ptr + 8 + 8 * linesize, linesize);

    /* ... chroma: dxy and (mx, my) are re-derived from the luma MV ... */
    if ((motion_x & 3) != 0)
        /* ... */;
    if ((motion_y & 3) != 0)
        /* ... */;
    /* ... */

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
    src_x = av_clip(src_x, -8, s->width >> 1);
    if (src_x == (s->width >> 1))
        /* ... */;
    src_y = av_clip(src_y, -8, s->height >> 1);
    if (src_y == (s->height >> 1))
        /* ... */;
    offset = (src_y * uvlinesize) + src_x;

    ptr = ref_picture[1] + offset;
    /* ... */
    s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                             s->uvlinesize, s->uvlinesize,
                             /* ... */
                             s->h_edge_pos >> 1, s->v_edge_pos >> 1);
    ptr = s->sc.edge_emu_buffer;
    /* ... */
    pix_op[1][dxy](dest_cb, ptr, uvlinesize, h >> 1);

    ptr = ref_picture[2] + offset;
    /* ... */
    s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                             s->uvlinesize, s->uvlinesize,
                             /* ... */
                             s->h_edge_pos >> 1, s->v_edge_pos >> 1);
    ptr = s->sc.edge_emu_buffer;
    /* ... */
    pix_op[1][dxy](dest_cr, ptr, uvlinesize, h >> 1);
}
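In the luma path above, the two half-pel MV bits and the per-MV hshift bit are folded into a single index dxy in 0..7, which selects one of the eight put_mspel_pixels_tab[] functions (index 0 is the plain 8x8 copy, ff_put_pixels8x8_c). A small standalone reproduction of that index computation, with a few worked values:

#include <stdio.h>

/* Reproduce the index computation from ff_mspel_motion():
 * bit 0 of motion_x / motion_y is the half-pel fraction, hshift is the
 * extra per-MV bit read by the MV decoder. */
static int mspel_dxy(int motion_x, int motion_y, int hshift)
{
    int dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    return 2 * dxy + hshift;               /* 0..7 */
}

int main(void)
{
    printf("%d\n", mspel_dxy(4, -2, 0));   /* full-pel MV, no shift  -> 0 */
    printf("%d\n", mspel_dxy(5, -2, 0));   /* horizontal half-pel    -> 2 */
    printf("%d\n", mspel_dxy(5, 3, 1));    /* both half-pel + hshift -> 7 */
    return 0;
}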
static void wmv2_add_block(WMV2DecContext *w, int16_t blocks1[][64],
                           uint8_t *dst, int stride, int n)
{
    if (h->c.block_last_index[n] >= 0) {
        int16_t *block1 = blocks1[n];

        switch (w->abt_type_table[n]) {
        /* ... the plain 8x8 case and the 8x4 / 4x8 ABT cases are elided; the
         * split cases (ff_simple_idct84_add / ff_simple_idct48_add) also add
         * the secondary coefficient block and then clear it: ... */
            h->c.bdsp.clear_block(w->abt_block2[n]);
            /* ... */
            h->c.bdsp.clear_block(w->abt_block2[n]);
            /* ... */
        }
    }
}
/* adds the six decoded blocks of one macroblock to the picture */
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block1[6][64],
                    uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
{
    /* ... calls wmv2_add_block() for the four luma and two chroma blocks ... */
}
static int parse_mb_skip(WMV2DecContext *w)
{
    /* ... */
    int coded_mb_count = 0;
    uint32_t *const mb_type = h->c.cur_pic.mb_type;

    /* ... a skip-coding mode read from the bitstream selects one of the
     * branches below; the value stored into mb_type[] is elided here ... */

    /* every macroblock coded */
    for (int mb_y = 0; mb_y < h->c.mb_height; mb_y++)
        for (int mb_x = 0; mb_x < h->c.mb_width; mb_x++)
            mb_type[mb_y * h->c.mb_stride + mb_x] = /* ... */;

    /* one skip flag per macroblock */
    for (int mb_y = 0; mb_y < h->c.mb_height; mb_y++)
        for (int mb_x = 0; mb_x < h->c.mb_width; mb_x++)
            mb_type[mb_y * h->c.mb_stride + mb_x] = /* ... */;

    /* row-wise skip coding: one flag per row of macroblocks */
    for (int mb_y = 0; mb_y < h->c.mb_height; mb_y++) {
        /* ... */
        for (int mb_x = 0; mb_x < h->c.mb_width; mb_x++)
            mb_type[mb_y * h->c.mb_stride + mb_x] = /* ... */;
        /* ... */
        for (int mb_x = 0; mb_x < h->c.mb_width; mb_x++)
            mb_type[mb_y * h->c.mb_stride + mb_x] = /* ... */;
    }

    /* column-wise skip coding: one flag per column of macroblocks */
    for (int mb_x = 0; mb_x < h->c.mb_width; mb_x++) {
        /* ... */
        for (int mb_y = 0; mb_y < h->c.mb_height; mb_y++)
            mb_type[mb_y * h->c.mb_stride + mb_x] = /* ... */;
        /* ... */
        for (int mb_y = 0; mb_y < h->c.mb_height; mb_y++)
            mb_type[mb_y * h->c.mb_stride + mb_x] = /* ... */;
    }
    /* ... */

    /* count how many macroblocks actually carry data */
    for (int mb_y = 0; mb_y < h->c.mb_height; mb_y++)
        for (int mb_x = 0; mb_x < h->c.mb_width; mb_x++)
            coded_mb_count += !IS_SKIP(mb_type[mb_y * h->c.mb_stride + mb_x]);
    /* ... */
}
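parse_mb_skip() above fills the per-picture mb_type array according to one of several skip-coding modes (every MB coded, per-MB flags, per-row flags, per-column flags) and then counts the coded macroblocks. The sketch below illustrates the same bookkeeping on a plain byte map; the fixed 4x3 grid, the row_skip/col_skip inputs and fill_skip_map() are all made up for the example (the real decoder applies exactly one mode per picture and reads the flags from the bitstream):

#include <stdio.h>

#define MB_W 4
#define MB_H 3

/* Illustrative skip-map filling: row_skip[y] / col_skip[x] play the role of
 * the per-row / per-column flags read from the bitstream. */
static int fill_skip_map(unsigned char skip[MB_H][MB_W],
                         const unsigned char row_skip[MB_H],
                         const unsigned char col_skip[MB_W])
{
    int coded = 0;

    for (int y = 0; y < MB_H; y++)
        for (int x = 0; x < MB_W; x++) {
            skip[y][x] = row_skip[y] | col_skip[x];
            coded += !skip[y][x];
        }
    return coded;                  /* number of macroblocks that carry data */
}

int main(void)
{
    unsigned char skip[MB_H][MB_W];
    const unsigned char row_skip[MB_H] = { 0, 1, 0 };
    const unsigned char col_skip[MB_W] = { 0, 0, 1, 0 };

    printf("coded MBs: %d\n", fill_skip_map(skip, row_skip, col_skip));
    /* prints "coded MBs: 6": row 1 and column 2 are fully skipped */
    return 0;
}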
static av_cold int decode_ext_header(AVCodecContext *avctx, WMV2DecContext *w)
{
    /* ... (gb: a GetBitContext over the codec extradata; its setup is elided) */
    w->ms.bit_rate = get_bits(&gb, 11) * 1024;   /* stored in units of 1024 bit/s */
    /* ... */
    h->slice_height = h->c.mb_height / code;
    /* ... */

    av_log(avctx, AV_LOG_DEBUG,
           "fps:%d, br:%d, qpbit:%d, abt_flag:%d, j_type_bit:%d, "
           "tl_mv_flag:%d, mbrl_bit:%d, code:%d, loop_filter:%d, "
           /* ... */
           fps, w->ms.bit_rate, w->mspel_bit, w->abt_flag, w->j_type_bit,
           w->top_left_mv_flag, w->per_mb_rl_bit, code, h->loop_filter,
           /* ... */);
    /* ... */
}
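decode_ext_header() reads fixed-width fields such as the 11-bit bit_rate (stored in units of 1024 bit/s) from the codec extradata. The snippet below shows how such a field would be pulled out of a byte buffer with a minimal MSB-first bit reader; this is a generic sketch, not FFmpeg's GetBitContext, and the buffer contents are made up:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Tiny MSB-first bit reader, just enough to illustrate fixed-width fields. */
typedef struct {
    const uint8_t *buf;
    size_t bitpos;
} BitReader;

static unsigned read_bits(BitReader *br, int n)
{
    unsigned v = 0;
    while (n--) {
        unsigned bit = (br->buf[br->bitpos >> 3] >> (7 - (br->bitpos & 7))) & 1;
        v = (v << 1) | bit;
        br->bitpos++;
    }
    return v;
}

int main(void)
{
    /* made-up buffer; the real field layout lives in decode_ext_header() */
    const uint8_t buf[2] = { 0x64, 0x80 };          /* first 11 bits = 804 */
    BitReader br = { buf, 0 };

    unsigned bit_rate = read_bits(&br, 11) * 1024;  /* field is in 1024 bit/s units */
    printf("bit_rate = %u bit/s\n", bit_rate);      /* 804 * 1024 = 823296 */
    return 0;
}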
static int wmv2_decode_picture_header(H263DecContext *const h)
{
    /* ... */
    h->c.chroma_qscale = h->c.qscale = get_bits(&h->gb, 5);
    if (h->c.qscale <= 0)
        return AVERROR_INVALIDDATA;
    /* ... */
}
int ff_wmv2_decode_secondary_picture_header(H263DecContext *const h)
{
    /* ... */
    if (h->c.pict_type == AV_PICTURE_TYPE_I) {
        memset(h->c.cur_pic.mb_type, 0,
               sizeof(*h->c.cur_pic.mb_type) * h->c.mb_height * h->c.mb_stride);
        /* ... */
        if (w->per_mb_rl_bit)
            w->ms.per_mb_rl_table = get_bits1(&h->gb);
        else
            w->ms.per_mb_rl_table = 0;

        if (!w->ms.per_mb_rl_table) {
            /* ... the chroma and luma RL table indices are read here ... */
        }
        /* ... */
        if (get_bits_left(&h->gb) * 8LL <
            (h->c.width + 15) / 16 * ((h->c.height + 15) / 16))
            return AVERROR_INVALIDDATA;

        h->c.inter_intra_pred = 0;
        h->c.no_rounding      = 1;

        av_log(h->c.avctx, AV_LOG_DEBUG,
               "qscale:%d rlc:%d rl:%d dc:%d mbrl:%d j_type:%d \n",
               h->c.qscale, w->ms.rl_chroma_table_index, w->ms.rl_table_index,
               w->ms.dc_table_index, w->ms.per_mb_rl_table, w->j_type);
    } else {   /* P pictures */
        /* ... */
        if (w->per_mb_rl_bit)
            w->ms.per_mb_rl_table = get_bits1(&h->gb);
        else
            w->ms.per_mb_rl_table = 0;

        if (!w->ms.per_mb_rl_table) {
            /* ... the luma RL table index is read here ... */
            w->ms.rl_chroma_table_index = w->ms.rl_table_index;
        }
        /* ... */
        h->c.inter_intra_pred = 0;
        h->c.no_rounding     ^= 1;

        av_log(h->c.avctx, AV_LOG_DEBUG,
               "rl:%d rlc:%d dc:%d mv:%d mbrl:%d qp:%d mspel:%d "
               "per_mb_abt:%d abt_type:%d cbp:%d ii:%d\n",
               w->ms.rl_table_index, w->ms.rl_chroma_table_index,
               w->ms.dc_table_index, w->ms.mv_table_index,
               w->ms.per_mb_rl_table, h->c.qscale, h->c.mspel,
               w->per_mb_abt, w->abt_type, w->cbp_table_index,
               h->c.inter_intra_pred);
    }

    w->ms.esc3_level_length = 0;
    w->ms.esc3_run_length   = 0;

    /* j_type frames are handed to the IntraX8 decoder */
    if (w->j_type) {
        /* ... */
        ff_intrax8_decode_picture(/* ... */,
                                  &h->gb, &h->c.mb_x, &h->c.mb_y,
                                  2 * h->c.qscale, (h->c.qscale - 1) | 1,
                                  h->loop_filter, h->c.low_delay);
        /* ... */
        ff_er_add_slice(/* ... */,
                        (h->c.mb_x >> 1) - 1, (h->c.mb_y >> 1) - 1,
                        /* ... */);
        /* ... */
    }
    /* ... */
}
static void wmv2_decode_motion(WMV2DecContext *w, int *mx_ptr, int *my_ptr)
{
    /* ... the MV itself is decoded by ff_msmpeg4_decode_motion(); an extra
     * filter-select bit (hshift) follows for half-pel vectors in mspel mode */
    if ((((*mx_ptr) | (*my_ptr)) & 1) && h->c.mspel)
        /* ... */;
    /* ... */
}
static int16_t *wmv2_pred_motion(WMV2DecContext *w, int *px, int *py)
{
    /* ... */
    int wrap = h->c.b8_stride;
    int xy   = h->c.block_index[0];

    int16_t *mot_val = h->c.cur_pic.motion_val[0][xy];

    /* MV predictor candidates: left (A), top (B) and top-right (C) */
    const int16_t *A = h->c.cur_pic.motion_val[0][xy - 1];
    const int16_t *B = h->c.cur_pic.motion_val[0][xy - wrap];
    const int16_t *C = h->c.cur_pic.motion_val[0][xy + 2 - wrap];

    if (h->c.mb_x && !h->c.first_slice_line && !h->c.mspel && w->top_left_mv_flag)
        /* ... */;
    /* ... a predictor "type" derived above selects A, B, or the median of
     * A, B and C (with a special case on the first slice line): ... */
    if (type == 0) {
        /* ... */
    } else if (type == 1) {
        /* ... */
    } else {
        /* ... */
        if (h->c.first_slice_line) {
            /* ... */
        }
        /* ... */
    }

    /* ... */
    return mot_val;
}
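wmv2_pred_motion() gathers the left (A), top (B) and top-right (C) neighbouring motion vectors; apart from the cases where the type flag forces A or B directly, the prediction is the component-wise median of the three, in the usual H.263 style. A standalone sketch of that median step; mid3() is an illustrative stand-in for the helper the decoder actually uses:

#include <stdio.h>

/* Median of three values, the core of H.263-style MV prediction. */
static int mid3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }   /* now a <= b */
    if (b > c)   b = c;                       /* b = min(max(a0, b0), c) */
    return a > b ? a : b;                     /* max(a, b) = median */
}

int main(void)
{
    /* neighbouring MVs A, B, C as (x, y) pairs */
    const int A[2] = { 4, -2 }, B[2] = { 8, 0 }, C[2] = { -6, 2 };

    printf("pred MV = (%d, %d)\n",
           mid3(A[0], B[0], C[0]),    /* median of 4, 8, -6 -> 4 */
           mid3(A[1], B[1], C[1]));   /* median of -2, 0, 2 -> 0 */
    return 0;
}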
static int wmv2_decode_inter_block(WMV2DecContext *w, int16_t *block,
                                   int n, int cbp)
{
    static const int sub_cbp_table[3] = { 2, 3, 1 };
    /* ... */

    if (!cbp) {                          /* nothing coded for this block */
        h->c.block_last_index[n] = -1;
        return 0;
    }

    if (w->per_block_abt)
        /* ... the per-block ABT type is read here ... */;
    w->abt_type_table[n] = w->abt_type;

    /* ... ABT blocks: the two sub-blocks are decoded with
     * ff_msmpeg4_decode_block() using the ff_wmv2_scantableA/B scan order ... */
        h->c.block_last_index[n] = 63;
    /* ... */

    /* non-ABT blocks are decoded as a regular 8x8 inter block: */
    return ff_msmpeg4_decode_block(/* ... */,
                                   h->c.inter_scantable.permutated);
}
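wmv2_decode_inter_block() maps the 0/1/2 value returned by decode012() through sub_cbp_table[] = { 2, 3, 1 }. Read as a two-bit mask over the two ABT sub-blocks (which appears to be how the elided part of the function uses it, bit 0 for the first half and bit 1 for the second), code 0 selects only the second half, 1 both halves and 2 only the first half. A tiny standalone print-out of that mapping:

#include <stdio.h>

int main(void)
{
    /* value read by decode012() -> which of the two ABT sub-blocks are coded */
    static const int sub_cbp_table[3] = { 2, 3, 1 };

    for (int code = 0; code < 3; code++) {
        int sub_cbp = sub_cbp_table[code];
        printf("code %d: first half %s, second half %s\n", code,
               (sub_cbp & 1) ? "coded" : "skipped",
               (sub_cbp & 2) ? "coded" : "skipped");
    }
    return 0;
}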
static int wmv2_decode_mb(H263DecContext *const h)
{
    /* ... */

    /* skipped macroblock: no coefficients, zero forward MV */
    if (IS_SKIP(h->c.cur_pic.mb_type[h->c.mb_y * h->c.mb_stride + h->c.mb_x])) {
        /* ... */
        for (i = 0; i < 6; i++)
            h->c.block_last_index[i] = -1;
        /* ... */
        h->c.mv[0][0][0] = 0;
        h->c.mv[0][0][1] = 0;
        /* ... */
    }

    /* ... */
    h->c.mb_intra = (~code & 0x40) >> 6;   /* intra when bit 6 of the VLC code is 0 */
    /* ... */

    /* the coded block pattern is assembled one block at a time, MSB first */
    for (i = 0; i < 6; i++) {
        /* ... */
        cbp |= val << (5 - i);
    }

    if (!h->c.mb_intra) {
        /* inter macroblock */
        /* ... */
        h->c.bdsp.clear_blocks(h->block[0]);
        /* ... */
        if (w->abt_flag && w->per_mb_abt) {
            /* ... */
            if (!w->per_block_abt)
                /* ... */;
        } else
            w->per_block_abt = 0;
        /* ... the MV is decoded and predicted, then stored: ... */
        h->c.mv[0][0][0] = mx;
        h->c.mv[0][0][1] = my;
        /* ... */

        for (i = 0; i < 6; i++) {
            /* ... */
                av_log(h->c.avctx, AV_LOG_ERROR,
                       "\nerror while decoding inter block: %d x %d (%d)\n",
                       h->c.mb_x, h->c.mb_y, i);
            /* ... */
        }
    } else {
        /* intra macroblock */
        ff_dlog(h->c.avctx, "%d%d ", h->c.inter_intra_pred, cbp);
        ff_dlog(h->c.avctx, "I at %d %d %d %06X\n", h->c.mb_x, h->c.mb_y,
                ((cbp & 3) ? 1 : 0) + ((cbp & 0x3C) ? 2 : 0),
                /* ... */);
        /* ... */
        if (h->c.inter_intra_pred) {
            /* ... */
            ff_dlog(h->c.avctx, /* ... */
                    h->c.ac_pred, h->c.h263_aic_dir, h->c.mb_x, h->c.mb_y);
            /* ... */
        }
        /* ... */
        h->c.bdsp.clear_blocks(h->block[0]);
        for (i = 0; i < 6; i++) {
            /* ... */
                av_log(h->c.avctx, AV_LOG_ERROR,
                       "\nerror while decoding intra block: %d x %d (%d)\n",
                       h->c.mb_x, h->c.mb_y, i);
            /* ... */
        }
    }
    /* ... */
}
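In wmv2_decode_mb() above, the coded block pattern is assembled one block at a time with cbp |= val << (5 - i), so block 0 lands in bit 5 and block 5 in bit 0; the debug output then groups chroma as cbp & 3 and luma as cbp & 0x3C. A short standalone illustration of that packing (the val[] flags are made up):

#include <stdio.h>

int main(void)
{
    /* made-up per-block coded flags for Y0..Y3, Cb, Cr */
    const int val[6] = { 1, 0, 1, 1, 0, 1 };
    int cbp = 0;

    for (int i = 0; i < 6; i++)
        cbp |= val[i] << (5 - i);           /* block 0 -> bit 5, block 5 -> bit 0 */

    printf("cbp = 0x%02X\n", cbp);          /* 0x2D = 101101b */
    /* individual flags can be recovered with (cbp >> (5 - i)) & 1; the
     * grouping used in the dlog above is luma = cbp & 0x3C, chroma = cbp & 3 */
    printf("luma coded: %d, chroma coded: %d\n",
           (cbp & 0x3C) != 0, (cbp & 3) != 0);
    return 0;
}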
static av_cold int wmv2_decode_init(AVCodecContext *avctx)
{
    /* ... */
    return ff_intrax8_common_init(avctx, /* ... */,
                                  s->mb_width, s->mb_height);
}

Referenced declarations and their brief descriptions:
static int wmv2_decode_mb(H263DecContext *const h)
#define MV_TYPE_16X16
1 vector for the whole mb
static void wmv2_mspel8_h_lowpass(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
static int get_bits_left(GetBitContext *gb)
static av_always_inline int wmv2_get_cbp_table_index(int qscale, int cbp_index)
static int wmv2_decode_inter_block(WMV2DecContext *w, int16_t *block, int n, int cbp)
int ff_intrax8_decode_picture(IntraX8Context *w, MPVPicture *pict, GetBitContext *gb, int *mb_x, int *mb_y, int dquant, int quant_offset, int loopfilter, int lowdelay)
Decode single IntraX8 frame.
int16_t abt_block2[6][64]
static void put_mspel8_mc10_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
qpel_mc_func put_mspel_pixels_tab[8]
int ff_wmv2_decode_secondary_picture_header(H263DecContext *const h)
const FFCodec ff_wmv2_decoder
void ff_simple_idct84_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
#define MSMP4_MB_INTRA_VLC_BITS
static int16_t * wmv2_pred_motion(WMV2DecContext *w, int *px, int *py)
static uint8_t half(int a, int b)
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
static av_cold int wmv2_decode_end(AVCodecContext *avctx)
#define FF_DEBUG_PICT_INFO
static av_cold void close(AVCodecParserContext *s)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
AVCodec p
The public AVCodec.
static void put_mspel8_mc22_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_mspel_motion(MPVContext *const s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], int motion_x, int motion_y, int h)
static int wmv2_decode_picture_header(H263DecContext *const h)
static double val(void *priv, double ch)
void ff_msmpeg4_decode_motion(MSMP4DecContext *const ms, int *mx_ptr, int *my_ptr)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_CODEC_DECODE_CB(func)
av_cold int ff_intrax8_common_init(AVCodecContext *avctx, IntraX8Context *w, int16_t block[64], int mb_width, int mb_height)
Initialize IntraX8 frame decoder.
const VLCElem * ff_mb_non_intra_vlc[4]
static void put_mspel8_mc02_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
static av_cold int decode_ext_header(AVCodecContext *avctx, WMV2DecContext *w)
void ff_simple_idct48_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
static void wmv2_add_block(WMV2DecContext *w, int16_t blocks1[][64], uint8_t *dst, int stride, int n)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define CODEC_LONG_NAME(str)
#define FFABS(a)
Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior as they are not representable as absolute values of their type.
void ff_put_pixels8_l2_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h)
static int parse_mb_skip(WMV2DecContext *w)
av_cold void ff_intrax8_common_end(IntraX8Context *w)
Destroy IntraX8 frame structure.
av_cold int ff_mpv_decode_close(AVCodecContext *avctx)
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NDEBUG.
@ AV_PICTURE_TYPE_I
Intra.
static unsigned int get_bits1(GetBitContext *s)
#define INTER_INTRA_VLC_BITS
int rl_chroma_table_index
int ff_h263_decode_frame(AVCodecContext *avctx, AVFrame *pict, int *got_frame, AVPacket *avpkt)
static void put_mspel8_mc20_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Average and put pixel. Widths can be 16, 8, 4 or 2.
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
VLCElem ff_inter_intra_vlc[8]
VLCElem ff_msmp4_mb_i_vlc[536]
int(* init)(AVBSFContext *ctx)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define MB_NON_INTRA_VLC_BITS
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
static void put_mspel8_mc32_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
#define DECLARE_ALIGNED(n, t, v)
const uint8_t ff_wmv2_scantableB[64]
#define FRAME_SKIPPED
Frame is not coded.
static void put_mspel8_mc30_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
const uint8_t ff_wmv2_scantableA[64]
static void wmv2_decode_motion(WMV2DecContext *w, int *mx_ptr, int *my_ptr)
void ff_put_pixels8x8_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
static int16_t block1[64]
static void put_mspel8_mc12_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
int ff_msmpeg4_decode_block(MSMP4DecContext *const ms, int16_t *block, int n, int coded, const uint8_t *scan_table)
#define i(width, name, range_min, range_max)
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
static int BS_FUNC() decode012(BSCTX *bc)
Return decoded truncated unary code for the values 0, 1, 2.
const char * name
Name of the codec implementation.
static av_cold void wmv2_mspel_init(WMV2DecContext *w)
static void wmv2_mspel8_v_lowpass(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int w)
static const float pred[4]
AVCodecContext
main external API structure.
static av_cold int wmv2_decode_init(AVCodecContext *avctx)
@ AV_PICTURE_TYPE_P
Predicted.
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx)
#define MB_TYPE_FORWARD_MV
int ff_msmpeg4_coded_block_pred(MpegEncContext *s, int n, uint8_t **coded_block_ptr)