FFmpeg
bmvvideo.c
/*
 * Discworld II BMV video decoder
 * Copyright (c) 2011 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"
#include "libavutil/common.h"

#include "avcodec.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "decode.h"

enum BMVFlags {
    BMV_NOP     = 0,
    BMV_END,
    BMV_DELTA,
    BMV_INTRA,

    BMV_SCROLL  = 0x04,
    BMV_PALETTE = 0x08,
    BMV_COMMAND = 0x10,
    BMV_AUDIO   = 0x20,
    BMV_EXT     = 0x40,
    BMV_PRINT   = 0x80
};

#define SCREEN_WIDE 640
#define SCREEN_HIGH 429

typedef struct BMVDecContext {
    AVCodecContext *avctx;

    uint8_t *frame, frame_base[SCREEN_WIDE * (SCREEN_HIGH + 1)];
    uint32_t pal[256];
    const uint8_t *stream;
} BMVDecContext;

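/* Step a pointer one byte in the current scan direction ('forward' is a local
 * variable of decode_bmv_frame(), so this macro is only meaningful there). */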
#define NEXT_BYTE(v) (v) = forward ? (v) + 1 : (v) - 1;

static int decode_bmv_frame(const uint8_t *source, int src_len, uint8_t *frame, int frame_off)
{
    unsigned val, saved_val = 0;
    int tmplen = src_len;
    const uint8_t *src, *source_end = source + src_len;
    uint8_t *frame_end = frame + SCREEN_WIDE * SCREEN_HIGH;
    uint8_t *dst, *dst_end;
    int len, mask;
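    /* Depending on the reference offset the frame is decoded either
     * front-to-back or back-to-front, with src and dst walking accordingly. */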
    int forward = (frame_off <= -SCREEN_WIDE) || (frame_off >= 0);
    int read_two_nibbles, flag;
    int advance_mode;
    int mode = 0;
    int i;

    if (src_len <= 0)
        return AVERROR_INVALIDDATA;

    if (forward) {
        src = source;
        dst = frame;
        dst_end = frame_end;
    } else {
        src = source + src_len - 1;
        dst = frame_end - 1;
        dst_end = frame - 1;
    }
    for (;;) {
        int shift = 0;
        flag = 0;

        /* The mode/len decoding is a bit strange:
         * values are coded as variable-length codes with nibble units,
         * code end is signalled by two top bits in the nibble being nonzero.
         * And since data is bytepacked and we read two nibbles at a time,
         * we may get a nibble belonging to the next code.
         * Hence this convoluted loop.
         */
        if (!mode || (tmplen == 4)) {
            if (src < source || src >= source_end)
                return AVERROR_INVALIDDATA;
            val = *src;
            read_two_nibbles = 1;
        } else {
            val = saved_val;
            read_two_nibbles = 0;
        }
        if (!(val & 0xC)) {
            for (;;) {
                if (shift > 22)
                    return -1;
                if (!read_two_nibbles) {
                    if (src < source || src >= source_end)
                        return AVERROR_INVALIDDATA;
                    shift += 2;
                    val |= (unsigned)*src << shift;
                    if (*src & 0xC)
                        break;
                }
                // the two upper bits of the nibble are zero,
                // so shift the top nibble value down into their place
                read_two_nibbles = 0;
                shift += 2;
                mask = (1 << shift) - 1;
                val = ((val >> 2) & ~mask) | (val & mask);
                NEXT_BYTE(src);
                if ((val & (0xC << shift))) {
                    flag = 1;
                    break;
                }
            }
        } else if (mode) {
            flag = tmplen != 4;
        }
        if (flag) {
            tmplen = 4;
        } else {
            saved_val = val >> (4 + shift);
            tmplen = 0;
            val &= (1 << (shift + 4)) - 1;
            NEXT_BYTE(src);
        }
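        /* In the assembled code the lowest bit selects whether an extra mode
         * step is taken, the remaining bits (minus one) give the run length. */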
        advance_mode = val & 1;
        len = (val >> 1) - 1;
        av_assert0(len > 0);
        mode += 1 + advance_mode;
        if (mode >= 4)
            mode -= 3;
        if (len <= 0 || FFABS(dst_end - dst) < len)
            return AVERROR_INVALIDDATA;
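        /* mode 1: copy pixels from the reference position (frame_off bytes away),
         * mode 2: copy raw pixels from the bitstream,
         * mode 3: fill a run with the most recently decoded pixel value. */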
        switch (mode) {
        case 1:
            if (forward) {
                if (dst - frame + SCREEN_WIDE < frame_off ||
                    dst - frame + SCREEN_WIDE + frame_off < 0 ||
                    frame_end - dst < frame_off + len ||
                    frame_end - dst < len)
                    return AVERROR_INVALIDDATA;
                for (i = 0; i < len; i++)
                    dst[i] = dst[frame_off + i];
                dst += len;
            } else {
                dst -= len;
                if (dst - frame + SCREEN_WIDE < frame_off ||
                    dst - frame + SCREEN_WIDE + frame_off < 0 ||
                    frame_end - dst < frame_off + len ||
                    frame_end - dst < len)
                    return AVERROR_INVALIDDATA;
                for (i = len - 1; i >= 0; i--)
                    dst[i] = dst[frame_off + i];
            }
            break;
        case 2:
            if (forward) {
                if (source + src_len - src < len)
                    return AVERROR_INVALIDDATA;
                memcpy(dst, src, len);
                dst += len;
                src += len;
            } else {
                if (src - source < len)
                    return AVERROR_INVALIDDATA;
                dst -= len;
                src -= len;
                memcpy(dst, src, len);
            }
            break;
        case 3:
            val = forward ? dst[-1] : dst[1];
            if (forward) {
                memset(dst, val, len);
                dst += len;
            } else {
                dst -= len;
                memset(dst, val, len);
            }
            break;
        }
        if (dst == dst_end)
            return 0;
    }
}

static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
                        int *got_frame, AVPacket *pkt)
{
    BMVDecContext * const c = avctx->priv_data;
    int type, scr_off;
    int i, ret;
    uint8_t *srcptr, *outptr;

    c->stream = pkt->data;
    type = bytestream_get_byte(&c->stream);
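    /* The packet may carry interleaved audio blobs (65 bytes each);
     * the video decoder just skips over them. */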
    if (type & BMV_AUDIO) {
        int blobs = bytestream_get_byte(&c->stream);
        if (pkt->size < blobs * 65 + 2) {
            av_log(avctx, AV_LOG_ERROR, "Audio data doesn't fit in frame\n");
            return AVERROR_INVALIDDATA;
        }
        c->stream += blobs * 65;
    }
    if (type & BMV_COMMAND) {
        int command_size = (type & BMV_PRINT) ? 8 : 10;
        if (c->stream - pkt->data + command_size > pkt->size) {
            av_log(avctx, AV_LOG_ERROR, "Command data doesn't fit in frame\n");
            return AVERROR_INVALIDDATA;
        }
        c->stream += command_size;
    }
    if (type & BMV_PALETTE) {
        if (c->stream - pkt->data > pkt->size - 768) {
            av_log(avctx, AV_LOG_ERROR, "Palette data doesn't fit in frame\n");
            return AVERROR_INVALIDDATA;
        }
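        /* 256 packed RGB24 entries; store them with the alpha byte forced to 0xFF. */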
        for (i = 0; i < 256; i++)
            c->pal[i] = 0xFFU << 24 | bytestream_get_be24(&c->stream);
    }
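    /* An explicit scroll offset overrides the default: intra frames reference
     * the line above (-640, i.e. already decoded data in the same frame),
     * delta frames reference the same position in the previous frame. */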
    if (type & BMV_SCROLL) {
        if (c->stream - pkt->data > pkt->size - 2) {
            av_log(avctx, AV_LOG_ERROR, "Screen offset data doesn't fit in frame\n");
            return AVERROR_INVALIDDATA;
        }
        scr_off = (int16_t)bytestream_get_le16(&c->stream);
    } else if ((type & BMV_INTRA) == BMV_INTRA) {
        scr_off = -640;
    } else {
        scr_off = 0;
    }

    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;

    if (decode_bmv_frame(c->stream, pkt->size - (c->stream - pkt->data), c->frame, scr_off)) {
        av_log(avctx, AV_LOG_ERROR, "Error decoding frame data\n");
        return AVERROR_INVALIDDATA;
    }

    memcpy(frame->data[1], c->pal, AVPALETTE_SIZE);
#if FF_API_PALETTE_HAS_CHANGED
FF_DISABLE_DEPRECATION_WARNINGS
    frame->palette_has_changed = type & BMV_PALETTE;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    outptr = frame->data[0];
    srcptr = c->frame;

    for (i = 0; i < avctx->height; i++) {
        memcpy(outptr, srcptr, avctx->width);
        srcptr += avctx->width;
        outptr += frame->linesize[0];
    }

    *got_frame = 1;

    /* always report that the buffer was completely consumed */
    return pkt->size;
}

static av_cold int decode_init(AVCodecContext *avctx)
{
    BMVDecContext * const c = avctx->priv_data;

    c->avctx = avctx;
    avctx->pix_fmt = AV_PIX_FMT_PAL8;

    if (avctx->width != SCREEN_WIDE || avctx->height != SCREEN_HIGH) {
        av_log(avctx, AV_LOG_ERROR, "Invalid dimension %dx%d\n", avctx->width, avctx->height);
        return AVERROR_INVALIDDATA;
    }

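    /* frame_base has one spare line in front of the visible area, so a
     * reference offset of -640 (one line up) from the first row stays in bounds. */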
    c->frame = c->frame_base + 640;

    return 0;
}

const FFCodec ff_bmv_video_decoder = {
    .p.name         = "bmv_video",
    CODEC_LONG_NAME("Discworld II BMV video"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_BMV_VIDEO,
    .priv_data_size = sizeof(BMVDecContext),
    .init           = decode_init,
    FF_CODEC_DECODE_CB(decode_frame),
    .p.capabilities = AV_CODEC_CAP_DR1,
};