    int remaining = packet->size - writer->pos;
    if (nb_bytes > remaining) {
        OPJ_SIZE_T needed = nb_bytes - remaining;
        int max_growth = INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE - packet->size;
        if (needed > max_growth) {
            return (OPJ_SIZE_T)-1;
        }
        if (av_grow_packet(packet, (int)needed)) {
            return (OPJ_SIZE_T)-1;
        }
    }
    memcpy(packet->data + writer->pos, out_buffer, nb_bytes);
    writer->pos += (int)nb_bytes;
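/*
 * stream_skip(): the OpenJPEG skip callback. Negative skips are clamped so the
 * write position never moves before the start of the packet; forward skips
 * reuse the same grow-on-demand logic as stream_write() above.
 */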
    if (writer->pos == 0) {
        return (OPJ_SIZE_T)-1;
    }
    if (nb_bytes + writer->pos < 0) {
        nb_bytes = -writer->pos;
    }
    int remaining = packet->size - writer->pos;
    if (nb_bytes > remaining) {
        OPJ_SIZE_T needed = nb_bytes - remaining;
        int max_growth = INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE - packet->size;
        if (needed > max_growth) {
            return (OPJ_SIZE_T)-1;
        }
        if (av_grow_packet(packet, (int)needed)) {
            return (OPJ_SIZE_T)-1;
        }
    }
    writer->pos += (int)nb_bytes;
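/*
 * stream_seek(): the OpenJPEG seek callback; an absolute position beyond the
 * current packet size grows the packet first, and a failed grow is reported
 * back to the library as OPJ_FALSE.
 */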
    if (nb_bytes > packet->size) {
        if (nb_bytes > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE ||
            av_grow_packet(packet, (int)nb_bytes - packet->size)) {
            return OPJ_FALSE;
        }
    }
    writer->pos = (int)nb_bytes;
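/*
 * cinema_parameters(): DCI-oriented defaults. The visible assignments disable
 * image offsets, force 32x32 code blocks, CPRL progression and 1:1
 * subsampling, matching the constraints of the JPEG 2000 digital-cinema
 * profiles.
 */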
    p->image_offset_x0 = 0;
    p->image_offset_y0 = 0;
    p->cblockw_init = 32;
    p->cblockh_init = 32;
    p->prog_order = OPJ_CPRL;
    p->subsampling_dx = 1;
    p->subsampling_dy = 1;
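/*
 * mj2_create_image(): builds the opj_image_t that OpenJPEG will compress.
 * Per-component subsampling factors and the colour space are derived from the
 * AVCodecContext pixel format (e.g. grayscale formats select OPJ_CLRSPC_GRAY,
 * RGB formats OPJ_CLRSPC_SRGB and YUV formats OPJ_CLRSPC_SYCC); unsupported
 * formats hit the error path below.
 */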
    opj_image_cmptparm_t cmptparm[4] = {{0}};
    OPJ_COLOR_SPACE color_space = OPJ_CLRSPC_UNKNOWN;

    sub_dx[0] = sub_dx[3] = 1;
    sub_dy[0] = sub_dy[3] = 1;

    color_space = OPJ_CLRSPC_GRAY;

    color_space = OPJ_CLRSPC_SRGB;

    color_space = OPJ_CLRSPC_SYCC;
           "The requested pixel format '%s' is not supported\n",

    for (i = 0; i < numcomps; i++) {
        cmptparm[i].sgnd = 0;
        cmptparm[i].dx = sub_dx[i];
        cmptparm[i].dy = sub_dy[i];
        cmptparm[i].w = (avctx->width + sub_dx[i] - 1) / sub_dx[i];
        cmptparm[i].h = (avctx->height + sub_dy[i] - 1) / sub_dy[i];
    }

    img = opj_image_create(numcomps, cmptparm, color_space);

    img->x1 = (avctx->width - 1) * parameters->subsampling_dx + 1;
    img->y1 = (avctx->height - 1) * parameters->subsampling_dy + 1;
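    /*
     * Dimension bookkeeping: cmptparm[i].w/h use ceiling division, so e.g. a
     * 1920-pixel-wide frame with sub_dx = 2 yields a 960-sample component,
     * and with subsampling_dx/dy left at 1 (the value cinema_parameters()
     * sets) the reference-grid extents x1/y1 reduce to exactly avctx->width
     * and avctx->height.  The libopenjpeg_encode_init() excerpt below then
     * configures the encoder parameters themselves.
     */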
    opj_set_default_encoder_parameters(&ctx->enc_params);

    case OPJ_CINEMA2K_24:
        ctx->enc_params.max_cs_size = OPJ_CINEMA_24_CS;
        ctx->enc_params.max_comp_size = OPJ_CINEMA_24_COMP;
    case OPJ_CINEMA2K_48:
        ctx->enc_params.max_cs_size = OPJ_CINEMA_48_CS;
        ctx->enc_params.max_comp_size = OPJ_CINEMA_48_COMP;
    case OPJ_CINEMA4K_24:
        ctx->enc_params.max_cs_size = OPJ_CINEMA_24_CS;
        ctx->enc_params.max_comp_size = OPJ_CINEMA_24_COMP;

    if (ctx->enc_params.rsiz == OPJ_PROFILE_CINEMA_4K) {

    if (ctx->enc_params.rsiz == OPJ_PROFILE_CINEMA_2K) {

           "Invalid parameter pairing: cinema_mode and profile conflict.\n");
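/*
 * Hypothetical usage sketch (not part of this file): the DCI presets are
 * reached through the "cinema_mode" AVOption declared near the end of the
 * file, e.g.
 *
 *     av_opt_set_int(avctx->priv_data, "cinema_mode", OPJ_CINEMA2K_24, 0);
 *
 * after which the cinema_mode switch above applies OpenJPEG's 24 fps
 * codestream and per-component size caps (OPJ_CINEMA_24_CS /
 * OPJ_CINEMA_24_COMP).  The pixel-copy helpers below fill the opj_image_t
 * from the incoming AVFrame.
 */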
    const int numcomps = image->numcomps;

    for (compno = 0; compno < numcomps; ++compno) {
        if (image->comps[compno].w > frame->linesize[0] / numcomps) {

    for (compno = 0; compno < numcomps; ++compno) {
        for (y = 0; y < avctx->height; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            frame_index = y * frame->linesize[0] + compno;
            for (x = 0; x < avctx->width; ++x) {
                image_line[x] = frame->data[0][frame_index];
                frame_index += numcomps;
            }
            for (; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - 1];
            }
        }
        for (; y < image->comps[compno].h; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            for (x = 0; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - (int)image->comps[compno].w];
            }
        }
    }
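    /*
     * The trailing loops handle a component that is larger than the source
     * frame: columns beyond avctx->width repeat the last written sample of
     * the row, and rows beyond avctx->height repeat the previous row (one
     * full component width back).  The 12/16-bit and planar copy helpers
     * below follow the same pattern.
     */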
    const int numcomps = image->numcomps;
    uint16_t *frame_ptr = (uint16_t *)frame->data[0];

    for (compno = 0; compno < numcomps; ++compno) {
        if (image->comps[compno].w > frame->linesize[0] / numcomps) {

    for (compno = 0; compno < numcomps; ++compno) {
        for (y = 0; y < avctx->height; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            frame_index = y * (frame->linesize[0] / 2) + compno;
            for (x = 0; x < avctx->width; ++x) {
                image_line[x] = frame_ptr[frame_index] >> 4;
                frame_index += numcomps;
            }
            for (; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - 1];
            }
        }
        for (; y < image->comps[compno].h; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            for (x = 0; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - (int)image->comps[compno].w];
            }
        }
    }
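    /*
     * The >> 4 above assumes MSB-aligned 12-bit samples stored in 16-bit
     * words (as in FFmpeg's packed 12-bit formats such as AV_PIX_FMT_XYZ12),
     * so the shift drops the four zero padding bits; linesize is divided by 2
     * because frame_ptr indexes 16-bit elements.
     */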
    const int numcomps = image->numcomps;
    uint16_t *frame_ptr = (uint16_t *)frame->data[0];

    for (compno = 0; compno < numcomps; ++compno) {
        if (image->comps[compno].w > frame->linesize[0] / numcomps) {

    for (compno = 0; compno < numcomps; ++compno) {
        for (y = 0; y < avctx->height; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            frame_index = y * (frame->linesize[0] / 2) + compno;
            for (x = 0; x < avctx->width; ++x) {
                image_line[x] = frame_ptr[frame_index];
                frame_index += numcomps;
            }
            for (; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - 1];
            }
        }
        for (; y < image->comps[compno].h; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            for (x = 0; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - (int)image->comps[compno].w];
            }
        }
    }
    const int numcomps = image->numcomps;

    for (compno = 0; compno < numcomps; ++compno) {
        if (image->comps[compno].w > frame->linesize[compno]) {

    for (compno = 0; compno < numcomps; ++compno) {
        width = (avctx->width + image->comps[compno].dx - 1) / image->comps[compno].dx;
        height = (avctx->height + image->comps[compno].dy - 1) / image->comps[compno].dy;
        for (y = 0; y < height; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            frame_index = y * frame->linesize[compno];
            for (x = 0; x < width; ++x)
                image_line[x] = frame->data[compno][frame_index++];
            for (; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - 1];
            }
        }
        for (; y < image->comps[compno].h; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            for (x = 0; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - (int)image->comps[compno].w];
            }
        }
    }
    const int numcomps = image->numcomps;

    for (compno = 0; compno < numcomps; ++compno) {
        if (image->comps[compno].w > frame->linesize[compno]) {

    for (compno = 0; compno < numcomps; ++compno) {
        width = (avctx->width + image->comps[compno].dx - 1) / image->comps[compno].dx;
        height = (avctx->height + image->comps[compno].dy - 1) / image->comps[compno].dy;
        frame_ptr = (uint16_t *)frame->data[compno];
        for (y = 0; y < height; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            frame_index = y * (frame->linesize[compno] / 2);
            for (x = 0; x < width; ++x)
                image_line[x] = frame_ptr[frame_index++];
            for (; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - 1];
            }
        }
        for (; y < image->comps[compno].h; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            for (x = 0; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - (int)image->comps[compno].w];
            }
        }
    }
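/*
 * The unpacked (planar) variants read each component from its own plane,
 * frame->data[compno], using the per-component dx/dy ceiling division for the
 * plane dimensions; the 16-bit version halves linesize because it indexes
 * uint16_t samples.  libopenjpeg_encode_frame() below drives the actual
 * compression.
 */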
    opj_codec_t *compress = NULL;
    opj_stream_t *stream = NULL;

           "The frame's pixel format '%s' is not supported\n",

           "Could not copy the frame data to the internal image buffer\n");

    compress = opj_create_compress(ctx->format);

    if (!opj_setup_encoder(compress, &ctx->enc_params, image)) {

    stream = opj_stream_default_create(OPJ_STREAM_WRITE);

    opj_stream_set_user_data(stream, &writer, NULL);

    if (!opj_start_compress(compress, image, stream) ||
        !opj_encode(compress, stream) ||
        !opj_end_compress(compress, stream)) {

    opj_stream_destroy(stream);
    opj_destroy_codec(compress);
    opj_image_destroy(image);
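    /*
     * Sketch of the OpenJPEG 2.x memory-stream wiring used here (the callback
     * registration lines are elided from this excerpt, so the exact calls are
     * an assumption based on the public openjpeg.h API):
     *
     *     opj_stream_set_write_function(stream, stream_write);
     *     opj_stream_set_skip_function(stream, stream_skip);
     *     opj_stream_set_seek_function(stream, stream_seek);
     *     opj_stream_set_user_data(stream, &writer, NULL);
     *
     * With the writer context (AVPacket plus write position) as user data,
     * opj_start_compress() / opj_encode() / opj_end_compress() emit the
     * codestream straight into the AVPacket, which grows on demand through
     * the callbacks defined at the top of the file.
     */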
#define OFFSET(x) offsetof(LibOpenJPEGContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

    { "cinema_mode", "Digital Cinema", OFFSET(cinema_mode), AV_OPT_TYPE_INT, { .i64 = OPJ_OFF }, OPJ_OFF, OPJ_CINEMA4K_24, VE, "cinema_mode" },
    { "prog_order", "Progression Order", OFFSET(prog_order), AV_OPT_TYPE_INT, { .i64 = OPJ_LRCP }, OPJ_LRCP, OPJ_CPRL, VE, "prog_order" },

    .name         = "libopenjpeg",
    .wrapper_name = "libopenjpeg",
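/*
 * Example invocation (hypothetical, assuming an FFmpeg build configured with
 * --enable-libopenjpeg):
 *
 *     ffmpeg -i input.png -c:v libopenjpeg output.jp2
 *
 * The private options declared in options[] above are passed like any other
 * per-stream option, e.g. -cinema_mode or -prog_order.
 */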