#if HAVE_OPENJPEG_2_3_OPENJPEG_H
# include <openjpeg-2.3/openjpeg.h>
#elif HAVE_OPENJPEG_2_2_OPENJPEG_H
# include <openjpeg-2.2/openjpeg.h>
#elif HAVE_OPENJPEG_2_1_OPENJPEG_H
# include <openjpeg-2.1/openjpeg.h>
#elif HAVE_OPENJPEG_2_0_OPENJPEG_H
# include <openjpeg-2.0/openjpeg.h>
#elif HAVE_OPENJPEG_1_5_OPENJPEG_H
# include <openjpeg-1.5/openjpeg.h>
#else
# include <openjpeg.h>
#endif

/* OPJ() maps the unprefixed OpenJPEG 1.x identifiers onto the
 * OPJ_-prefixed names used by the 2.x API. */
#if HAVE_OPENJPEG_2_3_OPENJPEG_H || HAVE_OPENJPEG_2_2_OPENJPEG_H || HAVE_OPENJPEG_2_1_OPENJPEG_H || HAVE_OPENJPEG_2_0_OPENJPEG_H
# define OPENJPEG_MAJOR_VERSION 2
# define OPJ(x) OPJ_##x
#else
# define OPENJPEG_MAJOR_VERSION 1
# define OPJ(x) x
#endif
/* version-dependent members of the LibOpenJPEGContext private context */
#if OPENJPEG_MAJOR_VERSION == 1
    opj_image_t *image;
#endif // OPENJPEG_MAJOR_VERSION == 1
    opj_cparameters_t enc_params;
#if OPENJPEG_MAJOR_VERSION == 1
    opj_event_mgr_t event_mgr;
#endif // OPENJPEG_MAJOR_VERSION == 1
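
/*
 * With OpenJPEG 2.x the encoder writes through user-supplied stream
 * callbacks instead of an opj_cio_t buffer.  PacketWriter tracks the
 * current write position inside the output AVPacket, and the callbacks
 * below grow the packet on demand with av_grow_packet().
 */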
#if OPENJPEG_MAJOR_VERSION == 2
typedef struct PacketWriter {
    int pos;
    AVPacket *packet;
} PacketWriter;

static OPJ_SIZE_T stream_write(void *out_buffer, OPJ_SIZE_T nb_bytes, void *user_data)
{
    PacketWriter *writer = user_data;
    AVPacket *packet = writer->packet;
    int remaining = packet->size - writer->pos;
    if (nb_bytes > remaining) {
        OPJ_SIZE_T needed = nb_bytes - remaining;
        int max_growth = INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE - packet->size;
        if (needed > max_growth) {
            return (OPJ_SIZE_T)-1;
        }
        if (av_grow_packet(packet, (int)needed)) {
            return (OPJ_SIZE_T)-1;
        }
    }
    memcpy(packet->data + writer->pos, out_buffer, nb_bytes);
    writer->pos += (int)nb_bytes;
    return nb_bytes;
}

static OPJ_OFF_T stream_skip(OPJ_OFF_T nb_bytes, void *user_data)
{
    PacketWriter *writer = user_data;
    AVPacket *packet = writer->packet;
    if (nb_bytes < 0) {
        if (writer->pos == 0) {
            return (OPJ_SIZE_T)-1;
        }
        if (nb_bytes + writer->pos < 0) {
            nb_bytes = -writer->pos;
        }
    } else {
        int remaining = packet->size - writer->pos;
        if (nb_bytes > remaining) {
            OPJ_SIZE_T needed = nb_bytes - remaining;
            int max_growth = INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE - packet->size;
            if (needed > max_growth) {
                return (OPJ_SIZE_T)-1;
            }
            if (av_grow_packet(packet, (int)needed)) {
                return (OPJ_SIZE_T)-1;
            }
        }
    }
    writer->pos += (int)nb_bytes;
    return nb_bytes;
}

static OPJ_BOOL stream_seek(OPJ_OFF_T nb_bytes, void *user_data)
{
    PacketWriter *writer = user_data;
    AVPacket *packet = writer->packet;
    if (nb_bytes < 0 || nb_bytes > packet->size) {
        return OPJ_FALSE;
    }
    writer->pos = (int)nb_bytes;
    return OPJ_TRUE;
}
#endif // OPENJPEG_MAJOR_VERSION == 2
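
/*
 * Apply the encoding constraints required by the digital cinema (DCI)
 * profiles: image offsets at the origin, a fixed 32x32 codeblock size,
 * CPRL progression order and no extra subsampling of the components.
 */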
static void cinema_parameters(opj_cparameters_t *p)
{
    /* image offset must stay at (0, 0) */
    p->image_offset_x0 = 0;
    p->image_offset_y0 = 0;

    /* 32x32 codeblocks */
    p->cblockw_init = 32;
    p->cblockh_init = 32;

    /* CPRL progression order */
    p->prog_order = OPJ(CPRL);

    /* no additional subsampling */
    p->subsampling_dx = 1;
    p->subsampling_dy = 1;
}
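
/*
 * Build the opj_image_t that mirrors the layout of the incoming frames:
 * one component per channel, with per-component subsampling taken from the
 * pixel format descriptor and the color space chosen from the pixel format
 * family (gray, RGB or YUV).
 */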
static opj_image_t *mj2_create_image(AVCodecContext *avctx, opj_cparameters_t *parameters)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    opj_image_cmptparm_t cmptparm[4] = {{0}};
    opj_image_t *img;
    int i, numcomps;
    int sub_dx[4], sub_dy[4];
    OPJ_COLOR_SPACE color_space = OPJ(CLRSPC_UNKNOWN);

    sub_dx[0] = sub_dx[3] = 1;
    sub_dy[0] = sub_dy[3] = 1;
    sub_dx[1] = sub_dx[2] = 1 << desc->log2_chroma_w;
    sub_dy[1] = sub_dy[2] = 1 << desc->log2_chroma_h;
    numcomps  = desc->nb_components;

    switch (avctx->pix_fmt) {
    /* ... grayscale pixel formats ... */
        color_space = OPJ(CLRSPC_GRAY);
        break;
    /* ... RGB pixel formats ... */
        color_space = OPJ(CLRSPC_SRGB);
        break;
    /* ... planar YUV pixel formats ... */
        color_space = OPJ(CLRSPC_SYCC);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "The requested pixel format '%s' is not supported\n",
               av_get_pix_fmt_name(avctx->pix_fmt));
        return NULL;
    }

    for (i = 0; i < numcomps; i++) {
        cmptparm[i].sgnd = 0;
        cmptparm[i].dx   = sub_dx[i];
        cmptparm[i].dy   = sub_dy[i];
        cmptparm[i].w    = (avctx->width  + sub_dx[i] - 1) / sub_dx[i];
        cmptparm[i].h    = (avctx->height + sub_dy[i] - 1) / sub_dy[i];
    }

    img = opj_image_create(numcomps, cmptparm, color_space);
    if (!img)
        return NULL;

    /* x1/y1 give the extent of the JPEG 2000 reference grid */
    img->x1 = (avctx->width  - 1) * parameters->subsampling_dx + 1;
    img->y1 = (avctx->height - 1) * parameters->subsampling_dy + 1;

    return img;
}
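
/*
 * Encoder initialization: start from libopenjpeg's default parameters and
 * map the cinema_mode and profile private options onto the OpenJPEG
 * rate-control and profile fields, rejecting combinations that contradict
 * each other.
 */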
static av_cold int libopenjpeg_encode_init(AVCodecContext *avctx)
{
    LibOpenJPEGContext *ctx = avctx->priv_data;
    int err = 0;

    opj_set_default_encoder_parameters(&ctx->enc_params);

#if HAVE_OPENJPEG_2_3_OPENJPEG_H || HAVE_OPENJPEG_2_2_OPENJPEG_H || HAVE_OPENJPEG_2_1_OPENJPEG_H
    switch (ctx->cinema_mode) {
    case OPJ_CINEMA2K_24:
        ctx->enc_params.rsiz          = OPJ_PROFILE_CINEMA_2K;
        ctx->enc_params.max_cs_size   = OPJ_CINEMA_24_CS;
        ctx->enc_params.max_comp_size = OPJ_CINEMA_24_COMP;
        break;
    case OPJ_CINEMA2K_48:
        ctx->enc_params.rsiz          = OPJ_PROFILE_CINEMA_2K;
        ctx->enc_params.max_cs_size   = OPJ_CINEMA_48_CS;
        ctx->enc_params.max_comp_size = OPJ_CINEMA_48_COMP;
        break;
    case OPJ_CINEMA4K_24:
        ctx->enc_params.rsiz          = OPJ_PROFILE_CINEMA_4K;
        ctx->enc_params.max_cs_size   = OPJ_CINEMA_24_CS;
        ctx->enc_params.max_comp_size = OPJ_CINEMA_24_COMP;
        break;
    }

    /* an explicitly requested profile must not contradict the cinema_mode
     * chosen above; a non-conflicting request is applied to enc_params.rsiz */
    if (ctx->profile == OPJ_CINEMA2K && ctx->enc_params.rsiz == OPJ_PROFILE_CINEMA_4K)
        err = AVERROR(EINVAL);
    if (ctx->profile == OPJ_CINEMA4K && ctx->enc_params.rsiz == OPJ_PROFILE_CINEMA_2K)
        err = AVERROR(EINVAL);
    if (err) {
        av_log(avctx, AV_LOG_ERROR,
               "Invalid parameter pairing: cinema_mode and profile conflict.\n");
        goto fail;
    }
#endif
#if OPENJPEG_MAJOR_VERSION == 1
    /* OpenJPEG 1.x creates the image once here at init time */
    ctx->image = mj2_create_image(avctx, &ctx->enc_params);
    if (!ctx->image) {
        err = AVERROR(EINVAL);
        goto fail;
    }
#endif // OPENJPEG_MAJOR_VERSION == 1
    return 0;

fail:
#if OPENJPEG_MAJOR_VERSION == 1
    opj_image_destroy(ctx->image);
#endif // OPENJPEG_MAJOR_VERSION == 1
    return err;
}
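
/*
 * Frame-copy helpers.  Each variant transfers one AVFrame into the
 * opj_image_t component planes: the "packed" readers de-interleave
 * RGB-style buffers from data[0], the "unpacked" readers copy planar YUV
 * data plane by plane, and the 12/16-bit variants read 16-bit samples.
 * Columns and rows beyond the visible frame are padded by replicating the
 * last pixel, and a component wider than the frame's linesize is rejected.
 */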
static int libopenjpeg_copy_packed8(AVCodecContext *avctx, const AVFrame *frame, opj_image_t *image)
{
    int compno;
    int x, y;
    int *image_line;
    int frame_index;
    const int numcomps = image->numcomps;

    for (compno = 0; compno < numcomps; ++compno) {
        if (image->comps[compno].w > frame->linesize[0] / numcomps) {
            /* not enough samples per line for this component */
            return 0;
        }
    }

    for (compno = 0; compno < numcomps; ++compno) {
        for (y = 0; y < avctx->height; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            frame_index = y * frame->linesize[0] + compno;
            for (x = 0; x < avctx->width; ++x) {
                image_line[x] = frame->data[0][frame_index];
                frame_index += numcomps;
            }
            /* pad the remaining columns with the last pixel */
            for (; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - 1];
            }
        }
        /* pad the remaining rows with the previous line */
        for (; y < image->comps[compno].h; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            for (x = 0; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - (int)image->comps[compno].w];
            }
        }
    }

    return 1;
}
static int libopenjpeg_copy_packed12(AVCodecContext *avctx, const AVFrame *frame, opj_image_t *image)
{
    int compno;
    int x, y;
    int *image_line;
    int frame_index;
    const int numcomps = image->numcomps;
    uint16_t *frame_ptr = (uint16_t *)frame->data[0];

    for (compno = 0; compno < numcomps; ++compno) {
        if (image->comps[compno].w > frame->linesize[0] / numcomps) {
            return 0;
        }
    }

    for (compno = 0; compno < numcomps; ++compno) {
        for (y = 0; y < avctx->height; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            frame_index = y * (frame->linesize[0] / 2) + compno;
            for (x = 0; x < avctx->width; ++x) {
                /* 12-bit samples are stored in the high bits of 16-bit words */
                image_line[x] = frame_ptr[frame_index] >> 4;
                frame_index += numcomps;
            }
            for (; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - 1];
            }
        }
        for (; y < image->comps[compno].h; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            for (x = 0; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - (int)image->comps[compno].w];
            }
        }
    }

    return 1;
}
static int libopenjpeg_copy_packed16(AVCodecContext *avctx, const AVFrame *frame, opj_image_t *image)
{
    int compno;
    int x, y;
    int *image_line;
    int frame_index;
    const int numcomps = image->numcomps;
    uint16_t *frame_ptr = (uint16_t*)frame->data[0];

    for (compno = 0; compno < numcomps; ++compno) {
        if (image->comps[compno].w > frame->linesize[0] / numcomps) {
            return 0;
        }
    }

    for (compno = 0; compno < numcomps; ++compno) {
        for (y = 0; y < avctx->height; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            frame_index = y * (frame->linesize[0] / 2) + compno;
            for (x = 0; x < avctx->width; ++x) {
                image_line[x] = frame_ptr[frame_index];
                frame_index += numcomps;
            }
            for (; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - 1];
            }
        }
        for (; y < image->comps[compno].h; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            for (x = 0; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - (int)image->comps[compno].w];
            }
        }
    }

    return 1;
}
static int libopenjpeg_copy_unpacked8(AVCodecContext *avctx, const AVFrame *frame, opj_image_t *image)
{
    int compno;
    int x, y;
    int width, height;
    int *image_line;
    int frame_index;
    const int numcomps = image->numcomps;

    for (compno = 0; compno < numcomps; ++compno) {
        if (image->comps[compno].w > frame->linesize[compno]) {
            return 0;
        }
    }

    for (compno = 0; compno < numcomps; ++compno) {
        width  = (avctx->width  + image->comps[compno].dx - 1) / image->comps[compno].dx;
        height = (avctx->height + image->comps[compno].dy - 1) / image->comps[compno].dy;
        for (y = 0; y < height; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            frame_index = y * frame->linesize[compno];
            for (x = 0; x < width; ++x)
                image_line[x] = frame->data[compno][frame_index++];
            for (; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - 1];
            }
        }
        for (; y < image->comps[compno].h; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            for (x = 0; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - (int)image->comps[compno].w];
            }
        }
    }

    return 1;
}
static int libopenjpeg_copy_unpacked16(AVCodecContext *avctx, const AVFrame *frame, opj_image_t *image)
{
    int compno;
    int x, y;
    int width, height;
    int *image_line;
    int frame_index;
    const int numcomps = image->numcomps;
    uint16_t *frame_ptr;

    for (compno = 0; compno < numcomps; ++compno) {
        if (image->comps[compno].w > frame->linesize[compno]) {
            return 0;
        }
    }

    for (compno = 0; compno < numcomps; ++compno) {
        width     = (avctx->width  + image->comps[compno].dx - 1) / image->comps[compno].dx;
        height    = (avctx->height + image->comps[compno].dy - 1) / image->comps[compno].dy;
        frame_ptr = (uint16_t *)frame->data[compno];
        for (y = 0; y < height; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            frame_index = y * (frame->linesize[compno] / 2);
            for (x = 0; x < width; ++x)
                image_line[x] = frame_ptr[frame_index++];
            for (; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - 1];
            }
        }
        for (; y < image->comps[compno].h; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            for (x = 0; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - (int)image->comps[compno].w];
            }
        }
    }

    return 1;
}
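
/*
 * Per-frame entry point: copy the AVFrame into an opj_image_t, then run the
 * OpenJPEG compressor.  With OpenJPEG 1.x the result is read back from an
 * opj_cio_t buffer; with 2.x it is written directly into the AVPacket
 * through the PacketWriter callbacks above.
 */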
static int libopenjpeg_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                    const AVFrame *frame, int *got_packet)
{
    LibOpenJPEGContext *ctx = avctx->priv_data;
    int ret, len;
    int cpyresult = 0;
#if OPENJPEG_MAJOR_VERSION == 1
    opj_image_t *image    = ctx->image;
    opj_cinfo_t *compress = NULL;
    opj_cio_t *stream     = NULL;
#else // OPENJPEG_MAJOR_VERSION == 2
    PacketWriter writer   = { 0 };
    opj_codec_t *compress = NULL;
    opj_stream_t *stream  = NULL;
    /* OpenJPEG 2.x creates a fresh image for every frame */
    opj_image_t *image    = mj2_create_image(avctx, &ctx->enc_params);
#endif // OPENJPEG_MAJOR_VERSION == 1

    switch (avctx->pix_fmt) {
    /* ... cases dispatching to the libopenjpeg_copy_* helper that matches
     *     the pixel format, each one setting cpyresult ... */
    default:
        av_log(avctx, AV_LOG_ERROR,
               "The frame's pixel format '%s' is not supported\n",
               av_get_pix_fmt_name(avctx->pix_fmt));
        ret = AVERROR(EINVAL);
        goto done;
    }

    if (!cpyresult) {
        av_log(avctx, AV_LOG_ERROR,
               "Could not copy the frame data to the internal image buffer\n");
        ret = AVERROR(EINVAL);
        goto done;
    }

#if OPENJPEG_MAJOR_VERSION == 2
    /* start small; stream_write() grows the packet as needed */
    if ((ret = ff_alloc_packet2(avctx, pkt, 1024, 0)) < 0)
        goto done;
#endif // OPENJPEG_MAJOR_VERSION == 2

    compress = opj_create_compress(ctx->format);
#if OPENJPEG_MAJOR_VERSION == 1
    opj_setup_encoder(compress, &ctx->enc_params, image);
    stream = opj_cio_open((opj_common_ptr) compress, NULL, 0);
#else // OPENJPEG_MAJOR_VERSION == 2
    if (!opj_setup_encoder(compress, &ctx->enc_params, image)) {
        ret = AVERROR_EXTERNAL;
        goto done;
    }
    stream = opj_stream_default_create(OPJ_STREAM_WRITE);
#endif // OPENJPEG_MAJOR_VERSION == 1

    if (!stream) {
        ret = AVERROR(ENOMEM);
        goto done;
    }

#if OPENJPEG_MAJOR_VERSION == 1
    ctx->event_mgr.info_handler    = info_callback;
    ctx->event_mgr.warning_handler = warning_callback;
    ctx->event_mgr.error_handler   = error_callback;
    opj_set_event_mgr((opj_common_ptr) compress, &ctx->event_mgr, avctx);
    if (!opj_encode(compress, stream, image, NULL)) {
        ret = AVERROR_EXTERNAL;
        goto done;
    }

    len = cio_tell(stream);
    if ((ret = ff_alloc_packet2(avctx, pkt, len, 0)) < 0)
        goto done;
    memcpy(pkt->data, stream->buffer, len);
#else // OPENJPEG_MAJOR_VERSION == 2
    writer.packet = pkt;
    opj_stream_set_write_function(stream, stream_write);
    opj_stream_set_skip_function(stream, stream_skip);
    opj_stream_set_seek_function(stream, stream_seek);
#if HAVE_OPENJPEG_2_3_OPENJPEG_H || HAVE_OPENJPEG_2_2_OPENJPEG_H || HAVE_OPENJPEG_2_1_OPENJPEG_H
    opj_stream_set_user_data(stream, &writer, NULL);
#elif HAVE_OPENJPEG_2_0_OPENJPEG_H
    opj_stream_set_user_data(stream, &writer);
#else
#error Missing call to opj_stream_set_user_data
#endif

    if (!opj_start_compress(compress, image, stream) ||
        !opj_encode(compress, stream) ||
        !opj_end_compress(compress, stream)) {
        ret = AVERROR_EXTERNAL;
        goto done;
    }

    /* trim the pre-allocated packet down to the bytes actually written */
    av_shrink_packet(pkt, writer.pos);
#endif // OPENJPEG_MAJOR_VERSION == 1

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    ret = 0;

done:
#if OPENJPEG_MAJOR_VERSION == 2
    opj_stream_destroy(stream);
    opj_destroy_codec(compress);
    opj_image_destroy(image);
#else
    opj_cio_close(stream);
    opj_destroy_compress(compress);
#endif
    return ret;
}
static av_cold int libopenjpeg_encode_close(AVCodecContext *avctx)
{
    LibOpenJPEGContext *ctx = avctx->priv_data;
#if OPENJPEG_MAJOR_VERSION == 1
    opj_image_destroy(ctx->image);
#endif // OPENJPEG_MAJOR_VERSION == 1
    return 0;
}
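
/*
 * The encoder's private options are declared in the AVOptions table below;
 * OFFSET() maps each option onto the corresponding LibOpenJPEGContext field
 * and VE marks them as video encoding parameters.
 *
 * Typical invocation (a sketch, assuming an FFmpeg build configured with
 * --enable-libopenjpeg):
 *
 *     ffmpeg -i input.png -c:v libopenjpeg output.jp2
 */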
#define OFFSET(x) offsetof(LibOpenJPEGContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

AVCodec ff_libopenjpeg_encoder = {
    .name           = "libopenjpeg",