Go to the documentation of this file.
23 #include "config_components.h"
28 #include <rockchip/mpp_frame.h>
29 #include <rockchip/mpp_packet.h>
30 #include <rockchip/rk_mpi.h>
45 #define RKMPP_TIME_BASE AV_TIME_BASE_Q
46 #define RKMPP_ALIGN_SIZE 16
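The two constants above drive the timestamp and geometry handling used throughout the file: frame timestamps are exchanged with MPP in RKMPP_TIME_BASE (microseconds) and buffer dimensions are padded to RKMPP_ALIGN_SIZE. A minimal sketch of that pattern, assuming FFALIGN is used for the padding (the helper names are illustrative, not lines from this file):

    /* Sketch: rescale a frame timestamp from the codec time base into MPP's microsecond base. */
    static int64_t sketch_to_mpp_pts(AVCodecContext *avctx, int64_t pts)
    {
        return av_rescale_q(pts, avctx->time_base, RKMPP_TIME_BASE);
    }

    /* Sketch: pad the visible width/height up to the 16-pixel alignment the hardware expects. */
    static void sketch_aligned_geometry(AVCodecContext *avctx, int *hor_stride, int *ver_stride)
    {
        *hor_stride = FFALIGN(avctx->width,  RKMPP_ALIGN_SIZE);
        *ver_stride = FFALIGN(avctx->height, RKMPP_ALIGN_SIZE);
    }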
80 mpp_destroy(ctx->enc);
85 mpp_enc_cfg_deinit(ctx->cfg);
90 mpp_buffer_put(ctx->frame_buf);
95 mpp_buffer_group_put(ctx->buf_group);
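Taken together, these four calls are the teardown path (rkmpp_close_encoder in the symbol list further down): the MPP context, the encoder configuration, the staging buffer and its buffer group are released. A guarded sketch of the same sequence, assuming any of the fields may still be NULL when initialization failed part-way (the codec is registered with FF_CODEC_CAP_INIT_CLEANUP):

    static av_cold int sketch_close(AVCodecContext *avctx)
    {
        RKMPPEncoderContext *ctx = avctx->priv_data;

        if (ctx->enc)
            mpp_destroy(ctx->enc);            /* tear down the MPP encoder instance       */
        if (ctx->cfg)
            mpp_enc_cfg_deinit(ctx->cfg);     /* free the MppEncCfg handle                */
        if (ctx->frame_buf)
            mpp_buffer_put(ctx->frame_buf);   /* drop the reference on the staging buffer */
        if (ctx->buf_group)
            mpp_buffer_group_put(ctx->buf_group);
        return 0;
    }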
115 int ret = mpp_buffer_group_get_internal(&ctx->buf_group,
116 MPP_BUFFER_TYPE_DRM | MPP_BUFFER_FLAGS_CACHABLE);
127 ret = mpp_buffer_get(ctx->buf_group, &ctx->frame_buf, n);
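The internal, cacheable DRM buffer group backs the software-frame path: one staging buffer of size n is allocated and reused for every frame that has to be copied. The excerpt does not show how n is computed; a plausible sketch using av_image_get_buffer_size over the padded geometry (an assumption, not the file's actual expression):

    static int sketch_create_frame_buf(AVCodecContext *avctx)
    {
        RKMPPEncoderContext *ctx = avctx->priv_data;
        /* The padded strides already include the alignment, so ask for 1-byte alignment here. */
        int n = av_image_get_buffer_size(avctx->pix_fmt, ctx->mpp_stride, ctx->mpp_height, 1);

        if (n < 0)
            return n;
        if (mpp_buffer_get(ctx->buf_group, &ctx->frame_buf, n) != MPP_OK)
            return AVERROR_EXTERNAL;
        return 0;
    }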
141 MPP_ENC_HEADER_MODE_DEFAULT : MPP_ENC_HEADER_MODE_EACH_IDR;
143 int ret = ctx->mpi->control(ctx->enc, MPP_ENC_SET_HEADER_MODE, &mode);
157 MppPacket packet = NULL;
159 mpp_packet_set_length(packet, 0);
160 ret = ctx->mpi->control(ctx->enc, MPP_ENC_GET_HDR_SYNC, packet);
177 mpp_packet_deinit(&packet);
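This block (MppPacket, MPP_ENC_GET_HDR_SYNC, mpp_packet_deinit) is the global-header export, rkmpp_export_extradata: MPP fills a scratch packet with the parameter sets and the bytes are copied into avctx->extradata with the usual AV_INPUT_BUFFER_PADDING_SIZE. A hedged reconstruction of that flow, assuming the packet is set up with mpp_packet_init() over a caller-provided scratch buffer (that part is not visible in the excerpt):

    static int sketch_export_extradata(AVCodecContext *avctx, void *scratch, size_t scratch_size)
    {
        RKMPPEncoderContext *ctx = avctx->priv_data;
        MppPacket packet = NULL;
        size_t size;
        int ret;

        if (mpp_packet_init(&packet, scratch, scratch_size) != MPP_OK)
            return AVERROR_EXTERNAL;
        mpp_packet_set_length(packet, 0);

        ret = ctx->mpi->control(ctx->enc, MPP_ENC_GET_HDR_SYNC, packet);
        if (ret != MPP_OK) {
            mpp_packet_deinit(&packet);
            return AVERROR_EXTERNAL;
        }

        size = mpp_packet_get_length(packet);
        avctx->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
        if (avctx->extradata) {
            memcpy(avctx->extradata, mpp_packet_get_pos(packet), size);
            avctx->extradata_size = size;
        }
        mpp_packet_deinit(&packet);
        return avctx->extradata ? 0 : AVERROR(ENOMEM);
    }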
187 MppCodingType codectype;
190 codectype = MPP_VIDEO_CodingAVC;
193 codectype = MPP_VIDEO_CodingHEVC;
199 ret = mpp_check_support_format(MPP_CTX_ENC, codectype);
212 ret = mpp_init(ctx->enc, MPP_CTX_ENC, codectype);
218 ret = mpp_enc_cfg_init(&ctx->cfg);
224 MppEncCfg cfg = ctx->cfg;
225 ret = ctx->mpi->control(ctx->enc, MPP_ENC_GET_CFG, cfg);
231 mpp_enc_cfg_set_s32(cfg, "prep:width", avctx->width);
232 mpp_enc_cfg_set_s32(cfg, "prep:height", avctx->height);
235 mpp_enc_cfg_set_s32(cfg, "prep:hor_stride", ctx->mpp_stride);
236 mpp_enc_cfg_set_s32(cfg, "prep:ver_stride", ctx->mpp_height);
239 ctx->pix_fmt = MPP_FMT_YUV420SP;
241 ctx->pix_fmt = MPP_FMT_YUV420P;
244 mpp_enc_cfg_set_s32(cfg, "prep:format", ctx->pix_fmt);
247 mpp_enc_cfg_set_s32(cfg, "prep:colorspace", avctx->colorspace);
251 mpp_enc_cfg_set_s32(cfg, "prep:colortrc", avctx->color_trc);
255 "MppFrameColorRange not equal to AVColorRange");
256 mpp_enc_cfg_set_s32(cfg, "prep:colorrange", avctx->color_range);
264 mpp_enc_cfg_set_s32(cfg, "rc:fps_in_flex", 0);
265 mpp_enc_cfg_set_s32(cfg, "rc:fps_out_flex", 0);
267 mpp_enc_cfg_set_s32(cfg, "rc:fps_in_num", avctx->framerate.num);
268 mpp_enc_cfg_set_s32(cfg, "rc:fps_in_denom", avctx->framerate.den);
269 mpp_enc_cfg_set_s32(cfg, "rc:fps_out_num", avctx->framerate.num);
270 mpp_enc_cfg_set_s32(cfg, "rc:fps_out_denom", avctx->framerate.den);
274 mpp_enc_cfg_set_s32(cfg, "rc:gop", avctx->gop_size);
276 mpp_enc_cfg_set_u32(cfg, "rc:mode", ctx->rc_mode);
278 mpp_enc_cfg_set_s32(cfg, "rc:bps_target", avctx->bit_rate);
282 seconds = FFMIN(seconds, 60);
283 mpp_enc_cfg_set_s32(cfg, "rc:stats_time", seconds);
287 mpp_enc_cfg_set_s32(cfg, "rc:bps_max", avctx->rc_max_rate);
289 mpp_enc_cfg_set_s32(cfg, "rc:bps_min", avctx->rc_min_rate);
291 mpp_enc_cfg_set_u32(cfg, "rc:drop_mode", MPP_ENC_RC_DROP_FRM_DISABLED);
293 ret = ctx->mpi->control(ctx->enc, MPP_ENC_SET_CFG, cfg);
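The configuration code follows MPP's get-modify-set convention: the current MppEncCfg is read back with MPP_ENC_GET_CFG, the prep:* (geometry, format, colour properties) and rc:* (frame rate, GOP, bitrate, drop mode) fields are overridden, and the whole block is committed with MPP_ENC_SET_CFG; a separate MPP_ENC_SET_HEADER_MODE control decides whether parameter sets go into extradata or are repeated on every IDR frame. A condensed view of those pieces, grouped here only for readability (in the file they appear in separate places):

    MppEncCfg cfg = ctx->cfg;
    MppEncHeaderMode mode = (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) ?
                            MPP_ENC_HEADER_MODE_DEFAULT : MPP_ENC_HEADER_MODE_EACH_IDR;

    ctx->mpi->control(ctx->enc, MPP_ENC_GET_CFG, cfg);            /* read the current defaults      */
    mpp_enc_cfg_set_s32(cfg, "prep:width", avctx->width);        /* override prep:* and rc:* keys  */
    mpp_enc_cfg_set_u32(cfg, "rc:mode", ctx->rc_mode);
    ctx->mpi->control(ctx->enc, MPP_ENC_SET_CFG, cfg);            /* commit the whole configuration */
    ctx->mpi->control(ctx->enc, MPP_ENC_SET_HEADER_MODE, &mode);  /* choose SPS/PPS placement       */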
312 if (mpp_packet_get_eos(packet)) {
317 size_t size = mpp_packet_get_length(packet);
318 void *data = mpp_packet_get_pos(packet);
331 int64_t dts = mpp_packet_get_dts(packet);
346 MppMeta meta = mpp_packet_get_meta(packet);
353 ret = mpp_meta_get_s32(meta, KEY_OUTPUT_INTRA, &key_frame);
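These lines belong to the packet-draining side (rkmpp_output_pkt): the encoded payload is copied out of the MppPacket, timestamps come back in RKMPP_TIME_BASE and are rescaled into the codec time base, and KEY_OUTPUT_INTRA in the packet meta marks keyframes. A hedged sketch of that conversion (error paths simplified, exact rescaling details assumed):

    static int sketch_output_pkt(AVCodecContext *avctx, AVPacket *pkt, MppPacket packet)
    {
        size_t size = mpp_packet_get_length(packet);
        int key_frame = 0;
        MppMeta meta;
        int ret = ff_get_encode_buffer(avctx, pkt, size, 0);

        if (ret < 0)
            return ret;
        memcpy(pkt->data, mpp_packet_get_pos(packet), size);

        /* MPP reports timestamps in microseconds (RKMPP_TIME_BASE). */
        pkt->pts = av_rescale_q(mpp_packet_get_pts(packet), RKMPP_TIME_BASE, avctx->time_base);
        pkt->dts = av_rescale_q(mpp_packet_get_dts(packet), RKMPP_TIME_BASE, avctx->time_base);

        meta = mpp_packet_get_meta(packet);
        if (meta && mpp_meta_get_s32(meta, KEY_OUTPUT_INTRA, &key_frame) == MPP_OK && key_frame)
            pkt->flags |= AV_PKT_FLAG_KEY;
        return 0;
    }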
382 int stride = layer->planes[0].pitch;
383 int vertical = layer->planes[1].offset / stride;
387 ctx->mpp_height = vertical;
388 mpp_enc_cfg_set_s32(ctx->cfg, "prep:hor_stride", ctx->mpp_stride);
389 mpp_enc_cfg_set_s32(ctx->cfg, "prep:ver_stride", ctx->mpp_height);
390 ret = ctx->mpi->control(ctx->enc, MPP_ENC_SET_CFG, ctx->cfg);
397 mpp_frame_set_ver_stride(frame, vertical);
400 MppBufferInfo info = {
401 .type = MPP_BUFFER_TYPE_DRM,
402 .size = desc->objects[0].size,
403 .fd = desc->objects[0].fd,
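For AV_PIX_FMT_DRM_PRIME input (rkmpp_set_hw_frame) nothing is copied: the stride and vertical size are read from the AVDRMFrameDescriptor layer, and the DRM object's fd and size are wrapped in an MppBufferInfo of type MPP_BUFFER_TYPE_DRM. The import call itself is not visible in the excerpt; a sketch assuming mpp_buffer_import() is used for the zero-copy wrap (buffer lifetime management elided):

    MppBuffer buffer = NULL;
    MppBufferInfo info = {
        .type = MPP_BUFFER_TYPE_DRM,
        .size = desc->objects[0].size,
        .fd   = desc->objects[0].fd,
    };

    /* Wrap the external dmabuf fd so MPP can read the frame in place. */
    if (mpp_buffer_import(&buffer, &info) != MPP_OK)
        return AVERROR_EXTERNAL;
    mpp_frame_set_buffer(frame, buffer);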
420 mpp_buffer_sync_begin(ctx->frame_buf);
421 void *buf = mpp_buffer_get_ptr(ctx->frame_buf);
424 int dst_linesizes[4] = {0};
434 f->format, f->width, f->height);
435 mpp_frame_set_hor_stride(frame, ctx->mpp_stride);
436 mpp_frame_set_ver_stride(frame, ctx->mpp_height);
441 mpp_buffer_sync_end(ctx->frame_buf);
443 mpp_frame_set_buffer(frame, ctx->frame_buf);
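For system-memory frames (rkmpp_set_sw_frame) the pixels are copied into the cached staging buffer between mpp_buffer_sync_begin() and mpp_buffer_sync_end(), using the padded strides, and only then is the buffer attached to the MppFrame. A sketch of the copy step assembled from the libavutil helpers named in the symbol list (av_image_fill_linesizes, av_image_fill_pointers, av_image_copy2); the exact call sequence is a reconstruction:

    static void sketch_copy_sw_frame(RKMPPEncoderContext *ctx, const AVFrame *f, MppFrame frame)
    {
        uint8_t *dst_data[4];
        int dst_linesizes[4] = {0};
        void *buf;

        mpp_buffer_sync_begin(ctx->frame_buf);
        buf = mpp_buffer_get_ptr(ctx->frame_buf);

        /* Describe the destination layout with the padded stride/height... */
        av_image_fill_linesizes(dst_linesizes, f->format, ctx->mpp_stride);
        av_image_fill_pointers(dst_data, f->format, ctx->mpp_height, buf, dst_linesizes);
        /* ...then copy only the visible width x height region from the source frame. */
        av_image_copy2(dst_data, dst_linesizes, f->data, f->linesize,
                       f->format, f->width, f->height);
        mpp_buffer_sync_end(ctx->frame_buf);

        mpp_frame_set_buffer(frame, ctx->frame_buf);
    }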
460 if (ctx->frame->buf[0]) {
469 mpp_frame_set_fmt(frame, ctx->pix_fmt);
470 mpp_frame_set_width(frame, ctx->frame->width);
471 mpp_frame_set_height(frame, ctx->frame->height);
476 mpp_frame_set_eos(frame, 1);
485 mpp_frame_deinit(&frame);
495 MppPacket packet = NULL;
496 int ret = ctx->mpi->encode_get_packet(ctx->enc, &packet);
498 if (ret == MPP_OK && packet) {
500 mpp_packet_deinit(&packet);
507 if (!ctx->frame->buf[0]) {
517 if (!ctx->frame->buf[0])
518 ctx->eof_sent = true;
527 ctx->mpi->reset(ctx->enc);
528 ctx->eof_sent = true;
536 #define OFFSET(x) offsetof(RKMPPEncoderContext, x)
537 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
539 { "rc", "rate-control mode",
541 { "vbr", "Variable bitrate mode",
543 { "cbr", "Constant bitrate mode",
545 { "avbr", "Adaptive bit rate mode",
557 #define RKMPP_ENC(NAME, ID) \
558 const FFCodec ff_##NAME##_rkmpp_encoder = { \
559 .p.name = #NAME "_rkmpp", \
560 CODEC_LONG_NAME(#NAME " (rkmpp)"), \
561 .p.type = AVMEDIA_TYPE_VIDEO, \
563 .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | \
564 AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_ENCODER_FLUSH, \
565 .priv_data_size = sizeof(RKMPPEncoderContext), \
566 CODEC_PIXFMTS_ARRAY(rkmpp_pix_fmts), \
567 .color_ranges = AVCOL_RANGE_MPEG | AVCOL_RANGE_JPEG, \
568 .init = rkmpp_init_encoder, \
569 FF_CODEC_RECEIVE_PACKET_CB(rkmpp_receive), \
570 .close = rkmpp_close_encoder, \
571 .flush = rkmpp_flush, \
572 .p.priv_class = &rkmpp_enc_class, \
573 .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, \
574 .p.wrapper_name = "rkmpp", \
575 .hw_configs = rkmpp_hw_configs, \
578 #if CONFIG_H264_RKMPP_ENCODER
582 #if CONFIG_HEVC_RKMPP_ENCODER
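RKMPP_ENC() stamps out one FFCodec per supported codec, so under the two config guards the macro is presumably invoked with the codec name and its AVCodecID, along these lines (the exact invocation lines are not shown in the excerpt):

    #if CONFIG_H264_RKMPP_ENCODER
    RKMPP_ENC(h264, AV_CODEC_ID_H264)
    #endif

    #if CONFIG_HEVC_RKMPP_ENCODER
    RKMPP_ENC(hevc, AV_CODEC_ID_HEVC)
    #endif

With .p.name = #NAME "_rkmpp" this yields encoders registered as "h264_rkmpp" and "hevc_rkmpp".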
static av_cold void rkmpp_flush(AVCodecContext *avctx)
AVPixelFormat
Pixel format.
enum AVColorSpace colorspace
YUV colorspace type.
static const AVCodecHWConfigInternal *const rkmpp_hw_configs[]
int64_t rc_min_rate
minimum bitrate
#define AVERROR_EOF
End of file.
uint8_t * data
The data buffer.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
AVFrame
This structure describes decoded (raw) audio or video data.
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
@ AVCOL_RANGE_JPEG
Full range content.
@ AV_PIX_FMT_DRM_PRIME
DRM-managed buffers exposed through PRIME buffer sharing.
static int rkmpp_receive(AVCodecContext *avctx, AVPacket *pkt)
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
#define AV_CODEC_FLAG_GLOBAL_HEADER
Place global headers in extradata instead of every keyframe.
int flags
AV_CODEC_FLAG_*.
int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height, uint8_t *ptr, const int linesizes[4])
Fill plane data pointers for an image with pixel format pix_fmt and height height.
static enum AVPixelFormat rkmpp_pix_fmts[]
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int rkmpp_send_frame(AVCodecContext *avctx)
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
static av_cold int rkmpp_close_encoder(AVCodecContext *avctx)
static int rkmpp_create_frame_buf(AVCodecContext *avctx)
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
int64_t rc_max_rate
maximum bitrate
int rc_buffer_size
decoder bitstream buffer size
static int rkmpp_set_hw_frame(AVCodecContext *avctx, MppFrame frame)
#define LIBAVUTIL_VERSION_INT
AVClass
Describe the class of an AVClass context structure.
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
enum AVColorRange color_range
MPEG vs JPEG YUV range.
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NDEBUG.
int64_t bit_rate
the average bitrate
const char * av_default_item_name(void *ptr)
Return the context name.
static const AVClass rkmpp_enc_class
@ AVCOL_RANGE_UNSPECIFIED
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
static const AVOption rkmpp_options[]
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align)
Return the size in bytes of the amount of data required to store an image with the given parameters.
#define AVERROR_EXTERNAL
Generic error in an external library.
int flags
A combination of AV_PKT_FLAG values.
#define AV_LOG_INFO
Standard information.
#define HW_CONFIG_ENCODER_FRAMES(format, device_type_)
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
static av_always_inline av_const double round(double x)
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU).
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e. those with data not located in normal system memory).
static int rkmpp_export_extradata(AVCodecContext *avctx)
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
#define AV_INPUT_BUFFER_PADDING_SIZE
AVCodecContext
main external API structure.
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
static av_cold int rkmpp_init_encoder(AVCodecContext *avctx)
static void av_image_copy2(uint8_t *const dst_data[4], const int dst_linesizes[4], uint8_t *const src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Wrapper around av_image_copy() to workaround the limitation that the conversion from uint8_t * const * to const uint8_t * const * is not performed automatically in C.
@ AV_OPT_TYPE_INT
Underlying C type is int.
static int rkmpp_set_sw_frame(AVCodecContext *avctx, MppFrame frame)
static int rkmpp_output_pkt(AVCodecContext *avctx, AVPacket *pkt, MppPacket packet)
int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
Called by encoders to get the next frame for encoding.
AVBufferRef
A reference to a data buffer.
#define RKMPP_ENC(NAME, ID)
AVPacket
This structure stores compressed data.
int width
picture width / height.
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.