From d9e3c95ad3ea5efeff5245b77e6ddad6175bc61d Mon Sep 17 00:00:00 2001 From: rigaya Date: Fri, 7 Apr 2023 22:45:51 +0900 Subject: [PATCH 01/13] avdevice/v4l2: add v4l2 multi-planar API support --- libavdevice/v4l2.c | 173 ++++++++++++++++++++++++++++++++------------- 1 file changed, 124 insertions(+), 49 deletions(-) diff --git a/libavdevice/v4l2.c b/libavdevice/v4l2.c index 365bacd771..e194c1df24 100644 --- a/libavdevice/v4l2.c +++ b/libavdevice/v4l2.c @@ -75,14 +75,16 @@ struct video_data { int frame_size; int interlaced; int top_field_first; + int multi_planer; int ts_mode; TimeFilter *timefilter; int64_t last_time_m; int buffers; atomic_int buffers_queued; - void **buf_start; - unsigned int *buf_len; + int plane_count; + void ***buf_start; + unsigned int **buf_len; char *standard; v4l2_std_id std_id; int channel; @@ -169,11 +171,12 @@ static int device_open(AVFormatContext *ctx, const char* device_path) av_log(ctx, AV_LOG_VERBOSE, "fd:%d capabilities:%x\n", fd, cap.capabilities); - if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) { + if (!(cap.capabilities & (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_CAPTURE_MPLANE))) { av_log(ctx, AV_LOG_ERROR, "Not a video capture device.\n"); err = AVERROR(ENODEV); goto fail; } + s->multi_planer = ((cap.capabilities & (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_CAPTURE_MPLANE)) == V4L2_CAP_VIDEO_CAPTURE_MPLANE) ? 1 : 0; if (!(cap.capabilities & V4L2_CAP_STREAMING)) { av_log(ctx, AV_LOG_ERROR, @@ -193,7 +196,7 @@ static int device_init(AVFormatContext *ctx, int *width, int *height, uint32_t pixelformat) { struct video_data *s = ctx->priv_data; - struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE }; + struct v4l2_format fmt = { .type = (s->multi_planer) ? 
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE }; int res = 0; fmt.fmt.pix.width = *width; @@ -275,7 +278,7 @@ static void list_framesizes(AVFormatContext *ctx, uint32_t pixelformat) static void list_formats(AVFormatContext *ctx, int type) { const struct video_data *s = ctx->priv_data; - struct v4l2_fmtdesc vfd = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE }; + struct v4l2_fmtdesc vfd = { .type = (s->multi_planer) ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE }; while(!v4l2_ioctl(s->fd, VIDIOC_ENUM_FMT, &vfd)) { enum AVCodecID codec_id = ff_fmt_v4l2codec(vfd.pixelformat); @@ -339,7 +342,7 @@ static int mmap_init(AVFormatContext *ctx) int i, res; struct video_data *s = ctx->priv_data; struct v4l2_requestbuffers req = { - .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, + .type = (s->multi_planer) ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE, .count = desired_video_buffers, .memory = V4L2_MEMORY_MMAP }; @@ -355,45 +358,71 @@ static int mmap_init(AVFormatContext *ctx) return AVERROR(ENOMEM); } s->buffers = req.count; - s->buf_start = av_malloc_array(s->buffers, sizeof(void *)); + s->buf_start = av_malloc_array(s->buffers, sizeof(void **)); if (!s->buf_start) { av_log(ctx, AV_LOG_ERROR, "Cannot allocate buffer pointers\n"); return AVERROR(ENOMEM); } - s->buf_len = av_malloc_array(s->buffers, sizeof(unsigned int)); + s->buf_len = av_malloc_array(s->buffers, sizeof(unsigned int*)); if (!s->buf_len) { av_log(ctx, AV_LOG_ERROR, "Cannot allocate buffer sizes\n"); av_freep(&s->buf_start); return AVERROR(ENOMEM); } + s->plane_count = 0; for (i = 0; i < req.count; i++) { + int total_frame_size = 0; + int plane_count = 0; + struct v4l2_plane planes[VIDEO_MAX_PLANES]; struct v4l2_buffer buf = { - .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, - .index = i, - .memory = V4L2_MEMORY_MMAP + .type = (s->multi_planer) ? 
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE, + .index = i, + .memory = V4L2_MEMORY_MMAP, + .m.planes = (s->multi_planer) ? planes : 0, + .length = (s->multi_planer) ? VIDEO_MAX_PLANES : 0 }; if (v4l2_ioctl(s->fd, VIDIOC_QUERYBUF, &buf) < 0) { res = AVERROR(errno); av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYBUF): %s\n", av_err2str(res)); return res; } - - s->buf_len[i] = buf.length; - if (s->frame_size > 0 && s->buf_len[i] < s->frame_size) { - av_log(ctx, AV_LOG_ERROR, - "buf_len[%d] = %d < expected frame size %d\n", - i, s->buf_len[i], s->frame_size); + plane_count = (s->multi_planer) ? buf.length : 1; + if (s->plane_count > 0 && s->plane_count != plane_count) { + av_log(ctx, AV_LOG_ERROR, "Plane count differed between buffers\n"); + return AVERROR(EINVAL); + } + s->plane_count = plane_count; + s->buf_start[i] = av_malloc_array(s->plane_count, sizeof(void *)); + if (!s->buf_start[i]) { + av_log(ctx, AV_LOG_ERROR, "Cannot allocate buffer pointers\n"); return AVERROR(ENOMEM); } - s->buf_start[i] = v4l2_mmap(NULL, buf.length, - PROT_READ | PROT_WRITE, MAP_SHARED, - s->fd, buf.m.offset); + s->buf_len[i] = av_malloc_array(s->plane_count, sizeof(unsigned int*)); + if (!s->buf_len[i]) { + av_log(ctx, AV_LOG_ERROR, "Cannot allocate buffer sizes\n"); + av_freep(&s->buf_start); + return AVERROR(ENOMEM); + } + for (int iplane = 0; iplane < s->plane_count; iplane++) { + s->buf_len[i][iplane] = (s->multi_planer) ? buf.m.planes[iplane].length : buf.length; + total_frame_size += s->buf_len[i][iplane]; + s->buf_start[i][iplane] = v4l2_mmap(NULL, s->buf_len[i][iplane], + PROT_READ | PROT_WRITE, MAP_SHARED, + s->fd, (s->multi_planer) ? 
buf.m.planes[iplane].m.mem_offset : buf.m.offset); + + if (s->buf_start[i] == MAP_FAILED) { + res = AVERROR(errno); + av_log(ctx, AV_LOG_ERROR, "mmap: %s\n", av_err2str(res)); + return res; + } + } - if (s->buf_start[i] == MAP_FAILED) { - res = AVERROR(errno); - av_log(ctx, AV_LOG_ERROR, "mmap: %s\n", av_err2str(res)); - return res; + if (s->frame_size > 0 && total_frame_size < s->frame_size) { + av_log(ctx, AV_LOG_ERROR, + "buf_len[%d] = %d < expected frame size %d\n", + i, total_frame_size, s->frame_size); + return AVERROR(ENOMEM); } } @@ -420,9 +449,9 @@ static void mmap_release_buffer(void *opaque, uint8_t *data) struct buff_data *buf_descriptor = opaque; struct video_data *s = buf_descriptor->s; - buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + buf.type = (s->multi_planer) ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE; buf.memory = V4L2_MEMORY_MMAP; - buf.index = buf_descriptor->index; + buf.index = buf_descriptor->index; av_free(buf_descriptor); enqueue_buffer(s, &buf); @@ -492,9 +521,12 @@ static int convert_timestamp(AVFormatContext *ctx, int64_t *ts) static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt) { struct video_data *s = ctx->priv_data; + struct v4l2_plane planes[VIDEO_MAX_PLANES]; struct v4l2_buffer buf = { - .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, - .memory = V4L2_MEMORY_MMAP + .type = (s->multi_planer) ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE, + .memory = V4L2_MEMORY_MMAP, + .m.planes = (s->multi_planer) ? planes : 0, + .length = (s->multi_planer) ? VIDEO_MAX_PLANES : 0 }; struct timeval buf_ts; int res; @@ -532,29 +564,63 @@ static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt) } else #endif { + int total_frame_size = 0; + /* CPIA is a compressed format and we don't know the exact number of bytes * used by a frame, so set it here as the driver announces it. 
*/ if (ctx->video_codec_id == AV_CODEC_ID_CPIA) s->frame_size = buf.bytesused; - if (s->frame_size > 0 && buf.bytesused != s->frame_size) { + if (V4L2_TYPE_IS_MULTIPLANAR(buf.type)) { + for (int iplane = 0; iplane < buf.length; iplane++) { + total_frame_size += buf.m.planes[iplane].bytesused; + } + } else { + total_frame_size = buf.bytesused; + } + if (s->frame_size > 0 && total_frame_size != s->frame_size) { av_log(ctx, AV_LOG_WARNING, "Dequeued v4l2 buffer contains %d bytes, but %d were expected. Flags: 0x%08X.\n", - buf.bytesused, s->frame_size, buf.flags); - buf.bytesused = 0; + total_frame_size, s->frame_size, buf.flags); + if (V4L2_TYPE_IS_MULTIPLANAR(buf.type)) { + for (int iplane = 0; iplane < buf.length; iplane++) { + buf.m.planes[iplane].bytesused = 0; + } + } else { + buf.bytesused = 0; + } } } - /* Image is at s->buff_start[buf.index] */ - if (atomic_load(&s->buffers_queued) == FFMAX(s->buffers / 8, 1)) { - /* when we start getting low on queued buffers, fall back on copying data */ - res = av_new_packet(pkt, buf.bytesused); - if (res < 0) { - av_log(ctx, AV_LOG_ERROR, "Error allocating a packet.\n"); - enqueue_buffer(s, &buf); - return res; + if (atomic_load(&s->buffers_queued) == FFMAX(s->buffers / 8, 1) || V4L2_TYPE_IS_MULTIPLANAR(buf.type)) { + if (V4L2_TYPE_IS_MULTIPLANAR(buf.type)) { + int totalbytes = 0; + for (int iplane = 0; iplane < buf.length; iplane++) { + totalbytes += buf.m.planes[iplane].bytesused; + } + res = av_new_packet(pkt, totalbytes); + if (res < 0) { + av_log(ctx, AV_LOG_ERROR, "Error allocating a packet.\n"); + enqueue_buffer(s, &buf); + return res; + } + totalbytes = 0; + for (int iplane = 0; iplane < buf.length; iplane++) { + struct v4l2_plane *plane = &buf.m.planes[iplane]; + memcpy(pkt->data + totalbytes, s->buf_start[buf.index][plane->data_offset], plane->bytesused); + totalbytes += plane->bytesused; + } + } else { + /* Image is at s->buff_start[buf.index] */ + /* when we start getting low on queued buffers, fall back on 
copying data */ + res = av_new_packet(pkt, buf.bytesused); + if (res < 0) { + av_log(ctx, AV_LOG_ERROR, "Error allocating a packet.\n"); + enqueue_buffer(s, &buf); + return res; + } + memcpy(pkt->data, s->buf_start[buf.index][0], buf.bytesused); } - memcpy(pkt->data, s->buf_start[buf.index], buf.bytesused); res = enqueue_buffer(s, &buf); if (res) { @@ -564,7 +630,7 @@ static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt) } else { struct buff_data *buf_descriptor; - pkt->data = s->buf_start[buf.index]; + pkt->data = s->buf_start[buf.index][0]; pkt->size = buf.bytesused; buf_descriptor = av_malloc(sizeof(struct buff_data)); @@ -602,10 +668,13 @@ static int mmap_start(AVFormatContext *ctx) int i, res; for (i = 0; i < s->buffers; i++) { + struct v4l2_plane planes[VIDEO_MAX_PLANES]; struct v4l2_buffer buf = { - .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, - .index = i, - .memory = V4L2_MEMORY_MMAP + .type = (s->multi_planer) ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE, + .index = i, + .memory = V4L2_MEMORY_MMAP, + .m.planes = (s->multi_planer) ? planes : 0, + .length = (s->multi_planer) ? VIDEO_MAX_PLANES : 0 }; if (v4l2_ioctl(s->fd, VIDIOC_QBUF, &buf) < 0) { @@ -617,7 +686,7 @@ static int mmap_start(AVFormatContext *ctx) } atomic_store(&s->buffers_queued, s->buffers); - type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + type = (s->multi_planer) ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE; if (v4l2_ioctl(s->fd, VIDIOC_STREAMON, &type) < 0) { res = AVERROR(errno); av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_STREAMON): %s\n", @@ -633,13 +702,19 @@ static void mmap_close(struct video_data *s) enum v4l2_buf_type type; int i; - type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + type = (s->multi_planer) ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE; /* We do not check for the result, because we could * not do anything about it anyway... 
*/ v4l2_ioctl(s->fd, VIDIOC_STREAMOFF, &type); for (i = 0; i < s->buffers; i++) { - v4l2_munmap(s->buf_start[i], s->buf_len[i]); + for (int iplane = 0; iplane < s->plane_count; iplane++) { + v4l2_munmap(s->buf_start[i][iplane], s->buf_len[i][iplane]); + } + } + for (int iplane = 0; iplane < s->plane_count; iplane++) { + av_freep(&s->buf_start[iplane]); + av_freep(&s->buf_len[iplane]); } av_freep(&s->buf_start); av_freep(&s->buf_len); @@ -720,7 +795,7 @@ static int v4l2_set_parameters(AVFormatContext *ctx) tpf = &streamparm.parm.capture.timeperframe; } - streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + streamparm.type = (s->multi_planer) ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE; if (v4l2_ioctl(s->fd, VIDIOC_G_PARM, &streamparm) < 0) { ret = AVERROR(errno); av_log(ctx, AV_LOG_WARNING, "ioctl(VIDIOC_G_PARM): %s\n", av_err2str(ret)); @@ -908,7 +983,7 @@ static int v4l2_read_header(AVFormatContext *ctx) } if (!s->width && !s->height) { - struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE }; + struct v4l2_format fmt = { .type = (s->multi_planer) ? 
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE }; av_log(ctx, AV_LOG_VERBOSE, "Querying the device for the current frame size\n"); -- 2.41.0 From 25de40876e8e9d3868f22dc37ac492b64be186a0 Mon Sep 17 00:00:00 2001 From: rigaya Date: Fri, 7 Apr 2023 22:47:10 +0900 Subject: [PATCH 02/13] avdevice/v4l2: add nv16, nv24 support --- libavdevice/v4l2-common.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libavdevice/v4l2-common.c b/libavdevice/v4l2-common.c index b5b4448a31..1926179fdc 100644 --- a/libavdevice/v4l2-common.c +++ b/libavdevice/v4l2-common.c @@ -49,6 +49,8 @@ const struct fmt_map ff_fmt_conversion_table[] = { #ifdef V4L2_PIX_FMT_Z16 { AV_PIX_FMT_GRAY16LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_Z16 }, #endif + { AV_PIX_FMT_NV24, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_NV24 }, + { AV_PIX_FMT_NV16, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_NV16 }, { AV_PIX_FMT_NV12, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_NV12 }, { AV_PIX_FMT_NONE, AV_CODEC_ID_MJPEG, V4L2_PIX_FMT_MJPEG }, { AV_PIX_FMT_NONE, AV_CODEC_ID_MJPEG, V4L2_PIX_FMT_JPEG }, -- 2.41.0 From ebfac2ef875422a57955bd7c1f09c1b9d48d958a Mon Sep 17 00:00:00 2001 From: rigaya Date: Sat, 8 Apr 2023 09:48:45 +0900 Subject: [PATCH 03/13] avdevice/v4l2: add option to ignore input error (-ignore_input_error). Some device returns error with VIDIOC_S_INPUT, VIDIOC_G_INPUT. --- libavdevice/v4l2.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/libavdevice/v4l2.c b/libavdevice/v4l2.c index e194c1df24..fc20da7ef7 100644 --- a/libavdevice/v4l2.c +++ b/libavdevice/v4l2.c @@ -77,6 +77,7 @@ struct video_data { int top_field_first; int multi_planer; int ts_mode; + int ignore_input_error; TimeFilter *timefilter; int64_t last_time_m; @@ -798,7 +799,7 @@ static int v4l2_set_parameters(AVFormatContext *ctx) streamparm.type = (s->multi_planer) ? 
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE; if (v4l2_ioctl(s->fd, VIDIOC_G_PARM, &streamparm) < 0) { ret = AVERROR(errno); - av_log(ctx, AV_LOG_WARNING, "ioctl(VIDIOC_G_PARM): %s\n", av_err2str(ret)); + av_log(ctx, AV_LOG_WARNING, "ioctl(VIDIOC_G_PARM): %s\n", av_err2str(ret)); } else if (framerate_q.num && framerate_q.den) { if (streamparm.parm.capture.capability & V4L2_CAP_TIMEPERFRAME) { tpf = &streamparm.parm.capture.timeperframe; @@ -928,8 +929,10 @@ static int v4l2_read_header(AVFormatContext *ctx) av_log(ctx, AV_LOG_DEBUG, "Selecting input_channel: %d\n", s->channel); if (v4l2_ioctl(s->fd, VIDIOC_S_INPUT, &s->channel) < 0) { res = AVERROR(errno); - av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_S_INPUT): %s\n", av_err2str(res)); - goto fail; + av_log(ctx, (s->ignore_input_error) ? AV_LOG_WARNING : AV_LOG_ERROR, "ioctl(VIDIOC_S_INPUT): %s\n", av_err2str(res)); + if (!s->ignore_input_error) { + goto fail; + } } } else { /* get current video input */ @@ -1181,6 +1184,7 @@ static const AVOption options[] = { { "pixel_format", "set preferred pixel format", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC }, { "input_format", "set preferred pixel format (for raw video) or codec name", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC }, { "framerate", "set frame rate", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC }, + { "ignore_input_error", "ignore input error", OFFSET(ignore_input_error), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, DEC }, { "list_formats", "list available formats and exit", OFFSET(list_format), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, DEC, "list_formats" }, { "all", "show all available formats", OFFSET(list_format), AV_OPT_TYPE_CONST, {.i64 = V4L_ALLFORMATS }, 0, INT_MAX, DEC, "list_formats" }, -- 2.41.0 From 287b4e0959df84cc293a03300e98a4fdb0193935 Mon Sep 17 00:00:00 2001 From: rigaya Date: Sat, 8 Apr 2023 09:49:05 +0900 Subject: [PATCH 04/13] avdevice/v4l2: estimate framerate from 
dv-timings when VIDIOC_G_PARM returns error. --- libavdevice/v4l2.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/libavdevice/v4l2.c b/libavdevice/v4l2.c index fc20da7ef7..e5aa33e453 100644 --- a/libavdevice/v4l2.c +++ b/libavdevice/v4l2.c @@ -798,8 +798,38 @@ static int v4l2_set_parameters(AVFormatContext *ctx) streamparm.type = (s->multi_planer) ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE; if (v4l2_ioctl(s->fd, VIDIOC_G_PARM, &streamparm) < 0) { + // for error cases, try to get frame rate from VIDIOC_G_DV_TIMINGS + struct v4l2_dv_timings timings; ret = AVERROR(errno); + if (v4l2_ioctl(s->fd, VIDIOC_G_DV_TIMINGS, &timings) == 0) { + const int total_width = timings.bt.width + timings.bt.hfrontporch + timings.bt.hsync + timings.bt.hbackporch; + const int total_height = timings.bt.height + timings.bt.vfrontporch + timings.bt.vsync + timings.bt.vbackporch; + int64_t framerate_den = 1001; + int64_t framerate_num = av_rescale(timings.bt.pixelclock, framerate_den, (int64_t)total_width * total_height); + framerate_num = ((framerate_num + 5) / 10) * 10; // round by 10 + if (framerate_num % 1000 == 0) { + tpf->numerator = framerate_den; + tpf->denominator = framerate_num; + } else { + int framerate_num_dst = 0, framerate_den_dst = 0; + framerate_den = 1000; + framerate_num = av_rescale(timings.bt.pixelclock, framerate_den, (int64_t)total_width * total_height); + framerate_num = ((framerate_num + 5) / 10) * 10; // round by 10 + av_reduce(&framerate_num_dst, &framerate_den_dst, framerate_num, framerate_den, INT_MAX); + tpf->numerator = framerate_den_dst; + tpf->denominator = framerate_num_dst; + } + av_log(ctx, AV_LOG_WARNING, "ioctl(VIDIOC_G_PARM): %s, estimated framerate %d/%d from dv timings.\n", + av_err2str(ret), tpf->denominator, tpf->numerator); + } else if (framerate_q.num && framerate_q.den) { + // use user defined framerate for further error cases. 
+ tpf->numerator = framerate_q.num; + tpf->denominator = framerate_q.den; + av_log(ctx, AV_LOG_WARNING, "ioctl(VIDIOC_G_PARM): %s, using framerate %d/%d\n", + av_err2str(ret), framerate_q.num, framerate_q.den); + } else { av_log(ctx, AV_LOG_WARNING, "ioctl(VIDIOC_G_PARM): %s\n", av_err2str(ret)); + } } else if (framerate_q.num && framerate_q.den) { if (streamparm.parm.capture.capability & V4L2_CAP_TIMEPERFRAME) { tpf = &streamparm.parm.capture.timeperframe; -- 2.41.0 From 305e4e3c84e42368c41d3b9e9b814f0027c8f7f2 Mon Sep 17 00:00:00 2001 From: boogie Date: Sat, 17 Jun 2023 18:56:02 +0200 Subject: [PATCH 05/13] v4l2: use always channel 0 if driver does not return it. Always ignore device select / set errors --- libavdevice/v4l2.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/libavdevice/v4l2.c b/libavdevice/v4l2.c index e5aa33e453..9b64caecaa 100644 --- a/libavdevice/v4l2.c +++ b/libavdevice/v4l2.c @@ -968,8 +968,11 @@ static int v4l2_read_header(AVFormatContext *ctx) /* get current video input */ if (v4l2_ioctl(s->fd, VIDIOC_G_INPUT, &s->channel) < 0) { res = AVERROR(errno); - av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_G_INPUT): %s\n", av_err2str(res)); - goto fail; + av_log(ctx, (s->ignore_input_error) ? 
AV_LOG_WARNING : AV_LOG_ERROR, "ioctl(VIDIOC_G_INPUT): %s\n", av_err2str(res)); + if (!s->ignore_input_error) + goto fail; + else + s->channel = 0; } } @@ -1214,7 +1217,7 @@ static const AVOption options[] = { { "pixel_format", "set preferred pixel format", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC }, { "input_format", "set preferred pixel format (for raw video) or codec name", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC }, { "framerate", "set frame rate", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC }, - { "ignore_input_error", "ignore input error", OFFSET(ignore_input_error), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, DEC }, + { "ignore_input_error", "ignore input error", OFFSET(ignore_input_error), AV_OPT_TYPE_BOOL, {.i64 = 1 }, 0, 1, DEC }, { "list_formats", "list available formats and exit", OFFSET(list_format), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, DEC, "list_formats" }, { "all", "show all available formats", OFFSET(list_format), AV_OPT_TYPE_CONST, {.i64 = V4L_ALLFORMATS }, 0, INT_MAX, DEC, "list_formats" }, -- 2.41.0 From 204a6da5244c5905836c6542e4a3f4f78128ff18 Mon Sep 17 00:00:00 2001 From: boogie Date: Mon, 24 Jul 2023 16:05:01 +0200 Subject: [PATCH 06/13] remove old rkmpp --- libavcodec/rkmppdec.c | 586 ------------------------------------------ 1 file changed, 586 deletions(-) delete mode 100644 libavcodec/rkmppdec.c diff --git a/libavcodec/rkmppdec.c b/libavcodec/rkmppdec.c deleted file mode 100644 index a60962dc86..0000000000 --- a/libavcodec/rkmppdec.c +++ /dev/null @@ -1,586 +0,0 @@ -/* - * RockChip MPP Video Decoder - * Copyright (c) 2017 Lionel CHAZALLON - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
- * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include -#include -#include -#include -#include -#include - -#include "avcodec.h" -#include "decode.h" -#include "hwconfig.h" -#include "internal.h" -#include "libavutil/buffer.h" -#include "libavutil/common.h" -#include "libavutil/frame.h" -#include "libavutil/hwcontext.h" -#include "libavutil/hwcontext_drm.h" -#include "libavutil/imgutils.h" -#include "libavutil/log.h" - -#define RECEIVE_FRAME_TIMEOUT 100 -#define FRAMEGROUP_MAX_FRAMES 16 -#define INPUT_MAX_PACKETS 4 - -typedef struct { - MppCtx ctx; - MppApi *mpi; - MppBufferGroup frame_group; - - char first_packet; - char eos_reached; - - AVBufferRef *frames_ref; - AVBufferRef *device_ref; -} RKMPPDecoder; - -typedef struct { - AVClass *av_class; - AVBufferRef *decoder_ref; -} RKMPPDecodeContext; - -typedef struct { - MppFrame frame; - AVBufferRef *decoder_ref; -} RKMPPFrameContext; - -static MppCodingType rkmpp_get_codingtype(AVCodecContext *avctx) -{ - switch (avctx->codec_id) { - case AV_CODEC_ID_H264: return MPP_VIDEO_CodingAVC; - case AV_CODEC_ID_HEVC: return MPP_VIDEO_CodingHEVC; - case AV_CODEC_ID_VP8: return MPP_VIDEO_CodingVP8; - case AV_CODEC_ID_VP9: return MPP_VIDEO_CodingVP9; - default: return MPP_VIDEO_CodingUnused; - } -} - -static uint32_t rkmpp_get_frameformat(MppFrameFormat mppformat) -{ - switch (mppformat) { - case MPP_FMT_YUV420SP: return DRM_FORMAT_NV12; -#ifdef DRM_FORMAT_NV12_10 - case MPP_FMT_YUV420SP_10BIT: return DRM_FORMAT_NV12_10; -#endif - default: return 0; - } -} - -static int 
rkmpp_write_data(AVCodecContext *avctx, uint8_t *buffer, int size, int64_t pts) -{ - RKMPPDecodeContext *rk_context = avctx->priv_data; - RKMPPDecoder *decoder = (RKMPPDecoder *)rk_context->decoder_ref->data; - int ret; - MppPacket packet; - - // create the MPP packet - ret = mpp_packet_init(&packet, buffer, size); - if (ret != MPP_OK) { - av_log(avctx, AV_LOG_ERROR, "Failed to init MPP packet (code = %d)\n", ret); - return AVERROR_UNKNOWN; - } - - mpp_packet_set_pts(packet, pts); - - if (!buffer) - mpp_packet_set_eos(packet); - - ret = decoder->mpi->decode_put_packet(decoder->ctx, packet); - if (ret != MPP_OK) { - if (ret == MPP_ERR_BUFFER_FULL) { - av_log(avctx, AV_LOG_DEBUG, "Buffer full writing %d bytes to decoder\n", size); - ret = AVERROR(EAGAIN); - } else - ret = AVERROR_UNKNOWN; - } - else - av_log(avctx, AV_LOG_DEBUG, "Wrote %d bytes to decoder\n", size); - - mpp_packet_deinit(&packet); - - return ret; -} - -static int rkmpp_close_decoder(AVCodecContext *avctx) -{ - RKMPPDecodeContext *rk_context = avctx->priv_data; - av_buffer_unref(&rk_context->decoder_ref); - return 0; -} - -static void rkmpp_release_decoder(void *opaque, uint8_t *data) -{ - RKMPPDecoder *decoder = (RKMPPDecoder *)data; - - if (decoder->mpi) { - decoder->mpi->reset(decoder->ctx); - mpp_destroy(decoder->ctx); - decoder->ctx = NULL; - } - - if (decoder->frame_group) { - mpp_buffer_group_put(decoder->frame_group); - decoder->frame_group = NULL; - } - - av_buffer_unref(&decoder->frames_ref); - av_buffer_unref(&decoder->device_ref); - - av_free(decoder); -} - -static int rkmpp_init_decoder(AVCodecContext *avctx) -{ - RKMPPDecodeContext *rk_context = avctx->priv_data; - RKMPPDecoder *decoder = NULL; - MppCodingType codectype = MPP_VIDEO_CodingUnused; - int ret; - RK_S64 paramS64; - RK_S32 paramS32; - - avctx->pix_fmt = AV_PIX_FMT_DRM_PRIME; - - // create a decoder and a ref to it - decoder = av_mallocz(sizeof(RKMPPDecoder)); - if (!decoder) { - ret = AVERROR(ENOMEM); - goto fail; - } - - 
rk_context->decoder_ref = av_buffer_create((uint8_t *)decoder, sizeof(*decoder), rkmpp_release_decoder, - NULL, AV_BUFFER_FLAG_READONLY); - if (!rk_context->decoder_ref) { - av_free(decoder); - ret = AVERROR(ENOMEM); - goto fail; - } - - av_log(avctx, AV_LOG_DEBUG, "Initializing RKMPP decoder.\n"); - - codectype = rkmpp_get_codingtype(avctx); - if (codectype == MPP_VIDEO_CodingUnused) { - av_log(avctx, AV_LOG_ERROR, "Unknown codec type (%d).\n", avctx->codec_id); - ret = AVERROR_UNKNOWN; - goto fail; - } - - ret = mpp_check_support_format(MPP_CTX_DEC, codectype); - if (ret != MPP_OK) { - av_log(avctx, AV_LOG_ERROR, "Codec type (%d) unsupported by MPP\n", avctx->codec_id); - ret = AVERROR_UNKNOWN; - goto fail; - } - - // Create the MPP context - ret = mpp_create(&decoder->ctx, &decoder->mpi); - if (ret != MPP_OK) { - av_log(avctx, AV_LOG_ERROR, "Failed to create MPP context (code = %d).\n", ret); - ret = AVERROR_UNKNOWN; - goto fail; - } - - // initialize mpp - ret = mpp_init(decoder->ctx, MPP_CTX_DEC, codectype); - if (ret != MPP_OK) { - av_log(avctx, AV_LOG_ERROR, "Failed to initialize MPP context (code = %d).\n", ret); - ret = AVERROR_UNKNOWN; - goto fail; - } - - // make decode calls blocking with a timeout - paramS32 = MPP_POLL_BLOCK; - ret = decoder->mpi->control(decoder->ctx, MPP_SET_OUTPUT_BLOCK, ¶mS32); - if (ret != MPP_OK) { - av_log(avctx, AV_LOG_ERROR, "Failed to set blocking mode on MPI (code = %d).\n", ret); - ret = AVERROR_UNKNOWN; - goto fail; - } - - paramS64 = RECEIVE_FRAME_TIMEOUT; - ret = decoder->mpi->control(decoder->ctx, MPP_SET_OUTPUT_BLOCK_TIMEOUT, ¶mS64); - if (ret != MPP_OK) { - av_log(avctx, AV_LOG_ERROR, "Failed to set block timeout on MPI (code = %d).\n", ret); - ret = AVERROR_UNKNOWN; - goto fail; - } - - ret = mpp_buffer_group_get_internal(&decoder->frame_group, MPP_BUFFER_TYPE_ION); - if (ret) { - av_log(avctx, AV_LOG_ERROR, "Failed to retrieve buffer group (code = %d)\n", ret); - ret = AVERROR_UNKNOWN; - goto fail; - } - - ret = 
decoder->mpi->control(decoder->ctx, MPP_DEC_SET_EXT_BUF_GROUP, decoder->frame_group); - if (ret) { - av_log(avctx, AV_LOG_ERROR, "Failed to assign buffer group (code = %d)\n", ret); - ret = AVERROR_UNKNOWN; - goto fail; - } - - ret = mpp_buffer_group_limit_config(decoder->frame_group, 0, FRAMEGROUP_MAX_FRAMES); - if (ret) { - av_log(avctx, AV_LOG_ERROR, "Failed to set buffer group limit (code = %d)\n", ret); - ret = AVERROR_UNKNOWN; - goto fail; - } - - decoder->first_packet = 1; - - av_log(avctx, AV_LOG_DEBUG, "RKMPP decoder initialized successfully.\n"); - - decoder->device_ref = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_DRM); - if (!decoder->device_ref) { - ret = AVERROR(ENOMEM); - goto fail; - } - ret = av_hwdevice_ctx_init(decoder->device_ref); - if (ret < 0) - goto fail; - - return 0; - -fail: - av_log(avctx, AV_LOG_ERROR, "Failed to initialize RKMPP decoder.\n"); - rkmpp_close_decoder(avctx); - return ret; -} - -static int rkmpp_send_packet(AVCodecContext *avctx, const AVPacket *avpkt) -{ - RKMPPDecodeContext *rk_context = avctx->priv_data; - RKMPPDecoder *decoder = (RKMPPDecoder *)rk_context->decoder_ref->data; - int ret; - - // handle EOF - if (!avpkt->size) { - av_log(avctx, AV_LOG_DEBUG, "End of stream.\n"); - decoder->eos_reached = 1; - ret = rkmpp_write_data(avctx, NULL, 0, 0); - if (ret) - av_log(avctx, AV_LOG_ERROR, "Failed to send EOS to decoder (code = %d)\n", ret); - return ret; - } - - // on first packet, send extradata - if (decoder->first_packet) { - if (avctx->extradata_size) { - ret = rkmpp_write_data(avctx, avctx->extradata, - avctx->extradata_size, - avpkt->pts); - if (ret) { - av_log(avctx, AV_LOG_ERROR, "Failed to write extradata to decoder (code = %d)\n", ret); - return ret; - } - } - decoder->first_packet = 0; - } - - // now send packet - ret = rkmpp_write_data(avctx, avpkt->data, avpkt->size, avpkt->pts); - if (ret && ret!=AVERROR(EAGAIN)) - av_log(avctx, AV_LOG_ERROR, "Failed to write data to decoder (code = %d)\n", ret); - - return ret; 
-} - -static void rkmpp_release_frame(void *opaque, uint8_t *data) -{ - AVDRMFrameDescriptor *desc = (AVDRMFrameDescriptor *)data; - AVBufferRef *framecontextref = (AVBufferRef *)opaque; - RKMPPFrameContext *framecontext = (RKMPPFrameContext *)framecontextref->data; - - mpp_frame_deinit(&framecontext->frame); - av_buffer_unref(&framecontext->decoder_ref); - av_buffer_unref(&framecontextref); - - av_free(desc); -} - -static int rkmpp_retrieve_frame(AVCodecContext *avctx, AVFrame *frame) -{ - RKMPPDecodeContext *rk_context = avctx->priv_data; - RKMPPDecoder *decoder = (RKMPPDecoder *)rk_context->decoder_ref->data; - RKMPPFrameContext *framecontext = NULL; - AVBufferRef *framecontextref = NULL; - int ret; - MppFrame mppframe = NULL; - MppBuffer buffer = NULL; - AVDRMFrameDescriptor *desc = NULL; - AVDRMLayerDescriptor *layer = NULL; - int mode; - MppFrameFormat mppformat; - uint32_t drmformat; - - ret = decoder->mpi->decode_get_frame(decoder->ctx, &mppframe); - if (ret != MPP_OK && ret != MPP_ERR_TIMEOUT) { - av_log(avctx, AV_LOG_ERROR, "Failed to get a frame from MPP (code = %d)\n", ret); - goto fail; - } - - if (mppframe) { - // Check whether we have a special frame or not - if (mpp_frame_get_info_change(mppframe)) { - AVHWFramesContext *hwframes; - - av_log(avctx, AV_LOG_INFO, "Decoder noticed an info change (%dx%d), format=%d\n", - (int)mpp_frame_get_width(mppframe), (int)mpp_frame_get_height(mppframe), - (int)mpp_frame_get_fmt(mppframe)); - - avctx->width = mpp_frame_get_width(mppframe); - avctx->height = mpp_frame_get_height(mppframe); - - decoder->mpi->control(decoder->ctx, MPP_DEC_SET_INFO_CHANGE_READY, NULL); - - av_buffer_unref(&decoder->frames_ref); - - decoder->frames_ref = av_hwframe_ctx_alloc(decoder->device_ref); - if (!decoder->frames_ref) { - ret = AVERROR(ENOMEM); - goto fail; - } - - mppformat = mpp_frame_get_fmt(mppframe); - drmformat = rkmpp_get_frameformat(mppformat); - - hwframes = (AVHWFramesContext*)decoder->frames_ref->data; - 
hwframes->format = AV_PIX_FMT_DRM_PRIME; - hwframes->sw_format = drmformat == DRM_FORMAT_NV12 ? AV_PIX_FMT_NV12 : AV_PIX_FMT_NONE; - hwframes->width = avctx->width; - hwframes->height = avctx->height; - ret = av_hwframe_ctx_init(decoder->frames_ref); - if (ret < 0) - goto fail; - - // here decoder is fully initialized, we need to feed it again with data - ret = AVERROR(EAGAIN); - goto fail; - } else if (mpp_frame_get_eos(mppframe)) { - av_log(avctx, AV_LOG_DEBUG, "Received a EOS frame.\n"); - decoder->eos_reached = 1; - ret = AVERROR_EOF; - goto fail; - } else if (mpp_frame_get_discard(mppframe)) { - av_log(avctx, AV_LOG_DEBUG, "Received a discard frame.\n"); - ret = AVERROR(EAGAIN); - goto fail; - } else if (mpp_frame_get_errinfo(mppframe)) { - av_log(avctx, AV_LOG_ERROR, "Received a errinfo frame.\n"); - ret = AVERROR_UNKNOWN; - goto fail; - } - - // here we should have a valid frame - av_log(avctx, AV_LOG_DEBUG, "Received a frame.\n"); - - // setup general frame fields - frame->format = AV_PIX_FMT_DRM_PRIME; - frame->width = mpp_frame_get_width(mppframe); - frame->height = mpp_frame_get_height(mppframe); - frame->pts = mpp_frame_get_pts(mppframe); - frame->color_range = mpp_frame_get_color_range(mppframe); - frame->color_primaries = mpp_frame_get_color_primaries(mppframe); - frame->color_trc = mpp_frame_get_color_trc(mppframe); - frame->colorspace = mpp_frame_get_colorspace(mppframe); - - mode = mpp_frame_get_mode(mppframe); - frame->interlaced_frame = ((mode & MPP_FRAME_FLAG_FIELD_ORDER_MASK) == MPP_FRAME_FLAG_DEINTERLACED); - frame->top_field_first = ((mode & MPP_FRAME_FLAG_FIELD_ORDER_MASK) == MPP_FRAME_FLAG_TOP_FIRST); - - mppformat = mpp_frame_get_fmt(mppframe); - drmformat = rkmpp_get_frameformat(mppformat); - - // now setup the frame buffer info - buffer = mpp_frame_get_buffer(mppframe); - if (buffer) { - desc = av_mallocz(sizeof(AVDRMFrameDescriptor)); - if (!desc) { - ret = AVERROR(ENOMEM); - goto fail; - } - - desc->nb_objects = 1; - 
desc->objects[0].fd = mpp_buffer_get_fd(buffer); - desc->objects[0].size = mpp_buffer_get_size(buffer); - - desc->nb_layers = 1; - layer = &desc->layers[0]; - layer->format = drmformat; - layer->nb_planes = 2; - - layer->planes[0].object_index = 0; - layer->planes[0].offset = 0; - layer->planes[0].pitch = mpp_frame_get_hor_stride(mppframe); - - layer->planes[1].object_index = 0; - layer->planes[1].offset = layer->planes[0].pitch * mpp_frame_get_ver_stride(mppframe); - layer->planes[1].pitch = layer->planes[0].pitch; - - // we also allocate a struct in buf[0] that will allow to hold additionnal information - // for releasing properly MPP frames and decoder - framecontextref = av_buffer_allocz(sizeof(*framecontext)); - if (!framecontextref) { - ret = AVERROR(ENOMEM); - goto fail; - } - - // MPP decoder needs to be closed only when all frames have been released. - framecontext = (RKMPPFrameContext *)framecontextref->data; - framecontext->decoder_ref = av_buffer_ref(rk_context->decoder_ref); - framecontext->frame = mppframe; - - frame->data[0] = (uint8_t *)desc; - frame->buf[0] = av_buffer_create((uint8_t *)desc, sizeof(*desc), rkmpp_release_frame, - framecontextref, AV_BUFFER_FLAG_READONLY); - - if (!frame->buf[0]) { - ret = AVERROR(ENOMEM); - goto fail; - } - - frame->hw_frames_ctx = av_buffer_ref(decoder->frames_ref); - if (!frame->hw_frames_ctx) { - ret = AVERROR(ENOMEM); - goto fail; - } - - return 0; - } else { - av_log(avctx, AV_LOG_ERROR, "Failed to retrieve the frame buffer, frame is dropped (code = %d)\n", ret); - mpp_frame_deinit(&mppframe); - } - } else if (decoder->eos_reached) { - return AVERROR_EOF; - } else if (ret == MPP_ERR_TIMEOUT) { - av_log(avctx, AV_LOG_DEBUG, "Timeout when trying to get a frame from MPP\n"); - } - - return AVERROR(EAGAIN); - -fail: - if (mppframe) - mpp_frame_deinit(&mppframe); - - if (framecontext) - av_buffer_unref(&framecontext->decoder_ref); - - if (framecontextref) - av_buffer_unref(&framecontextref); - - if (desc) - 
av_free(desc); - - return ret; -} - -static int rkmpp_receive_frame(AVCodecContext *avctx, AVFrame *frame) -{ - RKMPPDecodeContext *rk_context = avctx->priv_data; - RKMPPDecoder *decoder = (RKMPPDecoder *)rk_context->decoder_ref->data; - int ret = MPP_NOK; - AVPacket pkt = {0}; - RK_S32 usedslots, freeslots; - - if (!decoder->eos_reached) { - // we get the available slots in decoder - ret = decoder->mpi->control(decoder->ctx, MPP_DEC_GET_STREAM_COUNT, &usedslots); - if (ret != MPP_OK) { - av_log(avctx, AV_LOG_ERROR, "Failed to get decoder used slots (code = %d).\n", ret); - return ret; - } - - freeslots = INPUT_MAX_PACKETS - usedslots; - if (freeslots > 0) { - ret = ff_decode_get_packet(avctx, &pkt); - if (ret < 0 && ret != AVERROR_EOF) { - return ret; - } - - ret = rkmpp_send_packet(avctx, &pkt); - av_packet_unref(&pkt); - - if (ret < 0) { - av_log(avctx, AV_LOG_ERROR, "Failed to send packet to decoder (code = %d)\n", ret); - return ret; - } - } - - // make sure we keep decoder full - if (freeslots > 1) - return AVERROR(EAGAIN); - } - - return rkmpp_retrieve_frame(avctx, frame); -} - -static void rkmpp_flush(AVCodecContext *avctx) -{ - RKMPPDecodeContext *rk_context = avctx->priv_data; - RKMPPDecoder *decoder = (RKMPPDecoder *)rk_context->decoder_ref->data; - int ret = MPP_NOK; - - av_log(avctx, AV_LOG_DEBUG, "Flush.\n"); - - ret = decoder->mpi->reset(decoder->ctx); - if (ret == MPP_OK) { - decoder->first_packet = 1; - } else - av_log(avctx, AV_LOG_ERROR, "Failed to reset MPI (code = %d)\n", ret); -} - -static const AVCodecHWConfigInternal *const rkmpp_hw_configs[] = { - HW_CONFIG_INTERNAL(DRM_PRIME), - NULL -}; - -#define RKMPP_DEC_CLASS(NAME) \ - static const AVClass rkmpp_##NAME##_dec_class = { \ - .class_name = "rkmpp_" #NAME "_dec", \ - .version = LIBAVUTIL_VERSION_INT, \ - }; - -#define RKMPP_DEC(NAME, ID, BSFS) \ - RKMPP_DEC_CLASS(NAME) \ - AVCodec ff_##NAME##_rkmpp_decoder = { \ - .name = #NAME "_rkmpp", \ - .long_name = NULL_IF_CONFIG_SMALL(#NAME " 
(rkmpp)"), \ - .type = AVMEDIA_TYPE_VIDEO, \ - .id = ID, \ - .priv_data_size = sizeof(RKMPPDecodeContext), \ - .init = rkmpp_init_decoder, \ - .close = rkmpp_close_decoder, \ - .receive_frame = rkmpp_receive_frame, \ - .flush = rkmpp_flush, \ - .priv_class = &rkmpp_##NAME##_dec_class, \ - .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HARDWARE, \ - .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_DRM_PRIME, \ - AV_PIX_FMT_NONE}, \ - .hw_configs = rkmpp_hw_configs, \ - .bsfs = BSFS, \ - .wrapper_name = "rkmpp", \ - }; - -RKMPP_DEC(h264, AV_CODEC_ID_H264, "h264_mp4toannexb") -RKMPP_DEC(hevc, AV_CODEC_ID_HEVC, "hevc_mp4toannexb") -RKMPP_DEC(vp8, AV_CODEC_ID_VP8, NULL) -RKMPP_DEC(vp9, AV_CODEC_ID_VP9, NULL) -- 2.41.0 From e10c2ce641ed9e9018ee257679eaa3841d3c0f9f Mon Sep 17 00:00:00 2001 From: boogie Date: Mon, 24 Jul 2023 16:01:21 +0200 Subject: [PATCH 07/13] prepare buildsystem for rkmpp --- configure | 18 +++++++++++++++++- libavcodec/Makefile | 16 ++++++++++++---- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/configure b/configure index fb55e04ee7..041039c9c4 100755 --- a/configure +++ b/configure @@ -3070,8 +3070,10 @@ nvenc_encoder_deps="nvenc" aac_mf_encoder_deps="mediafoundation" ac3_mf_encoder_deps="mediafoundation" av1_cuvid_decoder_deps="cuvid CUVIDAV1PICPARAMS" +av1_rkmpp_decoder_deps="rkmpp" h263_v4l2m2m_decoder_deps="v4l2_m2m h263_v4l2_m2m" h263_v4l2m2m_encoder_deps="v4l2_m2m h263_v4l2_m2m" +h263_rkmpp_decoder_deps="rkmpp" h264_amf_encoder_deps="amf" h264_crystalhd_decoder_select="crystalhd h264_mp4toannexb_bsf h264_parser" h264_cuvid_decoder_deps="cuvid" @@ -3087,6 +3089,8 @@ h264_qsv_decoder_select="h264_mp4toannexb_bsf qsvdec" h264_qsv_encoder_select="atsc_a53 qsvenc" h264_rkmpp_decoder_deps="rkmpp" h264_rkmpp_decoder_select="h264_mp4toannexb_bsf" +h264_rkmpp_encoder_deps="rkmpp" +h264_rkmpp_encoder_select="h264_mp4toannexb_bsf" h264_vaapi_encoder_select="cbs_h264 vaapi_encode" 
h264_v4l2m2m_decoder_deps="v4l2_m2m h264_v4l2_m2m" h264_v4l2m2m_decoder_select="h264_mp4toannexb_bsf" @@ -3103,6 +3107,8 @@ hevc_qsv_decoder_select="hevc_mp4toannexb_bsf qsvdec" hevc_qsv_encoder_select="hevcparse qsvenc" hevc_rkmpp_decoder_deps="rkmpp" hevc_rkmpp_decoder_select="hevc_mp4toannexb_bsf" +hevc_rkmpp_encoder_deps="rkmpp" +hevc_rkmpp_encoder_select="hevc_mp4toannexb_bsf" hevc_vaapi_encoder_deps="VAEncPictureParameterBufferHEVC" hevc_vaapi_encoder_select="cbs_h265 vaapi_encode" hevc_v4l2m2m_decoder_deps="v4l2_m2m hevc_v4l2_m2m" @@ -3117,6 +3123,7 @@ mjpeg_vaapi_encoder_select="cbs_jpeg jpegtables vaapi_encode" mp3_mf_encoder_deps="mediafoundation" mpeg1_cuvid_decoder_deps="cuvid" mpeg1_v4l2m2m_decoder_deps="v4l2_m2m mpeg1_v4l2_m2m" +mpeg1_rkmpp_decoder_deps="rkmpp" mpeg2_crystalhd_decoder_select="crystalhd" mpeg2_cuvid_decoder_deps="cuvid" mpeg2_mmal_decoder_deps="mmal" @@ -3125,6 +3132,7 @@ mpeg2_qsv_decoder_select="qsvdec" mpeg2_qsv_encoder_select="qsvenc" mpeg2_vaapi_encoder_select="cbs_mpeg2 vaapi_encode" mpeg2_v4l2m2m_decoder_deps="v4l2_m2m mpeg2_v4l2_m2m" +mpeg2_rkmpp_decoder_deps="rkmpp" mpeg4_crystalhd_decoder_select="crystalhd" mpeg4_cuvid_decoder_deps="cuvid" mpeg4_mediacodec_decoder_deps="mediacodec" @@ -3132,6 +3140,7 @@ mpeg4_mmal_decoder_deps="mmal" mpeg4_omx_encoder_deps="omx" mpeg4_v4l2m2m_decoder_deps="v4l2_m2m mpeg4_v4l2_m2m" mpeg4_v4l2m2m_encoder_deps="v4l2_m2m mpeg4_v4l2_m2m" +mpeg4_rkmpp_decoder_deps="rkmpp" msmpeg4_crystalhd_decoder_select="crystalhd" nvenc_h264_encoder_select="h264_nvenc_encoder" nvenc_hevc_encoder_select="hevc_nvenc_encoder" @@ -3144,6 +3153,7 @@ vp8_cuvid_decoder_deps="cuvid" vp8_mediacodec_decoder_deps="mediacodec" vp8_qsv_decoder_select="qsvdec" vp8_rkmpp_decoder_deps="rkmpp" +vp8_rkmpp_encoder_deps="rkmpp" vp8_vaapi_encoder_deps="VAEncPictureParameterBufferVP8" vp8_vaapi_encoder_select="vaapi_encode" vp8_v4l2m2m_decoder_deps="v4l2_m2m vp8_v4l2_m2m" @@ -6541,8 +6551,14 @@ enabled openssl && { check_pkg_config 
openssl openssl openssl/ssl.h OP enabled pocketsphinx && require_pkg_config pocketsphinx pocketsphinx pocketsphinx/pocketsphinx.h ps_init enabled rkmpp && { require_pkg_config rkmpp rockchip_mpp rockchip/rk_mpi.h mpp_create && require_pkg_config rockchip_mpp "rockchip_mpp >= 1.3.7" rockchip/rk_mpi.h mpp_create && + { check_lib librga rga/RgaApi.h c_RkRgaInit -lrga || + die "ERROR: librga is necessary for rkmpp"; } && + prepend rkmpp_deps "librga" && + { check_lib libyuv libyuv/planar_functions.h SplitUVPlane -lyuv || + die "ERROR: libyuv is necessary for rkmpp"; } && + prepend rkmpp_deps "libyuv" && { enabled libdrm || - die "ERROR: rkmpp requires --enable-libdrm"; } + die "ERROR: rkmpp requires --enable-libdrm"; } } enabled vapoursynth && require_pkg_config vapoursynth "vapoursynth-script >= 42" VSScript.h vsscript_init diff --git a/libavcodec/Makefile b/libavcodec/Makefile index b3d284d7d0..b9b821458a 100644 --- a/libavcodec/Makefile +++ b/libavcodec/Makefile @@ -231,6 +231,7 @@ OBJS-$(CONFIG_AURA_DECODER) += cyuv.o OBJS-$(CONFIG_AURA2_DECODER) += aura.o OBJS-$(CONFIG_AV1_DECODER) += av1dec.o OBJS-$(CONFIG_AV1_CUVID_DECODER) += cuviddec.o +OBJS-$(CONFIG_AV1_RKMPP_DECODER) += rkmpp.o rkplane.o rkmppdec.o OBJS-$(CONFIG_AVRN_DECODER) += avrndec.o OBJS-$(CONFIG_AVRP_DECODER) += r210dec.o OBJS-$(CONFIG_AVRP_ENCODER) += r210enc.o @@ -364,6 +365,7 @@ OBJS-$(CONFIG_H263_ENCODER) += mpeg4video.o \ h263.o ituh263enc.o h263data.o OBJS-$(CONFIG_H263_V4L2M2M_DECODER) += v4l2_m2m_dec.o OBJS-$(CONFIG_H263_V4L2M2M_ENCODER) += v4l2_m2m_enc.o +OBJS-$(CONFIG_H263_RKMPP_DECODER) += rkmpp.o rkplane.o rkmppdec.o OBJS-$(CONFIG_H264_DECODER) += h264dec.o h264_cabac.o h264_cavlc.o \ h264_direct.o h264_loopfilter.o \ h264_mb.o h264_picture.o \ @@ -380,7 +382,8 @@ OBJS-$(CONFIG_NVENC_H264_ENCODER) += nvenc.o nvenc_h264.o OBJS-$(CONFIG_H264_OMX_ENCODER) += omx.o OBJS-$(CONFIG_H264_QSV_DECODER) += qsvdec.o OBJS-$(CONFIG_H264_QSV_ENCODER) += qsvenc_h264.o -OBJS-$(CONFIG_H264_RKMPP_DECODER) += 
rkmppdec.o +OBJS-$(CONFIG_H264_RKMPP_DECODER) += rkmpp.o rkplane.o rkmppdec.o +OBJS-$(CONFIG_H264_RKMPP_ENCODER) += rkmpp.o rkplane.o rkmppenc.o OBJS-$(CONFIG_H264_VAAPI_ENCODER) += vaapi_encode_h264.o h264_levels.o OBJS-$(CONFIG_H264_VIDEOTOOLBOX_ENCODER) += videotoolboxenc.o OBJS-$(CONFIG_H264_V4L2M2M_DECODER) += v4l2_m2m_dec.o @@ -401,7 +404,8 @@ OBJS-$(CONFIG_NVENC_HEVC_ENCODER) += nvenc.o nvenc_hevc.o OBJS-$(CONFIG_HEVC_QSV_DECODER) += qsvdec.o OBJS-$(CONFIG_HEVC_QSV_ENCODER) += qsvenc_hevc.o hevc_ps_enc.o \ hevc_data.o -OBJS-$(CONFIG_HEVC_RKMPP_DECODER) += rkmppdec.o +OBJS-$(CONFIG_HEVC_RKMPP_DECODER) += rkmpp.o rkplane.o rkmppdec.o +OBJS-$(CONFIG_HEVC_RKMPP_ENCODER) += rkmpp.o rkplane.o rkmppenc.o OBJS-$(CONFIG_HEVC_VAAPI_ENCODER) += vaapi_encode_h265.o h265_profile_level.o OBJS-$(CONFIG_HEVC_V4L2M2M_DECODER) += v4l2_m2m_dec.o OBJS-$(CONFIG_HEVC_V4L2M2M_ENCODER) += v4l2_m2m_enc.o @@ -488,6 +492,7 @@ OBJS-$(CONFIG_MPEG1VIDEO_DECODER) += mpeg12dec.o mpeg12.o mpeg12data.o OBJS-$(CONFIG_MPEG1VIDEO_ENCODER) += mpeg12enc.o mpeg12.o OBJS-$(CONFIG_MPEG1_CUVID_DECODER) += cuviddec.o OBJS-$(CONFIG_MPEG1_V4L2M2M_DECODER) += v4l2_m2m_dec.o +OBJS-$(CONFIG_MPEG1_RKMPP_DECODER) += rkmpp.o rkplane.o rkmppdec.o OBJS-$(CONFIG_MPEG2_MMAL_DECODER) += mmaldec.o OBJS-$(CONFIG_MPEG2_QSV_DECODER) += qsvdec.o OBJS-$(CONFIG_MPEG2_QSV_ENCODER) += qsvenc_mpeg2.o @@ -498,12 +503,14 @@ OBJS-$(CONFIG_MPEG2_MEDIACODEC_DECODER) += mediacodecdec.o OBJS-$(CONFIG_MPEG2_VAAPI_ENCODER) += vaapi_encode_mpeg2.o OBJS-$(CONFIG_MPEG2_V4L2M2M_DECODER) += v4l2_m2m_dec.o OBJS-$(CONFIG_MPEG4_DECODER) += xvididct.o +OBJS-$(CONFIG_MPEG2_RKMPP_DECODER) += rkmpp.o rkplane.o rkmppdec.o OBJS-$(CONFIG_MPEG4_ENCODER) += mpeg4videoenc.o OBJS-$(CONFIG_MPEG4_CUVID_DECODER) += cuviddec.o OBJS-$(CONFIG_MPEG4_MEDIACODEC_DECODER) += mediacodecdec.o OBJS-$(CONFIG_MPEG4_OMX_ENCODER) += omx.o OBJS-$(CONFIG_MPEG4_V4L2M2M_DECODER) += v4l2_m2m_dec.o OBJS-$(CONFIG_MPEG4_V4L2M2M_ENCODER) += v4l2_m2m_enc.o 
+OBJS-$(CONFIG_MPEG4_RKMPP_DECODER) += rkmpp.o rkplane.o rkmppdec.o OBJS-$(CONFIG_MPL2_DECODER) += mpl2dec.o ass.o OBJS-$(CONFIG_MSA1_DECODER) += mss3.o OBJS-$(CONFIG_MSCC_DECODER) += mscc.o @@ -716,7 +723,8 @@ OBJS-$(CONFIG_VP8_DECODER) += vp8.o vp56rac.o OBJS-$(CONFIG_VP8_CUVID_DECODER) += cuviddec.o OBJS-$(CONFIG_VP8_MEDIACODEC_DECODER) += mediacodecdec.o OBJS-$(CONFIG_VP8_QSV_DECODER) += qsvdec.o -OBJS-$(CONFIG_VP8_RKMPP_DECODER) += rkmppdec.o +OBJS-$(CONFIG_VP8_RKMPP_DECODER) += rkmpp.o rkplane.o rkmppdec.o +OBJS-$(CONFIG_VP8_RKMPP_ENCODER) += rkmpp.o rkplane.o rkmppenc.o OBJS-$(CONFIG_VP8_VAAPI_ENCODER) += vaapi_encode_vp8.o OBJS-$(CONFIG_VP8_V4L2M2M_DECODER) += v4l2_m2m_dec.o OBJS-$(CONFIG_VP8_V4L2M2M_ENCODER) += v4l2_m2m_enc.o @@ -725,7 +733,7 @@ OBJS-$(CONFIG_VP9_DECODER) += vp9.o vp9data.o vp9dsp.o vp9lpf.o vp9r vp9dsp_8bpp.o vp9dsp_10bpp.o vp9dsp_12bpp.o OBJS-$(CONFIG_VP9_CUVID_DECODER) += cuviddec.o OBJS-$(CONFIG_VP9_MEDIACODEC_DECODER) += mediacodecdec.o -OBJS-$(CONFIG_VP9_RKMPP_DECODER) += rkmppdec.o +OBJS-$(CONFIG_VP9_RKMPP_DECODER) += rkmpp.o rkplane.o rkmppdec.o OBJS-$(CONFIG_VP9_VAAPI_ENCODER) += vaapi_encode_vp9.o OBJS-$(CONFIG_VP9_QSV_ENCODER) += qsvenc_vp9.o OBJS-$(CONFIG_VPLAYER_DECODER) += textdec.o ass.o -- 2.41.0 From f19e87b3dc632156cc0fc9eec9949a6e1e8cddf7 Mon Sep 17 00:00:00 2001 From: boogie Date: Mon, 24 Jul 2023 16:14:02 +0200 Subject: [PATCH 08/13] register all codec as highest prio --- libavcodec/allcodecs.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c index 2e9a3581de..03a07aaa41 100644 --- a/libavcodec/allcodecs.c +++ b/libavcodec/allcodecs.c @@ -28,6 +28,19 @@ #include "libavutil/thread.h" #include "avcodec.h" #include "version.h" +extern AVCodec ff_av1_rkmpp_decoder; +extern AVCodec ff_h263_rkmpp_decoder; +extern AVCodec ff_h264_rkmpp_decoder; +extern AVCodec ff_hevc_rkmpp_decoder; +extern AVCodec ff_mpeg1_rkmpp_decoder; +extern AVCodec 
ff_mpeg2_rkmpp_decoder; +extern AVCodec ff_mpeg4_rkmpp_decoder; +extern AVCodec ff_vp8_rkmpp_decoder; +extern AVCodec ff_vp9_rkmpp_decoder; + +extern AVCodec ff_vp8_rkmpp_encoder; +extern AVCodec ff_h264_rkmpp_encoder; +extern AVCodec ff_hevc_rkmpp_encoder; extern AVCodec ff_a64multi_encoder; extern AVCodec ff_a64multi5_encoder; @@ -147,12 +160,10 @@ extern AVCodec ff_h264_v4l2m2m_decoder; extern AVCodec ff_h264_mediacodec_decoder; extern AVCodec ff_h264_mmal_decoder; extern AVCodec ff_h264_qsv_decoder; -extern AVCodec ff_h264_rkmpp_decoder; extern AVCodec ff_hap_encoder; extern AVCodec ff_hap_decoder; extern AVCodec ff_hevc_decoder; extern AVCodec ff_hevc_qsv_decoder; -extern AVCodec ff_hevc_rkmpp_decoder; extern AVCodec ff_hevc_v4l2m2m_decoder; extern AVCodec ff_hnm4_video_decoder; extern AVCodec ff_hq_hqa_decoder; @@ -357,10 +368,8 @@ extern AVCodec ff_vp6a_decoder; extern AVCodec ff_vp6f_decoder; extern AVCodec ff_vp7_decoder; extern AVCodec ff_vp8_decoder; -extern AVCodec ff_vp8_rkmpp_decoder; extern AVCodec ff_vp8_v4l2m2m_decoder; extern AVCodec ff_vp9_decoder; -extern AVCodec ff_vp9_rkmpp_decoder; extern AVCodec ff_vp9_v4l2m2m_decoder; extern AVCodec ff_vqa_decoder; extern AVCodec ff_webp_decoder; -- 2.41.0 From 6852941a67515c3cb337804679706797575e1fb2 Mon Sep 17 00:00:00 2001 From: boogie Date: Mon, 24 Jul 2023 16:10:39 +0200 Subject: [PATCH 09/13] initial --- libavcodec/rkmpp.c | 272 +++++++++++++++++ libavcodec/rkmpp.h | 279 ++++++++++++++++++ libavcodec/rkmppdec.c | 335 +++++++++++++++++++++ libavcodec/rkmppenc.c | 620 ++++++++++++++++++++++++++++++++++++++ libavcodec/rkplane.c | 669 ++++++++++++++++++++++++++++++++++++++++++ libavcodec/rkplane.h | 17 ++ 6 files changed, 2192 insertions(+) create mode 100644 libavcodec/rkmpp.c create mode 100644 libavcodec/rkmpp.h create mode 100644 libavcodec/rkmppdec.c create mode 100644 libavcodec/rkmppenc.c create mode 100644 libavcodec/rkplane.c create mode 100644 libavcodec/rkplane.h diff --git a/libavcodec/rkmpp.c 
b/libavcodec/rkmpp.c new file mode 100644 index 0000000000..a33a139569 --- /dev/null +++ b/libavcodec/rkmpp.c @@ -0,0 +1,272 @@ +/* + * RockChip MPP Video Codec + * Copyright (c) 2023 Huseyin BIYIK + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include +#include +#include "rkmpp.h" + +static rkformat rkformats[13] = { + { .av = AV_PIX_FMT_YUV420P, .mpp = MPP_FMT_YUV420P, .drm = DRM_FORMAT_YUV420, .rga = RK_FORMAT_YCbCr_420_P}, + { .av = AV_PIX_FMT_YUV422P, .mpp = MPP_FMT_YUV422P, .drm = DRM_FORMAT_YUV422, .rga = RK_FORMAT_YCbCr_422_P}, + { .av = AV_PIX_FMT_NV12, .mpp = MPP_FMT_YUV420SP, .drm = DRM_FORMAT_NV12, .rga = RK_FORMAT_YCbCr_420_SP}, + { .av = AV_PIX_FMT_NV16, .mpp = MPP_FMT_YUV422SP, .drm = DRM_FORMAT_NV16, .rga = RK_FORMAT_YCbCr_422_SP}, + { .av = AV_PIX_FMT_NONE, .mpp = MPP_FMT_YUV420SP_10BIT, .drm = DRM_FORMAT_NV15, .rga = RK_FORMAT_YCbCr_420_SP_10B}, + { .av = AV_PIX_FMT_BGR24, .mpp = MPP_FMT_BGR888, .drm = DRM_FORMAT_BGR888, .rga = RK_FORMAT_BGR_888}, + { .av = AV_PIX_FMT_BGR0, .mpp = MPP_FMT_BGRA8888, .drm = DRM_FORMAT_XRGB8888, .rga = RK_FORMAT_BGRX_8888}, + { .av = AV_PIX_FMT_BGRA, .mpp = MPP_FMT_BGRA8888, .drm = DRM_FORMAT_ARGB8888, .rga = RK_FORMAT_BGRA_8888}, + { .av = AV_PIX_FMT_BGR565, .mpp = MPP_FMT_BGR565, .drm = 
DRM_FORMAT_BGR565, .rga = RK_FORMAT_BGR_565}, + { .av = AV_PIX_FMT_YUYV422, .mpp = MPP_FMT_YUV422_YUYV, .drm = DRM_FORMAT_YUYV, .rga = RK_FORMAT_YUYV_422}, + { .av = AV_PIX_FMT_UYVY422, .mpp = MPP_FMT_YUV422_UYVY, .drm = DRM_FORMAT_UYVY, .rga = RK_FORMAT_UYVY_422}, + { .av = AV_PIX_FMT_NV24, .mpp = MPP_FMT_YUV444SP, .drm = DRM_FORMAT_NV24, .rga = RK_FORMAT_UNKNOWN}, + { .av = AV_PIX_FMT_YUV444P, .mpp = MPP_FMT_YUV444P, .drm = DRM_FORMAT_YUV444, .rga = RK_FORMAT_UNKNOWN}, +}; + +#define GETFORMAT(NAME, TYPE)\ +int rkmpp_get_##NAME##_format(rkformat *format, TYPE informat){ \ + for(int i=0; i < 13; i++){ \ + if(rkformats[i].NAME == informat){ \ + format->av = rkformats[i].av;\ + format->mpp = rkformats[i].mpp;\ + format->drm = rkformats[i].drm;\ + format->rga = rkformats[i].rga;\ + return 0;\ + }\ + }\ + return -1;\ +} + +GETFORMAT(drm, uint32_t) +GETFORMAT(mpp, MppFrameFormat) +GETFORMAT(rga, enum _Rga_SURF_FORMAT) +GETFORMAT(av, enum AVPixelFormat) + +MppCodingType rkmpp_get_codingtype(AVCodecContext *avctx) +{ + switch (avctx->codec_id) { + case AV_CODEC_ID_H263: return MPP_VIDEO_CodingH263; + case AV_CODEC_ID_H264: return MPP_VIDEO_CodingAVC; + case AV_CODEC_ID_HEVC: return MPP_VIDEO_CodingHEVC; + case AV_CODEC_ID_AV1: return MPP_VIDEO_CodingAV1; + case AV_CODEC_ID_VP8: return MPP_VIDEO_CodingVP8; + case AV_CODEC_ID_VP9: return MPP_VIDEO_CodingVP9; + case AV_CODEC_ID_MPEG1VIDEO: /* fallthrough */ + case AV_CODEC_ID_MPEG2VIDEO: return MPP_VIDEO_CodingMPEG2; + case AV_CODEC_ID_MPEG4: return MPP_VIDEO_CodingMPEG4; + default: return MPP_VIDEO_CodingUnused; + } +} + +int rkmpp_close_codec(AVCodecContext *avctx) +{ + RKMPPCodecContext *rk_context = avctx->priv_data; + RKMPPCodec *codec = (RKMPPCodec *)rk_context->codec_ref->data; + + av_packet_unref(&codec->lastpacket); + av_frame_unref(&codec->lastframe); + + av_buffer_unref(&rk_context->codec_ref); + return 0; +} + +void rkmpp_release_codec(void *opaque, uint8_t *data) +{ + RKMPPCodec *codec = (RKMPPCodec *)data; + + 
if (codec->mpi) { + codec->mpi->reset(codec->ctx); + mpp_destroy(codec->ctx); + codec->ctx = NULL; + } + + if (codec->buffer_group) { + mpp_buffer_group_put(codec->buffer_group); + codec->buffer_group = NULL; + } + + if(codec->hwframes_ref) + av_buffer_unref(&codec->hwframes_ref); + if(codec->hwdevice_ref) + av_buffer_unref(&codec->hwdevice_ref); + + av_free(codec); +} + +int rkmpp_init_codec(AVCodecContext *avctx) +{ + RKMPPCodecContext *rk_context = avctx->priv_data; + RKMPPCodec *codec = NULL; + MppCodingType codectype = MPP_VIDEO_CodingUnused; + char *env; + int ret; + + // create a codec and a ref to it + codec = av_mallocz(sizeof(RKMPPCodec)); + if (!codec) { + ret = AVERROR(ENOMEM); + goto fail; + } + + rk_context->codec_ref = av_buffer_create((uint8_t *)codec, sizeof(*codec), rkmpp_release_codec, + NULL, AV_BUFFER_FLAG_READONLY); + if (!rk_context->codec_ref) { + av_free(codec); + ret = AVERROR(ENOMEM); + goto fail; + } + + env = getenv("FFMPEG_RKMPP_LOG_FPS"); + if (env != NULL) + codec->print_fps = !!atoi(env); + + av_log(avctx, AV_LOG_DEBUG, "Initializing RKMPP Codec.\n"); + + codectype = rkmpp_get_codingtype(avctx); + if (codectype == MPP_VIDEO_CodingUnused) { + av_log(avctx, AV_LOG_ERROR, "Unknown codec type (%d).\n", avctx->codec_id); + ret = AVERROR_UNKNOWN; + goto fail; + } + + ret = mpp_check_support_format(codec->mppctxtype, codectype); + if (ret != MPP_OK) { + av_log(avctx, AV_LOG_ERROR, "Codec type (%d) unsupported by MPP\n", avctx->codec_id); + ret = AVERROR_UNKNOWN; + goto fail; + } + + // Create the MPP context + ret = mpp_create(&codec->ctx, &codec->mpi); + if (ret != MPP_OK) { + av_log(avctx, AV_LOG_ERROR, "Failed to create MPP context (code = %d).\n", ret); + ret = AVERROR_UNKNOWN; + goto fail; + } + + if(avctx->codec->receive_frame){ + codec->init_callback = rkmpp_init_decoder; + codec->mppctxtype = MPP_CTX_DEC; + + ret = 1; + codec->mpi->control(codec->ctx, MPP_DEC_SET_PARSER_FAST_MODE, &ret); + + avctx->pix_fmt = ff_get_format(avctx, 
avctx->codec->pix_fmts); + + // override the the pixfmt according env variable + env = getenv("FFMPEG_RKMPP_PIXFMT"); + if(env != NULL){ + if(!strcmp(env, "YUV420P")) + avctx->pix_fmt = AV_PIX_FMT_YUV420P; + else if (!strcmp(env, "NV12")) + avctx->pix_fmt = AV_PIX_FMT_NV12; + else if(!strcmp(env, "DRMPRIME")) + avctx->pix_fmt = AV_PIX_FMT_DRM_PRIME; + } + } else if (avctx->codec->encode2){ + codec->mppctxtype = MPP_CTX_ENC; + codec->init_callback = rkmpp_init_encoder; + } else { + ret = AVERROR(ENOMEM); + av_log(avctx, AV_LOG_DEBUG, "RKMPP Codec can not determine if the mode is decoder or encoder\n"); + goto fail; + } + + av_log(avctx, AV_LOG_INFO, "Picture format is %s.\n", av_get_pix_fmt_name(avctx->pix_fmt)); + + // initialize mpp + ret = mpp_init(codec->ctx, codec->mppctxtype, codectype); + if (ret != MPP_OK) { + av_log(avctx, AV_LOG_ERROR, "Failed to initialize MPP context (code = %d).\n", ret); + ret = AVERROR_UNKNOWN; + goto fail; + } + + env = getenv("FFMPEG_RKMPP_NORGA"); + if(env != NULL){ + codec->norga = 1; + av_log(avctx, AV_LOG_INFO, "Bypassing RGA and using libyuv soft conversion\n"); + } + + ret = mpp_buffer_group_get_internal(&codec->buffer_group, MPP_BUFFER_TYPE_ION | MPP_BUFFER_FLAGS_DMA32); + if (ret) { + av_log(avctx, AV_LOG_ERROR, "Failed to get buffer group (code = %d)\n", ret); + ret = AVERROR_UNKNOWN; + goto fail; + } + + ret = codec->init_callback(avctx); + if(ret){ + av_log(avctx, AV_LOG_ERROR, "Failed to init Codec (code = %d).\n", ret); + goto fail; + } + + return 0; + +fail: + av_log(avctx, AV_LOG_ERROR, "Failed to initialize RKMPP Codec.\n"); + rkmpp_close_codec(avctx); + return ret; +} + +void rkmpp_flush(AVCodecContext *avctx) +{ + RKMPPCodecContext *rk_context = avctx->priv_data; + RKMPPCodec *codec = (RKMPPCodec *)rk_context->codec_ref->data; + + av_log(avctx, AV_LOG_DEBUG, "Flush.\n"); + + codec->mpi->reset(codec->ctx); + codec->last_frame_time = codec->frames = codec->hascfg = 0; + + av_packet_unref(&codec->lastpacket); + 
av_frame_unref(&codec->lastframe); +} + +uint64_t rkmpp_update_latency(AVCodecContext *avctx, int latency) +{ + RKMPPCodecContext *rk_context = avctx->priv_data; + RKMPPCodec *codec = (RKMPPCodec *)rk_context->codec_ref->data; + struct timespec tv; + uint64_t curr_time; + float fps = 0.0f; + + if (!codec->print_fps) + return 0; + + clock_gettime(CLOCK_MONOTONIC, &tv); + curr_time = tv.tv_sec * 10e5 + tv.tv_nsec / 10e2; + if (latency == -1){ + latency = codec->last_frame_time ? curr_time - codec->last_frame_time : 0; + codec->last_frame_time = curr_time; + codec->latencies[codec->frames % RKMPP_FPS_FRAME_MACD] = latency; + return latency; + } else if (latency == 0 || codec->frames < RKMPP_FPS_FRAME_MACD) { + fps = -1.0f; + } else { + for(int i = 0; i < RKMPP_FPS_FRAME_MACD; i++) { + fps += codec->latencies[i]; + } + fps = RKMPP_FPS_FRAME_MACD * 1000000.0f / fps; + } + av_log(avctx, AV_LOG_INFO, + "[FFMPEG RKMPP] FPS(MACD%d): %6.1f || Frames: %" PRIu64 " || Latency: %d us || Buffer Delay %" PRIu64 "us\n", + RKMPP_FPS_FRAME_MACD, fps, codec->frames, latency, (uint64_t)(curr_time - codec->last_frame_time)); + + return 0; +} diff --git a/libavcodec/rkmpp.h b/libavcodec/rkmpp.h new file mode 100644 index 0000000000..1e4280dc3b --- /dev/null +++ b/libavcodec/rkmpp.h @@ -0,0 +1,279 @@ +#include +#include +#include +#include + +#include "internal.h" +#include "avcodec.h" +#include "hwconfig.h" +#include "decode.h" +#include "encode.h" +#include "rga/RgaApi.h" +#include "libavutil/macros.h" +#include "libavutil/log.h" +#include "libavutil/opt.h" +#include "libavutil/buffer.h" +#include "libavutil/pixfmt.h" +#include "libavutil/pixdesc.h" +#include "libavutil/hwcontext_drm.h" + +// HACK: Older BSP kernel use NA12 for NV15. 
+#ifndef DRM_FORMAT_NV15 // fourcc_code('N', 'V', '1', '5') +#define DRM_FORMAT_NV15 fourcc_code('N', 'A', '1', '2') +#endif + +#define RKMPP_FPS_FRAME_MACD 30 +#define RKMPP_STRIDE_ALIGN 16 +#define RKMPP_RGA_MIN_SIZE 128 +#define RKMPP_RGA_MAX_SIZE 4096 +#define RKMPP_MPPFRAME_BUFINDEX 7 +#define HDR_SIZE 1024 +#define QMAX_H26x 51 +#define QMIN_H26x 10 +#define QMAX_VPx 127 +#define QMIN_VPx 40 +#define QMAX_JPEG 99 +#define QMIN_JPEG 1 + + +#define DRMFORMATNAME(buf, format) \ + buf[0] = format & 0xff; \ + buf[1] = (format >> 8) & 0xff; \ + buf[2] = (format >> 16) & 0xff; \ + buf[3] = (format >> 24) & 0x7f; \ + +typedef struct { + AVClass *av_class; + AVBufferRef *codec_ref; + int rc_mode; + int profile; + int qmin; + int qmax; + int level; + int coder; + int dct8x8; + enum AVPixelFormat postrga_format; + int postrga_width; + int postrga_height; +} RKMPPCodecContext; + +typedef struct { + MppCtx ctx; + MppApi *mpi; + MppBufferGroup buffer_group; + MppCtxType mppctxtype; + MppEncCfg enccfg; + int hascfg; + int64_t ptsstep; + int64_t pts; + + AVPacket lastpacket; + AVFrame lastframe; + AVBufferRef *hwframes_ref; + AVBufferRef *hwdevice_ref; + + char print_fps; + uint64_t last_frame_time; + uint64_t frames; + uint64_t latencies[RKMPP_FPS_FRAME_MACD]; + + int8_t norga; + int (*init_callback)(struct AVCodecContext *avctx); +} RKMPPCodec; + +typedef struct { + enum AVPixelFormat av; + MppFrameFormat mpp; + uint32_t drm; + enum _Rga_SURF_FORMAT rga; +} rkformat; + +MppCodingType rkmpp_get_codingtype(AVCodecContext *avctx); +int rkmpp_get_drm_format(rkformat *format, uint32_t informat); +int rkmpp_get_mpp_format(rkformat *format, MppFrameFormat informat); +int rkmpp_get_rga_format(rkformat *format, enum _Rga_SURF_FORMAT informat); +int rkmpp_get_av_format(rkformat *format, enum AVPixelFormat informat); +int rkmpp_init_encoder(AVCodecContext *avctx); +int rkmpp_encode(AVCodecContext *avctx, AVPacket *packet, const AVFrame *frame, int *got_packet); +int 
rkmpp_init_decoder(AVCodecContext *avctx); +int rkmpp_receive_frame(AVCodecContext *avctx, AVFrame *frame); +int rkmpp_init_codec(AVCodecContext *avctx); +int rkmpp_close_codec(AVCodecContext *avctx); +void rkmpp_release_codec(void *opaque, uint8_t *data); +void rkmpp_flush(AVCodecContext *avctx); +uint64_t rkmpp_update_latency(AVCodecContext *avctx, int latency); + +#define OFFSET(x) offsetof(RKMPPCodecContext, x) +#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM + +#define ENCODEROPTS() \ + { "rc_mode", "Set rate control mode", OFFSET(rc_mode), AV_OPT_TYPE_INT, \ + { .i64 = MPP_ENC_RC_MODE_CBR }, MPP_ENC_RC_MODE_VBR, MPP_ENC_RC_MODE_BUTT, VE, "rc_mode"}, \ + {"VBR", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MPP_ENC_RC_MODE_VBR }, 0, 0, VE, "rc_mode" }, \ + {"CBR", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MPP_ENC_RC_MODE_CBR }, 0, 0, VE, "rc_mode" }, \ + {"CQP", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MPP_ENC_RC_MODE_FIXQP }, 0, 0, VE, "rc_mode" }, \ + {"AVBR", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MPP_ENC_RC_MODE_AVBR }, 0, 0, VE, "rc_mode" }, \ + { "quality_min", "Minimum Quality", OFFSET(qmin), AV_OPT_TYPE_INT, \ + { .i64=50 }, 0, 100, VE, "qmin"}, \ + { "quality_max", "Maximum Quality", OFFSET(qmax), AV_OPT_TYPE_INT, \ + { .i64=100 }, 0, 100, VE, "qmax"}, \ + { "width", "scale to Width", OFFSET(postrga_width), AV_OPT_TYPE_INT, \ + { .i64=0 }, 0, RKMPP_RGA_MAX_SIZE, VE, "width"}, \ + { "height", "scale to Height", OFFSET(postrga_height), AV_OPT_TYPE_INT, \ + { .i64=0 }, 0, RKMPP_RGA_MAX_SIZE, VE, "height"}, + +static const AVOption options_h264_encoder[] = { + ENCODEROPTS() + { "profile", "Set profile restrictions", OFFSET(profile), AV_OPT_TYPE_INT, + { .i64=FF_PROFILE_H264_HIGH }, -1, FF_PROFILE_H264_HIGH, VE, "profile"}, + { "baseline", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_H264_BASELINE}, INT_MIN, INT_MAX, VE, "profile" }, + { "main", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_H264_MAIN}, INT_MIN, INT_MAX, VE, "profile" }, + { "high", NULL, 0, 
AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_H264_HIGH}, INT_MIN, INT_MAX, VE, "profile" }, + { "level", "Compression Level", OFFSET(level), AV_OPT_TYPE_INT, + { .i64 = 0 }, FF_LEVEL_UNKNOWN, 0xff, VE, "level"}, + { "1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 10 }, 0, 0, VE, "level"}, + { "1.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 11 }, 0, 0, VE, "level"}, + { "1.2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 12 }, 0, 0, VE, "level"}, + { "1.3", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, 0, 0, VE, "level"}, + { "2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 20 }, 0, 0, VE, "level"}, + { "2.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 21 }, 0, 0, VE, "level"}, + { "2.2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 22 }, 0, 0, VE, "level"}, + { "3", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, 0, 0, VE, "level"}, + { "3.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, 0, 0, VE, "level"}, + { "3.2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, 0, 0, VE, "level"}, + { "4", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, 0, 0, VE, "level"}, + { "4.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, 0, 0, VE, "level"}, + { "4.2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, 0, 0, VE, "level"}, + { "5", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, 0, 0, VE, "level"}, + { "5.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, 0, 0, VE, "level"}, + { "5.2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, 0, 0, VE, "level"}, + { "6", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 60 }, 0, 0, VE, "level"}, + { "6.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 61 }, 0, 0, VE, "level"}, + { "6.2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 62 }, 0, 0, VE, "level"}, + { "coder", "Entropy coder type (from 0 to 1) (default cabac)", OFFSET(coder), AV_OPT_TYPE_INT, + { .i64 = 1 }, 0, 1, VE, "coder"}, + { "cavlc", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, "coder" }, + { "cabac", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, "coder" }, + { "8x8dct", "High profile 8x8 transform.", OFFSET(dct8x8), AV_OPT_TYPE_BOOL, { .i64 = 1 
}, 0, 1, VE}, + { NULL } +}; + +static const AVOption options_hevc_encoder[] = { + ENCODEROPTS() + { "level", "Compression Level", OFFSET(level), AV_OPT_TYPE_INT, + { .i64 = 0 }, FF_LEVEL_UNKNOWN, 0xff, VE, "level"}, + { "1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, 0, 0, VE, "level"}, + { "2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 60 }, 0, 0, VE, "level"}, + { "2.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 63 }, 0, 0, VE, "level"}, + { "3", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 90 }, 0, 0, VE, "level"}, + { "3.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 93 }, 0, 0, VE, "level"}, + { "4", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 120 }, 0, 0, VE, "level"}, + { "4.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 123 }, 0, 0, VE, "level"}, + { "5", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 150 }, 0, 0, VE, "level"}, + { "5.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 153 }, 0, 0, VE, "level"}, + { "5.2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 156 }, 0, 0, VE, "level"}, + { "6", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 180 }, 0, 0, VE, "level"}, + { "6.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 183 }, 0, 0, VE, "level"}, + { "6.2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 186 }, 0, 0, VE, "level"}, + { "8.5", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 255 }, 0, 0, VE, "level"}, + { NULL } +}; + +static const AVOption options_vp8_encoder[] = { + ENCODEROPTS() + { NULL } +}; + +#define DECODEROPTIONS(NAME, TYPE) \ +static const AVOption options_##NAME##_##TYPE[] = { \ + { NULL } \ + }; + +DECODEROPTIONS(h263, decoder); +DECODEROPTIONS(h264, decoder); +DECODEROPTIONS(hevc, decoder); +DECODEROPTIONS(av1, decoder); +DECODEROPTIONS(vp8, decoder); +DECODEROPTIONS(vp9, decoder); +DECODEROPTIONS(mpeg1, decoder); +DECODEROPTIONS(mpeg2, decoder); +DECODEROPTIONS(mpeg4, decoder); + +static const AVCodecDefault rkmpp_enc_defaults[] = { + { "b", "6M" }, + { "g", "60" }, + { NULL } +}; + +static const enum AVPixelFormat rkmppvepu1formats[] = { + AV_PIX_FMT_NV16, + AV_PIX_FMT_YUV422P, + AV_PIX_FMT_YUYV422, + AV_PIX_FMT_UYVY422, 
+ AV_PIX_FMT_BGRA, + AV_PIX_FMT_BGR0, + AV_PIX_FMT_NV12, + AV_PIX_FMT_YUV420P, + AV_PIX_FMT_DRM_PRIME, + AV_PIX_FMT_NONE, +}; + +static const enum AVPixelFormat rkmppvepu5formats[] = { + AV_PIX_FMT_NV24, + AV_PIX_FMT_YUV444P, + AV_PIX_FMT_NV16, + AV_PIX_FMT_YUV422P, + AV_PIX_FMT_BGR24, + AV_PIX_FMT_YUYV422, + AV_PIX_FMT_UYVY422, + AV_PIX_FMT_BGRA, + AV_PIX_FMT_BGR0, + AV_PIX_FMT_NV12, + AV_PIX_FMT_YUV420P, + AV_PIX_FMT_DRM_PRIME, + AV_PIX_FMT_NONE, +}; + +#define RKMPP_CODEC(NAME, ID, BSFS, TYPE) \ + static const AVClass rkmpp_##NAME##_##TYPE##_class = { \ + .class_name = "rkmpp_" #NAME "_" #TYPE, \ + .item_name = av_default_item_name,\ + .option = options_##NAME##_##TYPE, \ + .version = LIBAVUTIL_VERSION_INT, \ + }; \ + const AVCodec ff_##NAME##_rkmpp_##TYPE = { \ + .name = #NAME "_rkmpp_" #TYPE, \ + .long_name = #NAME " (rkmpp " #TYPE " )", \ + .type = AVMEDIA_TYPE_VIDEO, \ + .id = ID, \ + .priv_data_size = sizeof(RKMPPCodecContext), \ + .init = rkmpp_init_codec, \ + .close = rkmpp_close_codec, \ + .flush = rkmpp_flush, \ + .priv_class = &rkmpp_##NAME##_##TYPE##_class, \ + .bsfs = BSFS, \ + .wrapper_name = "rkmpp", + + +#define RKMPP_DEC(NAME, ID, BSFS) \ + RKMPP_CODEC(NAME, ID, BSFS, decoder) \ + .receive_frame = rkmpp_receive_frame, \ + .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HARDWARE, \ + .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_DRM_PRIME, \ + AV_PIX_FMT_NV12, \ + AV_PIX_FMT_YUV420P, \ + AV_PIX_FMT_NONE}, \ + .hw_configs = (const AVCodecHWConfigInternal *const []) { HW_CONFIG_INTERNAL(DRM_PRIME), \ + HW_CONFIG_INTERNAL(NV12), \ + NULL}, \ + }; + +#define RKMPP_ENC(NAME, ID, VEPU) \ + RKMPP_CODEC(NAME, ID, NULL, encoder) \ + .encode2 = rkmpp_encode, \ + .capabilities = AV_CODEC_CAP_HARDWARE, \ + .defaults = rkmpp_enc_defaults, \ + .pix_fmts = rkmpp##VEPU##formats, \ + .hw_configs = (const AVCodecHWConfigInternal *const []) { HW_CONFIG_INTERNAL(NV12), \ + NULL}, \ + }; diff --git a/libavcodec/rkmppdec.c 
b/libavcodec/rkmppdec.c
new file mode 100644
index 0000000000..acec713140
--- /dev/null
+++ b/libavcodec/rkmppdec.c
@@ -0,0 +1,335 @@
+/*
+ * RockChip MPP Video Decoder
+ * Copyright (c) 2017 Lionel CHAZALLON
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "rkmpp.h"
+#include "rkplane.h"
+
+
+int rkmpp_init_decoder(AVCodecContext *avctx){
+    RKMPPCodecContext *rk_context = avctx->priv_data;
+    RKMPPCodec *codec = (RKMPPCodec *)rk_context->codec_ref->data;
+
+    int ret;
+
+    ret = codec->mpi->control(codec->ctx, MPP_DEC_SET_EXT_BUF_GROUP, codec->buffer_group);
+    if (ret) {
+        av_log(avctx, AV_LOG_ERROR, "Failed to assign buffer group (code = %d)\n", ret);
+        return AVERROR_UNKNOWN;
+    }
+
+    ret = codec->mpi->control(codec->ctx, MPP_DEC_SET_DISABLE_ERROR, NULL);
+    if (ret < 0) {
+        av_log(avctx, AV_LOG_ERROR, "Failed to prepare Codec (code = %d)\n", ret);
+        return AVERROR_UNKNOWN;
+    }
+
+    avctx->coded_width = FFALIGN(avctx->width, 64);
+    avctx->coded_height = FFALIGN(avctx->height, 64);
+
+    codec->hwdevice_ref = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_DRM);
+    if (!codec->hwdevice_ref) {
+        return AVERROR(ENOMEM);
+    }
+
+    ret = av_hwdevice_ctx_init(codec->hwdevice_ref);
+    if (ret < 0)
+        return ret;
+
+    av_buffer_unref(&codec->hwframes_ref);
+
codec->hwframes_ref = av_hwframe_ctx_alloc(codec->hwdevice_ref); + if (!codec->hwframes_ref) { + return AVERROR(ENOMEM); + } + + return 0; +} + +static int rkmpp_get_frame(AVCodecContext *avctx, AVFrame *frame, int timeout) +{ + RKMPPCodecContext *rk_context = avctx->priv_data; + RKMPPCodec *codec = (RKMPPCodec *)rk_context->codec_ref->data; + MppFrameFormat mpp_format; + MppFrame mppframe = NULL; + MppBuffer buffer = NULL; + rkformat format; + int ret, latency; + int mpp_width, mpp_height, mpp_mode; + enum AVColorRange mpp_color_range; + enum AVColorPrimaries mpp_color_primaries; + enum AVColorTransferCharacteristic mpp_color_trc; + enum AVColorSpace mpp_color_space; + int64_t mpp_pts; + + codec->mpi->control(codec->ctx, MPP_SET_OUTPUT_TIMEOUT, (MppParam)&timeout); + + ret = codec->mpi->decode_get_frame(codec->ctx, &mppframe); + if (ret != MPP_OK && ret != MPP_ERR_TIMEOUT) { + av_log(avctx, AV_LOG_ERROR, "Failed to get frame (code = %d)\n", ret); + return AVERROR_UNKNOWN; + } + + if (!mppframe) { + av_log(avctx, AV_LOG_DEBUG, "Timeout getting decoded frame.\n"); + return AVERROR(EAGAIN); + } + + if (mpp_frame_get_eos(mppframe)) { + av_log(avctx, AV_LOG_DEBUG, "Received a EOS frame.\n"); + ret = AVERROR_EOF; + goto clean; + } + + if (mpp_frame_get_discard(mppframe)) { + av_log(avctx, AV_LOG_DEBUG, "Received a discard frame.\n"); + ret = AVERROR(EAGAIN); + goto clean; + } + + if (mpp_frame_get_errinfo(mppframe)) { + av_log(avctx, AV_LOG_ERROR, "Received a errinfo frame.\n"); + ret = AVERROR_UNKNOWN; + goto clean; + } + + mpp_format = mpp_frame_get_fmt(mppframe) & MPP_FRAME_FMT_MASK; + + if (mpp_frame_get_info_change(mppframe)) { + if(codec->hascfg) + ret = AVERROR(EAGAIN); + else{ + if (avctx->pix_fmt == AV_PIX_FMT_DRM_PRIME){ + char drmname[4]; + AVHWFramesContext *hwframes; + rkmpp_get_mpp_format(&format, mpp_format); + DRMFORMATNAME(drmname, format.drm) + + hwframes = (AVHWFramesContext*)codec->hwframes_ref->data; + hwframes->format = AV_PIX_FMT_DRM_PRIME; + 
hwframes->sw_format = format.av; + hwframes->width = avctx->width; + hwframes->height = avctx->height; + ret = av_hwframe_ctx_init(codec->hwframes_ref); + // FIXME: handle error + av_log(avctx, AV_LOG_INFO, "Decoder is set to DRM Prime with format %s.\n", drmname); + } else if (mpp_format == MPP_FMT_YUV420SP_10BIT) + av_log(avctx, AV_LOG_WARNING, "10bit NV15 plane will be downgraded to 8bit %s.\n", av_get_pix_fmt_name(avctx->pix_fmt)); + codec->hascfg = 1; + } + + av_log(avctx, AV_LOG_INFO, "Decoder noticed an info change\n"); + codec->mpi->control(codec->ctx, MPP_DEC_SET_INFO_CHANGE_READY, NULL); + goto clean; + } + + // here we should have a valid frame + av_log(avctx, AV_LOG_DEBUG, "Received a frame.\n"); + + // now setup the frame buffer info + buffer = mpp_frame_get_buffer(mppframe); + if (!buffer) { + av_log(avctx, AV_LOG_ERROR, "Failed to get the frame buffer, frame is dropped (code = %d)\n", ret); + ret = AVERROR(EAGAIN); + goto clean; + } + + latency = rkmpp_update_latency(avctx, -1); + mpp_pts = mpp_frame_get_pts(mppframe); + mpp_width = mpp_frame_get_width(mppframe); + mpp_height = mpp_frame_get_height(mppframe); + mpp_color_range = mpp_frame_get_color_range(mppframe); + mpp_color_primaries = mpp_frame_get_color_primaries(mppframe); + mpp_color_trc = mpp_frame_get_color_trc(mppframe); + mpp_color_space = mpp_frame_get_colorspace(mppframe); + mpp_mode = mpp_frame_get_mode(mppframe); + + if (avctx->pix_fmt == AV_PIX_FMT_DRM_PRIME){ + ret = import_mpp_to_drm(avctx, mppframe, frame); + } else if (mpp_format == MPP_FMT_YUV420SP_10BIT && avctx->pix_fmt == AV_PIX_FMT_NV12){ + ret = mpp_nv15_av_nv12(avctx, mppframe, frame); + } else if (mpp_format == MPP_FMT_YUV420SP_10BIT && avctx->pix_fmt == AV_PIX_FMT_YUV420P){ + ret = mpp_nv15_av_yuv420p(avctx, mppframe, frame); + } else if (mpp_format == MPP_FMT_YUV420SP && avctx->pix_fmt == AV_PIX_FMT_NV12){ + ret = mpp_nv12_av_nv12(avctx, mppframe, frame); + } else { + rkmpp_get_mpp_format(&format, mpp_format); + ret = 
convert_mpp_to_av(avctx, mppframe, frame, format.av, avctx->pix_fmt);
+    }
+
+    if(ret < 0){
+        av_log(avctx, AV_LOG_ERROR, "Failed set frame buffer (code = %d)\n", ret);
+        return ret;
+    }
+
+    // setup general frame fields
+    frame->format = avctx->pix_fmt;
+    frame->width = mpp_width;
+    frame->height = mpp_height;
+    frame->color_range = mpp_color_range;
+    frame->color_primaries = mpp_color_primaries;
+    frame->color_trc = mpp_color_trc;
+    frame->colorspace = mpp_color_space;
+    frame->pts = mpp_pts;
+
+    // when mpp can not determine the color space, it returns reserved (0) value
+    // firefox does not understand this and instead expect unspecified (2) values
+    frame->color_primaries = frame->color_primaries == AVCOL_PRI_RESERVED0 ? AVCOL_PRI_UNSPECIFIED : frame->color_primaries;
+    frame->color_trc = frame->color_trc == AVCOL_TRC_RESERVED0 ? AVCOL_TRC_UNSPECIFIED : frame->color_trc;
+    frame->colorspace = frame->colorspace == AVCOL_SPC_RGB ? AVCOL_SPC_UNSPECIFIED : frame->colorspace;
+
+    frame->interlaced_frame = ((mpp_mode & MPP_FRAME_FLAG_FIELD_ORDER_MASK) == MPP_FRAME_FLAG_DEINTERLACED);
+    frame->top_field_first = ((mpp_mode & MPP_FRAME_FLAG_FIELD_ORDER_MASK) == MPP_FRAME_FLAG_TOP_FIRST);
+
+    codec->frames++;
+    rkmpp_update_latency(avctx, latency);
+    return 0;
+
+clean:
+    if (mppframe)
+        mpp_frame_deinit(&mppframe);
+    return ret;
+}
+
+static int rkmpp_send_packet(AVCodecContext *avctx, AVPacket *packet)
+{
+    RKMPPCodecContext *rk_context = avctx->priv_data;
+    RKMPPCodec *codec = (RKMPPCodec *)rk_context->codec_ref->data;
+    MppPacket mpkt;
+    int64_t pts = packet->pts;
+    int ret;
+
+    if(pts == AV_NOPTS_VALUE || pts < 0){
+        if(!codec->ptsstep && avctx->framerate.den && avctx->framerate.num){
+            int64_t x = avctx->pkt_timebase.den * (int64_t)avctx->framerate.den;
+            int64_t y = avctx->pkt_timebase.num * (int64_t)avctx->framerate.num;
+            codec->ptsstep = x / y;
+        }
+        if(codec->ptsstep && (packet->dts == AV_NOPTS_VALUE || packet->dts < 0)){
+            pts = codec->pts;
+            codec->pts +=
codec->ptsstep;
+        } else {
+            codec->pts = packet->dts;
+            pts = packet->dts;
+        }
+    }
+
+    ret = mpp_packet_init(&mpkt, packet->data, packet->size);
+    if (ret != MPP_OK) {
+        av_log(avctx, AV_LOG_ERROR, "Failed to init MPP packet (code = %d)\n", ret);
+        return AVERROR_UNKNOWN;
+    }
+
+    mpp_packet_set_pts(mpkt, pts);
+
+    ret = codec->mpi->decode_put_packet(codec->ctx, mpkt);
+    mpp_packet_deinit(&mpkt);
+
+    if (ret != MPP_OK) {
+        av_log(avctx, AV_LOG_TRACE, "Decoder buffer full\n");
+        return AVERROR(EAGAIN);
+    }
+
+    av_log(avctx, AV_LOG_DEBUG, "Wrote %d bytes to decoder\n", packet->size);
+    return 0;
+}
+
+static int rkmpp_send_eos(AVCodecContext *avctx)
+{
+    RKMPPCodecContext *rk_context = avctx->priv_data;
+    RKMPPCodec *codec = (RKMPPCodec *)rk_context->codec_ref->data;
+    MppPacket mpkt;
+    int ret;
+
+    ret = mpp_packet_init(&mpkt, NULL, 0);
+    if (ret != MPP_OK) {
+        av_log(avctx, AV_LOG_ERROR, "Failed to init EOS packet (code = %d)\n", ret);
+        return AVERROR_UNKNOWN;
+    }
+
+    mpp_packet_set_eos(mpkt);
+
+    do {
+        ret = codec->mpi->decode_put_packet(codec->ctx, mpkt);
+    } while (ret != MPP_OK);
+    mpp_packet_deinit(&mpkt);
+
+    return 0;
+}
+
+int rkmpp_receive_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+    AVCodecInternal *avci = avctx->internal;
+    RKMPPCodecContext *rk_context = avctx->priv_data;
+    RKMPPCodec *codec = (RKMPPCodec *)rk_context->codec_ref->data;
+    AVPacket *packet = &codec->lastpacket;
+    int ret_send = 0, ret_get;
+
+    // get packet if not already available from previous iteration
+    if (!avci->draining){
+        if(!packet->size){
+            switch(ff_decode_get_packet(avctx, packet)){
+            case AVERROR_EOF:
+                av_log(avctx, AV_LOG_DEBUG, "Decoder Draining.\n");
+                return rkmpp_send_eos(avctx);
+            case AVERROR(EAGAIN):
+                av_log(avctx, AV_LOG_TRACE, "Decoder Can't get packet retrying.\n");
+                return AVERROR(EAGAIN);
+            }
+        }
+
+sendpacket:
+        // there is definitely a packet to send to decoder here
+        ret_send = rkmpp_send_packet(avctx, packet);
+        if (ret_send == 0){
+            // send
successful, continue until decoder input buffer is full + av_packet_unref(packet); + return AVERROR(EAGAIN); + } else if (ret_send < 0 && ret_send != AVERROR(EAGAIN)) { + // something went wrong, raise error + av_log(avctx, AV_LOG_ERROR, "Decoder Failed to send data (code = %d)\n", ret_send); + return ret_send; + } + } + + // were here only when draining and buffer is full + ret_get = rkmpp_get_frame(avctx, frame, MPP_TIMEOUT_BLOCK); + + if (ret_get == AVERROR_EOF){ + av_log(avctx, AV_LOG_DEBUG, "Decoder is at EOS.\n"); + // this is not likely but lets handle it in case synchronization issues of mpp + } else if (ret_get == AVERROR(EAGAIN) && ret_send == AVERROR(EAGAIN)) + goto sendpacket; + // only for logging + else if (ret_get < 0 && ret_get != AVERROR(EAGAIN)) // FIXME + av_log(avctx, AV_LOG_ERROR, "Decoder Failed to get frame (code = %d)\n", ret_get); + + return ret_get; +} + + +RKMPP_DEC(h263, AV_CODEC_ID_H263, NULL) +RKMPP_DEC(h264, AV_CODEC_ID_H264, "h264_mp4toannexb") +RKMPP_DEC(hevc, AV_CODEC_ID_HEVC, "hevc_mp4toannexb") +RKMPP_DEC(av1, AV_CODEC_ID_AV1, NULL) +RKMPP_DEC(vp8,AV_CODEC_ID_VP8, NULL) +RKMPP_DEC(vp9, AV_CODEC_ID_VP9, NULL) +RKMPP_DEC(mpeg1, AV_CODEC_ID_MPEG1VIDEO, NULL) +RKMPP_DEC(mpeg2, AV_CODEC_ID_MPEG2VIDEO, NULL) +RKMPP_DEC(mpeg4,AV_CODEC_ID_MPEG4, "mpeg4_unpack_bframes") + diff --git a/libavcodec/rkmppenc.c b/libavcodec/rkmppenc.c new file mode 100644 index 0000000000..f4da62cca7 --- /dev/null +++ b/libavcodec/rkmppenc.c @@ -0,0 +1,620 @@ +/* + * RockChip MPP Video Decoder + * Copyright (c) 2023 Huseyin BIYIK + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include "rkmpp.h" +#include "rkplane.h" + +static int rkmpp_config_withframe(AVCodecContext *avctx, MppFrame mppframe, AVFrame *frame){ + RKMPPCodecContext *rk_context = avctx->priv_data; + RKMPPCodec *codec = (RKMPPCodec *)rk_context->codec_ref->data; + MppEncCfg cfg = codec->enccfg; + + if(codec->hascfg == 0){ + rkformat format; + + int ret; + avctx->time_base.num = avctx->framerate.den; + avctx->time_base.den = avctx->framerate.num; + + mpp_enc_cfg_set_s32(cfg, "prep:width", mpp_frame_get_width(mppframe)); + mpp_enc_cfg_set_s32(cfg, "prep:height", mpp_frame_get_height(mppframe)); + mpp_enc_cfg_set_s32(cfg, "prep:hor_stride", mpp_frame_get_hor_stride(mppframe)); + mpp_enc_cfg_set_s32(cfg, "prep:ver_stride", mpp_frame_get_ver_stride(mppframe)); + mpp_enc_cfg_set_s32(cfg, "prep:format", mpp_frame_get_fmt(mppframe) & MPP_FRAME_FMT_MASK); + ret = codec->mpi->control(codec->ctx, MPP_ENC_SET_CFG, cfg); + if (ret != MPP_OK) { + av_log(avctx, AV_LOG_ERROR, "Failed to config with frame (code = %d).\n", ret); + return AVERROR_UNKNOWN; + } + codec->hascfg = 1; + rkmpp_get_mpp_format(&format, mpp_frame_get_fmt(mppframe)); + av_log(avctx, AV_LOG_INFO, "Reconfigured with w=%d, h=%d, format=%s.\n", mpp_frame_get_width(mppframe), + mpp_frame_get_height(mppframe), av_get_pix_fmt_name(format.av)); + return 0; + } + return 0; +} + +static int rkmpp_config(AVCodecContext *avctx){ + RKMPPCodecContext *rk_context = avctx->priv_data; + RKMPPCodec *codec = (RKMPPCodec *)rk_context->codec_ref->data; + MppEncCfg cfg 
= codec->enccfg; + RK_U32 rc_mode, split_mode, split_arg, split_out, fps_num, fps_den; + MppCodingType coding_type = rkmpp_get_codingtype(avctx); + MppEncHeaderMode header_mode; + MppEncSeiMode sei_mode; + int ret, max_bps, min_bps, qmin, qmax; + + //prep config + mpp_enc_cfg_set_s32(cfg, "prep:width", avctx->width); + mpp_enc_cfg_set_s32(cfg, "prep:height", avctx->height); + mpp_enc_cfg_set_s32(cfg, "prep:hor_stride", FFALIGN(avctx->width, RKMPP_STRIDE_ALIGN)); + mpp_enc_cfg_set_s32(cfg, "prep:ver_stride", FFALIGN(avctx->height, RKMPP_STRIDE_ALIGN)); + // later to be reconfigured with the first frame received + mpp_enc_cfg_set_s32(cfg, "prep:format", MPP_FMT_YUV420SP); + mpp_enc_cfg_set_s32(cfg, "prep:mirroring", 0); + mpp_enc_cfg_set_s32(cfg, "prep:rotation", 0); + mpp_enc_cfg_set_s32(cfg, "prep:flip", 0); + + //rc config + // make sure time base of avctx is synced to input frames + av_reduce(&fps_num, &fps_den, avctx->time_base.den, avctx->time_base.num, 65535); + + /* fix input / output frame rate */ + mpp_enc_cfg_set_s32(cfg, "rc:fps_in_flex", 0); + mpp_enc_cfg_set_s32(cfg, "rc:fps_in_num", fps_num); + mpp_enc_cfg_set_s32(cfg, "rc:fps_in_denorm", fps_den); + mpp_enc_cfg_set_s32(cfg, "rc:fps_out_flex", 0); + mpp_enc_cfg_set_s32(cfg, "rc:fps_out_num",fps_num); + mpp_enc_cfg_set_s32(cfg, "rc:fps_out_denorm", fps_den); + + mpp_enc_cfg_set_s32(cfg, "rc:gop", FFMAX(avctx->gop_size, 1)); + + // config rc: mode + rc_mode = rk_context->rc_mode; + if(rc_mode == MPP_ENC_RC_MODE_BUTT) + rc_mode = MPP_ENC_RC_MODE_CBR; + + switch(rc_mode){ + case MPP_ENC_RC_MODE_VBR: + av_log(avctx, AV_LOG_INFO, "Rate Control mode is set to VBR\n"); break; + case MPP_ENC_RC_MODE_CBR: + av_log(avctx, AV_LOG_INFO, "Rate Control mode is set to CBR\n"); break; + case MPP_ENC_RC_MODE_FIXQP: + av_log(avctx, AV_LOG_INFO, "Rate Control mode is set to CQP\n"); break; + case MPP_ENC_RC_MODE_AVBR: + av_log(avctx, AV_LOG_INFO, "Rate Control mode is set to AVBR\n"); break; + } + + 
mpp_enc_cfg_set_u32(cfg, "rc:mode", rc_mode);
+
+    // config rc: bps
+    mpp_enc_cfg_set_u32(cfg, "rc:bps_target", avctx->bit_rate);
+
+    switch (rc_mode) {
+    case MPP_ENC_RC_MODE_FIXQP : {
+        /* do not setup bitrate on FIXQP mode */
+        min_bps = max_bps = avctx->bit_rate;
+        break;
+    }
+    case MPP_ENC_RC_MODE_CBR : {
+        /* CBR mode has narrow bound */
+        max_bps = avctx->bit_rate * 17 / 16;
+        min_bps = avctx->bit_rate * 15 / 16;
+        break;
+    }
+    case MPP_ENC_RC_MODE_VBR :
+    case MPP_ENC_RC_MODE_AVBR : {
+        /* VBR mode has wide bound */
+        max_bps = avctx->bit_rate * 17 / 16;
+        min_bps = avctx->bit_rate * 1 / 16;
+        break;
+    }
+    default : {
+        /* default use CBR mode */
+        max_bps = avctx->bit_rate * 17 / 16;
+        min_bps = avctx->bit_rate * 15 / 16;
+        break;
+    }
+    }
+
+    mpp_enc_cfg_set_s32(cfg, "rc:bps_max", max_bps);
+    mpp_enc_cfg_set_s32(cfg, "rc:bps_min", min_bps);
+
+    av_log(avctx, AV_LOG_INFO, "Bitrate Target/Min/Max is set to %"PRId64"/%d/%d\n", avctx->bit_rate, min_bps, max_bps);
+
+    // config rc: drop behaviour
+    mpp_enc_cfg_set_u32(cfg, "rc:drop_mode", MPP_ENC_RC_DROP_FRM_DISABLED);
+    mpp_enc_cfg_set_u32(cfg, "rc:drop_thd", 20); // 20% of max bps
+    mpp_enc_cfg_set_u32(cfg, "rc:drop_gap", 1); // Do not continuous drop frame
+
+    // config rc: qp
+    switch (coding_type) {
+    case MPP_VIDEO_CodingAVC :
+    case MPP_VIDEO_CodingHEVC : {
+        qmax = QMIN_H26x + (100 - rk_context->qmin) * (QMAX_H26x - QMIN_H26x) / 100;
+        qmin = QMIN_H26x + (100 - rk_context->qmax) * (QMAX_H26x - QMIN_H26x) / 100;
+        switch (rc_mode) {
+        case MPP_ENC_RC_MODE_FIXQP : {
+            mpp_enc_cfg_set_s32(cfg, "rc:qp_init", qmin);
+            mpp_enc_cfg_set_s32(cfg, "rc:qp_max", qmin);
+            mpp_enc_cfg_set_s32(cfg, "rc:qp_min", qmin);
+            mpp_enc_cfg_set_s32(cfg, "rc:qp_max_i", qmin);
+            mpp_enc_cfg_set_s32(cfg, "rc:qp_min_i", qmin);
+            mpp_enc_cfg_set_s32(cfg, "rc:qp_ip", 0);
+            break;
+        }
+        case MPP_ENC_RC_MODE_CBR :
+        case MPP_ENC_RC_MODE_VBR :
+        case MPP_ENC_RC_MODE_AVBR : {
+            mpp_enc_cfg_set_s32(cfg, "rc:qp_init", qmin);
+
mpp_enc_cfg_set_s32(cfg, "rc:qp_max", qmax); + mpp_enc_cfg_set_s32(cfg, "rc:qp_min", qmin); + mpp_enc_cfg_set_s32(cfg, "rc:qp_max_i",qmax); + mpp_enc_cfg_set_s32(cfg, "rc:qp_min_i", qmin); + mpp_enc_cfg_set_s32(cfg, "rc:qp_ip", 2); + break; + } + default : { + av_log(avctx, AV_LOG_ERROR, "Unsupported Encoder Mode %d.\n", rc_mode); + break; + } + } + break; + } + case MPP_VIDEO_CodingVP8 : { + // vp8 only setup base qp range + qmax = QMIN_VPx + (100 - rk_context->qmin) * (QMAX_VPx - QMIN_VPx) / 100; + qmin = QMIN_VPx + (100 - rk_context->qmax) * (QMAX_VPx - QMIN_VPx) / 100; + mpp_enc_cfg_set_s32(cfg, "rc:qp_init", qmin); + mpp_enc_cfg_set_s32(cfg, "rc:qp_max", qmax); + mpp_enc_cfg_set_s32(cfg, "rc:qp_min", qmin); + mpp_enc_cfg_set_s32(cfg, "rc:qp_max_i", qmax); + mpp_enc_cfg_set_s32(cfg, "rc:qp_min_i", qmin); + mpp_enc_cfg_set_s32(cfg, "rc:qp_ip", 6); + break; + } + case MPP_VIDEO_CodingMJPEG : { + qmax = QMIN_JPEG + (100 - rk_context->qmin) * (QMAX_JPEG - QMIN_JPEG) / 100; + qmin = QMIN_JPEG + (100 - rk_context->qmax) * (QMAX_JPEG- QMIN_JPEG) / 100; + // jpeg use special codec config to control qtable + mpp_enc_cfg_set_s32(cfg, "jpeg:q_factor", 80); + mpp_enc_cfg_set_s32(cfg, "jpeg:qf_max", qmax); + mpp_enc_cfg_set_s32(cfg, "jpeg:qf_min", qmin); + break; + } + default : { + break; + } + } + + // setup codec + mpp_enc_cfg_set_s32(cfg, "codec:type", coding_type); + switch (coding_type) { + case MPP_VIDEO_CodingAVC : { + avctx->profile = rk_context->profile; + avctx->level = rk_context->level; + mpp_enc_cfg_set_s32(cfg, "h264:profile", avctx->profile); + mpp_enc_cfg_set_s32(cfg, "h264:level", avctx->level); + mpp_enc_cfg_set_s32(cfg, "h264:cabac_en", rk_context->coder); + mpp_enc_cfg_set_s32(cfg, "h264:cabac_idc", 0); + mpp_enc_cfg_set_s32(cfg, "h264:trans8x8", rk_context->dct8x8 && avctx->profile == FF_PROFILE_H264_HIGH ? 
1 : 0); + switch(avctx->profile){ + case FF_PROFILE_H264_BASELINE: av_log(avctx, AV_LOG_INFO, "Profile is set to BASELINE\n"); break; + case FF_PROFILE_H264_MAIN: av_log(avctx, AV_LOG_INFO, "Profile is set to MAIN\n"); break; + case FF_PROFILE_H264_HIGH: + av_log(avctx, AV_LOG_INFO, "Profile is set to HIGH\n"); + if(rk_context->dct8x8) + av_log(avctx, AV_LOG_INFO, "8x8 Transform is enabled\n"); + break; + } + av_log(avctx, AV_LOG_INFO, "Level is set to %d\n", avctx->level); + if(rk_context->coder) + av_log(avctx, AV_LOG_INFO, "Coder is set to CABAC\n"); + else + av_log(avctx, AV_LOG_INFO, "Coder is set to CAVLC\n"); + break; + } + case MPP_VIDEO_CodingHEVC : { + avctx->profile = FF_PROFILE_HEVC_MAIN; + avctx->level = rk_context->level; + mpp_enc_cfg_set_s32(cfg, "h265:profile", avctx->profile); + mpp_enc_cfg_set_s32(cfg, "h265:level", avctx->level); + switch(avctx->profile){ + case FF_PROFILE_HEVC_MAIN: av_log(avctx, AV_LOG_INFO, "Profile is set to MAIN\n"); break; + case FF_PROFILE_HEVC_MAIN_10: av_log(avctx, AV_LOG_INFO, "Profile is set to MAIN 10\n"); break; + } + av_log(avctx, AV_LOG_INFO, "Level is set to %d\n", avctx->level == 255 ? 
85 : avctx->level / 3); + break; + } + case MPP_VIDEO_CodingMJPEG : + case MPP_VIDEO_CodingVP8 : + mpp_enc_cfg_set_s32(cfg, "vp8:disable_ivf", 1); + break; + default : { + av_log(avctx, AV_LOG_ERROR, "Unsupported coding type for config (code = %d).\n", coding_type); + break; + } + } + + av_log(avctx, AV_LOG_INFO, "Quality Min/Max is set to %d%%(Quant=%d) / %d%%(Quant=%d)\n", + rk_context->qmin, qmax, rk_context->qmax, qmin); + + split_mode = 0; + split_arg = 0; + split_out = 0; + + if (split_mode) { + mpp_enc_cfg_set_s32(cfg, "split:mode", split_mode); + mpp_enc_cfg_set_s32(cfg, "split:arg", split_arg); + mpp_enc_cfg_set_s32(cfg, "split:out", split_out); + } + + ret = codec->mpi->control(codec->ctx, MPP_ENC_SET_CFG, cfg); + if (ret != MPP_OK) { + av_log(avctx, AV_LOG_ERROR, "Failed to set cfg on MPI (code = %d).\n", ret); + return AVERROR_UNKNOWN; + } + + sei_mode = MPP_ENC_SEI_MODE_DISABLE; + ret = codec->mpi->control(codec->ctx, MPP_ENC_SET_SEI_CFG, &sei_mode); + if (ret != MPP_OK) { + av_log(avctx, AV_LOG_ERROR, "Failed to set sei cfg on MPI (code = %d).\n", ret); + return AVERROR_UNKNOWN; + } + + header_mode = MPP_ENC_HEADER_MODE_EACH_IDR; + if (coding_type == MPP_VIDEO_CodingAVC || coding_type == MPP_VIDEO_CodingHEVC) { + ret = codec->mpi->control(codec->ctx, MPP_ENC_SET_HEADER_MODE, &header_mode); + if (ret) { + av_log(avctx, AV_LOG_ERROR, "Failed header mode on MPI (code = %d).\n", ret); + return ret; + } + } + + return 0; +} + +//https://github.com/rockchip-linux/mpp/issues/417 +//Encoder does not support 422 planes, but we can do this with rga +//FIX-ME: NV12/YUV422P do not have libyuv fallbacks when encoding vp8 +static int check_vp8_planes(AVCodecContext *avctx, enum AVPixelFormat pix_fmt){ + MppCodingType coding_type = rkmpp_get_codingtype(avctx); + RKMPPCodecContext *rk_context = avctx->priv_data; + + if(coding_type == MPP_VIDEO_CodingVP8 && + (pix_fmt == AV_PIX_FMT_NV16 || + pix_fmt == AV_PIX_FMT_YUV422P)){ + rk_context->postrga_format = 
AV_PIX_FMT_NV12; + + if (avctx->width < RKMPP_RGA_MIN_SIZE || avctx->width > RKMPP_RGA_MAX_SIZE){ + av_log(avctx, AV_LOG_ERROR, "Frame width (%d) not in rga scalable range (%d - %d)\n", + avctx->width, RKMPP_RGA_MIN_SIZE, RKMPP_RGA_MAX_SIZE); + return -1; + } else + rk_context->postrga_width = avctx->width; + + if (avctx->height < RKMPP_RGA_MIN_SIZE || avctx->height > RKMPP_RGA_MAX_SIZE){ + av_log(avctx, AV_LOG_ERROR, "Frame height (%d) not in rga scalable range (%d - %d)\n", + avctx->height, RKMPP_RGA_MIN_SIZE, RKMPP_RGA_MAX_SIZE); + return -1; + } else + rk_context->postrga_height = avctx->height; + } else + rk_context->postrga_format = AV_PIX_FMT_NONE; + return 0; +} + +static int check_scaling(AVCodecContext *avctx, enum AVPixelFormat pix_fmt){ + RKMPPCodecContext *rk_context = avctx->priv_data; + + if(rk_context->postrga_width || rk_context->postrga_height){ + if(pix_fmt != AV_PIX_FMT_NV16 && pix_fmt != AV_PIX_FMT_NV12 && + pix_fmt != AV_PIX_FMT_YUV422P && pix_fmt != AV_PIX_FMT_YUV420P){ + av_log(avctx, AV_LOG_ERROR, "Scaling is only supported for NV12,NV16,YUV420P,YUV422P. 
%s requested\n", + av_get_pix_fmt_name(pix_fmt)); + return -1; + } + // align it to accepted RGA range + rk_context->postrga_width = FFMAX(rk_context->postrga_width, RKMPP_RGA_MIN_SIZE); + rk_context->postrga_height = FFMAX(rk_context->postrga_height, RKMPP_RGA_MIN_SIZE); + rk_context->postrga_width = FFMIN(rk_context->postrga_width, RKMPP_RGA_MAX_SIZE); + rk_context->postrga_height = FFMIN(rk_context->postrga_height, RKMPP_RGA_MAX_SIZE); + avctx->width = rk_context->postrga_width; + avctx->height = rk_context->postrga_height; + if(rk_context->postrga_format == AV_PIX_FMT_NONE) + rk_context->postrga_format = pix_fmt; + } + return 0; +} + +int rkmpp_init_encoder(AVCodecContext *avctx){ + RKMPPCodecContext *rk_context = avctx->priv_data; + RKMPPCodec *codec = (RKMPPCodec *)rk_context->codec_ref->data; + MppCodingType coding_type = rkmpp_get_codingtype(avctx); + RK_U8 enc_hdr_buf[HDR_SIZE]; + MppPacket packet = NULL; + size_t packetlen; + void *packetpos; + int ret; + int input_timeout = 500; + + // ENCODER SETUP + ret = mpp_enc_cfg_init(&codec->enccfg); + if (ret) { + av_log(avctx, AV_LOG_ERROR, "Codec failed to initialize encoder config (code = %d)\n", ret); + ret = AVERROR_UNKNOWN; + goto fail; + } + + ret = codec->mpi->control(codec->ctx, MPP_ENC_GET_CFG, codec->enccfg); + if (ret) { + av_log(avctx, AV_LOG_ERROR, "Codec failed to get encoder config (code = %d)\n", ret); + ret = AVERROR_UNKNOWN; + goto fail; + } + + if(avctx->pix_fmt != AV_PIX_FMT_DRM_PRIME){ + if(check_vp8_planes(avctx, avctx->pix_fmt)){ + ret = AVERROR_UNKNOWN; + goto fail; + } + if(check_scaling(avctx, avctx->pix_fmt)){ + ret = AVERROR_UNKNOWN; + goto fail; + } + } + + if(rkmpp_config(avctx)){ + ret = AVERROR_UNKNOWN; + goto fail; + } + + // copy sps/pps/vps to extradata for h26x + if(coding_type == MPP_VIDEO_CodingAVC || coding_type == MPP_VIDEO_CodingHEVC){ + memset(enc_hdr_buf, 0 , HDR_SIZE); + + ret = mpp_packet_init(&packet, (void *)enc_hdr_buf, HDR_SIZE); + if (!packet) { + av_log(avctx, 
AV_LOG_ERROR, "Failed to init extra info packet (code = %d).\n", ret); + ret = AVERROR_UNKNOWN; + goto fail; + } + + mpp_packet_set_length(packet, 0); + ret = codec->mpi->control(codec->ctx, MPP_ENC_GET_HDR_SYNC, packet); + if (ret != MPP_OK) { + av_log(avctx, AV_LOG_ERROR, "Failed to get extra info on MPI (code = %d).\n", ret); + ret = AVERROR_UNKNOWN; + goto fail; + } + + /* get and write sps/pps for H.264/H.265 */ + packetpos = mpp_packet_get_pos(packet); + packetlen = mpp_packet_get_length(packet); + + if (avctx->extradata != NULL) { + av_free(avctx->extradata); + avctx->extradata = NULL; + } + avctx->extradata = av_malloc(packetlen + AV_INPUT_BUFFER_PADDING_SIZE); + if (avctx->extradata == NULL) { + ret = AVERROR(ENOMEM); + goto fail; + } + avctx->extradata_size = packetlen + AV_INPUT_BUFFER_PADDING_SIZE; + memcpy(avctx->extradata, packetpos, packetlen); + memset(avctx->extradata + packetlen, 0, AV_INPUT_BUFFER_PADDING_SIZE); + mpp_packet_deinit(&packet); + } + + codec->mpi->control(codec->ctx, MPP_SET_INPUT_TIMEOUT, &input_timeout); + return 0; + +fail: + av_log(avctx, AV_LOG_ERROR, "Failed to initialize RKMPP Codec.\n"); + if(packet) + mpp_packet_deinit(&packet); + return ret; +} + +static void rkmpp_release_packet_buf(void *opaque, uint8_t *data){ + MppPacket mpppacket = opaque; + mpp_packet_deinit(&mpppacket); +} + +static int rkmpp_send_frame(AVCodecContext *avctx, AVFrame *frame){ + RKMPPCodecContext *rk_context = avctx->priv_data; + RKMPPCodec *codec = (RKMPPCodec *)rk_context->codec_ref->data; + MppFrame mppframe = NULL; + rkformat format; + int ret=0, keepframe=0; + + // EOS frame, avframe=NULL + if (!frame) { + av_log(avctx, AV_LOG_DEBUG, "End of stream.\n"); + mpp_frame_init(&mppframe); + mpp_frame_set_eos(mppframe, 1); + } else { + if (avctx->pix_fmt == AV_PIX_FMT_DRM_PRIME){ + // the frame is coming from a DRMPRIME enabled decoder, no copy necessary + // just import existing fd and buffer to mmpp + mppframe = import_drm_to_mpp(avctx, frame); + } 
else { + // the frame is coming from a RKMPP decoder, no copy necessary + // use existing mppframe which is atatched to + // RKMPP_MPPFRAME_BUFINDEX of the frame buffers + // those frames need to be cleaned by the decoder itself therefore dont clean them + mppframe = get_mppframe_from_av(frame); + if(mppframe) + keepframe = 1; + else + // soft frames needs to be copied to a buffer region where mpp supports. + // a copy is necessary here + mppframe = create_mpp_frame(frame->width, frame->height, avctx->pix_fmt, codec->buffer_group, NULL, frame); + } + + if(!mppframe){ + ret = AVERROR_UNKNOWN; + goto clean; + } + + rkmpp_get_mpp_format(&format, mpp_frame_get_fmt(mppframe)); + + if(check_vp8_planes(avctx, format.av)){ + ret = AVERROR_UNKNOWN; + goto clean; + } + if(check_scaling(avctx, format.av)){ + ret = AVERROR_UNKNOWN; + goto clean; + } + + if(rk_context->postrga_format != AV_PIX_FMT_NONE || rk_context->postrga_width || rk_context->postrga_height){ + MppFrame postmppframe = NULL; + + postmppframe = create_mpp_frame(rk_context->postrga_width , rk_context->postrga_height, rk_context->postrga_format, + codec->buffer_group, NULL, NULL); + + if(!postmppframe){ + ret = AVERROR_UNKNOWN; + av_log(avctx, AV_LOG_ERROR, "Error creating post mpp frame\n"); + goto clean; + } + + ret = rga_convert_mpp_mpp(avctx, mppframe, postmppframe); + if(ret){ + mpp_frame_deinit(&postmppframe); + av_log(avctx, AV_LOG_ERROR, "Error applying Post RGA\n"); + goto clean; + } + + if(!keepframe) + mpp_frame_deinit(&mppframe); + else + keepframe = 0; + + mppframe = postmppframe; + } + + mpp_frame_set_pts(mppframe, frame->pts); + } + + ret = rkmpp_config_withframe(avctx, mppframe, frame); + if(ret){ + ret = AVERROR_UNKNOWN; + goto clean; + } + + // put the frame in encoder + ret = codec->mpi->encode_put_frame(codec->ctx, mppframe); + + if (ret != MPP_OK) { + av_log(avctx, AV_LOG_DEBUG, "Encoder buffer full\n"); + ret = AVERROR(EAGAIN); + } else + av_log(avctx, AV_LOG_DEBUG, "Wrote %ld bytes to 
encoder\n", mpp_frame_get_buf_size(mppframe)); + +clean: + if(!keepframe) + mpp_frame_deinit(&mppframe); + return ret; +} + + +static int rkmpp_get_packet(AVCodecContext *avctx, AVPacket *packet, int timeout){ + RKMPPCodecContext *rk_context = avctx->priv_data; + RKMPPCodec *codec = (RKMPPCodec *)rk_context->codec_ref->data; + MppPacket mpppacket = NULL; + MppMeta meta = NULL; + int ret, keyframe=0; + + codec->mpi->control(codec->ctx, MPP_SET_OUTPUT_TIMEOUT, (MppParam)&timeout); + + ret = codec->mpi->encode_get_packet(codec->ctx, &mpppacket); + + // rest of above code is never tested most likely broken + if (ret != MPP_OK && ret != MPP_ERR_TIMEOUT) { + av_log(avctx, AV_LOG_ERROR, "Failed to get packet (code = %d)\n", ret); + return AVERROR(EAGAIN); + } + + if (!mpppacket) { + av_log(avctx, AV_LOG_DEBUG, "Timeout getting encoded packet.\n"); + return AVERROR(EAGAIN); + } + + // TO-DO: Handle EOS + if (mpp_packet_get_eos(mpppacket)) { + av_log(avctx, AV_LOG_DEBUG, "Received an EOS packet.\n"); + ret = AVERROR_EOF; + goto fail; + } + + av_log(avctx, AV_LOG_DEBUG, "Received a packet.\n"); + + packet->data = mpp_packet_get_data(mpppacket); + packet->size = mpp_packet_get_length(mpppacket); + packet->buf = av_buffer_create(packet->data, packet->size, rkmpp_release_packet_buf, + mpppacket, AV_BUFFER_FLAG_READONLY); + if (!packet->buf) { + ret = AVERROR(ENOMEM); + goto fail; + } + + packet->pts = mpp_packet_get_pts(mpppacket); + packet->dts = mpp_packet_get_pts(mpppacket); + codec->frames++; + + meta = mpp_packet_get_meta(mpppacket); + if (meta) + mpp_meta_get_s32(meta, KEY_OUTPUT_INTRA, &keyframe); + if (keyframe) + packet->flags |= AV_PKT_FLAG_KEY; + + return 0; +fail: + if (mpppacket) + mpp_packet_deinit(&mpppacket); + return ret; +} + + +int rkmpp_encode(AVCodecContext *avctx, AVPacket *packet, const AVFrame *frame, int *got_packet){ + int ret; + + ret = rkmpp_send_frame(avctx, (AVFrame *)frame); + if (ret) + return ret; + + ret = rkmpp_get_packet(avctx, packet, 
MPP_TIMEOUT_BLOCK); + if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) { + *got_packet = 0; + } else if (ret) { + return ret; + } else { + *got_packet = 1; + } + return 0; +} + +RKMPP_ENC(h264, AV_CODEC_ID_H264, vepu5) +RKMPP_ENC(hevc, AV_CODEC_ID_HEVC, vepu5) +RKMPP_ENC(vp8, AV_CODEC_ID_VP8, vepu1) diff --git a/libavcodec/rkplane.c b/libavcodec/rkplane.c new file mode 100644 index 0000000000..a138a1883e --- /dev/null +++ b/libavcodec/rkplane.c @@ -0,0 +1,669 @@ +/* + * RockChip MPP Plane Conversions + * Copyright (c) 2023 Huseyin BYIIK + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + + +/* + * Mpp decoder/encoder outputs & inputs generally semi-planar pictures + * FFmpeg on the other hand uses planar pictures. Normally libavfilter has + * several filters to handle this conversion however it is not cpu affective + * This file handles several plane conversion with hardware accelerated RGA chip in + * rockchip socs, whenever there is a failure it falls back to libyuv which is SIMD + * optimized plane conversion library. 
Failure is expected time to time, because + * RGA is not always consistent in between kernel versions of rockchips downstream + * + * Normally both RGA and enhancements in libyuv should be a part or libavfilter, but + * currently this is easier. May be in future someone take this up and move to avfilter. + */ + +#include +#include "rkmpp.h" +#include "rkplane.h" +#include "libyuv/planar_functions.h" +#include "libyuv/scale_uv.h" +#include "libyuv/scale.h" + + +#define AV_VSTRIDE(AVFRAME) (FFALIGN(AVFRAME->buf[0] && AVFRAME->buf[1] ? AVFRAME->buf[0]->size / AVFRAME->linesize[0] : (AVFRAME->data[1] - AVFRAME->data[0]) / AVFRAME->linesize[0], 16)) + +static void rkmpp_release_mppframe(void *opaque, uint8_t *data) +{ + MppFrame mppframe = opaque; + mpp_frame_deinit(&mppframe); +} + +static void rkmpp_release_drm_desc(void *opaque, uint8_t *data) +{ + AVDRMFrameDescriptor *desc = (AVDRMFrameDescriptor *)opaque; + av_free(desc); +} + + +static int set_mppframe_to_avbuff(MppFrame mppframe, AVFrame * frame, int index){ + int i; + + // find the first available buffer in [buf[0], buf[4]] + for(i=0; i<5; i++){ + if(i > 3) + return -1; + else if(!frame->buf[i]) + break; + } + + frame->buf[i] = av_buffer_create(mppframe, mpp_frame_get_buf_size(mppframe), + rkmpp_release_mppframe, mppframe, AV_BUFFER_FLAG_READONLY); + + if(i >= 0){ + if(index >= 0) + frame->data[index] = frame->buf[i]->data; + return 0; + } + + return -1; +} + +static int set_drmdesc_to_avbuff(AVDRMFrameDescriptor *desc, AVFrame *frame){ + int i; + + for(i=0; i<5; i++){ + if(i > 3) + return -1; + else if(!frame->buf[i]) + break; + } + + frame->buf[i] = av_buffer_create((unsigned char *) desc, sizeof(AVDRMFrameDescriptor), + rkmpp_release_drm_desc, desc, AV_BUFFER_FLAG_READONLY); + + return i; +} + +static int rga_scale(uint64_t src_fd, uint64_t src_y, uint16_t src_width, uint16_t src_height, uint16_t src_hstride, uint16_t src_vstride, + uint64_t dst_fd, uint64_t dst_y, uint16_t dst_width, uint16_t dst_height, 
uint16_t dst_hstride, uint16_t dst_vstride, + enum _Rga_SURF_FORMAT informat, enum _Rga_SURF_FORMAT outformat){ + rga_info_t src = {0}; + rga_info_t dst = {0}; + + if(dst_hstride < dst_width) + dst_width = FFALIGN(dst_width, RKMPP_STRIDE_ALIGN); + if(dst_vstride < dst_height) + dst_height = FFALIGN(dst_height, RKMPP_STRIDE_ALIGN); + if(src_hstride < src_width) + src_width = FFALIGN(src_width, RKMPP_STRIDE_ALIGN); + if(src_vstride < src_height) + src_height = FFALIGN(src_height, RKMPP_STRIDE_ALIGN); + + src.fd = src_fd; + src.virAddr = (void *)src_y; + src.mmuFlag = 1; + src.format = informat; + rga_set_rect(&src.rect, 0, 0, + src_width, src_height, src_hstride, src_vstride, informat); + + dst.fd = dst_fd; + dst.virAddr = (void *)dst_y; + dst.mmuFlag = 1; + dst.format = outformat; + rga_set_rect(&dst.rect, 0, 0, + dst_width, dst_height, dst_hstride, dst_vstride, outformat); + + return c_RkRgaBlit(&src, &dst, NULL); +} + +int rga_convert_mpp_mpp(AVCodecContext *avctx, MppFrame in_mppframe, MppFrame out_mppframe){ + RKMPPCodecContext *rk_context = avctx->priv_data; + RKMPPCodec *codec = (RKMPPCodec *)rk_context->codec_ref->data; + rkformat informat, outformat; + + if (!codec->norga){ + if(!out_mppframe) + return -1; + rkmpp_get_mpp_format(&informat, mpp_frame_get_fmt(in_mppframe) & MPP_FRAME_FMT_MASK); + rkmpp_get_mpp_format(&outformat, mpp_frame_get_fmt(out_mppframe) & MPP_FRAME_FMT_MASK); + if(rga_scale(mpp_buffer_get_fd(mpp_frame_get_buffer(in_mppframe)), 0, + mpp_frame_get_width(in_mppframe), mpp_frame_get_height(in_mppframe), + mpp_frame_get_hor_stride(in_mppframe), mpp_frame_get_ver_stride(in_mppframe), + mpp_buffer_get_fd(mpp_frame_get_buffer(out_mppframe)), 0, + mpp_frame_get_width(out_mppframe), mpp_frame_get_height(out_mppframe), + mpp_frame_get_hor_stride(out_mppframe), mpp_frame_get_ver_stride(out_mppframe), + informat.rga, + outformat.rga)){ + av_log(avctx, AV_LOG_WARNING, "RGA failed falling back to soft conversion\n"); + codec->norga = 1; // fallback to 
soft conversion + return -1; + } else + return 0; + } + + return -1; +} + +static void mpp_nv12_av_yuv420p_soft(MppFrame mppframe, AVFrame *frame){ + // warning: mpp frame must not be released until displayed + MppBuffer buffer = mpp_frame_get_buffer(mppframe); + int hstride = mpp_frame_get_hor_stride(mppframe); + int vstride = mpp_frame_get_ver_stride(mppframe); + + frame->data[0] = mpp_buffer_get_ptr(buffer); // use existing y plane + frame->linesize[0] = hstride; + + // convert only uv plane from semi-planar to planar + SplitUVPlane(frame->data[0] + hstride * vstride, hstride, + frame->data[1], frame->linesize[1], frame->data[2], frame->linesize[2], + (frame->width + 1) >> 1, (frame->height + 1) >> 1); +} + +static void mpp_nv16_av_yuv420p_soft(MppFrame mppframe, AVFrame *frame){ + // warning: mpp frame must not be released until displayed + MppBuffer buffer = mpp_frame_get_buffer(mppframe); + int hstride = mpp_frame_get_hor_stride(mppframe); + int vstride = mpp_frame_get_ver_stride(mppframe); + char *src = (char *)mpp_buffer_get_ptr(buffer) + hstride * vstride; + + // scale down uv plane by 2 and write it to y plane of avbuffer temporarily + UVScale(src, hstride, frame->width, frame->height, + frame->data[0], frame->linesize[0], + (frame->width + 1) >> 1, (frame->height + 1) >> 1, kFilterNone); + + // convert uv plane from semi-planar to planar + SplitUVPlane(frame->data[0], frame->linesize[0], + frame->data[1], frame->linesize[1], frame->data[2], frame->linesize[2], + (frame->width + 1) >> 1, (frame->height + 1) >> 1); + + // use existing y plane from mppbuffer + frame->data[0] = mpp_buffer_get_ptr(buffer); + frame->linesize[0] = hstride; +} + +static void mpp_nv16_av_nv12_soft(MppFrame mppframe, AVFrame *frame){ + // warning: mpp frame must not be released until displayed + MppBuffer buffer = mpp_frame_get_buffer(mppframe); + int hstride = mpp_frame_get_hor_stride(mppframe); + int vstride = mpp_frame_get_ver_stride(mppframe); + char *src = (char 
*)mpp_buffer_get_ptr(buffer) + hstride * vstride; + + // scale down uv plane by 2 and write it to uv plane of avbuffer + UVScale(src, hstride, frame->width, frame->height, + frame->data[1], frame->linesize[0], + (frame->width + 1) >> 1, (frame->height + 1) >> 1, kFilterNone); + + // use existing y plane from mppbuffer + frame->data[0] = mpp_buffer_get_ptr(buffer); + frame->linesize[0] = hstride; +} + +static MppFrame wrap_mpp_to_avframe(AVCodecContext *avctx, AVFrame *frame, MppFrame targetframe){ + RKMPPCodecContext *rk_context = avctx->priv_data; + RKMPPCodec *codec = (RKMPPCodec *)rk_context->codec_ref->data; + MppBuffer targetbuffer = NULL; + int planesize; + + if(!targetframe) + targetframe = create_mpp_frame(avctx->width, avctx->height, avctx->pix_fmt, codec->buffer_group, NULL, NULL); + + if(!targetframe) + return NULL; + + targetbuffer = mpp_frame_get_buffer(targetframe); + planesize = mpp_frame_get_hor_stride(targetframe) * mpp_frame_get_ver_stride(targetframe); + + frame->data[0] = mpp_buffer_get_ptr(targetbuffer); + frame->linesize[0] = mpp_frame_get_hor_stride(targetframe); + frame->width = avctx->width; + frame->height = avctx->height; + frame->extended_data = frame->data; + + switch(avctx->pix_fmt){ + case AV_PIX_FMT_YUV420P: + frame->data[1] = frame->data[0] + planesize; + frame->linesize[1] = (frame->linesize[0] + 1) >> 1; + frame->data[2] = frame->data[1] + ((planesize + 1) >> 2); + frame->linesize[2] = frame->linesize[1]; + return targetframe; + case AV_PIX_FMT_NV12: + frame->data[1] = frame->data[0] + planesize; + frame->linesize[1] = frame->linesize[0]; + return targetframe; + } + + rkmpp_release_mppframe(targetframe, NULL); + return NULL; +} + +MppFrame create_mpp_frame(int width, int height, enum AVPixelFormat avformat, MppBufferGroup buffer_group, AVDRMFrameDescriptor *desc, AVFrame *frame){ + MppFrame mppframe = NULL; + MppBuffer mppbuffer = NULL; + rkformat format; + int avmap[3][4]; //offset, dststride, width, height of max 3 planes + int 
size, ret, hstride, vstride; + int hstride_mult = 1; + int planes = 2; + int haspitch = 0; + int overshoot = 1024; + + ret = mpp_frame_init(&mppframe); + + if (ret) { + goto clean; + } + + vstride = FFALIGN(height, RKMPP_STRIDE_ALIGN); + + switch(avformat){ + case AV_PIX_FMT_NV12: + planes = 2; + hstride = FFALIGN(width, RKMPP_STRIDE_ALIGN); + // y plane + avmap[0][0] = 0; + avmap[0][1] = hstride; + avmap[0][2] = width, + avmap[0][3] = height; + // uv plane + avmap[1][0] = hstride * vstride; // uv offset = y plane size + avmap[1][1] = hstride; // uv stride = hstride + avmap[1][2] = width; // uv width = width + avmap[1][3] = (height + 1)>> 1; // uv height = height / 2 + size = avmap[1][0] + ((avmap[1][0] + 1) >> 1) + overshoot; // total size = y+uv planesize + break; + case AV_PIX_FMT_YUV420P: + planes = 3; + hstride = FFALIGN(width, RKMPP_STRIDE_ALIGN); + // y plane + avmap[0][0] = 0; + avmap[0][1] = hstride; + avmap[0][2] = width, + avmap[0][3] = height; + // u plane + avmap[1][0] = hstride * vstride; // u offset = y plane size + avmap[1][1] = (hstride + 1)>> 1; // u stride = hstride / 2 + avmap[1][2] = (width + 1)>> 1; // u width = width / 2 + avmap[1][3] = (height + 1)>> 1; // u height = height / 2 + // v plane + avmap[2][0] = avmap[1][0] + ((avmap[1][0] + 1) >> 2); // v offset = y+u plane size + avmap[2][1] = avmap[1][1]; // v stride = hstride / 2 + avmap[2][2] = avmap[1][2]; // v width = width / 2 + avmap[2][3] = avmap[1][3]; // v height = height / 2 + size = avmap[2][0] + ((avmap[1][0] + 1) >> 2) + overshoot; // total size = y+u+v planesize + break; + case AV_PIX_FMT_NV16: + planes = 2; + hstride = FFALIGN(width, RKMPP_STRIDE_ALIGN); + // y plane + avmap[0][0] = 0; + avmap[0][1] = hstride; + avmap[0][2] = width, + avmap[0][3] = height; + // uv plane + avmap[1][0] = hstride * vstride; // uv offset = y plane size + avmap[1][1] = hstride; // uv stride = hstride + avmap[1][2] = width; // uv width = width + avmap[1][3] = height; // uv height = height + size = 
avmap[1][0] * 2 + overshoot; // total size = y+uv planesize + break; + case AV_PIX_FMT_YUV422P: + planes = 3; + hstride = FFALIGN(width, RKMPP_STRIDE_ALIGN); + //y plane + avmap[0][0] = 0; + avmap[0][1] = hstride; + avmap[0][2] = width, + avmap[0][3] = height; + //u plane + avmap[1][0] = hstride * vstride; // u offset = y plane size + avmap[1][1] = (hstride + 1)>> 1; // u stride = hstride / 2 + avmap[1][2] = width; // u width = width + avmap[1][3] = height; // u height = height + //v plane + avmap[2][0] = avmap[1][0] + ((avmap[1][0] + 1) >> 1); // v offset = y+u plane size + avmap[2][1] = avmap[1][1]; // v stride = hstride + avmap[2][2] = avmap[1][2]; // v width = width + avmap[2][3] = avmap[1][3]; // v height = height / 2 + size = avmap[1][0] * 2 + overshoot; // total size = y+u+v planesize + break; + case AV_PIX_FMT_NV24: + planes = 2; + hstride = FFALIGN(width, RKMPP_STRIDE_ALIGN); + // y plane + avmap[0][0] = 0; + avmap[0][1] = hstride; + avmap[0][2] = width, + avmap[0][3] = height; + // uv plane + avmap[1][0] = hstride * vstride; // uv offset = y plane size + avmap[1][1] = hstride << 1; // uv stride = hstride * 2 + avmap[1][2] = width << 1; // uv width = width * 2 + avmap[1][3] = height; // uv height = height + size = avmap[1][0] * 3 + overshoot; // total size = y+u+v planesize + break; + case AV_PIX_FMT_YUV444P: + planes = 3; + hstride = FFALIGN(width, RKMPP_STRIDE_ALIGN); + //y plane + avmap[0][0] = 0; + avmap[0][1] = hstride; + avmap[0][2] = width, + avmap[0][3] = height; + //u plane + avmap[1][0] = hstride * vstride; // u offset = y plane size + avmap[1][1] = hstride; // u stride = hstride + avmap[1][2] = width; // u width = width + avmap[1][3] = height; // u height = height + //v plane + avmap[2][0] = avmap[1][0] * 2; // v offset = y+u plane size + avmap[2][1] = avmap[1][1]; // v stride = hstride + avmap[2][2] = avmap[1][2]; // v width = width + avmap[2][3] = avmap[1][3]; // v height = height + size = avmap[1][0] * 3 + overshoot; // total size = y+u+v 
planesize + break; + case AV_PIX_FMT_YUYV422: + case AV_PIX_FMT_UYVY422: + planes = 1; + haspitch = 1; + hstride_mult = 2; + hstride = FFALIGN(width * hstride_mult, RKMPP_STRIDE_ALIGN); + avmap[0][0] = 0; + avmap[0][1] = hstride; + avmap[0][2] = width << 1, + avmap[0][3] = height; + size = hstride * vstride; + break; + case AV_PIX_FMT_RGB24: + case AV_PIX_FMT_BGR24: + haspitch = 1; + hstride_mult = 3; + hstride = FFALIGN(width * hstride_mult, RKMPP_STRIDE_ALIGN); + avmap[0][0] = 0; + avmap[0][1] = hstride; + avmap[0][2] = width * 3, + avmap[0][3] = height; + size = hstride * vstride; + planes = 1; + break; + case AV_PIX_FMT_0RGB: + case AV_PIX_FMT_0BGR: + case AV_PIX_FMT_BGR0: + case AV_PIX_FMT_RGB0: + case AV_PIX_FMT_ARGB: + case AV_PIX_FMT_ABGR: + case AV_PIX_FMT_BGRA: + case AV_PIX_FMT_RGBA: + haspitch = 1; + hstride_mult = 4; + hstride = FFALIGN(width * hstride_mult, RKMPP_STRIDE_ALIGN); + avmap[0][0] = 0; + avmap[0][1] = hstride; + avmap[0][2] = width << 2, + avmap[0][3] = height; + size = hstride * vstride; + planes = 1; + break; + } + + if(desc){ + MppBufferInfo info; + AVDRMLayerDescriptor *layer = &desc->layers[0]; + rkmpp_get_drm_format(&format, layer->format); + + size = desc->objects[0].size; + if(haspitch) + hstride = layer->planes[0].pitch; + else + hstride = layer->planes[0].pitch * hstride_mult; + + if(planes == 1) + vstride = size / hstride; + else + vstride = layer->planes[1].offset / hstride; + + memset(&info, 0, sizeof(info)); + info.type = MPP_BUFFER_TYPE_DRM; + info.size = size; + info.fd = desc->objects[0].fd; + + ret = mpp_buffer_import(&mppbuffer, &info); + rkmpp_get_drm_format(&format, layer->format); + } else { + ret = mpp_buffer_get(buffer_group, &mppbuffer, size); + rkmpp_get_av_format(&format, avformat); + } + + if (ret) + goto clean; + + mpp_frame_set_width(mppframe, width); + mpp_frame_set_height(mppframe, height); + mpp_frame_set_fmt(mppframe, format.mpp); + mpp_frame_set_hor_stride(mppframe, hstride); + 
mpp_frame_set_ver_stride(mppframe, vstride); + mpp_frame_set_buffer(mppframe, mppbuffer); + mpp_frame_set_buf_size(mppframe, size); + mpp_buffer_put(mppbuffer); + + if(frame){ + for(int i = 0; i < planes; i++){ + CopyPlane(frame->data[i], frame->linesize[i], + (char *)mpp_buffer_get_ptr(mppbuffer) + avmap[i][0], avmap[i][1], avmap[i][2], avmap[i][3]); + } + } + + return mppframe; + +clean: + if(mppbuffer) + mpp_buffer_put(mppbuffer); + if(mppframe) + mpp_frame_deinit(&mppframe); + return mppframe; +} +//for decoder +int mpp_nv15_av_yuv420p(AVCodecContext *avctx, MppFrame nv15frame, AVFrame *frame){ + // rga1 which supports yuv420P output does not support nv15 input + // therefore this first converts NV15->NV12 with rga2 than NV12 -> yuv420P with libyuv + RKMPPCodecContext *rk_context = avctx->priv_data; + RKMPPCodec *codec = (RKMPPCodec *)rk_context->codec_ref->data; + MppFrame nv12frame = create_mpp_frame(mpp_frame_get_width(nv15frame), mpp_frame_get_height(nv15frame), + AV_PIX_FMT_NV12, codec->buffer_group, NULL, NULL); + MppFrame yuv420pframe = NULL; + int ret = rga_convert_mpp_mpp(avctx, nv15frame, nv12frame); + + rkmpp_release_mppframe(nv15frame, NULL); + + if(!ret){ + MppFrame yuv420pframe = wrap_mpp_to_avframe(avctx, frame, NULL); + if(yuv420pframe && + !set_mppframe_to_avbuff(nv12frame, frame, RKMPP_MPPFRAME_BUFINDEX) && + !set_mppframe_to_avbuff(yuv420pframe, frame, RKMPP_MPPFRAME_BUFINDEX - 1)){ + mpp_nv12_av_yuv420p_soft(nv12frame, frame); + return 0; + } + } + + if(nv12frame) + rkmpp_release_mppframe(nv12frame, NULL); + if(yuv420pframe) + rkmpp_release_mppframe(yuv420pframe, NULL); + return -1; +} + +//for decoder +int mpp_nv12_av_nv12(AVCodecContext *avctx, MppFrame mppframe, AVFrame *frame){ + if(wrap_mpp_to_avframe(avctx, frame, mppframe)){ + return set_mppframe_to_avbuff(mppframe, frame, RKMPP_MPPFRAME_BUFINDEX); + } + + rkmpp_release_mppframe(mppframe, NULL); + return -1; +} +//for decoder +int mpp_nv15_av_nv12(AVCodecContext *avctx, MppFrame 
nv15frame, AVFrame *frame){ + RKMPPCodecContext *rk_context = avctx->priv_data; + RKMPPCodec *codec = (RKMPPCodec *)rk_context->codec_ref->data; + MppFrame nv12frame = create_mpp_frame(mpp_frame_get_width(nv15frame), mpp_frame_get_height(nv15frame), + AV_PIX_FMT_NV12, codec->buffer_group, NULL, NULL); + int ret = rga_convert_mpp_mpp(avctx, nv15frame, nv12frame); + + rkmpp_release_mppframe(nv15frame, NULL); + + if(!ret){ + ret = mpp_nv12_av_nv12(avctx, nv12frame, frame); + } else { + if(nv12frame) + rkmpp_release_mppframe(nv12frame, NULL); + av_log(avctx, AV_LOG_ERROR, "RGA failed to convert NV15 -> NV12. No Soft Conversion Possible\n"); + } + + return ret; +} + +int convert_mpp_to_av(AVCodecContext *avctx, MppFrame mppframe, AVFrame *frame, + enum AVPixelFormat informat, enum AVPixelFormat outformat){ + MppFrame targetframe = wrap_mpp_to_avframe(avctx, frame, NULL); + int ret=0; + + if(!targetframe){ + rkmpp_release_mppframe(mppframe, NULL); + return -1; + } + + if(set_mppframe_to_avbuff(targetframe, frame, RKMPP_MPPFRAME_BUFINDEX - 1)) + return -1; + if(set_mppframe_to_avbuff(mppframe, frame, RKMPP_MPPFRAME_BUFINDEX)) + return -1; + + if(rga_convert_mpp_mpp(avctx, mppframe, targetframe)){ + if (informat == AV_PIX_FMT_NV16 && outformat == AV_PIX_FMT_NV12) + mpp_nv16_av_nv12_soft(mppframe, frame); + else if (informat == AV_PIX_FMT_NV16 && outformat == AV_PIX_FMT_YUV420P) + mpp_nv16_av_yuv420p_soft(mppframe, frame); + else if (informat == AV_PIX_FMT_NV12 && outformat == AV_PIX_FMT_YUV420P) + mpp_nv12_av_yuv420p_soft(mppframe, frame); + else { + ret = -1; + av_log(avctx, AV_LOG_ERROR, "No software conversion for %s -> %s available\n", + av_get_pix_fmt_name(informat), av_get_pix_fmt_name(outformat)); + } + } + + return ret; +} + +MppFrame import_drm_to_mpp(AVCodecContext *avctx, AVFrame *frame){ + RKMPPCodecContext *rk_context = avctx->priv_data; + RKMPPCodec *codec = (RKMPPCodec *)rk_context->codec_ref->data; + MppFrame mppframe = NULL; + AVDRMFrameDescriptor *desc = 
(AVDRMFrameDescriptor*) frame->data[0]; + AVDRMLayerDescriptor *layer = &desc->layers[0]; + rkformat format; + char drmname[4]; + DRMFORMATNAME(drmname, layer->format) + + if(rkmpp_get_drm_format(&format, layer->format)){ + av_log(avctx, AV_LOG_ERROR, "Unsupported DRM Format %s\n", drmname); + return NULL; + } + + if(format.drm == DRM_FORMAT_NV15){ + // encoder does not support 10bit frames, we down scale them to 8bit + MppFrame nv15frame = create_mpp_frame(frame->width, frame->height, AV_PIX_FMT_NONE, NULL, desc, NULL); + if(nv15frame){ + mppframe = create_mpp_frame(frame->width, frame->height, AV_PIX_FMT_NV12, codec->buffer_group, NULL, NULL); + if(mppframe && rga_convert_mpp_mpp(avctx, nv15frame, mppframe)) + rkmpp_release_mppframe(mppframe, NULL); + rkmpp_release_mppframe(nv15frame, NULL); + } + } else { + mppframe = create_mpp_frame(frame->width, frame->height, format.av, NULL, desc, NULL); + } + + return mppframe; +} + +int import_mpp_to_drm(AVCodecContext *avctx, MppFrame mppframe, AVFrame *frame) +{ + // mppframe & desc is cleared when AVFrame is released + RKMPPCodecContext *rk_context = avctx->priv_data; + RKMPPCodec *codec = (RKMPPCodec *)rk_context->codec_ref->data; + AVDRMFrameDescriptor *desc = NULL; + AVDRMLayerDescriptor *layer = NULL; + rkformat format; + MppBuffer buffer = mpp_frame_get_buffer(mppframe); + int hstride = mpp_frame_get_hor_stride(mppframe); + int vstride = mpp_frame_get_ver_stride(mppframe); + int ret; + + rkmpp_get_mpp_format(&format, mpp_frame_get_fmt(mppframe) & MPP_FRAME_FMT_MASK); + + if(set_mppframe_to_avbuff(mppframe, frame, -1)){ + ret = AVERROR(ENOMEM); + goto error; + } + + desc = av_mallocz(sizeof(AVDRMFrameDescriptor)); + if (!desc || set_drmdesc_to_avbuff(desc, frame) < 0) { + ret = AVERROR(ENOMEM); + goto error; + } + + desc->nb_objects = 1; + desc->objects[0].fd = mpp_buffer_get_fd(buffer); + desc->objects[0].size = mpp_buffer_get_size(buffer); + + desc->nb_layers = 1; + layer = &desc->layers[0]; + layer->format = 
format.drm; + layer->nb_planes = 2; + + layer->planes[0].object_index = 0; + layer->planes[0].offset = 0; + layer->planes[0].pitch = hstride; + + layer->planes[1].object_index = 0; + layer->planes[1].offset = hstride * vstride; + layer->planes[1].pitch = hstride; + + frame->data[0] = (uint8_t *)desc; + + frame->hw_frames_ctx = av_buffer_ref(codec->hwframes_ref); + if (!frame->hw_frames_ctx) { + ret = AVERROR(ENOMEM); + goto error; + } + + return 0; + +error: + av_log(avctx, AV_LOG_ERROR, "Memory Error during importing mpp frame to drmprime\n"); + if (mppframe) + rkmpp_release_mppframe(mppframe, NULL); + if (desc) + rkmpp_release_drm_desc(desc, NULL); + + return ret; +} + +MppFrame get_mppframe_from_av(AVFrame *frame){ + if(frame->data[RKMPP_MPPFRAME_BUFINDEX]){ + rkmpp_frame_type * mppframe = (rkmpp_frame_type *) frame->data[RKMPP_MPPFRAME_BUFINDEX]; + if(mppframe->name && !strcmp(mppframe->name, "mpp_frame") && + mpp_frame_get_fmt(frame->data[RKMPP_MPPFRAME_BUFINDEX]) != MPP_FMT_YUV420SP_10BIT) + return frame->data[RKMPP_MPPFRAME_BUFINDEX]; + } + return NULL; +} diff --git a/libavcodec/rkplane.h b/libavcodec/rkplane.h new file mode 100644 index 0000000000..f52d29228b --- /dev/null +++ b/libavcodec/rkplane.h @@ -0,0 +1,17 @@ +#include +#include "avcodec.h" + +typedef struct { + char *name; +} rkmpp_frame_type; + +int rga_convert_mpp_mpp(AVCodecContext *avctx, MppFrame in_mppframe, MppFrame out_mppframe); +int mpp_nv15_av_yuv420p(AVCodecContext *avctx, MppFrame mppframe, AVFrame *frame); +int mpp_nv15_av_nv12(AVCodecContext *avctx, MppFrame mppframe, AVFrame *frame); +int mpp_nv12_av_nv12(AVCodecContext *avctx, MppFrame mppframe, AVFrame *frame); +int convert_mpp_to_av(AVCodecContext *avctx, MppFrame mppframe, AVFrame *frame, + enum AVPixelFormat informat, enum AVPixelFormat outformat); +MppFrame create_mpp_frame(int width, int height, enum AVPixelFormat avformat, MppBufferGroup buffer_group, AVDRMFrameDescriptor *desc, AVFrame *frame); +MppFrame 
import_drm_to_mpp(AVCodecContext *avctx, AVFrame *frame); +int import_mpp_to_drm(AVCodecContext *avctx, MppFrame mppframe, AVFrame *frame); +MppFrame get_mppframe_from_av(AVFrame *frame); -- 2.41.0 From f3fcb573fb6cf83dd5c9f144592fb047a575d833 Mon Sep 17 00:00:00 2001 From: boogie Date: Mon, 24 Jul 2023 19:27:37 +0200 Subject: [PATCH 10/13] lavu/hwcontext_drm: Add internal frame allocation --- libavutil/hwcontext_drm.c | 322 +++++++++++++++++++++++++++++++++++++- libavutil/hwcontext_drm.h | 14 +- 2 files changed, 328 insertions(+), 8 deletions(-) diff --git a/libavutil/hwcontext_drm.c b/libavutil/hwcontext_drm.c index 7a9fdbd263..2d1962c9c0 100644 --- a/libavutil/hwcontext_drm.c +++ b/libavutil/hwcontext_drm.c @@ -18,6 +18,7 @@ #include "config.h" +#define _GNU_SOURCE #include #include #include @@ -30,7 +31,7 @@ #include #endif -#include +#include #include #include "avassert.h" @@ -39,6 +40,103 @@ #include "hwcontext_internal.h" #include "imgutils.h" +/** + * Copy from libdrm_macros.h while is not exposed by libdrm, + * be replaced by #include "libdrm_macros.h" someday. + */ + +/** + * Static (compile-time) assertion. + * Basically, use COND to dimension an array. If COND is false/zero the + * array size will be -1 and we'll get a compilation error. 
+ */ +#define STATIC_ASSERT(COND) \ + do { \ + (void) sizeof(char [1 - 2*!(COND)]); \ + } while (0) + +#if defined(ANDROID) && !defined(__LP64__) +#include /* for EINVAL */ + +extern void *__mmap2(void *, size_t, int, int, int, size_t); + +static inline void *drm_mmap(void *addr, size_t length, int prot, int flags, + int fd, loff_t offset) +{ + /* offset must be aligned to 4096 (not necessarily the page size) */ + if (offset & 4095) { + errno = EINVAL; + return MAP_FAILED; + } + + return __mmap2(addr, length, prot, flags, fd, (size_t) (offset >> 12)); +} + +# define drm_munmap(addr, length) \ + munmap(addr, length) + +#else + +/* assume large file support exists */ +# define drm_mmap(addr, length, prot, flags, fd, offset) \ + mmap(addr, length, prot, flags, fd, offset) + +static inline int drm_munmap(void *addr, size_t length) +{ + /* Copied from configure code generated by AC_SYS_LARGEFILE */ +#define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + \ + (((off_t) 1 << 31) << 31)) + STATIC_ASSERT(LARGE_OFF_T % 2147483629 == 721 && + LARGE_OFF_T % 2147483647 == 1); +#undef LARGE_OFF_T + + return munmap(addr, length); +} +#endif + +// default +static int card_index = 0; + +static const struct { + enum AVPixelFormat pixfmt; + uint32_t drm_format; +} supported_formats[] = { + { AV_PIX_FMT_NV12, DRM_FORMAT_NV12, }, +//#ifdef DRM_FORMAT_NV12_10 +// { AV_PIX_FMT_P010LE, DRM_FORMAT_NV12_10, }, +// { AV_PIX_FMT_P010BE, DRM_FORMAT_NV12_10 | DRM_FORMAT_BIG_ENDIAN }, +//#endif + { AV_PIX_FMT_NV21, DRM_FORMAT_NV21, }, + { AV_PIX_FMT_YUV420P, DRM_FORMAT_YUV420, }, + { AV_PIX_FMT_YUYV422, DRM_FORMAT_YUYV, }, + { AV_PIX_FMT_YVYU422, DRM_FORMAT_YVYU, }, + { AV_PIX_FMT_UYVY422, DRM_FORMAT_UYVY, }, + { AV_PIX_FMT_NV16, DRM_FORMAT_NV16, }, + { AV_PIX_FMT_YUV422P, DRM_FORMAT_YUV422, }, +#ifdef DRM_FORMAT_R16 + { AV_PIX_FMT_GRAY16LE, DRM_FORMAT_R16, }, + { AV_PIX_FMT_GRAY16BE, DRM_FORMAT_R16 | DRM_FORMAT_BIG_ENDIAN }, +#endif + { AV_PIX_FMT_BGR8, DRM_FORMAT_BGR233, }, + { 
AV_PIX_FMT_RGB555LE, DRM_FORMAT_XRGB1555, }, + { AV_PIX_FMT_RGB555BE, DRM_FORMAT_XRGB1555 | DRM_FORMAT_BIG_ENDIAN }, + { AV_PIX_FMT_BGR555LE, DRM_FORMAT_XBGR1555, }, + { AV_PIX_FMT_BGR555BE, DRM_FORMAT_XBGR1555 | DRM_FORMAT_BIG_ENDIAN }, + { AV_PIX_FMT_RGB565LE, DRM_FORMAT_RGB565, }, + { AV_PIX_FMT_RGB565BE, DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN }, + { AV_PIX_FMT_BGR565LE, DRM_FORMAT_BGR565, }, + { AV_PIX_FMT_BGR565BE, DRM_FORMAT_BGR565 | DRM_FORMAT_BIG_ENDIAN }, + { AV_PIX_FMT_RGB24, DRM_FORMAT_RGB888, }, + { AV_PIX_FMT_BGR24, DRM_FORMAT_BGR888, }, + { AV_PIX_FMT_0RGB, DRM_FORMAT_BGRX8888, }, + { AV_PIX_FMT_0BGR, DRM_FORMAT_RGBX8888, }, + { AV_PIX_FMT_RGB0, DRM_FORMAT_XBGR8888, }, + { AV_PIX_FMT_BGR0, DRM_FORMAT_XRGB8888, }, + { AV_PIX_FMT_ARGB, DRM_FORMAT_BGRA8888, }, + { AV_PIX_FMT_ABGR, DRM_FORMAT_RGBA8888, }, + { AV_PIX_FMT_RGBA, DRM_FORMAT_ABGR8888, }, + { AV_PIX_FMT_BGRA, DRM_FORMAT_ARGB8888, }, +}; static void drm_device_free(AVHWDeviceContext *hwdev) { @@ -52,7 +150,14 @@ static int drm_device_create(AVHWDeviceContext *hwdev, const char *device, { AVDRMDeviceContext *hwctx = hwdev->hwctx; drmVersionPtr version; - + char drm_dev[] = "/dev/dri/card0000"; + uint64_t has_dumb; + + if (!device) { + snprintf(drm_dev, sizeof(drm_dev), DRM_DEV_NAME, DRM_DIR_NAME, + card_index); + device = drm_dev; + } hwctx->fd = open(device, O_RDWR); if (hwctx->fd < 0) return AVERROR(errno); @@ -72,11 +177,219 @@ static int drm_device_create(AVHWDeviceContext *hwdev, const char *device, drmFreeVersion(version); + if (drmGetCap(hwctx->fd, DRM_CAP_DUMB_BUFFER, &has_dumb) < 0 || + !has_dumb) { + av_log(hwdev, AV_LOG_ERROR, "drm device '%s' " + "does not support dumb buffers\n", device); + close(hwctx->fd); + return AVERROR(EINVAL); + } + hwdev->free = &drm_device_free; return 0; } +static int drm_frames_get_constraints(AVHWDeviceContext *hwdev, + const void *hwconfig, + AVHWFramesConstraints *constraints) +{ + int i; + + constraints->min_width = 16; + constraints->min_height = 
16; + + constraints->valid_hw_formats = + av_malloc_array(2, sizeof(enum AVPixelFormat)); + if (!constraints->valid_hw_formats) + return AVERROR(ENOMEM); + constraints->valid_hw_formats[0] = AV_PIX_FMT_DRM_PRIME; + constraints->valid_hw_formats[1] = AV_PIX_FMT_NONE; + + constraints->valid_sw_formats = + av_malloc_array(FF_ARRAY_ELEMS(supported_formats) + 1, + sizeof(enum AVPixelFormat)); + if (!constraints->valid_sw_formats) + return AVERROR(ENOMEM); + for(i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++) + constraints->valid_sw_formats[i] = supported_formats[i].pixfmt; + constraints->valid_sw_formats[i] = AV_PIX_FMT_NONE; + + return 0; +} + +static void free_drm_frame_descriptor(AVDRMDeviceContext *hwctx, + AVDRMFrameDescriptor *desc) +{ + int i; + if (!desc) + return; + + for (i = 0; i < desc->nb_objects; i++) { + AVDRMObjectDescriptor *object = &desc->objects[i]; + if (object->ptr) + drm_munmap(object->ptr, object->size); + if (object->fd > 0) { + int ret; + uint32_t handle = 0; + + ret = drmPrimeFDToHandle(hwctx->fd, object->fd, &handle); + if (ret) + av_log(NULL, AV_LOG_WARNING, + "Failed to convert drm fd to handle: %m\n"); + if (handle > 0) { + struct drm_mode_destroy_dumb data = { + .handle = handle, + }; + ret = drmIoctl(hwctx->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &data); + if (ret) + av_log(NULL, AV_LOG_WARNING, + "Failed to free drm handle: %m\n"); + } + + ret = close(object->fd); + if (ret) + av_log(NULL, AV_LOG_WARNING, + "Failed to close drm buffer fd = %d: %m\n", object->fd); + } + } + + memset(desc, 0, sizeof(*desc)); + av_free(desc); +} + +static void drm_buffer_free(void *opaque, uint8_t *data) +{ + AVHWFramesContext *hwfc = opaque; + AVDRMDeviceContext *hwctx = hwfc->device_ctx->hwctx; + AVDRMFrameDescriptor *desc = (AVDRMFrameDescriptor *)data; + + free_drm_frame_descriptor(hwctx, desc); +} + +static AVBufferRef *drm_pool_alloc(void *opaque, size_t size) +{ + int ret; + AVHWFramesContext *hwfc = opaque; + AVDRMDeviceContext *hwctx = 
hwfc->device_ctx->hwctx; + AVDRMFrameDescriptor *desc; + AVDRMLayerDescriptor *layer; + AVBufferRef *ref; + + int i; + const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(hwfc->sw_format); + struct drm_mode_create_dumb dmcb; + struct drm_mode_map_dumb dmmd; + + desc = av_mallocz(sizeof(*desc)); + if (!desc) + return NULL; + + memset(&dmcb, 0, sizeof(struct drm_mode_create_dumb)); + dmcb.bpp = av_get_bits_per_pixel(pixdesc); + dmcb.width = FFALIGN(hwfc->width, 16); + dmcb.height = FFALIGN(hwfc->height, 16); + ret = drmIoctl(hwctx->fd, DRM_IOCTL_MODE_CREATE_DUMB, &dmcb); + if (ret < 0) { + av_log(hwfc, AV_LOG_ERROR, + "Failed to create dumb: %m.\n", + dmcb.width, dmcb.height, dmcb.bpp); + goto fail; + } + av_assert0(dmcb.size >= dmcb.width * dmcb.height * dmcb.bpp / 8); + + desc->nb_objects = 1; + desc->nb_layers = 1; + ret = drmPrimeHandleToFD(hwctx->fd, dmcb.handle, DRM_CLOEXEC | DRM_RDWR, + &desc->objects[0].fd); + if (ret) { + av_log(hwfc, AV_LOG_ERROR, "Failed to convert handle to fd: %m\n"); + goto fail; + } + memset(&dmmd, 0, sizeof(dmmd)); + dmmd.handle = dmcb.handle; + + ret = drmIoctl(hwctx->fd, DRM_IOCTL_MODE_MAP_DUMB, &dmmd); + if (ret) { + av_log(hwfc, AV_LOG_ERROR, "Failed to map dumb: %m\n"); + goto fail; + } + + // default read and write + desc->objects[0].ptr = drm_mmap(NULL, dmcb.size, PROT_READ | PROT_WRITE, + MAP_SHARED, hwctx->fd, dmmd.offset); + if (desc->objects[0].ptr == MAP_FAILED) { + av_log(hwfc, AV_LOG_ERROR, "Failed to drm_mmap: %m\n"); + goto fail; + } + + desc->objects[0].size = dmcb.size; + + layer = &desc->layers[0]; + for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++) { + if (supported_formats[i].pixfmt == hwfc->sw_format) { + layer->format = supported_formats[i].drm_format; + break; + } + } + layer->nb_planes = av_pix_fmt_count_planes(hwfc->sw_format); + layer->planes[0].object_index = 0; + layer->planes[0].offset = 0; + layer->planes[0].pitch = + av_image_get_linesize(hwfc->sw_format, hwfc->width, 0); + + for (i = 1; i < 
layer->nb_planes; i++) { + layer->planes[i].object_index = 0; + layer->planes[i].offset = layer->planes[i-1].pitch * hwfc->height; + layer->planes[i].pitch = + av_image_get_linesize(hwfc->sw_format, hwfc->width, i); + } + + ref = av_buffer_create((uint8_t*)desc, sizeof(*desc), drm_buffer_free, + opaque, 0); + if (!ref) { + av_log(hwfc, AV_LOG_ERROR, "Failed to create drm buffer.\n"); + goto fail; + } + + return ref; + +fail: + free_drm_frame_descriptor(hwctx, desc); + return NULL; +} + +static int drm_frames_init(AVHWFramesContext *hwfc) +{ + int i; + if (hwfc->pool) { + // has been set outside? + return 0; + } + + for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++) + if (supported_formats[i].pixfmt == hwfc->sw_format) + break; + if (i >= FF_ARRAY_ELEMS(supported_formats)) { + av_log(hwfc, AV_LOG_ERROR, "Unsupported format: %s.\n", + av_get_pix_fmt_name(hwfc->sw_format)); + return AVERROR(EINVAL); + } + + hwfc->internal->pool_internal = + av_buffer_pool_init2(sizeof(AVDRMFrameDescriptor), hwfc, drm_pool_alloc, + NULL); + if (!hwfc->internal->pool_internal) { + av_log(hwfc, AV_LOG_ERROR, "Failed to create drm buffer pool.\n"); + return AVERROR(ENOMEM); + } + + return 0; +} + +static void drm_frames_uninit(AVHWFramesContext *hwfc av_unused) +{} + static int drm_get_buffer(AVHWFramesContext *hwfc, AVFrame *frame) { frame->buf[0] = av_buffer_pool_get(hwfc->pool); @@ -309,8 +622,11 @@ const HWContextType ff_hwcontext_type_drm = { .device_create = &drm_device_create, - .frames_get_buffer = &drm_get_buffer, + .frames_get_constraints = &drm_frames_get_constraints, + .frames_get_buffer = &drm_get_buffer, + .frames_init = &drm_frames_init, + .frames_uninit = &drm_frames_uninit, .transfer_get_formats = &drm_transfer_get_formats, .transfer_data_to = &drm_transfer_data_to, .transfer_data_from = &drm_transfer_data_from, diff --git a/libavutil/hwcontext_drm.h b/libavutil/hwcontext_drm.h index 42709f215e..80f2e8597e 100644 --- a/libavutil/hwcontext_drm.h +++ 
b/libavutil/hwcontext_drm.h @@ -25,11 +25,6 @@ /** * @file * API-specific header for AV_HWDEVICE_TYPE_DRM. - * - * Internal frame allocation is not currently supported - all frames - * must be allocated by the user. Thus AVHWFramesContext is always - * NULL, though this may change if support for frame allocation is - * added in future. */ enum { @@ -50,6 +45,12 @@ typedef struct AVDRMObjectDescriptor { * DRM PRIME fd for the object. */ int fd; + /** + * DRM PRIME mapped virtual ptr for above fd. + * + * The content of this buffer must be read-only when acting as the decoder's output buffer. + */ + void *ptr; /** * Total size of the object. * @@ -141,10 +142,13 @@ typedef struct AVDRMFrameDescriptor { AVDRMObjectDescriptor objects[AV_DRM_MAX_PLANES]; /** * Number of layers in the frame. + * + * Set by users if more than one layer is needed. */ int nb_layers; /** * Array of layers in the frame. + * NOTE: total planes of layers must not be more than AV_NUM_DATA_POINTERS. */ AVDRMLayerDescriptor layers[AV_DRM_MAX_PLANES]; } AVDRMFrameDescriptor; -- 2.41.0 From 51962528d54301898213fb92bb593a707f2bf87b Mon Sep 17 00:00:00 2001 From: boogie Date: Mon, 24 Jul 2023 22:19:14 +0200 Subject: [PATCH 11/13] add yuv444 & nv24 support for drm hwcontext --- libavutil/hwcontext_drm.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libavutil/hwcontext_drm.c b/libavutil/hwcontext_drm.c index 2d1962c9c0..9210b6c9f5 100644 --- a/libavutil/hwcontext_drm.c +++ b/libavutil/hwcontext_drm.c @@ -117,6 +117,8 @@ static const struct { { AV_PIX_FMT_GRAY16LE, DRM_FORMAT_R16, }, { AV_PIX_FMT_GRAY16BE, DRM_FORMAT_R16 | DRM_FORMAT_BIG_ENDIAN }, #endif + { AV_PIX_FMT_NV24, DRM_FORMAT_NV24, }, + { AV_PIX_FMT_YUV444P, DRM_FORMAT_YUV444, }, { AV_PIX_FMT_BGR8, DRM_FORMAT_BGR233, }, { AV_PIX_FMT_RGB555LE, DRM_FORMAT_XRGB1555, }, { AV_PIX_FMT_RGB555BE, DRM_FORMAT_XRGB1555 | DRM_FORMAT_BIG_ENDIAN }, -- 2.41.0 From de5398097e8c3aa9f2ef69b50d8be63c87932a19 Mon Sep 17 00:00:00 2001 From: boogie Date: Fri, 28 Jul 2023 22:08:47
+0200 Subject: [PATCH 12/13] add AV_PIX_FMT_NV15 definition as a placeholder to enable supported DRM frames --- libavcodec/rkmpp.c | 2 +- libavutil/hwcontext_drm.c | 1 + libavutil/pixfmt.h | 2 ++ 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/libavcodec/rkmpp.c b/libavcodec/rkmpp.c index a33a139569..22bd4c2171 100644 --- a/libavcodec/rkmpp.c +++ b/libavcodec/rkmpp.c @@ -27,7 +27,7 @@ static rkformat rkformats[13] = { { .av = AV_PIX_FMT_YUV422P, .mpp = MPP_FMT_YUV422P, .drm = DRM_FORMAT_YUV422, .rga = RK_FORMAT_YCbCr_422_P}, { .av = AV_PIX_FMT_NV12, .mpp = MPP_FMT_YUV420SP, .drm = DRM_FORMAT_NV12, .rga = RK_FORMAT_YCbCr_420_SP}, { .av = AV_PIX_FMT_NV16, .mpp = MPP_FMT_YUV422SP, .drm = DRM_FORMAT_NV16, .rga = RK_FORMAT_YCbCr_422_SP}, - { .av = AV_PIX_FMT_NONE, .mpp = MPP_FMT_YUV420SP_10BIT, .drm = DRM_FORMAT_NV15, .rga = RK_FORMAT_YCbCr_420_SP_10B}, + { .av = AV_PIX_FMT_NV15, .mpp = MPP_FMT_YUV420SP_10BIT, .drm = DRM_FORMAT_NV15, .rga = RK_FORMAT_YCbCr_420_SP_10B}, { .av = AV_PIX_FMT_BGR24, .mpp = MPP_FMT_BGR888, .drm = DRM_FORMAT_BGR888, .rga = RK_FORMAT_BGR_888}, { .av = AV_PIX_FMT_BGR0, .mpp = MPP_FMT_BGRA8888, .drm = DRM_FORMAT_XRGB8888, .rga = RK_FORMAT_BGRX_8888}, { .av = AV_PIX_FMT_BGRA, .mpp = MPP_FMT_BGRA8888, .drm = DRM_FORMAT_ARGB8888, .rga = RK_FORMAT_BGRA_8888}, diff --git a/libavutil/hwcontext_drm.c b/libavutil/hwcontext_drm.c index 9210b6c9f5..32bf958c5d 100644 --- a/libavutil/hwcontext_drm.c +++ b/libavutil/hwcontext_drm.c @@ -138,6 +138,7 @@ static const struct { { AV_PIX_FMT_ABGR, DRM_FORMAT_RGBA8888, }, { AV_PIX_FMT_RGBA, DRM_FORMAT_ABGR8888, }, { AV_PIX_FMT_BGRA, DRM_FORMAT_ARGB8888, }, + { AV_PIX_FMT_NV15, DRM_FORMAT_NV15, }, }; static void drm_device_free(AVHWDeviceContext *hwdev) diff --git a/libavutil/pixfmt.h b/libavutil/pixfmt.h index 46ef211add..b04975fba0 100644 --- a/libavutil/pixfmt.h +++ b/libavutil/pixfmt.h @@ -360,6 +360,8 @@ enum AVPixelFormat { AV_PIX_FMT_X2RGB10LE, ///< packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 
10B(lsb), little-endian, X=unused/undefined AV_PIX_FMT_X2RGB10BE, ///< packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), big-endian, X=unused/undefined + AV_PIX_FMT_NV15, ///< Rockchip-specific 10-bit variant of NV12 + AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions }; -- 2.41.0 From 0e4b85d085cb8ce785185c92c91ba90411a20ed4 Mon Sep 17 00:00:00 2001 From: boogie Date: Mon, 31 Jul 2023 01:32:54 +0200 Subject: [PATCH 13/13] give pixel descriptor to prevent crashes. descriptor might not be 100% correct --- libavutil/pixdesc.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/libavutil/pixdesc.c b/libavutil/pixdesc.c index 18c7a0efc8..3216dc19e3 100644 --- a/libavutil/pixdesc.c +++ b/libavutil/pixdesc.c @@ -2395,6 +2395,18 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = { .name = "vulkan", .flags = AV_PIX_FMT_FLAG_HWACCEL, }, + [AV_PIX_FMT_NV15] = { + .name = "nv15", + .nb_components = 3, + .log2_chroma_w = 1, + .log2_chroma_h = 1, + .comp = { + { 0, 1, 0, 0, 10 }, /* Y */ + { 1, 2, 0, 0, 10 }, /* U */ + { 1, 2, 1, 0, 10 }, /* V */ + }, + .flags = AV_PIX_FMT_FLAG_PLANAR, + }, }; #if FF_API_PLUS1_MINUS1 FF_ENABLE_DEPRECATION_WARNINGS -- 2.41.0