author     egnappahz  2022-07-23 12:10:31 +0200
committer  egnappahz  2022-07-23 12:10:31 +0200
commit     fc73b56541b299c9f74cc17501a3371871ff87dd (patch)
tree       1a2444192ea699acc2e571e703f244bd4619a37d
parent     efb709e85c8ea7190633372838472f08ddc398e8 (diff)
download   aur-fc73b56541b299c9f74cc17501a3371871ff87dd.tar.gz
Updated
-rw-r--r--  .SRCINFO                                                        |  10
-rw-r--r--  010-ffmpeg-lavc-svt_hevc-add-libsvt-hevc-encoder-wrapper.patch  | 674
-rw-r--r--  030-ffmpeg-Add-ability-for-ffmpeg-to-run-svt-vp9.patch          | 792
-rw-r--r--  050-ffmpeg-vmaf-2.x.patch                                       | 949
-rw-r--r--  PKGBUILD                                                        |   9
5 files changed, 4 insertions(+), 2430 deletions(-)
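
This commit bumps the package from 5.0.1 to the upstream n5.1 tag and drops the three SVT/VMAF patches that were carried for 5.0.1. As a rough sketch of the maintainer workflow behind a commit like this (assuming a standard AUR checkout; .SRCINFO is regenerated from the PKGBUILD rather than edited by hand):

    # edit pkgver/source/sha256sums in PKGBUILD, then:
    makepkg --printsrcinfo > .SRCINFO
    git rm 010-ffmpeg-lavc-svt_hevc-add-libsvt-hevc-encoder-wrapper.patch \
           030-ffmpeg-Add-ability-for-ffmpeg-to-run-svt-vp9.patch \
           050-ffmpeg-vmaf-2.x.patch
    git add PKGBUILD .SRCINFO
    git commit -m "Updated"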
diff --git a/.SRCINFO b/.SRCINFO
index 5b3aa4fcc737..e8fab3376936 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,6 +1,6 @@
pkgbase = ffmpeg-nocuda
pkgdesc = Complete solution to record, convert and stream audio and video (without Nvidia's proprietary blobs)
- pkgver = 5.0.1
+ pkgver = 5.1
pkgrel = 3
url = https://www.ffmpeg.org/
arch = x86_64
@@ -78,14 +78,8 @@ pkgbase = ffmpeg-nocuda
provides = libswscale.so
provides = ffmpeg
conflicts = ffmpeg
- source = git+https://git.ffmpeg.org/ffmpeg.git#tag=n5.0.1
- source = 010-ffmpeg-lavc-svt_hevc-add-libsvt-hevc-encoder-wrapper.patch
- source = 030-ffmpeg-Add-ability-for-ffmpeg-to-run-svt-vp9.patch
+ source = git+https://git.ffmpeg.org/ffmpeg.git#tag=n5.1
source = 040-ffmpeg-add-av_stream_get_first_dts-for-chromium.patch
- source = 050-ffmpeg-vmaf-2.x.patch
- sha256sums = SKIP
- sha256sums = SKIP
- sha256sums = SKIP
sha256sums = SKIP
sha256sums = SKIP
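
Each sha256sums = SKIP entry pairs positionally with a source = entry above it; SKIP tells makepkg to skip integrity verification for that source, which is conventional for a VCS source pinned by tag. The PKGBUILD hunk itself is not reproduced on this page, but judging from the .SRCINFO changes its arrays after this commit would look roughly like:

    source=("git+https://git.ffmpeg.org/ffmpeg.git#tag=n5.1"
            "040-ffmpeg-add-av_stream_get_first_dts-for-chromium.patch")
    sha256sums=('SKIP'
                'SKIP')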
diff --git a/010-ffmpeg-lavc-svt_hevc-add-libsvt-hevc-encoder-wrapper.patch b/010-ffmpeg-lavc-svt_hevc-add-libsvt-hevc-encoder-wrapper.patch
deleted file mode 100644
index d2046a12307d..000000000000
--- a/010-ffmpeg-lavc-svt_hevc-add-libsvt-hevc-encoder-wrapper.patch
+++ /dev/null
@@ -1,674 +0,0 @@
-From eb67d4a6ae6707740dc8403424f31409fdce5704 Mon Sep 17 00:00:00 2001
-From: Jing Sun <jing.a.sun@intel.com>
-Date: Wed, 21 Nov 2018 11:33:04 +0800
-Subject: [PATCH] lavc/svt_hevc: add libsvt hevc encoder wrapper
-
-Signed-off-by: Zhengxu Huang <zhengxu.huang@intel.com>
-Signed-off-by: Hassene Tmar <hassene.tmar@intel.com>
-Signed-off-by: Jun Zhao <jun.zhao@intel.com>
-Signed-off-by: Jing Sun <jing.a.sun@intel.com>
-Signed-off-by: Austin Hu <austin.hu@intel.com>
-Signed-off-by: Guo Jiansheng <jiansheng.guo@intel.com>
-Signed-off-by: Christopher Degawa <ccom@randomderp.com>
-Signed-off-by: Guo Jiansheng <jiansheng.guo@intel.com>
----
- configure | 4 +
- libavcodec/Makefile | 1 +
- libavcodec/allcodecs.c | 1 +
- libavcodec/libsvt_hevc.c | 584 +++++++++++++++++++++++++++++++++++++++++++++++
- 4 files changed, 590 insertions(+)
- create mode 100644 libavcodec/libsvt_hevc.c
-
-diff --git a/configure b/configure
-index 6bfd98b..93c87cb 100755
---- a/configure
-+++ b/configure
-@@ -288,6 +288,7 @@ External library support:
- --enable-libwebp enable WebP encoding via libwebp [no]
- --enable-libx264 enable H.264 encoding via x264 [no]
- --enable-libx265 enable HEVC encoding via x265 [no]
-+ --enable-libsvthevc enable HEVC encoding via svt [no]
- --enable-libxavs enable AVS encoding via xavs [no]
- --enable-libxavs2 enable AVS2 encoding via xavs2 [no]
- --enable-libxcb enable X11 grabbing using XCB [autodetect]
-@@ -1792,6 +1793,7 @@ EXTERNAL_LIBRARY_LIST="
- gnutls
- jni
- ladspa
-+ libsvthevc
- libaom
- libass
- libbluray
-@@ -3315,6 +3317,7 @@ libx264_encoder_select="atsc_a53"
- libx264rgb_encoder_deps="libx264"
- libx264rgb_encoder_select="libx264_encoder"
- libx265_encoder_deps="libx265"
-+libsvt_hevc_encoder_deps="libsvthevc"
- libxavs_encoder_deps="libxavs"
- libxavs2_encoder_deps="libxavs2"
- libxvid_encoder_deps="libxvid"
-@@ -6535,6 +6538,7 @@ enabled mmal && { check_lib mmal interface/mmal/mmal.h mmal_port_co
- check_lib mmal interface/mmal/mmal.h mmal_port_connect -lmmal_core -lmmal_util -lmmal_vc_client -lbcm_host; } ||
- die "ERROR: mmal not found" &&
- check_func_headers interface/mmal/mmal.h "MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS"; }
-+enabled libsvthevc && require_pkg_config libsvthevc SvtHevcEnc EbApi.h EbInitHandle
- enabled openal && { { for al_extralibs in "${OPENAL_LIBS}" "-lopenal" "-lOpenAL32"; do
- check_lib openal 'AL/al.h' alGetError "${al_extralibs}" && break; done } ||
- die "ERROR: openal not found"; } &&
-diff --git a/libavcodec/Makefile b/libavcodec/Makefile
-index 4fa8d7a..192ed45 100644
---- a/libavcodec/Makefile
-+++ b/libavcodec/Makefile
-@@ -1064,6 +1064,7 @@ OBJS-$(CONFIG_LIBWEBP_ANIM_ENCODER) += libwebpenc_common.o libwebpenc_anim
- OBJS-$(CONFIG_LIBX262_ENCODER) += libx264.o
- OBJS-$(CONFIG_LIBX264_ENCODER) += libx264.o
- OBJS-$(CONFIG_LIBX265_ENCODER) += libx265.o
-+OBJS-$(CONFIG_LIBSVT_HEVC_ENCODER) += libsvt_hevc.o
- OBJS-$(CONFIG_LIBXAVS_ENCODER) += libxavs.o
- OBJS-$(CONFIG_LIBXAVS2_ENCODER) += libxavs2.o
- OBJS-$(CONFIG_LIBXVID_ENCODER) += libxvid.o
-diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
-index 623db2a..280480f 100644
---- a/libavcodec/allcodecs.c
-+++ b/libavcodec/allcodecs.c
-@@ -774,6 +774,7 @@ extern LIBX264_CONST AVCodec ff_libx264_encoder;
- #endif
- extern const AVCodec ff_libx264rgb_encoder;
- extern AVCodec ff_libx265_encoder;
-+extern AVCodec ff_libsvt_hevc_encoder;
- extern const AVCodec ff_libxavs_encoder;
- extern const AVCodec ff_libxavs2_encoder;
- extern const AVCodec ff_libxvid_encoder;
-diff --git a/libavcodec/libsvt_hevc.c b/libavcodec/libsvt_hevc.c
-new file mode 100644
-index 0000000..2c013a9
---- /dev/null
-+++ b/libavcodec/libsvt_hevc.c
-@@ -0,0 +1,584 @@
-+/*
-+* Scalable Video Technology for HEVC encoder library plugin
-+*
-+* Copyright (c) 2019 Intel Corporation
-+*
-+* This file is part of FFmpeg.
-+*
-+* FFmpeg is free software; you can redistribute it and/or
-+* modify it under the terms of the GNU Lesser General Public
-+* License as published by the Free Software Foundation; either
-+* version 2.1 of the License, or (at your option) any later version.
-+*
-+* FFmpeg is distributed in the hope that it will be useful,
-+* but WITHOUT ANY WARRANTY; without even the implied warranty of
-+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+* Lesser General Public License for more details.
-+*
-+* You should have received a copy of the GNU Lesser General Public
-+* License along with this program; if not, write to the Free Software
-+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+*/
-+
-+#include "EbApi.h"
-+
-+#include "libavutil/common.h"
-+#include "libavutil/frame.h"
-+#include "libavutil/opt.h"
-+
-+#include "internal.h"
-+#include "avcodec.h"
-+#include "encode.h"
-+
-+typedef enum eos_status {
-+ EOS_NOT_REACHED = 0,
-+ EOS_SENT,
-+ EOS_RECEIVED
-+}EOS_STATUS;
-+
-+typedef struct SvtContext {
-+ AVClass *class;
-+
-+ EB_H265_ENC_CONFIGURATION enc_params;
-+ EB_COMPONENTTYPE *svt_handle;
-+ EB_BUFFERHEADERTYPE in_buf;
-+ uint8_t *in_data;
-+ EOS_STATUS eos_flag;
-+
-+ // User options.
-+ int profile;
-+ int hierarchical_level;
-+ int enc_mode;
-+ int tier;
-+ int level;
-+ int rc_mode;
-+ int scd;
-+ int tune;
-+ int base_layer_switch_mode;
-+ int qp;
-+ int aud;
-+ int asm_type;
-+ int forced_idr;
-+ int la_depth;
-+ int thread_count;
-+ int target_socket;
-+ int high_dynamic_range;
-+ int unrestricted_motion_vector;
-+ int tile_row_count;
-+ int tile_col_count;
-+ int tile_slice_mode;
-+ int pred_struct;
-+ int vid_info;
-+} SvtContext;
-+
-+static int error_mapping(EB_ERRORTYPE svt_ret)
-+{
-+ switch (svt_ret) {
-+ case EB_ErrorInsufficientResources:
-+ return AVERROR(ENOMEM);
-+
-+ case EB_ErrorUndefined:
-+ case EB_ErrorInvalidComponent:
-+ case EB_ErrorBadParameter:
-+ return AVERROR(EINVAL);
-+
-+ case EB_ErrorDestroyThreadFailed:
-+ case EB_ErrorSemaphoreUnresponsive:
-+ case EB_ErrorDestroySemaphoreFailed:
-+ case EB_ErrorCreateMutexFailed:
-+ case EB_ErrorMutexUnresponsive:
-+ case EB_ErrorDestroyMutexFailed:
-+ return AVERROR_EXTERNAL;
-+
-+ case EB_NoErrorEmptyQueue:
-+ return AVERROR(EAGAIN);
-+
-+ case EB_ErrorNone:
-+ return 0;
-+
-+ default:
-+ return AVERROR_UNKNOWN;
-+ }
-+}
-+
-+static void free_buffer(SvtContext *svt_enc)
-+{
-+ if (svt_enc && svt_enc->in_data) {
-+ av_freep(&svt_enc->in_data);
-+ svt_enc->in_data = NULL;
-+ }
-+}
-+
-+static EB_ERRORTYPE alloc_buffer(SvtContext *svt_enc)
-+{
-+ EB_BUFFERHEADERTYPE *in_buf = &svt_enc->in_buf;
-+ EB_H265_ENC_INPUT *in_data = NULL;
-+
-+ memset(in_buf, 0, sizeof(*in_buf));
-+ in_buf->nSize = sizeof(*in_buf);
-+ in_buf->sliceType = EB_INVALID_PICTURE;
-+
-+ in_data = (EB_H265_ENC_INPUT *)av_mallocz(sizeof(*in_data));
-+ if (in_data) {
-+ svt_enc->in_data = in_buf->pBuffer = (uint8_t *)in_data;
-+ return EB_ErrorNone;
-+ } else {
-+ return EB_ErrorInsufficientResources;
-+ }
-+}
-+
-+static int config_enc_params(EB_H265_ENC_CONFIGURATION *param,
-+ AVCodecContext *avctx)
-+{
-+ SvtContext *svt_enc = avctx->priv_data;
-+
-+ param->sourceWidth = avctx->width;
-+ param->sourceHeight = avctx->height;
-+
-+ if ((avctx->pix_fmt == AV_PIX_FMT_YUV420P10) ||
-+ (avctx->pix_fmt == AV_PIX_FMT_YUV422P10) ||
-+ (avctx->pix_fmt == AV_PIX_FMT_YUV444P10)) {
-+ av_log(avctx, AV_LOG_DEBUG, "Set 10 bits depth input\n");
-+ param->encoderBitDepth = 10;
-+ } else {
-+ av_log(avctx, AV_LOG_DEBUG, "Set 8 bits depth input\n");
-+ param->encoderBitDepth = 8;
-+ }
-+
-+ if ((avctx->pix_fmt == AV_PIX_FMT_YUV420P) ||
-+ (avctx->pix_fmt == AV_PIX_FMT_YUV420P10))
-+ param->encoderColorFormat = EB_YUV420;
-+ else if ((avctx->pix_fmt == AV_PIX_FMT_YUV422P) ||
-+ (avctx->pix_fmt == AV_PIX_FMT_YUV422P10))
-+ param->encoderColorFormat = EB_YUV422;
-+ else
-+ param->encoderColorFormat = EB_YUV444;
-+
-+ param->profile = svt_enc->profile;
-+
-+ if (FF_PROFILE_HEVC_MAIN_STILL_PICTURE == param->profile) {
-+ av_log(avctx, AV_LOG_ERROR, "Main Still Picture Profile not supported\n");
-+ return EB_ErrorBadParameter;
-+ }
-+
-+ if ((param->encoderColorFormat >= EB_YUV422) &&
-+ (param->profile != FF_PROFILE_HEVC_REXT)) {
-+ av_log(avctx, AV_LOG_WARNING, "Rext Profile forced for 422 or 444\n");
-+ param->profile = FF_PROFILE_HEVC_REXT;
-+ }
-+
-+ if ((FF_PROFILE_HEVC_MAIN == param->profile) &&
-+ (param->encoderBitDepth > 8)) {
-+ av_log(avctx, AV_LOG_WARNING, "Main10 Profile forced for 10 bits\n");
-+ param->profile = FF_PROFILE_HEVC_MAIN_10;
-+ }
-+
-+ param->targetBitRate = avctx->bit_rate;
-+ param->vbvMaxrate = avctx->rc_max_rate;
-+ param->vbvBufsize = avctx->rc_buffer_size;
-+
-+ if (avctx->gop_size > 0)
-+ param->intraPeriodLength = avctx->gop_size - 1;
-+
-+ if ((avctx->framerate.num > 0) && (avctx->framerate.den > 0)) {
-+ param->frameRateNumerator = avctx->framerate.num;
-+ param->frameRateDenominator =
-+ avctx->framerate.den * avctx->ticks_per_frame;
-+ } else {
-+ param->frameRateNumerator = avctx->time_base.den;
-+ param->frameRateDenominator =
-+ avctx->time_base.num * avctx->ticks_per_frame;
-+ }
-+
-+ param->hierarchicalLevels = svt_enc->hierarchical_level;
-+ param->encMode = svt_enc->enc_mode;
-+ param->tier = svt_enc->tier;
-+ param->level = svt_enc->level;
-+ param->rateControlMode = svt_enc->rc_mode;
-+ param->sceneChangeDetection = svt_enc->scd;
-+ param->tune = svt_enc->tune;
-+ param->baseLayerSwitchMode = svt_enc->base_layer_switch_mode;
-+ param->qp = svt_enc->qp;
-+ param->accessUnitDelimiter = svt_enc->aud;
-+ param->asmType = svt_enc->asm_type;
-+ param->intraRefreshType = svt_enc->forced_idr;
-+ param->highDynamicRangeInput = svt_enc->high_dynamic_range;
-+ param->targetSocket = svt_enc->target_socket;
-+ if (param->rateControlMode) {
-+ param->maxQpAllowed = avctx->qmax;
-+ param->minQpAllowed = avctx->qmin;
-+ }
-+
-+ if (svt_enc->la_depth != -1)
-+ param->lookAheadDistance = svt_enc->la_depth;
-+
-+ if ((svt_enc->thread_count > 0) &&
-+ (svt_enc->thread_count < (EB_THREAD_COUNT_MIN_CORE * EB_THREAD_COUNT_FACTOR))) {
-+ param->threadCount = EB_THREAD_COUNT_MIN_CORE * EB_THREAD_COUNT_FACTOR;
-+ av_log(avctx, AV_LOG_WARNING, "Thread count is set too small, forced to %"PRId32"\n",
-+ param->threadCount);
-+ } else if (svt_enc->thread_count % EB_THREAD_COUNT_MIN_CORE) {
-+ param->threadCount = (svt_enc->thread_count + EB_THREAD_COUNT_MIN_CORE - 1)
-+ / EB_THREAD_COUNT_MIN_CORE * EB_THREAD_COUNT_MIN_CORE;
-+ av_log(avctx, AV_LOG_DEBUG, "Thread count is rounded to %"PRId32"\n",
-+ param->threadCount);
-+ } else {
-+ param->threadCount = svt_enc->thread_count;
-+ }
-+
-+ if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)
-+ param->codeVpsSpsPps = 0;
-+ else
-+ param->codeVpsSpsPps = 1;
-+
-+ param->codeEosNal = 1;
-+
-+ if (svt_enc->unrestricted_motion_vector == 0 || svt_enc->unrestricted_motion_vector == 1) {
-+ param->unrestrictedMotionVector = svt_enc->unrestricted_motion_vector;
-+ } else {
-+ av_log(avctx, AV_LOG_ERROR, "Unrestricted Motion Vector should be set 0 or 1\n");
-+ return EB_ErrorBadParameter;
-+ }
-+
-+ if(svt_enc->tile_row_count >= 1 && svt_enc->tile_row_count <= 16) {
-+ param->tileRowCount = svt_enc->tile_row_count;
-+ } else {
-+ av_log(avctx, AV_LOG_ERROR, "Tile Row Count should between 1-16\n");
-+ return EB_ErrorBadParameter;
-+ }
-+
-+ if(svt_enc->tile_col_count >= 1 && svt_enc->tile_col_count <= 16) {
-+ param->tileColumnCount = svt_enc->tile_col_count;
-+ } else {
-+ av_log(avctx, AV_LOG_ERROR, "Tile Column Count should between 1-16\n");
-+ return EB_ErrorBadParameter;
-+ }
-+
-+ if(svt_enc->tile_slice_mode == 0 || svt_enc->tile_slice_mode == 1) {
-+ param->tileSliceMode = svt_enc->tile_slice_mode;
-+ } else {
-+ av_log(avctx, AV_LOG_ERROR, "Tile Slice Mode should be set 0 or 1\n");
-+ return EB_ErrorBadParameter;
-+ }
-+
-+ if(svt_enc->pred_struct >= 0 && svt_enc->pred_struct <= 2) {
-+ param->predStructure = svt_enc->pred_struct;
-+ } else {
-+ av_log(avctx, AV_LOG_ERROR, "Pred Structure should between 0-2\n");
-+ return EB_ErrorBadParameter;
-+ }
-+
-+ if(svt_enc->vid_info == 0 || svt_enc->vid_info == 1) {
-+ param->videoUsabilityInfo = svt_enc->vid_info;
-+ } else {
-+ av_log(avctx, AV_LOG_ERROR, "Video Usability Info should be set 0 or 1\n");
-+ return EB_ErrorBadParameter;
-+ }
-+ return EB_ErrorNone;
-+}
-+
-+static void read_in_data(EB_H265_ENC_CONFIGURATION *config,
-+ const AVFrame *frame,
-+ EB_BUFFERHEADERTYPE *header_ptr)
-+{
-+ uint8_t is16bit;
-+ uint64_t frame_size;
-+ EB_H265_ENC_INPUT *in_data = (EB_H265_ENC_INPUT *)header_ptr->pBuffer;
-+
-+ is16bit = config->encoderBitDepth > 8;
-+ frame_size = (uint64_t)(config->sourceWidth * config->sourceHeight) << is16bit;
-+
-+ in_data->luma = frame->data[0];
-+ in_data->cb = frame->data[1];
-+ in_data->cr = frame->data[2];
-+
-+ in_data->yStride = frame->linesize[0] >> is16bit;
-+ in_data->cbStride = frame->linesize[1] >> is16bit;
-+ in_data->crStride = frame->linesize[2] >> is16bit;
-+
-+ if (config->encoderColorFormat == EB_YUV420)
-+ frame_size *= 3/2u;
-+ else if (config->encoderColorFormat == EB_YUV422)
-+ frame_size *= 2u;
-+ else
-+ frame_size *= 3u;
-+
-+ header_ptr->nFilledLen += frame_size;
-+}
-+
-+static av_cold int eb_enc_init(AVCodecContext *avctx)
-+{
-+ SvtContext *svt_enc = avctx->priv_data;
-+ EB_ERRORTYPE svt_ret;
-+
-+ svt_enc->eos_flag = EOS_NOT_REACHED;
-+
-+ svt_ret = EbInitHandle(&svt_enc->svt_handle, svt_enc, &svt_enc->enc_params);
-+ if (svt_ret != EB_ErrorNone) {
-+ av_log(avctx, AV_LOG_ERROR, "Failed to init handle\n");
-+ return error_mapping(svt_ret);
-+ }
-+
-+ svt_ret = config_enc_params(&svt_enc->enc_params, avctx);
-+ if (svt_ret != EB_ErrorNone) {
-+ av_log(avctx, AV_LOG_ERROR, "Failed to config parameters\n");
-+ goto failed_init_handle;
-+ }
-+
-+ svt_ret = EbH265EncSetParameter(svt_enc->svt_handle, &svt_enc->enc_params);
-+ if (svt_ret != EB_ErrorNone) {
-+ av_log(avctx, AV_LOG_ERROR, "Failed to set parameters\n");
-+ goto failed_init_handle;
-+ }
-+
-+ svt_ret = EbInitEncoder(svt_enc->svt_handle);
-+ if (svt_ret != EB_ErrorNone) {
-+ av_log(avctx, AV_LOG_ERROR, "Failed to init encoder\n");
-+ goto failed_init_handle;
-+ }
-+
-+ if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
-+ EB_BUFFERHEADERTYPE *header_ptr = NULL;
-+
-+ svt_ret = EbH265EncStreamHeader(svt_enc->svt_handle, &header_ptr);
-+ if (svt_ret != EB_ErrorNone) {
-+ av_log(avctx, AV_LOG_ERROR, "Failed to build stream header\n");
-+ goto failed_init_encoder;
-+ }
-+
-+ avctx->extradata_size = header_ptr->nFilledLen;
-+ avctx->extradata = av_malloc(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
-+ if (!avctx->extradata) {
-+ av_log(avctx, AV_LOG_ERROR, "Failed to allocate extradata\n");
-+ svt_ret = EB_ErrorInsufficientResources;
-+ goto failed_init_encoder;
-+ }
-+ memcpy(avctx->extradata, header_ptr->pBuffer, avctx->extradata_size);
-+ memset(avctx->extradata+avctx->extradata_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
-+ }
-+
-+ svt_ret = alloc_buffer(svt_enc);
-+ if (svt_ret != EB_ErrorNone) {
-+ av_log(avctx, AV_LOG_ERROR, "Failed to alloc data buffer\n");
-+ goto failed_init_encoder;
-+ }
-+ return 0;
-+
-+failed_init_encoder:
-+ EbDeinitEncoder(svt_enc->svt_handle);
-+failed_init_handle:
-+ EbDeinitHandle(svt_enc->svt_handle);
-+ svt_enc->svt_handle = NULL;
-+ svt_enc = NULL;
-+ return error_mapping(svt_ret);
-+}
-+
-+static int eb_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
-+ const AVFrame *frame, int *got_packet)
-+{
-+ SvtContext *svt_enc = avctx->priv_data;
-+ EB_BUFFERHEADERTYPE *header_ptr = &svt_enc->in_buf;
-+ EB_ERRORTYPE svt_ret;
-+ int av_ret;
-+
-+ if (EOS_RECEIVED == svt_enc->eos_flag) {
-+ *got_packet = 0;
-+ return 0;
-+ }
-+
-+ if (!frame) {
-+ if (!svt_enc->eos_flag) {
-+ svt_enc->eos_flag = EOS_SENT;
-+
-+ header_ptr->nAllocLen = 0;
-+ header_ptr->nFilledLen = 0;
-+ header_ptr->nTickCount = 0;
-+ header_ptr->nFlags = EB_BUFFERFLAG_EOS;
-+ header_ptr->pBuffer = NULL;
-+
-+ EbH265EncSendPicture(svt_enc->svt_handle, header_ptr);
-+
-+ av_log(avctx, AV_LOG_DEBUG, "Sent EOS\n");
-+ }
-+ } else {
-+ read_in_data(&svt_enc->enc_params, frame, header_ptr);
-+ header_ptr->pts = frame->pts;
-+
-+ EbH265EncSendPicture(svt_enc->svt_handle, header_ptr);
-+
-+ av_log(avctx, AV_LOG_DEBUG, "Sent PTS %"PRId64"\n", header_ptr->pts);
-+ }
-+
-+ header_ptr = NULL;
-+ svt_ret = EbH265GetPacket(svt_enc->svt_handle, &header_ptr, svt_enc->eos_flag);
-+
-+ if (svt_ret == EB_NoErrorEmptyQueue) {
-+ *got_packet = 0;
-+ av_log(avctx, AV_LOG_DEBUG, "Received none\n");
-+ return 0;
-+ } else if (svt_ret == EB_ErrorMax) {
-+ *got_packet = 0;
-+ av_log(avctx, AV_LOG_ERROR, "Received NULL packet with error code 0x%X\n", header_ptr->nFlags);
-+ return AVERROR_INVALIDDATA;
-+ }
-+
-+ av_log(avctx, AV_LOG_DEBUG, "Received PTS %"PRId64" packet\n", header_ptr->pts);
-+
-+ av_ret = ff_alloc_packet(avctx, pkt, header_ptr->nFilledLen);
-+ if (av_ret) {
-+ av_log(avctx, AV_LOG_ERROR, "Failed to allocate a packet\n");
-+ EbH265ReleaseOutBuffer(&header_ptr);
-+ return av_ret;
-+ }
-+
-+ memcpy(pkt->data, header_ptr->pBuffer, header_ptr->nFilledLen);
-+ pkt->size = header_ptr->nFilledLen;
-+ pkt->pts = header_ptr->pts;
-+ pkt->dts = header_ptr->dts;
-+
-+ if ((header_ptr->sliceType == EB_IDR_PICTURE) ||
-+ (header_ptr->sliceType == EB_I_PICTURE))
-+ pkt->flags |= AV_PKT_FLAG_KEY;
-+ if (header_ptr->sliceType == EB_NON_REF_PICTURE)
-+ pkt->flags |= AV_PKT_FLAG_DISPOSABLE;
-+
-+ EbH265ReleaseOutBuffer(&header_ptr);
-+
-+ *got_packet = 1;
-+
-+ if (EB_BUFFERFLAG_EOS == header_ptr->nFlags)
-+ svt_enc->eos_flag = EOS_RECEIVED;
-+
-+ return 0;
-+}
-+
-+static av_cold int eb_enc_close(AVCodecContext *avctx)
-+{
-+ SvtContext *svt_enc = avctx->priv_data;
-+
-+ if (svt_enc) {
-+ free_buffer(svt_enc);
-+
-+ if (svt_enc->svt_handle) {
-+ EbDeinitEncoder(svt_enc->svt_handle);
-+ EbDeinitHandle(svt_enc->svt_handle);
-+ svt_enc->svt_handle = NULL;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+#define OFFSET(x) offsetof(SvtContext, x)
-+#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
-+static const AVOption options[] = {
-+ { "asm_type", "Assembly instruction set type [0: C Only, 1: Auto]", OFFSET(asm_type),
-+ AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE },
-+
-+ { "aud", "Include Access Unit Delimiter", OFFSET(aud),
-+ AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
-+
-+ { "bl_mode", "Random Access Prediction Structure type setting", OFFSET(base_layer_switch_mode),
-+ AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
-+
-+ { "forced-idr", "If forcing keyframes, force them as IDR frames.", OFFSET(forced_idr),
-+ AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, VE },
-+
-+ { "hielevel", "Hierarchical prediction levels setting", OFFSET(hierarchical_level),
-+ AV_OPT_TYPE_INT, { .i64 = 3 }, 0, 3, VE , "hielevel"},
-+ { "flat", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, "hielevel" },
-+ { "1 level", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, "hielevel" },
-+ { "2 level", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 2 }, INT_MIN, INT_MAX, VE, "hielevel" },
-+ { "3 level", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 3 }, INT_MIN, INT_MAX, VE, "hielevel" },
-+
-+ { "la_depth", "Look ahead distance [0, 256]", OFFSET(la_depth),
-+ AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 256, VE },
-+
-+ { "level", "Set level (level_idc)", OFFSET(level),
-+ AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 0xff, VE, "level" },
-+
-+ { "preset", "Encoding preset [0, 12]",
-+ OFFSET(enc_mode), AV_OPT_TYPE_INT, { .i64 = 7 }, 0, 12, VE },
-+
-+ { "profile", "Profile setting, Main Still Picture Profile not supported", OFFSET(profile),
-+ AV_OPT_TYPE_INT, { .i64 = FF_PROFILE_HEVC_MAIN }, FF_PROFILE_HEVC_MAIN, FF_PROFILE_HEVC_REXT, VE, "profile"},
-+
-+ { "qp", "QP value for intra frames", OFFSET(qp),
-+ AV_OPT_TYPE_INT, { .i64 = 32 }, 0, 51, VE },
-+
-+ { "rc", "Bit rate control mode", OFFSET(rc_mode),
-+ AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE , "rc"},
-+ { "cqp", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, "rc" },
-+ { "vbr", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, "rc" },
-+
-+ { "sc_detection", "Scene change detection", OFFSET(scd),
-+ AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE },
-+
-+ { "socket", "Target CPU socket to use. -1 use all available", OFFSET(target_socket),
-+ AV_OPT_TYPE_INT, {.i64 = -1 }, -1, 1, VE },
-+
-+ { "thread_count", "Number of threads [0: Auto, 96: Min]", OFFSET(thread_count),
-+ AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, VE },
-+
-+ { "tier", "Set tier (general_tier_flag)", OFFSET(tier),
-+ AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE, "tier" },
-+ { "main", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, VE, "tier" },
-+ { "high", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, VE, "tier" },
-+
-+ { "tune", "Quality tuning mode", OFFSET(tune), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 2, VE, "tune" },
-+ { "sq", "Visually optimized mode", 0,
-+ AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, "tune" },
-+ { "oq", "PSNR / SSIM optimized mode", 0,
-+ AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, "tune" },
-+ { "vmaf", "VMAF optimized mode", 0,
-+ AV_OPT_TYPE_CONST, { .i64 = 2 }, INT_MIN, INT_MAX, VE, "tune" },
-+ { "hdr", "High dynamic range input (HDR10)", OFFSET(high_dynamic_range), AV_OPT_TYPE_INT, { .i64 = 0}, 0, 1, VE, "hdr" },
-+ { "umv", "Enables or disables unrestricted motion vectors", OFFSET(unrestricted_motion_vector),
-+ AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE },
-+ { "tile_row_cnt", "tile count in the row", OFFSET(tile_row_count), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 16, VE },
-+ { "tile_col_cnt", "tile count in the column", OFFSET(tile_col_count), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 16, VE },
-+ { "tile_slice_mode", "per slice per tile, only valid for multi-tile", OFFSET(tile_slice_mode),
-+ AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
-+ { "pred_struct", "The prediction structure", OFFSET(pred_struct), AV_OPT_TYPE_INT, { .i64 = 2 }, 0, 2, VE },
-+ { "vid_info", "Enables or disables sending a vui structure in the HEVC Elementary bitstream.", OFFSET(vid_info),
-+ AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
-+ {NULL},
-+};
-+
-+static const AVClass class = {
-+ .class_name = "libsvt_hevc",
-+ .item_name = av_default_item_name,
-+ .option = options,
-+ .version = LIBAVUTIL_VERSION_INT,
-+};
-+
-+static const AVCodecDefault eb_enc_defaults[] = {
-+ { "b", "7M" },
-+ { "qmin", "10" },
-+ { "qmax", "48" },
-+ { "g", "-2" },
-+ { NULL },
-+};
-+
-+AVCodec ff_libsvt_hevc_encoder = {
-+ .name = "libsvt_hevc",
-+ .long_name = NULL_IF_CONFIG_SMALL("SVT-HEVC(Scalable Video Technology for HEVC) encoder"),
-+ .priv_data_size = sizeof(SvtContext),
-+ .type = AVMEDIA_TYPE_VIDEO,
-+ .id = AV_CODEC_ID_HEVC,
-+ .init = eb_enc_init,
-+ .encode2 = eb_encode_frame,
-+ .close = eb_enc_close,
-+ .capabilities = AV_CODEC_CAP_DELAY,
-+ .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P,
-+ AV_PIX_FMT_YUV420P10,
-+ AV_PIX_FMT_YUV422P,
-+ AV_PIX_FMT_YUV422P10,
-+ AV_PIX_FMT_YUV444P,
-+ AV_PIX_FMT_YUV444P10,
-+ AV_PIX_FMT_NONE },
-+ .priv_class = &class,
-+ .defaults = eb_enc_defaults,
-+ .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
-+ .wrapper_name = "libsvt_hevc",
-+};
---
-2.7.4
-
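
The patch deleted above wrapped Intel's SVT-HEVC library as a libsvt_hevc encoder, gated behind a new --enable-libsvthevc configure switch. For reference, on a build that still carried it, an encode using options from the AVOption table in the deleted hunk would have looked roughly like this (input/output names are hypothetical):

    # -preset 0..12 (default 7), -rc cqp|vbr, -qp applies in cqp mode
    ffmpeg -i input.y4m -c:v libsvt_hevc -preset 7 -rc cqp -qp 32 output.mkv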
diff --git a/030-ffmpeg-Add-ability-for-ffmpeg-to-run-svt-vp9.patch b/030-ffmpeg-Add-ability-for-ffmpeg-to-run-svt-vp9.patch
deleted file mode 100644
index fad2b35bb7d4..000000000000
--- a/030-ffmpeg-Add-ability-for-ffmpeg-to-run-svt-vp9.patch
+++ /dev/null
@@ -1,792 +0,0 @@
-From dbd08eed9ec185b907d78508ea1d44c3a8d9ca6d Mon Sep 17 00:00:00 2001
-From: hassene <hassene.tmar@intel.com>
-Date: Fri, 15 Feb 2019 17:43:54 -0800
-Subject: [PATCH] Add ability for ffmpeg to run svt vp9
-
-Signed-off-by: hassene <hassene.tmar@intel.com>
-Signed-off-by: Jing Sun <jing.a.sun@intel.com>
-Signed-off-by: Austin Hu <austin.hu@intel.com>
-Signed-off-by: Guo Jiansheng <jiansheng.guo@intel.com>
-Signed-off-by: Andrei Bich <dronimal@yandex-team.ru>
----
- configure | 4 +
- libavcodec/Makefile | 1 +
- libavcodec/allcodecs.c | 1 +
- libavcodec/libsvt_vp9.c | 705 ++++++++++++++++++++++++++++++++++++++++++++++++
- 4 files changed, 711 insertions(+)
- create mode 100644 libavcodec/libsvt_vp9.c
-
-diff --git a/configure b/configure
-index b124411..2fa6805 100755
---- a/configure
-+++ b/configure
-@@ -285,6 +285,7 @@ External library support:
- --enable-libvorbis enable Vorbis en/decoding via libvorbis,
- native implementation exists [no]
- --enable-libvpx enable VP8 and VP9 de/encoding via libvpx [no]
-+ --enable-libsvtvp9 enable VP9 encoding via svt [no]
- --enable-libwebp enable WebP encoding via libwebp [no]
- --enable-libx264 enable H.264 encoding via x264 [no]
- --enable-libx265 enable HEVC encoding via x265 [no]
-@@ -1832,6 +1833,7 @@ EXTERNAL_LIBRARY_LIST="
- librtmp
- libshine
- libsmbclient
-+ libsvtvp9
- libsnappy
- libsoxr
- libspeex
-@@ -3311,6 +3313,7 @@ libvpx_vp8_decoder_deps="libvpx"
- libvpx_vp8_encoder_deps="libvpx"
- libvpx_vp9_decoder_deps="libvpx"
- libvpx_vp9_encoder_deps="libvpx"
-+libsvt_vp9_encoder_deps="libsvtvp9"
- libwebp_encoder_deps="libwebp"
- libwebp_anim_encoder_deps="libwebp"
- libx262_encoder_deps="libx262"
-@@ -6523,6 +6526,7 @@ enabled libvpx && {
- fi
- }
-
-+enabled libsvtvp9 && require_pkg_config libsvtvp9 SvtVp9Enc EbSvtVp9Enc.h eb_vp9_svt_init_handle
- enabled libwebp && {
- enabled libwebp_encoder && require_pkg_config libwebp "libwebp >= 0.2.0" webp/encode.h WebPGetEncoderVersion
- enabled libwebp_anim_encoder && check_pkg_config libwebp_anim_encoder "libwebpmux >= 0.4.0" webp/mux.h WebPAnimEncoderOptionsInit; }
-diff --git a/libavcodec/Makefile b/libavcodec/Makefile
-index 04f28c6..060e198 100644
---- a/libavcodec/Makefile
-+++ b/libavcodec/Makefile
-@@ -1060,6 +1060,7 @@ OBJS-$(CONFIG_LIBVPX_VP8_DECODER) += libvpxdec.o
- OBJS-$(CONFIG_LIBVPX_VP8_ENCODER) += libvpxenc.o
- OBJS-$(CONFIG_LIBVPX_VP9_DECODER) += libvpxdec.o libvpx.o
- OBJS-$(CONFIG_LIBVPX_VP9_ENCODER) += libvpxenc.o libvpx.o
-+OBJS-$(CONFIG_LIBSVT_VP9_ENCODER) += libsvt_vp9.o
- OBJS-$(CONFIG_LIBWEBP_ENCODER) += libwebpenc_common.o libwebpenc.o
- OBJS-$(CONFIG_LIBWEBP_ANIM_ENCODER) += libwebpenc_common.o libwebpenc_animencoder.o
- OBJS-$(CONFIG_LIBX262_ENCODER) += libx264.o
-diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
-index 623db2a..56307d8 100644
---- a/libavcodec/allcodecs.c
-+++ b/libavcodec/allcodecs.c
-@@ -759,6 +759,7 @@ extern const AVCodec ff_libvpx_vp8_encoder;
- extern const AVCodec ff_libvpx_vp8_decoder;
- extern AVCodec ff_libvpx_vp9_encoder;
- extern AVCodec ff_libvpx_vp9_decoder;
-+extern AVCodec ff_libsvt_vp9_encoder;
- /* preferred over libwebp */
- extern const AVCodec ff_libwebp_anim_encoder;
- extern const AVCodec ff_libwebp_encoder;
-diff --git a/libavcodec/libsvt_vp9.c b/libavcodec/libsvt_vp9.c
-new file mode 100644
-index 0000000..6ed6e62
---- /dev/null
-+++ b/libavcodec/libsvt_vp9.c
-@@ -0,0 +1,705 @@
-+/*
-+* Scalable Video Technology for VP9 encoder library plugin
-+*
-+* Copyright (c) 2018 Intel Corporation
-+*
-+* This file is part of FFmpeg.
-+*
-+* FFmpeg is free software; you can redistribute it and/or
-+* modify it under the terms of the GNU Lesser General Public
-+* License as published by the Free Software Foundation; either
-+* version 2.1 of the License, or (at your option) any later version.
-+*
-+* FFmpeg is distributed in the hope that it will be useful,
-+* but WITHOUT ANY WARRANTY; without even the implied warranty of
-+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+* Lesser General Public License for more details.
-+*
-+* You should have received a copy of the GNU Lesser General Public
-+* License along with this program; if not, write to the Free Software
-+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+*/
-+
-+#include <stdint.h>
-+#include "EbSvtVp9ErrorCodes.h"
-+#include "EbSvtVp9Enc.h"
-+
-+#include "libavutil/common.h"
-+#include "libavutil/frame.h"
-+#include "libavutil/opt.h"
-+#include "libavcodec/get_bits.h"
-+
-+#include "internal.h"
-+#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(58, 93, 100)
-+#include "encode.h"
-+#endif
-+
-+#include "avcodec.h"
-+
-+#define SUPERFRAME_INDEX_MAX_SIZE 128
-+
-+#define RECIVED_FRAMES_MAX_SIZE 32
-+#define MAX_VP9_SUPERFRAME_SIZE 8
-+
-+typedef enum eos_status {
-+ EOS_NOT_REACHED = 0,
-+ EOS_REACHED,
-+ EOS_TOTRIGGER
-+}EOS_STATUS;
-+
-+typedef struct SvtReceivedFrameStruct {
-+ // fields for AVPacket
-+ AVBufferRef *buf;
-+ int64_t pts;
-+ int64_t dts;
-+ int size;
-+ int flags;
-+
-+ // svt fields:
-+ int ready_flag; // frame or superframe in data is visible
-+ int frames_count;
-+ int frames_sizes[MAX_VP9_SUPERFRAME_SIZE];
-+} SvtReceivedFrameStruct;
-+
-+typedef struct SvtContext {
-+ AVClass *class;
-+
-+ EbSvtVp9EncConfiguration enc_params;
-+ EbComponentType *svt_handle;
-+
-+ EbBufferHeaderType *in_buf;
-+ int raw_size;
-+
-+ AVFrame *frame;
-+
-+ AVBufferPool* pool;
-+
-+ EOS_STATUS eos_flag;
-+
-+ // User options.
-+ int enc_mode;
-+ int rc_mode;
-+ int tune;
-+ int qp;
-+
-+ int target_socket;
-+
-+ int forced_idr;
-+
-+ int level;
-+
-+ int base_layer_switch_mode;
-+
-+
-+ int64_t last_ready_dts;
-+ SvtReceivedFrameStruct received_frames[RECIVED_FRAMES_MAX_SIZE];
-+ int received_frames_size;
-+} SvtContext;
-+
-+static int error_mapping(EbErrorType svt_ret)
-+{
-+ int err;
-+
-+ switch (svt_ret) {
-+ case EB_ErrorInsufficientResources:
-+ err = AVERROR(ENOMEM);
-+ break;
-+
-+ case EB_ErrorUndefined:
-+ case EB_ErrorInvalidComponent:
-+ case EB_ErrorBadParameter:
-+ err = AVERROR(EINVAL);
-+ break;
-+
-+ case EB_ErrorDestroyThreadFailed:
-+ case EB_ErrorSemaphoreUnresponsive:
-+ case EB_ErrorDestroySemaphoreFailed:
-+ case EB_ErrorCreateMutexFailed:
-+ case EB_ErrorMutexUnresponsive:
-+ case EB_ErrorDestroyMutexFailed:
-+ err = AVERROR_EXTERNAL;
-+ break;
-+
-+ case EB_NoErrorEmptyQueue:
-+ err = AVERROR(EAGAIN);
-+
-+ case EB_ErrorNone:
-+ err = 0;
-+ break;
-+
-+ default:
-+ err = AVERROR_UNKNOWN;
-+ }
-+
-+ return err;
-+}
-+
-+static void free_buffer(SvtContext *svt_enc)
-+{
-+ if (svt_enc->in_buf) {
-+ EbSvtEncInput *in_data = (EbSvtEncInput *)svt_enc->in_buf->p_buffer;
-+ av_freep(&in_data);
-+ av_freep(&svt_enc->in_buf);
-+ }
-+ av_buffer_pool_uninit(&svt_enc->pool);
-+}
-+
-+static int alloc_buffer(EbSvtVp9EncConfiguration *config, SvtContext *svt_enc)
-+{
-+ const size_t luma_size_8bit =
-+ config->source_width * config->source_height;
-+ const size_t luma_size_10bit =
-+ (config->encoder_bit_depth > 8) ? luma_size_8bit : 0;
-+
-+ EbSvtEncInput *in_data;
-+
-+ svt_enc->raw_size = ((luma_size_8bit + luma_size_10bit) * 3 / 2) * MAX_VP9_SUPERFRAME_SIZE + SUPERFRAME_INDEX_MAX_SIZE;
-+
-+ // allocate buffer for in and out
-+ svt_enc->in_buf = av_mallocz(sizeof(*svt_enc->in_buf));
-+ if (!svt_enc->in_buf)
-+ goto failed;
-+
-+
-+ svt_enc->in_buf->p_buffer = (unsigned char *)av_mallocz(sizeof(*in_data));
-+ if (!svt_enc->in_buf->p_buffer)
-+ goto failed;
-+
-+ svt_enc->in_buf->size = sizeof(*svt_enc->in_buf);
-+ svt_enc->in_buf->p_app_private = NULL;
-+
-+ svt_enc->pool = av_buffer_pool_init(svt_enc->raw_size, NULL);
-+ if (!svt_enc->pool)
-+ goto failed;
-+
-+ svt_enc->received_frames_size = 0;
-+ svt_enc->last_ready_dts = -1e9;
-+
-+ return 0;
-+
-+failed:
-+ free_buffer(svt_enc);
-+ return AVERROR(ENOMEM);
-+}
-+
-+static int config_enc_params(EbSvtVp9EncConfiguration *param,
-+ AVCodecContext *avctx)
-+{
-+ SvtContext *svt_enc = avctx->priv_data;
-+ int ret;
-+ int ten_bits = 0;
-+
-+ param->source_width = avctx->width;
-+ param->source_height = avctx->height;
-+
-+ if (avctx->pix_fmt == AV_PIX_FMT_YUV420P10LE) {
-+ av_log(avctx, AV_LOG_DEBUG , "Encoder 10 bits depth input\n");
-+ // Disable Compressed 10-bit format default
-+ ten_bits = 1;
-+ }
-+
-+ // Update param from options
-+ param->enc_mode = svt_enc->enc_mode;
-+ param->level = svt_enc->level;
-+ param->rate_control_mode = svt_enc->rc_mode;
-+ param->tune = svt_enc->tune;
-+ param->base_layer_switch_mode = svt_enc->base_layer_switch_mode;
-+ param->qp = svt_enc->qp;
-+ param->target_socket = svt_enc->target_socket;
-+ param->target_bit_rate = avctx->bit_rate;
-+ if (avctx->gop_size > 0)
-+ param->intra_period = avctx->gop_size - 1;
-+
-+ if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
-+ param->frame_rate_numerator = avctx->framerate.num;
-+ param->frame_rate_denominator = avctx->framerate.den * avctx->ticks_per_frame;
-+ } else {
-+ param->frame_rate_numerator = avctx->time_base.den;
-+ param->frame_rate_denominator = avctx->time_base.num * avctx->ticks_per_frame;
-+ }
-+
-+ if (param->rate_control_mode) {
-+ param->max_qp_allowed = avctx->qmax;
-+ param->min_qp_allowed = avctx->qmin;
-+ }
-+
-+ if (ten_bits) {
-+ param->encoder_bit_depth = 10;
-+ }
-+
-+ ret = alloc_buffer(param, svt_enc);
-+
-+ return ret;
-+}
-+
-+static void read_in_data(EbSvtVp9EncConfiguration *config,
-+ const AVFrame *frame,
-+ EbBufferHeaderType *headerPtr)
-+{
-+ uint8_t is16bit = config->encoder_bit_depth > 8;
-+ uint64_t luma_size =
-+ (uint64_t)config->source_width * config->source_height<< is16bit;
-+ EbSvtEncInput *in_data = (EbSvtEncInput *)headerPtr->p_buffer;
-+
-+ // support yuv420p and yuv420p010
-+ in_data->luma = frame->data[0];
-+ in_data->cb = frame->data[1];
-+ in_data->cr = frame->data[2];
-+
-+ // stride info
-+ in_data->y_stride = frame->linesize[0] >> is16bit;
-+ in_data->cb_stride = frame->linesize[1] >> is16bit;
-+ in_data->cr_stride = frame->linesize[2] >> is16bit;
-+
-+ headerPtr->n_filled_len += luma_size * 3/2u;
-+}
-+
-+static av_cold int eb_enc_init(AVCodecContext *avctx)
-+{
-+ SvtContext *svt_enc = avctx->priv_data;
-+ EbErrorType svt_ret;
-+
-+ svt_enc->eos_flag = EOS_NOT_REACHED;
-+
-+ svt_ret = eb_vp9_svt_init_handle(&svt_enc->svt_handle, svt_enc, &svt_enc->enc_params);
-+ if (svt_ret != EB_ErrorNone) {
-+ av_log(avctx, AV_LOG_ERROR, "Error init encoder handle\n");
-+ goto failed;
-+ }
-+
-+ svt_ret = config_enc_params(&svt_enc->enc_params, avctx);
-+ if (svt_ret != EB_ErrorNone) {
-+ av_log(avctx, AV_LOG_ERROR, "Error configure encoder parameters\n");
-+ goto failed_init_handle;
-+ }
-+
-+ svt_ret = eb_vp9_svt_enc_set_parameter(svt_enc->svt_handle, &svt_enc->enc_params);
-+ if (svt_ret != EB_ErrorNone) {
-+ av_log(avctx, AV_LOG_ERROR, "Error setting encoder parameters\n");
-+ goto failed_init_handle;
-+ }
-+
-+ svt_ret = eb_vp9_init_encoder(svt_enc->svt_handle);
-+ if (svt_ret != EB_ErrorNone) {
-+ av_log(avctx, AV_LOG_ERROR, "Error init encoder\n");
-+ goto failed_init_handle;
-+ }
-+
-+ svt_enc->frame = av_frame_alloc();
-+ if (!svt_enc->frame)
-+ return AVERROR(ENOMEM);
-+
-+ // if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
-+ // EbBufferHeaderType* headerPtr;
-+ // headerPtr->size = sizeof(headerPtr);
-+ // headerPtr->n_filled_len = 0; /* in/out */
-+ // headerPtr->p_buffer = av_malloc(10 * 1024 * 1024);
-+ // headerPtr->n_alloc_len = (10 * 1024 * 1024);
-+ //
-+ // if (!headerPtr->p_buffer) {
-+ // av_log(avctx, AV_LOG_ERROR,
-+ // "Cannot allocate buffer size %d.\n", headerPtr->n_alloc_len);
-+ // svt_ret = EB_ErrorInsufficientResources;
-+ // goto failed_init_enc;
-+ // }
-+ //
-+ // svt_ret = eb_svt_enc_stream_header(svt_enc->svt_handle, &headerPtr);
-+ // if (svt_ret != EB_ErrorNone) {
-+ // av_log(avctx, AV_LOG_ERROR, "Error when build stream header.\n");
-+ // av_freep(&headerPtr->p_buffer);
-+ // goto failed_init_enc;
-+ // }
-+ //
-+ // avctx->extradata_size = headerPtr->n_filled_len;
-+ // avctx->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
-+ // if (!avctx->extradata) {
-+ // av_log(avctx, AV_LOG_ERROR,
-+ // "Cannot allocate VP9 header of size %d.\n", avctx->extradata_size);
-+ // av_freep(&headerPtr->p_buffer);
-+ // svt_ret = EB_ErrorInsufficientResources;
-+ // goto failed_init_enc;
-+ // }
-+ // memcpy(avctx->extradata, headerPtr->p_buffer, avctx->extradata_size);
-+ //
-+ // av_freep(&headerPtr->p_buffer);
-+ // }
-+ return 0;
-+
-+//failed_init_enc:
-+// eb_deinit_encoder(svt_enc->svt_handle);
-+failed_init_handle:
-+ eb_vp9_deinit_handle(svt_enc->svt_handle);
-+failed:
-+ free_buffer(svt_enc);
-+ return error_mapping(svt_ret);
-+}
-+
-+static int eb_send_frame(AVCodecContext *avctx, const AVFrame *frame)
-+{
-+ SvtContext *svt_enc = avctx->priv_data;
-+ EbBufferHeaderType *headerPtr = svt_enc->in_buf;
-+
-+ if (!frame) {
-+ if (svt_enc->eos_flag == EOS_REACHED)
-+ return 0;
-+
-+ EbBufferHeaderType headerPtrLast;
-+ headerPtrLast.n_alloc_len = 0;
-+ headerPtrLast.n_filled_len = 0;
-+ headerPtrLast.n_tick_count = 0;
-+ headerPtrLast.p_app_private = NULL;
-+ headerPtrLast.p_buffer = NULL;
-+ headerPtrLast.flags = EB_BUFFERFLAG_EOS;
-+
-+ eb_vp9_svt_enc_send_picture(svt_enc->svt_handle, &headerPtrLast);
-+ svt_enc->eos_flag = EOS_REACHED;
-+ av_log(avctx, AV_LOG_DEBUG, "Finish sending frames!!!\n");
-+ return 0;
-+ }
-+
-+ read_in_data(&svt_enc->enc_params, frame, headerPtr);
-+
-+ headerPtr->flags = 0;
-+ headerPtr->p_app_private = NULL;
-+ headerPtr->pts = frame->pts;
-+ switch (frame->pict_type) {
-+ case AV_PICTURE_TYPE_I:
-+ headerPtr->pic_type = svt_enc->forced_idr > 0 ? EB_IDR_PICTURE : EB_I_PICTURE;
-+ break;
-+ case AV_PICTURE_TYPE_P:
-+ headerPtr->pic_type = EB_P_PICTURE;
-+ break;
-+ case AV_PICTURE_TYPE_B:
-+ headerPtr->pic_type = EB_B_PICTURE;
-+ break;
-+ default:
-+ headerPtr->pic_type = EB_INVALID_PICTURE;
-+ break;
-+ }
-+ eb_vp9_svt_enc_send_picture(svt_enc->svt_handle, headerPtr);
-+
-+ return 0;
-+}
-+
-+static int is_frame_visible(uint8_t const* ptr, int size) {
-+ GetBitContext gb;
-+ int ret, visible, profile;
-+ if ((ret = init_get_bits8(&gb, ptr, size)) < 0) {
-+ return ret;
-+ }
-+
-+ // frame marker
-+ get_bits(&gb, 2);
-+ profile = get_bits1(&gb);
-+ profile |= get_bits1(&gb) << 1;
-+
-+ // reserved_zero
-+ if (profile == 3) profile += get_bits1(&gb); // reserved_zero
-+
-+ // read show_existing_frame
-+ if (get_bits1(&gb)) {
-+ // show_existing_frame == 1
-+ visible = 1;
-+ } else {
-+ // show_existing_frame == 0
-+ // keyframe (frame_type actually)
-+ get_bits1(&gb);
-+ // read show_frame
-+ visible = get_bits1(&gb) ? 2 : 0;
-+ }
-+
-+ return visible;
-+}
-+
-+static int get_received_frame(SvtContext *svt_enc, AVPacket *pkt) {
-+ SvtReceivedFrameStruct* rfs = &svt_enc->received_frames[0];
-+
-+ if (svt_enc->received_frames_size == 0 || !rfs->ready_flag) {
-+ return AVERROR(EAGAIN);
-+ }
-+
-+ pkt->buf = rfs->buf;
-+ pkt->data = rfs->buf->data;
-+ pkt->dts = rfs->dts;
-+ pkt->pts = rfs->pts;
-+ pkt->flags = rfs->flags;
-+ pkt->size = rfs->size;
-+
-+ --svt_enc->received_frames_size;
-+ for (int i = 0; i < svt_enc->received_frames_size; ++i) {
-+ svt_enc->received_frames[i] = svt_enc->received_frames[i + 1];
-+ }
-+
-+ return 0;
-+}
-+
-+static int put_received_frame(AVCodecContext *avctx, uint8_t* data, int size, int keyframe, int64_t dts, int64_t pts) {
-+ SvtContext *svt_enc = avctx->priv_data;
-+ SvtReceivedFrameStruct* rfs;
-+
-+ if (svt_enc->received_frames_size == 0 || svt_enc->received_frames[svt_enc->received_frames_size - 1].ready_flag) {
-+ ++svt_enc->received_frames_size;
-+ if (svt_enc->received_frames_size > RECIVED_FRAMES_MAX_SIZE) {
-+ av_log(avctx, AV_LOG_ERROR, "Fail: svt_enc->received_frames_size > RECIVED_FRAMES_MAX_SIZE \n");
-+ return AVERROR_BUG;
-+ }
-+
-+ rfs = &svt_enc->received_frames[svt_enc->received_frames_size - 1];
-+
-+ rfs->buf = av_buffer_pool_get(svt_enc->pool);
-+ if (!rfs->buf) {
-+ av_log(avctx, AV_LOG_ERROR, "Failed to allocate output packet.\n");
-+ return AVERROR(ENOMEM);
-+ }
-+
-+ rfs->size = 0;
-+ rfs->flags = 0;
-+ rfs->ready_flag = 0;
-+ rfs->frames_count = 0;
-+ } else {
-+ rfs = &svt_enc->received_frames[svt_enc->received_frames_size - 1];
-+ }
-+
-+ rfs->pts = pts;
-+ rfs->dts = dts;
-+ rfs->flags = (keyframe ? AV_PKT_FLAG_KEY : 0);
-+
-+ ++rfs->frames_count;
-+ if (rfs->frames_count > MAX_VP9_SUPERFRAME_SIZE) {
-+ av_log(avctx, AV_LOG_ERROR, "Fail: rfs->frames_count > MAX_VP9_SUPERFRAME_SIZE \n");
-+ return AVERROR_BUG;
-+ }
-+
-+ rfs->frames_sizes[rfs->frames_count - 1] = size;
-+
-+ memcpy(rfs->buf->data + rfs->size, data, size);
-+ rfs->size += size;
-+
-+ int visible = is_frame_visible(data, size);
-+ if (visible < 0) {
-+ av_log(avctx, AV_LOG_ERROR, "Fail: is_frame_visible \n");
-+ return visible;
-+ }
-+
-+
-+ rfs->ready_flag = visible;
-+
-+ if (rfs->ready_flag) {
-+ if (rfs->dts <= svt_enc->last_ready_dts) {
-+ rfs->dts = svt_enc->last_ready_dts + 1;
-+ }
-+ svt_enc->last_ready_dts = rfs->dts;
-+
-+ }
-+
-+ // add superframe_index if needed
-+ if (rfs->ready_flag && rfs->frames_count > 1) {
-+ // superframe_header:
-+ // 110 - superframe_marker
-+ // 11 = 3 = bytes_per_framesize_minus_1 - use 4-bytes size
-+ // xxx = frames_in_superframe_minus_1
-+ uint8_t header = 0b11011000;
-+ header |= (rfs->frames_count - 1) & 0b111;
-+
-+ uint8_t* ptr = rfs->buf->data + rfs->size;
-+
-+ ptr[0] = header;
-+ ++ptr;
-+
-+ for (int i = 0; i < rfs->frames_count; ++i) {
-+ ptr[0] = (rfs->frames_sizes[i] >> 0) & 0xff;
-+ ptr[1] = (rfs->frames_sizes[i] >> 8) & 0xff;
-+ ptr[2] = (rfs->frames_sizes[i] >> 16) & 0xff;
-+ ptr[3] = (rfs->frames_sizes[i] >> 24) & 0xff;
-+
-+ ptr += 4;
-+ }
-+
-+ ptr[0] = header;
-+ ++ptr;
-+
-+ rfs->size = ptr - rfs->buf->data;
-+ }
-+
-+ return 0;
-+}
-+
-+static int eb_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
-+{
-+ SvtContext *svt_enc = avctx->priv_data;
-+ EbBufferHeaderType *headerPtr;
-+ EbErrorType svt_ret;
-+ AVBufferRef *ref;
-+ int ret = 0;
-+
-+ if (get_received_frame(svt_enc, pkt) == 0) {
-+ return 0;
-+ }
-+
-+ if (EOS_TOTRIGGER == svt_enc->eos_flag) {
-+ pkt = NULL;
-+ return AVERROR_EOF;
-+ }
-+
-+#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(58, 93, 100)
-+ AVFrame *frame = svt_enc->frame;
-+ ret = ff_encode_get_frame(avctx, frame);
-+ if (ret < 0 && ret != AVERROR_EOF) {
-+ return ret;
-+ }
-+ if (ret == AVERROR_EOF)
-+ frame = NULL;
-+
-+ eb_send_frame(avctx, frame);
-+ av_frame_unref(svt_enc->frame);
-+#endif
-+
-+
-+ for (;;) {
-+ svt_ret = eb_vp9_svt_get_packet(svt_enc->svt_handle, &headerPtr, svt_enc->eos_flag);
-+ if (svt_ret == EB_NoErrorEmptyQueue) {
-+ return AVERROR(EAGAIN);
-+ }
-+
-+ if (EB_BUFFERFLAG_EOS & headerPtr->flags)
-+ svt_enc->eos_flag = EOS_TOTRIGGER;
-+
-+ ret = 0;
-+
-+ // ignore headerPtr->dts on purpose
-+
-+ if (headerPtr->flags & EB_BUFFERFLAG_SHOW_EXT) {
-+ ret = put_received_frame(avctx, headerPtr->p_buffer, headerPtr->n_filled_len - 4, 0, headerPtr->pts - 3, headerPtr->pts - 3);
-+ if (ret != 0) goto end;
-+ ret = put_received_frame(avctx, headerPtr->p_buffer + headerPtr->n_filled_len - 4, 1, 0, headerPtr->pts - 2, headerPtr->pts - 2);
-+ if (ret != 0) goto end;
-+ ret = put_received_frame(avctx, headerPtr->p_buffer + headerPtr->n_filled_len - 3, 1, 0, headerPtr->pts - 1, headerPtr->pts - 1);
-+ if (ret != 0) goto end;
-+ ret = put_received_frame(avctx, headerPtr->p_buffer + headerPtr->n_filled_len - 2, 1, 0, headerPtr->pts + 0, headerPtr->pts + 0);
-+ if (ret != 0) goto end;
-+ ret = put_received_frame(avctx, headerPtr->p_buffer + headerPtr->n_filled_len - 1, 1, 0, headerPtr->pts + 1, headerPtr->pts + 1);
-+ if (ret != 0) goto end;
-+ } else {
-+ ret = put_received_frame(avctx, headerPtr->p_buffer, headerPtr->n_filled_len, headerPtr->pic_type == EB_IDR_PICTURE, headerPtr->pts, headerPtr->pts);
-+ if (ret != 0) goto end;
-+ }
-+
-+ ret = get_received_frame(svt_enc, pkt);
-+
-+ end:
-+ eb_vp9_svt_release_out_buffer(&headerPtr);
-+
-+ if (ret == AVERROR(EAGAIN)) {
-+ continue;
-+ }
-+
-+ break;
-+ }
-+
-+
-+
-+ return ret;
-+}
-+
-+static av_cold int eb_enc_close(AVCodecContext *avctx)
-+{
-+ SvtContext *svt_enc = avctx->priv_data;
-+
-+ eb_vp9_deinit_encoder(svt_enc->svt_handle);
-+ eb_vp9_deinit_handle(svt_enc->svt_handle);
-+
-+ av_frame_free(&svt_enc->frame);
-+
-+ free_buffer(svt_enc);
-+
-+ return 0;
-+}
-+
-+#define OFFSET(x) offsetof(SvtContext, x)
-+#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
-+static const AVOption options[] = {
-+ { "preset", "Encoding preset [1, 1]",
-+ OFFSET(enc_mode), AV_OPT_TYPE_INT, { .i64 = 9 }, 0, 9, VE },
-+
-+ { "level", "Set level (level_idc)", OFFSET(level),
-+ AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 0xff, VE, "level" },
-+
-+#define LEVEL(name, value) name, NULL, 0, AV_OPT_TYPE_CONST, \
-+ { .i64 = value }, 0, 0, VE, "level"
-+ { LEVEL("1", 10) },
-+ { LEVEL("2", 20) },
-+ { LEVEL("2.1", 21) },
-+ { LEVEL("3", 30) },
-+ { LEVEL("3.1", 31) },
-+ { LEVEL("4", 40) },
-+ { LEVEL("4.1", 41) },
-+ { LEVEL("5", 50) },
-+ { LEVEL("5.1", 51) },
-+ { LEVEL("5.2", 52) },
-+ { LEVEL("6", 60) },
-+ { LEVEL("6.1", 61) },
-+ { LEVEL("6.2", 62) },
-+#undef LEVEL
-+
-+ { "tune", "Tune mode", OFFSET(tune),
-+ AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 2, VE , "tune"},
-+ { "vq", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, "tune" },
-+ { "ssim", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, "tune" },
-+ { "vmaf", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 2 }, INT_MIN, INT_MAX, VE, "tune" },
-+
-+ { "rc", "Bit rate control mode", OFFSET(rc_mode),
-+ AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 2, VE , "rc"},
-+ { "cqp", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, "rc" },
-+ { "vbr", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, "rc" },
-+ { "cbr", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 2 }, INT_MIN, INT_MAX, VE, "rc" },
-+
-+ { "qp", "QP value for intra frames", OFFSET(qp),
-+ AV_OPT_TYPE_INT, { .i64 = 32 }, 0, 51, VE },
-+
-+ { "socket", "Target CPU socket to use. -1 use all available", OFFSET(target_socket),
-+ AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE },
-+
-+ { "bl_mode", "Random Access Prediction Structure type setting", OFFSET(base_layer_switch_mode),
-+ AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
-+
-+ { "forced-idr", "If forcing keyframes, force them as IDR frames.", OFFSET(forced_idr),
-+ AV_OPT_TYPE_BOOL, { .i64 = 0 }, -1, 1, VE },
-+
-+ {NULL},
-+};
-+
-+static const AVClass class = {
-+ .class_name = "libsvt_vp9",
-+ .item_name = av_default_item_name,
-+ .option = options,
-+ .version = LIBAVUTIL_VERSION_INT,
-+};
-+
-+static const AVCodecDefault eb_enc_defaults[] = {
-+ { "b", "7M" },
-+ { "flags", "-cgop" },
-+ { "qmin", "10" },
-+ { "qmax", "48" },
-+ { NULL },
-+};
-+
-+AVCodec ff_libsvt_vp9_encoder = {
-+ .name = "libsvt_vp9",
-+ .long_name = NULL_IF_CONFIG_SMALL("SVT-VP9(Scalable Video Technology for VP9) encoder"),
-+ .priv_data_size = sizeof(SvtContext),
-+ .type = AVMEDIA_TYPE_VIDEO,
-+ .id = AV_CODEC_ID_VP9,
-+ .init = eb_enc_init,
-+#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 93, 100)
-+ .send_frame = eb_send_frame,
-+#endif
-+ .receive_packet = eb_receive_packet,
-+ .close = eb_enc_close,
-+ .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS,
-+ .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P,
-+ AV_PIX_FMT_NONE },
-+ .priv_class = &class,
-+ .defaults = eb_enc_defaults,
-+ .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
-+ .wrapper_name = "libsvt_vp9",
-+};
---
-2.7.4
-
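
Likewise, the 030 patch deleted above wired up SVT-VP9 as a libsvt_vp9 encoder behind --enable-libsvtvp9, including superframe assembly for invisible frames. A comparable hypothetical invocation on a patched build:

    # -preset 0..9 (default 9), -rc cqp|vbr|cbr
    ffmpeg -i input.y4m -c:v libsvt_vp9 -preset 9 -rc cbr -b:v 2M output.webm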
diff --git a/050-ffmpeg-vmaf-2.x.patch b/050-ffmpeg-vmaf-2.x.patch
deleted file mode 100644
index 6619a99c177f..000000000000
--- a/050-ffmpeg-vmaf-2.x.patch
+++ /dev/null
@@ -1,949 +0,0 @@
---- a/configure
-+++ b/configure
-@@ -3751,7 +3751,7 @@ vaguedenoiser_filter_deps="gpl"
- vflip_vulkan_filter_deps="vulkan spirv_compiler"
- vidstabdetect_filter_deps="libvidstab"
- vidstabtransform_filter_deps="libvidstab"
--libvmaf_filter_deps="libvmaf pthreads"
-+libvmaf_filter_deps="libvmaf"
- zmq_filter_deps="libzmq"
- zoompan_filter_deps="swscale"
- zscale_filter_deps="libzimg const_nan"
-@@ -6626,7 +6626,7 @@ enabled libtwolame && require libtwolame twolame.h twolame_init -ltwolame
- enabled libuavs3d && require_pkg_config libuavs3d "uavs3d >= 1.1.41" uavs3d.h uavs3d_decode
- enabled libv4l2 && require_pkg_config libv4l2 libv4l2 libv4l2.h v4l2_ioctl
- enabled libvidstab && require_pkg_config libvidstab "vidstab >= 0.98" vid.stab/libvidstab.h vsMotionDetectInit
--enabled libvmaf && require_pkg_config libvmaf "libvmaf >= 1.5.2" libvmaf.h compute_vmaf
-+enabled libvmaf && require_pkg_config libvmaf "libvmaf >= 2.0.0" libvmaf.h vmaf_init
- enabled libvo_amrwbenc && require libvo_amrwbenc vo-amrwbenc/enc_if.h E_IF_init -lvo-amrwbenc
- enabled libvorbis && require_pkg_config libvorbis vorbis vorbis/codec.h vorbis_info_init &&
- require_pkg_config libvorbisenc vorbisenc vorbis/vorbisenc.h vorbis_encode_init
---- a/doc/filters.texi
-+++ b/doc/filters.texi
-@@ -14666,68 +14666,60 @@ ffmpeg -i input.mov -vf lensfun=make=Canon:model="Canon EOS 100D":lens_model="Ca
-
- @section libvmaf
-
--Obtain the VMAF (Video Multi-Method Assessment Fusion)
--score between two input videos.
-+Calulate the VMAF (Video Multi-Method Assessment Fusion) score for a
-+reference/distorted pair of input videos.
-
--The first input is the encoded video, and the second input is the reference video.
-+The first input is the distorted video, and the second input is the reference video.
-
- The obtained VMAF score is printed through the logging system.
-
- It requires Netflix's vmaf library (libvmaf) as a pre-requisite.
- After installing the library it can be enabled using:
- @code{./configure --enable-libvmaf}.
--If no model path is specified it uses the default model: @code{vmaf_v0.6.1.pkl}.
-
- The filter has following options:
-
- @table @option
--@item model_path
--Set the model path which is to be used for SVM.
--Default value: @code{"/usr/local/share/model/vmaf_v0.6.1.pkl"}
--
--@item log_path
--Set the file path to be used to store logs.
-+@item model
-+A `|` delimited list of vmaf models. Each model can be configured with a number of parameters.
-+Default value: @code{"version=vmaf_v0.6.1"}
-
--@item log_fmt
--Set the format of the log file (csv, json or xml).
-+@item model_path
-+Deprecated, use model='path=...'.
-
- @item enable_transform
--This option can enable/disable the @code{score_transform} applied to the final predicted VMAF score,
--if you have specified score_transform option in the input parameter file passed to @code{run_vmaf_training.py}
--Default value: @code{false}
-+Deprecated, use model='enable_transform=true'.
-
- @item phone_model
--Invokes the phone model which will generate VMAF scores higher than in the
--regular model, which is more suitable for laptop, TV, etc. viewing conditions.
--Default value: @code{false}
-+Deprecated, use model='enable_transform=true'.
-+
-+@item enable_conf_interval
-+Deprecated, use model='enable_conf_interval=true'.
-+
-+@item feature
-+A `|` delimited list of features. Each feature can be configured with a number of parameters.
-
- @item psnr
--Enables computing psnr along with vmaf.
--Default value: @code{false}
-+Deprecated, use feature='name=psnr'.
-
- @item ssim
--Enables computing ssim along with vmaf.
--Default value: @code{false}
-+Deprecated, use feature='name=ssim'.
-
- @item ms_ssim
--Enables computing ms_ssim along with vmaf.
--Default value: @code{false}
-+Deprecated, use feature='name=ms_ssim'.
-
--@item pool
--Set the pool method to be used for computing vmaf.
--Options are @code{min}, @code{harmonic_mean} or @code{mean} (default).
-+@item log_path
-+Set the file path to be used to store log files.
-+
-+@item log_fmt
-+Set the format of the log file (xml, json, csv, or sub).
-
- @item n_threads
--Set number of threads to be used when computing vmaf.
--Default value: @code{0}, which makes use of all available logical processors.
-+Set number of threads to be used when initializing libvmaf.
-+Default value: @code{0}, no threads.
-
- @item n_subsample
--Set interval for frame subsampling used when computing vmaf.
--Default value: @code{1}
--
--@item enable_conf_interval
--Enables confidence interval.
--Default value: @code{false}
-+Set frame subsampling interval to be used.
- @end table
-
- This filter also supports the @ref{framesync} options.
-@@ -14735,23 +14727,31 @@ This filter also supports the @ref{framesync} options.
- @subsection Examples
- @itemize
- @item
--On the below examples the input file @file{main.mpg} being processed is
--compared with the reference file @file{ref.mpg}.
-+In the examples below, a distorted video @file{distorted.mpg} is
-+compared with a reference file @file{reference.mpg}.
-
-+@item
-+Basic usage:
-+@example
-+ffmpeg -i distorted.mpg -i reference.mpg -lavfi libvmaf=log_path=output.xml -f null -
-+@end example
-+
-+@item
-+Example with multiple models:
- @example
--ffmpeg -i main.mpg -i ref.mpg -lavfi libvmaf -f null -
-+ffmpeg -i distorted.mpg -i reference.mpg -lavfi libvmaf='model=version=vmaf_v0.6.1\\:name=vmaf|version=vmaf_v0.6.1neg\\:name=vmaf_neg' -f null -
- @end example
-
- @item
--Example with options:
-+Example with multiple addtional features:
- @example
--ffmpeg -i main.mpg -i ref.mpg -lavfi libvmaf="psnr=1:log_fmt=json" -f null -
-+ffmpeg -i distorted.mpg -i reference.mpg -lavfi libvmaf='feature=name=psnr|name=ciede' -f null -
- @end example
-
- @item
- Example with options and different containers:
- @example
--ffmpeg -i main.mpg -i ref.mkv -lavfi "[0:v]settb=AVTB,setpts=PTS-STARTPTS[main];[1:v]settb=AVTB,setpts=PTS-STARTPTS[ref];[main][ref]libvmaf=psnr=1:log_fmt=json" -f null -
-+ffmpeg -i distorted.mpg -i reference.mkv -lavfi "[0:v]settb=AVTB,setpts=PTS-STARTPTS[main];[1:v]settb=AVTB,setpts=PTS-STARTPTS[ref];[main][ref]libvmaf=log_fmt=json:log_path=output.json" -f null -
- @end example
- @end itemize
-
---- a/libavfilter/vf_libvmaf.c
-+++ b/libavfilter/vf_libvmaf.c
-@@ -24,8 +24,8 @@
- * Calculate the VMAF between two input videos.
- */
-
--#include <pthread.h>
- #include <libvmaf.h>
-+
- #include "libavutil/avstring.h"
- #include "libavutil/opt.h"
- #include "libavutil/pixdesc.h"
-@@ -39,23 +39,9 @@
- typedef struct LIBVMAFContext {
- const AVClass *class;
- FFFrameSync fs;
-- const AVPixFmtDescriptor *desc;
-- int width;
-- int height;
-- double vmaf_score;
-- int vmaf_thread_created;
-- pthread_t vmaf_thread;
-- pthread_mutex_t lock;
-- pthread_cond_t cond;
-- int eof;
-- AVFrame *gmain;
-- AVFrame *gref;
-- int frame_set;
- char *model_path;
- char *log_path;
- char *log_fmt;
-- int disable_clip;
-- int disable_avx;
- int enable_transform;
- int phone_model;
- int psnr;
-@@ -65,184 +51,487 @@ typedef struct LIBVMAFContext {
- int n_threads;
- int n_subsample;
- int enable_conf_interval;
-- int error;
-+ char *model_cfg;
-+ char *feature_cfg;
-+ VmafContext *vmaf;
-+ VmafModel **model;
-+ unsigned model_cnt;
-+ unsigned frame_cnt;
-+ unsigned bpc;
- } LIBVMAFContext;
-
- #define OFFSET(x) offsetof(LIBVMAFContext, x)
- #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
-
- static const AVOption libvmaf_options[] = {
-- {"model_path", "Set the model to be used for computing vmaf.", OFFSET(model_path), AV_OPT_TYPE_STRING, {.str="/usr/local/share/model/vmaf_v0.6.1.pkl"}, 0, 1, FLAGS},
-- {"log_path", "Set the file path to be used to store logs.", OFFSET(log_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
-- {"log_fmt", "Set the format of the log (csv, json or xml).", OFFSET(log_fmt), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
-- {"enable_transform", "Enables transform for computing vmaf.", OFFSET(enable_transform), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
-- {"phone_model", "Invokes the phone model that will generate higher VMAF scores.", OFFSET(phone_model), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
-- {"psnr", "Enables computing psnr along with vmaf.", OFFSET(psnr), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
-- {"ssim", "Enables computing ssim along with vmaf.", OFFSET(ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
-- {"ms_ssim", "Enables computing ms-ssim along with vmaf.", OFFSET(ms_ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
-+ {"model_path", "use model='path=...'.", OFFSET(model_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
-+ {"log_path", "Set the file path to be used to write log.", OFFSET(log_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
-+ {"log_fmt", "Set the format of the log (csv, json, xml, or sub).", OFFSET(log_fmt), AV_OPT_TYPE_STRING, {.str="xml"}, 0, 1, FLAGS},
-+ {"enable_transform", "use model='enable_transform=true'.", OFFSET(enable_transform), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
-+ {"phone_model", "use model='enable_transform=true'.", OFFSET(phone_model), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
-+ {"psnr", "use feature='name=psnr'.", OFFSET(psnr), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
-+ {"ssim", "use feature='name=ssim'.", OFFSET(ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
-+ {"ms_ssim", "use feature='name=ms_ssim'.", OFFSET(ms_ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
- {"pool", "Set the pool method to be used for computing vmaf.", OFFSET(pool), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
- {"n_threads", "Set number of threads to be used when computing vmaf.", OFFSET(n_threads), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT_MAX, FLAGS},
- {"n_subsample", "Set interval for frame subsampling used when computing vmaf.", OFFSET(n_subsample), AV_OPT_TYPE_INT, {.i64=1}, 1, UINT_MAX, FLAGS},
-- {"enable_conf_interval", "Enables confidence interval.", OFFSET(enable_conf_interval), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
-+ {"enable_conf_interval", "model='enable_conf_interval=true'.", OFFSET(enable_conf_interval), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
-+ {"model", "Set the model to be used for computing vmaf.", OFFSET(model_cfg), AV_OPT_TYPE_STRING, {.str="version=vmaf_v0.6.1"}, 0, 1, FLAGS},
-+ {"feature", "Set the feature to be used for computing vmaf.", OFFSET(feature_cfg), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
- { NULL }
- };
-
- FRAMESYNC_DEFINE_CLASS(libvmaf, LIBVMAFContext, fs);
-
--#define read_frame_fn(type, bits) \
-- static int read_frame_##bits##bit(float *ref_data, float *main_data, \
-- float *temp_data, int stride, void *ctx) \
--{ \
-- LIBVMAFContext *s = (LIBVMAFContext *) ctx; \
-- int ret; \
-- \
-- pthread_mutex_lock(&s->lock); \
-- \
-- while (!s->frame_set && !s->eof) { \
-- pthread_cond_wait(&s->cond, &s->lock); \
-- } \
-- \
-- if (s->frame_set) { \
-- int ref_stride = s->gref->linesize[0]; \
-- int main_stride = s->gmain->linesize[0]; \
-- \
-- const type *ref_ptr = (const type *) s->gref->data[0]; \
-- const type *main_ptr = (const type *) s->gmain->data[0]; \
-- \
-- float *ptr = ref_data; \
-- float factor = 1.f / (1 << (bits - 8)); \
-- \
-- int h = s->height; \
-- int w = s->width; \
-- \
-- int i,j; \
-- \
-- for (i = 0; i < h; i++) { \
-- for ( j = 0; j < w; j++) { \
-- ptr[j] = ref_ptr[j] * factor; \
-- } \
-- ref_ptr += ref_stride / sizeof(*ref_ptr); \
-- ptr += stride / sizeof(*ptr); \
-- } \
-- \
-- ptr = main_data; \
-- \
-- for (i = 0; i < h; i++) { \
-- for (j = 0; j < w; j++) { \
-- ptr[j] = main_ptr[j] * factor; \
-- } \
-- main_ptr += main_stride / sizeof(*main_ptr); \
-- ptr += stride / sizeof(*ptr); \
-- } \
-- } \
-- \
-- ret = !s->frame_set; \
-- \
-- av_frame_unref(s->gref); \
-- av_frame_unref(s->gmain); \
-- s->frame_set = 0; \
-- \
-- pthread_cond_signal(&s->cond); \
-- pthread_mutex_unlock(&s->lock); \
-- \
-- if (ret) { \
-- return 2; \
-- } \
-- \
-- return 0; \
-+static enum VmafPixelFormat pix_fmt_map(enum AVPixelFormat av_pix_fmt)
-+{
-+ switch (av_pix_fmt) {
-+ case AV_PIX_FMT_YUV420P:
-+ case AV_PIX_FMT_YUV420P10LE:
-+ case AV_PIX_FMT_YUV420P12LE:
-+ case AV_PIX_FMT_YUV420P16LE:
-+ return VMAF_PIX_FMT_YUV420P;
-+ case AV_PIX_FMT_YUV422P:
-+ case AV_PIX_FMT_YUV422P10LE:
-+ case AV_PIX_FMT_YUV422P12LE:
-+ case AV_PIX_FMT_YUV422P16LE:
-+ return VMAF_PIX_FMT_YUV422P;
-+ case AV_PIX_FMT_YUV444P:
-+ case AV_PIX_FMT_YUV444P10LE:
-+ case AV_PIX_FMT_YUV444P12LE:
-+ case AV_PIX_FMT_YUV444P16LE:
-+ return VMAF_PIX_FMT_YUV444P;
-+ default:
-+ return VMAF_PIX_FMT_UNKNOWN;
-+ }
- }
-
--read_frame_fn(uint8_t, 8);
--read_frame_fn(uint16_t, 10);
-+static int copy_picture_data(AVFrame *src, VmafPicture *dst, unsigned bpc)
-+{
-+ int err = vmaf_picture_alloc(dst, pix_fmt_map(src->format), bpc,
-+ src->width, src->height);
-+ if (err)
-+ return AVERROR(ENOMEM);
-+
-+ for (unsigned i = 0; i < 3; i++) {
-+ uint8_t *src_data = src->data[i];
-+ uint8_t *dst_data = dst->data[i];
-+ for (unsigned j = 0; j < dst->h[i]; j++) {
-+ memcpy(dst_data, src_data, sizeof(*dst_data) * dst->w[i]);
-+ src_data += src->linesize[i];
-+ dst_data += dst->stride[i];
-+ }
-+ }
-+
-+ return 0;
-+}
-
--static void compute_vmaf_score(LIBVMAFContext *s)
-+static int do_vmaf(FFFrameSync *fs)
- {
-- int (*read_frame)(float *ref_data, float *main_data, float *temp_data,
-- int stride, void *ctx);
-- char *format;
-+ AVFilterContext *ctx = fs->parent;
-+ LIBVMAFContext *s = ctx->priv;
-+ VmafPicture pic_ref, pic_dist;
-+ AVFrame *ref, *dist;
-+ int err = 0;
-
-- if (s->desc->comp[0].depth <= 8) {
-- read_frame = read_frame_8bit;
-- } else {
-- read_frame = read_frame_10bit;
-+ int ret = ff_framesync_dualinput_get(fs, &dist, &ref);
-+ if (ret < 0)
-+ return ret;
-+ if (ctx->is_disabled || !ref)
-+ return ff_filter_frame(ctx->outputs[0], dist);
-+
-+ err = copy_picture_data(ref, &pic_ref, s->bpc);
-+ if (err) {
-+ av_log(s, AV_LOG_ERROR, "problem during vmaf_picture_alloc.\n");
-+ return AVERROR(ENOMEM);
-+ }
-+
-+ err = copy_picture_data(dist, &pic_dist, s->bpc);
-+ if (err) {
-+ av_log(s, AV_LOG_ERROR, "problem during vmaf_picture_alloc.\n");
-+ vmaf_picture_unref(&pic_ref);
-+ return AVERROR(ENOMEM);
- }
-
-- format = (char *) s->desc->name;
-+ err = vmaf_read_pictures(s->vmaf, &pic_ref, &pic_dist, s->frame_cnt++);
-+ if (err) {
-+ av_log(s, AV_LOG_ERROR, "problem during vmaf_read_pictures.\n");
-+ return AVERROR(EINVAL);
-+ }
-
-- s->error = compute_vmaf(&s->vmaf_score, format, s->width, s->height,
-- read_frame, s, s->model_path, s->log_path,
-- s->log_fmt, 0, 0, s->enable_transform,
-- s->phone_model, s->psnr, s->ssim,
-- s->ms_ssim, s->pool,
-- s->n_threads, s->n_subsample, s->enable_conf_interval);
-+ return ff_filter_frame(ctx->outputs[0], dist);
- }
-
--static void *call_vmaf(void *ctx)
-+
-+static AVDictionary **delimited_dict_parse(char *str, unsigned *cnt)
- {
-- LIBVMAFContext *s = (LIBVMAFContext *) ctx;
-- compute_vmaf_score(s);
-- if (!s->error) {
-- av_log(ctx, AV_LOG_INFO, "VMAF score: %f\n",s->vmaf_score);
-- } else {
-- pthread_mutex_lock(&s->lock);
-- pthread_cond_signal(&s->cond);
-- pthread_mutex_unlock(&s->lock);
-+ AVDictionary **dict = NULL;
-+ char *str_copy = NULL;
-+ char *saveptr = NULL;
-+ unsigned cnt2;
-+ int err = 0;
-+
-+ if (!str)
-+ return NULL;
-+
-+ cnt2 = 1;
-+ for (char *p = str; *p; p++) {
-+ if (*p == '|')
-+ cnt2++;
-+ }
-+
-+ dict = av_calloc(cnt2, sizeof(*dict));
-+ if (!dict)
-+ goto fail;
-+
-+ str_copy = av_strdup(str);
-+ if (!str_copy)
-+ goto fail;
-+
-+ *cnt = 0;
-+ for (unsigned i = 0; i < cnt2; i++) {
-+ char *s = av_strtok(i == 0 ? str_copy : NULL, "|", &saveptr);
-+ if (!s)
-+ continue;
-+ err = av_dict_parse_string(&dict[(*cnt)++], s, "=", ":", 0);
-+ if (err)
-+ goto fail;
-+ }
-+
-+ av_free(str_copy);
-+ return dict;
-+
-+fail:
-+ if (dict) {
-+ for (unsigned i = 0; i < *cnt; i++) {
-+ if (dict[i])
-+ av_dict_free(&dict[i]);
-+ }
-+ av_free(dict);
- }
-- pthread_exit(NULL);
-+
-+ av_free(str_copy);
-+ *cnt = 0;
- return NULL;
- }
-
--static int do_vmaf(FFFrameSync *fs)
-+static int parse_features(AVFilterContext *ctx)
- {
-- AVFilterContext *ctx = fs->parent;
- LIBVMAFContext *s = ctx->priv;
-- AVFrame *master, *ref;
-- int ret;
-+ AVDictionary **dict = NULL;
-+ unsigned dict_cnt;
-+ int err = 0;
-
-- ret = ff_framesync_dualinput_get(fs, &master, &ref);
-- if (ret < 0)
-- return ret;
-- if (!ref)
-- return ff_filter_frame(ctx->outputs[0], master);
-+ if (!s->feature_cfg)
-+ return 0;
-+
-+ dict = delimited_dict_parse(s->feature_cfg, &dict_cnt);
-+ if (!dict) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "could not parse feature config: %s\n", s->feature_cfg);
-+ return AVERROR(EINVAL);
-+ }
-
-- pthread_mutex_lock(&s->lock);
-+ for (unsigned i = 0; i < dict_cnt; i++) {
-+ char *feature_name = NULL;
-+ VmafFeatureDictionary *feature_opts_dict = NULL;
-+ AVDictionaryEntry *e = NULL;
-+
-+ while (e = av_dict_get(dict[i], "", e, AV_DICT_IGNORE_SUFFIX)) {
-+ if (av_stristr(e->key, "name")) {
-+ feature_name = e->value;
-+ continue;
-+ }
-+
-+ err = vmaf_feature_dictionary_set(&feature_opts_dict, e->key,
-+ e->value);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "could not set feature option: %s.%s=%s\n",
-+ feature_name, e->key, e->value);
-+ goto exit;
-+ }
-+ }
-+
-+ err = vmaf_use_feature(s->vmaf, feature_name, feature_opts_dict);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem during vmaf_use_feature: %s\n", feature_name);
-+ goto exit;
-+ }
-+ }
-
-- while (s->frame_set && !s->error) {
-- pthread_cond_wait(&s->cond, &s->lock);
-+exit:
-+ for (unsigned i = 0; i < dict_cnt; i++) {
-+ if (dict[i])
-+ av_dict_free(&dict[i]);
- }
-+ av_free(dict);
-+ return err;
-+}
-+
-+static int parse_models(AVFilterContext *ctx)
-+{
-+ LIBVMAFContext *s = ctx->priv;
-+ AVDictionary **dict;
-+ unsigned dict_cnt;
-+ int err = 0;
-+
-+ if (!s->model_cfg) return 0;
-
-- if (s->error) {
-+ dict_cnt = 0;
-+ dict = delimited_dict_parse(s->model_cfg, &dict_cnt);
-+ if (!dict) {
- av_log(ctx, AV_LOG_ERROR,
-- "libvmaf encountered an error, check log for details\n");
-- pthread_mutex_unlock(&s->lock);
-+ "could not parse model config: %s\n", s->model_cfg);
- return AVERROR(EINVAL);
- }
-
-- av_frame_ref(s->gref, ref);
-- av_frame_ref(s->gmain, master);
-+ s->model_cnt = dict_cnt;
-+ s->model = av_calloc(s->model_cnt, sizeof(*s->model));
-+ if (!s->model)
-+ return AVERROR(ENOMEM);
-+
-+ for (unsigned i = 0; i < dict_cnt; i++) {
-+ VmafModelConfig model_cfg = { 0 };
-+ AVDictionaryEntry *e = NULL;
-+ char *version = NULL;
-+ char *path = NULL;
-+
-+ while (e = av_dict_get(dict[i], "", e, AV_DICT_IGNORE_SUFFIX)) {
-+ if (av_stristr(e->key, "disable_clip")) {
-+ model_cfg.flags |= av_stristr(e->value, "true") ?
-+ VMAF_MODEL_FLAG_DISABLE_CLIP : 0;
-+ continue;
-+ }
-+
-+ if (av_stristr(e->key, "enable_transform")) {
-+ model_cfg.flags |= av_stristr(e->value, "true") ?
-+ VMAF_MODEL_FLAG_ENABLE_TRANSFORM : 0;
-+ continue;
-+ }
-+
-+ if (av_stristr(e->key, "name")) {
-+ model_cfg.name = e->value;
-+ continue;
-+ }
-+
-+ if (av_stristr(e->key, "version")) {
-+ version = e->value;
-+ continue;
-+ }
-+
-+ if (av_stristr(e->key, "path")) {
-+ path = e->value;
-+ continue;
-+ }
-+ }
-+
-+ if (version) {
-+ err = vmaf_model_load(&s->model[i], &model_cfg, version);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "could not load libvmaf model with version: %s\n",
-+ version);
-+ goto exit;
-+ }
-+ }
-+
-+ if (path && !s->model[i]) {
-+ err = vmaf_model_load_from_path(&s->model[i], &model_cfg, path);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "could not load libvmaf model with path: %s\n",
-+ path);
-+ goto exit;
-+ }
-+ }
-+
-+ if (!s->model[i]) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "could not load libvmaf model with config: %s\n",
-+ s->model_cfg);
-+ goto exit;
-+ }
-+
-+ while (e = av_dict_get(dict[i], "", e, AV_DICT_IGNORE_SUFFIX)) {
-+ VmafFeatureDictionary *feature_opts_dict = NULL;
-+ char *feature_opt = NULL;
-+
-+ char *feature_name = av_strtok(e->key, ".", &feature_opt);
-+ if (!feature_opt)
-+ continue;
-+
-+ err = vmaf_feature_dictionary_set(&feature_opts_dict,
-+ feature_opt, e->value);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "could not set feature option: %s.%s=%s\n",
-+ feature_name, feature_opt, e->value);
-+ err = AVERROR(EINVAL);
-+ goto exit;
-+ }
-+
-+ err = vmaf_model_feature_overload(s->model[i], feature_name,
-+ feature_opts_dict);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "could not overload feature: %s\n", feature_name);
-+ err = AVERROR(EINVAL);
-+ goto exit;
-+ }
-+ }
-+ }
-+
-+ for (unsigned i = 0; i < s->model_cnt; i++) {
-+ err = vmaf_use_features_from_model(s->vmaf, s->model[i]);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem during vmaf_use_features_from_model\n");
-+ err = AVERROR(EINVAL);
-+ goto exit;
-+ }
-+ }
-+
-+exit:
-+ for (unsigned i = 0; i < dict_cnt; i++) {
-+ if (dict[i])
-+ av_dict_free(&dict[i]);
-+ }
-+ av_free(dict);
-+ return err;
-+}
-+
-+static enum VmafLogLevel log_level_map(int log_level)
-+{
-+ switch (log_level) {
-+ case AV_LOG_QUIET:
-+ return VMAF_LOG_LEVEL_NONE;
-+ case AV_LOG_ERROR:
-+ return VMAF_LOG_LEVEL_ERROR;
-+ case AV_LOG_WARNING:
-+ return VMAF_LOG_LEVEL_WARNING;
-+ case AV_LOG_INFO:
-+ return VMAF_LOG_LEVEL_INFO;
-+ case AV_LOG_DEBUG:
-+ return VMAF_LOG_LEVEL_DEBUG;
-+ default:
-+ return VMAF_LOG_LEVEL_INFO;
-+ }
-+}
-+
-+static int parse_deprecated_options(AVFilterContext *ctx)
-+{
-+ LIBVMAFContext *s = ctx->priv;
-+ VmafModel *model = NULL;
-+ VmafModelCollection *model_collection = NULL;
-+ enum VmafModelFlags flags = VMAF_MODEL_FLAGS_DEFAULT;
-+ int err = 0;
-+
-+ VmafModelConfig model_cfg = {
-+ .name = "vmaf",
-+ .flags = flags,
-+ };
-+
-+ if (s->enable_transform || s->phone_model)
-+ flags |= VMAF_MODEL_FLAG_ENABLE_TRANSFORM;
-+
-+ if (!s->model_path)
-+ goto extra_metrics_only;
-+
-+ if (s->enable_conf_interval) {
-+ err = vmaf_model_collection_load_from_path(&model, &model_collection,
-+ &model_cfg, s->model_path);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem loading model file: %s\n", s->model_path);
-+ goto exit;
-+ }
-+
-+ err = vmaf_use_features_from_model_collection(s->vmaf, model_collection);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem loading feature extractors from model file: %s\n",
-+ s->model_path);
-+ goto exit;
-+ }
-+ } else {
-+ err = vmaf_model_load_from_path(&model, &model_cfg, s->model_path);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem loading model file: %s\n", s->model_path);
-+ goto exit;
-+ }
-+ err = vmaf_use_features_from_model(s->vmaf, model);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem loading feature extractors from model file: %s\n",
-+ s->model_path);
-+ goto exit;
-+ }
-+ }
-+
-+extra_metrics_only:
-+ if (s->psnr) {
-+ VmafFeatureDictionary *d = NULL;
-+ vmaf_feature_dictionary_set(&d, "enable_chroma", "false");
-+
-+ err = vmaf_use_feature(s->vmaf, "psnr", d);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem loading feature extractor: psnr\n");
-+ goto exit;
-+ }
-+ }
-
-- s->frame_set = 1;
-+ if (s->ssim) {
-+ err = vmaf_use_feature(s->vmaf, "float_ssim", NULL);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem loading feature extractor: ssim\n");
-+ goto exit;
-+ }
-+ }
-
-- pthread_cond_signal(&s->cond);
-- pthread_mutex_unlock(&s->lock);
-+ if (s->ms_ssim) {
-+ err = vmaf_use_feature(s->vmaf, "float_ms_ssim", NULL);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem loading feature extractor: ms_ssim\n");
-+ goto exit;
-+ }
-+ }
-
-- return ff_filter_frame(ctx->outputs[0], master);
-+exit:
-+ return err;
- }
-
- static av_cold int init(AVFilterContext *ctx)
- {
- LIBVMAFContext *s = ctx->priv;
-+ int err = 0;
-
-- s->gref = av_frame_alloc();
-- s->gmain = av_frame_alloc();
-- if (!s->gref || !s->gmain)
-- return AVERROR(ENOMEM);
-+ VmafConfiguration cfg = {
-+ .log_level = log_level_map(av_log_get_level()),
-+ .n_subsample = s->n_subsample,
-+ .n_threads = s->n_threads,
-+ };
-+
-+ err = vmaf_init(&s->vmaf, cfg);
-+ if (err)
-+ return AVERROR(EINVAL);
-+
-+ err = parse_deprecated_options(ctx);
-+ if (err)
-+ return err;
-
-- s->error = 0;
-+ err = parse_models(ctx);
-+ if (err)
-+ return err;
-
-- s->vmaf_thread_created = 0;
-- pthread_mutex_init(&s->lock, NULL);
-- pthread_cond_init (&s->cond, NULL);
-+ err = parse_features(ctx);
-+ if (err)
-+ return err;
-
- s->fs.on_event = do_vmaf;
- return 0;
-@@ -256,26 +545,31 @@ static const enum AVPixelFormat pix_fmts[] = {
-
- static int config_input_ref(AVFilterLink *inlink)
- {
-- AVFilterContext *ctx = inlink->dst;
-+ AVFilterContext *ctx = inlink->dst;
- LIBVMAFContext *s = ctx->priv;
-- int th;
-+ const AVPixFmtDescriptor *desc;
-+ int err = 0;
-
-- if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
-- ctx->inputs[0]->h != ctx->inputs[1]->h) {
-- av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
-- return AVERROR(EINVAL);
-+ if (ctx->inputs[0]->w != ctx->inputs[1]->w) {
-+ av_log(ctx, AV_LOG_ERROR, "input width must match.\n");
-+ err |= AVERROR(EINVAL);
- }
-
-- s->desc = av_pix_fmt_desc_get(inlink->format);
-- s->width = ctx->inputs[0]->w;
-- s->height = ctx->inputs[0]->h;
-+ if (ctx->inputs[0]->h != ctx->inputs[1]->h) {
-+ av_log(ctx, AV_LOG_ERROR, "input height must match.\n");
-+ err |= AVERROR(EINVAL);
-+ }
-
-- th = pthread_create(&s->vmaf_thread, NULL, call_vmaf, (void *) s);
-- if (th) {
-- av_log(ctx, AV_LOG_ERROR, "Thread creation failed.\n");
-- return AVERROR(EINVAL);
-+ if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
-+ av_log(ctx, AV_LOG_ERROR, "input pix_fmt must match.\n");
-+ err |= AVERROR(EINVAL);
- }
-- s->vmaf_thread_created = 1;
-+
-+ if (err)
-+ return err;
-+
-+ desc = av_pix_fmt_desc_get(inlink->format);
-+ s->bpc = desc->comp[0].depth;
-
- return 0;
- }
-@@ -307,28 +601,80 @@ static int activate(AVFilterContext *ctx)
- return ff_framesync_activate(&s->fs);
- }
-
-+static enum VmafOutputFormat log_fmt_map(const char *log_fmt)
-+{
-+ if (log_fmt) {
-+ if (av_stristr(log_fmt, "xml"))
-+ return VMAF_OUTPUT_FORMAT_XML;
-+ if (av_stristr(log_fmt, "json"))
-+ return VMAF_OUTPUT_FORMAT_JSON;
-+ if (av_stristr(log_fmt, "csv"))
-+ return VMAF_OUTPUT_FORMAT_CSV;
-+ if (av_stristr(log_fmt, "sub"))
-+ return VMAF_OUTPUT_FORMAT_SUB;
-+ }
-+
-+ return VMAF_OUTPUT_FORMAT_XML;
-+}
-+
-+static enum VmafPoolingMethod pool_method_map(const char *pool_method)
-+{
-+ if (pool_method) {
-+ if (av_stristr(pool_method, "min"))
-+ return VMAF_POOL_METHOD_MIN;
-+ if (av_stristr(pool_method, "mean"))
-+ return VMAF_POOL_METHOD_MEAN;
-+ if (av_stristr(pool_method, "harmonic_mean"))
-+ return VMAF_POOL_METHOD_HARMONIC_MEAN;
-+ }
-+
-+ return VMAF_POOL_METHOD_MEAN;
-+}
-+
- static av_cold void uninit(AVFilterContext *ctx)
- {
- LIBVMAFContext *s = ctx->priv;
-+ int err = 0;
-
- ff_framesync_uninit(&s->fs);
-
-- pthread_mutex_lock(&s->lock);
-- s->eof = 1;
-- pthread_cond_signal(&s->cond);
-- pthread_mutex_unlock(&s->lock);
-+ if (!s->frame_cnt)
-+ goto clean_up;
-
-- if (s->vmaf_thread_created)
-- {
-- pthread_join(s->vmaf_thread, NULL);
-- s->vmaf_thread_created = 0;
-+ err = vmaf_read_pictures(s->vmaf, NULL, NULL, 0);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem flushing libvmaf context.\n");
- }
-
-- av_frame_free(&s->gref);
-- av_frame_free(&s->gmain);
-+ for (unsigned i = 0; i < s->model_cnt; i++) {
-+ double vmaf_score;
-+ err = vmaf_score_pooled(s->vmaf, s->model[i], pool_method_map(s->pool),
-+ &vmaf_score, 0, s->frame_cnt - 1);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem getting pooled vmaf score.\n");
-+ }
-+
-+ av_log(ctx, AV_LOG_INFO, "VMAF score: %f\n", vmaf_score);
-+ }
-+
-+ if (s->vmaf) {
-+ if (s->log_path && !err)
-+ vmaf_write_output(s->vmaf, s->log_path, log_fmt_map(s->log_fmt));
-+ }
-+
-+clean_up:
-+ if (s->model) {
-+ for (unsigned i = 0; i < s->model_cnt; i++) {
-+ if (s->model[i])
-+ vmaf_model_destroy(s->model[i]);
-+ }
-+ av_free(s->model);
-+ }
-
-- pthread_mutex_destroy(&s->lock);
-- pthread_cond_destroy(&s->cond);
-+ if (s->vmaf)
-+ vmaf_close(s->vmaf);
- }
-
- static const AVFilterPad libvmaf_inputs[] = {
---
-2.20.1
-
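Dropping 050-ffmpeg-vmaf-2.x.patch leaves the package relying on FFmpeg's own libvmaf integration, presumably because the n5.1 tag already carries the rewrite the patch back-ported. As the hunks above show, that rewrite replaces the pthread-based compute_vmaf() path with the libvmaf 2.x context API (vmaf_init, vmaf_read_pictures, vmaf_score_pooled, vmaf_write_output, vmaf_close) and deprecates the per-metric filter options in favor of model= and feature=. A rough before/after of the filter syntax, assuming an ffmpeg build configured with --enable-libvmaf (file names are placeholders):

  # old, now-deprecated option style
  ffmpeg -i distorted.mpg -i reference.mpg -lavfi libvmaf="psnr=1:log_fmt=json" -f null -
  # equivalent with the libvmaf 2.x option names
  ffmpeg -i distorted.mpg -i reference.mpg -lavfi libvmaf="feature=name=psnr:log_fmt=json" -f null -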
diff --git a/PKGBUILD b/PKGBUILD
index abb1d42a5903..3143480bd921 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -1,6 +1,6 @@
# Maintainer : eggz
pkgname=ffmpeg-nocuda
-pkgver=5.0.1
+pkgver=5.1
gitver=n${pkgver}
pkgrel=3
pkgdesc='Complete solution to record, convert and stream audio and video (without proprietary Nvidia blobs)'
@@ -73,14 +73,9 @@ provides=('libavcodec.so' 'libavdevice.so' 'libavfilter.so' 'libavformat.so'
'ffmpeg')
conflicts=('ffmpeg')
source=("git+https://git.ffmpeg.org/ffmpeg.git#tag=$gitver"
- 010-ffmpeg-lavc-svt_hevc-add-libsvt-hevc-encoder-wrapper.patch
- 030-ffmpeg-Add-ability-for-ffmpeg-to-run-svt-vp9.patch
040-ffmpeg-add-av_stream_get_first_dts-for-chromium.patch
- 050-ffmpeg-vmaf-2.x.patch
)
-sha256sums=('SKIP'
-'SKIP'
-'SKIP'
+sha256sums=(
'SKIP'
'SKIP'
)
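
With every checksum left as SKIP, makepkg verifies nothing about the sources; the git tag in the source array is what pins the version. A typical way to build and install the updated package — a sketch using the standard AUR clone URL for this pkgname, assuming git and the base-devel group are installed:

  git clone https://aur.archlinux.org/ffmpeg-nocuda.git
  cd ffmpeg-nocuda
  makepkg -si   # fetch the n5.1 tag, build, install dependencies and the package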