author     Denis    2023-12-10 16:19:26 +0100
committer  Denis    2023-12-10 21:45:43 +0100
commit     6ae54c5c2eabeb8a3cc033149795c245b7f2c53a (patch)
tree       2e7d8c808aa7ec5cc2731686ab2a7b772bdd60a2
parent     c1a11ef1bf6c1462a2f4f1a50e0c73818e7548c6 (diff)
download   aur-6ae54c5c2eabeb8a3cc033149795c245b7f2c53a.tar.gz
Update the package PKGBUILD to follow upstream while still disabling unused hardware and enabling hardware encoding/decoding features
-rw-r--r--  .SRCINFO        |  224
-rw-r--r--  PKGBUILD        |  554
-rw-r--r--  gamescope.patch | 6572
3 files changed, 7204 insertions(+), 146 deletions(-)
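
As background, this split package builds like any other AUR package; a minimal sketch, assuming a multilib-enabled system and that the matching 64-bit amdonly-gaming-* packages are already built and installable (the lib32 packages listed in .SRCINFO depend on them):

    git clone https://aur.archlinux.org/lib32-mesa-amdonly-gaming-git.git
    cd lib32-mesa-amdonly-gaming-git
    makepkg -si   # fetch sources, build every split package, then install
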
diff --git a/.SRCINFO b/.SRCINFO
index 88e03a9ef2c7..e4af9287974d 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,63 +1,203 @@
pkgbase = lib32-mesa-amdonly-gaming-git
- pkgdesc = an open-source implementation of the OpenGL specification, git version
- pkgver = 23.2.0_devel.173976.d3662ba461e.d41d8cd98f00b204e9800998ecf8427e
+ pkgdesc = An open-source implementation of the OpenGL specification (32-bit)
+ pkgver = 24.0.0_devel.181988.9ca9b674462.d954e6c2b60b8d8c955b77483af4bdfb
pkgrel = 1
- url = https://www.mesa3d.org
+ url = https://www.mesa3d.org/
arch = x86_64
license = custom
- makedepends = python-mako
- makedepends = lib32-libxml2
- makedepends = lib32-libx11
- makedepends = xorgproto
- makedepends = lib32-libvdpau
- makedepends = git
+ makedepends = lib32-clang
+ makedepends = lib32-expat
+ makedepends = lib32-libdrm
+ makedepends = lib32-libelf
makedepends = lib32-libglvnd
- makedepends = wayland-protocols
- makedepends = meson
makedepends = lib32-libva
+ makedepends = lib32-libvdpau
+ makedepends = lib32-libx11
+ makedepends = lib32-libxdamage
+ makedepends = lib32-libxml2
makedepends = lib32-libxrandr
+ makedepends = lib32-libxshmfence
+ makedepends = lib32-libxxf86vm
makedepends = lib32-llvm
- makedepends = lib32-clang
- depends = mesa-amdonly-gaming-git
- depends = lib32-gcc-libs
+ makedepends = lib32-lm_sensors
+ makedepends = lib32-rust-libs
+ makedepends = lib32-spirv-llvm-translator
+ makedepends = lib32-spirv-tools
+ makedepends = lib32-systemd
+ makedepends = lib32-vulkan-icd-loader
+ makedepends = lib32-wayland
+ makedepends = lib32-zstd
+ makedepends = git
+ makedepends = clang
+ makedepends = cmake
+ makedepends = elfutils
+ makedepends = glslang
+ makedepends = libclc
+ makedepends = meson>=1.3.0
+ makedepends = python-mako
+ makedepends = python-ply
+ makedepends = rust-bindgen
+ makedepends = wayland-protocols
+ makedepends = xorgproto
+ options = !lto
+ source = mesa::git+https://gitlab.freedesktop.org/mesa/mesa.git#branch=main
+ source = LICENSE
+ source = llvm32.native
+ source = gamescope.patch
+ validpgpkeys = 8703B6700E7EE06D7A39B8D6EDAE37B02CEB490D
+ validpgpkeys = 946D09B5E4C9845E63075FF1D961C596A7203456
+ validpgpkeys = E3E8F480C52ADD73B278EE78E1ECBE07D7D70895
+ validpgpkeys = A5CC9FEC93F2F837CB044912336909B6B25FADFA
+ validpgpkeys = 71C4B75620BC75708B4BDB254C95FAAB3EB073EC
+ validpgpkeys = 57551DE15B968F6341C248F68D8E31AFC32428A6
+ b2sums = SKIP
+ b2sums = cc60238726b35133b5b729fb4ed1e76e04136588533615d84b4a54656d5b41727d5e7ff06ef4de3eb102eed6669d6c5c5cb8ac9fbdf6fc25aa477877c5c3ba87
+ b2sums = f84a2ec7aa5cd0e3efcfee69897e3da5c6c7031a07b7c93b43f1a709cc0ae0fd312c56c659e7cb7f2e276cbe49be6e586f117320404d2978b09ae0969d919eaf
+ b2sums = 6219a0797724ab17b36f85a134349ffa52f3fce74490834c5f86b474ba893cbb34ebef604cd72c851436296add906dbdb8fca9eb50d1e5f74c7caa8e2963bdcc
+
+pkgname = lib32-amdonly-gaming-vulkan-mesa-layers
+ pkgdesc = Mesa's Vulkan layers (32-bit)
depends = lib32-libdrm
+ depends = lib32-libxcb
depends = lib32-wayland
- depends = lib32-libxxf86vm
- depends = lib32-libxdamage
+ depends = amdonly-gaming-vulkan-mesa-layers
+ conflicts = lib32-vulkan-mesa-layers
+ conflicts = lib32-vulkan-mesa-layer
+ replaces = lib32-vulkan-mesa-layers
+ replaces = lib32-vulkan-mesa-layer
+
+pkgname = lib32-amdonly-gaming-opencl-clover-mesa
+ pkgdesc = OpenCL support with clover for mesa drivers (32-bit)
+ depends = lib32-clang
+ depends = lib32-expat
+ depends = lib32-libdrm
+ depends = lib32-libelf
+ depends = lib32-spirv-llvm-translator
+ depends = lib32-zstd
+ depends = libclc
+ depends = amdonly-gaming-opencl-clover-mesa
+ optdepends = opencl-headers: headers necessary for OpenCL development
+ provides = lib32-opencl-clover-mesa
+ provides = lib32-opencl-driver
+ conflicts = lib32-opencl-clover-mesa
+ conflicts = lib32-opencl-mesa
+ replaces = lib32-opencl-clover-mesa
+ replaces = lib32-opencl-mesa<=23.1.4-1
+
+pkgname = lib32-amdonly-gaming-opencl-rusticl-mesa
+ pkgdesc = OpenCL support with rusticl for mesa drivers (32-bit)
+ depends = lib32-clang
+ depends = lib32-expat
+ depends = lib32-libdrm
+ depends = lib32-libelf
+ depends = lib32-lm_sensors
+ depends = lib32-spirv-llvm-translator
+ depends = lib32-zstd
+ depends = libclc
+ depends = amdonly-gaming-opencl-rusticl-mesa
+ optdepends = opencl-headers: headers necessary for OpenCL development
+ provides = lib32-opencl-rusticl-mesa
+ provides = lib32-opencl-driver
+ conflicts = lib32-opencl-rusticl-mesa
+ conflicts = lib32-opencl-mesa
+ replaces = lib32-opencl-rusticl-mesa
+ replaces = lib32-opencl-mesa<=23.1.4-1
+
+pkgname = lib32-amdonly-gaming-vulkan-radeon
+ pkgdesc = Radeon's Vulkan mesa driver (32-bit)
+ depends = lib32-libdrm
+ depends = lib32-libelf
+ depends = lib32-libx11
+ depends = lib32-libxshmfence
+ depends = lib32-llvm-libs
+ depends = lib32-systemd
+ depends = lib32-wayland
+ depends = lib32-xcb-util-keysyms
+ depends = lib32-zstd
+ depends = amdonly-gaming-vulkan-radeon
+ optdepends = lib32-vulkan-mesa-layers: additional vulkan layers
+ provides = lib32-vulkan-radeon
+ provides = lib32-vulkan-driver
+ conflicts = lib32-vulkan-radeon
+ replaces = lib32-vulkan-radeon
+
+pkgname = lib32-amdonly-gaming-vulkan-swrast
+ pkgdesc = Vulkan software rasteriser driver (32-bit)
+ depends = lib32-libdrm
+ depends = lib32-libx11
+ depends = lib32-libxshmfence
+ depends = lib32-llvm-libs
+ depends = lib32-systemd
+ depends = lib32-wayland
+ depends = lib32-xcb-util-keysyms
+ depends = lib32-zstd
+ optdepends = lib32-vulkan-mesa-layers: additional vulkan layers
+ provides = lib32-vulkan-swrast
+ provides = lib32-vulkan-driver
+ conflicts = lib32-vulkan-swrast
+ conflicts = lib32-vulkan-mesa
+ replaces = lib32-vulkan-swrast
+ replaces = lib32-vulkan-mesa
+
+pkgname = lib32-amdonly-gaming-vulkan-virtio
+ pkgdesc = Venus Vulkan mesa driver for Virtual Machines (32-bit)
+ depends = lib32-libdrm
+ depends = lib32-libx11
depends = lib32-libxshmfence
+ depends = lib32-systemd
+ depends = lib32-wayland
+ depends = lib32-xcb-util-keysyms
+ depends = lib32-zstd
+ optdepends = lib32-vulkan-mesa-layers: additional vulkan layers
+ provides = lib32-vulkan-driver
+ conflicts = lib32-vulkan-virtio
+ replaces = lib32-vulkan-virtio
+
+pkgname = lib32-amdonly-gaming-libva-mesa-driver
+ pkgdesc = VA-API drivers (32-bit)
+ depends = lib32-expat
+ depends = lib32-libdrm
+ depends = lib32-libelf
+ depends = lib32-libx11
+ depends = lib32-libxshmfence
+ depends = lib32-llvm-libs
+ depends = lib32-zstd
+ provides = lib32-libva-driver
+ conflicts = lib32-libva-mesa-driver
+ replaces = lib32-libva-mesa-driver
+
+pkgname = lib32-amdonly-gaming-mesa-vdpau
+ pkgdesc = VDPAU drivers (32-bit)
+ depends = lib32-expat
+ depends = lib32-libdrm
+ depends = lib32-libelf
+ depends = lib32-libx11
+ depends = lib32-libxshmfence
+ depends = lib32-llvm-libs
+ depends = lib32-zstd
+ provides = lib32-vdpau-driver
+ provides = lib32-mesa-vdpau
+ conflicts = lib32-mesa-vdpau
+ replaces = lib32-mesa-vdpau
+
+pkgname = lib32-amdonly-gaming-mesa
+ depends = lib32-libdrm
depends = lib32-libelf
+ depends = lib32-libglvnd
+ depends = lib32-libxdamage
+ depends = lib32-libxshmfence
+ depends = lib32-libxxf86vm
+ depends = lib32-llvm-libs
depends = lib32-lm_sensors
- depends = glslang
depends = lib32-vulkan-icd-loader
+ depends = lib32-wayland
depends = lib32-zstd
- depends = lib32-llvm-libs
+ depends = amdonly-gaming-mesa
optdepends = opengl-man-pages: for the OpenGL API man pages
provides = lib32-mesa
- provides = lib32-vulkan-radeon
- provides = lib32-vulkan-mesa-layer
- provides = lib32-libva-mesa-driver
- provides = lib32-mesa-vdpau
provides = lib32-mesa-libgl
provides = lib32-opengl-driver
- provides = lib32-vulkan-driver
- provides = lib32-opencl-mesa
conflicts = lib32-mesa
- conflicts = lib32-vulkan-intel
- conflicts = lib32-vulkan-radeon
- conflicts = lib32-vulkan-mesa-layer
- conflicts = lib32-libva-mesa-driver
- conflicts = lib32-mesa-vdpau
conflicts = lib32-mesa-libgl
- conflicts = lib32-opencl-mesa
- options = !lto
- source = mesa::git+https://gitlab.freedesktop.org/mesa/mesa.git#branch=main
- source = LICENSE
- source = llvm32.native
- md5sums = SKIP
- md5sums = 5c65a0fe315dd347e09b1f2826a1df5a
- md5sums = 6b4a19068a323d7f90a3d3cd315ed1f9
- sha512sums = SKIP
- sha512sums = 25da77914dded10c1f432ebcbf29941124138824ceecaf1367b3deedafaecabc082d463abcfa3d15abff59f177491472b505bcb5ba0c4a51bb6b93b4721a23c2
- sha512sums = c7dbb390ebde291c517a854fcbe5166c24e95206f768cc9458ca896b2253aabd6df12a7becf831998721b2d622d0c02afdd8d519e77dea8e1d6807b35f0166fe
-
-pkgname = lib32-mesa-amdonly-gaming-git
+ replaces = lib32-mesa
+ replaces = lib32-mesa-libgl
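
The .SRCINFO above is generated metadata mirroring the split-package layout introduced in the PKGBUILD below; after editing the PKGBUILD it is normally regenerated with:

    makepkg --printsrcinfo > .SRCINFO
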
diff --git a/PKGBUILD b/PKGBUILD
index 08d27855b154..c6560d624610 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -11,33 +11,83 @@
# Contributor: Antti "Tera" Oja <antti.bofh@gmail.com>
# Contributor: Diego Jose <diegoxter1006@gmail.com>
-pkgname=lib32-mesa-amdonly-gaming-git
-pkgdesc="an open-source implementation of the OpenGL specification, git version"
-pkgver=23.2.0_devel.173976.d3662ba461e.d41d8cd98f00b204e9800998ecf8427e
+pkgbase=lib32-mesa-amdonly-gaming-git
+pkgver=24.0.0_devel.181988.9ca9b674462.d954e6c2b60b8d8c955b77483af4bdfb
+options=(!lto) # LTO is bad for mesa, makes random applications crash on my system
+
+pkgname=(
+ 'lib32-amdonly-gaming-vulkan-mesa-layers'
+ 'lib32-amdonly-gaming-opencl-clover-mesa'
+ 'lib32-amdonly-gaming-opencl-rusticl-mesa'
+ 'lib32-amdonly-gaming-vulkan-radeon'
+ 'lib32-amdonly-gaming-vulkan-swrast'
+ 'lib32-amdonly-gaming-vulkan-virtio'
+ 'lib32-amdonly-gaming-libva-mesa-driver'
+ 'lib32-amdonly-gaming-mesa-vdpau'
+ 'lib32-amdonly-gaming-mesa'
+)
pkgrel=1
+pkgdesc="An open-source implementation of the OpenGL specification (32-bit)"
+url="https://www.mesa3d.org/"
arch=('x86_64')
-makedepends=('python-mako' 'lib32-libxml2' 'lib32-libx11' 'xorgproto'
- 'lib32-libvdpau' 'git' 'lib32-libglvnd' 'wayland-protocols'
- 'meson' 'lib32-libva' 'lib32-libxrandr' 'lib32-llvm' 'lib32-clang')
-depends=('mesa-amdonly-gaming-git' 'lib32-gcc-libs' 'lib32-libdrm' 'lib32-wayland' 'lib32-libxxf86vm'
- 'lib32-libxdamage' 'lib32-libxshmfence' 'lib32-libelf'
- 'lib32-lm_sensors' 'glslang' 'lib32-vulkan-icd-loader' 'lib32-zstd' 'lib32-llvm-libs')
-optdepends=('opengl-man-pages: for the OpenGL API man pages')
-provides=('lib32-mesa' 'lib32-vulkan-radeon' 'lib32-vulkan-mesa-layer' 'lib32-libva-mesa-driver' 'lib32-mesa-vdpau' 'lib32-mesa-libgl' 'lib32-opengl-driver' 'lib32-vulkan-driver' 'lib32-opencl-mesa')
-conflicts=('lib32-mesa' 'lib32-vulkan-intel' 'lib32-vulkan-radeon' 'lib32-vulkan-mesa-layer' 'lib32-libva-mesa-driver' 'lib32-mesa-vdpau' 'lib32-mesa-libgl' 'lib32-opencl-mesa')
-url="https://www.mesa3d.org"
license=('custom')
-source=('mesa::git+https://gitlab.freedesktop.org/mesa/mesa.git#branch=main'
- 'LICENSE'
- 'llvm32.native')
-md5sums=('SKIP'
- '5c65a0fe315dd347e09b1f2826a1df5a'
- '6b4a19068a323d7f90a3d3cd315ed1f9')
-sha512sums=('SKIP'
- '25da77914dded10c1f432ebcbf29941124138824ceecaf1367b3deedafaecabc082d463abcfa3d15abff59f177491472b505bcb5ba0c4a51bb6b93b4721a23c2'
- 'c7dbb390ebde291c517a854fcbe5166c24e95206f768cc9458ca896b2253aabd6df12a7becf831998721b2d622d0c02afdd8d519e77dea8e1d6807b35f0166fe')
+makedepends=(
+ 'lib32-clang'
+ 'lib32-expat'
+ 'lib32-libdrm'
+ 'lib32-libelf'
+ 'lib32-libglvnd'
+ 'lib32-libva'
+ 'lib32-libvdpau'
+ 'lib32-libx11'
+ 'lib32-libxdamage'
+ 'lib32-libxml2'
+ 'lib32-libxrandr'
+ 'lib32-libxshmfence'
+ 'lib32-libxxf86vm'
+ 'lib32-llvm'
+ 'lib32-lm_sensors'
+ 'lib32-rust-libs'
+ 'lib32-spirv-llvm-translator'
+ 'lib32-spirv-tools'
+ 'lib32-systemd'
+ 'lib32-vulkan-icd-loader'
+ 'lib32-wayland'
+ 'lib32-zstd'
+
+ # shared between mesa and lib32-mesa
+ 'git'
+ 'clang'
+ 'cmake'
+ 'elfutils'
+ 'glslang'
+ 'libclc'
+ 'meson>=1.3.0'
+ 'python-mako'
+ 'python-ply'
+ 'rust-bindgen'
+ 'wayland-protocols'
+ 'xorgproto'
+
+)
+source=(
+ 'mesa::git+https://gitlab.freedesktop.org/mesa/mesa.git#branch=main'
+ 'LICENSE'
+ 'llvm32.native'
+ gamescope.patch
+)
+b2sums=('SKIP'
+ 'cc60238726b35133b5b729fb4ed1e76e04136588533615d84b4a54656d5b41727d5e7ff06ef4de3eb102eed6669d6c5c5cb8ac9fbdf6fc25aa477877c5c3ba87' # LICENSE
+ 'f84a2ec7aa5cd0e3efcfee69897e3da5c6c7031a07b7c93b43f1a709cc0ae0fd312c56c659e7cb7f2e276cbe49be6e586f117320404d2978b09ae0969d919eaf' # llvm32.native
+ '6219a0797724ab17b36f85a134349ffa52f3fce74490834c5f86b474ba893cbb34ebef604cd72c851436296add906dbdb8fca9eb50d1e5f74c7caa8e2963bdcc' # gamescope.patch
+)
+validpgpkeys=('8703B6700E7EE06D7A39B8D6EDAE37B02CEB490D' # Emil Velikov <emil.l.velikov@gmail.com>
+ '946D09B5E4C9845E63075FF1D961C596A7203456' # Andres Gomez <tanty@igalia.com>
+ 'E3E8F480C52ADD73B278EE78E1ECBE07D7D70895' # Juan Antonio Suárez Romero (Igalia, S.L.) <jasuarez@igalia.com>
+ 'A5CC9FEC93F2F837CB044912336909B6B25FADFA' # Juan A. Suarez Romero <jasuarez@igalia.com>
+ '71C4B75620BC75708B4BDB254C95FAAB3EB073EC' # Dylan Baker <dylan@pnwbakers.com>
+ '57551DE15B968F6341C248F68D8E31AFC32428A6') # Eric Engestrom <eric@engestrom.ch>
-options=(!lto) # LTO is bad for mesa, makes random applications crash on my system
# NINJAFLAGS is an env var used to pass commandline options to ninja
# NOTE: It's your responbility to validate the value of $NINJAFLAGS. If unsure, don't set it.
@@ -61,90 +111,386 @@ pkgver() {
}
prepare() {
- # although removing _build folder in build() function feels more natural,
- # that interferes with the spirit of makepkg --noextract
- if [ -d _build ]; then
- rm -rf _build
- fi
+ # although removing _build folder in build() function feels more natural,
+ # that interferes with the spirit of makepkg --noextract
+ if [ -d _build ]; then
+ rm -rf _build
+ fi
- local _patchfile
- for _patchfile in "${source[@]}"; do
- _patchfile="${_patchfile%%::*}"
- _patchfile="${_patchfile##*/}"
- [[ $_patchfile = *.patch ]] || continue
- echo "Applying patch $_patchfile..."
- patch --directory=mesa --forward --strip=1 --input="${srcdir}/${_patchfile}"
- done
+ # Ugly hack to allow recompilation using gamescope.path
+ if [ -f "mesa/src/egl/wayland/wayland-drm/gamescope-commit-queue-v1.xml" ]; then
+ rm -rf "mesa/src/egl/wayland/wayland-drm/gamescope-commit-queue-v1.xml"
+ fi
+
+ # Include package release in version string so Chromium invalidates
+ # its GPU cache; otherwise it can cause pages to render incorrectly.
+ # https://bugs.launchpad.net/ubuntu/+source/chromium-browser/+bug/2020604
+ echo "$pkgver-$pkgrel" >VERSION
+
+ local _patchfile
+ for _patchfile in "${source[@]}"; do
+ _patchfile="${_patchfile%%::*}"
+ _patchfile="${_patchfile##*/}"
+ [[ $_patchfile = *.patch ]] || continue
+ echo "Applying patch $_patchfile..."
+ patch --directory=mesa --forward --strip=1 --input="${srcdir}/${_patchfile}"
+ done
+}
+
+build() {
+ local meson_options=(
+ --cross-file lib32
+ -D android-libbacktrace=disabled
+ -D b_ndebug=true
+ -D dri3=enabled
+ -D egl=enabled
+ -D gallium-drivers=radeonsi,virgl,svga,swrast,crocus,zink
+ -D gallium-extra-hud=true
+ -D gallium-nine=true
+ -D gallium-omx=disabled
+ -D gallium-opencl=icd
+ -D gallium-rusticl=true
+ -D gallium-va=enabled
+ -D gallium-vdpau=enabled
+ -D gallium-xa=enabled
+ -D gbm=enabled
+ -D gles1=disabled
+ -D gles2=enabled
+ -D glvnd=true
+ -D glx=dri
+ -D intel-clc=enabled
+ -D libunwind=disabled
+ -D llvm=enabled
+ -D lmsensors=enabled
+ -D microsoft-clc=disabled
+ -D osmesa=true
+ -D platforms=x11,wayland
+ -D rust_std=2021
+ -D shared-glapi=enabled
+ -D valgrind=disabled
+ -D video-codecs=vc1dec,h264dec,h264enc,h265dec,h265enc
+ -D vulkan-drivers=amd,swrast,virtio
+ -D vulkan-layers=device-select,overlay
+ )
+
+ # Build only minimal debug info to reduce size
+ #CFLAGS+=' -g1'
+ #CXXFLAGS+=' -g1'
+
+ export BINDGEN_EXTRA_CLANG_ARGS="-m32"
+
+ arch-meson mesa build "${meson_options[@]}"
+ meson configure build --no-pager # Print config
+
+ #if [ ! -f "build/build.ninja.bak" ]; then
+ # cp build/build.ninja build/build.ninja.back
+ #fi
+
+ # Evil: Hack build to make proc-macro crate native
+ # Should become unnecessary with Meson 1.3
+ #sed -e '/^rule rust_COMPILER$/irule rust_HACK\n command = rustc -C linker=gcc $ARGS $in\n deps = gcc\n depfile = $targetdep\n description = Compiling native Rust source $in\n' \
+ # -e '/^build src\/gallium\/frontends\/rusticl\/librusticl_proc_macros\.so:/s/rust_COMPILER/rust_HACK/' \
+ # -e '/^ LINK_ARGS =/s/ src\/gallium\/frontends\/rusticl\/librusticl_proc_macros\.so//' \
+ # -i build/build.ninja
+
+ $NINJAFLAGS meson compile -C build
+
+ # fake installation to be seperated into packages
+ # outside of fakeroot but mesa doesn't need to chown/mod
+ DESTDIR="${srcdir}/fakeinstall" meson install -C build
+}
+
+_install() {
+ local src f dir
+ for src; do
+ f="${src#fakeinstall/}"
+ dir="${pkgdir}/${f%/*}"
+ install -m755 -d "${dir}"
+ mv -v "${src}" "${dir}/"
+ done
+}
+
+_libdir=usr/lib32
+
+package_lib32-amdonly-gaming-vulkan-mesa-layers() {
+ pkgdesc="Mesa's Vulkan layers (32-bit)"
+ depends=(
+ 'lib32-libdrm'
+ 'lib32-libxcb'
+ 'lib32-wayland'
+
+ 'amdonly-gaming-vulkan-mesa-layers'
+ )
+ conflicts=(
+ 'lib32-vulkan-mesa-layers'
+ 'lib32-vulkan-mesa-layer'
+ )
+ replaces=(
+ 'lib32-vulkan-mesa-layers'
+ 'lib32-vulkan-mesa-layer'
+ )
+
+ rm -rv fakeinstall/usr/share/vulkan/explicit_layer.d
+ rm -rv fakeinstall/usr/share/vulkan/implicit_layer.d
+ _install fakeinstall/$_libdir/libVkLayer_*.so
+ rm -v fakeinstall/usr/bin/mesa-overlay-control.py
+
+ install -m644 -Dt "${pkgdir}/usr/share/licenses/${pkgname}" LICENSE
+}
+
+package_lib32-amdonly-gaming-opencl-clover-mesa() {
+ pkgdesc="OpenCL support with clover for mesa drivers (32-bit)"
+ depends=(
+ 'lib32-clang'
+ 'lib32-expat'
+ 'lib32-libdrm'
+ 'lib32-libelf'
+ 'lib32-spirv-llvm-translator'
+ 'lib32-zstd'
+
+ 'libclc'
+ 'amdonly-gaming-opencl-clover-mesa'
+ )
+ optdepends=('opencl-headers: headers necessary for OpenCL development')
+ provides=(
+ 'lib32-opencl-clover-mesa'
+ 'lib32-opencl-driver'
+ )
+ replaces=(
+ 'lib32-opencl-clover-mesa'
+ "lib32-opencl-mesa<=23.1.4-1"
+ )
+ conflicts=(
+ 'lib32-opencl-clover-mesa'
+ 'lib32-opencl-mesa'
+ )
+
+ rm -v fakeinstall/etc/OpenCL/vendors/mesa.icd
+ _install fakeinstall/$_libdir/libMesaOpenCL*
+ _install fakeinstall/$_libdir/gallium-pipe
+
+ install -m644 -Dt "${pkgdir}/usr/share/licenses/${pkgname}" LICENSE
+}
+
+package_lib32-amdonly-gaming-opencl-rusticl-mesa() {
+ pkgdesc="OpenCL support with rusticl for mesa drivers (32-bit)"
+ depends=(
+ 'lib32-clang'
+ 'lib32-expat'
+ 'lib32-libdrm'
+ 'lib32-libelf'
+ 'lib32-lm_sensors'
+ 'lib32-spirv-llvm-translator'
+ 'lib32-zstd'
+
+ 'libclc'
+ 'amdonly-gaming-opencl-rusticl-mesa'
+ )
+ optdepends=('opencl-headers: headers necessary for OpenCL development')
+ provides=(
+ 'lib32-opencl-rusticl-mesa'
+ 'lib32-opencl-driver'
+ )
+ replaces=(
+ 'lib32-opencl-rusticl-mesa'
+ "lib32-opencl-mesa<=23.1.4-1"
+ )
+ conflicts=(
+ 'lib32-opencl-rusticl-mesa'
+ 'lib32-opencl-mesa'
+ )
+
+ rm -v fakeinstall/etc/OpenCL/vendors/rusticl.icd
+ _install fakeinstall/$_libdir/libRusticlOpenCL*
+
+ install -m644 -Dt "${pkgdir}/usr/share/licenses/${pkgname}" LICENSE
+}
+
+package_lib32-amdonly-gaming-vulkan-radeon() {
+ pkgdesc="Radeon's Vulkan mesa driver (32-bit)"
+ depends=(
+ 'lib32-libdrm'
+ 'lib32-libelf'
+ 'lib32-libx11'
+ 'lib32-libxshmfence'
+ 'lib32-llvm-libs'
+ 'lib32-systemd'
+ 'lib32-wayland'
+ 'lib32-xcb-util-keysyms'
+ 'lib32-zstd'
+
+ 'amdonly-gaming-vulkan-radeon'
+ )
+ optdepends=('lib32-vulkan-mesa-layers: additional vulkan layers')
+ provides=(
+ 'lib32-vulkan-radeon'
+ 'lib32-vulkan-driver'
+ )
+ replaces=('lib32-vulkan-radeon')
+ conflicts=('lib32-vulkan-radeon')
+
+ rm -v fakeinstall/usr/share/drirc.d/00-radv-defaults.conf
+ _install fakeinstall/usr/share/vulkan/icd.d/radeon_icd*.json
+ _install fakeinstall/$_libdir/libvulkan_radeon.so
+
+ install -m644 -Dt "${pkgdir}/usr/share/licenses/${pkgname}" LICENSE
+}
+
+package_lib32-amdonly-gaming-vulkan-swrast() {
+ pkgdesc="Vulkan software rasteriser driver (32-bit)"
+ depends=(
+ 'lib32-libdrm'
+ 'lib32-libx11'
+ 'lib32-libxshmfence'
+ 'lib32-llvm-libs'
+ 'lib32-systemd'
+ 'lib32-wayland'
+ 'lib32-xcb-util-keysyms'
+ 'lib32-zstd'
+ )
+ optdepends=('lib32-vulkan-mesa-layers: additional vulkan layers')
+ conflicts=(
+ 'lib32-vulkan-swrast'
+ 'lib32-vulkan-mesa'
+ )
+ replaces=(
+ 'lib32-vulkan-swrast'
+ 'lib32-vulkan-mesa'
+ )
+ provides=(
+ 'lib32-vulkan-swrast'
+ 'lib32-vulkan-driver'
+ )
+
+ _install fakeinstall/usr/share/vulkan/icd.d/lvp_icd*.json
+ _install fakeinstall/$_libdir/libvulkan_lvp.so
+
+ install -m644 -Dt "${pkgdir}/usr/share/licenses/${pkgname}" LICENSE
+}
+
+package_lib32-amdonly-gaming-vulkan-virtio() {
+ pkgdesc="Venus Vulkan mesa driver for Virtual Machines (32-bit)"
+ depends=(
+ 'lib32-libdrm'
+ 'lib32-libx11'
+ 'lib32-libxshmfence'
+ 'lib32-systemd'
+ 'lib32-wayland'
+ 'lib32-xcb-util-keysyms'
+ 'lib32-zstd'
+ )
+ optdepends=('lib32-vulkan-mesa-layers: additional vulkan layers')
+ provides=(
+ 'lib32-vulkan-driver'
+ )
+ replaces=('lib32-vulkan-virtio')
+ conflicts=('lib32-vulkan-virtio')
+
+ _install fakeinstall/usr/share/vulkan/icd.d/virtio_icd*.json
+ _install fakeinstall/$_libdir/libvulkan_virtio.so
+
+ install -m644 -Dt "${pkgdir}/usr/share/licenses/${pkgname}" LICENSE
}
-build () {
- export CC="${CC:-gcc}"
- export CXX="${CXX:-g++}"
- CC="$CC -m32"
- CXX="$CXX -m32"
-
- export PKG_CONFIG=/usr/bin/i686-pc-linux-gnu-pkg-config
-
- arch-meson mesa _build \
- --native-file llvm32.native \
- -D b_ndebug=true \
- --wrap-mode=nofallback \
- -D sysconfdir=/etc \
- --libdir=/usr/lib32 \
- -D platforms=x11,wayland \
- -D gallium-d3d12-video=disabled \
- -D gallium-drivers=radeonsi,swrast,zink \
- -D vulkan-drivers=amd,swrast \
- -D dri3=enabled \
- -D egl=enabled \
- -D gallium-extra-hud=true \
- -D vulkan-beta=true\
- -D vulkan-layers=device-select,overlay \
- -D gallium-nine=false \
- -D gallium-omx=disabled \
- -D gallium-opencl=disabled \
- -D gallium-va=enabled \
- -D gallium-vdpau=enabled \
- -D gallium-xa=disabled \
- -D gbm=enabled \
- -D gles1=disabled \
- -D gles2=enabled \
- -D glvnd=true \
- -D glx=dri \
- -D libunwind=disabled \
- -D android-libbacktrace=disabled \
- -D llvm=enabled \
- -D shared-llvm=enabled \
- -D lmsensors=enabled \
- -D osmesa=true \
- -D shared-glapi=enabled \
- -D valgrind=disabled \
- -D tools=[] \
- -D zstd=enabled \
- -D microsoft-clc=disabled
-
- meson configure --no-pager _build
-
- ninja $NINJAFLAGS -C _build
+package_lib32-amdonly-gaming-libva-mesa-driver() {
+ pkgdesc="VA-API drivers (32-bit)"
+ depends=(
+ 'lib32-expat'
+ 'lib32-libdrm'
+ 'lib32-libelf'
+ 'lib32-libx11'
+ 'lib32-libxshmfence'
+ 'lib32-llvm-libs'
+ 'lib32-zstd'
+ )
+ provides=('lib32-libva-driver')
+ replaces=('lib32-libva-mesa-driver')
+ conflicts=('lib32-libva-mesa-driver')
+
+ _install fakeinstall/$_libdir/dri/*_drv_video.so
+
+ install -m644 -Dt "${pkgdir}/usr/share/licenses/${pkgname}" LICENSE
}
-package() {
- DESTDIR="$pkgdir" ninja $NINJAFLAGS -C _build install
-
- # remove files provided by mesa-git
- rm -rf "$pkgdir"/etc
- rm -rf "$pkgdir"/usr/include
- rm -rf "$pkgdir"/usr/share/glvnd/
- rm -rf "$pkgdir"/usr/share/drirc.d/
- rm -rf "$pkgdir"/usr/share/vulkan/explicit_layer.d/
- rm -rf "$pkgdir"/usr/share/vulkan/implicit_layer.d/VkLayer_MESA_device_select.json
-
- # remove script file from /usr/bin
- # https://gitlab.freedesktop.org/mesa/mesa/issues/2230
- rm "${pkgdir}/usr/bin/mesa-overlay-control.py"
- rmdir "${pkgdir}/usr/bin"
-
- # indirect rendering
- ln -s /usr/lib32/libGLX_mesa.so.0 "${pkgdir}/usr/lib32/libGLX_indirect.so.0"
- install -m644 -Dt "$pkgdir"/usr/share/licenses/$pkgbase/ "$srcdir"/LICENSE
+package_lib32-amdonly-gaming-mesa-vdpau() {
+ pkgdesc="VDPAU drivers (32-bit)"
+ depends=(
+ 'lib32-expat'
+ 'lib32-libdrm'
+ 'lib32-libelf'
+ 'lib32-libx11'
+ 'lib32-libxshmfence'
+ 'lib32-llvm-libs'
+ 'lib32-zstd'
+ )
+ provides=(
+ 'lib32-vdpau-driver'
+ 'lib32-mesa-vdpau'
+ )
+ replaces=('lib32-mesa-vdpau')
+ conflicts=('lib32-mesa-vdpau')
+
+ _install fakeinstall/$_libdir/vdpau
+
+ install -m644 -Dt "${pkgdir}/usr/share/licenses/${pkgname}" LICENSE
+}
+
+package_lib32-amdonly-gaming-mesa() {
+ depends=(
+ 'lib32-libdrm'
+ 'lib32-libelf'
+ 'lib32-libglvnd'
+ 'lib32-libxdamage'
+ 'lib32-libxshmfence'
+ 'lib32-libxxf86vm'
+ 'lib32-llvm-libs'
+ 'lib32-lm_sensors'
+ 'lib32-vulkan-icd-loader'
+ 'lib32-wayland'
+ 'lib32-zstd'
+
+ 'amdonly-gaming-mesa'
+ )
+ optdepends=(
+ 'opengl-man-pages: for the OpenGL API man pages'
+ )
+ provides=(
+ 'lib32-mesa'
+ 'lib32-mesa-libgl'
+ 'lib32-opengl-driver'
+ )
+ conflicts=(
+ 'lib32-mesa'
+ 'lib32-mesa-libgl'
+ )
+ replaces=(
+ 'lib32-mesa'
+ 'lib32-mesa-libgl'
+ )
+
+ rm -v fakeinstall/usr/share/drirc.d/00-mesa-defaults.conf
+ rm -v fakeinstall/usr/share/glvnd/egl_vendor.d/50_mesa.json
+
+ # ati-dri, nouveau-dri, intel-dri, svga-dri, swrast, swr
+ _install fakeinstall/$_libdir/dri/*_dri.so
+
+ _install fakeinstall/$_libdir/d3d
+ _install fakeinstall/$_libdir/lib{gbm,glapi}.so*
+ _install fakeinstall/$_libdir/libOSMesa.so*
+ _install fakeinstall/$_libdir/libxatracker.so*
+
+ rm -rv fakeinstall/usr/include
+ _install fakeinstall/$_libdir/pkgconfig
+
+ # libglvnd support
+ _install fakeinstall/$_libdir/libGLX_mesa.so*
+ _install fakeinstall/$_libdir/libEGL_mesa.so*
+
+ # indirect rendering
+ ln -sr "$pkgdir"/$_libdir/libGLX_{mesa,indirect}.so.0
+
+ # make sure there are no files left to install
+ find fakeinstall -depth -print0 | xargs -0 rmdir
+
+ install -m644 -Dt "${pkgdir}/usr/share/licenses/${pkgname}" LICENSE
}
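
The new gamescope.patch below carries the SteamOS swapchain-limiter and Wayland FIFO changes; the limiter parts only take effect when GAMESCOPE_LIMITER_FILE names a readable file holding the override value, which the patched code reads via getenv()/pread(). Purely as a hypothetical illustration (the file path and test client are placeholders, and in practice gamescope itself is expected to provide the variable):

    GAMESCOPE_LIMITER_FILE=/tmp/gamescope-limiter vkcube
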
diff --git a/gamescope.patch b/gamescope.patch
new file mode 100644
index 000000000000..2858db116864
--- /dev/null
+++ b/gamescope.patch
@@ -0,0 +1,6572 @@
+From 9286f07049608623388a09256ef223bac86c2bac Mon Sep 17 00:00:00 2001
+From: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
+Date: Mon, 21 Feb 2022 18:43:54 +0100
+Subject: [PATCH 1/5] STEAMOS: Dynamic swapchain override for gamescope limiter
+
+---
+ src/loader/loader_dri3_helper.c | 42 +++++++++++++++++++++++++++++++--
+ src/loader/loader_dri3_helper.h | 1 +
+ src/loader/meson.build | 2 +-
+ src/vulkan/wsi/wsi_common_x11.c | 38 +++++++++++++++++++++++++++++
+ 4 files changed, 80 insertions(+), 3 deletions(-)
+
+diff --git a/src/loader/loader_dri3_helper.c b/src/loader/loader_dri3_helper.c
+index 2631a9e2fd5..dbf6db349c6 100644
+--- a/src/loader/loader_dri3_helper.c
++++ b/src/loader/loader_dri3_helper.c
+@@ -289,6 +289,30 @@ dri3_update_max_num_back(struct loader_dri3_drawable *draw)
+ }
+ }
+
++static unsigned
++gamescope_swapchain_override()
++{
++ const char *path = getenv("GAMESCOPE_LIMITER_FILE");
++ if (!path)
++ return 0;
++
++ static simple_mtx_t mtx = SIMPLE_MTX_INITIALIZER;
++ static int fd = -1;
++
++ simple_mtx_lock(&mtx);
++ if (fd < 0) {
++ fd = open(path, O_RDONLY);
++ }
++ simple_mtx_unlock(&mtx);
++
++ if (fd < 0)
++ return 0;
++
++ uint32_t override_value = 0;
++ pread(fd, &override_value, sizeof(override_value), 0);
++ return override_value;
++}
++
+ void
+ loader_dri3_set_swap_interval(struct loader_dri3_drawable *draw, int interval)
+ {
+@@ -303,10 +327,12 @@ loader_dri3_set_swap_interval(struct loader_dri3_drawable *draw, int interval)
+ * PS. changing from value A to B and A < B won't cause swap out of order but
+ * may still gets wrong target_msc value at the beginning.
+ */
+- if (draw->swap_interval != interval)
++ if (draw->orig_swap_interval != interval)
+ loader_dri3_swapbuffer_barrier(draw);
+
+- draw->swap_interval = interval;
++ draw->orig_swap_interval = interval;
++ if (gamescope_swapchain_override() != 1)
++ draw->swap_interval = interval;
+ }
+
+ static void
+@@ -438,6 +464,12 @@ loader_dri3_drawable_init(xcb_connection_t *conn,
+ draw->swap_interval = dri_get_initial_swap_interval(draw->dri_screen_render_gpu,
+ draw->ext->config);
+
++ draw->orig_swap_interval = draw->swap_interval;
++
++ unsigned gamescope_override = gamescope_swapchain_override();
++ if (gamescope_override == 1)
++ draw->swap_interval = 1;
++
+ dri3_update_max_num_back(draw);
+
+ /* Create a new drawable */
+@@ -1085,6 +1117,12 @@ loader_dri3_swap_buffers_msc(struct loader_dri3_drawable *draw,
+ if (draw->type == LOADER_DRI3_DRAWABLE_WINDOW) {
+ dri3_fence_reset(draw->conn, back);
+
++ unsigned gamescope_override = gamescope_swapchain_override();
++ if (gamescope_override == 1)
++ draw->swap_interval = 1;
++ else
++ draw->swap_interval = draw->orig_swap_interval;
++
+ /* Compute when we want the frame shown by taking the last known
+ * successful MSC and adding in a swap interval for each outstanding swap
+ * request. target_msc=divisor=remainder=0 means "Use glXSwapBuffers()
+diff --git a/src/loader/loader_dri3_helper.h b/src/loader/loader_dri3_helper.h
+index cc2362dd599..fe73b3f329c 100644
+--- a/src/loader/loader_dri3_helper.h
++++ b/src/loader/loader_dri3_helper.h
+@@ -178,6 +178,7 @@ struct loader_dri3_drawable {
+ bool block_on_depleted_buffers;
+ bool queries_buffer_age;
+ int swap_interval;
++ int orig_swap_interval;
+
+ struct loader_dri3_extensions *ext;
+ const struct loader_dri3_vtable *vtable;
+diff --git a/src/loader/meson.build b/src/loader/meson.build
+index 043cc852112..8391ff38936 100644
+--- a/src/loader/meson.build
++++ b/src/loader/meson.build
+@@ -29,7 +29,7 @@ if with_platform_x11 and with_dri3
+ dependencies : [
+ idep_mesautil,
+ dep_libdrm, dep_xcb_dri3, dep_xcb_present, dep_xcb_sync, dep_xshmfence,
+- dep_xcb_xfixes,
++ dep_xcb_xfixes, dep_xcb_xrandr, idep_mesautil
+ ],
+ build_by_default : false,
+ )
+diff --git a/src/vulkan/wsi/wsi_common_x11.c b/src/vulkan/wsi/wsi_common_x11.c
+index b7724c028ea..0302e90d40e 100644
+--- a/src/vulkan/wsi/wsi_common_x11.c
++++ b/src/vulkan/wsi/wsi_common_x11.c
+@@ -48,6 +48,7 @@
+ #include "util/hash_table.h"
+ #include "util/os_file.h"
+ #include "util/os_time.h"
++#include "util/simple_mtx.h"
+ #include "util/u_debug.h"
+ #include "util/u_thread.h"
+ #include "util/xmlconfig.h"
+@@ -219,6 +220,30 @@ wsi_x11_detect_xwayland(xcb_connection_t *conn,
+ return is_xwayland;
+ }
+
++static unsigned
++gamescope_swapchain_override()
++{
++ const char *path = getenv("GAMESCOPE_LIMITER_FILE");
++ if (!path)
++ return 0;
++
++ static simple_mtx_t mtx = SIMPLE_MTX_INITIALIZER;
++ static int fd = -1;
++
++ simple_mtx_lock(&mtx);
++ if (fd < 0) {
++ fd = open(path, O_RDONLY);
++ }
++ simple_mtx_unlock(&mtx);
++
++ if (fd < 0)
++ return 0;
++
++ uint32_t override_value = 0;
++ pread(fd, &override_value, sizeof(override_value), 0);
++ return override_value;
++}
++
+ static struct wsi_x11_connection *
+ wsi_x11_connection_create(struct wsi_device *wsi_dev,
+ xcb_connection_t *conn)
+@@ -1104,6 +1129,8 @@ struct x11_swapchain {
+ /* Total number of images returned to application in AcquireNextImage. */
+ uint64_t present_poll_acquire_count;
+
++ VkPresentModeKHR orig_present_mode;
++
+ struct x11_image images[0];
+ };
+ VK_DEFINE_NONDISP_HANDLE_CASTS(x11_swapchain, base.base, VkSwapchainKHR,
+@@ -1854,6 +1881,12 @@ x11_queue_present(struct wsi_swapchain *anv_chain,
+ if (chain->status < 0)
+ return chain->status;
+
++ unsigned gamescope_override = gamescope_swapchain_override();
++ if ((gamescope_override == 1 && chain->base.present_mode != VK_PRESENT_MODE_FIFO_KHR) ||
++ (gamescope_override != 1 && chain->base.present_mode != chain->orig_present_mode)) {
++ return x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR);
++ }
++
+ if (damage && damage->pRectangles && damage->rectangleCount > 0 &&
+ damage->rectangleCount <= MAX_DAMAGE_RECTS) {
+ xcb_rectangle_t rects[MAX_DAMAGE_RECTS];
+@@ -2612,6 +2645,10 @@ x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
+ xcb_void_cookie_t cookie;
+ VkResult result;
+ VkPresentModeKHR present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
++ VkPresentModeKHR orig_present_mode = present_mode;
++
++ if (gamescope_swapchain_override() == 1)
++ present_mode = VK_PRESENT_MODE_FIFO_KHR;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
+
+@@ -2724,6 +2761,7 @@ x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
+ chain->base.wait_for_present = x11_wait_for_present;
+ chain->base.release_images = x11_release_images;
+ chain->base.present_mode = present_mode;
++ chain->orig_present_mode = orig_present_mode;
+ chain->base.image_count = num_images;
+ chain->conn = conn;
+ chain->window = window;
+--
+2.43.0
+
+
+From 172b4926c459e8eda16549814d450e206ef0eab2 Mon Sep 17 00:00:00 2001
+From: Derek Foreman <derek.foreman@collabora.com>
+Date: Wed, 20 Sep 2023 10:40:33 -0500
+Subject: [PATCH 2/5] vulkan/wsi/wayland: refactor wayland dispatch code
+
+We currently have two similar but different bits of code to dispatch
+wayland event queues. Pull out as much common code as possible.
+
+Signed-off-by: Derek Foreman <derek.foreman@collabora.com>
+---
+ src/vulkan/wsi/wsi_common_wayland.c | 399 +++++++++++++++-------------
+ 1 file changed, 208 insertions(+), 191 deletions(-)
+
+diff --git a/src/vulkan/wsi/wsi_common_wayland.c b/src/vulkan/wsi/wsi_common_wayland.c
+index 4d52171e28b..75e1a361a0b 100644
+--- a/src/vulkan/wsi/wsi_common_wayland.c
++++ b/src/vulkan/wsi/wsi_common_wayland.c
+@@ -96,6 +96,11 @@ struct wsi_wl_display {
+ struct wl_display *wl_display;
+ /* Actually a proxy wrapper around the event queue */
+ struct wl_display *wl_display_wrapper;
++
++ pthread_mutex_t wl_fd_lock;
++ pthread_cond_t wl_fd_reader_finished;
++ bool wl_fd_read_in_progress;
++
+ struct wl_event_queue *queue;
+
+ struct wl_shm *wl_shm;
+@@ -157,6 +162,8 @@ struct wsi_wl_surface {
+ struct wsi_wl_swapchain {
+ struct wsi_swapchain base;
+
++ struct wl_event_queue *queue;
++
+ struct wsi_wl_surface *wsi_wl_surface;
+ struct wp_tearing_control_v1 *tearing_control;
+
+@@ -180,10 +187,7 @@ struct wsi_wl_swapchain {
+ pthread_mutex_t lock; /* protects all members */
+ uint64_t max_completed;
+ struct wl_list outstanding_list;
+- pthread_cond_t list_advanced;
+- struct wl_event_queue *queue;
+ struct wp_presentation *wp_presentation;
+- bool dispatch_in_progress;
+ } present_ids;
+
+ struct wsi_wl_image images[0];
+@@ -208,6 +212,135 @@ find_format(struct u_vector *formats, VkFormat format)
+ return NULL;
+ }
+
++static int
++wsi_wl_display_read_queue_with_timeout_internal(struct wsi_wl_display *wsi_wl_display,
++ struct wl_event_queue *queue,
++ uint64_t atimeout)
++{
++ uint64_t current_time_nsec;
++ struct timespec rel_timeout, end_time, current_time;
++ int ret;
++
++ if (wl_display_prepare_read_queue(wsi_wl_display->wl_display, queue) < 0) {
++ /* Another thread might have read events for our queue already. Go
++ * back to dispatch them.
++ */
++ pthread_mutex_unlock(&wsi_wl_display->wl_fd_lock);
++ if (errno == EAGAIN)
++ return VK_SUCCESS;
++
++ return VK_ERROR_OUT_OF_DATE_KHR;
++ }
++
++ wsi_wl_display->wl_fd_read_in_progress = true;
++ pthread_mutex_unlock(&wsi_wl_display->wl_fd_lock);
++
++ while (1) {
++ struct pollfd pollfd = {
++ .fd = wl_display_get_fd(wsi_wl_display->wl_display),
++ .events = POLLIN
++ };
++
++ current_time_nsec = os_time_get_nano();
++ if (current_time_nsec > atimeout) {
++ rel_timeout.tv_sec = 0;
++ rel_timeout.tv_nsec = 0;
++ } else {
++ timespec_from_nsec(&current_time, current_time_nsec);
++ timespec_from_nsec(&end_time, atimeout);
++ timespec_sub(&rel_timeout, &end_time, &current_time);
++ }
++
++ ret = ppoll(&pollfd, 1, &rel_timeout, NULL);
++ if (ret < 0) {
++ if (errno == EINTR || errno == EAGAIN)
++ continue;
++
++ ret = VK_ERROR_OUT_OF_DATE_KHR;
++ } else if (ret == 0)
++ ret = VK_TIMEOUT;
++ else
++ ret = VK_SUCCESS;
++
++ break;
++ }
++
++ if (ret != VK_SUCCESS) {
++ wl_display_cancel_read(wsi_wl_display->wl_display);
++ } else {
++ ret = wl_display_read_events(wsi_wl_display->wl_display);
++ if (ret != 0)
++ ret = VK_ERROR_OUT_OF_DATE_KHR;
++ }
++
++ pthread_mutex_lock(&wsi_wl_display->wl_fd_lock);
++ wsi_wl_display->wl_fd_read_in_progress = false;
++ pthread_cond_broadcast(&wsi_wl_display->wl_fd_reader_finished);
++ return ret;
++}
++
++static int
++wsi_wl_display_dispatch_queue_with_timeout(struct wsi_wl_display *wsi_wl_display,
++ struct wl_event_queue *queue,
++ uint64_t timeout)
++{
++ int err;
++ int n_events;
++ uint64_t atimeout, now;
++
++ if (timeout == UINT64_MAX)
++ atimeout = timeout;
++ else
++ atimeout = os_time_get_absolute_timeout(timeout);
++
++ while (1) {
++ n_events = wl_display_dispatch_queue_pending(wsi_wl_display->wl_display,
++ queue);
++ if (n_events > 0) {
++ err = VK_SUCCESS;
++ break;
++ }
++ pthread_mutex_lock(&wsi_wl_display->wl_fd_lock);
++
++ if (wsi_wl_display->wl_fd_read_in_progress) {
++ struct timespec end_time;
++
++ timespec_from_nsec(&end_time, atimeout);
++
++ err = pthread_cond_timedwait(&wsi_wl_display->wl_fd_reader_finished,
++ &wsi_wl_display->wl_fd_lock,
++ &end_time);
++ if (err) {
++ if (errno == ETIMEDOUT)
++ err = VK_TIMEOUT;
++ else
++ err = VK_ERROR_OUT_OF_DATE_KHR;
++ } else {
++ /* We don't know if the other thread actually
++ * dispatched anything, so let the caller decide
++ * whether it should continue.
++ */
++ err = VK_INCOMPLETE;
++ }
++ } else {
++ err = wsi_wl_display_read_queue_with_timeout_internal(wsi_wl_display,
++ queue,
++ timeout);
++ }
++
++ pthread_mutex_unlock(&wsi_wl_display->wl_fd_lock);
++
++ now = os_time_get_nano();
++ if (now > atimeout) {
++ err = VK_TIMEOUT;
++ break;
++ }
++
++ }
++
++ return err;
++}
++
+ static struct wsi_wl_format *
+ wsi_wl_display_add_vk_format(struct wsi_wl_display *display,
+ struct u_vector *formats,
+@@ -833,6 +966,8 @@ wsi_wl_display_finish(struct wsi_wl_display *display)
+ wl_proxy_wrapper_destroy(display->wl_display_wrapper);
+ if (display->queue)
+ wl_event_queue_destroy(display->queue);
++ pthread_mutex_destroy(&display->wl_fd_lock);
++ pthread_cond_destroy(&display->wl_fd_reader_finished);
+ }
+
+ static VkResult
+@@ -851,6 +986,11 @@ wsi_wl_display_init(struct wsi_wayland *wsi_wl,
+ display->wl_display = wl_display;
+ display->sw = sw;
+
++ display->wl_fd_read_in_progress = false;
++ pthread_mutex_init(&display->wl_fd_lock, NULL);
++ if (!wsi_init_pthread_cond_monotonic(&display->wl_fd_reader_finished))
++ goto fail;
++
+ display->queue = wl_display_create_queue(wl_display);
+ if (!display->queue) {
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
+@@ -951,6 +1091,7 @@ fail_registry:
+ wl_registry_destroy(registry);
+
+ fail:
++ pthread_mutex_destroy(&display->wl_fd_lock);
+ wsi_wl_display_finish(display);
+ return result;
+ }
+@@ -1672,19 +1813,15 @@ wsi_wl_swapchain_wait_for_present(struct wsi_swapchain *wsi_chain,
+ uint64_t timeout)
+ {
+ struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
+- struct wl_display *wl_display = chain->wsi_wl_surface->display->wl_display;
+- struct timespec end_time;
+- int wl_fd = wl_display_get_fd(wl_display);
+- VkResult ret;
+- int err;
++ uint64_t end_time, time_left, now;
++ int ret;
++ bool expired = false;
++ bool finished;
+
+- uint64_t atimeout;
+- if (timeout == 0 || timeout == UINT64_MAX)
+- atimeout = timeout;
++ if (timeout == UINT64_MAX)
++ end_time = timeout;
+ else
+- atimeout = os_time_get_absolute_timeout(timeout);
+-
+- timespec_from_nsec(&end_time, atimeout);
++ end_time = os_time_get_absolute_timeout(timeout);
+
+ /* Need to observe that the swapchain semaphore has been unsignalled,
+ * as this is guaranteed when a present is complete. */
+@@ -1700,141 +1837,45 @@ wsi_wl_swapchain_wait_for_present(struct wsi_swapchain *wsi_chain,
+ return VK_SUCCESS;
+ }
+
++ while (1) {
++ ret = wl_display_dispatch_queue_pending(chain->wsi_wl_surface->display->wl_display,
++ chain->queue);
++ if (ret < 0)
++ return VK_ERROR_OUT_OF_DATE_KHR;
++
+ /* PresentWait can be called concurrently.
+ * If there is contention on this mutex, it means there is currently a dispatcher in flight holding the lock.
+ * The lock is only held while there is forward progress processing events from Wayland,
+ * so there should be no problem locking without timeout.
+ * We would like to be able to support timeout = 0 to query the current max_completed count.
+ * A timedlock with no timeout can be problematic in that scenario. */
+- err = pthread_mutex_lock(&chain->present_ids.lock);
+- if (err != 0)
+- return VK_ERROR_OUT_OF_DATE_KHR;
+-
+- if (chain->present_ids.max_completed >= present_id) {
++ pthread_mutex_lock(&chain->present_ids.lock);
++ finished = chain->present_ids.max_completed >= present_id;
+ pthread_mutex_unlock(&chain->present_ids.lock);
+- return VK_SUCCESS;
+- }
+-
+- /* Someone else is dispatching events; wait for them to update the chain
+- * status and wake us up. */
+- while (chain->present_ids.dispatch_in_progress) {
+- /* We only own the lock when the wait succeeds. */
+- err = pthread_cond_timedwait(&chain->present_ids.list_advanced,
+- &chain->present_ids.lock, &end_time);
+-
+- if (err == ETIMEDOUT) {
+- pthread_mutex_unlock(&chain->present_ids.lock);
+- return VK_TIMEOUT;
+- } else if (err != 0) {
+- pthread_mutex_unlock(&chain->present_ids.lock);
+- return VK_ERROR_OUT_OF_DATE_KHR;
+- }
+-
+- if (chain->present_ids.max_completed >= present_id) {
+- pthread_mutex_unlock(&chain->present_ids.lock);
++ if (finished)
+ return VK_SUCCESS;
+- }
+-
+- /* Whoever was previously dispatching the events isn't anymore, so we
+- * will take over and fall through below. */
+- if (!chain->present_ids.dispatch_in_progress)
+- break;
+- }
+-
+- assert(!chain->present_ids.dispatch_in_progress);
+- chain->present_ids.dispatch_in_progress = true;
+-
+- /* Whether or not we were dispatching the events before, we are now: pull
+- * all the new events from our event queue, post them, and wake up everyone
+- * else who might be waiting. */
+- while (1) {
+- ret = wl_display_dispatch_queue_pending(wl_display, chain->present_ids.queue);
+- if (ret < 0) {
+- ret = VK_ERROR_OUT_OF_DATE_KHR;
+- goto relinquish_dispatch;
+- }
+-
+- /* Some events dispatched: check the new completions. */
+- if (ret > 0) {
+- /* Completed our own present; stop our own dispatching and let
+- * someone else pick it up. */
+- if (chain->present_ids.max_completed >= present_id) {
+- ret = VK_SUCCESS;
+- goto relinquish_dispatch;
+- }
+-
+- /* Wake up other waiters who may have been unblocked by the events
+- * we just read. */
+- pthread_cond_broadcast(&chain->present_ids.list_advanced);
+- }
+-
+- /* Check for timeout, and relinquish the dispatch to another thread
+- * if we're over our budget. */
+- uint64_t current_time_nsec = os_time_get_nano();
+- if (current_time_nsec > atimeout) {
+- ret = VK_TIMEOUT;
+- goto relinquish_dispatch;
+- }
+-
+- /* To poll and read from WL fd safely, we must be cooperative.
+- * See wl_display_prepare_read_queue in https://wayland.freedesktop.org/docs/html/apb.html */
+-
+- /* Try to read events from the server. */
+- ret = wl_display_prepare_read_queue(wl_display, chain->present_ids.queue);
+- if (ret < 0) {
+- /* Another thread might have read events for our queue already. Go
+- * back to dispatch them.
+- */
+- if (errno == EAGAIN)
+- continue;
+- ret = VK_ERROR_OUT_OF_DATE_KHR;
+- goto relinquish_dispatch;
+- }
+
+- /* Drop the lock around poll, so people can wait whilst we sleep. */
+- pthread_mutex_unlock(&chain->present_ids.lock);
+-
+- struct pollfd pollfd = {
+- .fd = wl_fd,
+- .events = POLLIN
+- };
+- struct timespec current_time, rel_timeout;
+- timespec_from_nsec(&current_time, current_time_nsec);
+- timespec_sub(&rel_timeout, &end_time, &current_time);
+- ret = ppoll(&pollfd, 1, &rel_timeout, NULL);
++ if (expired)
++ return VK_TIMEOUT;
+
+- /* Re-lock after poll; either we're dispatching events under the lock or
+- * bouncing out from an error also under the lock. We can't use timedlock
+- * here because we need to acquire to clear dispatch_in_progress. */
+- pthread_mutex_lock(&chain->present_ids.lock);
++ now = os_time_get_nano();
++ if (now > end_time)
++ time_left = 0;
++ else
++ time_left = end_time - now;
+
+- if (ret <= 0) {
+- int lerrno = errno;
+- wl_display_cancel_read(wl_display);
+- if (ret < 0) {
+- /* If ppoll() was interrupted, try again. */
+- if (lerrno == EINTR || lerrno == EAGAIN)
+- continue;
+- ret = VK_ERROR_OUT_OF_DATE_KHR;
+- goto relinquish_dispatch;
+- }
+- assert(ret == 0);
++ ret = wsi_wl_display_dispatch_queue_with_timeout(chain->wsi_wl_surface->display,
++ chain->queue,
++ time_left);
++ if (ret == VK_INCOMPLETE)
+ continue;
+- }
+
+- ret = wl_display_read_events(wl_display);
+- if (ret < 0) {
+- ret = VK_ERROR_OUT_OF_DATE_KHR;
+- goto relinquish_dispatch;
+- }
+- }
++ if (ret != VK_SUCCESS && ret != VK_TIMEOUT)
++ return ret;
+
+-relinquish_dispatch:
+- assert(chain->present_ids.dispatch_in_progress);
+- chain->present_ids.dispatch_in_progress = false;
+- pthread_cond_broadcast(&chain->present_ids.list_advanced);
+- pthread_mutex_unlock(&chain->present_ids.lock);
+- return ret;
++ if (time_left == 0)
++ expired = true;
++ }
+ }
+
+ static VkResult
+@@ -1844,19 +1885,18 @@ wsi_wl_swapchain_acquire_next_image(struct wsi_swapchain *wsi_chain,
+ {
+ struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
+ struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;
+- struct timespec start_time, end_time;
+- struct timespec rel_timeout;
+- int wl_fd = wl_display_get_fd(wsi_wl_surface->display->wl_display);
+-
+- timespec_from_nsec(&rel_timeout, info->timeout);
++ uint64_t end_time, time_left, now;
++ bool expired = false;
++ int ret;
+
+- clock_gettime(CLOCK_MONOTONIC, &start_time);
+- timespec_add(&end_time, &rel_timeout, &start_time);
++ if (info->timeout == UINT64_MAX)
++ end_time = info->timeout;
++ else
++ end_time = os_time_get_absolute_timeout(info->timeout);
+
+ while (1) {
+- /* Try to dispatch potential events. */
+- int ret = wl_display_dispatch_queue_pending(wsi_wl_surface->display->wl_display,
+- wsi_wl_surface->display->queue);
++ ret = wl_display_dispatch_queue_pending(wsi_wl_surface->display->wl_display,
++ wsi_wl_surface->display->queue);
+ if (ret < 0)
+ return VK_ERROR_OUT_OF_DATE_KHR;
+
+@@ -1870,46 +1910,26 @@ wsi_wl_swapchain_acquire_next_image(struct wsi_swapchain *wsi_chain,
+ }
+ }
+
+- /* Check for timeout. */
+- struct timespec current_time;
+- clock_gettime(CLOCK_MONOTONIC, &current_time);
+- if (timespec_after(&current_time, &end_time))
+- return (info->timeout ? VK_TIMEOUT : VK_NOT_READY);
++ if (expired)
++ return info->timeout ? VK_TIMEOUT : VK_NOT_READY;
+
+- /* Try to read events from the server. */
+- ret = wl_display_prepare_read_queue(wsi_wl_surface->display->wl_display,
+- wsi_wl_surface->display->queue);
+- if (ret < 0) {
+- /* Another thread might have read events for our queue already. Go
+- * back to dispatch them.
+- */
+- if (errno == EAGAIN)
+- continue;
+- return VK_ERROR_OUT_OF_DATE_KHR;
+- }
++ now = os_time_get_nano();
++ if (now > end_time)
++ time_left = 0;
++ else
++ time_left = end_time - now;
+
+- struct pollfd pollfd = {
+- .fd = wl_fd,
+- .events = POLLIN
+- };
+- timespec_sub(&rel_timeout, &end_time, &current_time);
+- ret = ppoll(&pollfd, 1, &rel_timeout, NULL);
+- if (ret <= 0) {
+- int lerrno = errno;
+- wl_display_cancel_read(wsi_wl_surface->display->wl_display);
+- if (ret < 0) {
+- /* If ppoll() was interrupted, try again. */
+- if (lerrno == EINTR || lerrno == EAGAIN)
+- continue;
+- return VK_ERROR_OUT_OF_DATE_KHR;
+- }
+- assert(ret == 0);
++ ret = wsi_wl_display_dispatch_queue_with_timeout(wsi_wl_surface->display,
++ wsi_wl_surface->display->queue,
++ time_left);
++ if (ret == VK_ERROR_OUT_OF_DATE_KHR)
++ return ret;
++
++ if (ret == VK_INCOMPLETE)
+ continue;
+- }
+
+- ret = wl_display_read_events(wsi_wl_surface->display->wl_display);
+- if (ret < 0)
+- return VK_ERROR_OUT_OF_DATE_KHR;
++ if (ret == VK_TIMEOUT)
++ expired = true;
+ }
+ }
+
+@@ -1930,9 +1950,10 @@ presentation_handle_presented(void *data,
+ {
+ struct wsi_wl_present_id *id = data;
+
+- /* present_ids.lock already held around dispatch */
++ pthread_mutex_lock(&id->chain->present_ids.lock);
+ if (id->present_id > id->chain->present_ids.max_completed)
+ id->chain->present_ids.max_completed = id->present_id;
++ pthread_mutex_unlock(&id->chain->present_ids.lock);
+
+ wp_presentation_feedback_destroy(feedback);
+ wl_list_remove(&id->link);
+@@ -1945,9 +1966,10 @@ presentation_handle_discarded(void *data,
+ {
+ struct wsi_wl_present_id *id = data;
+
+- /* present_ids.lock already held around dispatch */
++ pthread_mutex_lock(&id->chain->present_ids.lock);
+ if (id->present_id > id->chain->present_ids.max_completed)
+ id->chain->present_ids.max_completed = id->present_id;
++ pthread_mutex_unlock(&id->chain->present_ids.lock);
+
+ wp_presentation_feedback_destroy(feedback);
+ wl_list_remove(&id->link);
+@@ -2195,8 +2217,6 @@ wsi_wl_swapchain_chain_free(struct wsi_wl_swapchain *chain,
+ chain->wsi_wl_surface->chain = NULL;
+
+ if (chain->present_ids.wp_presentation) {
+- assert(!chain->present_ids.dispatch_in_progress);
+-
+ /* In VK_EXT_swapchain_maintenance1 there is no requirement to wait for all present IDs to be complete.
+ * Waiting for the swapchain fence is enough.
+ * Just clean up anything user did not wait for. */
+@@ -2208,7 +2228,6 @@ wsi_wl_swapchain_chain_free(struct wsi_wl_swapchain *chain,
+ }
+
+ wl_proxy_wrapper_destroy(chain->present_ids.wp_presentation);
+- pthread_cond_destroy(&chain->present_ids.list_advanced);
+ pthread_mutex_destroy(&chain->present_ids.lock);
+ }
+
+@@ -2369,18 +2388,16 @@ wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
+ chain->num_drm_modifiers = num_drm_modifiers;
+ chain->drm_modifiers = drm_modifiers;
+
++ chain->queue = wl_display_create_queue(chain->wsi_wl_surface->display->wl_display);
++
+ if (chain->wsi_wl_surface->display->wp_presentation_notwrapped) {
+- if (!wsi_init_pthread_cond_monotonic(&chain->present_ids.list_advanced))
+- goto fail;
+ pthread_mutex_init(&chain->present_ids.lock, NULL);
+
+ wl_list_init(&chain->present_ids.outstanding_list);
+- chain->present_ids.queue =
+- wl_display_create_queue(chain->wsi_wl_surface->display->wl_display);
+ chain->present_ids.wp_presentation =
+ wl_proxy_create_wrapper(chain->wsi_wl_surface->display->wp_presentation_notwrapped);
+ wl_proxy_set_queue((struct wl_proxy *) chain->present_ids.wp_presentation,
+- chain->present_ids.queue);
++ chain->queue);
+ }
+
+ chain->fifo_ready = true;
+--
+2.43.0
+
+
+From b9a2fb065c30a9065615c894d7da4a4796a4f487 Mon Sep 17 00:00:00 2001
+From: Denis <benato.denis96@gmail.com>
+Date: Sun, 10 Dec 2023 14:46:16 +0100
+Subject: [PATCH 3/5] vulkan/wsi/wayland: Use commit_timing/commit_queue
+ protocol for FIFO
+
+The commit_timing protocol allows us to set a presentation timestamp,
+and the commit_queue protocol allows us to request FIFO semantics for
+committed state (instead of the default mailbox).
+
+I these are available, use them to implement Vulkan's FIFO presentation
+mode.
+
+Signed-off-by: Derek Foreman <derek.foreman@collabora.com>
+---
+ src/egl/wayland/wayland-drm/meson.build | 2 +
+ src/vulkan/wsi/meson.build | 2 +
+ src/vulkan/wsi/wsi_common_wayland.c | 138 +-
+ src/vulkan/wsi/wsi_common_wayland.c.orig | 2475 ++++++++++++++++++++++
+ 4 files changed, 2605 insertions(+), 12 deletions(-)
+ create mode 100644 src/vulkan/wsi/wsi_common_wayland.c.orig
+
+diff --git a/src/egl/wayland/wayland-drm/meson.build b/src/egl/wayland/wayland-drm/meson.build
+index ac822acec67..8b6044f09e5 100644
+--- a/src/egl/wayland/wayland-drm/meson.build
++++ b/src/egl/wayland/wayland-drm/meson.build
+@@ -59,6 +59,8 @@ libwayland_drm = static_library(
+ # here for now as the maybe-least-bad solution.
+ wp_dir = dep_wl_protocols.get_variable(pkgconfig : 'pkgdatadir', internal : 'pkgdatadir')
+ wp_protos = {
++ 'commit-queue-v1': 'staging/commit-queue/commit-queue-v1.xml',
++ 'commit-timing-v1': 'staging/commit-timing/commit-timing-v1.xml',
+ 'linux-dmabuf-unstable-v1': 'unstable/linux-dmabuf/linux-dmabuf-unstable-v1.xml',
+ 'presentation-time': 'stable/presentation-time/presentation-time.xml',
+ 'tearing-control-v1': 'staging/tearing-control/tearing-control-v1.xml',
+diff --git a/src/vulkan/wsi/meson.build b/src/vulkan/wsi/meson.build
+index c8206eac996..48ea09b99aa 100644
+--- a/src/vulkan/wsi/meson.build
++++ b/src/vulkan/wsi/meson.build
+@@ -31,6 +31,8 @@ endif
+
+ if with_platform_wayland
+ files_vulkan_wsi += files('wsi_common_wayland.c')
++ files_vulkan_wsi += wp_files['commit-queue-v1']
++ files_vulkan_wsi += wp_files['commit-timing-v1']
+ files_vulkan_wsi += wp_files['linux-dmabuf-unstable-v1']
+ files_vulkan_wsi += wp_files['presentation-time']
+ files_vulkan_wsi += wp_files['tearing-control-v1']
+diff --git a/src/vulkan/wsi/wsi_common_wayland.c b/src/vulkan/wsi/wsi_common_wayland.c
+index 75e1a361a0b..16848fb0a2a 100644
+--- a/src/vulkan/wsi/wsi_common_wayland.c
++++ b/src/vulkan/wsi/wsi_common_wayland.c
+@@ -41,6 +41,8 @@
+ #include "vk_util.h"
+ #include "wsi_common_entrypoints.h"
+ #include "wsi_common_private.h"
++#include "commit-queue-v1-client-protocol.h"
++#include "commit-timing-v1-client-protocol.h"
+ #include "linux-dmabuf-unstable-v1-client-protocol.h"
+ #include "presentation-time-client-protocol.h"
+ #include "tearing-control-v1-client-protocol.h"
+@@ -113,6 +115,9 @@ struct wsi_wl_display {
+ /* users want per-chain wsi_wl_swapchain->present_ids.wp_presentation */
+ struct wp_presentation *wp_presentation_notwrapped;
+
++ struct wp_commit_queue_manager_v1 *commit_queue_manager;
++ struct wp_commit_timing_manager_v1 *commit_timing_manager;
++
+ struct wsi_wayland *wsi_wl;
+
+ /* Formats populated by zwp_linux_dmabuf_v1 or wl_shm interfaces */
+@@ -135,6 +140,7 @@ struct wsi_wayland {
+
+ struct wsi_wl_image {
+ struct wsi_image base;
++ struct wsi_wl_swapchain *chain;
+ struct wl_buffer *buffer;
+ bool busy;
+ int shm_fd;
+@@ -166,6 +172,9 @@ struct wsi_wl_swapchain {
+
+ struct wsi_wl_surface *wsi_wl_surface;
+ struct wp_tearing_control_v1 *tearing_control;
++ struct wp_commit_queue_v1 *commit_queue;
++ struct wp_commit_timer_v1 *commit_timer;
++ bool can_timestamp;
+
+ struct wl_callback *frame;
+
+@@ -181,13 +190,17 @@ struct wsi_wl_swapchain {
+ const uint64_t *drm_modifiers;
+
+ VkPresentModeKHR present_mode;
+- bool fifo_ready;
++ bool legacy_fifo_ready;
++
++ uint64_t last_target_time;
+
+ struct {
+ pthread_mutex_t lock; /* protects all members */
+ uint64_t max_completed;
+ struct wl_list outstanding_list;
+ struct wp_presentation *wp_presentation;
++ uint64_t phase_time;
++ unsigned int refresh_nsec;
+ } present_ids;
+
+ struct wsi_wl_image images[0];
+@@ -934,6 +947,12 @@ registry_handle_global(void *data, struct wl_registry *registry,
+ } else if (strcmp(interface, wp_tearing_control_manager_v1_interface.name) == 0) {
+ display->tearing_control_manager =
+ wl_registry_bind(registry, name, &wp_tearing_control_manager_v1_interface, 1);
++ } else if (strcmp(interface, wp_commit_timing_manager_v1_interface.name) == 0) {
++ display->commit_timing_manager =
++ wl_registry_bind(registry, name, &wp_commit_timing_manager_v1_interface, 1);
++ } else if (strcmp(interface, wp_commit_queue_manager_v1_interface.name) == 0) {
++ display->commit_queue_manager =
++ wl_registry_bind(registry, name, &wp_commit_queue_manager_v1_interface, 1);
+ }
+ }
+
+@@ -960,6 +979,10 @@ wsi_wl_display_finish(struct wsi_wl_display *display)
+ zwp_linux_dmabuf_v1_destroy(display->wl_dmabuf);
+ if (display->wp_presentation_notwrapped)
+ wp_presentation_destroy(display->wp_presentation_notwrapped);
++ if (display->commit_queue_manager)
++ wp_commit_queue_manager_v1_destroy(display->commit_queue_manager);
++ if (display->commit_timing_manager)
++ wp_commit_timing_manager_v1_destroy(display->commit_timing_manager);
+ if (display->tearing_control_manager)
+ wp_tearing_control_manager_v1_destroy(display->tearing_control_manager);
+ if (display->wl_display_wrapper)
+@@ -1919,6 +1942,16 @@ wsi_wl_swapchain_acquire_next_image(struct wsi_swapchain *wsi_chain,
+ else
+ time_left = end_time - now;
+
++ /* If we can use timestamps, we want to make sure to dispatch the queue
++ * feedback events are in so we can get a refresh rate and a vsync time to
++ * phase lock to */
++ if (chain->can_timestamp) {
++ ret = wl_display_dispatch_queue_pending(wsi_wl_surface->display->wl_display,
++ chain->queue);
++ if (ret < 0)
++ return VK_ERROR_OUT_OF_DATE_KHR;
++ }
++
+ ret = wsi_wl_display_dispatch_queue_with_timeout(wsi_wl_surface->display,
+ wsi_wl_surface->display->queue,
+ time_left);
+@@ -1949,10 +1982,16 @@ presentation_handle_presented(void *data,
+ uint32_t flags)
+ {
+ struct wsi_wl_present_id *id = data;
++ struct timespec presentation_time;
+
+ pthread_mutex_lock(&id->chain->present_ids.lock);
+ if (id->present_id > id->chain->present_ids.max_completed)
+ id->chain->present_ids.max_completed = id->present_id;
++
++ presentation_time.tv_sec = ((uint64_t)tv_sec_hi << 32) + tv_sec_lo;
++ presentation_time.tv_nsec = tv_nsec;
++ id->chain->present_ids.phase_time = timespec_to_nsec(&presentation_time);
++ id->chain->present_ids.refresh_nsec = refresh;
+ pthread_mutex_unlock(&id->chain->present_ids.lock);
+
+ wp_presentation_feedback_destroy(feedback);
+@@ -1988,8 +2027,10 @@ frame_handle_done(void *data, struct wl_callback *callback, uint32_t serial)
+ {
+ struct wsi_wl_swapchain *chain = data;
+
++ assert(!chain->can_timestamp);
++
+ chain->frame = NULL;
+- chain->fifo_ready = true;
++ chain->legacy_fifo_ready = true;
+
+ wl_callback_destroy(callback);
+ }
+@@ -1998,6 +2039,46 @@ static const struct wl_callback_listener frame_listener = {
+ frame_handle_done,
+ };
+
++static void
++set_timestamp(struct wsi_wl_swapchain *chain)
++{
++ uint64_t now, target;
++ struct timespec target_ts;
++ uint64_t refresh;
++ uint64_t phase_time;
++
++ now = os_time_get_nano();
++
++ pthread_mutex_lock(&chain->present_ids.lock);
++ phase_time = chain->present_ids.phase_time;
++ refresh = chain->present_ids.refresh_nsec;
++ pthread_mutex_unlock(&chain->present_ids.lock);
++
++ if (refresh == 0)
++ refresh = 16666666;
++
++ target = chain->last_target_time + refresh;
++
++ if (now > target) {
++ uint64_t offset;
++
++ if (phase_time > now)
++ now = phase_time;
++
++ offset = (now - phase_time) % refresh;
++ target = now - offset + refresh;
++ }
++
++ timespec_from_nsec(&target_ts, target);
++ wp_commit_timer_v1_set_timestamp(chain->commit_timer,
++ target_ts.tv_sec >> 32, target_ts.tv_sec,
++ target_ts.tv_nsec);
++
++ wp_commit_queue_v1_set_queue_mode(chain->commit_queue,
++ WP_COMMIT_QUEUE_V1_QUEUE_MODE_FIFO);
++ chain->last_target_time = target;
++}
++
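Note: the hunk above phase-locks each FIFO commit to the compositor's vsync. It takes the last presented time (phase_time) and refresh period reported by presentation feedback, advances the previous target by one period, and, if that already lies in the past, snaps forward onto the next vsync-aligned instant. A minimal standalone sketch of that arithmetic, using invented example values rather than anything from this patch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical illustration of the target-time selection done by
 * set_timestamp(); all numbers below are made up for the example. */
int main(void)
{
   uint64_t refresh     = 16666666;    /* ~60 Hz period, in ns */
   uint64_t phase_time  = 1000000000;  /* last vsync reported via presentation feedback */
   uint64_t last_target = 1016666666;  /* previous frame's commit target */
   uint64_t now         = 1100000000;  /* current time; several vsyncs have passed */

   uint64_t target = last_target + refresh;   /* ideal: one period after the last target */

   if (now > target) {
      /* We are late: pick the first vsync-aligned time after 'now',
       * keeping the phase defined by phase_time. */
      if (phase_time > now)
         now = phase_time;
      uint64_t offset = (now - phase_time) % refresh;
      target = now - offset + refresh;
   }

   printf("next commit timestamp: %" PRIu64 " ns\n", target);   /* 1116666662 */
   return 0;
}

The computed target is then handed to wp_commit_timer_v1_set_timestamp() and the queue is switched to FIFO mode, as the hunk shows.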
+ static VkResult
+ wsi_wl_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
+ uint32_t image_index,
+@@ -2006,6 +2087,7 @@ wsi_wl_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
+ {
+ struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
+ struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;
++ bool mode_fifo = chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR;
+
+ if (chain->buffer_type == WSI_WL_BUFFER_SHM_MEMCPY) {
+ struct wsi_wl_image *image = &chain->images[image_index];
+@@ -2015,7 +2097,7 @@ wsi_wl_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
+
+ /* For EXT_swapchain_maintenance1. We might have transitioned from FIFO to MAILBOX.
+ * In this case we need to let the FIFO request complete, before presenting MAILBOX. */
+- while (!chain->fifo_ready) {
++ while (!chain->can_timestamp && !chain->legacy_fifo_ready) {
+ int ret = wl_display_dispatch_queue(wsi_wl_surface->display->wl_display,
+ wsi_wl_surface->display->queue);
+ if (ret < 0)
+@@ -2038,16 +2120,19 @@ wsi_wl_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
+ wl_surface_damage(wsi_wl_surface->surface, 0, 0, INT32_MAX, INT32_MAX);
+ }
+
+- if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
+- chain->frame = wl_surface_frame(wsi_wl_surface->surface);
+- wl_callback_add_listener(chain->frame, &frame_listener, chain);
+- chain->fifo_ready = false;
+- } else {
+- /* If we present MAILBOX, any subsequent presentation in FIFO can replace this image. */
+- chain->fifo_ready = true;
++ if (!chain->can_timestamp) {
++ if (mode_fifo) {
++ chain->frame = wl_surface_frame(wsi_wl_surface->surface);
++ wl_callback_add_listener(chain->frame, &frame_listener, chain);
++ chain->legacy_fifo_ready = false;
++ } else {
++ /* If we present MAILBOX, any subsequent presentation in FIFO can replace this image. */
++ chain->legacy_fifo_ready = true;
++ }
+ }
+
+- if (present_id > 0 && chain->present_ids.wp_presentation) {
++ if (chain->present_ids.wp_presentation &&
++ (present_id > 0 || (chain->can_timestamp && mode_fifo))) {
+ struct wsi_wl_present_id *id =
+ vk_zalloc(chain->wsi_wl_surface->display->wsi_wl->alloc, sizeof(*id), sizeof(uintptr_t),
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+@@ -2066,6 +2151,10 @@ wsi_wl_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
+ }
+
+ chain->images[image_index].busy = true;
++
++ if (chain->can_timestamp && mode_fifo)
++ set_timestamp(chain);
++
+ wl_surface_commit(wsi_wl_surface->surface);
+ wl_display_flush(wsi_wl_surface->display->wl_display);
+
+@@ -2181,6 +2270,7 @@ wsi_wl_image_init(struct wsi_wl_swapchain *chain,
+ goto fail_image;
+
+ wl_buffer_add_listener(image->buffer, &buffer_listener, image);
++ image->chain = chain;
+
+ return VK_SUCCESS;
+
+@@ -2231,6 +2321,12 @@ wsi_wl_swapchain_chain_free(struct wsi_wl_swapchain *chain,
+ pthread_mutex_destroy(&chain->present_ids.lock);
+ }
+
++ if (chain->commit_queue)
++ wp_commit_queue_v1_destroy(chain->commit_queue);
++
++ if (chain->commit_timer)
++ wp_commit_timer_v1_destroy(chain->commit_timer);
++
+ wsi_swapchain_finish(&chain->base);
+ }
+
+@@ -2285,6 +2381,15 @@ wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
+ wp_tearing_control_v1_destroy(old_chain->tearing_control);
+ old_chain->tearing_control = NULL;
+ }
++ if (old_chain->commit_queue) {
++ wp_commit_queue_v1_destroy(old_chain->commit_queue);
++ old_chain->commit_queue = NULL;
++ old_chain->can_timestamp = false;
++ }
++ if (old_chain->commit_timer) {
++ wp_commit_timer_v1_destroy(old_chain->commit_timer);
++ old_chain->commit_timer = NULL;
++ }
+ }
+
+ /* Take ownership of the wsi_wl_surface */
+@@ -2400,7 +2505,16 @@ wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
+ chain->queue);
+ }
+
+- chain->fifo_ready = true;
++ chain->legacy_fifo_ready = true;
++ struct wsi_wl_display *dpy = chain->wsi_wl_surface->display;
++ if (dpy->commit_queue_manager &&
++ dpy->commit_timing_manager) {
++ chain->commit_queue = wp_commit_queue_manager_v1_get_queue_controller(dpy->commit_queue_manager,
++ chain->wsi_wl_surface->surface);
++ chain->commit_timer = wp_commit_timing_manager_v1_get_timer(dpy->commit_timing_manager,
++ chain->wsi_wl_surface->surface);
++ chain->can_timestamp = true;
++ }
+
+ for (uint32_t i = 0; i < chain->base.image_count; i++) {
+ result = wsi_wl_image_init(chain, &chain->images[i],
+diff --git a/src/vulkan/wsi/wsi_common_wayland.c.orig b/src/vulkan/wsi/wsi_common_wayland.c.orig
+new file mode 100644
+index 00000000000..75e1a361a0b
+--- /dev/null
++++ b/src/vulkan/wsi/wsi_common_wayland.c.orig
+@@ -0,0 +1,2475 @@
++/*
++ * Copyright © 2015 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <wayland-client.h>
++
++#include <assert.h>
++#include <stdlib.h>
++#include <stdio.h>
++#include <unistd.h>
++#include <errno.h>
++#include <string.h>
++#include <pthread.h>
++#include <poll.h>
++#include <sys/mman.h>
++#include <sys/types.h>
++
++#include "drm-uapi/drm_fourcc.h"
++
++#include "vk_instance.h"
++#include "vk_physical_device.h"
++#include "vk_util.h"
++#include "wsi_common_entrypoints.h"
++#include "wsi_common_private.h"
++#include "linux-dmabuf-unstable-v1-client-protocol.h"
++#include "presentation-time-client-protocol.h"
++#include "tearing-control-v1-client-protocol.h"
++
++#include <util/compiler.h>
++#include <util/hash_table.h>
++#include <util/timespec.h>
++#include <util/u_endian.h>
++#include <util/u_vector.h>
++#include <util/u_dynarray.h>
++#include <util/anon_file.h>
++#include <util/os_time.h>
++
++#ifdef MAJOR_IN_MKDEV
++#include <sys/mkdev.h>
++#endif
++#ifdef MAJOR_IN_SYSMACROS
++#include <sys/sysmacros.h>
++#endif
++
++struct wsi_wayland;
++
++struct wsi_wl_format {
++ VkFormat vk_format;
++ uint32_t flags;
++ struct u_vector modifiers;
++};
++
++struct dmabuf_feedback_format_table {
++ unsigned int size;
++ struct {
++ uint32_t format;
++ uint32_t padding; /* unused */
++ uint64_t modifier;
++ } *data;
++};
++
++struct dmabuf_feedback_tranche {
++ dev_t target_device;
++ uint32_t flags;
++ struct u_vector formats;
++};
++
++struct dmabuf_feedback {
++ dev_t main_device;
++ struct dmabuf_feedback_format_table format_table;
++ struct util_dynarray tranches;
++ struct dmabuf_feedback_tranche pending_tranche;
++};
++
++struct wsi_wl_display {
++ /* The real wl_display */
++ struct wl_display *wl_display;
++ /* Actually a proxy wrapper around the event queue */
++ struct wl_display *wl_display_wrapper;
++
++ pthread_mutex_t wl_fd_lock;
++ pthread_cond_t wl_fd_reader_finished;
++ bool wl_fd_read_in_progress;
++
++ struct wl_event_queue *queue;
++
++ struct wl_shm *wl_shm;
++ struct zwp_linux_dmabuf_v1 *wl_dmabuf;
++ struct zwp_linux_dmabuf_feedback_v1 *wl_dmabuf_feedback;
++ struct wp_tearing_control_manager_v1 *tearing_control_manager;
++
++ struct dmabuf_feedback_format_table format_table;
++
++ /* users want per-chain wsi_wl_swapchain->present_ids.wp_presentation */
++ struct wp_presentation *wp_presentation_notwrapped;
++
++ struct wsi_wayland *wsi_wl;
++
++ /* Formats populated by zwp_linux_dmabuf_v1 or wl_shm interfaces */
++ struct u_vector formats;
++
++ bool sw;
++
++ dev_t main_device;
++ bool same_gpu;
++};
++
++struct wsi_wayland {
++ struct wsi_interface base;
++
++ struct wsi_device *wsi;
++
++ const VkAllocationCallbacks *alloc;
++ VkPhysicalDevice physical_device;
++};
++
++struct wsi_wl_image {
++ struct wsi_image base;
++ struct wl_buffer *buffer;
++ bool busy;
++ int shm_fd;
++ void *shm_ptr;
++ unsigned shm_size;
++};
++
++enum wsi_wl_buffer_type {
++ WSI_WL_BUFFER_NATIVE,
++ WSI_WL_BUFFER_GPU_SHM,
++ WSI_WL_BUFFER_SHM_MEMCPY,
++};
++
++struct wsi_wl_surface {
++ VkIcdSurfaceWayland base;
++
++ struct wsi_wl_swapchain *chain;
++ struct wl_surface *surface;
++ struct wsi_wl_display *display;
++
++ struct zwp_linux_dmabuf_feedback_v1 *wl_dmabuf_feedback;
++ struct dmabuf_feedback dmabuf_feedback, pending_dmabuf_feedback;
++};
++
++struct wsi_wl_swapchain {
++ struct wsi_swapchain base;
++
++ struct wl_event_queue *queue;
++
++ struct wsi_wl_surface *wsi_wl_surface;
++ struct wp_tearing_control_v1 *tearing_control;
++
++ struct wl_callback *frame;
++
++ VkExtent2D extent;
++ VkFormat vk_format;
++ enum wsi_wl_buffer_type buffer_type;
++ uint32_t drm_format;
++ enum wl_shm_format shm_format;
++
++ bool suboptimal;
++
++ uint32_t num_drm_modifiers;
++ const uint64_t *drm_modifiers;
++
++ VkPresentModeKHR present_mode;
++ bool fifo_ready;
++
++ struct {
++ pthread_mutex_t lock; /* protects all members */
++ uint64_t max_completed;
++ struct wl_list outstanding_list;
++ struct wp_presentation *wp_presentation;
++ } present_ids;
++
++ struct wsi_wl_image images[0];
++};
++VK_DEFINE_NONDISP_HANDLE_CASTS(wsi_wl_swapchain, base.base, VkSwapchainKHR,
++ VK_OBJECT_TYPE_SWAPCHAIN_KHR)
++
++enum wsi_wl_fmt_flag {
++ WSI_WL_FMT_ALPHA = 1 << 0,
++ WSI_WL_FMT_OPAQUE = 1 << 1,
++};
++
++static struct wsi_wl_format *
++find_format(struct u_vector *formats, VkFormat format)
++{
++ struct wsi_wl_format *f;
++
++ u_vector_foreach(f, formats)
++ if (f->vk_format == format)
++ return f;
++
++ return NULL;
++}
++
++static int
++wsi_wl_display_read_queue_with_timeout_internal(struct wsi_wl_display *wsi_wl_display,
++ struct wl_event_queue *queue,
++ uint64_t atimeout)
++{
++ uint64_t current_time_nsec;
++ struct timespec rel_timeout, end_time, current_time;
++ int ret;
++
++ if (wl_display_prepare_read_queue(wsi_wl_display->wl_display, queue) < 0) {
++ /* Another thread might have read events for our queue already. Go
++ * back to dispatch them.
++ */
++ pthread_mutex_unlock(&wsi_wl_display->wl_fd_lock);
++ if (errno == EAGAIN)
++ return VK_SUCCESS;
++
++ return VK_ERROR_OUT_OF_DATE_KHR;
++ }
++
++ wsi_wl_display->wl_fd_read_in_progress = true;
++ pthread_mutex_unlock(&wsi_wl_display->wl_fd_lock);
++
++ while (1) {
++ struct pollfd pollfd = {
++ .fd = wl_display_get_fd(wsi_wl_display->wl_display),
++ .events = POLLIN
++ };
++
++ current_time_nsec = os_time_get_nano();
++ if (current_time_nsec > atimeout) {
++ rel_timeout.tv_sec = 0;
++ rel_timeout.tv_nsec = 0;
++ } else {
++ timespec_from_nsec(&current_time, current_time_nsec);
++ timespec_from_nsec(&end_time, atimeout);
++ timespec_sub(&rel_timeout, &end_time, &current_time);
++ }
++
++ ret = ppoll(&pollfd, 1, &rel_timeout, NULL);
++ if (ret < 0) {
++ if (errno == EINTR || errno == EAGAIN)
++ continue;
++
++ ret = VK_ERROR_OUT_OF_DATE_KHR;
++ } else if (ret == 0)
++ ret = VK_TIMEOUT;
++ else
++ ret = VK_SUCCESS;
++
++ break;
++ }
++
++ if (ret != VK_SUCCESS) {
++ wl_display_cancel_read(wsi_wl_display->wl_display);
++ } else {
++ ret = wl_display_read_events(wsi_wl_display->wl_display);
++ if (ret != 0)
++ ret = VK_ERROR_OUT_OF_DATE_KHR;
++ }
++
++ pthread_mutex_lock(&wsi_wl_display->wl_fd_lock);
++ wsi_wl_display->wl_fd_read_in_progress = false;
++ pthread_cond_broadcast(&wsi_wl_display->wl_fd_reader_finished);
++ return ret;
++}
++
++static int
++wsi_wl_display_dispatch_queue_with_timeout(struct wsi_wl_display *wsi_wl_display,
++ struct wl_event_queue *queue,
++ uint64_t timeout)
++{
++ int err;
++ int n_events;
++ uint64_t atimeout, now;
++
++ if (timeout == UINT64_MAX)
++ atimeout = timeout;
++ else
++ atimeout = os_time_get_absolute_timeout(timeout);
++
++ while (1) {
++ n_events = wl_display_dispatch_queue_pending(wsi_wl_display->wl_display,
++ queue);
++ if (n_events > 0) {
++ err = VK_SUCCESS;
++ break;
++ }
++ pthread_mutex_lock(&wsi_wl_display->wl_fd_lock);
++
++ if (wsi_wl_display->wl_fd_read_in_progress) {
++ struct timespec end_time;
++
++ timespec_from_nsec(&end_time, atimeout);
++
++ err = pthread_cond_timedwait(&wsi_wl_display->wl_fd_reader_finished,
++ &wsi_wl_display->wl_fd_lock,
++ &end_time);
++ if (err) {
++ if (errno == ETIMEDOUT)
++ err = VK_TIMEOUT;
++ else
++ err = VK_ERROR_OUT_OF_DATE_KHR;
++ } else {
++ /* We don't know if the other thread actually
++ * dispatched anything, so let the caller decide
++ * whether it should continue.
++ */
++ err = VK_INCOMPLETE;
++ }
++ } else {
++ err = wsi_wl_display_read_queue_with_timeout_internal(wsi_wl_display,
++ queue,
++ timeout);
++ }
++
++ pthread_mutex_unlock(&wsi_wl_display->wl_fd_lock);
++
++ now = os_time_get_nano();
++ if (now > atimeout) {
++ err = VK_TIMEOUT;
++ break;
++ }
++
++ }
++
++ return err;
++}
++
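Note: the function above wraps libwayland's thread-safe read pattern (wl_display_prepare_read_queue, poll on the display fd, wl_display_read_events, with wl_display_cancel_read on failure) behind an absolute timeout and a per-display reader lock, so that several swapchains can wait on the same connection. For reference, a minimal blocking variant of the same pattern, without the timeout and lock bookkeeping, might look like this sketch; it is an illustration under those simplifying assumptions, not code from this patch:

#include <poll.h>
#include <wayland-client.h>

/* Dispatch one batch of events for 'queue', blocking until something arrives.
 * Returns 0 on success, -1 on a poll or protocol error. */
static int dispatch_queue_blocking(struct wl_display *display,
                                   struct wl_event_queue *queue)
{
   /* Dispatch anything that is already queued first. */
   if (wl_display_dispatch_queue_pending(display, queue) > 0)
      return 0;

   /* Only one thread may read the fd; losers of the race dispatch instead. */
   while (wl_display_prepare_read_queue(display, queue) != 0)
      wl_display_dispatch_queue_pending(display, queue);

   wl_display_flush(display);

   struct pollfd pfd = { .fd = wl_display_get_fd(display), .events = POLLIN };
   if (poll(&pfd, 1, -1) <= 0) {
      wl_display_cancel_read(display);   /* a prepare must be paired with cancel */
      return -1;
   }

   if (wl_display_read_events(display) < 0)
      return -1;

   return wl_display_dispatch_queue_pending(display, queue) < 0 ? -1 : 0;
}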
++static struct wsi_wl_format *
++wsi_wl_display_add_vk_format(struct wsi_wl_display *display,
++ struct u_vector *formats,
++ VkFormat format, uint32_t flags)
++{
++ assert(flags & (WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE));
++
++ /* Don't add a format that's already in the list */
++ struct wsi_wl_format *f = find_format(formats, format);
++ if (f) {
++ f->flags |= flags;
++ return f;
++ }
++
++ /* Don't add formats that aren't renderable. */
++ VkFormatProperties props;
++
++ display->wsi_wl->wsi->GetPhysicalDeviceFormatProperties(display->wsi_wl->physical_device,
++ format, &props);
++ if (!(props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
++ return NULL;
++
++ struct u_vector modifiers;
++ if (!u_vector_init_pow2(&modifiers, 4, sizeof(uint64_t)))
++ return NULL;
++
++ f = u_vector_add(formats);
++ if (!f) {
++ u_vector_finish(&modifiers);
++ return NULL;
++ }
++
++ f->vk_format = format;
++ f->flags = flags;
++ f->modifiers = modifiers;
++
++ return f;
++}
++
++static void
++wsi_wl_format_add_modifier(struct wsi_wl_format *format, uint64_t modifier)
++{
++ uint64_t *mod;
++
++ if (modifier == DRM_FORMAT_MOD_INVALID)
++ return;
++
++ u_vector_foreach(mod, &format->modifiers)
++ if (*mod == modifier)
++ return;
++
++ mod = u_vector_add(&format->modifiers);
++ if (mod)
++ *mod = modifier;
++}
++
++static void
++wsi_wl_display_add_vk_format_modifier(struct wsi_wl_display *display,
++ struct u_vector *formats,
++ VkFormat vk_format, uint32_t flags,
++ uint64_t modifier)
++{
++ struct wsi_wl_format *format;
++
++ format = wsi_wl_display_add_vk_format(display, formats, vk_format, flags);
++ if (format)
++ wsi_wl_format_add_modifier(format, modifier);
++}
++
++static void
++wsi_wl_display_add_drm_format_modifier(struct wsi_wl_display *display,
++ struct u_vector *formats,
++ uint32_t drm_format, uint64_t modifier)
++{
++ switch (drm_format) {
++#if 0
++ /* TODO: These are only available when VK_EXT_4444_formats is enabled, so
++ * we probably need to make their use conditional on this extension. */
++ case DRM_FORMAT_ARGB4444:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_A4R4G4B4_UNORM_PACK16,
++ WSI_WL_FMT_ALPHA, modifier);
++ break;
++ case DRM_FORMAT_XRGB4444:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_A4R4G4B4_UNORM_PACK16,
++ WSI_WL_FMT_OPAQUE, modifier);
++ break;
++ case DRM_FORMAT_ABGR4444:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_A4B4G4R4_UNORM_PACK16,
++ WSI_WL_FMT_ALPHA, modifier);
++ break;
++ case DRM_FORMAT_XBGR4444:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_A4B4G4R4_UNORM_PACK16,
++ WSI_WL_FMT_OPAQUE, modifier);
++ break;
++#endif
++
++ /* Vulkan _PACKN formats have the same component order as DRM formats
++ * on little endian systems; on big endian there is no analog. */
++#if UTIL_ARCH_LITTLE_ENDIAN
++ case DRM_FORMAT_RGBA4444:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_R4G4B4A4_UNORM_PACK16,
++ WSI_WL_FMT_ALPHA, modifier);
++ break;
++ case DRM_FORMAT_RGBX4444:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_R4G4B4A4_UNORM_PACK16,
++ WSI_WL_FMT_OPAQUE, modifier);
++ break;
++ case DRM_FORMAT_BGRA4444:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_B4G4R4A4_UNORM_PACK16,
++ WSI_WL_FMT_ALPHA, modifier);
++ break;
++ case DRM_FORMAT_BGRX4444:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_B4G4R4A4_UNORM_PACK16,
++ WSI_WL_FMT_OPAQUE, modifier);
++ break;
++ case DRM_FORMAT_RGB565:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_R5G6B5_UNORM_PACK16,
++ WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
++ modifier);
++ break;
++ case DRM_FORMAT_BGR565:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_B5G6R5_UNORM_PACK16,
++ WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
++ modifier);
++ break;
++ case DRM_FORMAT_ARGB1555:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_A1R5G5B5_UNORM_PACK16,
++ WSI_WL_FMT_ALPHA, modifier);
++ break;
++ case DRM_FORMAT_XRGB1555:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_A1R5G5B5_UNORM_PACK16,
++ WSI_WL_FMT_OPAQUE, modifier);
++ break;
++ case DRM_FORMAT_RGBA5551:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_R5G5B5A1_UNORM_PACK16,
++ WSI_WL_FMT_ALPHA, modifier);
++ break;
++ case DRM_FORMAT_RGBX5551:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_R5G5B5A1_UNORM_PACK16,
++ WSI_WL_FMT_OPAQUE, modifier);
++ break;
++ case DRM_FORMAT_BGRA5551:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_B5G5R5A1_UNORM_PACK16,
++ WSI_WL_FMT_ALPHA, modifier);
++ break;
++ case DRM_FORMAT_BGRX5551:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_B5G5R5A1_UNORM_PACK16,
++ WSI_WL_FMT_OPAQUE, modifier);
++ break;
++ case DRM_FORMAT_ARGB2101010:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_A2R10G10B10_UNORM_PACK32,
++ WSI_WL_FMT_ALPHA, modifier);
++ break;
++ case DRM_FORMAT_XRGB2101010:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_A2R10G10B10_UNORM_PACK32,
++ WSI_WL_FMT_OPAQUE, modifier);
++ break;
++ case DRM_FORMAT_ABGR2101010:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_A2B10G10R10_UNORM_PACK32,
++ WSI_WL_FMT_ALPHA, modifier);
++ break;
++ case DRM_FORMAT_XBGR2101010:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_A2B10G10R10_UNORM_PACK32,
++ WSI_WL_FMT_OPAQUE, modifier);
++ break;
++
++ /* Vulkan 16-bits-per-channel formats have an inverted channel order
++ * compared to DRM formats, just like the 8-bits-per-channel ones.
++ * On little endian systems the memory representation of each channel
++ * matches the DRM formats'. */
++ case DRM_FORMAT_ABGR16161616:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_R16G16B16A16_UNORM,
++ WSI_WL_FMT_ALPHA, modifier);
++ break;
++ case DRM_FORMAT_XBGR16161616:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_R16G16B16A16_UNORM,
++ WSI_WL_FMT_OPAQUE, modifier);
++ break;
++ case DRM_FORMAT_ABGR16161616F:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_R16G16B16A16_SFLOAT,
++ WSI_WL_FMT_ALPHA, modifier);
++ break;
++ case DRM_FORMAT_XBGR16161616F:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_R16G16B16A16_SFLOAT,
++ WSI_WL_FMT_OPAQUE, modifier);
++ break;
++#endif
++
++ /* Non-packed 8-bit formats have an inverted channel order compared to the
++ * little endian DRM formats, because the DRM channel ordering is high->low
++ * but the vulkan channel ordering is in memory byte order
++ *
++ * For all UNORM formats which have a SRGB variant, we must support both if
++ * we can. SRGB in this context means that rendering to it will result in a
++ * linear -> nonlinear SRGB colorspace conversion before the data is stored.
++ * The inverse function is applied when sampling from SRGB images.
++ * From Wayland's perspective nothing changes, the difference is just how
++ * Vulkan interprets the pixel data. */
++ case DRM_FORMAT_XBGR8888:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_R8G8B8_SRGB,
++ WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
++ modifier);
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_R8G8B8_UNORM,
++ WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
++ modifier);
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_R8G8B8A8_SRGB,
++ WSI_WL_FMT_OPAQUE, modifier);
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_R8G8B8A8_UNORM,
++ WSI_WL_FMT_OPAQUE, modifier);
++ break;
++ case DRM_FORMAT_ABGR8888:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_R8G8B8A8_SRGB,
++ WSI_WL_FMT_ALPHA, modifier);
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_R8G8B8A8_UNORM,
++ WSI_WL_FMT_ALPHA, modifier);
++ break;
++ case DRM_FORMAT_XRGB8888:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_B8G8R8_SRGB,
++ WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
++ modifier);
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_B8G8R8_UNORM,
++ WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
++ modifier);
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_B8G8R8A8_SRGB,
++ WSI_WL_FMT_OPAQUE, modifier);
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_B8G8R8A8_UNORM,
++ WSI_WL_FMT_OPAQUE, modifier);
++ break;
++ case DRM_FORMAT_ARGB8888:
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_B8G8R8A8_SRGB,
++ WSI_WL_FMT_ALPHA, modifier);
++ wsi_wl_display_add_vk_format_modifier(display, formats,
++ VK_FORMAT_B8G8R8A8_UNORM,
++ WSI_WL_FMT_ALPHA, modifier);
++ break;
++ }
++}
++
++static uint32_t
++drm_format_for_wl_shm_format(enum wl_shm_format shm_format)
++{
++ /* wl_shm formats are identical to DRM, except ARGB8888 and XRGB8888 */
++ switch (shm_format) {
++ case WL_SHM_FORMAT_ARGB8888:
++ return DRM_FORMAT_ARGB8888;
++ case WL_SHM_FORMAT_XRGB8888:
++ return DRM_FORMAT_XRGB8888;
++ default:
++ return shm_format;
++ }
++}
++
++static void
++wsi_wl_display_add_wl_shm_format(struct wsi_wl_display *display,
++ struct u_vector *formats,
++ enum wl_shm_format shm_format)
++{
++ uint32_t drm_format = drm_format_for_wl_shm_format(shm_format);
++
++ wsi_wl_display_add_drm_format_modifier(display, formats, drm_format,
++ DRM_FORMAT_MOD_INVALID);
++}
++
++static uint32_t
++wl_drm_format_for_vk_format(VkFormat vk_format, bool alpha)
++{
++ switch (vk_format) {
++#if 0
++ case VK_FORMAT_A4R4G4B4_UNORM_PACK16:
++ return alpha ? DRM_FORMAT_ARGB4444 : DRM_FORMAT_XRGB4444;
++ case VK_FORMAT_A4B4G4R4_UNORM_PACK16:
++ return alpha ? DRM_FORMAT_ABGR4444 : DRM_FORMAT_XBGR4444;
++#endif
++#if UTIL_ARCH_LITTLE_ENDIAN
++ case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
++ return alpha ? DRM_FORMAT_RGBA4444 : DRM_FORMAT_RGBX4444;
++ case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
++ return alpha ? DRM_FORMAT_BGRA4444 : DRM_FORMAT_BGRX4444;
++ case VK_FORMAT_R5G6B5_UNORM_PACK16:
++ return DRM_FORMAT_RGB565;
++ case VK_FORMAT_B5G6R5_UNORM_PACK16:
++ return DRM_FORMAT_BGR565;
++ case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
++ return alpha ? DRM_FORMAT_ARGB1555 : DRM_FORMAT_XRGB1555;
++ case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
++ return alpha ? DRM_FORMAT_RGBA5551 : DRM_FORMAT_RGBX5551;
++ case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
++ return alpha ? DRM_FORMAT_BGRA5551 : DRM_FORMAT_BGRX5551;
++ case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
++ return alpha ? DRM_FORMAT_ARGB2101010 : DRM_FORMAT_XRGB2101010;
++ case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
++ return alpha ? DRM_FORMAT_ABGR2101010 : DRM_FORMAT_XBGR2101010;
++ case VK_FORMAT_R16G16B16A16_UNORM:
++ return alpha ? DRM_FORMAT_ABGR16161616 : DRM_FORMAT_XBGR16161616;
++ case VK_FORMAT_R16G16B16A16_SFLOAT:
++ return alpha ? DRM_FORMAT_ABGR16161616F : DRM_FORMAT_XBGR16161616F;
++#endif
++ case VK_FORMAT_R8G8B8_UNORM:
++ case VK_FORMAT_R8G8B8_SRGB:
++ return DRM_FORMAT_XBGR8888;
++ case VK_FORMAT_R8G8B8A8_UNORM:
++ case VK_FORMAT_R8G8B8A8_SRGB:
++ return alpha ? DRM_FORMAT_ABGR8888 : DRM_FORMAT_XBGR8888;
++ case VK_FORMAT_B8G8R8_UNORM:
++ case VK_FORMAT_B8G8R8_SRGB:
++ return DRM_FORMAT_BGRX8888;
++ case VK_FORMAT_B8G8R8A8_UNORM:
++ case VK_FORMAT_B8G8R8A8_SRGB:
++ return alpha ? DRM_FORMAT_ARGB8888 : DRM_FORMAT_XRGB8888;
++
++ default:
++ assert(!"Unsupported Vulkan format");
++ return DRM_FORMAT_INVALID;
++ }
++}
++
++static enum wl_shm_format
++wl_shm_format_for_vk_format(VkFormat vk_format, bool alpha)
++{
++ uint32_t drm_format = wl_drm_format_for_vk_format(vk_format, alpha);
++ if (drm_format == DRM_FORMAT_INVALID) {
++ return 0;
++ }
++
++ /* wl_shm formats are identical to DRM, except ARGB8888 and XRGB8888 */
++ switch (drm_format) {
++ case DRM_FORMAT_ARGB8888:
++ return WL_SHM_FORMAT_ARGB8888;
++ case DRM_FORMAT_XRGB8888:
++ return WL_SHM_FORMAT_XRGB8888;
++ default:
++ return drm_format;
++ }
++}
++
++static void
++dmabuf_handle_format(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
++ uint32_t format)
++{
++ /* Formats are implicitly advertised by the modifier event, so we ignore
++ * them here. */
++}
++
++static void
++dmabuf_handle_modifier(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
++ uint32_t format, uint32_t modifier_hi,
++ uint32_t modifier_lo)
++{
++ struct wsi_wl_display *display = data;
++ uint64_t modifier;
++
++ /* Ignore this if the compositor advertised dma-buf feedback. From version 4
++ * onwards (when dma-buf feedback was introduced), the compositor should not
++ * advertise this event anymore, but let's keep this for safety. */
++ if (display->wl_dmabuf_feedback)
++ return;
++
++ modifier = ((uint64_t) modifier_hi << 32) | modifier_lo;
++ wsi_wl_display_add_drm_format_modifier(display, &display->formats,
++ format, modifier);
++}
++
++static const struct zwp_linux_dmabuf_v1_listener dmabuf_listener = {
++ dmabuf_handle_format,
++ dmabuf_handle_modifier,
++};
++
++static void
++dmabuf_feedback_format_table_fini(struct dmabuf_feedback_format_table *format_table)
++{
++ if (format_table->data && format_table->data != MAP_FAILED)
++ munmap(format_table->data, format_table->size);
++}
++
++static void
++dmabuf_feedback_format_table_init(struct dmabuf_feedback_format_table *format_table)
++{
++ memset(format_table, 0, sizeof(*format_table));
++}
++
++static void
++dmabuf_feedback_tranche_fini(struct dmabuf_feedback_tranche *tranche)
++{
++ struct wsi_wl_format *format;
++
++ u_vector_foreach(format, &tranche->formats)
++ u_vector_finish(&format->modifiers);
++
++ u_vector_finish(&tranche->formats);
++}
++
++static int
++dmabuf_feedback_tranche_init(struct dmabuf_feedback_tranche *tranche)
++{
++ memset(tranche, 0, sizeof(*tranche));
++
++ if (!u_vector_init(&tranche->formats, 8, sizeof(struct wsi_wl_format)))
++ return -1;
++
++ return 0;
++}
++
++static void
++dmabuf_feedback_fini(struct dmabuf_feedback *dmabuf_feedback)
++{
++ dmabuf_feedback_tranche_fini(&dmabuf_feedback->pending_tranche);
++
++ util_dynarray_foreach(&dmabuf_feedback->tranches,
++ struct dmabuf_feedback_tranche, tranche)
++ dmabuf_feedback_tranche_fini(tranche);
++ util_dynarray_fini(&dmabuf_feedback->tranches);
++
++ dmabuf_feedback_format_table_fini(&dmabuf_feedback->format_table);
++}
++
++static int
++dmabuf_feedback_init(struct dmabuf_feedback *dmabuf_feedback)
++{
++ memset(dmabuf_feedback, 0, sizeof(*dmabuf_feedback));
++
++ if (dmabuf_feedback_tranche_init(&dmabuf_feedback->pending_tranche) < 0)
++ return -1;
++
++ util_dynarray_init(&dmabuf_feedback->tranches, NULL);
++
++ dmabuf_feedback_format_table_init(&dmabuf_feedback->format_table);
++
++ return 0;
++}
++
++static void
++default_dmabuf_feedback_format_table(void *data,
++ struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
++ int32_t fd, uint32_t size)
++{
++ struct wsi_wl_display *display = data;
++
++ display->format_table.size = size;
++ display->format_table.data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
++
++ close(fd);
++}
++
++static void
++default_dmabuf_feedback_main_device(void *data,
++ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
++ struct wl_array *device)
++{
++ struct wsi_wl_display *display = data;
++
++ assert(device->size == sizeof(dev_t));
++ memcpy(&display->main_device, device->data, device->size);
++}
++
++static void
++default_dmabuf_feedback_tranche_target_device(void *data,
++ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
++ struct wl_array *device)
++{
++ /* ignore this event */
++}
++
++static void
++default_dmabuf_feedback_tranche_flags(void *data,
++ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
++ uint32_t flags)
++{
++ /* ignore this event */
++}
++
++static void
++default_dmabuf_feedback_tranche_formats(void *data,
++ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
++ struct wl_array *indices)
++{
++ struct wsi_wl_display *display = data;
++ uint32_t format;
++ uint64_t modifier;
++ uint16_t *index;
++
++ /* We couldn't map the format table or the compositor didn't advertise it,
++ * so we have to ignore the feedback. */
++ if (display->format_table.data == MAP_FAILED ||
++ display->format_table.data == NULL)
++ return;
++
++ wl_array_for_each(index, indices) {
++ format = display->format_table.data[*index].format;
++ modifier = display->format_table.data[*index].modifier;
++ wsi_wl_display_add_drm_format_modifier(display, &display->formats,
++ format, modifier);
++ }
++}
++
++static void
++default_dmabuf_feedback_tranche_done(void *data,
++ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
++{
++ /* ignore this event */
++}
++
++static void
++default_dmabuf_feedback_done(void *data,
++ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
++{
++ /* ignore this event */
++}
++
++static const struct zwp_linux_dmabuf_feedback_v1_listener
++dmabuf_feedback_listener = {
++ .format_table = default_dmabuf_feedback_format_table,
++ .main_device = default_dmabuf_feedback_main_device,
++ .tranche_target_device = default_dmabuf_feedback_tranche_target_device,
++ .tranche_flags = default_dmabuf_feedback_tranche_flags,
++ .tranche_formats = default_dmabuf_feedback_tranche_formats,
++ .tranche_done = default_dmabuf_feedback_tranche_done,
++ .done = default_dmabuf_feedback_done,
++};
++
++static void
++shm_handle_format(void *data, struct wl_shm *shm, uint32_t format)
++{
++ struct wsi_wl_display *display = data;
++
++ wsi_wl_display_add_wl_shm_format(display, &display->formats, format);
++}
++
++static const struct wl_shm_listener shm_listener = {
++ .format = shm_handle_format
++};
++
++static void
++registry_handle_global(void *data, struct wl_registry *registry,
++ uint32_t name, const char *interface, uint32_t version)
++{
++ struct wsi_wl_display *display = data;
++
++ if (display->sw) {
++ if (strcmp(interface, wl_shm_interface.name) == 0) {
++ display->wl_shm = wl_registry_bind(registry, name, &wl_shm_interface, 1);
++ wl_shm_add_listener(display->wl_shm, &shm_listener, display);
++ }
++ } else {
++ if (strcmp(interface, zwp_linux_dmabuf_v1_interface.name) == 0 && version >= 3) {
++ display->wl_dmabuf =
++ wl_registry_bind(registry, name, &zwp_linux_dmabuf_v1_interface,
++ MIN2(version, ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION));
++ zwp_linux_dmabuf_v1_add_listener(display->wl_dmabuf,
++ &dmabuf_listener, display);
++ }
++ }
++
++ if (strcmp(interface, wp_presentation_interface.name) == 0) {
++ display->wp_presentation_notwrapped =
++ wl_registry_bind(registry, name, &wp_presentation_interface, 1);
++ } else if (strcmp(interface, wp_tearing_control_manager_v1_interface.name) == 0) {
++ display->tearing_control_manager =
++ wl_registry_bind(registry, name, &wp_tearing_control_manager_v1_interface, 1);
++ }
++}
++
++static void
++registry_handle_global_remove(void *data, struct wl_registry *registry,
++ uint32_t name)
++{ /* No-op */ }
++
++static const struct wl_registry_listener registry_listener = {
++ registry_handle_global,
++ registry_handle_global_remove
++};
++
++static void
++wsi_wl_display_finish(struct wsi_wl_display *display)
++{
++ struct wsi_wl_format *f;
++ u_vector_foreach(f, &display->formats)
++ u_vector_finish(&f->modifiers);
++ u_vector_finish(&display->formats);
++ if (display->wl_shm)
++ wl_shm_destroy(display->wl_shm);
++ if (display->wl_dmabuf)
++ zwp_linux_dmabuf_v1_destroy(display->wl_dmabuf);
++ if (display->wp_presentation_notwrapped)
++ wp_presentation_destroy(display->wp_presentation_notwrapped);
++ if (display->tearing_control_manager)
++ wp_tearing_control_manager_v1_destroy(display->tearing_control_manager);
++ if (display->wl_display_wrapper)
++ wl_proxy_wrapper_destroy(display->wl_display_wrapper);
++ if (display->queue)
++ wl_event_queue_destroy(display->queue);
++ pthread_mutex_destroy(&display->wl_fd_lock);
++ pthread_cond_destroy(&display->wl_fd_reader_finished);
++}
++
++static VkResult
++wsi_wl_display_init(struct wsi_wayland *wsi_wl,
++ struct wsi_wl_display *display,
++ struct wl_display *wl_display,
++ bool get_format_list, bool sw)
++{
++ VkResult result = VK_SUCCESS;
++ memset(display, 0, sizeof(*display));
++
++ if (!u_vector_init(&display->formats, 8, sizeof(struct wsi_wl_format)))
++ return VK_ERROR_OUT_OF_HOST_MEMORY;
++
++ display->wsi_wl = wsi_wl;
++ display->wl_display = wl_display;
++ display->sw = sw;
++
++ display->wl_fd_read_in_progress = false;
++ pthread_mutex_init(&display->wl_fd_lock, NULL);
++ if (!wsi_init_pthread_cond_monotonic(&display->wl_fd_reader_finished))
++ goto fail;
++
++ display->queue = wl_display_create_queue(wl_display);
++ if (!display->queue) {
++ result = VK_ERROR_OUT_OF_HOST_MEMORY;
++ goto fail;
++ }
++
++ display->wl_display_wrapper = wl_proxy_create_wrapper(wl_display);
++ if (!display->wl_display_wrapper) {
++ result = VK_ERROR_OUT_OF_HOST_MEMORY;
++ goto fail;
++ }
++
++ wl_proxy_set_queue((struct wl_proxy *) display->wl_display_wrapper,
++ display->queue);
++
++ struct wl_registry *registry =
++ wl_display_get_registry(display->wl_display_wrapper);
++ if (!registry) {
++ result = VK_ERROR_OUT_OF_HOST_MEMORY;
++ goto fail;
++ }
++
++ wl_registry_add_listener(registry, &registry_listener, display);
++
++ /* Round-trip to get wl_shm and zwp_linux_dmabuf_v1 globals */
++ wl_display_roundtrip_queue(display->wl_display, display->queue);
++ if (!display->wl_dmabuf && !display->wl_shm) {
++ result = VK_ERROR_SURFACE_LOST_KHR;
++ goto fail_registry;
++ }
++
++ /* Caller doesn't expect us to query formats/modifiers, so return */
++ if (!get_format_list)
++ goto out;
++
++ /* Default assumption */
++ display->same_gpu = true;
++
++ /* Get the default dma-buf feedback */
++ if (display->wl_dmabuf && zwp_linux_dmabuf_v1_get_version(display->wl_dmabuf) >=
++ ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION) {
++ dmabuf_feedback_format_table_init(&display->format_table);
++ display->wl_dmabuf_feedback =
++ zwp_linux_dmabuf_v1_get_default_feedback(display->wl_dmabuf);
++ zwp_linux_dmabuf_feedback_v1_add_listener(display->wl_dmabuf_feedback,
++ &dmabuf_feedback_listener, display);
++
++ /* Round-trip again to fetch dma-buf feedback */
++ wl_display_roundtrip_queue(display->wl_display, display->queue);
++
++ if (wsi_wl->wsi->drm_info.hasRender ||
++ wsi_wl->wsi->drm_info.hasPrimary) {
++ /* Apparently some Wayland compositors do not send the render
++ * device node but the primary, so test against both.
++ */
++ display->same_gpu =
++ (wsi_wl->wsi->drm_info.hasRender &&
++ major(display->main_device) == wsi_wl->wsi->drm_info.renderMajor &&
++ minor(display->main_device) == wsi_wl->wsi->drm_info.renderMinor) ||
++ (wsi_wl->wsi->drm_info.hasPrimary &&
++ major(display->main_device) == wsi_wl->wsi->drm_info.primaryMajor &&
++ minor(display->main_device) == wsi_wl->wsi->drm_info.primaryMinor);
++ }
++ }
++
++ /* Round-trip again to get formats and modifiers */
++ wl_display_roundtrip_queue(display->wl_display, display->queue);
++
++ if (wsi_wl->wsi->force_bgra8_unorm_first) {
++ /* Find BGRA8_UNORM in the list and swap it to the first position if we
++ * can find it. Some apps get confused if SRGB is first in the list.
++ */
++ struct wsi_wl_format *first_fmt = u_vector_head(&display->formats);
++ struct wsi_wl_format *f, tmp_fmt;
++ f = find_format(&display->formats, VK_FORMAT_B8G8R8A8_UNORM);
++ if (f) {
++ tmp_fmt = *f;
++ *f = *first_fmt;
++ *first_fmt = tmp_fmt;
++ }
++ }
++
++out:
++ /* We don't need this anymore */
++ wl_registry_destroy(registry);
++
++ /* Destroy default dma-buf feedback object and format table */
++ if (display->wl_dmabuf_feedback) {
++ zwp_linux_dmabuf_feedback_v1_destroy(display->wl_dmabuf_feedback);
++ display->wl_dmabuf_feedback = NULL;
++ dmabuf_feedback_format_table_fini(&display->format_table);
++ }
++
++ return VK_SUCCESS;
++
++fail_registry:
++ if (registry)
++ wl_registry_destroy(registry);
++
++fail:
++ pthread_mutex_destroy(&display->wl_fd_lock);
++ wsi_wl_display_finish(display);
++ return result;
++}
++
++static VkResult
++wsi_wl_display_create(struct wsi_wayland *wsi, struct wl_display *wl_display,
++ bool sw,
++ struct wsi_wl_display **display_out)
++{
++ struct wsi_wl_display *display =
++ vk_alloc(wsi->alloc, sizeof(*display), 8,
++ VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
++ if (!display)
++ return VK_ERROR_OUT_OF_HOST_MEMORY;
++
++ VkResult result = wsi_wl_display_init(wsi, display, wl_display, true,
++ sw);
++ if (result != VK_SUCCESS) {
++ vk_free(wsi->alloc, display);
++ return result;
++ }
++
++ *display_out = display;
++
++ return result;
++}
++
++static void
++wsi_wl_display_destroy(struct wsi_wl_display *display)
++{
++ struct wsi_wayland *wsi = display->wsi_wl;
++ wsi_wl_display_finish(display);
++ vk_free(wsi->alloc, display);
++}
++
++VKAPI_ATTR VkBool32 VKAPI_CALL
++wsi_GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
++ uint32_t queueFamilyIndex,
++ struct wl_display *wl_display)
++{
++ VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
++ struct wsi_device *wsi_device = pdevice->wsi_device;
++ struct wsi_wayland *wsi =
++ (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
++
++ if (!(wsi_device->queue_supports_blit & BITFIELD64_BIT(queueFamilyIndex)))
++ return false;
++
++ struct wsi_wl_display display;
++ VkResult ret = wsi_wl_display_init(wsi, &display, wl_display, false,
++ wsi_device->sw);
++ if (ret == VK_SUCCESS)
++ wsi_wl_display_finish(&display);
++
++ return ret == VK_SUCCESS;
++}
++
++static VkResult
++wsi_wl_surface_get_support(VkIcdSurfaceBase *surface,
++ struct wsi_device *wsi_device,
++ uint32_t queueFamilyIndex,
++ VkBool32* pSupported)
++{
++ *pSupported = true;
++
++ return VK_SUCCESS;
++}
++
++static uint32_t
++wsi_wl_surface_get_min_image_count(const VkSurfacePresentModeEXT *present_mode)
++{
++ if (present_mode && (present_mode->presentMode == VK_PRESENT_MODE_FIFO_KHR ||
++ present_mode->presentMode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)) {
++ /* If we receive a FIFO present mode, only 2 images are required for forward progress.
++ * Performance with 2 images will be questionable, but we only allow it for applications
++ * using the new API, so we don't risk breaking any existing apps this way.
++ * Other ICDs expose 2 images here already. */
++ return 2;
++ } else {
++ /* For true mailbox mode, we need at least 4 images:
++ * 1) One to scan out from
++ * 2) One to have queued for scan-out
++ * 3) One to be currently held by the Wayland compositor
++ * 4) One to render to
++ */
++ return 4;
++ }
++}
++
++static uint32_t
++wsi_wl_surface_get_min_image_count_for_mode_group(const VkSwapchainPresentModesCreateInfoEXT *modes)
++{
++ /* If we don't provide the PresentModeCreateInfo struct, we must be backwards compatible,
++ * and assume that minImageCount is the default one, i.e. 4, which supports both FIFO and MAILBOX. */
++ if (!modes) {
++ return wsi_wl_surface_get_min_image_count(NULL);
++ }
++
++ uint32_t max_required = 0;
++ for (uint32_t i = 0; i < modes->presentModeCount; i++) {
++ const VkSurfacePresentModeEXT mode = {
++ VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_EXT,
++ NULL,
++ modes->pPresentModes[i]
++ };
++ max_required = MAX2(max_required, wsi_wl_surface_get_min_image_count(&mode));
++ }
++
++ return max_required;
++}
++
++static VkResult
++wsi_wl_surface_get_capabilities(VkIcdSurfaceBase *surface,
++ struct wsi_device *wsi_device,
++ const VkSurfacePresentModeEXT *present_mode,
++ VkSurfaceCapabilitiesKHR* caps)
++{
++ caps->minImageCount = wsi_wl_surface_get_min_image_count(present_mode);
++ /* There is no real maximum */
++ caps->maxImageCount = 0;
++
++ caps->currentExtent = (VkExtent2D) { UINT32_MAX, UINT32_MAX };
++ caps->minImageExtent = (VkExtent2D) { 1, 1 };
++ caps->maxImageExtent = (VkExtent2D) {
++ wsi_device->maxImageDimension2D,
++ wsi_device->maxImageDimension2D,
++ };
++
++ caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
++ caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
++ caps->maxImageArrayLayers = 1;
++
++ caps->supportedCompositeAlpha =
++ VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
++ VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
++
++ caps->supportedUsageFlags = wsi_caps_get_image_usage();
++
++ VK_FROM_HANDLE(vk_physical_device, pdevice, wsi_device->pdevice);
++ if (pdevice->supported_extensions.EXT_attachment_feedback_loop_layout)
++ caps->supportedUsageFlags |= VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;
++
++ return VK_SUCCESS;
++}
++
++static VkResult
++wsi_wl_surface_get_capabilities2(VkIcdSurfaceBase *surface,
++ struct wsi_device *wsi_device,
++ const void *info_next,
++ VkSurfaceCapabilities2KHR* caps)
++{
++ assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);
++
++ const VkSurfacePresentModeEXT *present_mode = vk_find_struct_const(info_next, SURFACE_PRESENT_MODE_EXT);
++
++ VkResult result =
++ wsi_wl_surface_get_capabilities(surface, wsi_device, present_mode,
++ &caps->surfaceCapabilities);
++
++ vk_foreach_struct(ext, caps->pNext) {
++ switch (ext->sType) {
++ case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
++ VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
++ protected->supportsProtected = VK_FALSE;
++ break;
++ }
++
++ case VK_STRUCTURE_TYPE_SURFACE_PRESENT_SCALING_CAPABILITIES_EXT: {
++ /* Unsupported. */
++ VkSurfacePresentScalingCapabilitiesEXT *scaling = (void *)ext;
++ scaling->supportedPresentScaling = 0;
++ scaling->supportedPresentGravityX = 0;
++ scaling->supportedPresentGravityY = 0;
++ scaling->minScaledImageExtent = caps->surfaceCapabilities.minImageExtent;
++ scaling->maxScaledImageExtent = caps->surfaceCapabilities.maxImageExtent;
++ break;
++ }
++
++ case VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_EXT: {
++ /* Can easily toggle between FIFO and MAILBOX on Wayland. */
++ VkSurfacePresentModeCompatibilityEXT *compat = (void *)ext;
++ if (compat->pPresentModes) {
++ assert(present_mode);
++ VK_OUTARRAY_MAKE_TYPED(VkPresentModeKHR, modes, compat->pPresentModes, &compat->presentModeCount);
++ /* Must always return queried present mode even when truncating. */
++ vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
++ *mode = present_mode->presentMode;
++ }
++ switch (present_mode->presentMode) {
++ case VK_PRESENT_MODE_MAILBOX_KHR:
++ vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
++ *mode = VK_PRESENT_MODE_FIFO_KHR;
++ }
++ break;
++ case VK_PRESENT_MODE_FIFO_KHR:
++ vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
++ *mode = VK_PRESENT_MODE_MAILBOX_KHR;
++ }
++ break;
++ default:
++ break;
++ }
++ } else {
++ if (!present_mode) {
++ wsi_common_vk_warn_once("Use of VkSurfacePresentModeCompatibilityEXT "
++ "without a VkSurfacePresentModeEXT set. This is an "
++ "application bug.\n");
++ compat->presentModeCount = 1;
++ } else {
++ switch (present_mode->presentMode) {
++ case VK_PRESENT_MODE_MAILBOX_KHR:
++ case VK_PRESENT_MODE_FIFO_KHR:
++ compat->presentModeCount = 2;
++ break;
++ default:
++ compat->presentModeCount = 1;
++ break;
++ }
++ }
++ }
++ break;
++ }
++
++ default:
++ /* Ignored */
++ break;
++ }
++ }
++
++ return result;
++}
++
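Note: the capability and compatibility logic above is what an application reaches through VK_EXT_surface_maintenance1: chaining a VkSurfacePresentModeEXT into the query makes minImageCount present-mode specific (2 for FIFO on this path), and a chained VkSurfacePresentModeCompatibilityEXT returns the FIFO/MAILBOX pair. A hedged caller-side sketch, assuming the relevant extensions are enabled and that the physical device and surface already exist:

#include <vulkan/vulkan.h>

/* Hypothetical helper; 'physical_device' and 'surface' are assumed to come
 * from an instance with VK_KHR_get_surface_capabilities2 and
 * VK_EXT_surface_maintenance1 enabled. */
static void query_fifo_surface_caps(VkPhysicalDevice physical_device,
                                    VkSurfaceKHR surface)
{
   VkSurfacePresentModeEXT mode_info = {
      .sType = VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_EXT,
      .presentMode = VK_PRESENT_MODE_FIFO_KHR,
   };

   VkPhysicalDeviceSurfaceInfo2KHR surface_info = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR,
      .pNext = &mode_info,
      .surface = surface,
   };

   VkPresentModeKHR compatible[8];
   VkSurfacePresentModeCompatibilityEXT compat = {
      .sType = VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_EXT,
      .presentModeCount = 8,
      .pPresentModes = compatible,
   };

   VkSurfaceCapabilities2KHR caps = {
      .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
      .pNext = &compat,
   };

   vkGetPhysicalDeviceSurfaceCapabilities2KHR(physical_device, &surface_info, &caps);
   /* On the Wayland WSI path above: caps.surfaceCapabilities.minImageCount is 2
    * for FIFO, and 'compatible' holds FIFO and MAILBOX. */
}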
++static VkResult
++wsi_wl_surface_get_formats(VkIcdSurfaceBase *icd_surface,
++ struct wsi_device *wsi_device,
++ uint32_t* pSurfaceFormatCount,
++ VkSurfaceFormatKHR* pSurfaceFormats)
++{
++ VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
++ struct wsi_wayland *wsi =
++ (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
++
++ struct wsi_wl_display display;
++ if (wsi_wl_display_init(wsi, &display, surface->display, true,
++ wsi_device->sw))
++ return VK_ERROR_SURFACE_LOST_KHR;
++
++ VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out,
++ pSurfaceFormats, pSurfaceFormatCount);
++
++ struct wsi_wl_format *disp_fmt;
++ u_vector_foreach(disp_fmt, &display.formats) {
++ /* Skip formats for which we can't support both alpha & opaque
++ * formats.
++ */
++ if (!(disp_fmt->flags & WSI_WL_FMT_ALPHA) ||
++ !(disp_fmt->flags & WSI_WL_FMT_OPAQUE))
++ continue;
++
++ vk_outarray_append_typed(VkSurfaceFormatKHR, &out, out_fmt) {
++ out_fmt->format = disp_fmt->vk_format;
++ out_fmt->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
++ }
++ }
++
++ wsi_wl_display_finish(&display);
++
++ return vk_outarray_status(&out);
++}
++
++static VkResult
++wsi_wl_surface_get_formats2(VkIcdSurfaceBase *icd_surface,
++ struct wsi_device *wsi_device,
++ const void *info_next,
++ uint32_t* pSurfaceFormatCount,
++ VkSurfaceFormat2KHR* pSurfaceFormats)
++{
++ VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
++ struct wsi_wayland *wsi =
++ (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
++
++ struct wsi_wl_display display;
++ if (wsi_wl_display_init(wsi, &display, surface->display, true,
++ wsi_device->sw))
++ return VK_ERROR_SURFACE_LOST_KHR;
++
++ VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out,
++ pSurfaceFormats, pSurfaceFormatCount);
++
++ struct wsi_wl_format *disp_fmt;
++ u_vector_foreach(disp_fmt, &display.formats) {
++ /* Skip formats for which we can't support both alpha & opaque
++ * formats.
++ */
++ if (!(disp_fmt->flags & WSI_WL_FMT_ALPHA) ||
++ !(disp_fmt->flags & WSI_WL_FMT_OPAQUE))
++ continue;
++
++ vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, out_fmt) {
++ out_fmt->surfaceFormat.format = disp_fmt->vk_format;
++ out_fmt->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
++ }
++ }
++
++ wsi_wl_display_finish(&display);
++
++ return vk_outarray_status(&out);
++}
++
++static VkResult
++wsi_wl_surface_get_present_modes(VkIcdSurfaceBase *icd_surface,
++ struct wsi_device *wsi_device,
++ uint32_t* pPresentModeCount,
++ VkPresentModeKHR* pPresentModes)
++{
++ VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
++ struct wsi_wayland *wsi =
++ (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
++
++ struct wsi_wl_display display;
++ if (wsi_wl_display_init(wsi, &display, surface->display, true,
++ wsi_device->sw))
++ return VK_ERROR_SURFACE_LOST_KHR;
++
++ VkPresentModeKHR present_modes[3];
++ uint32_t present_modes_count = 0;
++
++ /* The following two modes are always supported */
++ present_modes[present_modes_count++] = VK_PRESENT_MODE_MAILBOX_KHR;
++ present_modes[present_modes_count++] = VK_PRESENT_MODE_FIFO_KHR;
++
++ if (display.tearing_control_manager)
++ present_modes[present_modes_count++] = VK_PRESENT_MODE_IMMEDIATE_KHR;
++
++ assert(present_modes_count <= ARRAY_SIZE(present_modes));
++ wsi_wl_display_finish(&display);
++
++ if (pPresentModes == NULL) {
++ *pPresentModeCount = present_modes_count;
++ return VK_SUCCESS;
++ }
++
++ *pPresentModeCount = MIN2(*pPresentModeCount, present_modes_count);
++ typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);
++
++ if (*pPresentModeCount < present_modes_count)
++ return VK_INCOMPLETE;
++ else
++ return VK_SUCCESS;
++}
++
++static VkResult
++wsi_wl_surface_get_present_rectangles(VkIcdSurfaceBase *surface,
++ struct wsi_device *wsi_device,
++ uint32_t* pRectCount,
++ VkRect2D* pRects)
++{
++ VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);
++
++ vk_outarray_append_typed(VkRect2D, &out, rect) {
++ /* We don't know a size so just return the usual "I don't know." */
++ *rect = (VkRect2D) {
++ .offset = { 0, 0 },
++ .extent = { UINT32_MAX, UINT32_MAX },
++ };
++ }
++
++ return vk_outarray_status(&out);
++}
++
++void
++wsi_wl_surface_destroy(VkIcdSurfaceBase *icd_surface, VkInstance _instance,
++ const VkAllocationCallbacks *pAllocator)
++{
++ VK_FROM_HANDLE(vk_instance, instance, _instance);
++ struct wsi_wl_surface *wsi_wl_surface =
++ wl_container_of((VkIcdSurfaceWayland *)icd_surface, wsi_wl_surface, base);
++
++ if (wsi_wl_surface->wl_dmabuf_feedback) {
++ zwp_linux_dmabuf_feedback_v1_destroy(wsi_wl_surface->wl_dmabuf_feedback);
++ dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
++ dmabuf_feedback_fini(&wsi_wl_surface->pending_dmabuf_feedback);
++ }
++
++ if (wsi_wl_surface->surface)
++ wl_proxy_wrapper_destroy(wsi_wl_surface->surface);
++
++ if (wsi_wl_surface->display)
++ wsi_wl_display_destroy(wsi_wl_surface->display);
++
++ vk_free2(&instance->alloc, pAllocator, wsi_wl_surface);
++}
++
++static struct wsi_wl_format *
++pick_format_from_surface_dmabuf_feedback(struct wsi_wl_surface *wsi_wl_surface,
++ VkFormat vk_format)
++{
++ struct wsi_wl_format *f = NULL;
++
++ /* If the main_device was not advertised, we don't have valid feedback */
++ if (wsi_wl_surface->dmabuf_feedback.main_device == 0)
++ return NULL;
++
++ util_dynarray_foreach(&wsi_wl_surface->dmabuf_feedback.tranches,
++ struct dmabuf_feedback_tranche, tranche) {
++ f = find_format(&tranche->formats, vk_format);
++ if (f)
++ break;
++ }
++
++ return f;
++}
++
++static void
++surface_dmabuf_feedback_format_table(void *data,
++ struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
++ int32_t fd, uint32_t size)
++{
++ struct wsi_wl_surface *wsi_wl_surface = data;
++ struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
++
++ feedback->format_table.size = size;
++ feedback->format_table.data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
++
++ close(fd);
++}
++
++static void
++surface_dmabuf_feedback_main_device(void *data,
++ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
++ struct wl_array *device)
++{
++ struct wsi_wl_surface *wsi_wl_surface = data;
++ struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
++
++ memcpy(&feedback->main_device, device->data, sizeof(feedback->main_device));
++}
++
++static void
++surface_dmabuf_feedback_tranche_target_device(void *data,
++ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
++ struct wl_array *device)
++{
++ struct wsi_wl_surface *wsi_wl_surface = data;
++ struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
++
++ memcpy(&feedback->pending_tranche.target_device, device->data,
++ sizeof(feedback->pending_tranche.target_device));
++}
++
++static void
++surface_dmabuf_feedback_tranche_flags(void *data,
++ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
++ uint32_t flags)
++{
++ struct wsi_wl_surface *wsi_wl_surface = data;
++ struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
++
++ feedback->pending_tranche.flags = flags;
++}
++
++static void
++surface_dmabuf_feedback_tranche_formats(void *data,
++ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
++ struct wl_array *indices)
++{
++ struct wsi_wl_surface *wsi_wl_surface = data;
++ struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
++ uint32_t format;
++ uint64_t modifier;
++ uint16_t *index;
++
++ /* The compositor may or may not advertise a format table. If it does, we use
++ * it. Otherwise, we steal the most recently advertised format table. If we
++ * don't have one, the compositor did something wrong. */
++ if (feedback->format_table.data == NULL) {
++ feedback->format_table = wsi_wl_surface->dmabuf_feedback.format_table;
++ dmabuf_feedback_format_table_init(&wsi_wl_surface->dmabuf_feedback.format_table);
++ }
++ if (feedback->format_table.data == MAP_FAILED ||
++ feedback->format_table.data == NULL)
++ return;
++
++ wl_array_for_each(index, indices) {
++ format = feedback->format_table.data[*index].format;
++ modifier = feedback->format_table.data[*index].modifier;
++
++ wsi_wl_display_add_drm_format_modifier(wsi_wl_surface->display,
++ &wsi_wl_surface->pending_dmabuf_feedback.pending_tranche.formats,
++ format, modifier);
++ }
++}
++
++static void
++surface_dmabuf_feedback_tranche_done(void *data,
++ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
++{
++ struct wsi_wl_surface *wsi_wl_surface = data;
++ struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
++
++ /* Add tranche to array of tranches. */
++ util_dynarray_append(&feedback->tranches, struct dmabuf_feedback_tranche,
++ feedback->pending_tranche);
++
++ dmabuf_feedback_tranche_init(&feedback->pending_tranche);
++}
++
++static bool
++sets_of_modifiers_are_the_same(uint32_t num_drm_modifiers_A, const uint64_t *modifiers_A,
++ uint32_t num_drm_modifiers_B, const uint64_t *modifiers_B)
++{
++ uint32_t i, j;
++ bool mod_found;
++
++ if (num_drm_modifiers_A != num_drm_modifiers_B)
++ return false;
++
++ for (i = 0; i < num_drm_modifiers_A; i++) {
++ mod_found = false;
++ for (j = 0; j < num_drm_modifiers_B; j++) {
++ if (modifiers_A[i] == modifiers_B[j]) {
++ mod_found = true;
++ break;
++ }
++ }
++ if (!mod_found)
++ return false;
++ }
++
++ return true;
++}
++
++static void
++surface_dmabuf_feedback_done(void *data,
++ struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
++{
++ struct wsi_wl_surface *wsi_wl_surface = data;
++ struct wsi_wl_swapchain *chain = wsi_wl_surface->chain;
++ struct wsi_wl_format *f;
++
++ dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
++ wsi_wl_surface->dmabuf_feedback = wsi_wl_surface->pending_dmabuf_feedback;
++ dmabuf_feedback_init(&wsi_wl_surface->pending_dmabuf_feedback);
++
++ /* Receiving dma-buf feedback does not by itself mean that re-allocation is a
++ * good idea. In order to know whether we should re-allocate, we must
++ * compare the most recent parameters that we used to allocate with the ones
++ * from the feedback we just received.
++ *
++ * The allocation parameters are: the format, its set of modifiers and the
++ * tranche flags. On WSI we are not using the tranche flags for anything, so
++ * we disconsider this. As we can't switch to another format (it is selected
++ * by the client), we just need to compare the set of modifiers.
++ *
++ * So we just look for the vk_format in the tranches (respecting their
++ * preferences), and compare its set of modifiers with the set of modifiers
++ * we've used to allocate previously. If they differ, we are using suboptimal
++ * parameters and should re-allocate.
++ */
++ f = pick_format_from_surface_dmabuf_feedback(wsi_wl_surface, chain->vk_format);
++ if (f && !sets_of_modifiers_are_the_same(u_vector_length(&f->modifiers),
++ u_vector_tail(&f->modifiers),
++ chain->num_drm_modifiers,
++ chain->drm_modifiers))
++ wsi_wl_surface->chain->suboptimal = true;
++}
++
++static const struct zwp_linux_dmabuf_feedback_v1_listener
++surface_dmabuf_feedback_listener = {
++ .format_table = surface_dmabuf_feedback_format_table,
++ .main_device = surface_dmabuf_feedback_main_device,
++ .tranche_target_device = surface_dmabuf_feedback_tranche_target_device,
++ .tranche_flags = surface_dmabuf_feedback_tranche_flags,
++ .tranche_formats = surface_dmabuf_feedback_tranche_formats,
++ .tranche_done = surface_dmabuf_feedback_tranche_done,
++ .done = surface_dmabuf_feedback_done,
++};
++
++static VkResult wsi_wl_surface_bind_to_dmabuf_feedback(struct wsi_wl_surface *wsi_wl_surface)
++{
++ wsi_wl_surface->wl_dmabuf_feedback =
++ zwp_linux_dmabuf_v1_get_surface_feedback(wsi_wl_surface->display->wl_dmabuf,
++ wsi_wl_surface->surface);
++
++ zwp_linux_dmabuf_feedback_v1_add_listener(wsi_wl_surface->wl_dmabuf_feedback,
++ &surface_dmabuf_feedback_listener,
++ wsi_wl_surface);
++
++ if (dmabuf_feedback_init(&wsi_wl_surface->dmabuf_feedback) < 0)
++ goto fail;
++ if (dmabuf_feedback_init(&wsi_wl_surface->pending_dmabuf_feedback) < 0)
++ goto fail_pending;
++
++ return VK_SUCCESS;
++
++fail_pending:
++ dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
++fail:
++ zwp_linux_dmabuf_feedback_v1_destroy(wsi_wl_surface->wl_dmabuf_feedback);
++ wsi_wl_surface->wl_dmabuf_feedback = NULL;
++ return VK_ERROR_OUT_OF_HOST_MEMORY;
++}
++
++static VkResult wsi_wl_surface_init(struct wsi_wl_surface *wsi_wl_surface,
++ struct wsi_device *wsi_device)
++{
++ struct wsi_wayland *wsi =
++ (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
++ VkResult result;
++
++ /* wsi_wl_surface has already been initialized. */
++ if (wsi_wl_surface->display)
++ return VK_SUCCESS;
++
++ result = wsi_wl_display_create(wsi, wsi_wl_surface->base.display,
++ wsi_device->sw, &wsi_wl_surface->display);
++ if (result != VK_SUCCESS)
++ goto fail;
++
++ wsi_wl_surface->surface = wl_proxy_create_wrapper(wsi_wl_surface->base.surface);
++ if (!wsi_wl_surface->surface) {
++ result = VK_ERROR_OUT_OF_HOST_MEMORY;
++ goto fail;
++ }
++ wl_proxy_set_queue((struct wl_proxy *) wsi_wl_surface->surface,
++ wsi_wl_surface->display->queue);
++
++ /* Bind wsi_wl_surface to dma-buf feedback. */
++ if (wsi_wl_surface->display->wl_dmabuf &&
++ zwp_linux_dmabuf_v1_get_version(wsi_wl_surface->display->wl_dmabuf) >=
++ ZWP_LINUX_DMABUF_V1_GET_SURFACE_FEEDBACK_SINCE_VERSION) {
++ result = wsi_wl_surface_bind_to_dmabuf_feedback(wsi_wl_surface);
++ if (result != VK_SUCCESS)
++ goto fail;
++
++ wl_display_roundtrip_queue(wsi_wl_surface->display->wl_display,
++ wsi_wl_surface->display->queue);
++ }
++
++ return VK_SUCCESS;
++
++fail:
++ if (wsi_wl_surface->surface)
++ wl_proxy_wrapper_destroy(wsi_wl_surface->surface);
++
++ if (wsi_wl_surface->display)
++ wsi_wl_display_destroy(wsi_wl_surface->display);
++ return result;
++}
++
++VKAPI_ATTR VkResult VKAPI_CALL
++wsi_CreateWaylandSurfaceKHR(VkInstance _instance,
++ const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
++ const VkAllocationCallbacks *pAllocator,
++ VkSurfaceKHR *pSurface)
++{
++ VK_FROM_HANDLE(vk_instance, instance, _instance);
++ struct wsi_wl_surface *wsi_wl_surface;
++ VkIcdSurfaceWayland *surface;
++
++ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR);
++
++ wsi_wl_surface = vk_zalloc2(&instance->alloc, pAllocator, sizeof *wsi_wl_surface,
++ 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
++ if (wsi_wl_surface == NULL)
++ return VK_ERROR_OUT_OF_HOST_MEMORY;
++
++ surface = &wsi_wl_surface->base;
++
++ surface->base.platform = VK_ICD_WSI_PLATFORM_WAYLAND;
++ surface->display = pCreateInfo->display;
++ surface->surface = pCreateInfo->surface;
++
++ *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
++
++ return VK_SUCCESS;
++}
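
This entry point is the driver side of the application-facing vkCreateWaylandSurfaceKHR. A minimal client-side sketch of the call, assuming the wl_display and wl_surface are obtained elsewhere and the instance was created with VK_KHR_surface and VK_KHR_wayland_surface enabled:

    #include <wayland-client.h>
    #define VK_USE_PLATFORM_WAYLAND_KHR
    #include <vulkan/vulkan.h>

    /* dpy/surf come from the application's Wayland client code. */
    static VkSurfaceKHR create_wayland_surface(VkInstance instance,
                                               struct wl_display *dpy,
                                               struct wl_surface *surf)
    {
        const VkWaylandSurfaceCreateInfoKHR info = {
            .sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR,
            .display = dpy,
            .surface = surf,
        };
        VkSurfaceKHR surface = VK_NULL_HANDLE;

        if (vkCreateWaylandSurfaceKHR(instance, &info, NULL, &surface) != VK_SUCCESS)
            return VK_NULL_HANDLE;
        return surface;
    }
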
++
++struct wsi_wl_present_id {
++ struct wp_presentation_feedback *feedback;
++ uint64_t present_id;
++ const VkAllocationCallbacks *alloc;
++ struct wsi_wl_swapchain *chain;
++ struct wl_list link;
++};
++
++static struct wsi_image *
++wsi_wl_swapchain_get_wsi_image(struct wsi_swapchain *wsi_chain,
++ uint32_t image_index)
++{
++ struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
++ return &chain->images[image_index].base;
++}
++
++static VkResult
++wsi_wl_swapchain_release_images(struct wsi_swapchain *wsi_chain,
++ uint32_t count, const uint32_t *indices)
++{
++ struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
++ for (uint32_t i = 0; i < count; i++) {
++ uint32_t index = indices[i];
++ assert(chain->images[index].busy);
++ chain->images[index].busy = false;
++ }
++ return VK_SUCCESS;
++}
++
++static void
++wsi_wl_swapchain_set_present_mode(struct wsi_swapchain *wsi_chain,
++ VkPresentModeKHR mode)
++{
++ struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
++ chain->base.present_mode = mode;
++}
++
++static VkResult
++wsi_wl_swapchain_wait_for_present(struct wsi_swapchain *wsi_chain,
++ uint64_t present_id,
++ uint64_t timeout)
++{
++ struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
++ uint64_t end_time, time_left, now;
++ int ret;
++ bool expired = false;
++ bool finished;
++
++ if (timeout == UINT64_MAX)
++ end_time = timeout;
++ else
++ end_time = os_time_get_absolute_timeout(timeout);
++
++ /* Need to observe that the swapchain semaphore has been unsignalled,
++ * as this is guaranteed when a present is complete. */
++ VkResult result = wsi_swapchain_wait_for_present_semaphore(
++ &chain->base, present_id, timeout);
++ if (result != VK_SUCCESS)
++ return result;
++
++ if (!chain->present_ids.wp_presentation) {
++      /* If present wait is enabled even though the protocol is not supported,
++       * make a best effort not to crash, even if the result will not be correct.
++       * For correctness, we must at least wait for the timeline semaphore to complete. */
++ return VK_SUCCESS;
++ }
++
++ while (1) {
++ ret = wl_display_dispatch_queue_pending(chain->wsi_wl_surface->display->wl_display,
++ chain->queue);
++ if (ret < 0)
++ return VK_ERROR_OUT_OF_DATE_KHR;
++
++ /* PresentWait can be called concurrently.
++ * If there is contention on this mutex, it means there is currently a dispatcher in flight holding the lock.
++ * The lock is only held while there is forward progress processing events from Wayland,
++ * so there should be no problem locking without timeout.
++ * We would like to be able to support timeout = 0 to query the current max_completed count.
++ * A timedlock with no timeout can be problematic in that scenario. */
++ pthread_mutex_lock(&chain->present_ids.lock);
++ finished = chain->present_ids.max_completed >= present_id;
++ pthread_mutex_unlock(&chain->present_ids.lock);
++ if (finished)
++ return VK_SUCCESS;
++
++ if (expired)
++ return VK_TIMEOUT;
++
++ now = os_time_get_nano();
++ if (now > end_time)
++ time_left = 0;
++ else
++ time_left = end_time - now;
++
++ ret = wsi_wl_display_dispatch_queue_with_timeout(chain->wsi_wl_surface->display,
++ chain->queue,
++ time_left);
++ if (ret == VK_INCOMPLETE)
++ continue;
++
++ if (ret != VK_SUCCESS && ret != VK_TIMEOUT)
++ return ret;
++
++ if (time_left == 0)
++ expired = true;
++ }
++}
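
wait_for_present backs the VK_KHR_present_wait extension: an application can block until the presentation tagged with a given present id has completed. A hedged application-side sketch (the wrapper name is illustrative):

    #include <vulkan/vulkan.h>

    /* Block for up to `timeout_ns` until the present tagged with `present_id`
     * has completed. Requires the presentWait feature of VK_KHR_present_wait
     * to be enabled on `device`. */
    static VkResult app_wait_for_present(VkDevice device, VkSwapchainKHR swapchain,
                                         uint64_t present_id, uint64_t timeout_ns)
    {
        PFN_vkWaitForPresentKHR wait_for_present = (PFN_vkWaitForPresentKHR)
            vkGetDeviceProcAddr(device, "vkWaitForPresentKHR");

        if (!wait_for_present)
            return VK_ERROR_EXTENSION_NOT_PRESENT;
        return wait_for_present(device, swapchain, present_id, timeout_ns);
    }
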
++
++static VkResult
++wsi_wl_swapchain_acquire_next_image(struct wsi_swapchain *wsi_chain,
++ const VkAcquireNextImageInfoKHR *info,
++ uint32_t *image_index)
++{
++ struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
++ struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;
++ uint64_t end_time, time_left, now;
++ bool expired = false;
++ int ret;
++
++ if (info->timeout == UINT64_MAX)
++ end_time = info->timeout;
++ else
++ end_time = os_time_get_absolute_timeout(info->timeout);
++
++ while (1) {
++ ret = wl_display_dispatch_queue_pending(wsi_wl_surface->display->wl_display,
++ wsi_wl_surface->display->queue);
++ if (ret < 0)
++ return VK_ERROR_OUT_OF_DATE_KHR;
++
++ /* Try to find a free image. */
++ for (uint32_t i = 0; i < chain->base.image_count; i++) {
++ if (!chain->images[i].busy) {
++ /* We found a non-busy image */
++ *image_index = i;
++ chain->images[i].busy = true;
++ return (chain->suboptimal ? VK_SUBOPTIMAL_KHR : VK_SUCCESS);
++ }
++ }
++
++ if (expired)
++ return info->timeout ? VK_TIMEOUT : VK_NOT_READY;
++
++ now = os_time_get_nano();
++ if (now > end_time)
++ time_left = 0;
++ else
++ time_left = end_time - now;
++
++ ret = wsi_wl_display_dispatch_queue_with_timeout(wsi_wl_surface->display,
++ wsi_wl_surface->display->queue,
++ time_left);
++ if (ret == VK_ERROR_OUT_OF_DATE_KHR)
++ return ret;
++
++ if (ret == VK_INCOMPLETE)
++ continue;
++
++ if (ret == VK_TIMEOUT)
++ expired = true;
++ }
++}
++
++static void
++presentation_handle_sync_output(void *data,
++ struct wp_presentation_feedback *feedback,
++ struct wl_output *output)
++{
++}
++
++static void
++presentation_handle_presented(void *data,
++ struct wp_presentation_feedback *feedback,
++ uint32_t tv_sec_hi, uint32_t tv_sec_lo,
++ uint32_t tv_nsec, uint32_t refresh,
++ uint32_t seq_hi, uint32_t seq_lo,
++ uint32_t flags)
++{
++ struct wsi_wl_present_id *id = data;
++
++ pthread_mutex_lock(&id->chain->present_ids.lock);
++ if (id->present_id > id->chain->present_ids.max_completed)
++ id->chain->present_ids.max_completed = id->present_id;
++ pthread_mutex_unlock(&id->chain->present_ids.lock);
++
++ wp_presentation_feedback_destroy(feedback);
++ wl_list_remove(&id->link);
++ vk_free(id->alloc, id);
++}
++
++static void
++presentation_handle_discarded(void *data,
++ struct wp_presentation_feedback *feedback)
++{
++ struct wsi_wl_present_id *id = data;
++
++ pthread_mutex_lock(&id->chain->present_ids.lock);
++ if (id->present_id > id->chain->present_ids.max_completed)
++ id->chain->present_ids.max_completed = id->present_id;
++ pthread_mutex_unlock(&id->chain->present_ids.lock);
++
++ wp_presentation_feedback_destroy(feedback);
++ wl_list_remove(&id->link);
++ vk_free(id->alloc, id);
++}
++
++static const struct wp_presentation_feedback_listener
++ pres_feedback_listener = {
++ presentation_handle_sync_output,
++ presentation_handle_presented,
++ presentation_handle_discarded,
++};
++
++static void
++frame_handle_done(void *data, struct wl_callback *callback, uint32_t serial)
++{
++ struct wsi_wl_swapchain *chain = data;
++
++ chain->frame = NULL;
++ chain->fifo_ready = true;
++
++ wl_callback_destroy(callback);
++}
++
++static const struct wl_callback_listener frame_listener = {
++ frame_handle_done,
++};
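
frame_handle_done is the usual Wayland frame-callback throttle: a FIFO present arms a wl_callback on the surface, and the next present blocks until the compositor fires it. A self-contained sketch of the same pattern with illustrative names, independent of the swapchain machinery above:

    #include <stdbool.h>
    #include <wayland-client.h>

    static bool frame_ready = true;

    static void frame_throttle_done(void *data, struct wl_callback *cb,
                                    uint32_t time_ms)
    {
        frame_ready = true;          /* compositor consumed the previous commit */
        wl_callback_destroy(cb);
    }

    static const struct wl_callback_listener frame_throttle_listener = {
        .done = frame_throttle_done,
    };

    /* Commit to `surf` no faster than the compositor's repaint cycle. */
    static int throttled_commit(struct wl_display *dpy, struct wl_surface *surf)
    {
        while (!frame_ready) {
            if (wl_display_dispatch(dpy) < 0)
                return -1;           /* display error, bail out */
        }

        frame_ready = false;
        struct wl_callback *cb = wl_surface_frame(surf);
        wl_callback_add_listener(cb, &frame_throttle_listener, NULL);
        wl_surface_commit(surf);
        wl_display_flush(dpy);
        return 0;
    }
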
++
++static VkResult
++wsi_wl_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
++ uint32_t image_index,
++ uint64_t present_id,
++ const VkPresentRegionKHR *damage)
++{
++ struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
++ struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;
++
++ if (chain->buffer_type == WSI_WL_BUFFER_SHM_MEMCPY) {
++ struct wsi_wl_image *image = &chain->images[image_index];
++ memcpy(image->shm_ptr, image->base.cpu_map,
++ image->base.row_pitches[0] * chain->extent.height);
++ }
++
++   /* For EXT_swapchain_maintenance1: we might have transitioned from FIFO to MAILBOX.
++    * In that case we need to let the FIFO request complete before presenting in MAILBOX. */
++ while (!chain->fifo_ready) {
++ int ret = wl_display_dispatch_queue(wsi_wl_surface->display->wl_display,
++ wsi_wl_surface->display->queue);
++ if (ret < 0)
++ return VK_ERROR_OUT_OF_DATE_KHR;
++ }
++
++ assert(image_index < chain->base.image_count);
++ wl_surface_attach(wsi_wl_surface->surface, chain->images[image_index].buffer, 0, 0);
++
++ if (wl_surface_get_version(wsi_wl_surface->surface) >= 4 && damage &&
++ damage->pRectangles && damage->rectangleCount > 0) {
++ for (unsigned i = 0; i < damage->rectangleCount; i++) {
++ const VkRectLayerKHR *rect = &damage->pRectangles[i];
++ assert(rect->layer == 0);
++ wl_surface_damage_buffer(wsi_wl_surface->surface,
++ rect->offset.x, rect->offset.y,
++ rect->extent.width, rect->extent.height);
++ }
++ } else {
++ wl_surface_damage(wsi_wl_surface->surface, 0, 0, INT32_MAX, INT32_MAX);
++ }
++
++ if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
++ chain->frame = wl_surface_frame(wsi_wl_surface->surface);
++ wl_callback_add_listener(chain->frame, &frame_listener, chain);
++ chain->fifo_ready = false;
++ } else {
++ /* If we present MAILBOX, any subsequent presentation in FIFO can replace this image. */
++ chain->fifo_ready = true;
++ }
++
++ if (present_id > 0 && chain->present_ids.wp_presentation) {
++ struct wsi_wl_present_id *id =
++ vk_zalloc(chain->wsi_wl_surface->display->wsi_wl->alloc, sizeof(*id), sizeof(uintptr_t),
++ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
++ id->chain = chain;
++ id->present_id = present_id;
++ id->alloc = chain->wsi_wl_surface->display->wsi_wl->alloc;
++
++ pthread_mutex_lock(&chain->present_ids.lock);
++ id->feedback = wp_presentation_feedback(chain->present_ids.wp_presentation,
++ chain->wsi_wl_surface->surface);
++ wp_presentation_feedback_add_listener(id->feedback,
++ &pres_feedback_listener,
++ id);
++ wl_list_insert(&chain->present_ids.outstanding_list, &id->link);
++ pthread_mutex_unlock(&chain->present_ids.lock);
++ }
++
++ chain->images[image_index].busy = true;
++ wl_surface_commit(wsi_wl_surface->surface);
++ wl_display_flush(wsi_wl_surface->display->wl_display);
++
++ return VK_SUCCESS;
++}
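
The present_id handled above originates from VK_KHR_present_id: the application chains a VkPresentIdKHR structure into VkPresentInfoKHR when calling vkQueuePresentKHR, and can later pass the same id to vkWaitForPresentKHR. A rough application-side sketch:

    #include <vulkan/vulkan.h>

    /* Present one image and tag it with a monotonically increasing id that can
     * later be passed to vkWaitForPresentKHR. Assumes VK_KHR_present_id is
     * enabled and `wait_semaphore` guards rendering completion. */
    static VkResult present_with_id(VkQueue queue, VkSwapchainKHR swapchain,
                                    uint32_t image_index, uint64_t present_id,
                                    VkSemaphore wait_semaphore)
    {
        const VkPresentIdKHR id_info = {
            .sType = VK_STRUCTURE_TYPE_PRESENT_ID_KHR,
            .swapchainCount = 1,
            .pPresentIds = &present_id,
        };
        const VkPresentInfoKHR present = {
            .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
            .pNext = &id_info,
            .waitSemaphoreCount = 1,
            .pWaitSemaphores = &wait_semaphore,
            .swapchainCount = 1,
            .pSwapchains = &swapchain,
            .pImageIndices = &image_index,
        };
        return vkQueuePresentKHR(queue, &present);
    }
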
++
++static void
++buffer_handle_release(void *data, struct wl_buffer *buffer)
++{
++ struct wsi_wl_image *image = data;
++
++ assert(image->buffer == buffer);
++
++ image->busy = false;
++}
++
++static const struct wl_buffer_listener buffer_listener = {
++ buffer_handle_release,
++};
++
++static uint8_t *
++wsi_wl_alloc_image_shm(struct wsi_image *imagew, unsigned size)
++{
++ struct wsi_wl_image *image = (struct wsi_wl_image *)imagew;
++
++ /* Create a shareable buffer */
++ int fd = os_create_anonymous_file(size, NULL);
++ if (fd < 0)
++ return NULL;
++
++ void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
++ if (ptr == MAP_FAILED) {
++ close(fd);
++ return NULL;
++ }
++
++ image->shm_fd = fd;
++ image->shm_ptr = ptr;
++ image->shm_size = size;
++
++ return ptr;
++}
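
os_create_anonymous_file is Mesa's portable helper for a shareable, sized file descriptor; on Linux it boils down to roughly a memfd plus ftruncate. A rough stand-in, assuming memfd_create is available (glibc 2.27+):

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Create an anonymous, shareable file of `size` bytes and map it; the fd
     * can then be handed to wl_shm_create_pool(). Illustrative stand-in only. */
    static void *create_shared_mapping(size_t size, int *out_fd)
    {
        int fd = memfd_create("wsi-shm-sketch", MFD_CLOEXEC);
        if (fd < 0)
            return NULL;
        if (ftruncate(fd, size) < 0) {
            close(fd);
            return NULL;
        }
        void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (ptr == MAP_FAILED) {
            close(fd);
            return NULL;
        }
        *out_fd = fd;
        return ptr;
    }
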
++
++static VkResult
++wsi_wl_image_init(struct wsi_wl_swapchain *chain,
++ struct wsi_wl_image *image,
++ const VkSwapchainCreateInfoKHR *pCreateInfo,
++ const VkAllocationCallbacks* pAllocator)
++{
++ struct wsi_wl_display *display = chain->wsi_wl_surface->display;
++ VkResult result;
++
++ result = wsi_create_image(&chain->base, &chain->base.image_info,
++ &image->base);
++ if (result != VK_SUCCESS)
++ return result;
++
++ switch (chain->buffer_type) {
++ case WSI_WL_BUFFER_GPU_SHM:
++ case WSI_WL_BUFFER_SHM_MEMCPY: {
++ if (chain->buffer_type == WSI_WL_BUFFER_SHM_MEMCPY) {
++ wsi_wl_alloc_image_shm(&image->base, image->base.row_pitches[0] *
++ chain->extent.height);
++ }
++ assert(image->shm_ptr != NULL);
++
++ /* Share it in a wl_buffer */
++ struct wl_shm_pool *pool = wl_shm_create_pool(display->wl_shm,
++ image->shm_fd,
++ image->shm_size);
++ wl_proxy_set_queue((struct wl_proxy *)pool, display->queue);
++ image->buffer = wl_shm_pool_create_buffer(pool, 0, chain->extent.width,
++ chain->extent.height,
++ image->base.row_pitches[0],
++ chain->shm_format);
++ wl_shm_pool_destroy(pool);
++ break;
++ }
++
++ case WSI_WL_BUFFER_NATIVE: {
++ assert(display->wl_dmabuf);
++
++ struct zwp_linux_buffer_params_v1 *params =
++ zwp_linux_dmabuf_v1_create_params(display->wl_dmabuf);
++ if (!params)
++ goto fail_image;
++
++ for (int i = 0; i < image->base.num_planes; i++) {
++ zwp_linux_buffer_params_v1_add(params,
++ image->base.dma_buf_fd,
++ i,
++ image->base.offsets[i],
++ image->base.row_pitches[i],
++ image->base.drm_modifier >> 32,
++ image->base.drm_modifier & 0xffffffff);
++ }
++
++ image->buffer =
++ zwp_linux_buffer_params_v1_create_immed(params,
++ chain->extent.width,
++ chain->extent.height,
++ chain->drm_format,
++ 0);
++ zwp_linux_buffer_params_v1_destroy(params);
++ break;
++ }
++
++ default:
++ unreachable("Invalid buffer type");
++ }
++
++ if (!image->buffer)
++ goto fail_image;
++
++ wl_buffer_add_listener(image->buffer, &buffer_listener, image);
++
++ return VK_SUCCESS;
++
++fail_image:
++ wsi_destroy_image(&chain->base, &image->base);
++
++ return VK_ERROR_OUT_OF_HOST_MEMORY;
++}
++
++static void
++wsi_wl_swapchain_images_free(struct wsi_wl_swapchain *chain)
++{
++ for (uint32_t i = 0; i < chain->base.image_count; i++) {
++ if (chain->images[i].buffer) {
++ wl_buffer_destroy(chain->images[i].buffer);
++ wsi_destroy_image(&chain->base, &chain->images[i].base);
++ if (chain->images[i].shm_size) {
++ close(chain->images[i].shm_fd);
++ munmap(chain->images[i].shm_ptr, chain->images[i].shm_size);
++ }
++ }
++ }
++}
++
++static void
++wsi_wl_swapchain_chain_free(struct wsi_wl_swapchain *chain,
++ const VkAllocationCallbacks *pAllocator)
++{
++ if (chain->frame)
++ wl_callback_destroy(chain->frame);
++ if (chain->tearing_control)
++ wp_tearing_control_v1_destroy(chain->tearing_control);
++ if (chain->wsi_wl_surface)
++ chain->wsi_wl_surface->chain = NULL;
++
++ if (chain->present_ids.wp_presentation) {
++      /* In VK_EXT_swapchain_maintenance1 there is no requirement to wait for all present IDs to complete.
++       * Waiting for the swapchain fence is enough.
++       * Just clean up anything the user did not wait for. */
++ struct wsi_wl_present_id *id, *tmp;
++ wl_list_for_each_safe(id, tmp, &chain->present_ids.outstanding_list, link) {
++ wp_presentation_feedback_destroy(id->feedback);
++ wl_list_remove(&id->link);
++ vk_free(id->alloc, id);
++ }
++
++ wl_proxy_wrapper_destroy(chain->present_ids.wp_presentation);
++ pthread_mutex_destroy(&chain->present_ids.lock);
++ }
++
++ wsi_swapchain_finish(&chain->base);
++}
++
++static VkResult
++wsi_wl_swapchain_destroy(struct wsi_swapchain *wsi_chain,
++ const VkAllocationCallbacks *pAllocator)
++{
++ struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
++
++ wsi_wl_swapchain_images_free(chain);
++ wsi_wl_swapchain_chain_free(chain, pAllocator);
++
++ vk_free(pAllocator, chain);
++
++ return VK_SUCCESS;
++}
++
++static VkResult
++wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
++ VkDevice device,
++ struct wsi_device *wsi_device,
++ const VkSwapchainCreateInfoKHR* pCreateInfo,
++ const VkAllocationCallbacks* pAllocator,
++ struct wsi_swapchain **swapchain_out)
++{
++ struct wsi_wl_surface *wsi_wl_surface =
++ wl_container_of((VkIcdSurfaceWayland *)icd_surface, wsi_wl_surface, base);
++ struct wsi_wl_swapchain *chain;
++ VkResult result;
++
++ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
++
++ int num_images = pCreateInfo->minImageCount;
++
++ size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
++ chain = vk_zalloc(pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
++ if (chain == NULL)
++ return VK_ERROR_OUT_OF_HOST_MEMORY;
++
++   /* We are taking ownership of the wsi_wl_surface, so remove ownership from
++    * oldSwapchain. If the surface is currently owned by a swapchain that is
++    * not oldSwapchain, we return an error.
++    */
++ if (wsi_wl_surface->chain &&
++ wsi_swapchain_to_handle(&wsi_wl_surface->chain->base) != pCreateInfo->oldSwapchain) {
++ return VK_ERROR_NATIVE_WINDOW_IN_USE_KHR;
++ }
++ if (pCreateInfo->oldSwapchain) {
++ VK_FROM_HANDLE(wsi_wl_swapchain, old_chain, pCreateInfo->oldSwapchain);
++ old_chain->wsi_wl_surface = NULL;
++ if (old_chain->tearing_control) {
++ wp_tearing_control_v1_destroy(old_chain->tearing_control);
++ old_chain->tearing_control = NULL;
++ }
++ }
++
++ /* Take ownership of the wsi_wl_surface */
++ chain->wsi_wl_surface = wsi_wl_surface;
++ wsi_wl_surface->chain = chain;
++
++ result = wsi_wl_surface_init(wsi_wl_surface, wsi_device);
++ if (result != VK_SUCCESS)
++ goto fail;
++
++ VkPresentModeKHR present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
++ if (present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
++ chain->tearing_control =
++ wp_tearing_control_manager_v1_get_tearing_control(wsi_wl_surface->display->tearing_control_manager,
++ wsi_wl_surface->surface);
++ if (!chain->tearing_control) {
++ result = VK_ERROR_OUT_OF_HOST_MEMORY;
++ goto fail;
++ }
++ wp_tearing_control_v1_set_presentation_hint(chain->tearing_control,
++ WP_TEARING_CONTROL_V1_PRESENTATION_HINT_ASYNC);
++ }
++
++ enum wsi_wl_buffer_type buffer_type;
++ struct wsi_base_image_params *image_params = NULL;
++ struct wsi_cpu_image_params cpu_image_params;
++ struct wsi_drm_image_params drm_image_params;
++ uint32_t num_drm_modifiers = 0;
++ const uint64_t *drm_modifiers = NULL;
++ if (wsi_device->sw) {
++ cpu_image_params = (struct wsi_cpu_image_params) {
++ .base.image_type = WSI_IMAGE_TYPE_CPU,
++ };
++ if (wsi_device->has_import_memory_host &&
++ !(WSI_DEBUG & WSI_DEBUG_NOSHM)) {
++ buffer_type = WSI_WL_BUFFER_GPU_SHM;
++ cpu_image_params.alloc_shm = wsi_wl_alloc_image_shm;
++ } else {
++ buffer_type = WSI_WL_BUFFER_SHM_MEMCPY;
++ }
++ image_params = &cpu_image_params.base;
++ } else {
++ drm_image_params = (struct wsi_drm_image_params) {
++ .base.image_type = WSI_IMAGE_TYPE_DRM,
++ .same_gpu = wsi_wl_surface->display->same_gpu,
++ };
++ /* Use explicit DRM format modifiers when both the server and the driver
++ * support them.
++ */
++ if (wsi_wl_surface->display->wl_dmabuf && wsi_device->supports_modifiers) {
++ struct wsi_wl_format *f = NULL;
++      /* Try to select modifiers for our vk_format from surface dma-buf
++       * feedback. If that doesn't work, fall back to the list of
++       * formats/modifiers supported by the display. */
++ if (wsi_wl_surface->wl_dmabuf_feedback)
++ f = pick_format_from_surface_dmabuf_feedback(wsi_wl_surface,
++ pCreateInfo->imageFormat);
++ if (f == NULL)
++ f = find_format(&chain->wsi_wl_surface->display->formats,
++ pCreateInfo->imageFormat);
++ if (f != NULL) {
++ num_drm_modifiers = u_vector_length(&f->modifiers);
++ drm_modifiers = u_vector_tail(&f->modifiers);
++ if (num_drm_modifiers > 0)
++ drm_image_params.num_modifier_lists = 1;
++ else
++ drm_image_params.num_modifier_lists = 0;
++ drm_image_params.num_modifiers = &num_drm_modifiers;
++ drm_image_params.modifiers = &drm_modifiers;
++ }
++ }
++ buffer_type = WSI_WL_BUFFER_NATIVE;
++ image_params = &drm_image_params.base;
++ }
++
++ result = wsi_swapchain_init(wsi_device, &chain->base, device,
++ pCreateInfo, image_params, pAllocator);
++ if (result != VK_SUCCESS)
++ goto fail;
++
++ bool alpha = pCreateInfo->compositeAlpha ==
++ VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
++
++ chain->base.destroy = wsi_wl_swapchain_destroy;
++ chain->base.get_wsi_image = wsi_wl_swapchain_get_wsi_image;
++ chain->base.acquire_next_image = wsi_wl_swapchain_acquire_next_image;
++ chain->base.queue_present = wsi_wl_swapchain_queue_present;
++ chain->base.release_images = wsi_wl_swapchain_release_images;
++ chain->base.set_present_mode = wsi_wl_swapchain_set_present_mode;
++ chain->base.wait_for_present = wsi_wl_swapchain_wait_for_present;
++ chain->base.present_mode = present_mode;
++ chain->base.image_count = num_images;
++ chain->extent = pCreateInfo->imageExtent;
++ chain->vk_format = pCreateInfo->imageFormat;
++ chain->buffer_type = buffer_type;
++ if (buffer_type == WSI_WL_BUFFER_NATIVE) {
++ chain->drm_format = wl_drm_format_for_vk_format(chain->vk_format, alpha);
++ } else {
++ chain->shm_format = wl_shm_format_for_vk_format(chain->vk_format, alpha);
++ }
++ chain->num_drm_modifiers = num_drm_modifiers;
++ chain->drm_modifiers = drm_modifiers;
++
++ chain->queue = wl_display_create_queue(chain->wsi_wl_surface->display->wl_display);
++
++ if (chain->wsi_wl_surface->display->wp_presentation_notwrapped) {
++ pthread_mutex_init(&chain->present_ids.lock, NULL);
++
++ wl_list_init(&chain->present_ids.outstanding_list);
++ chain->present_ids.wp_presentation =
++ wl_proxy_create_wrapper(chain->wsi_wl_surface->display->wp_presentation_notwrapped);
++ wl_proxy_set_queue((struct wl_proxy *) chain->present_ids.wp_presentation,
++ chain->queue);
++ }
++
++ chain->fifo_ready = true;
++
++ for (uint32_t i = 0; i < chain->base.image_count; i++) {
++ result = wsi_wl_image_init(chain, &chain->images[i],
++ pCreateInfo, pAllocator);
++ if (result != VK_SUCCESS)
++ goto fail_image_init;
++ chain->images[i].busy = false;
++ }
++
++ *swapchain_out = &chain->base;
++
++ return VK_SUCCESS;
++
++fail_image_init:
++ wsi_wl_swapchain_images_free(chain);
++
++ wsi_wl_swapchain_chain_free(chain, pAllocator);
++fail:
++ vk_free(pAllocator, chain);
++ wsi_wl_surface->chain = NULL;
++
++ return result;
++}
++
++VkResult
++wsi_wl_init_wsi(struct wsi_device *wsi_device,
++ const VkAllocationCallbacks *alloc,
++ VkPhysicalDevice physical_device)
++{
++ struct wsi_wayland *wsi;
++ VkResult result;
++
++ wsi = vk_alloc(alloc, sizeof(*wsi), 8,
++ VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
++ if (!wsi) {
++ result = VK_ERROR_OUT_OF_HOST_MEMORY;
++ goto fail;
++ }
++
++ wsi->physical_device = physical_device;
++ wsi->alloc = alloc;
++ wsi->wsi = wsi_device;
++
++ wsi->base.get_support = wsi_wl_surface_get_support;
++ wsi->base.get_capabilities2 = wsi_wl_surface_get_capabilities2;
++ wsi->base.get_formats = wsi_wl_surface_get_formats;
++ wsi->base.get_formats2 = wsi_wl_surface_get_formats2;
++ wsi->base.get_present_modes = wsi_wl_surface_get_present_modes;
++ wsi->base.get_present_rectangles = wsi_wl_surface_get_present_rectangles;
++ wsi->base.create_swapchain = wsi_wl_surface_create_swapchain;
++
++ wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = &wsi->base;
++
++ return VK_SUCCESS;
++
++fail:
++ wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = NULL;
++
++ return result;
++}
++
++void
++wsi_wl_finish_wsi(struct wsi_device *wsi_device,
++ const VkAllocationCallbacks *alloc)
++{
++ struct wsi_wayland *wsi =
++ (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
++ if (!wsi)
++ return;
++
++ vk_free(alloc, wsi);
++}
+--
+2.43.0
+
+
+From 0b8b0e5784431e94813fbc62b09ec99e013bca72 Mon Sep 17 00:00:00 2001
+From: Denis <benato.denis96@gmail.com>
+Date: Sun, 10 Dec 2023 14:48:09 +0100
+Subject: [PATCH 4/5] hack: rip out commit-timing-v1
+
+---
+ src/egl/wayland/wayland-drm/meson.build | 6 +-
+ src/vulkan/wsi/meson.build | 2 +-
+ src/vulkan/wsi/wsi_common_wayland.c | 24 +-
+ src/vulkan/wsi/wsi_common_wayland.c.orig | 2475 ----------------------
+ 4 files changed, 6 insertions(+), 2501 deletions(-)
+ delete mode 100644 src/vulkan/wsi/wsi_common_wayland.c.orig
+
+diff --git a/src/egl/wayland/wayland-drm/meson.build b/src/egl/wayland/wayland-drm/meson.build
+index 8b6044f09e5..48c676d7be4 100644
+--- a/src/egl/wayland/wayland-drm/meson.build
++++ b/src/egl/wayland/wayland-drm/meson.build
+@@ -60,7 +60,7 @@ libwayland_drm = static_library(
+ wp_dir = dep_wl_protocols.get_variable(pkgconfig : 'pkgdatadir', internal : 'pkgdatadir')
+ wp_protos = {
+ 'commit-queue-v1': 'staging/commit-queue/commit-queue-v1.xml',
+- 'commit-timing-v1': 'staging/commit-timing/commit-timing-v1.xml',
++ #'commit-timing-v1': 'staging/commit-timing/commit-timing-v1.xml',
+ 'linux-dmabuf-unstable-v1': 'unstable/linux-dmabuf/linux-dmabuf-unstable-v1.xml',
+ 'presentation-time': 'stable/presentation-time/presentation-time.xml',
+ 'tearing-control-v1': 'staging/tearing-control/tearing-control-v1.xml',
+@@ -69,13 +69,13 @@ wp_files = {}
+ foreach name, xml : wp_protos
+ code = custom_target(
+ name + '-protocol.c',
+- input : files(join_paths(wp_dir, xml)),
++ input : files(wp_dir / xml),
+ output : name + '-protocol.c',
+ command : [prog_wl_scanner, wl_scanner_arg, '@INPUT@', '@OUTPUT@'],
+ )
+ header = custom_target(
+ name + '-client-protocol.h',
+- input : files(join_paths(wp_dir, xml)),
++ input : files(wp_dir / xml),
+ output : name + '-client-protocol.h',
+ command : [prog_wl_scanner, 'client-header', '@INPUT@', '@OUTPUT@'],
+ )
+diff --git a/src/vulkan/wsi/meson.build b/src/vulkan/wsi/meson.build
+index 48ea09b99aa..5caea0e8f4f 100644
+--- a/src/vulkan/wsi/meson.build
++++ b/src/vulkan/wsi/meson.build
+@@ -32,7 +32,7 @@ endif
+ if with_platform_wayland
+ files_vulkan_wsi += files('wsi_common_wayland.c')
+ files_vulkan_wsi += wp_files['commit-queue-v1']
+- files_vulkan_wsi += wp_files['commit-timing-v1']
++ #files_vulkan_wsi += wp_files['commit-timing-v1']
+ files_vulkan_wsi += wp_files['linux-dmabuf-unstable-v1']
+ files_vulkan_wsi += wp_files['presentation-time']
+ files_vulkan_wsi += wp_files['tearing-control-v1']
+diff --git a/src/vulkan/wsi/wsi_common_wayland.c b/src/vulkan/wsi/wsi_common_wayland.c
+index 16848fb0a2a..d9069dee499 100644
+--- a/src/vulkan/wsi/wsi_common_wayland.c
++++ b/src/vulkan/wsi/wsi_common_wayland.c
+@@ -42,7 +42,6 @@
+ #include "wsi_common_entrypoints.h"
+ #include "wsi_common_private.h"
+ #include "commit-queue-v1-client-protocol.h"
+-#include "commit-timing-v1-client-protocol.h"
+ #include "linux-dmabuf-unstable-v1-client-protocol.h"
+ #include "presentation-time-client-protocol.h"
+ #include "tearing-control-v1-client-protocol.h"
+@@ -116,7 +115,6 @@ struct wsi_wl_display {
+ struct wp_presentation *wp_presentation_notwrapped;
+
+ struct wp_commit_queue_manager_v1 *commit_queue_manager;
+- struct wp_commit_timing_manager_v1 *commit_timing_manager;
+
+ struct wsi_wayland *wsi_wl;
+
+@@ -173,7 +171,6 @@ struct wsi_wl_swapchain {
+ struct wsi_wl_surface *wsi_wl_surface;
+ struct wp_tearing_control_v1 *tearing_control;
+ struct wp_commit_queue_v1 *commit_queue;
+- struct wp_commit_timer_v1 *commit_timer;
+ bool can_timestamp;
+
+ struct wl_callback *frame;
+@@ -947,9 +944,6 @@ registry_handle_global(void *data, struct wl_registry *registry,
+ } else if (strcmp(interface, wp_tearing_control_manager_v1_interface.name) == 0) {
+ display->tearing_control_manager =
+ wl_registry_bind(registry, name, &wp_tearing_control_manager_v1_interface, 1);
+- } else if (strcmp(interface, wp_commit_timing_manager_v1_interface.name) == 0) {
+- display->commit_timing_manager =
+- wl_registry_bind(registry, name, &wp_commit_timing_manager_v1_interface, 1);
+ } else if (strcmp(interface, wp_commit_queue_manager_v1_interface.name) == 0) {
+ display->commit_queue_manager =
+ wl_registry_bind(registry, name, &wp_commit_queue_manager_v1_interface, 1);
+@@ -981,8 +975,6 @@ wsi_wl_display_finish(struct wsi_wl_display *display)
+ wp_presentation_destroy(display->wp_presentation_notwrapped);
+ if (display->commit_queue_manager)
+ wp_commit_queue_manager_v1_destroy(display->commit_queue_manager);
+- if (display->commit_timing_manager)
+- wp_commit_timing_manager_v1_destroy(display->commit_timing_manager);
+ if (display->tearing_control_manager)
+ wp_tearing_control_manager_v1_destroy(display->tearing_control_manager);
+ if (display->wl_display_wrapper)
+@@ -2070,9 +2062,6 @@ set_timestamp(struct wsi_wl_swapchain *chain)
+ }
+
+ timespec_from_nsec(&target_ts, target);
+- wp_commit_timer_v1_set_timestamp(chain->commit_timer,
+- target_ts.tv_sec >> 32, target_ts.tv_sec,
+- target_ts.tv_nsec);
+
+ wp_commit_queue_v1_set_queue_mode(chain->commit_queue,
+ WP_COMMIT_QUEUE_V1_QUEUE_MODE_FIFO);
+@@ -2088,6 +2077,7 @@ wsi_wl_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
+ struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
+ struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;
+ bool mode_fifo = chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR;
++ //fprintf(stderr, "FIFO = %d\n", mode_fifo);
+
+ if (chain->buffer_type == WSI_WL_BUFFER_SHM_MEMCPY) {
+ struct wsi_wl_image *image = &chain->images[image_index];
+@@ -2324,9 +2314,6 @@ wsi_wl_swapchain_chain_free(struct wsi_wl_swapchain *chain,
+ if (chain->commit_queue)
+ wp_commit_queue_v1_destroy(chain->commit_queue);
+
+- if (chain->commit_timer)
+- wp_commit_timer_v1_destroy(chain->commit_timer);
+-
+ wsi_swapchain_finish(&chain->base);
+ }
+
+@@ -2386,10 +2373,6 @@ wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
+ old_chain->commit_queue = NULL;
+ old_chain->can_timestamp = false;
+ }
+- if (old_chain->commit_timer) {
+- wp_commit_timer_v1_destroy(old_chain->commit_timer);
+- old_chain->commit_timer = NULL;
+- }
+ }
+
+ /* Take ownership of the wsi_wl_surface */
+@@ -2507,12 +2490,9 @@ wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
+
+ chain->legacy_fifo_ready = true;
+ struct wsi_wl_display *dpy = chain->wsi_wl_surface->display;
+- if (dpy->commit_queue_manager &&
+- dpy->commit_timing_manager) {
++ if (dpy->commit_queue_manager) {
+ chain->commit_queue = wp_commit_queue_manager_v1_get_queue_controller(dpy->commit_queue_manager,
+ chain->wsi_wl_surface->surface);
+- chain->commit_timer = wp_commit_timing_manager_v1_get_timer(dpy->commit_timing_manager,
+- chain->wsi_wl_surface->surface);
+ chain->can_timestamp = true;
+ }
+
+diff --git a/src/vulkan/wsi/wsi_common_wayland.c.orig b/src/vulkan/wsi/wsi_common_wayland.c.orig
+deleted file mode 100644
+index 75e1a361a0b..00000000000
+--- a/src/vulkan/wsi/wsi_common_wayland.c.orig
++++ /dev/null
+@@ -1,2475 +0,0 @@
+-/*
+- * Copyright © 2015 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+- * IN THE SOFTWARE.
+- */
+-
+-#include <wayland-client.h>
+-
+-#include <assert.h>
+-#include <stdlib.h>
+-#include <stdio.h>
+-#include <unistd.h>
+-#include <errno.h>
+-#include <string.h>
+-#include <pthread.h>
+-#include <poll.h>
+-#include <sys/mman.h>
+-#include <sys/types.h>
+-
+-#include "drm-uapi/drm_fourcc.h"
+-
+-#include "vk_instance.h"
+-#include "vk_physical_device.h"
+-#include "vk_util.h"
+-#include "wsi_common_entrypoints.h"
+-#include "wsi_common_private.h"
+-#include "linux-dmabuf-unstable-v1-client-protocol.h"
+-#include "presentation-time-client-protocol.h"
+-#include "tearing-control-v1-client-protocol.h"
+-
+-#include <util/compiler.h>
+-#include <util/hash_table.h>
+-#include <util/timespec.h>
+-#include <util/u_endian.h>
+-#include <util/u_vector.h>
+-#include <util/u_dynarray.h>
+-#include <util/anon_file.h>
+-#include <util/os_time.h>
+-
+-#ifdef MAJOR_IN_MKDEV
+-#include <sys/mkdev.h>
+-#endif
+-#ifdef MAJOR_IN_SYSMACROS
+-#include <sys/sysmacros.h>
+-#endif
+-
+-struct wsi_wayland;
+-
+-struct wsi_wl_format {
+- VkFormat vk_format;
+- uint32_t flags;
+- struct u_vector modifiers;
+-};
+-
+-struct dmabuf_feedback_format_table {
+- unsigned int size;
+- struct {
+- uint32_t format;
+- uint32_t padding; /* unused */
+- uint64_t modifier;
+- } *data;
+-};
+-
+-struct dmabuf_feedback_tranche {
+- dev_t target_device;
+- uint32_t flags;
+- struct u_vector formats;
+-};
+-
+-struct dmabuf_feedback {
+- dev_t main_device;
+- struct dmabuf_feedback_format_table format_table;
+- struct util_dynarray tranches;
+- struct dmabuf_feedback_tranche pending_tranche;
+-};
+-
+-struct wsi_wl_display {
+- /* The real wl_display */
+- struct wl_display *wl_display;
+- /* Actually a proxy wrapper around the event queue */
+- struct wl_display *wl_display_wrapper;
+-
+- pthread_mutex_t wl_fd_lock;
+- pthread_cond_t wl_fd_reader_finished;
+- bool wl_fd_read_in_progress;
+-
+- struct wl_event_queue *queue;
+-
+- struct wl_shm *wl_shm;
+- struct zwp_linux_dmabuf_v1 *wl_dmabuf;
+- struct zwp_linux_dmabuf_feedback_v1 *wl_dmabuf_feedback;
+- struct wp_tearing_control_manager_v1 *tearing_control_manager;
+-
+- struct dmabuf_feedback_format_table format_table;
+-
+- /* users want per-chain wsi_wl_swapchain->present_ids.wp_presentation */
+- struct wp_presentation *wp_presentation_notwrapped;
+-
+- struct wsi_wayland *wsi_wl;
+-
+- /* Formats populated by zwp_linux_dmabuf_v1 or wl_shm interfaces */
+- struct u_vector formats;
+-
+- bool sw;
+-
+- dev_t main_device;
+- bool same_gpu;
+-};
+-
+-struct wsi_wayland {
+- struct wsi_interface base;
+-
+- struct wsi_device *wsi;
+-
+- const VkAllocationCallbacks *alloc;
+- VkPhysicalDevice physical_device;
+-};
+-
+-struct wsi_wl_image {
+- struct wsi_image base;
+- struct wl_buffer *buffer;
+- bool busy;
+- int shm_fd;
+- void *shm_ptr;
+- unsigned shm_size;
+-};
+-
+-enum wsi_wl_buffer_type {
+- WSI_WL_BUFFER_NATIVE,
+- WSI_WL_BUFFER_GPU_SHM,
+- WSI_WL_BUFFER_SHM_MEMCPY,
+-};
+-
+-struct wsi_wl_surface {
+- VkIcdSurfaceWayland base;
+-
+- struct wsi_wl_swapchain *chain;
+- struct wl_surface *surface;
+- struct wsi_wl_display *display;
+-
+- struct zwp_linux_dmabuf_feedback_v1 *wl_dmabuf_feedback;
+- struct dmabuf_feedback dmabuf_feedback, pending_dmabuf_feedback;
+-};
+-
+-struct wsi_wl_swapchain {
+- struct wsi_swapchain base;
+-
+- struct wl_event_queue *queue;
+-
+- struct wsi_wl_surface *wsi_wl_surface;
+- struct wp_tearing_control_v1 *tearing_control;
+-
+- struct wl_callback *frame;
+-
+- VkExtent2D extent;
+- VkFormat vk_format;
+- enum wsi_wl_buffer_type buffer_type;
+- uint32_t drm_format;
+- enum wl_shm_format shm_format;
+-
+- bool suboptimal;
+-
+- uint32_t num_drm_modifiers;
+- const uint64_t *drm_modifiers;
+-
+- VkPresentModeKHR present_mode;
+- bool fifo_ready;
+-
+- struct {
+- pthread_mutex_t lock; /* protects all members */
+- uint64_t max_completed;
+- struct wl_list outstanding_list;
+- struct wp_presentation *wp_presentation;
+- } present_ids;
+-
+- struct wsi_wl_image images[0];
+-};
+-VK_DEFINE_NONDISP_HANDLE_CASTS(wsi_wl_swapchain, base.base, VkSwapchainKHR,
+- VK_OBJECT_TYPE_SWAPCHAIN_KHR)
+-
+-enum wsi_wl_fmt_flag {
+- WSI_WL_FMT_ALPHA = 1 << 0,
+- WSI_WL_FMT_OPAQUE = 1 << 1,
+-};
+-
+-static struct wsi_wl_format *
+-find_format(struct u_vector *formats, VkFormat format)
+-{
+- struct wsi_wl_format *f;
+-
+- u_vector_foreach(f, formats)
+- if (f->vk_format == format)
+- return f;
+-
+- return NULL;
+-}
+-
+-static int
+-wsi_wl_display_read_queue_with_timeout_internal(struct wsi_wl_display *wsi_wl_display,
+- struct wl_event_queue *queue,
+- uint64_t atimeout)
+-{
+- uint64_t current_time_nsec;
+- struct timespec rel_timeout, end_time, current_time;
+- int ret;
+-
+- if (wl_display_prepare_read_queue(wsi_wl_display->wl_display, queue) < 0) {
+- /* Another thread might have read events for our queue already. Go
+- * back to dispatch them.
+- */
+- pthread_mutex_unlock(&wsi_wl_display->wl_fd_lock);
+- if (errno == EAGAIN)
+- return VK_SUCCESS;
+-
+- return VK_ERROR_OUT_OF_DATE_KHR;
+- }
+-
+- wsi_wl_display->wl_fd_read_in_progress = true;
+- pthread_mutex_unlock(&wsi_wl_display->wl_fd_lock);
+-
+- while (1) {
+- struct pollfd pollfd = {
+- .fd = wl_display_get_fd(wsi_wl_display->wl_display),
+- .events = POLLIN
+- };
+-
+- current_time_nsec = os_time_get_nano();
+- if (current_time_nsec > atimeout) {
+- rel_timeout.tv_sec = 0;
+- rel_timeout.tv_nsec = 0;
+- } else {
+- timespec_from_nsec(&current_time, current_time_nsec);
+- timespec_from_nsec(&end_time, atimeout);
+- timespec_sub(&rel_timeout, &end_time, &current_time);
+- }
+-
+- ret = ppoll(&pollfd, 1, &rel_timeout, NULL);
+- if (ret < 0) {
+- if (errno == EINTR || errno == EAGAIN)
+- continue;
+-
+- ret = VK_ERROR_OUT_OF_DATE_KHR;
+- } else if (ret == 0)
+- ret = VK_TIMEOUT;
+- else
+- ret = VK_SUCCESS;
+-
+- break;
+- }
+-
+- if (ret != VK_SUCCESS) {
+- wl_display_cancel_read(wsi_wl_display->wl_display);
+- } else {
+- ret = wl_display_read_events(wsi_wl_display->wl_display);
+- if (ret != 0)
+- ret = VK_ERROR_OUT_OF_DATE_KHR;
+- }
+-
+- pthread_mutex_lock(&wsi_wl_display->wl_fd_lock);
+- wsi_wl_display->wl_fd_read_in_progress = false;
+- pthread_cond_broadcast(&wsi_wl_display->wl_fd_reader_finished);
+- return ret;
+-}
+-
+-static int
+-wsi_wl_display_dispatch_queue_with_timeout(struct wsi_wl_display *wsi_wl_display,
+- struct wl_event_queue *queue,
+- uint64_t timeout)
+-{
+- int err;
+- int n_events;
+- uint64_t atimeout, now;
+-
+- if (timeout == UINT64_MAX)
+- atimeout = timeout;
+- else
+- atimeout = os_time_get_absolute_timeout(timeout);
+-
+- while (1) {
+- n_events = wl_display_dispatch_queue_pending(wsi_wl_display->wl_display,
+- queue);
+- if (n_events > 0) {
+- err = VK_SUCCESS;
+- break;
+- }
+- pthread_mutex_lock(&wsi_wl_display->wl_fd_lock);
+-
+- if (wsi_wl_display->wl_fd_read_in_progress) {
+- struct timespec end_time;
+-
+- timespec_from_nsec(&end_time, atimeout);
+-
+- err = pthread_cond_timedwait(&wsi_wl_display->wl_fd_reader_finished,
+- &wsi_wl_display->wl_fd_lock,
+- &end_time);
+- if (err) {
+- if (errno == ETIMEDOUT)
+- err = VK_TIMEOUT;
+- else
+- err = VK_ERROR_OUT_OF_DATE_KHR;
+- } else {
+- /* We don't know if the other thread actually
+- * dispatched anything, so let the caller decide
+- * whether it should continue.
+- */
+- err = VK_INCOMPLETE;
+- }
+- } else {
+- err = wsi_wl_display_read_queue_with_timeout_internal(wsi_wl_display,
+- queue,
+- timeout);
+- }
+-
+- pthread_mutex_unlock(&wsi_wl_display->wl_fd_lock);
+-
+- now = os_time_get_nano();
+- if (now > atimeout) {
+- err = VK_TIMEOUT;
+- break;
+- }
+-
+- }
+-
+- return err;
+-}
+-
+-static struct wsi_wl_format *
+-wsi_wl_display_add_vk_format(struct wsi_wl_display *display,
+- struct u_vector *formats,
+- VkFormat format, uint32_t flags)
+-{
+- assert(flags & (WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE));
+-
+- /* Don't add a format that's already in the list */
+- struct wsi_wl_format *f = find_format(formats, format);
+- if (f) {
+- f->flags |= flags;
+- return f;
+- }
+-
+- /* Don't add formats that aren't renderable. */
+- VkFormatProperties props;
+-
+- display->wsi_wl->wsi->GetPhysicalDeviceFormatProperties(display->wsi_wl->physical_device,
+- format, &props);
+- if (!(props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
+- return NULL;
+-
+- struct u_vector modifiers;
+- if (!u_vector_init_pow2(&modifiers, 4, sizeof(uint64_t)))
+- return NULL;
+-
+- f = u_vector_add(formats);
+- if (!f) {
+- u_vector_finish(&modifiers);
+- return NULL;
+- }
+-
+- f->vk_format = format;
+- f->flags = flags;
+- f->modifiers = modifiers;
+-
+- return f;
+-}
+-
+-static void
+-wsi_wl_format_add_modifier(struct wsi_wl_format *format, uint64_t modifier)
+-{
+- uint64_t *mod;
+-
+- if (modifier == DRM_FORMAT_MOD_INVALID)
+- return;
+-
+- u_vector_foreach(mod, &format->modifiers)
+- if (*mod == modifier)
+- return;
+-
+- mod = u_vector_add(&format->modifiers);
+- if (mod)
+- *mod = modifier;
+-}
+-
+-static void
+-wsi_wl_display_add_vk_format_modifier(struct wsi_wl_display *display,
+- struct u_vector *formats,
+- VkFormat vk_format, uint32_t flags,
+- uint64_t modifier)
+-{
+- struct wsi_wl_format *format;
+-
+- format = wsi_wl_display_add_vk_format(display, formats, vk_format, flags);
+- if (format)
+- wsi_wl_format_add_modifier(format, modifier);
+-}
+-
+-static void
+-wsi_wl_display_add_drm_format_modifier(struct wsi_wl_display *display,
+- struct u_vector *formats,
+- uint32_t drm_format, uint64_t modifier)
+-{
+- switch (drm_format) {
+-#if 0
+- /* TODO: These are only available when VK_EXT_4444_formats is enabled, so
+- * we probably need to make their use conditional on this extension. */
+- case DRM_FORMAT_ARGB4444:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_A4R4G4B4_UNORM_PACK16,
+- WSI_WL_FMT_ALPHA, modifier);
+- break;
+- case DRM_FORMAT_XRGB4444:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_A4R4G4B4_UNORM_PACK16,
+- WSI_WL_FMT_OPAQUE, modifier);
+- break;
+- case DRM_FORMAT_ABGR4444:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_A4B4G4R4_UNORM_PACK16,
+- WSI_WL_FMT_ALPHA, modifier);
+- break;
+- case DRM_FORMAT_XBGR4444:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_A4B4G4R4_UNORM_PACK16,
+- WSI_WL_FMT_OPAQUE, modifier);
+- break;
+-#endif
+-
+- /* Vulkan _PACKN formats have the same component order as DRM formats
+- * on little endian systems, on big endian there exists no analog. */
+-#if UTIL_ARCH_LITTLE_ENDIAN
+- case DRM_FORMAT_RGBA4444:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_R4G4B4A4_UNORM_PACK16,
+- WSI_WL_FMT_ALPHA, modifier);
+- break;
+- case DRM_FORMAT_RGBX4444:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_R4G4B4A4_UNORM_PACK16,
+- WSI_WL_FMT_OPAQUE, modifier);
+- break;
+- case DRM_FORMAT_BGRA4444:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_B4G4R4A4_UNORM_PACK16,
+- WSI_WL_FMT_ALPHA, modifier);
+- break;
+- case DRM_FORMAT_BGRX4444:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_B4G4R4A4_UNORM_PACK16,
+- WSI_WL_FMT_OPAQUE, modifier);
+- break;
+- case DRM_FORMAT_RGB565:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_R5G6B5_UNORM_PACK16,
+- WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
+- modifier);
+- break;
+- case DRM_FORMAT_BGR565:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_B5G6R5_UNORM_PACK16,
+- WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
+- modifier);
+- break;
+- case DRM_FORMAT_ARGB1555:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_A1R5G5B5_UNORM_PACK16,
+- WSI_WL_FMT_ALPHA, modifier);
+- break;
+- case DRM_FORMAT_XRGB1555:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_A1R5G5B5_UNORM_PACK16,
+- WSI_WL_FMT_OPAQUE, modifier);
+- break;
+- case DRM_FORMAT_RGBA5551:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_R5G5B5A1_UNORM_PACK16,
+- WSI_WL_FMT_ALPHA, modifier);
+- break;
+- case DRM_FORMAT_RGBX5551:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_R5G5B5A1_UNORM_PACK16,
+- WSI_WL_FMT_OPAQUE, modifier);
+- break;
+- case DRM_FORMAT_BGRA5551:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_B5G5R5A1_UNORM_PACK16,
+- WSI_WL_FMT_ALPHA, modifier);
+- break;
+- case DRM_FORMAT_BGRX5551:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_B5G5R5A1_UNORM_PACK16,
+- WSI_WL_FMT_OPAQUE, modifier);
+- break;
+- case DRM_FORMAT_ARGB2101010:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_A2R10G10B10_UNORM_PACK32,
+- WSI_WL_FMT_ALPHA, modifier);
+- break;
+- case DRM_FORMAT_XRGB2101010:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_A2R10G10B10_UNORM_PACK32,
+- WSI_WL_FMT_OPAQUE, modifier);
+- break;
+- case DRM_FORMAT_ABGR2101010:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_A2B10G10R10_UNORM_PACK32,
+- WSI_WL_FMT_ALPHA, modifier);
+- break;
+- case DRM_FORMAT_XBGR2101010:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_A2B10G10R10_UNORM_PACK32,
+- WSI_WL_FMT_OPAQUE, modifier);
+- break;
+-
+- /* Vulkan 16-bits-per-channel formats have an inverted channel order
+- * compared to DRM formats, just like the 8-bits-per-channel ones.
+- * On little endian systems the memory representation of each channel
+- * matches the DRM formats'. */
+- case DRM_FORMAT_ABGR16161616:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_R16G16B16A16_UNORM,
+- WSI_WL_FMT_ALPHA, modifier);
+- break;
+- case DRM_FORMAT_XBGR16161616:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_R16G16B16A16_UNORM,
+- WSI_WL_FMT_OPAQUE, modifier);
+- break;
+- case DRM_FORMAT_ABGR16161616F:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_R16G16B16A16_SFLOAT,
+- WSI_WL_FMT_ALPHA, modifier);
+- break;
+- case DRM_FORMAT_XBGR16161616F:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_R16G16B16A16_SFLOAT,
+- WSI_WL_FMT_OPAQUE, modifier);
+- break;
+-#endif
+-
+- /* Non-packed 8-bit formats have an inverted channel order compared to the
+- * little endian DRM formats, because the DRM channel ordering is high->low
+- * but the vulkan channel ordering is in memory byte order
+- *
+- * For all UNORM formats which have a SRGB variant, we must support both if
+- * we can. SRGB in this context means that rendering to it will result in a
+- * linear -> nonlinear SRGB colorspace conversion before the data is stored.
+- * The inverse function is applied when sampling from SRGB images.
+- * From Wayland's perspective nothing changes, the difference is just how
+- * Vulkan interprets the pixel data. */
+- case DRM_FORMAT_XBGR8888:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_R8G8B8_SRGB,
+- WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
+- modifier);
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_R8G8B8_UNORM,
+- WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
+- modifier);
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_R8G8B8A8_SRGB,
+- WSI_WL_FMT_OPAQUE, modifier);
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_R8G8B8A8_UNORM,
+- WSI_WL_FMT_OPAQUE, modifier);
+- break;
+- case DRM_FORMAT_ABGR8888:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_R8G8B8A8_SRGB,
+- WSI_WL_FMT_ALPHA, modifier);
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_R8G8B8A8_UNORM,
+- WSI_WL_FMT_ALPHA, modifier);
+- break;
+- case DRM_FORMAT_XRGB8888:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_B8G8R8_SRGB,
+- WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
+- modifier);
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_B8G8R8_UNORM,
+- WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
+- modifier);
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_B8G8R8A8_SRGB,
+- WSI_WL_FMT_OPAQUE, modifier);
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_B8G8R8A8_UNORM,
+- WSI_WL_FMT_OPAQUE, modifier);
+- break;
+- case DRM_FORMAT_ARGB8888:
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_B8G8R8A8_SRGB,
+- WSI_WL_FMT_ALPHA, modifier);
+- wsi_wl_display_add_vk_format_modifier(display, formats,
+- VK_FORMAT_B8G8R8A8_UNORM,
+- WSI_WL_FMT_ALPHA, modifier);
+- break;
+- }
+-}
+-
+-static uint32_t
+-drm_format_for_wl_shm_format(enum wl_shm_format shm_format)
+-{
+- /* wl_shm formats are identical to DRM, except ARGB8888 and XRGB8888 */
+- switch (shm_format) {
+- case WL_SHM_FORMAT_ARGB8888:
+- return DRM_FORMAT_ARGB8888;
+- case WL_SHM_FORMAT_XRGB8888:
+- return DRM_FORMAT_XRGB8888;
+- default:
+- return shm_format;
+- }
+-}
+-
+-static void
+-wsi_wl_display_add_wl_shm_format(struct wsi_wl_display *display,
+- struct u_vector *formats,
+- enum wl_shm_format shm_format)
+-{
+- uint32_t drm_format = drm_format_for_wl_shm_format(shm_format);
+-
+- wsi_wl_display_add_drm_format_modifier(display, formats, drm_format,
+- DRM_FORMAT_MOD_INVALID);
+-}
+-
+-static uint32_t
+-wl_drm_format_for_vk_format(VkFormat vk_format, bool alpha)
+-{
+- switch (vk_format) {
+-#if 0
+- case VK_FORMAT_A4R4G4B4_UNORM_PACK16:
+- return alpha ? DRM_FORMAT_ARGB4444 : DRM_FORMAT_XRGB4444;
+- case VK_FORMAT_A4B4G4R4_UNORM_PACK16:
+- return alpha ? DRM_FORMAT_ABGR4444 : DRM_FORMAT_XBGR4444;
+-#endif
+-#if UTIL_ARCH_LITTLE_ENDIAN
+- case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
+- return alpha ? DRM_FORMAT_RGBA4444 : DRM_FORMAT_RGBX4444;
+- case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+- return alpha ? DRM_FORMAT_BGRA4444 : DRM_FORMAT_BGRX4444;
+- case VK_FORMAT_R5G6B5_UNORM_PACK16:
+- return DRM_FORMAT_RGB565;
+- case VK_FORMAT_B5G6R5_UNORM_PACK16:
+- return DRM_FORMAT_BGR565;
+- case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+- return alpha ? DRM_FORMAT_ARGB1555 : DRM_FORMAT_XRGB1555;
+- case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
+- return alpha ? DRM_FORMAT_RGBA5551 : DRM_FORMAT_RGBX5551;
+- case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
+- return alpha ? DRM_FORMAT_BGRA5551 : DRM_FORMAT_BGRX5551;
+- case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+- return alpha ? DRM_FORMAT_ARGB2101010 : DRM_FORMAT_XRGB2101010;
+- case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+- return alpha ? DRM_FORMAT_ABGR2101010 : DRM_FORMAT_XBGR2101010;
+- case VK_FORMAT_R16G16B16A16_UNORM:
+- return alpha ? DRM_FORMAT_ABGR16161616 : DRM_FORMAT_XBGR16161616;
+- case VK_FORMAT_R16G16B16A16_SFLOAT:
+- return alpha ? DRM_FORMAT_ABGR16161616F : DRM_FORMAT_XBGR16161616F;
+-#endif
+- case VK_FORMAT_R8G8B8_UNORM:
+- case VK_FORMAT_R8G8B8_SRGB:
+- return DRM_FORMAT_XBGR8888;
+- case VK_FORMAT_R8G8B8A8_UNORM:
+- case VK_FORMAT_R8G8B8A8_SRGB:
+- return alpha ? DRM_FORMAT_ABGR8888 : DRM_FORMAT_XBGR8888;
+- case VK_FORMAT_B8G8R8_UNORM:
+- case VK_FORMAT_B8G8R8_SRGB:
+- return DRM_FORMAT_BGRX8888;
+- case VK_FORMAT_B8G8R8A8_UNORM:
+- case VK_FORMAT_B8G8R8A8_SRGB:
+- return alpha ? DRM_FORMAT_ARGB8888 : DRM_FORMAT_XRGB8888;
+-
+- default:
+- assert(!"Unsupported Vulkan format");
+- return DRM_FORMAT_INVALID;
+- }
+-}
+-
+-static enum wl_shm_format
+-wl_shm_format_for_vk_format(VkFormat vk_format, bool alpha)
+-{
+- uint32_t drm_format = wl_drm_format_for_vk_format(vk_format, alpha);
+- if (drm_format == DRM_FORMAT_INVALID) {
+- return 0;
+- }
+-
+- /* wl_shm formats are identical to DRM, except ARGB8888 and XRGB8888 */
+- switch (drm_format) {
+- case DRM_FORMAT_ARGB8888:
+- return WL_SHM_FORMAT_ARGB8888;
+- case DRM_FORMAT_XRGB8888:
+- return WL_SHM_FORMAT_XRGB8888;
+- default:
+- return drm_format;
+- }
+-}
+-
+-static void
+-dmabuf_handle_format(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
+- uint32_t format)
+-{
+- /* Formats are implicitly advertised by the modifier event, so we ignore
+- * them here. */
+-}
+-
+-static void
+-dmabuf_handle_modifier(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
+- uint32_t format, uint32_t modifier_hi,
+- uint32_t modifier_lo)
+-{
+- struct wsi_wl_display *display = data;
+- uint64_t modifier;
+-
+- /* Ignore this if the compositor advertised dma-buf feedback. From version 4
+- * onwards (when dma-buf feedback was introduced), the compositor should not
+- * advertise this event anymore, but let's keep this for safety. */
+- if (display->wl_dmabuf_feedback)
+- return;
+-
+- modifier = ((uint64_t) modifier_hi << 32) | modifier_lo;
+- wsi_wl_display_add_drm_format_modifier(display, &display->formats,
+- format, modifier);
+-}
+-
+-static const struct zwp_linux_dmabuf_v1_listener dmabuf_listener = {
+- dmabuf_handle_format,
+- dmabuf_handle_modifier,
+-};
+-
+-static void
+-dmabuf_feedback_format_table_fini(struct dmabuf_feedback_format_table *format_table)
+-{
+- if (format_table->data && format_table->data != MAP_FAILED)
+- munmap(format_table->data, format_table->size);
+-}
+-
+-static void
+-dmabuf_feedback_format_table_init(struct dmabuf_feedback_format_table *format_table)
+-{
+- memset(format_table, 0, sizeof(*format_table));
+-}
+-
+-static void
+-dmabuf_feedback_tranche_fini(struct dmabuf_feedback_tranche *tranche)
+-{
+- struct wsi_wl_format *format;
+-
+- u_vector_foreach(format, &tranche->formats)
+- u_vector_finish(&format->modifiers);
+-
+- u_vector_finish(&tranche->formats);
+-}
+-
+-static int
+-dmabuf_feedback_tranche_init(struct dmabuf_feedback_tranche *tranche)
+-{
+- memset(tranche, 0, sizeof(*tranche));
+-
+- if (!u_vector_init(&tranche->formats, 8, sizeof(struct wsi_wl_format)))
+- return -1;
+-
+- return 0;
+-}
+-
+-static void
+-dmabuf_feedback_fini(struct dmabuf_feedback *dmabuf_feedback)
+-{
+- dmabuf_feedback_tranche_fini(&dmabuf_feedback->pending_tranche);
+-
+- util_dynarray_foreach(&dmabuf_feedback->tranches,
+- struct dmabuf_feedback_tranche, tranche)
+- dmabuf_feedback_tranche_fini(tranche);
+- util_dynarray_fini(&dmabuf_feedback->tranches);
+-
+- dmabuf_feedback_format_table_fini(&dmabuf_feedback->format_table);
+-}
+-
+-static int
+-dmabuf_feedback_init(struct dmabuf_feedback *dmabuf_feedback)
+-{
+- memset(dmabuf_feedback, 0, sizeof(*dmabuf_feedback));
+-
+- if (dmabuf_feedback_tranche_init(&dmabuf_feedback->pending_tranche) < 0)
+- return -1;
+-
+- util_dynarray_init(&dmabuf_feedback->tranches, NULL);
+-
+- dmabuf_feedback_format_table_init(&dmabuf_feedback->format_table);
+-
+- return 0;
+-}
+-
+-static void
+-default_dmabuf_feedback_format_table(void *data,
+- struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
+- int32_t fd, uint32_t size)
+-{
+- struct wsi_wl_display *display = data;
+-
+- display->format_table.size = size;
+- display->format_table.data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
+-
+- close(fd);
+-}
+-
+-static void
+-default_dmabuf_feedback_main_device(void *data,
+- struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
+- struct wl_array *device)
+-{
+- struct wsi_wl_display *display = data;
+-
+- assert(device->size == sizeof(dev_t));
+- memcpy(&display->main_device, device->data, device->size);
+-}
+-
+-static void
+-default_dmabuf_feedback_tranche_target_device(void *data,
+- struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
+- struct wl_array *device)
+-{
+- /* ignore this event */
+-}
+-
+-static void
+-default_dmabuf_feedback_tranche_flags(void *data,
+- struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
+- uint32_t flags)
+-{
+- /* ignore this event */
+-}
+-
+-static void
+-default_dmabuf_feedback_tranche_formats(void *data,
+- struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
+- struct wl_array *indices)
+-{
+- struct wsi_wl_display *display = data;
+- uint32_t format;
+- uint64_t modifier;
+- uint16_t *index;
+-
+- /* We couldn't map the format table or the compositor didn't advertise it,
+- * so we have to ignore the feedback. */
+- if (display->format_table.data == MAP_FAILED ||
+- display->format_table.data == NULL)
+- return;
+-
+- wl_array_for_each(index, indices) {
+- format = display->format_table.data[*index].format;
+- modifier = display->format_table.data[*index].modifier;
+- wsi_wl_display_add_drm_format_modifier(display, &display->formats,
+- format, modifier);
+- }
+-}
+-
+-static void
+-default_dmabuf_feedback_tranche_done(void *data,
+- struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
+-{
+- /* ignore this event */
+-}
+-
+-static void
+-default_dmabuf_feedback_done(void *data,
+- struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
+-{
+- /* ignore this event */
+-}
+-
+-static const struct zwp_linux_dmabuf_feedback_v1_listener
+-dmabuf_feedback_listener = {
+- .format_table = default_dmabuf_feedback_format_table,
+- .main_device = default_dmabuf_feedback_main_device,
+- .tranche_target_device = default_dmabuf_feedback_tranche_target_device,
+- .tranche_flags = default_dmabuf_feedback_tranche_flags,
+- .tranche_formats = default_dmabuf_feedback_tranche_formats,
+- .tranche_done = default_dmabuf_feedback_tranche_done,
+- .done = default_dmabuf_feedback_done,
+-};
+-
+-static void
+-shm_handle_format(void *data, struct wl_shm *shm, uint32_t format)
+-{
+- struct wsi_wl_display *display = data;
+-
+- wsi_wl_display_add_wl_shm_format(display, &display->formats, format);
+-}
+-
+-static const struct wl_shm_listener shm_listener = {
+- .format = shm_handle_format
+-};
+-
+-static void
+-registry_handle_global(void *data, struct wl_registry *registry,
+- uint32_t name, const char *interface, uint32_t version)
+-{
+- struct wsi_wl_display *display = data;
+-
+- if (display->sw) {
+- if (strcmp(interface, wl_shm_interface.name) == 0) {
+- display->wl_shm = wl_registry_bind(registry, name, &wl_shm_interface, 1);
+- wl_shm_add_listener(display->wl_shm, &shm_listener, display);
+- }
+- } else {
+- if (strcmp(interface, zwp_linux_dmabuf_v1_interface.name) == 0 && version >= 3) {
+- display->wl_dmabuf =
+- wl_registry_bind(registry, name, &zwp_linux_dmabuf_v1_interface,
+- MIN2(version, ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION));
+- zwp_linux_dmabuf_v1_add_listener(display->wl_dmabuf,
+- &dmabuf_listener, display);
+- }
+- }
+-
+- if (strcmp(interface, wp_presentation_interface.name) == 0) {
+- display->wp_presentation_notwrapped =
+- wl_registry_bind(registry, name, &wp_presentation_interface, 1);
+- } else if (strcmp(interface, wp_tearing_control_manager_v1_interface.name) == 0) {
+- display->tearing_control_manager =
+- wl_registry_bind(registry, name, &wp_tearing_control_manager_v1_interface, 1);
+- }
+-}
+-
+-static void
+-registry_handle_global_remove(void *data, struct wl_registry *registry,
+- uint32_t name)
+-{ /* No-op */ }
+-
+-static const struct wl_registry_listener registry_listener = {
+- registry_handle_global,
+- registry_handle_global_remove
+-};
+-
+-static void
+-wsi_wl_display_finish(struct wsi_wl_display *display)
+-{
+- struct wsi_wl_format *f;
+- u_vector_foreach(f, &display->formats)
+- u_vector_finish(&f->modifiers);
+- u_vector_finish(&display->formats);
+- if (display->wl_shm)
+- wl_shm_destroy(display->wl_shm);
+- if (display->wl_dmabuf)
+- zwp_linux_dmabuf_v1_destroy(display->wl_dmabuf);
+- if (display->wp_presentation_notwrapped)
+- wp_presentation_destroy(display->wp_presentation_notwrapped);
+- if (display->tearing_control_manager)
+- wp_tearing_control_manager_v1_destroy(display->tearing_control_manager);
+- if (display->wl_display_wrapper)
+- wl_proxy_wrapper_destroy(display->wl_display_wrapper);
+- if (display->queue)
+- wl_event_queue_destroy(display->queue);
+- pthread_mutex_destroy(&display->wl_fd_lock);
+- pthread_cond_destroy(&display->wl_fd_reader_finished);
+-}
+-
+-static VkResult
+-wsi_wl_display_init(struct wsi_wayland *wsi_wl,
+- struct wsi_wl_display *display,
+- struct wl_display *wl_display,
+- bool get_format_list, bool sw)
+-{
+- VkResult result = VK_SUCCESS;
+- memset(display, 0, sizeof(*display));
+-
+- if (!u_vector_init(&display->formats, 8, sizeof(struct wsi_wl_format)))
+- return VK_ERROR_OUT_OF_HOST_MEMORY;
+-
+- display->wsi_wl = wsi_wl;
+- display->wl_display = wl_display;
+- display->sw = sw;
+-
+- display->wl_fd_read_in_progress = false;
+- pthread_mutex_init(&display->wl_fd_lock, NULL);
+- if (!wsi_init_pthread_cond_monotonic(&display->wl_fd_reader_finished))
+- goto fail;
+-
+- display->queue = wl_display_create_queue(wl_display);
+- if (!display->queue) {
+- result = VK_ERROR_OUT_OF_HOST_MEMORY;
+- goto fail;
+- }
+-
+- display->wl_display_wrapper = wl_proxy_create_wrapper(wl_display);
+- if (!display->wl_display_wrapper) {
+- result = VK_ERROR_OUT_OF_HOST_MEMORY;
+- goto fail;
+- }
+-
+- wl_proxy_set_queue((struct wl_proxy *) display->wl_display_wrapper,
+- display->queue);
+-
+- struct wl_registry *registry =
+- wl_display_get_registry(display->wl_display_wrapper);
+- if (!registry) {
+- result = VK_ERROR_OUT_OF_HOST_MEMORY;
+- goto fail;
+- }
+-
+- wl_registry_add_listener(registry, &registry_listener, display);
+-
+- /* Round-trip to get wl_shm and zwp_linux_dmabuf_v1 globals */
+- wl_display_roundtrip_queue(display->wl_display, display->queue);
+- if (!display->wl_dmabuf && !display->wl_shm) {
+- result = VK_ERROR_SURFACE_LOST_KHR;
+- goto fail_registry;
+- }
+-
+- /* Caller doesn't expect us to query formats/modifiers, so return */
+- if (!get_format_list)
+- goto out;
+-
+- /* Default assumption */
+- display->same_gpu = true;
+-
+- /* Get the default dma-buf feedback */
+- if (display->wl_dmabuf && zwp_linux_dmabuf_v1_get_version(display->wl_dmabuf) >=
+- ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION) {
+- dmabuf_feedback_format_table_init(&display->format_table);
+- display->wl_dmabuf_feedback =
+- zwp_linux_dmabuf_v1_get_default_feedback(display->wl_dmabuf);
+- zwp_linux_dmabuf_feedback_v1_add_listener(display->wl_dmabuf_feedback,
+- &dmabuf_feedback_listener, display);
+-
+- /* Round-trip again to fetch dma-buf feedback */
+- wl_display_roundtrip_queue(display->wl_display, display->queue);
+-
+- if (wsi_wl->wsi->drm_info.hasRender ||
+- wsi_wl->wsi->drm_info.hasPrimary) {
+-         /* Apparently some Wayland compositors do not send the render
+-          * device node but the primary one, so test against both.
+- */
+- display->same_gpu =
+- (wsi_wl->wsi->drm_info.hasRender &&
+- major(display->main_device) == wsi_wl->wsi->drm_info.renderMajor &&
+- minor(display->main_device) == wsi_wl->wsi->drm_info.renderMinor) ||
+- (wsi_wl->wsi->drm_info.hasPrimary &&
+- major(display->main_device) == wsi_wl->wsi->drm_info.primaryMajor &&
+- minor(display->main_device) == wsi_wl->wsi->drm_info.primaryMinor);
+- }
+- }
+-
+- /* Round-trip again to get formats and modifiers */
+- wl_display_roundtrip_queue(display->wl_display, display->queue);
+-
+- if (wsi_wl->wsi->force_bgra8_unorm_first) {
+- /* Find BGRA8_UNORM in the list and swap it to the first position if we
+- * can find it. Some apps get confused if SRGB is first in the list.
+- */
+- struct wsi_wl_format *first_fmt = u_vector_head(&display->formats);
+- struct wsi_wl_format *f, tmp_fmt;
+- f = find_format(&display->formats, VK_FORMAT_B8G8R8A8_UNORM);
+- if (f) {
+- tmp_fmt = *f;
+- *f = *first_fmt;
+- *first_fmt = tmp_fmt;
+- }
+- }
+-
+-out:
+- /* We don't need this anymore */
+- wl_registry_destroy(registry);
+-
+- /* Destroy default dma-buf feedback object and format table */
+- if (display->wl_dmabuf_feedback) {
+- zwp_linux_dmabuf_feedback_v1_destroy(display->wl_dmabuf_feedback);
+- display->wl_dmabuf_feedback = NULL;
+- dmabuf_feedback_format_table_fini(&display->format_table);
+- }
+-
+- return VK_SUCCESS;
+-
+-fail_registry:
+- if (registry)
+- wl_registry_destroy(registry);
+-
+-fail:
+- pthread_mutex_destroy(&display->wl_fd_lock);
+- wsi_wl_display_finish(display);
+- return result;
+-}
+-
+-static VkResult
+-wsi_wl_display_create(struct wsi_wayland *wsi, struct wl_display *wl_display,
+- bool sw,
+- struct wsi_wl_display **display_out)
+-{
+- struct wsi_wl_display *display =
+- vk_alloc(wsi->alloc, sizeof(*display), 8,
+- VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+- if (!display)
+- return VK_ERROR_OUT_OF_HOST_MEMORY;
+-
+- VkResult result = wsi_wl_display_init(wsi, display, wl_display, true,
+- sw);
+- if (result != VK_SUCCESS) {
+- vk_free(wsi->alloc, display);
+- return result;
+- }
+-
+- *display_out = display;
+-
+- return result;
+-}
+-
+-static void
+-wsi_wl_display_destroy(struct wsi_wl_display *display)
+-{
+- struct wsi_wayland *wsi = display->wsi_wl;
+- wsi_wl_display_finish(display);
+- vk_free(wsi->alloc, display);
+-}
+-
+-VKAPI_ATTR VkBool32 VKAPI_CALL
+-wsi_GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
+- uint32_t queueFamilyIndex,
+- struct wl_display *wl_display)
+-{
+- VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
+- struct wsi_device *wsi_device = pdevice->wsi_device;
+- struct wsi_wayland *wsi =
+- (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
+-
+- if (!(wsi_device->queue_supports_blit & BITFIELD64_BIT(queueFamilyIndex)))
+- return false;
+-
+- struct wsi_wl_display display;
+- VkResult ret = wsi_wl_display_init(wsi, &display, wl_display, false,
+- wsi_device->sw);
+- if (ret == VK_SUCCESS)
+- wsi_wl_display_finish(&display);
+-
+- return ret == VK_SUCCESS;
+-}
+-
+-static VkResult
+-wsi_wl_surface_get_support(VkIcdSurfaceBase *surface,
+- struct wsi_device *wsi_device,
+- uint32_t queueFamilyIndex,
+- VkBool32* pSupported)
+-{
+- *pSupported = true;
+-
+- return VK_SUCCESS;
+-}
+-
+-static uint32_t
+-wsi_wl_surface_get_min_image_count(const VkSurfacePresentModeEXT *present_mode)
+-{
+- if (present_mode && (present_mode->presentMode == VK_PRESENT_MODE_FIFO_KHR ||
+- present_mode->presentMode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)) {
+-    /* If we receive a FIFO present mode, only 2 images are required for forward progress.
+- * Performance with 2 images will be questionable, but we only allow it for applications
+- * using the new API, so we don't risk breaking any existing apps this way.
+- * Other ICDs expose 2 images here already. */
+- return 2;
+- } else {
+- /* For true mailbox mode, we need at least 4 images:
+- * 1) One to scan out from
+- * 2) One to have queued for scan-out
+- * 3) One to be currently held by the Wayland compositor
+- * 4) One to render to
+- */
+- return 4;
+- }
+-}
+-
+-static uint32_t
+-wsi_wl_surface_get_min_image_count_for_mode_group(const VkSwapchainPresentModesCreateInfoEXT *modes)
+-{
+-   /* If the PresentModeCreateInfo struct is not provided, we must be backwards
+-    * compatible and assume that minImageCount is the default one, i.e. 4, which supports both FIFO and MAILBOX. */
+- if (!modes) {
+- return wsi_wl_surface_get_min_image_count(NULL);
+- }
+-
+- uint32_t max_required = 0;
+- for (uint32_t i = 0; i < modes->presentModeCount; i++) {
+- const VkSurfacePresentModeEXT mode = {
+- VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_EXT,
+- NULL,
+- modes->pPresentModes[i]
+- };
+- max_required = MAX2(max_required, wsi_wl_surface_get_min_image_count(&mode));
+- }
+-
+- return max_required;
+-}
+-
+-static VkResult
+-wsi_wl_surface_get_capabilities(VkIcdSurfaceBase *surface,
+- struct wsi_device *wsi_device,
+- const VkSurfacePresentModeEXT *present_mode,
+- VkSurfaceCapabilitiesKHR* caps)
+-{
+- caps->minImageCount = wsi_wl_surface_get_min_image_count(present_mode);
+- /* There is no real maximum */
+- caps->maxImageCount = 0;
+-
+- caps->currentExtent = (VkExtent2D) { UINT32_MAX, UINT32_MAX };
+- caps->minImageExtent = (VkExtent2D) { 1, 1 };
+- caps->maxImageExtent = (VkExtent2D) {
+- wsi_device->maxImageDimension2D,
+- wsi_device->maxImageDimension2D,
+- };
+-
+- caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
+- caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
+- caps->maxImageArrayLayers = 1;
+-
+- caps->supportedCompositeAlpha =
+- VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
+- VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
+-
+- caps->supportedUsageFlags = wsi_caps_get_image_usage();
+-
+- VK_FROM_HANDLE(vk_physical_device, pdevice, wsi_device->pdevice);
+- if (pdevice->supported_extensions.EXT_attachment_feedback_loop_layout)
+- caps->supportedUsageFlags |= VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;
+-
+- return VK_SUCCESS;
+-}
+-
+-static VkResult
+-wsi_wl_surface_get_capabilities2(VkIcdSurfaceBase *surface,
+- struct wsi_device *wsi_device,
+- const void *info_next,
+- VkSurfaceCapabilities2KHR* caps)
+-{
+- assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);
+-
+- const VkSurfacePresentModeEXT *present_mode = vk_find_struct_const(info_next, SURFACE_PRESENT_MODE_EXT);
+-
+- VkResult result =
+- wsi_wl_surface_get_capabilities(surface, wsi_device, present_mode,
+- &caps->surfaceCapabilities);
+-
+- vk_foreach_struct(ext, caps->pNext) {
+- switch (ext->sType) {
+- case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
+- VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
+- protected->supportsProtected = VK_FALSE;
+- break;
+- }
+-
+- case VK_STRUCTURE_TYPE_SURFACE_PRESENT_SCALING_CAPABILITIES_EXT: {
+- /* Unsupported. */
+- VkSurfacePresentScalingCapabilitiesEXT *scaling = (void *)ext;
+- scaling->supportedPresentScaling = 0;
+- scaling->supportedPresentGravityX = 0;
+- scaling->supportedPresentGravityY = 0;
+- scaling->minScaledImageExtent = caps->surfaceCapabilities.minImageExtent;
+- scaling->maxScaledImageExtent = caps->surfaceCapabilities.maxImageExtent;
+- break;
+- }
+-
+- case VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_EXT: {
+- /* Can easily toggle between FIFO and MAILBOX on Wayland. */
+- VkSurfacePresentModeCompatibilityEXT *compat = (void *)ext;
+- if (compat->pPresentModes) {
+- assert(present_mode);
+- VK_OUTARRAY_MAKE_TYPED(VkPresentModeKHR, modes, compat->pPresentModes, &compat->presentModeCount);
+- /* Must always return queried present mode even when truncating. */
+- vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
+- *mode = present_mode->presentMode;
+- }
+- switch (present_mode->presentMode) {
+- case VK_PRESENT_MODE_MAILBOX_KHR:
+- vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
+- *mode = VK_PRESENT_MODE_FIFO_KHR;
+- }
+- break;
+- case VK_PRESENT_MODE_FIFO_KHR:
+- vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
+- *mode = VK_PRESENT_MODE_MAILBOX_KHR;
+- }
+- break;
+- default:
+- break;
+- }
+- } else {
+- if (!present_mode) {
+- wsi_common_vk_warn_once("Use of VkSurfacePresentModeCompatibilityEXT "
+- "without a VkSurfacePresentModeEXT set. This is an "
+- "application bug.\n");
+- compat->presentModeCount = 1;
+- } else {
+- switch (present_mode->presentMode) {
+- case VK_PRESENT_MODE_MAILBOX_KHR:
+- case VK_PRESENT_MODE_FIFO_KHR:
+- compat->presentModeCount = 2;
+- break;
+- default:
+- compat->presentModeCount = 1;
+- break;
+- }
+- }
+- }
+- break;
+- }
+-
+- default:
+- /* Ignored */
+- break;
+- }
+- }
+-
+- return result;
+-}
+-
+-static VkResult
+-wsi_wl_surface_get_formats(VkIcdSurfaceBase *icd_surface,
+- struct wsi_device *wsi_device,
+- uint32_t* pSurfaceFormatCount,
+- VkSurfaceFormatKHR* pSurfaceFormats)
+-{
+- VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
+- struct wsi_wayland *wsi =
+- (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
+-
+- struct wsi_wl_display display;
+- if (wsi_wl_display_init(wsi, &display, surface->display, true,
+- wsi_device->sw))
+- return VK_ERROR_SURFACE_LOST_KHR;
+-
+- VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out,
+- pSurfaceFormats, pSurfaceFormatCount);
+-
+- struct wsi_wl_format *disp_fmt;
+- u_vector_foreach(disp_fmt, &display.formats) {
+- /* Skip formats for which we can't support both alpha & opaque
+- * formats.
+- */
+- if (!(disp_fmt->flags & WSI_WL_FMT_ALPHA) ||
+- !(disp_fmt->flags & WSI_WL_FMT_OPAQUE))
+- continue;
+-
+- vk_outarray_append_typed(VkSurfaceFormatKHR, &out, out_fmt) {
+- out_fmt->format = disp_fmt->vk_format;
+- out_fmt->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+- }
+- }
+-
+- wsi_wl_display_finish(&display);
+-
+- return vk_outarray_status(&out);
+-}
+-
+-static VkResult
+-wsi_wl_surface_get_formats2(VkIcdSurfaceBase *icd_surface,
+- struct wsi_device *wsi_device,
+- const void *info_next,
+- uint32_t* pSurfaceFormatCount,
+- VkSurfaceFormat2KHR* pSurfaceFormats)
+-{
+- VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
+- struct wsi_wayland *wsi =
+- (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
+-
+- struct wsi_wl_display display;
+- if (wsi_wl_display_init(wsi, &display, surface->display, true,
+- wsi_device->sw))
+- return VK_ERROR_SURFACE_LOST_KHR;
+-
+- VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out,
+- pSurfaceFormats, pSurfaceFormatCount);
+-
+- struct wsi_wl_format *disp_fmt;
+- u_vector_foreach(disp_fmt, &display.formats) {
+- /* Skip formats for which we can't support both alpha & opaque
+- * formats.
+- */
+- if (!(disp_fmt->flags & WSI_WL_FMT_ALPHA) ||
+- !(disp_fmt->flags & WSI_WL_FMT_OPAQUE))
+- continue;
+-
+- vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, out_fmt) {
+- out_fmt->surfaceFormat.format = disp_fmt->vk_format;
+- out_fmt->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+- }
+- }
+-
+- wsi_wl_display_finish(&display);
+-
+- return vk_outarray_status(&out);
+-}
+-
+-static VkResult
+-wsi_wl_surface_get_present_modes(VkIcdSurfaceBase *icd_surface,
+- struct wsi_device *wsi_device,
+- uint32_t* pPresentModeCount,
+- VkPresentModeKHR* pPresentModes)
+-{
+- VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
+- struct wsi_wayland *wsi =
+- (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
+-
+- struct wsi_wl_display display;
+- if (wsi_wl_display_init(wsi, &display, surface->display, true,
+- wsi_device->sw))
+- return VK_ERROR_SURFACE_LOST_KHR;
+-
+- VkPresentModeKHR present_modes[3];
+- uint32_t present_modes_count = 0;
+-
+- /* The following two modes are always supported */
+- present_modes[present_modes_count++] = VK_PRESENT_MODE_MAILBOX_KHR;
+- present_modes[present_modes_count++] = VK_PRESENT_MODE_FIFO_KHR;
+-
+- if (display.tearing_control_manager)
+- present_modes[present_modes_count++] = VK_PRESENT_MODE_IMMEDIATE_KHR;
+-
+- assert(present_modes_count <= ARRAY_SIZE(present_modes));
+- wsi_wl_display_finish(&display);
+-
+- if (pPresentModes == NULL) {
+- *pPresentModeCount = present_modes_count;
+- return VK_SUCCESS;
+- }
+-
+- *pPresentModeCount = MIN2(*pPresentModeCount, present_modes_count);
+- typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);
+-
+- if (*pPresentModeCount < present_modes_count)
+- return VK_INCOMPLETE;
+- else
+- return VK_SUCCESS;
+-}
+-
+-static VkResult
+-wsi_wl_surface_get_present_rectangles(VkIcdSurfaceBase *surface,
+- struct wsi_device *wsi_device,
+- uint32_t* pRectCount,
+- VkRect2D* pRects)
+-{
+- VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);
+-
+- vk_outarray_append_typed(VkRect2D, &out, rect) {
+- /* We don't know a size so just return the usual "I don't know." */
+- *rect = (VkRect2D) {
+- .offset = { 0, 0 },
+- .extent = { UINT32_MAX, UINT32_MAX },
+- };
+- }
+-
+- return vk_outarray_status(&out);
+-}
+-
+-void
+-wsi_wl_surface_destroy(VkIcdSurfaceBase *icd_surface, VkInstance _instance,
+- const VkAllocationCallbacks *pAllocator)
+-{
+- VK_FROM_HANDLE(vk_instance, instance, _instance);
+- struct wsi_wl_surface *wsi_wl_surface =
+- wl_container_of((VkIcdSurfaceWayland *)icd_surface, wsi_wl_surface, base);
+-
+- if (wsi_wl_surface->wl_dmabuf_feedback) {
+- zwp_linux_dmabuf_feedback_v1_destroy(wsi_wl_surface->wl_dmabuf_feedback);
+- dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
+- dmabuf_feedback_fini(&wsi_wl_surface->pending_dmabuf_feedback);
+- }
+-
+- if (wsi_wl_surface->surface)
+- wl_proxy_wrapper_destroy(wsi_wl_surface->surface);
+-
+- if (wsi_wl_surface->display)
+- wsi_wl_display_destroy(wsi_wl_surface->display);
+-
+- vk_free2(&instance->alloc, pAllocator, wsi_wl_surface);
+-}
+-
+-static struct wsi_wl_format *
+-pick_format_from_surface_dmabuf_feedback(struct wsi_wl_surface *wsi_wl_surface,
+- VkFormat vk_format)
+-{
+- struct wsi_wl_format *f = NULL;
+-
+- /* If the main_device was not advertised, we don't have valid feedback */
+- if (wsi_wl_surface->dmabuf_feedback.main_device == 0)
+- return NULL;
+-
+- util_dynarray_foreach(&wsi_wl_surface->dmabuf_feedback.tranches,
+- struct dmabuf_feedback_tranche, tranche) {
+- f = find_format(&tranche->formats, vk_format);
+- if (f)
+- break;
+- }
+-
+- return f;
+-}
+-
+-static void
+-surface_dmabuf_feedback_format_table(void *data,
+- struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
+- int32_t fd, uint32_t size)
+-{
+- struct wsi_wl_surface *wsi_wl_surface = data;
+- struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
+-
+- feedback->format_table.size = size;
+- feedback->format_table.data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
+-
+- close(fd);
+-}
+-
+-static void
+-surface_dmabuf_feedback_main_device(void *data,
+- struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
+- struct wl_array *device)
+-{
+- struct wsi_wl_surface *wsi_wl_surface = data;
+- struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
+-
+- memcpy(&feedback->main_device, device->data, sizeof(feedback->main_device));
+-}
+-
+-static void
+-surface_dmabuf_feedback_tranche_target_device(void *data,
+- struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
+- struct wl_array *device)
+-{
+- struct wsi_wl_surface *wsi_wl_surface = data;
+- struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
+-
+- memcpy(&feedback->pending_tranche.target_device, device->data,
+- sizeof(feedback->pending_tranche.target_device));
+-}
+-
+-static void
+-surface_dmabuf_feedback_tranche_flags(void *data,
+- struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
+- uint32_t flags)
+-{
+- struct wsi_wl_surface *wsi_wl_surface = data;
+- struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
+-
+- feedback->pending_tranche.flags = flags;
+-}
+-
+-static void
+-surface_dmabuf_feedback_tranche_formats(void *data,
+- struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
+- struct wl_array *indices)
+-{
+- struct wsi_wl_surface *wsi_wl_surface = data;
+- struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
+- uint32_t format;
+- uint64_t modifier;
+- uint16_t *index;
+-
+-   /* The compositor may or may not advertise a format table. If it does, we use
+-    * it. Otherwise, we steal the most recently advertised format table. If we
+-    * don't have one, the compositor did something wrong. */
+- if (feedback->format_table.data == NULL) {
+- feedback->format_table = wsi_wl_surface->dmabuf_feedback.format_table;
+- dmabuf_feedback_format_table_init(&wsi_wl_surface->dmabuf_feedback.format_table);
+- }
+- if (feedback->format_table.data == MAP_FAILED ||
+- feedback->format_table.data == NULL)
+- return;
+-
+- wl_array_for_each(index, indices) {
+- format = feedback->format_table.data[*index].format;
+- modifier = feedback->format_table.data[*index].modifier;
+-
+- wsi_wl_display_add_drm_format_modifier(wsi_wl_surface->display,
+- &wsi_wl_surface->pending_dmabuf_feedback.pending_tranche.formats,
+- format, modifier);
+- }
+-}
+-
+-static void
+-surface_dmabuf_feedback_tranche_done(void *data,
+- struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
+-{
+- struct wsi_wl_surface *wsi_wl_surface = data;
+- struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
+-
+- /* Add tranche to array of tranches. */
+- util_dynarray_append(&feedback->tranches, struct dmabuf_feedback_tranche,
+- feedback->pending_tranche);
+-
+- dmabuf_feedback_tranche_init(&feedback->pending_tranche);
+-}
+-
+-static bool
+-sets_of_modifiers_are_the_same(uint32_t num_drm_modifiers_A, const uint64_t *modifiers_A,
+- uint32_t num_drm_modifiers_B, const uint64_t *modifiers_B)
+-{
+- uint32_t i, j;
+- bool mod_found;
+-
+- if (num_drm_modifiers_A != num_drm_modifiers_B)
+- return false;
+-
+- for (i = 0; i < num_drm_modifiers_A; i++) {
+- mod_found = false;
+- for (j = 0; j < num_drm_modifiers_B; j++) {
+- if (modifiers_A[i] == modifiers_B[j]) {
+- mod_found = true;
+- break;
+- }
+- }
+- if (!mod_found)
+- return false;
+- }
+-
+- return true;
+-}
+-
+-static void
+-surface_dmabuf_feedback_done(void *data,
+- struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
+-{
+- struct wsi_wl_surface *wsi_wl_surface = data;
+- struct wsi_wl_swapchain *chain = wsi_wl_surface->chain;
+- struct wsi_wl_format *f;
+-
+- dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
+- wsi_wl_surface->dmabuf_feedback = wsi_wl_surface->pending_dmabuf_feedback;
+- dmabuf_feedback_init(&wsi_wl_surface->pending_dmabuf_feedback);
+-
+-   /* Receiving dma-buf feedback does not by itself mean that re-allocation is
+-    * a good idea. To know whether we should re-allocate, we must compare the
+-    * most recent parameters that we used to allocate with the ones from the
+-    * feedback we just received.
+-    *
+-    * The allocation parameters are: the format, its set of modifiers and the
+-    * tranche flags. WSI does not use the tranche flags for anything, so we
+-    * disregard them. As we can't switch to another format (it is selected by
+-    * the client), we just need to compare the sets of modifiers.
+- *
+- * So we just look for the vk_format in the tranches (respecting their
+- * preferences), and compare its set of modifiers with the set of modifiers
+- * we've used to allocate previously. If they differ, we are using suboptimal
+- * parameters and should re-allocate.
+- */
+- f = pick_format_from_surface_dmabuf_feedback(wsi_wl_surface, chain->vk_format);
+- if (f && !sets_of_modifiers_are_the_same(u_vector_length(&f->modifiers),
+- u_vector_tail(&f->modifiers),
+- chain->num_drm_modifiers,
+- chain->drm_modifiers))
+- wsi_wl_surface->chain->suboptimal = true;
+-}
+-
+-static const struct zwp_linux_dmabuf_feedback_v1_listener
+-surface_dmabuf_feedback_listener = {
+- .format_table = surface_dmabuf_feedback_format_table,
+- .main_device = surface_dmabuf_feedback_main_device,
+- .tranche_target_device = surface_dmabuf_feedback_tranche_target_device,
+- .tranche_flags = surface_dmabuf_feedback_tranche_flags,
+- .tranche_formats = surface_dmabuf_feedback_tranche_formats,
+- .tranche_done = surface_dmabuf_feedback_tranche_done,
+- .done = surface_dmabuf_feedback_done,
+-};
+-
+-static VkResult wsi_wl_surface_bind_to_dmabuf_feedback(struct wsi_wl_surface *wsi_wl_surface)
+-{
+- wsi_wl_surface->wl_dmabuf_feedback =
+- zwp_linux_dmabuf_v1_get_surface_feedback(wsi_wl_surface->display->wl_dmabuf,
+- wsi_wl_surface->surface);
+-
+- zwp_linux_dmabuf_feedback_v1_add_listener(wsi_wl_surface->wl_dmabuf_feedback,
+- &surface_dmabuf_feedback_listener,
+- wsi_wl_surface);
+-
+- if (dmabuf_feedback_init(&wsi_wl_surface->dmabuf_feedback) < 0)
+- goto fail;
+- if (dmabuf_feedback_init(&wsi_wl_surface->pending_dmabuf_feedback) < 0)
+- goto fail_pending;
+-
+- return VK_SUCCESS;
+-
+-fail_pending:
+- dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
+-fail:
+- zwp_linux_dmabuf_feedback_v1_destroy(wsi_wl_surface->wl_dmabuf_feedback);
+- wsi_wl_surface->wl_dmabuf_feedback = NULL;
+- return VK_ERROR_OUT_OF_HOST_MEMORY;
+-}
+-
+-static VkResult wsi_wl_surface_init(struct wsi_wl_surface *wsi_wl_surface,
+- struct wsi_device *wsi_device)
+-{
+- struct wsi_wayland *wsi =
+- (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
+- VkResult result;
+-
+- /* wsi_wl_surface has already been initialized. */
+- if (wsi_wl_surface->display)
+- return VK_SUCCESS;
+-
+- result = wsi_wl_display_create(wsi, wsi_wl_surface->base.display,
+- wsi_device->sw, &wsi_wl_surface->display);
+- if (result != VK_SUCCESS)
+- goto fail;
+-
+- wsi_wl_surface->surface = wl_proxy_create_wrapper(wsi_wl_surface->base.surface);
+- if (!wsi_wl_surface->surface) {
+- result = VK_ERROR_OUT_OF_HOST_MEMORY;
+- goto fail;
+- }
+- wl_proxy_set_queue((struct wl_proxy *) wsi_wl_surface->surface,
+- wsi_wl_surface->display->queue);
+-
+- /* Bind wsi_wl_surface to dma-buf feedback. */
+- if (wsi_wl_surface->display->wl_dmabuf &&
+- zwp_linux_dmabuf_v1_get_version(wsi_wl_surface->display->wl_dmabuf) >=
+- ZWP_LINUX_DMABUF_V1_GET_SURFACE_FEEDBACK_SINCE_VERSION) {
+- result = wsi_wl_surface_bind_to_dmabuf_feedback(wsi_wl_surface);
+- if (result != VK_SUCCESS)
+- goto fail;
+-
+- wl_display_roundtrip_queue(wsi_wl_surface->display->wl_display,
+- wsi_wl_surface->display->queue);
+- }
+-
+- return VK_SUCCESS;
+-
+-fail:
+- if (wsi_wl_surface->surface)
+- wl_proxy_wrapper_destroy(wsi_wl_surface->surface);
+-
+- if (wsi_wl_surface->display)
+- wsi_wl_display_destroy(wsi_wl_surface->display);
+- return result;
+-}
+-
+-VKAPI_ATTR VkResult VKAPI_CALL
+-wsi_CreateWaylandSurfaceKHR(VkInstance _instance,
+- const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
+- const VkAllocationCallbacks *pAllocator,
+- VkSurfaceKHR *pSurface)
+-{
+- VK_FROM_HANDLE(vk_instance, instance, _instance);
+- struct wsi_wl_surface *wsi_wl_surface;
+- VkIcdSurfaceWayland *surface;
+-
+- assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR);
+-
+- wsi_wl_surface = vk_zalloc2(&instance->alloc, pAllocator, sizeof *wsi_wl_surface,
+- 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+- if (wsi_wl_surface == NULL)
+- return VK_ERROR_OUT_OF_HOST_MEMORY;
+-
+- surface = &wsi_wl_surface->base;
+-
+- surface->base.platform = VK_ICD_WSI_PLATFORM_WAYLAND;
+- surface->display = pCreateInfo->display;
+- surface->surface = pCreateInfo->surface;
+-
+- *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
+-
+- return VK_SUCCESS;
+-}
+-
+-struct wsi_wl_present_id {
+- struct wp_presentation_feedback *feedback;
+- uint64_t present_id;
+- const VkAllocationCallbacks *alloc;
+- struct wsi_wl_swapchain *chain;
+- struct wl_list link;
+-};
+-
+-static struct wsi_image *
+-wsi_wl_swapchain_get_wsi_image(struct wsi_swapchain *wsi_chain,
+- uint32_t image_index)
+-{
+- struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
+- return &chain->images[image_index].base;
+-}
+-
+-static VkResult
+-wsi_wl_swapchain_release_images(struct wsi_swapchain *wsi_chain,
+- uint32_t count, const uint32_t *indices)
+-{
+- struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
+- for (uint32_t i = 0; i < count; i++) {
+- uint32_t index = indices[i];
+- assert(chain->images[index].busy);
+- chain->images[index].busy = false;
+- }
+- return VK_SUCCESS;
+-}
+-
+-static void
+-wsi_wl_swapchain_set_present_mode(struct wsi_swapchain *wsi_chain,
+- VkPresentModeKHR mode)
+-{
+- struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
+- chain->base.present_mode = mode;
+-}
+-
+-static VkResult
+-wsi_wl_swapchain_wait_for_present(struct wsi_swapchain *wsi_chain,
+- uint64_t present_id,
+- uint64_t timeout)
+-{
+- struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
+- uint64_t end_time, time_left, now;
+- int ret;
+- bool expired = false;
+- bool finished;
+-
+- if (timeout == UINT64_MAX)
+- end_time = timeout;
+- else
+- end_time = os_time_get_absolute_timeout(timeout);
+-
+- /* Need to observe that the swapchain semaphore has been unsignalled,
+- * as this is guaranteed when a present is complete. */
+- VkResult result = wsi_swapchain_wait_for_present_semaphore(
+- &chain->base, present_id, timeout);
+- if (result != VK_SUCCESS)
+- return result;
+-
+- if (!chain->present_ids.wp_presentation) {
+- /* If we're enabling present wait despite the protocol not being supported,
+- * use best effort not to crash, even if result will not be correct.
+-       * use best effort not to crash, even if the result will not be correct.
+- return VK_SUCCESS;
+- }
+-
+- while (1) {
+- ret = wl_display_dispatch_queue_pending(chain->wsi_wl_surface->display->wl_display,
+- chain->queue);
+- if (ret < 0)
+- return VK_ERROR_OUT_OF_DATE_KHR;
+-
+- /* PresentWait can be called concurrently.
+- * If there is contention on this mutex, it means there is currently a dispatcher in flight holding the lock.
+- * The lock is only held while there is forward progress processing events from Wayland,
+- * so there should be no problem locking without timeout.
+- * We would like to be able to support timeout = 0 to query the current max_completed count.
+- * A timedlock with no timeout can be problematic in that scenario. */
+- pthread_mutex_lock(&chain->present_ids.lock);
+- finished = chain->present_ids.max_completed >= present_id;
+- pthread_mutex_unlock(&chain->present_ids.lock);
+- if (finished)
+- return VK_SUCCESS;
+-
+- if (expired)
+- return VK_TIMEOUT;
+-
+- now = os_time_get_nano();
+- if (now > end_time)
+- time_left = 0;
+- else
+- time_left = end_time - now;
+-
+- ret = wsi_wl_display_dispatch_queue_with_timeout(chain->wsi_wl_surface->display,
+- chain->queue,
+- time_left);
+- if (ret == VK_INCOMPLETE)
+- continue;
+-
+- if (ret != VK_SUCCESS && ret != VK_TIMEOUT)
+- return ret;
+-
+- if (time_left == 0)
+- expired = true;
+- }
+-}
+-
+-static VkResult
+-wsi_wl_swapchain_acquire_next_image(struct wsi_swapchain *wsi_chain,
+- const VkAcquireNextImageInfoKHR *info,
+- uint32_t *image_index)
+-{
+- struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
+- struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;
+- uint64_t end_time, time_left, now;
+- bool expired = false;
+- int ret;
+-
+- if (info->timeout == UINT64_MAX)
+- end_time = info->timeout;
+- else
+- end_time = os_time_get_absolute_timeout(info->timeout);
+-
+- while (1) {
+- ret = wl_display_dispatch_queue_pending(wsi_wl_surface->display->wl_display,
+- wsi_wl_surface->display->queue);
+- if (ret < 0)
+- return VK_ERROR_OUT_OF_DATE_KHR;
+-
+- /* Try to find a free image. */
+- for (uint32_t i = 0; i < chain->base.image_count; i++) {
+- if (!chain->images[i].busy) {
+- /* We found a non-busy image */
+- *image_index = i;
+- chain->images[i].busy = true;
+- return (chain->suboptimal ? VK_SUBOPTIMAL_KHR : VK_SUCCESS);
+- }
+- }
+-
+- if (expired)
+- return info->timeout ? VK_TIMEOUT : VK_NOT_READY;
+-
+- now = os_time_get_nano();
+- if (now > end_time)
+- time_left = 0;
+- else
+- time_left = end_time - now;
+-
+- ret = wsi_wl_display_dispatch_queue_with_timeout(wsi_wl_surface->display,
+- wsi_wl_surface->display->queue,
+- time_left);
+- if (ret == VK_ERROR_OUT_OF_DATE_KHR)
+- return ret;
+-
+- if (ret == VK_INCOMPLETE)
+- continue;
+-
+- if (ret == VK_TIMEOUT)
+- expired = true;
+- }
+-}
+-
+-static void
+-presentation_handle_sync_output(void *data,
+- struct wp_presentation_feedback *feedback,
+- struct wl_output *output)
+-{
+-}
+-
+-static void
+-presentation_handle_presented(void *data,
+- struct wp_presentation_feedback *feedback,
+- uint32_t tv_sec_hi, uint32_t tv_sec_lo,
+- uint32_t tv_nsec, uint32_t refresh,
+- uint32_t seq_hi, uint32_t seq_lo,
+- uint32_t flags)
+-{
+- struct wsi_wl_present_id *id = data;
+-
+- pthread_mutex_lock(&id->chain->present_ids.lock);
+- if (id->present_id > id->chain->present_ids.max_completed)
+- id->chain->present_ids.max_completed = id->present_id;
+- pthread_mutex_unlock(&id->chain->present_ids.lock);
+-
+- wp_presentation_feedback_destroy(feedback);
+- wl_list_remove(&id->link);
+- vk_free(id->alloc, id);
+-}
+-
+-static void
+-presentation_handle_discarded(void *data,
+- struct wp_presentation_feedback *feedback)
+-{
+- struct wsi_wl_present_id *id = data;
+-
+- pthread_mutex_lock(&id->chain->present_ids.lock);
+- if (id->present_id > id->chain->present_ids.max_completed)
+- id->chain->present_ids.max_completed = id->present_id;
+- pthread_mutex_unlock(&id->chain->present_ids.lock);
+-
+- wp_presentation_feedback_destroy(feedback);
+- wl_list_remove(&id->link);
+- vk_free(id->alloc, id);
+-}
+-
+-static const struct wp_presentation_feedback_listener
+- pres_feedback_listener = {
+- presentation_handle_sync_output,
+- presentation_handle_presented,
+- presentation_handle_discarded,
+-};
+-
+-static void
+-frame_handle_done(void *data, struct wl_callback *callback, uint32_t serial)
+-{
+- struct wsi_wl_swapchain *chain = data;
+-
+- chain->frame = NULL;
+- chain->fifo_ready = true;
+-
+- wl_callback_destroy(callback);
+-}
+-
+-static const struct wl_callback_listener frame_listener = {
+- frame_handle_done,
+-};
+-
+-static VkResult
+-wsi_wl_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
+- uint32_t image_index,
+- uint64_t present_id,
+- const VkPresentRegionKHR *damage)
+-{
+- struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
+- struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;
+-
+- if (chain->buffer_type == WSI_WL_BUFFER_SHM_MEMCPY) {
+- struct wsi_wl_image *image = &chain->images[image_index];
+- memcpy(image->shm_ptr, image->base.cpu_map,
+- image->base.row_pitches[0] * chain->extent.height);
+- }
+-
+- /* For EXT_swapchain_maintenance1. We might have transitioned from FIFO to MAILBOX.
+-    * In this case we need to let the FIFO request complete before presenting MAILBOX. */
+- while (!chain->fifo_ready) {
+- int ret = wl_display_dispatch_queue(wsi_wl_surface->display->wl_display,
+- wsi_wl_surface->display->queue);
+- if (ret < 0)
+- return VK_ERROR_OUT_OF_DATE_KHR;
+- }
+-
+- assert(image_index < chain->base.image_count);
+- wl_surface_attach(wsi_wl_surface->surface, chain->images[image_index].buffer, 0, 0);
+-
+- if (wl_surface_get_version(wsi_wl_surface->surface) >= 4 && damage &&
+- damage->pRectangles && damage->rectangleCount > 0) {
+- for (unsigned i = 0; i < damage->rectangleCount; i++) {
+- const VkRectLayerKHR *rect = &damage->pRectangles[i];
+- assert(rect->layer == 0);
+- wl_surface_damage_buffer(wsi_wl_surface->surface,
+- rect->offset.x, rect->offset.y,
+- rect->extent.width, rect->extent.height);
+- }
+- } else {
+- wl_surface_damage(wsi_wl_surface->surface, 0, 0, INT32_MAX, INT32_MAX);
+- }
+-
+- if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
+- chain->frame = wl_surface_frame(wsi_wl_surface->surface);
+- wl_callback_add_listener(chain->frame, &frame_listener, chain);
+- chain->fifo_ready = false;
+- } else {
+- /* If we present MAILBOX, any subsequent presentation in FIFO can replace this image. */
+- chain->fifo_ready = true;
+- }
+-
+- if (present_id > 0 && chain->present_ids.wp_presentation) {
+- struct wsi_wl_present_id *id =
+- vk_zalloc(chain->wsi_wl_surface->display->wsi_wl->alloc, sizeof(*id), sizeof(uintptr_t),
+- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+- id->chain = chain;
+- id->present_id = present_id;
+- id->alloc = chain->wsi_wl_surface->display->wsi_wl->alloc;
+-
+- pthread_mutex_lock(&chain->present_ids.lock);
+- id->feedback = wp_presentation_feedback(chain->present_ids.wp_presentation,
+- chain->wsi_wl_surface->surface);
+- wp_presentation_feedback_add_listener(id->feedback,
+- &pres_feedback_listener,
+- id);
+- wl_list_insert(&chain->present_ids.outstanding_list, &id->link);
+- pthread_mutex_unlock(&chain->present_ids.lock);
+- }
+-
+- chain->images[image_index].busy = true;
+- wl_surface_commit(wsi_wl_surface->surface);
+- wl_display_flush(wsi_wl_surface->display->wl_display);
+-
+- return VK_SUCCESS;
+-}
+-
+-static void
+-buffer_handle_release(void *data, struct wl_buffer *buffer)
+-{
+- struct wsi_wl_image *image = data;
+-
+- assert(image->buffer == buffer);
+-
+- image->busy = false;
+-}
+-
+-static const struct wl_buffer_listener buffer_listener = {
+- buffer_handle_release,
+-};
+-
+-static uint8_t *
+-wsi_wl_alloc_image_shm(struct wsi_image *imagew, unsigned size)
+-{
+- struct wsi_wl_image *image = (struct wsi_wl_image *)imagew;
+-
+- /* Create a shareable buffer */
+- int fd = os_create_anonymous_file(size, NULL);
+- if (fd < 0)
+- return NULL;
+-
+- void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+- if (ptr == MAP_FAILED) {
+- close(fd);
+- return NULL;
+- }
+-
+- image->shm_fd = fd;
+- image->shm_ptr = ptr;
+- image->shm_size = size;
+-
+- return ptr;
+-}
+-
+-static VkResult
+-wsi_wl_image_init(struct wsi_wl_swapchain *chain,
+- struct wsi_wl_image *image,
+- const VkSwapchainCreateInfoKHR *pCreateInfo,
+- const VkAllocationCallbacks* pAllocator)
+-{
+- struct wsi_wl_display *display = chain->wsi_wl_surface->display;
+- VkResult result;
+-
+- result = wsi_create_image(&chain->base, &chain->base.image_info,
+- &image->base);
+- if (result != VK_SUCCESS)
+- return result;
+-
+- switch (chain->buffer_type) {
+- case WSI_WL_BUFFER_GPU_SHM:
+- case WSI_WL_BUFFER_SHM_MEMCPY: {
+- if (chain->buffer_type == WSI_WL_BUFFER_SHM_MEMCPY) {
+- wsi_wl_alloc_image_shm(&image->base, image->base.row_pitches[0] *
+- chain->extent.height);
+- }
+- assert(image->shm_ptr != NULL);
+-
+- /* Share it in a wl_buffer */
+- struct wl_shm_pool *pool = wl_shm_create_pool(display->wl_shm,
+- image->shm_fd,
+- image->shm_size);
+- wl_proxy_set_queue((struct wl_proxy *)pool, display->queue);
+- image->buffer = wl_shm_pool_create_buffer(pool, 0, chain->extent.width,
+- chain->extent.height,
+- image->base.row_pitches[0],
+- chain->shm_format);
+- wl_shm_pool_destroy(pool);
+- break;
+- }
+-
+- case WSI_WL_BUFFER_NATIVE: {
+- assert(display->wl_dmabuf);
+-
+- struct zwp_linux_buffer_params_v1 *params =
+- zwp_linux_dmabuf_v1_create_params(display->wl_dmabuf);
+- if (!params)
+- goto fail_image;
+-
+- for (int i = 0; i < image->base.num_planes; i++) {
+- zwp_linux_buffer_params_v1_add(params,
+- image->base.dma_buf_fd,
+- i,
+- image->base.offsets[i],
+- image->base.row_pitches[i],
+- image->base.drm_modifier >> 32,
+- image->base.drm_modifier & 0xffffffff);
+- }
+-
+- image->buffer =
+- zwp_linux_buffer_params_v1_create_immed(params,
+- chain->extent.width,
+- chain->extent.height,
+- chain->drm_format,
+- 0);
+- zwp_linux_buffer_params_v1_destroy(params);
+- break;
+- }
+-
+- default:
+- unreachable("Invalid buffer type");
+- }
+-
+- if (!image->buffer)
+- goto fail_image;
+-
+- wl_buffer_add_listener(image->buffer, &buffer_listener, image);
+-
+- return VK_SUCCESS;
+-
+-fail_image:
+- wsi_destroy_image(&chain->base, &image->base);
+-
+- return VK_ERROR_OUT_OF_HOST_MEMORY;
+-}
+-
+-static void
+-wsi_wl_swapchain_images_free(struct wsi_wl_swapchain *chain)
+-{
+- for (uint32_t i = 0; i < chain->base.image_count; i++) {
+- if (chain->images[i].buffer) {
+- wl_buffer_destroy(chain->images[i].buffer);
+- wsi_destroy_image(&chain->base, &chain->images[i].base);
+- if (chain->images[i].shm_size) {
+- close(chain->images[i].shm_fd);
+- munmap(chain->images[i].shm_ptr, chain->images[i].shm_size);
+- }
+- }
+- }
+-}
+-
+-static void
+-wsi_wl_swapchain_chain_free(struct wsi_wl_swapchain *chain,
+- const VkAllocationCallbacks *pAllocator)
+-{
+- if (chain->frame)
+- wl_callback_destroy(chain->frame);
+- if (chain->tearing_control)
+- wp_tearing_control_v1_destroy(chain->tearing_control);
+- if (chain->wsi_wl_surface)
+- chain->wsi_wl_surface->chain = NULL;
+-
+- if (chain->present_ids.wp_presentation) {
+- /* In VK_EXT_swapchain_maintenance1 there is no requirement to wait for all present IDs to be complete.
+- * Waiting for the swapchain fence is enough.
+-       * Just clean up anything the user did not wait for. */
+- struct wsi_wl_present_id *id, *tmp;
+- wl_list_for_each_safe(id, tmp, &chain->present_ids.outstanding_list, link) {
+- wp_presentation_feedback_destroy(id->feedback);
+- wl_list_remove(&id->link);
+- vk_free(id->alloc, id);
+- }
+-
+- wl_proxy_wrapper_destroy(chain->present_ids.wp_presentation);
+- pthread_mutex_destroy(&chain->present_ids.lock);
+- }
+-
+- wsi_swapchain_finish(&chain->base);
+-}
+-
+-static VkResult
+-wsi_wl_swapchain_destroy(struct wsi_swapchain *wsi_chain,
+- const VkAllocationCallbacks *pAllocator)
+-{
+- struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
+-
+- wsi_wl_swapchain_images_free(chain);
+- wsi_wl_swapchain_chain_free(chain, pAllocator);
+-
+- vk_free(pAllocator, chain);
+-
+- return VK_SUCCESS;
+-}
+-
+-static VkResult
+-wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
+- VkDevice device,
+- struct wsi_device *wsi_device,
+- const VkSwapchainCreateInfoKHR* pCreateInfo,
+- const VkAllocationCallbacks* pAllocator,
+- struct wsi_swapchain **swapchain_out)
+-{
+- struct wsi_wl_surface *wsi_wl_surface =
+- wl_container_of((VkIcdSurfaceWayland *)icd_surface, wsi_wl_surface, base);
+- struct wsi_wl_swapchain *chain;
+- VkResult result;
+-
+- assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
+-
+- int num_images = pCreateInfo->minImageCount;
+-
+- size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
+- chain = vk_zalloc(pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+- if (chain == NULL)
+- return VK_ERROR_OUT_OF_HOST_MEMORY;
+-
+- /* We are taking ownership of the wsi_wl_surface, so remove ownership from
+- * oldSwapchain. If the surface is currently owned by a swapchain that is
+-    * not oldSwapchain, we return an error.
+- */
+- if (wsi_wl_surface->chain &&
+- wsi_swapchain_to_handle(&wsi_wl_surface->chain->base) != pCreateInfo->oldSwapchain) {
+- return VK_ERROR_NATIVE_WINDOW_IN_USE_KHR;
+- }
+- if (pCreateInfo->oldSwapchain) {
+- VK_FROM_HANDLE(wsi_wl_swapchain, old_chain, pCreateInfo->oldSwapchain);
+- old_chain->wsi_wl_surface = NULL;
+- if (old_chain->tearing_control) {
+- wp_tearing_control_v1_destroy(old_chain->tearing_control);
+- old_chain->tearing_control = NULL;
+- }
+- }
+-
+- /* Take ownership of the wsi_wl_surface */
+- chain->wsi_wl_surface = wsi_wl_surface;
+- wsi_wl_surface->chain = chain;
+-
+- result = wsi_wl_surface_init(wsi_wl_surface, wsi_device);
+- if (result != VK_SUCCESS)
+- goto fail;
+-
+- VkPresentModeKHR present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
+- if (present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
+- chain->tearing_control =
+- wp_tearing_control_manager_v1_get_tearing_control(wsi_wl_surface->display->tearing_control_manager,
+- wsi_wl_surface->surface);
+- if (!chain->tearing_control) {
+- result = VK_ERROR_OUT_OF_HOST_MEMORY;
+- goto fail;
+- }
+- wp_tearing_control_v1_set_presentation_hint(chain->tearing_control,
+- WP_TEARING_CONTROL_V1_PRESENTATION_HINT_ASYNC);
+- }
+-
+- enum wsi_wl_buffer_type buffer_type;
+- struct wsi_base_image_params *image_params = NULL;
+- struct wsi_cpu_image_params cpu_image_params;
+- struct wsi_drm_image_params drm_image_params;
+- uint32_t num_drm_modifiers = 0;
+- const uint64_t *drm_modifiers = NULL;
+- if (wsi_device->sw) {
+- cpu_image_params = (struct wsi_cpu_image_params) {
+- .base.image_type = WSI_IMAGE_TYPE_CPU,
+- };
+- if (wsi_device->has_import_memory_host &&
+- !(WSI_DEBUG & WSI_DEBUG_NOSHM)) {
+- buffer_type = WSI_WL_BUFFER_GPU_SHM;
+- cpu_image_params.alloc_shm = wsi_wl_alloc_image_shm;
+- } else {
+- buffer_type = WSI_WL_BUFFER_SHM_MEMCPY;
+- }
+- image_params = &cpu_image_params.base;
+- } else {
+- drm_image_params = (struct wsi_drm_image_params) {
+- .base.image_type = WSI_IMAGE_TYPE_DRM,
+- .same_gpu = wsi_wl_surface->display->same_gpu,
+- };
+- /* Use explicit DRM format modifiers when both the server and the driver
+- * support them.
+- */
+- if (wsi_wl_surface->display->wl_dmabuf && wsi_device->supports_modifiers) {
+- struct wsi_wl_format *f = NULL;
+- /* Try to select modifiers for our vk_format from surface dma-buf
+-       * feedback. If that doesn't work, fall back to the list of
+-       * formats/modifiers supported by the display. */
+- if (wsi_wl_surface->wl_dmabuf_feedback)
+- f = pick_format_from_surface_dmabuf_feedback(wsi_wl_surface,
+- pCreateInfo->imageFormat);
+- if (f == NULL)
+- f = find_format(&chain->wsi_wl_surface->display->formats,
+- pCreateInfo->imageFormat);
+- if (f != NULL) {
+- num_drm_modifiers = u_vector_length(&f->modifiers);
+- drm_modifiers = u_vector_tail(&f->modifiers);
+- if (num_drm_modifiers > 0)
+- drm_image_params.num_modifier_lists = 1;
+- else
+- drm_image_params.num_modifier_lists = 0;
+- drm_image_params.num_modifiers = &num_drm_modifiers;
+- drm_image_params.modifiers = &drm_modifiers;
+- }
+- }
+- buffer_type = WSI_WL_BUFFER_NATIVE;
+- image_params = &drm_image_params.base;
+- }
+-
+- result = wsi_swapchain_init(wsi_device, &chain->base, device,
+- pCreateInfo, image_params, pAllocator);
+- if (result != VK_SUCCESS)
+- goto fail;
+-
+- bool alpha = pCreateInfo->compositeAlpha ==
+- VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
+-
+- chain->base.destroy = wsi_wl_swapchain_destroy;
+- chain->base.get_wsi_image = wsi_wl_swapchain_get_wsi_image;
+- chain->base.acquire_next_image = wsi_wl_swapchain_acquire_next_image;
+- chain->base.queue_present = wsi_wl_swapchain_queue_present;
+- chain->base.release_images = wsi_wl_swapchain_release_images;
+- chain->base.set_present_mode = wsi_wl_swapchain_set_present_mode;
+- chain->base.wait_for_present = wsi_wl_swapchain_wait_for_present;
+- chain->base.present_mode = present_mode;
+- chain->base.image_count = num_images;
+- chain->extent = pCreateInfo->imageExtent;
+- chain->vk_format = pCreateInfo->imageFormat;
+- chain->buffer_type = buffer_type;
+- if (buffer_type == WSI_WL_BUFFER_NATIVE) {
+- chain->drm_format = wl_drm_format_for_vk_format(chain->vk_format, alpha);
+- } else {
+- chain->shm_format = wl_shm_format_for_vk_format(chain->vk_format, alpha);
+- }
+- chain->num_drm_modifiers = num_drm_modifiers;
+- chain->drm_modifiers = drm_modifiers;
+-
+- chain->queue = wl_display_create_queue(chain->wsi_wl_surface->display->wl_display);
+-
+- if (chain->wsi_wl_surface->display->wp_presentation_notwrapped) {
+- pthread_mutex_init(&chain->present_ids.lock, NULL);
+-
+- wl_list_init(&chain->present_ids.outstanding_list);
+- chain->present_ids.wp_presentation =
+- wl_proxy_create_wrapper(chain->wsi_wl_surface->display->wp_presentation_notwrapped);
+- wl_proxy_set_queue((struct wl_proxy *) chain->present_ids.wp_presentation,
+- chain->queue);
+- }
+-
+- chain->fifo_ready = true;
+-
+- for (uint32_t i = 0; i < chain->base.image_count; i++) {
+- result = wsi_wl_image_init(chain, &chain->images[i],
+- pCreateInfo, pAllocator);
+- if (result != VK_SUCCESS)
+- goto fail_image_init;
+- chain->images[i].busy = false;
+- }
+-
+- *swapchain_out = &chain->base;
+-
+- return VK_SUCCESS;
+-
+-fail_image_init:
+- wsi_wl_swapchain_images_free(chain);
+-
+- wsi_wl_swapchain_chain_free(chain, pAllocator);
+-fail:
+- vk_free(pAllocator, chain);
+- wsi_wl_surface->chain = NULL;
+-
+- return result;
+-}
+-
+-VkResult
+-wsi_wl_init_wsi(struct wsi_device *wsi_device,
+- const VkAllocationCallbacks *alloc,
+- VkPhysicalDevice physical_device)
+-{
+- struct wsi_wayland *wsi;
+- VkResult result;
+-
+- wsi = vk_alloc(alloc, sizeof(*wsi), 8,
+- VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+- if (!wsi) {
+- result = VK_ERROR_OUT_OF_HOST_MEMORY;
+- goto fail;
+- }
+-
+- wsi->physical_device = physical_device;
+- wsi->alloc = alloc;
+- wsi->wsi = wsi_device;
+-
+- wsi->base.get_support = wsi_wl_surface_get_support;
+- wsi->base.get_capabilities2 = wsi_wl_surface_get_capabilities2;
+- wsi->base.get_formats = wsi_wl_surface_get_formats;
+- wsi->base.get_formats2 = wsi_wl_surface_get_formats2;
+- wsi->base.get_present_modes = wsi_wl_surface_get_present_modes;
+- wsi->base.get_present_rectangles = wsi_wl_surface_get_present_rectangles;
+- wsi->base.create_swapchain = wsi_wl_surface_create_swapchain;
+-
+- wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = &wsi->base;
+-
+- return VK_SUCCESS;
+-
+-fail:
+- wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = NULL;
+-
+- return result;
+-}
+-
+-void
+-wsi_wl_finish_wsi(struct wsi_device *wsi_device,
+- const VkAllocationCallbacks *alloc)
+-{
+- struct wsi_wayland *wsi =
+- (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
+- if (!wsi)
+- return;
+-
+- vk_free(alloc, wsi);
+-}
+--
+2.43.0
+
+
+From 6c20c68d10ed9cbb440819a70fc12796418f2726 Mon Sep 17 00:00:00 2001
+From: Denis <benato.denis96@gmail.com>
+Date: Sun, 10 Dec 2023 14:51:30 +0100
+Subject: [PATCH 5/5] wsi: Use vendored gamescope-commit-queue-v1 protocol
+
+---
+ .../wayland-drm/gamescope-commit-queue-v1.xml | 181 ++++++++++++++++++
+ src/egl/wayland/wayland-drm/meson.build | 22 ++-
+ src/vulkan/wsi/meson.build | 2 +-
+ src/vulkan/wsi/wsi_common_wayland.c | 22 +--
+ 4 files changed, 214 insertions(+), 13 deletions(-)
+ create mode 100644 src/egl/wayland/wayland-drm/gamescope-commit-queue-v1.xml
+
+diff --git a/src/egl/wayland/wayland-drm/gamescope-commit-queue-v1.xml b/src/egl/wayland/wayland-drm/gamescope-commit-queue-v1.xml
+new file mode 100644
+index 00000000000..d460e0bc10f
+--- /dev/null
++++ b/src/egl/wayland/wayland-drm/gamescope-commit-queue-v1.xml
+@@ -0,0 +1,181 @@
++<?xml version="1.0" encoding="UTF-8"?>
++<protocol name="gamescope_commit_queue_v1">
++ <copyright>
++ Copyright © 2023 Valve Corporation
++
++ Permission is hereby granted, free of charge, to any person obtaining a
++ copy of this software and associated documentation files (the "Software"),
++ to deal in the Software without restriction, including without limitation
++ the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ and/or sell copies of the Software, and to permit persons to whom the
++ Software is furnished to do so, subject to the following conditions:
++
++ The above copyright notice and this permission notice (including the next
++ paragraph) shall be included in all copies or substantial portions of the
++ Software.
++
++ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ DEALINGS IN THE SOFTWARE.
++ </copyright>
++
++ <interface name="gamescope_commit_queue_manager_v1" version="1">
++ <description summary="commit queuing">
++ By design Wayland uses a "mailbox" style presentation model. Under
++ the mailbox model, when wl_surface.commit is called, the currently
++ pending state is intended to replace the current state immediately.
++
++ If state is committed many times before the compositor repaints a
++ scene, each commit takes place immediately, updating the existing
++ state. When the compositor repaints the display only the most
++ recent accumulation of state is visible. This may lead to client
++ buffers being released without presentation if they were replaced
++ before being displayed.
++
++ There are other presentation models such as FIFO (First In First
++ Out) in which state commits are explicitly queued for future
++ repaint intervals, and client buffers should not be released
++ without being displayed.
++
++ Graphics APIs such as Vulkan aim to support these presentation
++ models, but they are not implementable on top of our mailbox model
++ without the ability to change the default surface state handling
++ behaviour.
++
++ This interface provides a way to control the compositor's surface
++ state handling to enable presentation models other than mailbox.
++
++ It does so by exposing control of a compositor surface state queue,
++ and specifying for each call of wl_surface.commit whether the
++ pending state should be handled in a mailbox or a FIFO fashion.
++
++ Warning! The protocol described in this file is currently in the testing
++ phase. Backward compatible changes may be added together with the
++ corresponding interface version bump. Backward incompatible changes can
++ only be done by creating a new major version of the extension.
++ </description>
++ <enum name="error">
++ <description summary="fatal presentation error">
++ These fatal protocol errors may be emitted in response to
++ illegal requests.
++ </description>
++ <entry name="queue_controller_already_exists" value="0"
++ summary="commit queue controller already exists for surface"/>
++ </enum>
++
++ <request name="destroy" type="destructor">
++ <description summary="unbind from the surface queuing interface">
++ Informs the server that the client will no longer be using
++ this protocol object. Existing objects created by this object
++ are not affected.
++ </description>
++ </request>
++
++ <request name="get_queue_controller">
++ <description summary="request commit queue submission interface for surface">
++ Establish a queue controller for a surface.
++
++ Graphics APIs (EGL, Vulkan) will likely use this protocol
++ internally, so clients using them shouldn't directly use this
++ protocol on surfaces managed by those APIs, or a
++ queue_controller_already_exists protocol error will occur.
++ </description>
++ <arg name="id" type="new_id" interface="gamescope_commit_queue_v1"/>
++ <arg name="surface" type="object" interface="wl_surface"/>
++ </request>
++ </interface>
++
++ <interface name="gamescope_commit_queue_v1" version="1">
++ <description summary="commit queue controller">
++ A queue controller for a surface.
++
++      A Wayland compositor may implicitly queue surface state to
++ allow it to pick the most recently ready state at repaint time,
++ or to allow surface state to contain timing information.
++
++ The commit queue controller object allows explicit control over
++ the queue of upcoming surface state by allowing a client to attach
++ a queue drain mode to pending surface state before it calls
++ wl_surface.commit.
++ </description>
++
++ <enum name="error">
++ <description summary="fatal presentation error">
++ These fatal protocol errors may be emitted in response to
++ illegal requests.
++ </description>
++ <entry name="invalid_queue_mode" value="0"
++ summary="invalid queue mode"/>
++ </enum>
++
++ <enum name="queue_mode">
++ <description summary="Queue drain mode">
++ This enum is used to choose how the compositor processes a queue
++ entry at output repaint time.
++ </description>
++ <entry name="mailbox" value="0">
++ <description summary="Fast forward through past timestamps">
++ State from this queue slot may be updated immediately (without
++ completing a repaint) if newer state is ready to display at
++ repaint time.
++ </description>
++ </entry>
++ <entry name="fifo" value="1">
++ <description summary="Attempt to display each queued commit">
++ This queue slot will be the last state update for this surface
++ that the compositor will process during the repaint in which
++ it is ready for display.
++
++ If the compositor is presenting with tearing, the surface state
++ must be made current for an iteration of the compositor's repaint
++ loop. This may result in the state being visible for a very short
++ duration, with visible artifacts, or even not visible at all for
++ surfaces that aren't full screen.
++
++ The compositor must not cause state processing to stall indefinitely
++ for a surface that is occluded or otherwise not visible. Instead,
++ if the compositor is choosing not to present a surface for reasons
++ unrelated to state readiness, the FIFO condition must be considered
++ satisfied at the moment new state becomes ready to replace the
++ undisplayed state.
++ </description>
++ </entry>
++ </enum>
++
++ <request name="set_queue_mode">
++ <description summary="set the queue draining mode for the pending commit">
++ This request adds a queue drain mode to the pending surface
++      state, which will be committed by the next wl_surface.commit.
++
++ This request tells the compositor how to process the state
++ from that commit when handling its internal state queue.
++
++ If the drain mode is "mailbox", the compositor may continue
++ processing the next state in the queue before it repaints
++ the display.
++
++ If the drain mode is "fifo", the compositor should ensure the
++ queue is not advanced until after this state has been current
++      for a repaint. The queue may be advanced without repaint in the
++ case of off-screen or occluded surfaces.
++
++ The default drain mode when none is specified is "mailbox".
++ </description>
++      <arg name="mode" type="uint" enum="queue_mode"/>
++ </request>
++
++ <request name="destroy" type="destructor">
++ <description summary="Destroy the surface queue controller">
++ Informs the server that the client will no longer be using
++ this protocol object.
++
++ Surface state changes previously made by this protocol are
++ unaffected by this object's destruction.
++ </description>
++ </request>
++ </interface>
++</protocol>
+diff --git a/src/egl/wayland/wayland-drm/meson.build b/src/egl/wayland/wayland-drm/meson.build
+index 48c676d7be4..3fb5a1723b3 100644
+--- a/src/egl/wayland/wayland-drm/meson.build
++++ b/src/egl/wayland/wayland-drm/meson.build
+@@ -59,7 +59,7 @@ libwayland_drm = static_library(
+ # here for now as the maybe-least-bad solution.
+ wp_dir = dep_wl_protocols.get_variable(pkgconfig : 'pkgdatadir', internal : 'pkgdatadir')
+ wp_protos = {
+- 'commit-queue-v1': 'staging/commit-queue/commit-queue-v1.xml',
++ #'commit-queue-v1': 'staging/commit-queue/commit-queue-v1.xml',
+ #'commit-timing-v1': 'staging/commit-timing/commit-timing-v1.xml',
+ 'linux-dmabuf-unstable-v1': 'unstable/linux-dmabuf/linux-dmabuf-unstable-v1.xml',
+ 'presentation-time': 'stable/presentation-time/presentation-time.xml',
+@@ -81,3 +81,23 @@ foreach name, xml : wp_protos
+ )
+ wp_files += { name: [code, header] }
+ endforeach
++
++gamescope_protos = {
++ 'gamescope-commit-queue-v1': 'gamescope-commit-queue-v1.xml',
++}
++foreach name, xml : gamescope_protos
++ code = custom_target(
++ name + '-protocol.c',
++ input : xml,
++ output : name + '-protocol.c',
++ command : [prog_wl_scanner, wl_scanner_arg, '@INPUT@', '@OUTPUT@'],
++ )
++ header = custom_target(
++ name + '-client-protocol.h',
++ input : xml,
++ output : name + '-client-protocol.h',
++ command : [prog_wl_scanner, 'client-header', '@INPUT@', '@OUTPUT@'],
++ )
++ wp_files += { name: [code, header] }
++endforeach
++
+diff --git a/src/vulkan/wsi/meson.build b/src/vulkan/wsi/meson.build
+index 5caea0e8f4f..d90722be253 100644
+--- a/src/vulkan/wsi/meson.build
++++ b/src/vulkan/wsi/meson.build
+@@ -31,7 +31,7 @@ endif
+
+ if with_platform_wayland
+ files_vulkan_wsi += files('wsi_common_wayland.c')
+- files_vulkan_wsi += wp_files['commit-queue-v1']
++ files_vulkan_wsi += wp_files['gamescope-commit-queue-v1']
+ #files_vulkan_wsi += wp_files['commit-timing-v1']
+ files_vulkan_wsi += wp_files['linux-dmabuf-unstable-v1']
+ files_vulkan_wsi += wp_files['presentation-time']
+diff --git a/src/vulkan/wsi/wsi_common_wayland.c b/src/vulkan/wsi/wsi_common_wayland.c
+index d9069dee499..b5f956974af 100644
+--- a/src/vulkan/wsi/wsi_common_wayland.c
++++ b/src/vulkan/wsi/wsi_common_wayland.c
+@@ -41,7 +41,7 @@
+ #include "vk_util.h"
+ #include "wsi_common_entrypoints.h"
+ #include "wsi_common_private.h"
+-#include "commit-queue-v1-client-protocol.h"
++#include "gamescope-commit-queue-v1-client-protocol.h"
+ #include "linux-dmabuf-unstable-v1-client-protocol.h"
+ #include "presentation-time-client-protocol.h"
+ #include "tearing-control-v1-client-protocol.h"
+@@ -114,7 +114,7 @@ struct wsi_wl_display {
+ /* users want per-chain wsi_wl_swapchain->present_ids.wp_presentation */
+ struct wp_presentation *wp_presentation_notwrapped;
+
+- struct wp_commit_queue_manager_v1 *commit_queue_manager;
++ struct gamescope_commit_queue_manager_v1 *commit_queue_manager;
+
+ struct wsi_wayland *wsi_wl;
+
+@@ -170,7 +170,7 @@ struct wsi_wl_swapchain {
+
+ struct wsi_wl_surface *wsi_wl_surface;
+ struct wp_tearing_control_v1 *tearing_control;
+- struct wp_commit_queue_v1 *commit_queue;
++ struct gamescope_commit_queue_v1 *commit_queue;
+ bool can_timestamp;
+
+ struct wl_callback *frame;
+@@ -944,9 +944,9 @@ registry_handle_global(void *data, struct wl_registry *registry,
+ } else if (strcmp(interface, wp_tearing_control_manager_v1_interface.name) == 0) {
+ display->tearing_control_manager =
+ wl_registry_bind(registry, name, &wp_tearing_control_manager_v1_interface, 1);
+- } else if (strcmp(interface, wp_commit_queue_manager_v1_interface.name) == 0) {
++ } else if (strcmp(interface, gamescope_commit_queue_manager_v1_interface.name) == 0) {
+ display->commit_queue_manager =
+- wl_registry_bind(registry, name, &wp_commit_queue_manager_v1_interface, 1);
++ wl_registry_bind(registry, name, &gamescope_commit_queue_manager_v1_interface, 1);
+ }
+ }
+
+@@ -974,7 +974,7 @@ wsi_wl_display_finish(struct wsi_wl_display *display)
+ if (display->wp_presentation_notwrapped)
+ wp_presentation_destroy(display->wp_presentation_notwrapped);
+ if (display->commit_queue_manager)
+- wp_commit_queue_manager_v1_destroy(display->commit_queue_manager);
++ gamescope_commit_queue_manager_v1_destroy(display->commit_queue_manager);
+ if (display->tearing_control_manager)
+ wp_tearing_control_manager_v1_destroy(display->tearing_control_manager);
+ if (display->wl_display_wrapper)
+@@ -2063,8 +2063,8 @@ set_timestamp(struct wsi_wl_swapchain *chain)
+
+ timespec_from_nsec(&target_ts, target);
+
+- wp_commit_queue_v1_set_queue_mode(chain->commit_queue,
+- WP_COMMIT_QUEUE_V1_QUEUE_MODE_FIFO);
++ gamescope_commit_queue_v1_set_queue_mode(chain->commit_queue,
++ GAMESCOPE_COMMIT_QUEUE_V1_QUEUE_MODE_FIFO);
+ chain->last_target_time = target;
+ }
+
+@@ -2312,7 +2312,7 @@ wsi_wl_swapchain_chain_free(struct wsi_wl_swapchain *chain,
+ }
+
+ if (chain->commit_queue)
+- wp_commit_queue_v1_destroy(chain->commit_queue);
++ gamescope_commit_queue_v1_destroy(chain->commit_queue);
+
+ wsi_swapchain_finish(&chain->base);
+ }
+@@ -2369,7 +2369,7 @@ wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
+ old_chain->tearing_control = NULL;
+ }
+ if (old_chain->commit_queue) {
+- wp_commit_queue_v1_destroy(old_chain->commit_queue);
++ gamescope_commit_queue_v1_destroy(old_chain->commit_queue);
+ old_chain->commit_queue = NULL;
+ old_chain->can_timestamp = false;
+ }
+@@ -2491,7 +2491,7 @@ wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
+ chain->legacy_fifo_ready = true;
+ struct wsi_wl_display *dpy = chain->wsi_wl_surface->display;
+ if (dpy->commit_queue_manager) {
+- chain->commit_queue = wp_commit_queue_manager_v1_get_queue_controller(dpy->commit_queue_manager,
++ chain->commit_queue = gamescope_commit_queue_manager_v1_get_queue_controller(dpy->commit_queue_manager,
+ chain->wsi_wl_surface->surface);
+ chain->can_timestamp = true;
+ }
+--
+2.43.0
+
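The hunks above generate client bindings for the vendored gamescope-commit-queue-v1 XML and switch the WSI code from the wp_commit_queue_* symbols to the gamescope_commit_queue_* ones. For reference, the sketch below shows what a minimal standalone client driving a surface in FIFO mode through the same generated header could look like; it is illustrative only and not part of the patch. The `display` and `surface` parameters are assumed to come from the caller's normal Wayland setup, and the gamescope_* calls are the wayland-scanner-generated wrappers already used by wsi_common_wayland.c above.

/* Minimal sketch (not part of the patch): tag one commit as FIFO via the
 * vendored gamescope-commit-queue-v1 protocol.  Assumes `display` and
 * `surface` are an already-connected wl_display/wl_surface pair and that
 * the header generated by the meson rules above is on the include path. */
#include <stdint.h>
#include <string.h>
#include <wayland-client.h>
#include "gamescope-commit-queue-v1-client-protocol.h"

static struct gamescope_commit_queue_manager_v1 *queue_manager;

static void
registry_global(void *data, struct wl_registry *registry, uint32_t name,
                const char *interface, uint32_t version)
{
   /* Bind the manager advertised by gamescope, exactly as
    * registry_handle_global() does in wsi_common_wayland.c. */
   if (strcmp(interface, gamescope_commit_queue_manager_v1_interface.name) == 0)
      queue_manager = wl_registry_bind(registry, name,
                                       &gamescope_commit_queue_manager_v1_interface, 1);
}

static void
registry_global_remove(void *data, struct wl_registry *registry, uint32_t name)
{
}

static const struct wl_registry_listener registry_listener = {
   .global = registry_global,
   .global_remove = registry_global_remove,
};

static void
present_fifo_frame(struct wl_display *display, struct wl_surface *surface)
{
   struct wl_registry *registry = wl_display_get_registry(display);
   wl_registry_add_listener(registry, &registry_listener, NULL);
   wl_display_roundtrip(display);

   if (!queue_manager)
      return; /* compositor does not expose the protocol */

   /* One controller per surface; creating a second one raises the
    * queue_controller_already_exists protocol error. */
   struct gamescope_commit_queue_v1 *queue =
      gamescope_commit_queue_manager_v1_get_queue_controller(queue_manager, surface);

   /* Tag the next wl_surface.commit as FIFO so the compositor keeps this
    * state current for one repaint instead of replacing it immediately. */
   gamescope_commit_queue_v1_set_queue_mode(queue,
                                            GAMESCOPE_COMMIT_QUEUE_V1_QUEUE_MODE_FIFO);
   wl_surface_commit(surface);

   gamescope_commit_queue_v1_destroy(queue);
}

In the driver itself the equivalent calls are made per swapchain: the controller is created in wsi_wl_surface_create_swapchain, the FIFO mode is set in set_timestamp, and the controller is destroyed in wsi_wl_swapchain_chain_free, as shown in the hunks above.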