summarylogtreecommitdiffstats
diff options
context:
space:
mode:
authorYurii Kolesnykov2019-09-12 13:05:07 +0300
committerYurii Kolesnykov2019-09-12 13:05:07 +0300
commitfc4a94d00fbb27f8ea7d5d35d662d960f246435f (patch)
treeead69701755abafea63180b2479b920e2e9b66a5
downloadaur-fc4a94d00fbb27f8ea7d5d35d662d960f246435f.tar.gz
init
Signed-off-by: Yurii Kolesnykov <root@yurikoles.com>
-rw-r--r--.SRCINFO37
-rw-r--r--.gitignore5
-rw-r--r--PKGBUILD64
-rw-r--r--zfs-utils.initcpio.hook189
-rw-r--r--zfs-utils.initcpio.install101
-rw-r--r--zfs-utils.initcpio.zfsencryptssh.install39
-rw-r--r--zfs-utils.install18
7 files changed, 453 insertions, 0 deletions
diff --git a/.SRCINFO b/.SRCINFO
new file mode 100644
index 000000000000..c5dea5697f24
--- /dev/null
+++ b/.SRCINFO
@@ -0,0 +1,37 @@
+pkgbase = zfs-utils-head-git
+ pkgdesc = Kernel module support files for the Zettabyte File System.
+ pkgver = 0.8.0.r270.gd66620681
+ pkgrel = 1
+ url = http://zfsonlinux.org/
+ install = zfs-utils.install
+ arch = x86_64
+ groups = zfs-head-git
+ license = CDDL
+ makedepends = python
+ makedepends = python-setuptools
+ makedepends = python-cffi
+ makedepends = git
+ optdepends = python: pyzfs and extra utilities,
+ optdepends = python-cffi: pyzfs
+ provides = zfs-utils-git
+ provides = zfs-utils
+ provides = spl-utils
+ conflicts = zfs-utils-git
+ conflicts = zfs-utils
+ conflicts = spl-utils
+ replaces = spl-utils-common-git
+ replaces = zfs-utils-common-git
+ backup = etc/zfs/zed.d/zed.rc
+ backup = etc/default/zfs
+ backup = etc/modules-load.d/zfs.conf
+ source = git+https://github.com/zfsonlinux/zfs.git
+ source = zfs-utils.initcpio.install
+ source = zfs-utils.initcpio.hook
+ source = zfs-utils.initcpio.zfsencryptssh.install
+ sha256sums = SKIP
+ sha256sums = 29a8a6d76fff01b71ef1990526785405d9c9410bdea417b08b56107210d00b10
+ sha256sums = 78e038f95639c209576e7fa182afd56ac11a695af9ebfa958709839ff1e274ce
+ sha256sums = 29080a84e5d7e36e63c4412b98646043724621245b36e5288f5fed6914da5b68
+
+pkgname = zfs-utils-head-git
+
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 000000000000..e0498a7c8766
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+*.log
+zfs
+src
+pkg
+*.pkg.* \ No newline at end of file
diff --git a/PKGBUILD b/PKGBUILD
new file mode 100644
index 000000000000..f0926e86baed
--- /dev/null
+++ b/PKGBUILD
@@ -0,0 +1,64 @@
+# Maintainer: Yurii Kolesnykov <root@yurikoles.com>
+# Contributor: Jan Houben <jan@nexttrex.de>
+# Contributor: Jesus Alvarez <jeezusjr at gmail dot com>
+
pkgname="zfs-utils-head-git"
pkgver=0.8.0.r270.gd66620681
pkgrel=1
pkgdesc="Kernel module support files for the Zettabyte File System."
makedepends=("python" "python-setuptools" "python-cffi" "git")
# BUGFIX: the two array elements were separated by a stray comma, which is
# not an element separator in bash and ended up embedded in the first
# entry ("python: pyzfs and extra utilities,") — visible in .SRCINFO too.
optdepends=("python: pyzfs and extra utilities" "python-cffi: pyzfs")
arch=("x86_64")
url="http://zfsonlinux.org/"
source=("git+https://github.com/zfsonlinux/zfs.git"
        "zfs-utils.initcpio.install"
        "zfs-utils.initcpio.hook"
        "zfs-utils.initcpio.zfsencryptssh.install")
sha256sums=("SKIP"
            "29a8a6d76fff01b71ef1990526785405d9c9410bdea417b08b56107210d00b10"
            "78e038f95639c209576e7fa182afd56ac11a695af9ebfa958709839ff1e274ce"
            "29080a84e5d7e36e63c4412b98646043724621245b36e5288f5fed6914da5b68")
license=("CDDL")
groups=("zfs-head-git")
provides=("zfs-utils-git" "zfs-utils" "spl-utils")
install=zfs-utils.install
conflicts=("zfs-utils-git" "zfs-utils" "spl-utils")
replaces=("spl-utils-common-git" "zfs-utils-common-git")
backup=('etc/zfs/zed.d/zed.rc' 'etc/default/zfs' 'etc/modules-load.d/zfs.conf')
+
# Derive the VCS package version from 'git describe' output,
# e.g. "zfs-0.8.0-270-gd66620681" -> "0.8.0.r270.gd66620681".
pkgver() {
    cd "${srcdir}/zfs"
    git describe --long --tags |
        sed -e 's/^zfs.//' \
            -e 's/\([^-]*-g\)/r\1/' \
            -e 's/-/./g'
}
+
# Configure and compile the ZFS userland tools (--with-config=user, no
# kernel modules) with Arch Linux's directory layout, then build.
build() {
    cd "${srcdir}/zfs"
    ./autogen.sh
    ./configure \
        --prefix=/usr --sysconfdir=/etc --sbindir=/usr/bin --with-mounthelperdir=/usr/bin \
        --libdir=/usr/lib --datadir=/usr/share --includedir=/usr/include \
        --with-udevdir=/lib/udev --libexecdir=/usr/lib/zfs-${pkgver} \
        --with-config=user --enable-systemd --enable-pyzfs
    make
}
+
# Install the build products into ${pkgdir} and adapt the tree to Arch
# conventions (prune foreign init files, relocate /lib, ship initcpio
# support files). Runs under makepkg with srcdir/pkgdir set.
package() {
    cd "${srcdir}/zfs"
    make DESTDIR="${pkgdir}" install
    # Remove unneeded files
    rm -r "${pkgdir}"/etc/init.d
    rm -r "${pkgdir}"/usr/share/initramfs-tools
    rm -r "${pkgdir}"/usr/lib/modules-load.d
    # move module tree /lib -> /usr/lib (cp merges into the existing usr/lib)
    cp -r "${pkgdir}"/{lib,usr}
    rm -r "${pkgdir}"/lib
    # Autoload the zfs module at boot
    mkdir -p "${pkgdir}/etc/modules-load.d"
    printf "%s\n" "zfs" > "${pkgdir}/etc/modules-load.d/zfs.conf"
    # fix permissions
    # BUGFIX: ${pkgdir} was unquoted here (SC2086) and would break on
    # build paths containing whitespace.
    chmod 750 "${pkgdir}"/etc/sudoers.d
    # Install the support files
    install -D -m644 "${srcdir}"/zfs-utils.initcpio.hook "${pkgdir}"/usr/lib/initcpio/hooks/zfs
    install -D -m644 "${srcdir}"/zfs-utils.initcpio.install "${pkgdir}"/usr/lib/initcpio/install/zfs
    install -D -m644 "${srcdir}"/zfs-utils.initcpio.zfsencryptssh.install "${pkgdir}"/usr/lib/initcpio/install/zfsencryptssh
    install -D -m644 contrib/bash_completion.d/zfs "${pkgdir}"/usr/share/bash-completion/completions/zfs
}
diff --git a/zfs-utils.initcpio.hook b/zfs-utils.initcpio.hook
new file mode 100644
index 000000000000..b75d2fa17961
--- /dev/null
+++ b/zfs-utils.initcpio.hook
@@ -0,0 +1,189 @@
#
# WARNING: This script is parsed by ash in busybox at boot time, not bash!
# http://linux.die.net/man/1/ash
# https://wiki.ubuntu.com/DashAsBinSh
# http://www.jpsdomain.org/public/2008-JP_bash_vs_dash.pdf
#
# Globals shared by the hook functions below:
#   ZPOOL_FORCE        - set to '-f' by set_flags when zfs_force is given
#   ZPOOL_IMPORT_FLAGS - extra arguments passed to every 'zpool import'
ZPOOL_FORCE=""
ZPOOL_IMPORT_FLAGS=""
+
# Find the first usable 'bootfs' property among the imported pools and
# store it in the global ZFS_DATASET.
# Returns 0 on success, 1 when no bootfs is set anywhere.
zfs_get_bootfs () {
    local candidate
    for candidate in $(zpool list -H -o bootfs); do
        # NOTE(review): because the command substitution word-splits, the
        # multi-word error text can only ever be compared against single
        # words; the check is kept for parity with the original logic.
        if [ "${candidate}" = "no pools available" ]; then
            return 1
        fi
        # '-' (or an empty field) means this pool has no bootfs set
        if [ -n "${candidate}" ] && [ "${candidate}" != "-" ]; then
            ZFS_DATASET=${candidate}
            return 0
        fi
    done
    return 1
}
+
# Unlock the encryption root of an encrypted dataset, looping until the
# key is loaded here or by another vector (e.g. an SSH session).
# No-op for unencrypted datasets or zfs builds without 'load-key'.
#   $1 - dataset name
zfs_decrypt_fs() {
    dataset=$1

    # bail out when this zfs build has no native encryption support
    if ! zfs 2>&1 | grep load-key > /dev/null; then
        return 0
    fi

    # nothing to do for plaintext datasets
    if [ "$(zfs get -H -o value encryption "${dataset}")" = "off" ]; then
        return 0
    fi

    # the key may already have been loaded (e.g. remotely via SSH)
    if [ "$(zfs get -H -o value keystatus "${dataset}")" = "available" ]; then
        return 0
    fi

    encryptionroot=$(zfs get -H -o value encryptionroot "${dataset}")

    # export the encryption root so other hooks (SSH) can unlock it too
    echo "${encryptionroot}" > /.encryptionroot

    # retry until we get the correct password or the key shows up
    # through another vector
    while [ "$(zfs get -H -o value keystatus "${encryptionroot}")" != "available" ] &&
        ! eval zfs load-key "${encryptionroot}"; do
        sleep 2
    done

    if [ -f /.encryptionroot ]; then
        rm /.encryptionroot
    fi
}
+
# Import (if necessary) the pool backing ${ZFS_DATASET} and mount the
# root dataset plus all of its child filesystems under the new root.
#   $1 - mount root (e.g. /new_root)
# Exits the initramfs (exit 1) when the bootfs cannot be found or the
# pool cannot be imported.
zfs_mount_handler () {
    if [ "${ZFS_DATASET}" = "bootfs" ] ; then
        if ! zfs_get_bootfs ; then
            # Lets import everything and try again
            zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE}
            if ! zfs_get_bootfs ; then
                err "ZFS: Cannot find bootfs."
                exit 1
            fi
        fi
    fi

    local pool="${ZFS_DATASET%%/*}"
    local rwopt_exp="${rwopt:-ro}"

    # BUGFIX: was '2>1 > /dev/null', which sent stderr to a file
    # literally named '1' instead of silencing it.
    if ! zpool list -H "${pool}" > /dev/null 2>&1 ; then
        if [ "${rwopt_exp}" != "rw" ]; then
            msg "ZFS: Importing pool ${pool} readonly."
            ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -o readonly=on"
        else
            msg "ZFS: Importing pool ${pool}."
        fi

        if ! zpool import ${ZPOOL_IMPORT_FLAGS} -N "${pool}" ${ZPOOL_FORCE} ; then
            err "ZFS: Unable to import pool ${pool}."
            exit 1
        fi
    fi

    local node="$1"
    local rootmnt
    rootmnt=$(zfs get -H -o value mountpoint "${ZFS_DATASET}")
    local tab_file="${node}/etc/fstab"
    local zfs_datasets
    zfs_datasets="$(zfs list -H -o name -t filesystem -r ${ZFS_DATASET})"

    # Mount the root, and any child datasets
    for dataset in ${zfs_datasets}; do
        mountpoint=$(zfs get -H -o value mountpoint "${dataset}")
        canmount=$(zfs get -H -o value canmount "${dataset}")
        # skip datasets that must not be mounted
        # (quoted and split into two tests; the deprecated '-o' operator
        # on unquoted values was fragile)
        [ "${canmount}" = "off" ] && continue
        [ "${mountpoint}" = "none" ] && continue
        if [ "${mountpoint}" = "legacy" ]; then
            # legacy mountpoints are resolved through the target's fstab
            if [ -f "${tab_file}" ]; then
                if findmnt -snero source -F "${tab_file}" -S "${dataset}" > /dev/null 2>&1; then
                    opt=$(findmnt -snero options -F "${tab_file}" -S "${dataset}")
                    mnt=$(findmnt -snero target -F "${tab_file}" -S "${dataset}")
                    zfs_decrypt_fs "${dataset}"
                    mount -t zfs -o "${opt}" "${dataset}" "${node}${mnt}"
                fi
            fi
        else
            zfs_decrypt_fs "${dataset}"
            mount -t zfs -o "zfsutil,${rwopt_exp}" "${dataset}" "${node}/${mountpoint##${rootmnt}}"
        fi
    done
}
+
# Translate kernel-cmdline variables (zfs_force, zfs_import_dir) into the
# ZPOOL_FORCE / ZPOOL_IMPORT_FLAGS globals used by every 'zpool import'.
set_flags() {
    # honour zfs_force=1: pools that were not cleanly exported need -f
    if [ -n "${zfs_force}" ]; then
        ZPOOL_FORCE="-f"
    fi

    # an explicit device directory wins; otherwise fall back to the
    # (renamed) zpool.cache copied in by the install hook, if present
    if [ -n "${zfs_import_dir}" ]; then
        ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -d ${zfs_import_dir}"
    elif [ -f /etc/zfs/zpool.cache.org ]; then
        ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -c /etc/zfs/zpool.cache.org"
    fi
}
+
# Early hook: parse root=/zfs=/zfs_wait= kernel parameters, select the
# mount handler, and wait (up to ZFS_WAIT seconds) for the devices of
# the chosen pool to become available before letting boot continue.
run_hook() {
    set_flags

    # Wait 15 seconds for ZFS devices to show up
    [ "${zfs_wait}" = "" ] && ZFS_WAIT="15" || ZFS_WAIT="${zfs_wait}"

    case ${root} in
        # root=zfs
        "zfs")
            mount_handler="zfs_mount_handler"
            ;;
        # root=ZFS=... syntax (grub)
        "ZFS="*)
            mount_handler="zfs_mount_handler"
            ZFS_DATASET="${root#*[=]}"
            ;;
    esac

    case ${zfs} in
        "")
            # zfs= not given on the command line
            ;;
        auto|bootfs)
            ZFS_DATASET="bootfs"
            mount_handler="zfs_mount_handler"
            # match any pool name in the 'zpool import' listing below
            local pool="[a-zA-Z][^ ]*"
            ;;
        *)
            ZFS_DATASET="${zfs}"
            mount_handler="zfs_mount_handler"
            local pool="${ZFS_DATASET%%/*}"
            ;;
    esac

    # Allow at least n seconds for zfs device to show up. Especially
    # when using zfs_import_dir instead of zpool.cache, the listing of
    # available pools can be slow, so this loop must be top-tested to
    # ensure we do one 'zpool import' pass after the timer has expired.
    sleep "${ZFS_WAIT}" & pid=$!
    local break_after=0
    while :; do
        kill -0 "$pid" > /dev/null 2>&1 || break_after=1
        if [ -c "/dev/zfs" ]; then
            zpool import ${ZPOOL_IMPORT_FLAGS} | awk "
            BEGIN { pool_found=0; online=0; unavail=0 }
            /^ ${pool} .*/ { pool_found=1 }
            /^\$/ { pool_found=0 }
            /UNAVAIL/ { if (pool_found == 1) { unavail=1 } }
            /ONLINE/ { if (pool_found == 1) { online=1 } }
            END { if (online == 1 && unavail != 1)
                { exit 0 }
                else
                { exit 1 }
            }" && break
        fi
        # BUGFIX: was '[ $break_after == 1 ]'; '==' is a bashism that the
        # busybox ash 'test' builtin (see header warning) does not
        # guarantee — POSIX string equality is '='.
        [ "$break_after" = 1 ] && break
        sleep 1
    done
    kill "$pid" > /dev/null 2>&1
}
+
# Late hook: non-interactively import any remaining pools, but only when
# set_flags produced import flags (cache file found / zfs_import_dir set).
run_latehook () {
    set_flags
    [ -n "${ZPOOL_IMPORT_FLAGS}" ] && zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE}
}
+
+# vim:set ts=4 sw=4 ft=sh et:
diff --git a/zfs-utils.initcpio.install b/zfs-utils.initcpio.install
new file mode 100644
index 000000000000..350377b0d58c
--- /dev/null
+++ b/zfs-utils.initcpio.install
@@ -0,0 +1,101 @@
+#!/bin/bash
+
# mkinitcpio install hook: stage the ZFS kernel modules, userland
# binaries, udev rules and zed config into the initramfs image.
build() {
    map add_module \
        zavl \
        znvpair \
        zunicode \
        zcommon \
        zfs \
        spl

    map add_binary \
        fsck.zfs \
        mount.zfs \
        seq \
        zdb \
        zed \
        zfs \
        zhack \
        zinject \
        zpool \
        zstreamdump \
        /lib/udev/vdev_id \
        /lib/udev/zvol_id \
        findmnt

    map add_file \
        /lib/udev/rules.d/60-zvol.rules \
        /lib/udev/rules.d/69-vdev.rules \
        /lib/udev/rules.d/90-zfs.rules \
        /lib/libgcc_s.so.1

    map add_dir \
        /etc/zfs/zed.d

    add_runscript

    # allow mount(8) to "autodetect" ZFS
    echo 'zfs' >>"${BUILDROOT}/etc/filesystems"

    # BUGFIX: these were '[[ -f ... ]] && cmd' one-liners; when the
    # optional file was absent the last one made build() itself return
    # non-zero. Explicit 'if' keeps the hook from failing spuriously.
    if [[ -f /etc/zfs/zpool.cache ]]; then
        cp "/etc/zfs/zpool.cache" "${BUILDROOT}/etc/zfs/zpool.cache.org"
    fi
    if [[ -f /etc/modprobe.d/zfs.conf ]]; then
        add_file "/etc/modprobe.d/zfs.conf"
    fi
}
+
# Print the hook's help text (shown by 'mkinitcpio -H zfs'); the heredoc
# body below is user-facing output and is reproduced unchanged.
help() {
    cat<<HELPEOF
This hook allows you to use ZFS as your root filesystem.

Command Line Setup:

    You can append the following arguments to your kernel parameters list. See
    https://wiki.archlinux.org/index.php/Kernel_parameters for more information.

    To use ZFS as your boot filesystem:

        zfs=bootfs or zfs=auto or root=zfs

    To use a pool or dataset:

        zfs=<pool/dataset>

    To force importing of a ZFS pool:

        zfs_force=1

        If set to 1, this will use "zpool import -f" when attempting to import
        pools.

    To change the seconds of time to wait for ZFS devices to show up at boot:

        zfs_wait=30

    To search for devices in a directory other than "/dev":

        zfs_import_dir=/dev/disk/by-uuid
        or
        zfs_import_dir=/dev/disk/by-partuuid
        or
        zfs_import_dir=/dev/disk/by-path
        etc.

    Following initcpio convention, the 'rw' option must be specified to load the
    pool as read/write. Pools are loaded as read only by default.

Examples:

    To use bootfs on your pool, use

        zfs=bootfs rw

    This will setup your root using tank/root zfs pool.

        zfs=tank/root rw

If you want to set properties for zfs-on-linux module, you should add them to
/etc/modprobe.d/zfs.conf and then rebuild initcpio.

HELPEOF
}
+
+# vim: set ts=4 sw=4 ft=sh et:
diff --git a/zfs-utils.initcpio.zfsencryptssh.install b/zfs-utils.initcpio.zfsencryptssh.install
new file mode 100644
index 000000000000..e0ef04beabfe
--- /dev/null
+++ b/zfs-utils.initcpio.zfsencryptssh.install
@@ -0,0 +1,39 @@
+#!/bin/bash
+
# Create a minimal /etc/passwd and /etc/shells inside the initramfs so a
# remote (SSH) root login lands directly in the decrypt shell.
make_etc_passwd() {
    printf '%s\n' 'root:x:0:0:root:/root:/bin/zfsdecrypt_shell' > "${BUILDROOT}"/etc/passwd
    printf '%s\n' '/bin/zfsdecrypt_shell' > "${BUILDROOT}"/etc/shells
}
+
# Install /bin/zfsdecrypt_shell into the initramfs: a tiny login shell
# that unlocks the dataset recorded in /.encryptionroot and then lets the
# stalled boot process continue.
make_zfsdecrypt_shell() {
    shell_body='#!/bin/sh
if [ -f "/.encryptionroot" ]; then
    # source zfs hook functions
    . /hooks/zfs
    # decrypt bootfs
    zfs_decrypt_fs "$(cat /.encryptionroot)"
    # kill pending decryption attempt to allow the boot process to continue
    killall zfs
else
    echo "ZFS is not ready yet. Please wait!"
fi'
    printf '%s' "$shell_body" > "${BUILDROOT}"/bin/zfsdecrypt_shell
    chmod a+x "${BUILDROOT}"/bin/zfsdecrypt_shell
}
+
# mkinitcpio build entry point: stage the passwd/shells files and the
# decrypt shell into the initramfs image.
build() {
    make_etc_passwd
    make_zfsdecrypt_shell
}
+
# mkinitcpio help entry point (shown by 'mkinitcpio -H zfsencryptssh').
# BUGFIX: the help text read "and/ormkinitcpio-ppp" (missing space).
help ()
{
    cat<<HELPEOF
This hook is meant to be used in conjunction with mkinitcpio-dropbear,
mkinitcpio-netconf and/or mkinitcpio-ppp. This will provide a way to unlock
your encrypted ZFS root filesystem remotely.
HELPEOF
}
+
+# vim: set ts=4 sw=4 ft=sh et:
diff --git a/zfs-utils.install b/zfs-utils.install
new file mode 100644
index 000000000000..914ed46bf477
--- /dev/null
+++ b/zfs-utils.install
@@ -0,0 +1,18 @@
# pacman install-script hook: warn about manual migration steps when
# upgrading across known breaking changes.
#   $1 - new package version (unused)
#   $2 - previously installed package version
post_upgrade() {

    # If upgrading from 2017.12.08.r3208.4e9b15696-1 or older
    # display zfs-import warning
    # BUGFIX: "$2" was unquoted (SC2086); quoting keeps vercmp's argument
    # list stable even for unexpected values.
    if [[ $(vercmp "$2" 2017.12.08.r3208.4e9b15696-1) -le 0 ]]; then
        echo '>>> WARNING: A new systemd unit file was added to archzfs!'
        echo '>>> You may need to enable zfs-import.target'
        echo '>>> See https://github.com/archzfs/archzfs/issues/186'
    fi

    # If upgrading from 2018.02.02.r3272.1b66810ba-1 or older
    # display encryption format change warning
    if [[ $(vercmp "$2" 2018.02.02.r3272.1b66810ba-1) -le 0 ]]; then
        echo '>>> WARNING: The on-disk format for encrypted datasets has changed!'
        echo '>>> All encrypted datasets will mount read only and need to be migrated.'
        echo '>>> See https://github.com/archzfs/archzfs/issues/222'
    fi
}