summarylogtreecommitdiffstats
diff options
context:
space:
mode:
authorAlex Wilson2019-06-17 11:47:49 -0700
committerAlex Wilson2019-06-17 11:47:49 -0700
commit45719b27d39cda7e0dd130196f7d6aa196441187 (patch)
treee1faf26ccb37bd5409bca278e6a586613e2e3ae5
parent1f64e272da5969e7b06952f3d3cff47d1d8a2515 (diff)
downloadaur-45719b27d39cda7e0dd130196f7d6aa196441187.tar.gz
Add mkinitcpio hook for pivy-zfs
-rw-r--r--.SRCINFO6
-rw-r--r--PKGBUILD16
-rw-r--r--zfs-pivy.hook213
-rw-r--r--zfs-pivy.hook.diff47
-rw-r--r--zfs-pivy.install110
-rw-r--r--zfs-pivy.install.diff27
6 files changed, 416 insertions, 3 deletions
diff --git a/.SRCINFO b/.SRCINFO
index f3497510f070..8d3a32de45eb 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,7 +1,7 @@
pkgbase = pivy
pkgdesc = Tools for using PIV smartcards/Yubikeys with ssh-agent and disk encryption
pkgver = 0.2.0
- pkgrel = 1
+ pkgrel = 2
url = https://github.com/arekinath/pivy
arch = x86
arch = x86_64
@@ -14,8 +14,12 @@ pkgbase = pivy
depends = libedit
optdepends = cryptsetup: LUKS encrypted disk support (pivy-luks)
optdepends = zfs-utils: ZFS encrypted pool/fs support (pivy-zfs)
+ source = zfs-pivy.hook
+ source = zfs-pivy.install
source = https://github.com/arekinath/pivy/archive/v0.2.0.tar.gz
source = https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-2.7.4.tar.gz
+ sha256sums = c7c0bb3179388caa8ac6c20864183dd8b305e1865988f487a691278a8900e9a3
+ sha256sums = 32bfff1f6f395c70556b0f7eb364973c70cf795b25fc83aaf85aa81cd48c3b7f
sha256sums = 51e3651a0fae8fe150157130c7380efb29c57b00ba6371c765484e0ae82a9906
sha256sums = 1e3a9fada06c1c060011470ad0ff960de28f9a0515277d7336f7e09362517da6
diff --git a/PKGBUILD b/PKGBUILD
index 0b1f7d712e22..0fe8d22cb253 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -1,11 +1,13 @@
# Maintainer: Alex Wilson <alex at cooperi dot net>
pkgname=pivy
pkgver=0.2.0
-pkgrel=1
+pkgrel=2
pkgdesc="Tools for using PIV smartcards/Yubikeys with ssh-agent and disk encryption"
url="https://github.com/arekinath/pivy"
license=('MPL2')
source=(
+ "zfs-pivy.hook"
+ "zfs-pivy.install"
"https://github.com/arekinath/pivy/archive/v$pkgver.tar.gz"
"https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-2.7.4.tar.gz")
arch=(x86 x86_64)
@@ -13,7 +15,9 @@ depends=(libbsd pcsclite libedit)
optdepends=('cryptsetup: LUKS encrypted disk support (pivy-luks)'
'zfs-utils: ZFS encrypted pool/fs support (pivy-zfs)')
makedepends=(cryptsetup zfs-utils json-c)
-sha256sums=('51e3651a0fae8fe150157130c7380efb29c57b00ba6371c765484e0ae82a9906'
+sha256sums=('c7c0bb3179388caa8ac6c20864183dd8b305e1865988f487a691278a8900e9a3'
+ '32bfff1f6f395c70556b0f7eb364973c70cf795b25fc83aaf85aa81cd48c3b7f'
+ '51e3651a0fae8fe150157130c7380efb29c57b00ba6371c765484e0ae82a9906'
'1e3a9fada06c1c060011470ad0ff960de28f9a0515277d7336f7e09362517da6')
prepare() {
@@ -28,4 +32,12 @@ build() {
package() {
cd "$pkgname-$pkgver"
make prefix=/usr DESTDIR="$pkgdir/" USE_ZFS=yes USE_LUKS=yes install
+
+ cd "$srcdir"
+ hookdir="${pkgdir}/usr/lib/initcpio/hooks"
+ hookinsdir="${pkgdir}/usr/lib/initcpio/install"
+ install -o root -g root -m 0755 -d "$hookdir"
+ install -o root -g root -m 0644 zfs-pivy.hook "${hookdir}/zfs-pivy"
+ install -o root -g root -m 0755 -d "$hookinsdir"
+ install -o root -g root -m 0644 zfs-pivy.install "${hookinsdir}/zfs-pivy"
}
diff --git a/zfs-pivy.hook b/zfs-pivy.hook
new file mode 100644
index 000000000000..f877fc5afa0a
--- /dev/null
+++ b/zfs-pivy.hook
@@ -0,0 +1,213 @@
+#
+# WARNING: This script is parsed by ash in busybox at boot time, not bash!
+# http://linux.die.net/man/1/ash
+# https://wiki.ubuntu.com/DashAsBinSh
+# http://www.jpsdomain.org/public/2008-JP_bash_vs_dash.pdf
+#
+ZPOOL_FORCE=""
+ZPOOL_IMPORT_FLAGS=""
+
+zfs_get_bootfs () {
+ for zfs_dataset in $(zpool list -H -o bootfs); do
+ case ${zfs_dataset} in
+ "" | "-")
+ # bootfs property empty/unset for this pool - keep scanning
+ ;;
+ "no pools available")
+ return 1
+ ;;
+ *)
+ ZFS_DATASET=${zfs_dataset}
+ return 0
+ ;;
+ esac
+ done
+ return 1
+}
+
+zfs_decrypt_fs() {
+ dataset=$1
+
+ # check if 'zfs load-key' is available
+ zfs 2>&1 | grep load-key > /dev/null || return 0
+
+ # check if dataset is encrypted
+ [ "$(zfs get -H -o value encryption "${dataset}")" != "off" ] || return 0
+
+ # check if key is already loaded
+ [ "$(zfs get -H -o value keystatus "${dataset}")" != "available" ] || return 0
+
+ # get the encryption root
+ encryptionroot=$(zfs get -H -o value encryptionroot "${dataset}")
+
+ # export encription root to be used by other hooks (SSH)
+ echo "${encryptionroot}" > /.encryptionroot
+
+ # if the dataset has an ebox, use pivy-zfs to unlock it
+ if [ "$(zfs get -H -o source rfd77:ebox "${dataset}")" = "local" ]; then
+ # loop until pivy-zfs unlock succeeds
+ while [ "$(zfs get -H -o value keystatus "${encryptionroot}")" != "available" ] &&
+ ! eval pivy-zfs unlock "${encryptionroot}"; do
+ sleep 2
+ done
+ fi
+
+ # loop until we get the correct password or key is unlocked by another vector (SSH for instance)
+ while [ "$(zfs get -H -o value keystatus "${encryptionroot}")" != "available" ] &&
+ ! eval zfs load-key "${encryptionroot}"; do
+ sleep 2
+ done
+
+ if [ -f /.encryptionroot ]; then
+ rm /.encryptionroot
+ fi
+}
+
+zfs_mount_handler () {
+ if [ "${ZFS_DATASET}" = "bootfs" ] ; then
+ if ! zfs_get_bootfs ; then
+ # Let's import everything and try again
+ zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE}
+ if ! zfs_get_bootfs ; then
+ err "ZFS: Cannot find bootfs."
+ exit 1
+ fi
+ fi
+ fi
+
+ local pool="${ZFS_DATASET%%/*}"
+ local rwopt_exp="${rwopt:-ro}"
+
+ if ! zpool list -H "${pool}" > /dev/null 2>&1 ; then
+ if [ ! "${rwopt_exp}" = "rw" ]; then
+ msg "ZFS: Importing pool ${pool} readonly."
+ ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -o readonly=on"
+ else
+ msg "ZFS: Importing pool ${pool}."
+ fi
+
+ if ! zpool import ${ZPOOL_IMPORT_FLAGS} -N "${pool}" ${ZPOOL_FORCE} ; then
+ err "ZFS: Unable to import pool ${pool}."
+ exit 1
+ fi
+ fi
+
+ local node="$1"
+ local rootmnt=$(zfs get -H -o value mountpoint "${ZFS_DATASET}")
+ local tab_file="${node}/etc/fstab"
+ local zfs_datasets="$(zfs list -H -o name -t filesystem -r ${ZFS_DATASET})"
+
+ # Mount the root, and any child datasets
+ for dataset in ${zfs_datasets}; do
+ mountpoint=$(zfs get -H -o value mountpoint "${dataset}")
+ canmount=$(zfs get -H -o value canmount "${dataset}")
+ # skip datasets that are not mountable
+ if [ "${canmount}" = "off" ] || [ "${mountpoint}" = "none" ]; then continue; fi
+ if [ "${mountpoint}" = "legacy" ]; then
+ if [ -f "${tab_file}" ]; then
+ if findmnt -snero source -F "${tab_file}" -S "${dataset}" > /dev/null 2>&1; then
+ opt=$(findmnt -snero options -F "${tab_file}" -S "${dataset}")
+ mnt=$(findmnt -snero target -F "${tab_file}" -S "${dataset}")
+ zfs_decrypt_fs "${dataset}"
+ mount -t zfs -o "${opt}" "${dataset}" "${node}${mnt}"
+ fi
+ fi
+ else
+ zfs_decrypt_fs "${dataset}"
+ mount -t zfs -o "zfsutil,${rwopt_exp}" "${dataset}" "${node}/${mountpoint##${rootmnt}}"
+ fi
+ done
+}
+
+set_flags() {
+ # Force-import the pools; needed if a pool was not cleanly exported with 'zpool export <pool>'
+ [ ! "${zfs_force}" = "" ] && ZPOOL_FORCE="-f"
+
+ # Add import directory to import command flags
+ [ ! "${zfs_import_dir}" = "" ] && ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -d ${zfs_import_dir}"
+ [ "${zfs_import_dir}" = "" ] && [ -f /etc/zfs/zpool.cache.org ] && ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -c /etc/zfs/zpool.cache.org"
+}
+
+run_hook() {
+ set_flags
+
+ # Wait 15 seconds for ZFS devices to show up
+ [ "${zfs_wait}" = "" ] && ZFS_WAIT="15" || ZFS_WAIT="${zfs_wait}"
+
+ # Start pcscd, in case we want it for pivy-zfs
+ pcscd
+ while [ ! -f /run/pcscd/pcscd.pid ]; do
+ sleep 0.2
+ done
+ pcscd_pid=$(cat /run/pcscd/pcscd.pid)
+ kill_pcscd() {
+ kill $pcscd_pid
+ }
+ trap kill_pcscd EXIT
+
+ case ${root} in
+ # root=zfs
+ "zfs")
+ mount_handler="zfs_mount_handler"
+ ;;
+ # root=ZFS=... syntax (grub)
+ "ZFS="*)
+ mount_handler="zfs_mount_handler"
+ ZFS_DATASET="${root#*[=]}"
+ ;;
+ esac
+
+ case ${zfs} in
+ "")
+ # no zfs= parameter given - nothing to do
+ ;;
+ auto|bootfs)
+ ZFS_DATASET="bootfs"
+ mount_handler="zfs_mount_handler"
+ local pool="[a-zA-Z][^ ]*"
+ ;;
+ *)
+ ZFS_DATASET="${zfs}"
+ mount_handler="zfs_mount_handler"
+ local pool="${ZFS_DATASET%%/*}"
+ ;;
+ esac
+
+ # Allow at least n seconds for zfs device to show up. Especially
+ # when using zfs_import_dir instead of zpool.cache, the listing of
+ # available pools can be slow, so this loop must be top-tested to
+ # ensure we do one 'zpool import' pass after the timer has expired.
+ sleep ${ZFS_WAIT} & pid=$!
+ local break_after=0
+ while :; do
+ kill -0 $pid > /dev/null 2>&1 || break_after=1
+ if [ -c "/dev/zfs" ]; then
+ zpool import ${ZPOOL_IMPORT_FLAGS} | awk "
+ BEGIN { pool_found=0; online=0; unavail=0 }
+ /^ ${pool} .*/ { pool_found=1 }
+ /^\$/ { pool_found=0 }
+ /UNAVAIL/ { if (pool_found == 1) { unavail=1 } }
+ /ONLINE/ { if (pool_found == 1) { online=1 } }
+ END { if (online == 1 && unavail != 1)
+ { exit 0 }
+ else
+ { exit 1 }
+ }" && break
+ fi
+ [ "$break_after" = 1 ] && break
+ sleep 1
+ done
+ kill $pid > /dev/null 2>&1
+}
+
+run_latehook () {
+ set_flags
+ # only run zpool import, if flags were set (cache file found / zfs_import_dir specified)
+ [ ! "${ZPOOL_IMPORT_FLAGS}" = "" ] && zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE}
+ # walk every imported pool; if it is encrypted at the root, unlock it now
+ for x in $(zpool list -Ho name); do
+ zfs_decrypt_fs "$x"
+ done
+}
+
+# vim:set ts=4 sw=4 ft=sh et:
diff --git a/zfs-pivy.hook.diff b/zfs-pivy.hook.diff
new file mode 100644
index 000000000000..f4aa94310bd6
--- /dev/null
+++ b/zfs-pivy.hook.diff
@@ -0,0 +1,47 @@
+--- /usr/lib/initcpio/hooks/zfs 2019-06-14 20:01:07.000000000 -0700
++++ ./zfs-pivy.hook 2019-06-17 11:36:28.995485785 -0700
+@@ -43,6 +43,15 @@
+ # export encription root to be used by other hooks (SSH)
+ echo "${encryptionroot}" > /.encryptionroot
+
++ # if the dataset has an ebox, use pivy-zfs to unlock it
++ if [ "$(zfs get -H -o source rfd77:ebox "${dataset}")" = "local" ]; then
++ # loop until pivy-zfs unlock succeeds
++ while [ "$(zfs get -H -o value keystatus "${encryptionroot}")" != "available" ] &&
++ ! eval pivy-zfs unlock "${encryptionroot}"; do
++ sleep 2
++ done
++ fi
++
+ # loop until we get the correct password or key is unlocked by another vector (SSH for instance)
+ while [ "$(zfs get -H -o value keystatus "${encryptionroot}")" != "available" ] &&
+ ! eval zfs load-key "${encryptionroot}"; do
+@@ -125,6 +134,17 @@
+ # Wait 15 seconds for ZFS devices to show up
+ [ "${zfs_wait}" = "" ] && ZFS_WAIT="15" || ZFS_WAIT="${zfs_wait}"
+
++ # Start pcscd, in case we want it for pivy-zfs
++ pcscd
++ while [ ! -f /run/pcscd/pcscd.pid ]; do
++ sleep 0.2
++ done
++ pcscd_pid=$(cat /run/pcscd/pcscd.pid)
++ kill_pcscd() {
++ kill $pcscd_pid
++ }
++ trap kill_pcscd EXIT
++
+ case ${root} in
+ # root=zfs
+ "zfs")
+@@ -184,6 +204,10 @@
+ set_flags
+ # only run zpool import, if flags were set (cache file found / zfs_import_dir specified)
+ [ ! "${ZPOOL_IMPORT_FLAGS}" = "" ] && zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE}
++ # walk every imported pool; if it is encrypted at the root, unlock it now
++ for x in $(zpool list -Ho name); do
++ zfs_decrypt_fs "$x"
++ done
+ }
+
+ # vim:set ts=4 sw=4 ft=sh et:
diff --git a/zfs-pivy.install b/zfs-pivy.install
new file mode 100644
index 000000000000..7b8fd045a074
--- /dev/null
+++ b/zfs-pivy.install
@@ -0,0 +1,110 @@
+#!/bin/bash
+
+build() {
+ map add_module \
+ zavl \
+ znvpair \
+ zunicode \
+ zcommon \
+ zfs \
+ spl
+
+ map add_binary \
+ fsck.zfs \
+ mount.zfs \
+ seq \
+ zdb \
+ zed \
+ zfs \
+ zhack \
+ zinject \
+ zpool \
+ zstreamdump \
+ /lib/udev/vdev_id \
+ /lib/udev/zvol_id \
+ findmnt \
+ pcscd \
+ pivy-zfs \
+ pivy-box \
+ pivy-tool
+
+ map add_file \
+ /lib/udev/rules.d/60-zvol.rules \
+ /lib/udev/rules.d/69-vdev.rules \
+ /lib/udev/rules.d/90-zfs.rules \
+ /lib/libgcc_s.so.1 \
+ /etc/libccid_Info.plist \
+ /usr/lib/pcsc/drivers/ifd-ccid.bundle/Contents/Info.plist \
+ /usr/lib/pcsc/drivers/ifd-ccid.bundle/Contents/Linux/libccid.so \
+ /usr/lib/libfl.so.2 \
+ /usr/lib/libusb-1.0.so.0
+
+ map add_dir \
+ /etc/zfs/zed.d
+
+ add_runscript
+
+ # allow mount(8) to "autodetect" ZFS
+ echo 'zfs' >>"${BUILDROOT}/etc/filesystems"
+
+ [[ -f /etc/zfs/zpool.cache ]] && cp "/etc/zfs/zpool.cache" "${BUILDROOT}/etc/zfs/zpool.cache.org"
+ [[ ! -f /etc/modprobe.d/zfs.conf ]] || add_file "/etc/modprobe.d/zfs.conf"
+}
+
+help() {
+ cat<<HELPEOF
+This hook allows you to use ZFS as your root filesystem.
+
+Command Line Setup:
+
+ You can append the following arguments to your kernel parameters list. See
+ https://wiki.archlinux.org/index.php/Kernel_parameters for more information.
+
+ To use ZFS as your boot filesystem:
+
+ zfs=bootfs or zfs=auto or root=zfs
+
+ To use a pool or dataset:
+
+ zfs=<pool/dataset>
+
+ To force importing of a ZFS pool:
+
+ zfs_force=1
+
+ If set to 1, this will use "zpool import -f" when attempting to import
+ pools.
+
+ To change the seconds of time to wait for ZFS devices to show up at boot:
+
+ zfs_wait=30
+
+ To search for devices in a directory other than "/dev":
+
+ zfs_import_dir=/dev/disk/by-uuid
+ or
+ zfs_import_dir=/dev/disk/by-partuuid
+ or
+ zfs_import_dir=/dev/disk/by-path
+ etc.
+
+ Following initcpio convention, the 'rw' option must be specified to load the
+ pool as read/write. Pools are loaded as read only by default.
+
+Examples:
+
+ To use bootfs on your pool, use
+
+ zfs=bootfs rw
+
+ This will setup your root using tank/root zfs pool.
+
+ zfs=tank/root rw
+
+If you want to set properties for zfs-on-linux module, you should add them to
+/etc/modprobe.d/zfs.conf and then rebuild initcpio.
+
+HELPEOF
+}
+
+# vim: set ts=4 sw=4 ft=sh et:
diff --git a/zfs-pivy.install.diff b/zfs-pivy.install.diff
new file mode 100644
index 000000000000..d9e3d82193a8
--- /dev/null
+++ b/zfs-pivy.install.diff
@@ -0,0 +1,27 @@
+--- /usr/lib/initcpio/install/zfs 2019-06-14 20:01:07.000000000 -0700
++++ ./zfs-pivy.install 2019-06-17 11:28:32.631104239 -0700
+@@ -22,13 +22,22 @@
+ zstreamdump \
+ /lib/udev/vdev_id \
+ /lib/udev/zvol_id \
+- findmnt
++ findmnt \
++ pcscd \
++ pivy-zfs \
++ pivy-box \
++ pivy-tool
+
+ map add_file \
+ /lib/udev/rules.d/60-zvol.rules \
+ /lib/udev/rules.d/69-vdev.rules \
+ /lib/udev/rules.d/90-zfs.rules \
+- /lib/libgcc_s.so.1
++ /lib/libgcc_s.so.1 \
++ /etc/libccid_Info.plist \
++ /usr/lib/pcsc/drivers/ifd-ccid.bundle/Contents/Info.plist \
++ /usr/lib/pcsc/drivers/ifd-ccid.bundle/Contents/Linux/libccid.so \
++ /usr/lib/libfl.so.2 \
++ /usr/lib/libusb-1.0.so.0
+
+ map add_dir \
+ /etc/zfs/zed.d