Diffstat (limited to 'zfs-pivy.hook')
 -rw-r--r--  zfs-pivy.hook  213
 1 file changed, 213 insertions, 0 deletions
diff --git a/zfs-pivy.hook b/zfs-pivy.hook
new file mode 100644
index 000000000000..f877fc5afa0a
--- /dev/null
+++ b/zfs-pivy.hook
@@ -0,0 +1,213 @@
+#
+# WARNING: This script is parsed by ash in busybox at boot time, not bash!
+# http://linux.die.net/man/1/ash
+# https://wiki.ubuntu.com/DashAsBinSh
+# http://www.jpsdomain.org/public/2008-JP_bash_vs_dash.pdf
+#
+ZPOOL_FORCE=""
+ZPOOL_IMPORT_FLAGS=""
+
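+# Find a pool with the 'bootfs' property set and record that dataset in
+# ZFS_DATASET; returns 1 when no bootfs can be found.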
+zfs_get_bootfs () {
+ for zfs_dataset in $(zpool list -H -o bootfs); do
+ case ${zfs_dataset} in
+ "" | "-")
+ # skip this line/dataset
+ ;;
+ "no pools available")
+ return 1
+ ;;
+ *)
+ ZFS_DATASET=${zfs_dataset}
+ return 0
+ ;;
+ esac
+ done
+ return 1
+}
+
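+# Make sure the key for the encryption root of $1 is loaded: if the dataset
+# carries a locally set rfd77:ebox property, try 'pivy-zfs unlock' first,
+# then fall back to prompting via 'zfs load-key'.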
+zfs_decrypt_fs() {
+ dataset=$1
+
+ # check if 'zfs load-key' is available
+ zfs 2>&1 | grep load-key > /dev/null || return 0
+
+ # check if dataset is encrypted
+ [ "$(zfs get -H -o value encryption "${dataset}")" != "off" ] || return 0
+
+ # check if key is already loaded
+ [ "$(zfs get -H -o value keystatus "${dataset}")" != "available" ] || return 0
+
+ # get the encryption root
+ encryptionroot=$(zfs get -H -o value encryptionroot "${dataset}")
+
+    # export the encryption root so other hooks (e.g. the SSH unlock hook) can use it
+    echo "${encryptionroot}" > /.encryptionroot
+
+ # if the dataset has an ebox, use pivy-zfs to unlock it
+    if [ "$(zfs get -H -o source rfd77:ebox "${dataset}")" = "local" ]; then
+ # loop until pivy-zfs unlock succeeds
+ while [ "$(zfs get -H -o value keystatus "${encryptionroot}")" != "available" ] &&
+ ! eval pivy-zfs unlock "${encryptionroot}"; do
+ sleep 2
+ done
+ fi
+
+ # loop until we get the correct password or key is unlocked by another vector (SSH for instance)
+ while [ "$(zfs get -H -o value keystatus "${encryptionroot}")" != "available" ] &&
+ ! eval zfs load-key "${encryptionroot}"; do
+ sleep 2
+ done
+
+ if [ -f /.encryptionroot ]; then
+ rm /.encryptionroot
+ fi
+}
+
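+# Import the pool holding ZFS_DATASET if it is not imported yet, then decrypt
+# and mount the root dataset and all of its child filesystems under the new
+# root directory passed in $1.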
+zfs_mount_handler () {
+ if [ "${ZFS_DATASET}" = "bootfs" ] ; then
+ if ! zfs_get_bootfs ; then
+            # Let's import everything and try again
+ zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE}
+ if ! zfs_get_bootfs ; then
+ err "ZFS: Cannot find bootfs."
+ exit 1
+ fi
+ fi
+ fi
+
+ local pool="${ZFS_DATASET%%/*}"
+ local rwopt_exp="${rwopt:-ro}"
+
+    if ! zpool list -H "${pool}" > /dev/null 2>&1 ; then
+ if [ ! "${rwopt_exp}" = "rw" ]; then
+ msg "ZFS: Importing pool ${pool} readonly."
+ ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -o readonly=on"
+ else
+ msg "ZFS: Importing pool ${pool}."
+ fi
+
+ if ! zpool import ${ZPOOL_IMPORT_FLAGS} -N "${pool}" ${ZPOOL_FORCE} ; then
+ err "ZFS: Unable to import pool ${pool}."
+ exit 1
+ fi
+ fi
+
+ local node="$1"
+ local rootmnt=$(zfs get -H -o value mountpoint "${ZFS_DATASET}")
+ local tab_file="${node}/etc/fstab"
+    local zfs_datasets="$(zfs list -H -o name -t filesystem -r "${ZFS_DATASET}")"
+
+ # Mount the root, and any child datasets
+ for dataset in ${zfs_datasets}; do
+ mountpoint=$(zfs get -H -o value mountpoint "${dataset}")
+ canmount=$(zfs get -H -o value canmount "${dataset}")
+        # skip datasets that cannot or should not be mounted here
+        [ "${canmount}" = "off" -o "${mountpoint}" = "none" ] && continue
+ if [ ${mountpoint} = "legacy" ]; then
+ if [ -f "${tab_file}" ]; then
+ if findmnt -snero source -F "${tab_file}" -S "${dataset}" > /dev/null 2>&1; then
+ opt=$(findmnt -snero options -F "${tab_file}" -S "${dataset}")
+ mnt=$(findmnt -snero target -F "${tab_file}" -S "${dataset}")
+ zfs_decrypt_fs "${dataset}"
+ mount -t zfs -o "${opt}" "${dataset}" "${node}${mnt}"
+ fi
+ fi
+ else
+ zfs_decrypt_fs "${dataset}"
+ mount -t zfs -o "zfsutil,${rwopt_exp}" "${dataset}" "${node}/${mountpoint##${rootmnt}}"
+ fi
+ done
+}
+
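+# Turn the zfs_force and zfs_import_dir kernel parameters into flags for
+# 'zpool import', falling back to the saved zpool.cache.org when no import
+# directory was given.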
+set_flags() {
+ # Force import the pools, useful if the pool has not properly been exported using 'zpool export <pool>'
+ [ ! "${zfs_force}" = "" ] && ZPOOL_FORCE="-f"
+
+ # Add import directory to import command flags
+ [ ! "${zfs_import_dir}" = "" ] && ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -d ${zfs_import_dir}"
+ [ "${zfs_import_dir}" = "" ] && [ -f /etc/zfs/zpool.cache.org ] && ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -c /etc/zfs/zpool.cache.org"
+}
+
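+# Early hook: start pcscd for pivy-zfs, work out the root dataset from the
+# kernel command line and wait up to ZFS_WAIT seconds for the pool's devices
+# to become available.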
+run_hook() {
+ set_flags
+
+ # Wait 15 seconds for ZFS devices to show up
+ [ "${zfs_wait}" = "" ] && ZFS_WAIT="15" || ZFS_WAIT="${zfs_wait}"
+
+ # Start pcscd, in case we want it for pivy-zfs
+ pcscd
+    while [ ! -f /run/pcscd/pcscd.pid ]; do
+ sleep 0.2
+ done
+ pcscd_pid=$(cat /run/pcscd/pcscd.pid)
+ kill_pcscd() {
+ kill $pcscd_pid
+ }
+ trap kill_pcscd EXIT
+
+ case ${root} in
+ # root=zfs
+ "zfs")
+ mount_handler="zfs_mount_handler"
+ ;;
+ # root=ZFS=... syntax (grub)
+ "ZFS="*)
+ mount_handler="zfs_mount_handler"
+ ZFS_DATASET="${root#*[=]}"
+ ;;
+ esac
+
+ case ${zfs} in
+ "")
+            # no zfs= parameter on the command line; nothing to do
+ ;;
+ auto|bootfs)
+ ZFS_DATASET="bootfs"
+ mount_handler="zfs_mount_handler"
+ local pool="[a-zA-Z][^ ]*"
+ ;;
+ *)
+ ZFS_DATASET="${zfs}"
+ mount_handler="zfs_mount_handler"
+ local pool="${ZFS_DATASET%%/*}"
+ ;;
+ esac
+
+ # Allow at least n seconds for zfs device to show up. Especially
+ # when using zfs_import_dir instead of zpool.cache, the listing of
+ # available pools can be slow, so this loop must be top-tested to
+ # ensure we do one 'zpool import' pass after the timer has expired.
+ sleep ${ZFS_WAIT} & pid=$!
+ local break_after=0
+ while :; do
+ kill -0 $pid > /dev/null 2>&1 || break_after=1
+ if [ -c "/dev/zfs" ]; then
+ zpool import ${ZPOOL_IMPORT_FLAGS} | awk "
+ BEGIN { pool_found=0; online=0; unavail=0 }
+ /^ ${pool} .*/ { pool_found=1 }
+ /^\$/ { pool_found=0 }
+ /UNAVAIL/ { if (pool_found == 1) { unavail=1 } }
+ /ONLINE/ { if (pool_found == 1) { online=1 } }
+ END { if (online == 1 && unavail != 1)
+ { exit 0 }
+ else
+ { exit 1 }
+ }" && break
+ fi
+        [ "${break_after}" = 1 ] && break
+ sleep 1
+ done
+ kill $pid > /dev/null 2>&1
+}
+
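+# Late hook: finish importing pools and unlock encrypted datasets before the
+# real root is entered.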
+run_latehook () {
+ set_flags
+ # only run zpool import, if flags were set (cache file found / zfs_import_dir specified)
+ [ ! "${ZPOOL_IMPORT_FLAGS}" = "" ] && zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE}
+ # loop through all imported pools and if they have encryption at the root, unlock them now
+ for x in $(zpool list -Ho name); do
+ zfs_decrypt_fs "$x"
+ done
+}
+
+# vim:set ts=4 sw=4 ft=sh et: