author     justin  2021-07-20 09:30:10 -0400
committer  justin  2021-07-20 09:30:10 -0400
commit     1a8295615d25c8fa6b9459842cface6f097bfd16 (patch)
tree       0633a4aeb5dd84cd36e797aa204010faa69651ed
parent     7b7f681d62511f6d57ddc37bb9c4f963b07f5327 (diff)
download   aur-1a8295615d25c8fa6b9459842cface6f097bfd16.tar.gz
patched mount handler bug
-rw-r--r--  PKGBUILD                     2
-rw-r--r--  plymouth-zfs.initcpio.hook  92
2 files changed, 67 insertions, 27 deletions
diff --git a/PKGBUILD b/PKGBUILD
index 58bbd62231d1..9d70263c19d9 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -17,7 +17,7 @@ install=plymouth-zfs.install
source=("plymouth-zfs.initcpio.hook"
"plymouth-zfs.initcpio.install"
"plymouth-zfs.install")
-sha256sums=('4fc0eb84fbd8a62babbd1f85a5db7d467eb1c4178218a40a98853a62f1135176'
+sha256sums=('ae2de1378e0966a740bb4e7f24bd796d76e1e830b8f791f1e1d23a12886c60da'
'44e940623bc0fbbfda5aa8f076850c9b1429874c144359b726e8c48c0ddb1a03'
'0910896056921ebedc1eb910d38b64d8815790f042cb3e9bc07a1a49e31e3b2b')
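The sha256sums change above tracks the edited hook file. For anyone rebuilding the package, the checksums can be regenerated rather than computed by hand; a minimal sketch using standard Arch tooling, run from the directory containing the PKGBUILD:

    updpkgsums    # pacman-contrib: rewrites the sha256sums=() array in place
    makepkg -g    # alternative: print fresh checksums to paste in manually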
diff --git a/plymouth-zfs.initcpio.hook b/plymouth-zfs.initcpio.hook
index 49c632f9eaca..6f139b036fbb 100644
--- a/plymouth-zfs.initcpio.hook
+++ b/plymouth-zfs.initcpio.hook
@@ -36,15 +36,27 @@ zfs_decrypt_fs() {
# check if key is already loaded
[ "$(zfs get -H -o value keystatus "${dataset}")" != "available" ] || return 0
-
+
# get the encryption root
encryptionroot=$(zfs get -H -o value encryptionroot "${dataset}")
+ # export the encryption root to be used by other hooks (SSH)
+ echo "${encryptionroot}" > /.encryptionroot
+
+ # loop until we get the correct password or key is unlocked by another vector (SSH for instance)
+ # while [ "$(zfs get -H -o value keystatus "${encryptionroot}")" != "available" ] &&
+ # ! eval zfs load-key "${encryptionroot}"; do
+ # sleep 2
+ # done
# finally load the key
plymouth ask-for-password --prompt="Password for ${dataset} dataset" --dont-pause-progress --number-of-tries=5 --command="zfs load-key ${encryptionroot}"
# retry if key wasn't loaded
[ "$(zfs get -H -o value keystatus "${dataset}")" != "available" ] && zfs_decrypt_fs "${dataset}"
+
+ if [ -f /.encryptionroot ]; then
+ rm /.encryptionroot
+ fi
}
zfs_mount_handler () {
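The decrypt helper above boils down to a few ZFS commands; a minimal sketch of the flow, assuming a hypothetical dataset rpool/ROOT:

    zfs get -H -o value keystatus rpool/ROOT        # "available" once the key is loaded
    zfs get -H -o value encryptionroot rpool/ROOT   # ancestor dataset that actually holds the key
    zfs load-key rpool/ROOT                         # prompts for the passphrase on the console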
@@ -53,7 +65,8 @@ zfs_mount_handler () {
# Let's import everything and try again
zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE}
if ! zfs_get_bootfs ; then
- die "ZFS: Cannot find bootfs."
+ err "ZFS: Cannot find bootfs."
+ exit 1
fi
fi
fi
@@ -70,7 +83,8 @@ zfs_mount_handler () {
fi
if ! zpool import ${ZPOOL_IMPORT_FLAGS} -N "${pool}" ${ZPOOL_FORCE} ; then
- die "ZFS: Unable to import pool ${pool}."
+ err "ZFS: Unable to import pool ${pool}."
+ exit 1
fi
fi
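With zfs_force and zfs_import_dir both set, the import attempted above expands to roughly the following (pool name and search directory are illustrative):

    zpool import -d /dev/disk/by-id -N -f rpool    # -N imports without mounting any datasets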
@@ -82,34 +96,36 @@ zfs_mount_handler () {
# Mount the root, and any child datasets
for dataset in ${zfs_datasets}; do
mountpoint=$(zfs get -H -o value mountpoint "${dataset}")
- case ${mountpoint} in
- "none")
- # skip this line/dataset.
- ;;
- "legacy")
- if [ -f "${tab_file}" ]; then
- if findmnt -snero source -F "${tab_file}" -S "${dataset}" > /dev/null 2>&1; then
- opt=$(findmnt -snero options -F "${tab_file}" -S "${dataset}")
- mnt=$(findmnt -snero target -F "${tab_file}" -S "${dataset}")
- zfs_decrypt_fs "${dataset}"
- mount -t zfs -o "${opt}" "${dataset}" "${node}${mnt}"
- fi
+ canmount=$(zfs get -H -o value canmount "${dataset}")
+ # skip datasets that should not be mounted
+ [ "${canmount}" = "off" ] || [ "${mountpoint}" = "none" ] && continue
+ if [ "${mountpoint}" = "legacy" ]; then
+ if [ -f "${tab_file}" ]; then
+ if findmnt -snero source -F "${tab_file}" -S "${dataset}" > /dev/null 2>&1; then
+ opt=$(findmnt -snero options -F "${tab_file}" -S "${dataset}")
+ mnt=$(findmnt -snero target -F "${tab_file}" -S "${dataset}")
+ zfs_decrypt_fs "${dataset}"
+ mount -t zfs -o "${opt}" "${dataset}" "${node}${mnt}"
fi
- ;;
- *)
- zfs_decrypt_fs "${dataset}"
- mount -t zfs -o "zfsutil,${rwopt_exp}" "${dataset}" "${node}/${mountpoint##${rootmnt}}"
- ;;
- esac
+ fi
+ else
+ zfs_decrypt_fs "${dataset}"
+ mount -t zfs -o "zfsutil,${rwopt_exp}" "${dataset}" "${node}/${mountpoint##${rootmnt}}"
+ fi
done
}
-run_hook() {
+set_flags() {
# Force import the pools, useful if the pool has not properly been exported using 'zpool export <pool>'
[ ! "${zfs_force}" = "" ] && ZPOOL_FORCE="-f"
# Add import directory to import command flags
[ ! "${zfs_import_dir}" = "" ] && ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -d ${zfs_import_dir}"
+ [ "${zfs_import_dir}" = "" ] && [ -f /etc/zfs/zpool.cache.org ] && ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -c /etc/zfs/zpool.cache.org"
+}
+
+run_hook() {
+ set_flags
# Wait 15 seconds for ZFS devices to show up
[ "${zfs_wait}" = "" ] && ZFS_WAIT="15" || ZFS_WAIT="${zfs_wait}"
@@ -133,22 +149,46 @@ run_hook() {
auto|bootfs)
ZFS_DATASET="bootfs"
mount_handler="zfs_mount_handler"
+ local pool="[a-zA-Z][^ ]*"
;;
*)
ZFS_DATASET="${zfs}"
mount_handler="zfs_mount_handler"
+ local pool="${ZFS_DATASET%%/*}"
;;
esac
- # Allow up to n seconds for zfs device to show up
- for i in $(seq 1 ${ZFS_WAIT}); do
- [ -c "/dev/zfs" ] && break
+ # Allow at least n seconds for zfs device to show up. Especially
+ # when using zfs_import_dir instead of zpool.cache, the listing of
+ # available pools can be slow, so this loop must be top-tested to
+ # ensure we do one 'zpool import' pass after the timer has expired.
+ sleep ${ZFS_WAIT} & pid=$!
+ local break_after=0
+ while :; do
+ kill -0 $pid > /dev/null 2>&1 || break_after=1
+ if [ -c "/dev/zfs" ]; then
+ zpool import ${ZPOOL_IMPORT_FLAGS} | awk "
+ BEGIN { pool_found=0; online=0; unavail=0 }
+ /^ ${pool} .*/ { pool_found=1 }
+ /^\$/ { pool_found=0 }
+ /UNAVAIL/ { if (pool_found == 1) { unavail=1 } }
+ /ONLINE/ { if (pool_found == 1) { online=1 } }
+ END { if (online == 1 && unavail != 1)
+ { exit 0 }
+ else
+ { exit 1 }
+ }" && break
+ fi
+ [ "$break_after" = 1 ] && break
sleep 1
done
+ kill $pid > /dev/null 2>&1
}
run_latehook () {
- zpool import -N -a ${ZPOOL_FORCE}
+ set_flags
+ # only run zpool import if flags were set (cache file found or zfs_import_dir specified)
+ [ ! "${ZPOOL_IMPORT_FLAGS}" = "" ] && zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE}
}
# vim:set ts=4 sw=4 ft=sh et:
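For reference, the awk health check in run_hook scans 'zpool import' listing output of roughly this shape (pool and device names are hypothetical); a pool is considered ready once its block reports ONLINE with no UNAVAIL device:

       pool: rpool
         id: 1234567890123456789
      state: ONLINE
     config:

            rpool       ONLINE
              sda2      ONLINE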