author     Jan Houben    2017-07-11 03:51:30 +0200
committer  Jan Houben    2017-07-11 03:51:30 +0200
commit     a14f9a61443eceaeec797cf8fd772bfbbdc0e36a (patch)
tree       6773dd378a2cb5ad997d5cdca555701c5113ef9a
download   aur-a14f9a61443eceaeec797cf8fd772bfbbdc0e36a.tar.gz
first commit
-rw-r--r--  .SRCINFO                        18
-rw-r--r--  .gitignore                       5
-rw-r--r--  PKGBUILD                        26
-rw-r--r--  plymouth-zfs.initcpio.hook     145
-rw-r--r--  plymouth-zfs.initcpio.install  126
-rw-r--r--  plymouth-zfs.install             5
6 files changed, 325 insertions, 0 deletions
diff --git a/.SRCINFO b/.SRCINFO
new file mode 100644
index 000000000000..c8a24085e2c6
--- /dev/null
+++ b/.SRCINFO
@@ -0,0 +1,18 @@
+pkgbase = plymouth-zfs
+ pkgdesc = initcpio hook to support native zfs encryption for plymouth
+ pkgver = 0.1
+ pkgrel = 1
+ install = plymouth-zfs.install
+ arch = any
+ license = MIT
+ depends = plymouth
+ depends = zfs-encryption-dkms-git
+ source = plymouth-zfs.initcpio.hook
+ source = plymouth-zfs.initcpio.install
+ source = plymouth-zfs.install
+ sha256sums = 06688ac12efe795e0d8e6781507cb9fd2de094c753be93288e96963de75369a0
+ sha256sums = af8d601a9af17e6f3f6b59044280a2541e9d4303786840d5066c4ecb755f6d9d
+ sha256sums = 0910896056921ebedc1eb910d38b64d8815790f042cb3e9bc07a1a49e31e3b2b
+
+pkgname = plymouth-zfs
+
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 000000000000..664cf4365b6a
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+/src
+/pkg
+.directory
+*.pkg*
+*.orig
diff --git a/PKGBUILD b/PKGBUILD
new file mode 100644
index 000000000000..4a740007e3a3
--- /dev/null
+++ b/PKGBUILD
@@ -0,0 +1,26 @@
+# Maintainer: Jan Houben <jan@nexttrex.de>
+
+pkgname=plymouth-zfs
+pkgver=0.1
+pkgrel=1
+pkgdesc="initcpio hook to support native zfs encryption for plymouth"
+arch=(any)
+license=(MIT)
+depends=(plymouth zfs-encryption-dkms-git)
+install=plymouth-zfs.install
+source=("plymouth-zfs.initcpio.hook"
+ "plymouth-zfs.initcpio.install"
+ "plymouth-zfs.install")
+sha256sums=('06688ac12efe795e0d8e6781507cb9fd2de094c753be93288e96963de75369a0'
+ 'af8d601a9af17e6f3f6b59044280a2541e9d4303786840d5066c4ecb755f6d9d'
+ '0910896056921ebedc1eb910d38b64d8815790f042cb3e9bc07a1a49e31e3b2b')
+
+package() {
+ cd "$srcdir"
+ install -D -m644 "${srcdir}"/plymouth-zfs.initcpio.hook "${pkgdir}"/usr/lib/initcpio/hooks/plymouth-zfs
+ install -D -m644 "${srcdir}"/plymouth-zfs.initcpio.install "${pkgdir}"/usr/lib/initcpio/install/plymouth-zfs
+}
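+
+# Typical AUR workflow (illustrative; not part of the package itself): build
+# and install from this directory with 'makepkg -si'.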
+
diff --git a/plymouth-zfs.initcpio.hook b/plymouth-zfs.initcpio.hook
new file mode 100644
index 000000000000..d5950fcad04b
--- /dev/null
+++ b/plymouth-zfs.initcpio.hook
@@ -0,0 +1,145 @@
+#
+# WARNING: This script is parsed by ash in busybox at boot time, not bash!
+# http://linux.die.net/man/1/ash
+# https://wiki.ubuntu.com/DashAsBinSh
+# http://www.jpsdomain.org/public/2008-JP_bash_vs_dash.pdf
+#
+ZPOOL_FORCE=""
+ZPOOL_IMPORT_FLAGS=""
+
+zfs_get_bootfs () {
+ for zfs_dataset in $(zpool list -H -o bootfs); do
+ case ${zfs_dataset} in
+ "" | "-")
+ # skip this line/dataset
+ ;;
+ "no pools available")
+ return 1
+ ;;
+ *)
+ ZFS_DATASET=${zfs_dataset}
+ return 0
+ ;;
+ esac
+ done
+ return 1
+}
+
+zfs_decrypt_fs() {
+ dataset=$1
+
+ # check if 'zfs load-key' is available
+ zfs 2>&1 | grep load-key > /dev/null || return 0
+
+ # check if dataset is encrypted
+ [ "$(zfs get -H -o value encryption "${dataset}")" != "off" ] || return 0
+
+ # get the encryption root
+ encryptionroot=$(zfs get -H -o value encryptionroot "${dataset}")
+
+ # check if key is already loaded
+ [ "$(zfs get -H -o value encryption "${dataset}")" != "available" ] || return 0
+
+ # finally load the key
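+    # plymouth passes the entered passphrase to the --command on stdin and
+    # retries up to --number-of-tries times if the command fails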
+ plymouth ask-for-password --prompt="Password for ${dataset} dataset" --dont-pause-progress --number-of-tries=5 --command="zfs load-key ${encryptionroot}"
+}
+
+zfs_mount_handler () {
+ if [ "${ZFS_DATASET}" = "bootfs" ] ; then
+ if ! zfs_get_bootfs ; then
+            # Let's import everything and try again
+ zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE}
+ if ! zfs_get_bootfs ; then
+ die "ZFS: Cannot find bootfs."
+ fi
+ fi
+ fi
+
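+    # ${ZFS_DATASET%%/*} keeps the part before the first '/', e.g. "tank/root" -> "tank"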
+ local pool="${ZFS_DATASET%%/*}"
+ local rwopt_exp="${rwopt:-ro}"
+
+    if ! zpool list -H "${pool}" > /dev/null 2>&1 ; then
+ if [ ! "${rwopt_exp}" = "rw" ]; then
+ msg "ZFS: Importing pool ${pool} readonly."
+ ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -o readonly=on"
+ else
+ msg "ZFS: Importing pool ${pool}."
+ fi
+
+ if ! zpool import ${ZPOOL_IMPORT_FLAGS} -N "${pool}" ${ZPOOL_FORCE} ; then
+ die "ZFS: Unable to import pool ${pool}."
+ fi
+ fi
+
+ local node="$1"
+ local tab_file="${node}/etc/fstab"
+ local zfs_datasets="$(zfs list -H -o name -t filesystem -r ${ZFS_DATASET})"
+
+ # Mount the root, and any child datasets
+ for dataset in ${zfs_datasets}; do
+ mountpoint=$(zfs get -H -o value mountpoint "${dataset}")
+ case ${mountpoint} in
+ "none")
+ # skip this line/dataset.
+ ;;
+ "legacy")
+ if [ -f "${tab_file}" ]; then
+ if findmnt -snero source -F "${tab_file}" -S "${dataset}" > /dev/null 2>&1; then
+ opt=$(findmnt -snero options -F "${tab_file}" -S "${dataset}")
+ mnt=$(findmnt -snero target -F "${tab_file}" -S "${dataset}")
+
+ zfs_decrypt_fs "${dataset}"
+ mount -t zfs -o "${opt}" "${dataset}" "${node}${mnt}"
+ fi
+ fi
+ ;;
+ *)
+ zfs_decrypt_fs "${dataset}"
+ mount -t zfs -o "zfsutil,${rwopt_exp}" "${dataset}" "${node}${mountpoint}"
+ ;;
+ esac
+ done
+}
+
+run_hook() {
+    # Force-import the pools; useful if a pool has not been cleanly exported with 'zpool export <pool>'
+ [ ! "${zfs_force}" = "" ] && ZPOOL_FORCE="-f"
+
+ # Add import directory to import command flags
+ [ ! "${zfs_import_dir}" = "" ] && ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -d ${zfs_import_dir}"
+
+    # Wait up to 15 seconds by default for ZFS devices to show up; override with zfs_wait=<n>
+ [ "${zfs_wait}" = "" ] && ZFS_WAIT="15" || ZFS_WAIT="${zfs_wait}"
+
+ [ "${root}" = "zfs" ] && mount_handler="zfs_mount_handler"
+
+ case ${zfs} in
+ "")
+            # zfs= not given on the kernel command line; nothing to do
+ ;;
+ auto|bootfs)
+ ZFS_DATASET="bootfs"
+ mount_handler="zfs_mount_handler"
+ ;;
+ *)
+ ZFS_DATASET="${zfs}"
+ mount_handler="zfs_mount_handler"
+ ;;
+ esac
+
+    # Allow up to ZFS_WAIT seconds for /dev/zfs to show up
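+    # (the /dev/zfs control node appears once the zfs kernel module has loaded)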
+ for i in $(seq 1 ${ZFS_WAIT}); do
+ [ -c "/dev/zfs" ] && break
+ sleep 1
+ done
+}
+
+run_latehook () {
+ zpool import -N -a ${ZPOOL_FORCE}
+}
+
+# vim:set ts=4 sw=4 ft=sh et:
diff --git a/plymouth-zfs.initcpio.install b/plymouth-zfs.initcpio.install
new file mode 100644
index 000000000000..810d5c92e8fb
--- /dev/null
+++ b/plymouth-zfs.initcpio.install
@@ -0,0 +1,126 @@
+#!/bin/bash
+
+build() {
+
+ # zfs
+ map add_module \
+ zavl \
+ znvpair \
+ zunicode \
+ zcommon \
+ zfs \
+ zpios \
+ spl \
+ splat
+
+ map add_binary \
+ arcstat.py \
+ dbufstat.py \
+ fsck.zfs \
+ mount.zfs \
+ seq \
+ zdb \
+ zed \
+ zfs \
+ zhack \
+ zinject \
+ zpios \
+ zpool \
+ zstreamdump \
+ ztest \
+ splat \
+ /lib/udev/vdev_id \
+ /lib/udev/zvol_id \
+ findmnt
+
+ map add_file \
+ /lib/udev/rules.d/60-zvol.rules \
+ /lib/udev/rules.d/69-vdev.rules \
+ /lib/udev/rules.d/90-zfs.rules \
+ /lib/libgcc_s.so.1
+
+ map add_dir \
+ /etc/zfs/zed.d
+
+ # add plymouth prompt support
+ add_binary "/usr/lib/plymouth/label.so"
+ add_binary "/usr/lib/plymouth/text.so"
+ add_file "/usr/share/fonts/TTF/DejaVuSans.ttf"
+ add_file "/etc/fonts/fonts.conf"
+ add_file "/etc/fonts/conf.d/60-latin.conf"
+
+ add_runscript
+
+ # allow mount(8) to "autodetect" ZFS
+ echo 'zfs' >>"${BUILDROOT}/etc/filesystems"
+
+ [[ -f /etc/zfs/zpool.cache ]] && add_file "/etc/zfs/zpool.cache"
+ [[ -f /etc/modprobe.d/zfs.conf ]] && add_file "/etc/modprobe.d/zfs.conf"
+}
+
+help() {
+ cat<<HELPEOF
+This hook allows you to use ZFS as your root filesystem.
+
+Command Line Setup:
+
+ You can append the following arguments to your kernel parameters list. See
+ https://wiki.archlinux.org/index.php/Kernel_parameters for more information.
+
+ To use ZFS as your boot filesystem:
+
+ zfs=bootfs or zfs=auto or root=zfs
+
+ To use a pool or dataset:
+
+ zfs=<pool/dataset>
+
+ To force importing of a ZFS pool:
+
+ zfs_force=1
+
+ If set to 1, this will use "zpool import -f" when attempting to import
+ pools.
+
+  To change how many seconds to wait for ZFS devices to show up at boot:
+
+ zfs_wait=30
+
+ To search for devices in a directory other than "/dev":
+
+ zfs_import_dir=/dev/disk/by-uuid
+ or
+ zfs_import_dir=/dev/disk/by-partuuid
+ or
+ zfs_import_dir=/dev/disk/by-path
+ etc.
+
+ Following initcpio convention, the 'rw' option must be specified to load the
+  pool as read/write. Pools are imported read-only by default.
+
+Examples:
+
+ To use bootfs on your pool, use
+
+ zfs=bootfs rw
+
+  To set up your root on the tank/root dataset, use
+
+ zfs=tank/root rw
+
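+  A complete kernel command line might then look like this (pool name and
+  device directory are illustrative):
+
+    root=zfs zfs=tank/root rw zfs_wait=30 zfs_import_dir=/dev/disk/by-id
+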
+If you want to set options for the ZFS on Linux kernel module, add them to
+/etc/modprobe.d/zfs.conf and then rebuild the initcpio image.
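+
+For example, to cap the ARC at 1 GiB (an illustrative value):
+
+    options zfs zfs_arc_max=1073741824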
+
+HELPEOF
+}
+
+# vim: set ts=4 sw=4 ft=sh et:
diff --git a/plymouth-zfs.install b/plymouth-zfs.install
new file mode 100644
index 000000000000..ccd30d597ecb
--- /dev/null
+++ b/plymouth-zfs.install
@@ -0,0 +1,5 @@
+post_install() {
+ echo "Note: replace the 'zfs' hook with 'plymouth-zfs' in '/etc/mkinitpcio.conf'"
+}