author     Eli Schwartz    2018-10-28 16:52:30 -0400
committer  Eli Schwartz    2018-10-28 16:52:30 -0400
commit     7be37649b31f102de19cddb49b9e53f70202f2cd (patch)
tree       42ddb68a2e61ca192d251a3fa4e87f71b5483b41
download   aur-7be37649b31f102de19cddb49b9e53f70202f2cd.tar.gz
Initial upload of split zfs-utils 0.7.11
Split out into a separate package, as there's no need to make binary zfs module packages depend on a dkms split package just to acquire the utils.
-rw-r--r--  .SRCINFO              20
-rw-r--r--  PKGBUILD              59
-rw-r--r--  zfs.initcpio.hook    119
-rw-r--r--  zfs.initcpio.install 108
4 files changed, 306 insertions, 0 deletions
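
As a hedged illustration of the rationale in the commit message (not part of this commit): a binary zfs kernel module package can then pull in the userspace tools with a plain dependency instead of depending on a dkms split package. The package name is the one introduced here; everything else below is hypothetical.

    # hypothetical fragment of a downstream binary module PKGBUILD
    depends=("zfs-utils=${pkgver}")   # userspace tools come from this package
    # no dependency on a dkms split package is needed just for the utils
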
diff --git a/.SRCINFO b/.SRCINFO
new file mode 100644
index 000000000000..0068852addff
--- /dev/null
+++ b/.SRCINFO
@@ -0,0 +1,20 @@
+pkgbase = zfs-utils
+	pkgdesc = Userspace utilities for the Zettabyte File System.
+	pkgver = 0.7.11
+	pkgrel = 1
+	url = https://zfsonlinux.org/
+	arch = i686
+	arch = x86_64
+	license = CDDL
+	makedepends = git
+	source = git+https://github.com/zfsonlinux/zfs.git#tag=zfs-0.7.11?signed
+	source = zfs.initcpio.install
+	source = zfs.initcpio.hook
+	validpgpkeys = 4F3BA9AB6D1F8D683DC2DFB56AD860EED4598027
+	validpgpkeys = C33DF142657ED1F7C328A2960AB9E991C6AF658B
+	sha256sums = SKIP
+	sha256sums = aa5706bf08b36209a318762680f3c9fb45b3fc4b8e4ef184c8a5370b2c3000ca
+	sha256sums = f95ad1a5421ccbb8b01f448373f46cfd1f718361a82c2687a597325cf9827e3e
+
+pkgname = zfs-utils
+
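
A side note on the file above: .SRCINFO mirrors the PKGBUILD metadata and is generated rather than edited by hand. A minimal sketch of the usual AUR workflow (the exact commands used for this upload are an assumption):

    # regenerate .SRCINFO from the PKGBUILD, then commit both files
    makepkg --printsrcinfo > .SRCINFO
    git add PKGBUILD .SRCINFO
    git commit -m "Initial upload of split zfs-utils 0.7.11"
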
diff --git a/PKGBUILD b/PKGBUILD
new file mode 100644
index 000000000000..03b68791e9dc
--- /dev/null
+++ b/PKGBUILD
@@ -0,0 +1,59 @@
+# Maintainer: Eli Schwartz <eschwartz@archlinux.org>
+# Contributor: Iacopo Isimbaldi <isiachi@rhye.it>
+
+pkgname=zfs-utils
+pkgver=0.7.11
+pkgrel=1
+pkgdesc="Userspace utilities for the Zettabyte File System."
+arch=("i686" "x86_64")
+url="https://zfsonlinux.org/"
+license=('CDDL')
+makedepends=("git")
+source=("git+https://github.com/zfsonlinux/zfs.git#tag=zfs-${pkgver}?signed"
+        "zfs.initcpio.install"
+        "zfs.initcpio.hook")
+sha256sums=('SKIP'
+            'aa5706bf08b36209a318762680f3c9fb45b3fc4b8e4ef184c8a5370b2c3000ca'
+            'f95ad1a5421ccbb8b01f448373f46cfd1f718361a82c2687a597325cf9827e3e')
+validpgpkeys=('4F3BA9AB6D1F8D683DC2DFB56AD860EED4598027'  # Tony Hutter (GPG key for signing ZFS releases) <hutter2@llnl.gov>
+              'C33DF142657ED1F7C328A2960AB9E991C6AF658B') # Brian Behlendorf <behlendorf1@llnl.gov>
+
+prepare() {
+    cd "${srcdir}"/zfs
+
+    autoreconf -fi
+}
+
+build() {
+    cd "${srcdir}"/zfs
+
+    ./configure --prefix=/usr \
+                --sysconfdir=/etc \
+                --sbindir=/usr/bin \
+                --with-mounthelperdir=/usr/bin \
+                --libdir=/usr/lib \
+                --datadir=/usr/share \
+                --includedir=/usr/include \
+                --with-udevdir=/usr/lib/udev \
+                --libexecdir=/usr/lib/zfs \
+                --with-config=user
+    make
+}
+
+package() {
+    cd "${srcdir}"/zfs
+
+    make DESTDIR="${pkgdir}" install
+    install -D -m644 contrib/bash_completion.d/zfs "${pkgdir}"/usr/share/bash-completion/completions/zfs
+
+    # Remove unneeded files
+    rm -r "${pkgdir}"/etc/init.d
+    rm -r "${pkgdir}"/etc/sudoers.d #???
+    rm -r "${pkgdir}"/usr/lib/dracut
+    rm -r "${pkgdir}"/usr/lib/modules-load.d
+    rm -r "${pkgdir}"/usr/share/initramfs-tools
+    rm -r "${pkgdir}"/usr/share/zfs
+
+    install -D -m644 "${srcdir}"/zfs.initcpio.hook "${pkgdir}"/usr/lib/initcpio/hooks/zfs
+    install -D -m644 "${srcdir}"/zfs.initcpio.install "${pkgdir}"/usr/lib/initcpio/install/zfs
+}
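
Because the git source is PGP-signed ("?signed" plus the validpgpkeys array above), makepkg will refuse to build until the release-signing keys are in the builder's keyring. A minimal sketch of a local build; the key IDs are taken from the PKGBUILD, keyserver reachability is assumed:

    # fetch the release-signing keys listed in validpgpkeys
    gpg --recv-keys 4F3BA9AB6D1F8D683DC2DFB56AD860EED4598027 \
                    C33DF142657ED1F7C328A2960AB9E991C6AF658B
    # verify sources, build, and install the resulting zfs-utils package
    makepkg -si
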
diff --git a/zfs.initcpio.hook b/zfs.initcpio.hook
new file mode 100644
index 000000000000..7288ab8018b3
--- /dev/null
+++ b/zfs.initcpio.hook
@@ -0,0 +1,119 @@
+#
+# WARNING: This script is parsed by ash in busybox at boot time, not bash!
+# http://linux.die.net/man/1/ash
+# https://wiki.ubuntu.com/DashAsBinSh
+# http://www.jpsdomain.org/public/2008-JP_bash_vs_dash.pdf
+#
+ZPOOL_FORCE=""
+ZPOOL_IMPORT_FLAGS=""
+
+zfs_get_bootfs () {
+    for zfs_dataset in $(zpool list -H -o bootfs); do
+        case ${zfs_dataset} in
+            "" | "-")
+                # skip this line/dataset
+                ;;
+            "no pools available")
+                return 1
+                ;;
+            *)
+                ZFS_DATASET=${zfs_dataset}
+                return 0
+                ;;
+        esac
+    done
+    return 1
+}
+
+zfs_mount_handler () {
+    if [ "${ZFS_DATASET}" = "bootfs" ] ; then
+        if ! zfs_get_bootfs ; then
+            # Let's import everything and try again
+            zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE}
+            if ! zfs_get_bootfs ; then
+                die "ZFS: Cannot find bootfs."
+            fi
+        fi
+    fi
+
+    local pool="${ZFS_DATASET%%/*}"
+    local rwopt_exp="${rwopt:-ro}"
+
+    if ! zpool list -H "${pool}" 2>&1 > /dev/null ; then
+        if [ ! "${rwopt_exp}" = "rw" ]; then
+            msg "ZFS: Importing pool ${pool} readonly."
+            ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -o readonly=on"
+        else
+            msg "ZFS: Importing pool ${pool}."
+        fi
+
+        if ! zpool import ${ZPOOL_IMPORT_FLAGS} -N "${pool}" ${ZPOOL_FORCE} ; then
+            die "ZFS: Unable to import pool ${pool}."
+        fi
+    fi
+
+    local node="$1"
+    local tab_file="${node}/etc/fstab"
+    local zfs_datasets="$(zfs list -H -o name -t filesystem -r ${ZFS_DATASET})"
+
+    # Mount the root, and any child datasets
+    for dataset in ${zfs_datasets}; do
+        mountpoint=$(zfs get -H -o value mountpoint "${dataset}")
+        case ${mountpoint} in
+            "none")
+                # skip datasets with no mountpoint
+                ;;
+            "legacy")
+                if [ -f "${tab_file}" ]; then
+                    if findmnt -snero source -F "${tab_file}" -S "${dataset}" > /dev/null 2>&1; then
+                        opt=$(findmnt -snero options -F "${tab_file}" -S "${dataset}")
+                        mnt=$(findmnt -snero target -F "${tab_file}" -S "${dataset}")
+                        mount -t zfs -o "${opt}" "${dataset}" "${node}${mnt}"
+                    fi
+                fi
+                ;;
+            *)
+                mount -t zfs -o "zfsutil,${rwopt_exp}" "${dataset}" "${node}${mountpoint}"
+                ;;
+        esac
+    done
+}
+
+run_hook() {
+    # Force import the pools, useful if the pool has not been properly exported using 'zpool export <pool>'
+    [ ! "${zfs_force}" = "" ] && ZPOOL_FORCE="-f"
+
+    # Add import directory to import command flags
+    [ ! "${zfs_import_dir}" = "" ] && ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -d ${zfs_import_dir}"
+
+    # Wait up to 15 seconds (default) for ZFS devices to show up; zfs_wait= overrides this
+    [ "${zfs_wait}" = "" ] && ZFS_WAIT="15" || ZFS_WAIT="${zfs_wait}"
+
+    [ "${root}" = "zfs" ] && mount_handler="zfs_mount_handler"
+
+    case ${zfs} in
+        "")
+            # zfs= not set on the kernel command line; nothing to do here
+            ;;
+        auto|bootfs)
+            ZFS_DATASET="bootfs"
+            mount_handler="zfs_mount_handler"
+            ;;
+        *)
+            ZFS_DATASET="${zfs}"
+            mount_handler="zfs_mount_handler"
+            ;;
+    esac
+
+    # Allow up to ZFS_WAIT seconds for /dev/zfs to show up
+    for i in $(seq 1 ${ZFS_WAIT}); do
+        [ -c "/dev/zfs" ] && break
+        sleep 1
+    done
+}
+
+run_latehook () {
+    zpool import -N -a ${ZPOOL_FORCE}
+}
+
+# vim:set ts=4 sw=4 ft=sh et:
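
A hedged sketch of how the hook above is typically wired in once the package is installed. The HOOKS line and the dataset/device names are illustrative; only the zfs_* parameters parsed by run_hook() come from the script itself:

    # /etc/mkinitcpio.conf -- add 'zfs' before 'filesystems' (illustrative HOOKS line)
    HOOKS=(base udev autodetect modconf block keyboard zfs filesystems)

    # rebuild the initramfs so the hook and runscript are included
    mkinitcpio -p linux

    # example kernel command line consumed by run_hook() above
    # (tank/root, the by-id path, and 30 seconds are placeholder values)
    zfs=tank/root rw zfs_force=1 zfs_import_dir=/dev/disk/by-id zfs_wait=30
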
diff --git a/zfs.initcpio.install b/zfs.initcpio.install
new file mode 100644
index 000000000000..3ab0f5266e60
--- /dev/null
+++ b/zfs.initcpio.install
@@ -0,0 +1,108 @@
+#!/bin/bash
+
+build() {
+    map add_module \
+        zavl \
+        znvpair \
+        zunicode \
+        zcommon \
+        zfs \
+        zpios \
+        spl \
+        splat
+
+    map add_binary \
+        arcstat.py \
+        dbufstat.py \
+        fsck.zfs \
+        mount.zfs \
+        seq \
+        zdb \
+        zed \
+        zfs \
+        zhack \
+        zinject \
+        zpios \
+        zpool \
+        zstreamdump \
+        ztest \
+        splat \
+        /lib/udev/vdev_id \
+        /lib/udev/zvol_id \
+        findmnt
+
+    map add_file \
+        /lib/udev/rules.d/60-zvol.rules \
+        /lib/udev/rules.d/69-vdev.rules \
+        /lib/udev/rules.d/90-zfs.rules \
+        /lib/libgcc_s.so.1
+
+    map add_dir \
+        /etc/zfs/zed.d
+
+    add_runscript
+
+    # allow mount(8) to "autodetect" ZFS
+    echo 'zfs' >>"${BUILDROOT}/etc/filesystems"
+
+    [[ -f /etc/zfs/zpool.cache ]] && add_file "/etc/zfs/zpool.cache"
+    [[ -f /etc/modprobe.d/zfs.conf ]] && add_file "/etc/modprobe.d/zfs.conf"
+}
+
+help() {
+ cat<<HELPEOF
+This hook allows you to use ZFS as your root filesystem.
+
+Command Line Setup:
+
+ You can append the following arguments to your kernel parameters list. See
+ https://wiki.archlinux.org/index.php/Kernel_parameters for more information.
+
+ To use ZFS as your boot filesystem:
+
+ zfs=bootfs or zfs=auto or root=zfs
+
+ To use a pool or dataset:
+
+ zfs=<pool/dataset>
+
+ To force importing of a ZFS pool:
+
+ zfs_force=1
+
+ If set to 1, this will use "zpool import -f" when attempting to import
+ pools.
+
+ To change the number of seconds to wait for ZFS devices to show up at boot:
+
+ zfs_wait=30
+
+ To search for devices in a directory other than "/dev":
+
+ zfs_import_dir=/dev/disk/by-uuid
+ or
+ zfs_import_dir=/dev/disk/by-partuuid
+ or
+ zfs_import_dir=/dev/disk/by-path
+ etc.
+
+ Following initcpio convention, the 'rw' option must be specified to mount the
+ pool read/write. Pools are imported read-only by default.
+
+Examples:
+
+ To use bootfs on your pool, use
+
+ zfs=bootfs rw
+
+ This will set up your root filesystem using the tank/root ZFS dataset:
+
+ zfs=tank/root rw
+
+If you want to set module parameters for the zfs-on-linux kernel modules, add
+them to /etc/modprobe.d/zfs.conf and then rebuild the initcpio image.
+
+HELPEOF
+}
+
+# vim: set ts=4 sw=4 ft=sh et:
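
Tying the help text back to build() above: if /etc/modprobe.d/zfs.conf exists, the add_file check copies it into the image, so module parameters set there already apply in early userspace. A hedged example with an illustrative value only:

    # /etc/modprobe.d/zfs.conf -- picked up by the add_file check in build()
    options zfs zfs_arc_max=4294967296    # cap the ARC at 4 GiB (illustrative)

Rebuilding the initcpio afterwards (as in the mkinitcpio sketch after the hook above) keeps the copy inside the image in sync with the file on disk.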