diff --git a/upgrade/FAQ.md b/upgrade/FAQ.md
new file mode 100644
index 00000000..4b4ff44e
--- /dev/null
+++ b/upgrade/FAQ.md
@@ -0,0 +1,116 @@
+# Frequently Asked Questions
+
+## What is an "upgrade container"?
+
+An "upgrade container" is a collection of ZFS datasets that can be used
+in conjunction with systemd-nspawn to verify an upgrade in a container,
+prior to executing the upgrade and modifying the actual root filesystem
+of the appliance.
+
+To create and start an upgrade container, the "upgrade-container" script
+can be used:
+
+    $ /var/dlpx-update/latest/upgrade-container create in-place
+    delphix.uE0noy5
+    $ /var/dlpx-update/latest/upgrade-container start delphix.uE0noy5
+
+This will clone the currently running root filesystem, mount it in
+"/var/lib/machines", and then start a new instance of the systemd-nspawn
+service to run the container.
+
+Here's an example of the datasets that are created:
+
+    $ zfs list -r -d 1 rpool/ROOT/delphix.uE0noy5
+    NAME                              USED  AVAIL  REFER  MOUNTPOINT
+    rpool/ROOT/delphix.uE0noy5       3.42M  39.3G    64K  none
+    rpool/ROOT/delphix.uE0noy5/data  3.16M  39.3G  45.7M  legacy
+    rpool/ROOT/delphix.uE0noy5/home     1K  39.3G  11.0G  legacy
+    rpool/ROOT/delphix.uE0noy5/root   196K  39.3G  6.35G  /var/lib/machines/delphix.uE0noy5
+
+Here's an example of the status of the systemd-nspawn service:
+
+    $ systemctl status systemd-nspawn@delphix.uE0noy5 | head -n 11
+    ● systemd-nspawn@delphix.uE0noy5.service - Container delphix.uE0noy5
+       Loaded: loaded (/lib/systemd/system/systemd-nspawn@.service; disabled; vendor preset: enabled)
+      Drop-In: /etc/systemd/system/systemd-nspawn@delphix.uE0noy5.service.d
+               └─override.conf
+       Active: active (running) since Tue 2019-01-29 19:41:04 UTC; 15min ago
+         Docs: man:systemd-nspawn(1)
+     Main PID: 2837 (systemd-nspawn)
+       Status: "Container running: Startup finished in 49.256s."
+        Tasks: 1 (limit: 16384)
+       CGroup: /machine.slice/systemd-nspawn@delphix.uE0noy5.service
+               └─2837 /usr/bin/systemd-nspawn --quiet --boot --capability=all --machine=delphix.uE0noy5
+
+## What is an "in-place" upgrade container?
+
+When creating an upgrade container, one can create either an "in-place"
+upgrade container, or a "not-in-place" upgrade container. An in-place
+container will have its "root" dataset be a clone of the root dataset of
+the currently booted root filesystem; i.e. it's created with "zfs clone"
+as opposed to "zfs create".
+
+For example, the ZFS datasets for an in-place upgrade container will
+resemble the following:
+
+    $ /var/dlpx-update/latest/upgrade-container create in-place
+    delphix.4qL2URY
+
+    $ zfs list -r -d 1 -o name,mountpoint,origin rpool/ROOT/delphix.4qL2URY
+    NAME                             MOUNTPOINT                         ORIGIN
+    rpool/ROOT/delphix.4qL2URY       none                               -
+    rpool/ROOT/delphix.4qL2URY/data  legacy                             rpool/ROOT/delphix.JNHeZad/data@delphix.4qL2URY
+    rpool/ROOT/delphix.4qL2URY/home  legacy                             rpool/ROOT/delphix.JNHeZad/home@delphix.4qL2URY
+    rpool/ROOT/delphix.4qL2URY/root  /var/lib/machines/delphix.4qL2URY  rpool/ROOT/delphix.JNHeZad/root@delphix.4qL2URY
+
+## What is a "not-in-place" upgrade container?
+
+When creating an upgrade container, one can create either an "in-place"
+upgrade container, or a "not-in-place" upgrade container. A not-in-place
+container will have its "root" dataset be separate from any other root
+dataset on the appliance; i.e. it's created with "zfs create" as opposed
+to "zfs clone".
+
+For example, the ZFS datasets for a not-in-place upgrade container will
+resemble the following:
+
+    $ /var/dlpx-update/latest/upgrade-container create not-in-place
+    ...
+    delphix.Oy4JfnU
+
+    $ sudo zfs list -r -d 1 -o name,mountpoint,origin rpool/ROOT/delphix.Oy4JfnU
+    NAME                             MOUNTPOINT                         ORIGIN
+    rpool/ROOT/delphix.Oy4JfnU       none                               -
+    rpool/ROOT/delphix.Oy4JfnU/data  legacy                             rpool/ROOT/delphix.JNHeZad/data@delphix.Oy4JfnU
+    rpool/ROOT/delphix.Oy4JfnU/home  legacy                             rpool/ROOT/delphix.JNHeZad/home@delphix.Oy4JfnU
+    rpool/ROOT/delphix.Oy4JfnU/root  /var/lib/machines/delphix.Oy4JfnU  -
+
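+## How do I clean up an upgrade container?
+
+Once an upgrade has been verified (or abandoned), the upgrade container
+can be stopped and then destroyed using the "stop" and "destroy"
+subcommands of the same "upgrade-container" script. For example, reusing
+the sample container name from the first question (actual names will
+differ on your system):
+
+    $ /var/dlpx-update/latest/upgrade-container stop delphix.uE0noy5
+    $ /var/dlpx-update/latest/upgrade-container destroy delphix.uE0noy5
+
+Note that once a container has been converted into a rootfs container
+(e.g. as part of a not-in-place upgrade), the "stop" and "destroy"
+operations no longer apply to it.
+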
+## What is a "rootfs container"?
+
+A "rootfs container" is a collection of ZFS datasets that can be used as
+the "root filesystem" of the appliance. This includes a dataset for "/"
+of the appliance, but also separate datasets for "/export/home" and
+"/var/delphix".
+
+Here's an example of the datasets for a rootfs container:
+
+    $ sudo zfs list -r -d 1 -o name,mountpoint,origin rpool/ROOT/delphix.Oy4JfnU
+    NAME                             MOUNTPOINT  ORIGIN
+    rpool/ROOT/delphix.Oy4JfnU       none        -
+    rpool/ROOT/delphix.Oy4JfnU/data  legacy      rpool/ROOT/delphix.JNHeZad/data@delphix.Oy4JfnU
+    rpool/ROOT/delphix.Oy4JfnU/home  legacy      rpool/ROOT/delphix.JNHeZad/home@delphix.Oy4JfnU
+    rpool/ROOT/delphix.Oy4JfnU/root  /           -
+
+## What is the difference between upgrade and rootfs containers?
+
+The two main distinctions between an upgrade container and a rootfs
+container are the following:
+
+ 1. The mountpoint of the container's "root" dataset is different; for
+    an upgrade container it'll be "/var/lib/machines/...", whereas it'll
+    be "/" for a rootfs container.
+
+ 2. Due to the first difference, a rootfs container can be used as the
+    root filesystem of the appliance when the appliance boots. An
+    upgrade container cannot be used to boot the appliance; it can only
+    be used to start a systemd-nspawn machine container.
diff --git a/upgrade/README.md b/upgrade/README.md
index ed7dbe85..87ae81ea 100644
--- a/upgrade/README.md
+++ b/upgrade/README.md
@@ -40,6 +40,10 @@ Log into that VM using the "delphix" user, and run these commands:
     $ sudo unpack-image internal-dev.upgrade.tar.gz
     $ sudo /var/dlpx-update/latest/upgrade -v in-place
 
+## FAQ
+
+See the [FAQ](FAQ.md) for answers to commonly asked questions.
+
 ## Statement of Support
 
 This software is provided as-is, without warranty of any kind or
diff --git a/upgrade/upgrade-scripts/rootfs-container b/upgrade/upgrade-scripts/rootfs-container
new file mode 100755
index 00000000..9270c32d
--- /dev/null
+++ b/upgrade/upgrade-scripts/rootfs-container
@@ -0,0 +1,290 @@
+#!/bin/bash
+#
+# Copyright 2019 Delphix
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+. "${BASH_SOURCE%/*}/common.sh"
+
+CONTAINER=
+
+function get_dataset_snapshots() {
+	zfs list -rt snapshot -Hpo name "$1"
+}
+
+function get_snapshot_clones() {
+	zfs get clones -Hpo value "$1"
+}
+
+function delete() {
+	zfs list "rpool/ROOT/$CONTAINER/root" &>/dev/null ||
+		die "rootfs container '$CONTAINER' does not exist"
+
+	MOUNTED=$(zfs get mounted -Hpo value "rpool/ROOT/$CONTAINER/root")
+	[[ "$MOUNTED" == "no" ]] ||
+		die "cannot delete mounted rootfs container: '$CONTAINER'"
+
+	#
+	# In the common case, the "root" dataset should never have
+	# clones of it. With that said, it is possible for clones to
+	# exist in a couple of edge cases:
+	#
+	# 1. When an "in-place" upgrade occurs, it'll clone the root
+	#    dataset during the upgrade verification phase; i.e. when
+	#    creating the in-place upgrade container. Thus, if there
+	#    happens to be a concurrent in-place upgrade running, a
+	#    clone of the "root" dataset could exist.
+	#
+	# 2. If an "in-place" upgrade container has been manually
+	#    created, i.e. explicitly via the "upgrade-container"
+	#    script, then it's possible for a clone of the "root"
+	#    dataset to exist.
+	#
+	# Thus, to handle these edge cases, we abort the delete
+	# operation if a clone of "root" exists.
+	#
+	for snap in $(get_dataset_snapshots "rpool/ROOT/$CONTAINER/root"); do
+		for clone in $(get_snapshot_clones "$snap"); do
+			die \
+				"cannot delete rootfs container: '$CONTAINER'," \
+				"'root' dataset clone exists: '$clone'"
+		done
+	done
+
+	#
+	# The "data" and "home" datasets of a rootfs container may have
+	# been cloned as part of an upgrade. Thus, in order to delete
+	# this specific rootfs container, we need to promote any clones
+	# that exist. Otherwise we won't be able to destroy the snapshots
+	# for the "data" and "home" datasets, and thus can't destroy the
+	# datasets themselves.
+	#
+	for snap in \
+		$(get_dataset_snapshots "rpool/ROOT/$CONTAINER/data") \
+		$(get_dataset_snapshots "rpool/ROOT/$CONTAINER/home"); do
+		for clone in $(get_snapshot_clones "$snap"); do
+			zfs promote "$clone" ||
+				die "'zfs promote $clone' failed"
+		done
+	done
+
+	zfs destroy -r "rpool/ROOT/$CONTAINER" ||
+		die "'zfs destroy -r rpool/ROOT/$CONTAINER' failed"
+}
+
+function get_bootloader_devices() {
+	#
+	# When installing/updating the bootloader during upgrade, we
+	# need to determine which devices are being used as bootloader
+	# devices. We determine this by listing the devices used by the
+	# rpool. Additionally, we have to filter out devices that could
+	# be attached to the rpool, but would never be used for the
+	# bootloader. Finally, we need to strip off any partition
+	# information, since we want to install the bootloader directly
+	# to the device, rather than to a partition of the device.
+	#
+	zpool list -vH rpool |
+		awk '! /rpool|mirror|replacing|spare/ {print $1}' |
+		sed 's/[0-9]*$//'
+}
+
+function set_bootfs_not_mounted_cleanup() {
+	umount "/var/lib/machines/$CONTAINER/mnt" ||
+		warn "'umount' of '/var/lib/machines/$CONTAINER/mnt' failed"
+
+	for dir in /proc /sys /dev; do
+		umount -R "/var/lib/machines/${CONTAINER}${dir}" ||
+			warn "'umount -R' of '$dir' failed"
+	done
+
+	zfs umount "rpool/ROOT/$CONTAINER/root" ||
+		warn "'zfs umount rpool/ROOT/$CONTAINER/root' failed"
+	zfs set mountpoint=/ "rpool/ROOT/$CONTAINER/root" ||
+		warn "'zfs set mountpoint rpool/ROOT/$CONTAINER/root' failed"
+}
+
+#
+# This function assumes the rootfs container specified is not currently
+# mounted; see the "set_bootfs_mounted" function for doing this same
+# operation, but for an already mounted rootfs container.
+#
+function set_bootfs_not_mounted() {
+	trap set_bootfs_not_mounted_cleanup EXIT
+
+	zfs set mountpoint="/var/lib/machines/$CONTAINER" \
+		"rpool/ROOT/$CONTAINER/root" ||
+		die "'zfs set mountpoint rpool/ROOT/$CONTAINER/root' failed"
+
+	zfs mount "rpool/ROOT/$CONTAINER/root" ||
+		die "'zfs mount rpool/ROOT/$CONTAINER/root' failed"
+
+	mount --make-slave "/var/lib/machines/$CONTAINER" ||
+		die "'mount --make-slave /var/lib/machines/$CONTAINER' failed"
+
+	for dir in /proc /sys /dev; do
+		mount --rbind "$dir" "/var/lib/machines/${CONTAINER}${dir}" ||
+			die "'mount --rbind' of '$dir' failed"
+		mount --make-rslave "/var/lib/machines/${CONTAINER}${dir}" ||
+			die "'mount --make-rslave' of '$dir' failed"
+	done
+
+	mount -t zfs rpool/grub "/var/lib/machines/$CONTAINER/mnt" ||
+		die "'mount -t zfs rpool/grub' failed for '$CONTAINER'"
+
+	for dev in $(get_bootloader_devices); do
+		[[ -e "/dev/$dev" ]] ||
+			die "bootloader device '/dev/$dev' not found"
+
+		[[ -b "/dev/$dev" ]] ||
+			die "bootloader device '/dev/$dev' not block device"
+
+		chroot "/var/lib/machines/$CONTAINER" \
+			grub-install --root-directory=/mnt "/dev/$dev" ||
+			die "'grub-install' for '$dev' failed in '$CONTAINER'"
+	done
+
+	chroot "/var/lib/machines/$CONTAINER" \
+		grub-mkconfig -o /mnt/boot/grub/grub.cfg ||
+		die "'grub-mkconfig' failed in '$CONTAINER'"
+
+	set_bootfs_not_mounted_cleanup
+	trap - EXIT
+
+	#
+	# The mountpoint for the root filesystem should have been reset
+	# back to "/" in the cleanup function called above. Since that
+	# function will only "warn" when setting the mountpoint fails,
+	# we verify the mountpoint here, and "die" if it's incorrect.
+	#
+	MOUNTPOINT=$(zfs get mountpoint -Hpo value "rpool/ROOT/$CONTAINER/root")
+	[[ "$MOUNTPOINT" == "/" ]] ||
+		die "incorrect mountpoint for '$CONTAINER' root: '$MOUNTPOINT'"
+}
+
+function set_bootfs_mounted_cleanup() {
+	umount "/mnt" || warn "'umount' of '/mnt' failed"
+}
+
+#
+# This function assumes the rootfs container specified is currently
+# mounted; see the "set_bootfs_not_mounted" function for doing this same
+# operation, but for a rootfs container that's not mounted.
+#
+function set_bootfs_mounted() {
+	trap set_bootfs_mounted_cleanup EXIT
+
+	#
+	# Since this function assumes the rootfs container is mounted,
+	# we verify that it's mounted as the root filesystem; otherwise
+	# the logic below will fail.
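+	#
+	# Note: because the container is already mounted at "/", the
+	# "grub-install" and "grub-mkconfig" commands below run directly
+	# on the running system, rather than inside a chroot as in
+	# "set_bootfs_not_mounted" above.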
+	#
+	MOUNTPOINT=$(zfs get mountpoint -Hpo value "rpool/ROOT/$CONTAINER/root")
+	[[ "$MOUNTPOINT" == "/" ]] ||
+		die "incorrect mountpoint for '$CONTAINER' root: '$MOUNTPOINT'"
+
+	mount -t zfs rpool/grub "/mnt" ||
+		die "'mount -t zfs rpool/grub' failed for '$CONTAINER'"
+
+	for dev in $(get_bootloader_devices); do
+		[[ -e "/dev/$dev" ]] ||
+			die "bootloader device '/dev/$dev' not found"
+
+		[[ -b "/dev/$dev" ]] ||
+			die "bootloader device '/dev/$dev' not block device"
+
+		grub-install --root-directory=/mnt "/dev/$dev" ||
+			die "'grub-install' for '$dev' failed in '$CONTAINER'"
+	done
+
+	grub-mkconfig -o /mnt/boot/grub/grub.cfg ||
+		die "'grub-mkconfig' failed in '$CONTAINER'"
+
+	set_bootfs_mounted_cleanup
+	trap - EXIT
+}
+
+#
+# The purpose of this function is to convert an existing rootfs container
+# (specified by the $CONTAINER global variable) to be used as the boot
+# filesystem by the appliance; i.e. after calling this function, the
+# specified rootfs container will be used as the appliance's root
+# filesystem, the next time the appliance boots. This is done by
+# updating the appliance's bootloader (i.e. grub) to point to the
+# container's filesystem.
+#
+function set_bootfs() {
+	zfs list "rpool/ROOT/$CONTAINER/root" &>/dev/null ||
+		die "rootfs container '$CONTAINER' does not exist"
+
+	MOUNTED=$(zfs get mounted -Hpo value "rpool/ROOT/$CONTAINER/root")
+	case "$MOUNTED" in
+	yes)
+		set_bootfs_mounted
+		;;
+	no)
+		set_bootfs_not_mounted
+		;;
+	*)
+		die "'zfs get mounted' returned unexpected value: '$MOUNTED'"
+		;;
+	esac
+}
+
+function usage() {
+	echo "$(basename "$0"): $*" >&2
+
+	PREFIX_STRING="Usage: $(basename "$0")"
+	PREFIX_NCHARS=$(echo -n "$PREFIX_STRING" | wc -c)
+	PREFIX_SPACES=$(printf "%.s " $(seq "$PREFIX_NCHARS"))
+
+	echo "$PREFIX_STRING delete <container>"
+	echo "$PREFIX_SPACES set-bootfs <container>"
+
+	exit 2
+}
+
+[[ "$EUID" -ne 0 ]] && die "must be run as root"
+
+case "$1" in
+delete)
+	[[ $# -lt 2 ]] && usage "too few arguments specified"
+	[[ $# -gt 2 ]] && usage "too many arguments specified"
+
+	CONTAINER="$2"
+	delete
+	;;
+set-bootfs)
+	[[ $# -lt 2 ]] && usage "too few arguments specified"
+	[[ $# -gt 2 ]] && usage "too many arguments specified"
+
+	#
+	# We only have a single bootloader on any given appliance, so we
+	# need to ensure that only a single process is attempting to
+	# update the bootloader at any given time. The locking done here
+	# is to help prevent accidental corruption of the bootloader,
+	# by ensuring only a single invocation of this script can set
+	# the boot filesystem at any given time.
+	#
+	if [[ "$SET_BOOTFS_LOCKED" != "true" ]]; then
+		exec env SET_BOOTFS_LOCKED="true" \
+			flock -e "/var/run/delphix-set-bootfs-lock" "$0" "$@"
+	fi
+
+	CONTAINER="$2"
+	set_bootfs
+	;;
+*)
+	usage "invalid option specified: '$1'"
+	;;
+esac
diff --git a/upgrade/upgrade-scripts/upgrade b/upgrade/upgrade-scripts/upgrade
index e20996d0..5dfed14f 100755
--- a/upgrade/upgrade-scripts/upgrade
+++ b/upgrade/upgrade-scripts/upgrade
@@ -17,6 +17,13 @@
 
 . "${BASH_SOURCE%/*}/common.sh"
 
+#
+# Any change to this value needs to account for existing rootfs
+# container datasets that may already have this property set; thus,
+# changes here may require backwards-compatibility handling.
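+#
+# For example (illustrative values): after a not-in-place upgrade from
+# rootfs container "delphix.JNHeZad" to "delphix.Oy4JfnU", the new
+# container would carry the name of the previous container:
+#
+#     $ zfs get -Hpo value com.delphix:rollback-container \
+#         rpool/ROOT/delphix.Oy4JfnU
+#     delphix.JNHeZad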
+#
+ROLLBACK_PROPERTY="com.delphix:rollback-container"
+
 IMAGE_PATH=$(get_image_path)
 [[ -n "$IMAGE_PATH" ]] || die "failed to determine image path"
 
@@ -42,6 +49,7 @@ function usage() {
 
 	echo "$PREFIX_STRING [-v] in-place"
 	echo "$PREFIX_SPACES [-v] not-in-place"
+	echo "$PREFIX_SPACES rollback"
 
 	exit 2
 }
@@ -136,9 +144,23 @@ function cleanup_not_in_place_upgrade() {
 	return "$rc"
 }
 
+function get_mounted_rootfs_container() {
+	basename "$(dirname "$(zfs list -Hpo name /)")"
+}
+
 function upgrade_not_in_place() {
 	trap cleanup_not_in_place_upgrade EXIT
 
+	#
+	# We query the mounted rootfs container name here so that, if we
+	# can't get this information for whatever reason, we can detect
+	# the error and abort now, rather than having to handle it later,
+	# when recovery might require more work.
+	#
+	MOUNTED_CONTAINER="$(get_mounted_rootfs_container)"
+	[[ -n "$MOUNTED_CONTAINER" ]] ||
+		die "failed to determine mounted rootfs container"
+
 	CONTAINER=$("$IMAGE_PATH/upgrade-container" create not-in-place)
 	[[ -n "$CONTAINER" ]] ||
 		die "failed to create upgrade container"
@@ -166,17 +188,51 @@
 		die "failed to stop '$CONTAINER'"
 
 	#
-	# After this point, we no longer want to execute the normal
-	# cleanup handler on any failure. The following command will
-	# convert the container to be used as the next boot/root
-	# filesystem, and thus the "stop" and "destroy" container logic
-	# will no longer work. So, if the "convert-to-bootfs" script
-	# fails, we'll need to manually rectify the situation.
+	# After this point, we no longer want to execute the normal cleanup
+	# handler on any failure. The following command will convert the
+	# container to be used as the next boot/root filesystem, and thus
+	# the "stop" and "destroy" container logic will no longer work. So,
+	# if either the "convert-to-rootfs" or "set-bootfs" logic fails,
+	# we'll need to manually rectify the situation.
 	#
 	trap - EXIT
 
-	"$IMAGE_PATH/upgrade-container" convert-to-bootfs "$CONTAINER" ||
-		die "failed to convert-to-bootfs '$CONTAINER'"
+	"$IMAGE_PATH/upgrade-container" convert-to-rootfs "$CONTAINER" ||
+		die "failed to convert-to-rootfs '$CONTAINER'"
+
+	#
+	# In order for the "rollback" command to work, we need to know
+	# which rootfs container to roll back to. Thus, we embed this
+	# information into the rootfs container using a ZFS property;
+	# rollback will read this information to determine which rootfs
+	# container to roll back to.
+	#
+	zfs set \
+		"$ROLLBACK_PROPERTY=$MOUNTED_CONTAINER" \
+		"rpool/ROOT/$CONTAINER" ||
+		die "'zfs set com.delphix:rollback-container' failed"
+
+	"$IMAGE_PATH/rootfs-container" set-bootfs "$CONTAINER" ||
+		die "failed to set-bootfs '$CONTAINER'"
+}
+
+function rollback() {
+	MOUNTED_CONTAINER="$(get_mounted_rootfs_container)"
+	[[ -n "$MOUNTED_CONTAINER" ]] ||
+		die "failed to determine mounted rootfs container"
+
+	ROLLBACK_CONTAINER="$(zfs get -Hpo value \
+		"$ROLLBACK_PROPERTY" "rpool/ROOT/$MOUNTED_CONTAINER")"
+	[[ -n "$ROLLBACK_CONTAINER" && "$ROLLBACK_CONTAINER" != "-" ]] ||
+		die "failed to determine rollback rootfs container"
+
+	#
+	# The "rollback" operation is nothing more than "set-bootfs" of a
+	# specific rootfs container; now that we've determined that
+	# container, we simply pass it to "set-bootfs" below.
+ # + "$IMAGE_PATH/rootfs-container" set-bootfs "$ROLLBACK_CONTAINER" || + die "failed to set-bootfs '$ROLLBACK_CONTAINER'" } [[ "$EUID" -ne 0 ]] && die "must be run as root" @@ -205,6 +261,10 @@ not-in-place) verify_upgrade_is_allowed upgrade_not_in_place "$@" ;; +rollback) + shift 1 + rollback "$@" + ;; *) usage "invalid option -- '$1'" ;; diff --git a/upgrade/upgrade-scripts/upgrade-container b/upgrade/upgrade-scripts/upgrade-container index 09629e9c..7f438176 100755 --- a/upgrade/upgrade-scripts/upgrade-container +++ b/upgrade/upgrade-scripts/upgrade-container @@ -364,81 +364,16 @@ function run() { --quiet --wait --pipe -- "$@" } -function get_bootloader_devices() { - # - # When installing/updating the bootloader during upgrade, we - # need to determine which devices are being used as bootloader - # devices. We determine this by listing the devices used by the - # rpool. Additionally, we have to filter out devices that could - # be attached to the rpool, but would never be used for the - # bootloader. Finally, we need to strip off any parition - # information, since we want to install the bootloader directly - # to the device, rather than to a partition of the device. - # - zpool list -vH rpool | - awk '! /rpool|mirror|replacing|spare/ {print $1}' | - sed 's/[0-9]*$//' -} - -function convert_to_bootfs_cleanup() { - umount "/var/lib/machines/$CONTAINER/mnt" || - warn "'umount' of '/var/lib/machines/$CONTAINER/mnt' failed" - - for dir in /proc /sys /dev; do - umount -R "/var/lib/machines/${CONTAINER}${dir}" || - warn "'umount -R' of '$dir' failed" - done -} - -# -# The purpose of this function is to convert an existing upgrade -# container (specified by the $CONTAINER global variable) into the next -# boot filesystem used by the appliance; i.e. after calling this -# function, the container's root filesystem will be used as the -# appliance's root filesystem, the next time the appliance boots. -# -# This is done by updating the appliance's bootloader (i.e. grub) to -# point to the container's filesystem, along with setting the mountpoint -# of the filesystem to be "/" instead of "/var/lib/machines/$CONTAINER". -# This effectively removes the container, since systemd-nspawn only -# looks in /var/lib/machines, so it'll no longer find the container -# after the mountpoint changes. 
-# -function convert_to_bootfs() { - trap convert_to_bootfs_cleanup EXIT - - mount --make-slave "/var/lib/machines/$CONTAINER" || - die "'mount --make-slave /var/lib/machines/$CONTAINER' failed" - - for dir in /proc /sys /dev; do - mount --rbind "$dir" "/var/lib/machines/${CONTAINER}${dir}" || - die "'mount --rbind' of '$dir' failed" - mount --make-rslave "/var/lib/machines/${CONTAINER}${dir}" || - die "'mount --make-rslave' of '$dir' failed" - done - - mount -t zfs rpool/grub "/var/lib/machines/$CONTAINER/mnt" || - die "'mount -t zfs rpool/grub' failed for '$CONTAINER'" - - for dev in $(get_bootloader_devices); do - [[ -e "/dev/$dev" ]] || - die "bootloader device '/dev/$dev' not found" - - [[ -b "/dev/$dev" ]] || - die "bootloader device '/dev/$dev' not block device" - - chroot "/var/lib/machines/$CONTAINER" \ - grub-install --root-directory=/mnt "/dev/$dev" || - die "'grub-install' for '$dev' failed in '$CONTAINER'" - done - - chroot "/var/lib/machines/$CONTAINER" \ - grub-mkconfig -o /mnt/boot/grub/grub.cfg || - die "'grub-mkconfig' failed in '$CONTAINER'" - - convert_to_bootfs_cleanup - trap - EXIT - +function convert_to_rootfs() { + # + # We're relying on the "mountpoint" property for the "data" and + # "home" datasets already being set properly when those datasets + # are first created, so we don't explicitly set them here. + # + # Additionally, we're also relying on the "canmount" property of + # the "root" dataset already being set properly, so we don't + # explicitly set that here, either. + # zfs umount "rpool/ROOT/$CONTAINER/root" || die "'zfs umount rpool/ROOT/$CONTAINER/root' failed" zfs set mountpoint=/ "rpool/ROOT/$CONTAINER/root" || @@ -582,7 +517,7 @@ function usage() { echo "$PREFIX_SPACES stop " echo "$PREFIX_SPACES destroy " echo "$PREFIX_SPACES run " - echo "$PREFIX_SPACES convert-to-bootfs " + echo "$PREFIX_SPACES convert-to-rootfs " echo "$PREFIX_SPACES migrate-configuration " exit 2 @@ -629,11 +564,11 @@ run) shift 2 run "$@" ;; -convert-to-bootfs) +convert-to-rootfs) [[ $# -lt 2 ]] && usage "too few arguments specified" [[ $# -gt 2 ]] && usage "too many arguments specified" CONTAINER="$2" - convert_to_bootfs + convert_to_rootfs ;; migrate-configuration) [[ $# -lt 2 ]] && usage "too few arguments specified"