diff --git a/README.md b/README.md index 398abd0..1a739e9 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # ZFSBackup -DISCLAIMER: This is a work in progress - USE AT YOUR OWN RISK! +DISCLAIMER: This is a work in progress and still considered beta though I personally use this in a production environment and have tested it for my own use cases (looking for feedback on other people's experience before considering this "production ready"). ## Overview: @@ -33,8 +33,34 @@ The compression algorithm builtin to the software is a parallel gzip ([pgzip](ht ### Encryption/Signing: The PGP algorithm is used for encryption/signing. The cipher used is AES-256. +## Installation + +Download the latest binaries from the [releases](https://github.com/someone1/zfsbackup-go/releases) section or compile your own by: + +```shell +go get github.com/someone1/zfsbackup-go +``` + +The compiled binary should be in your $GOPATH/bin directory. + ## Usage +### "Smart" Options: + +Use the `--full` option to auto select the most recent snapshot on the target volume to do a full backup of: + + $ ./zfsbackup send --encryptTo user@domain.com --signFrom user@domain.com --publicKeyRingPath pubring.gpg.asc --secretKeyRingPath secring.gpg.asc --full Tank/Dataset gs://backup-bucket-target,s3://another-backup-target + +Use the `--increment` option to auto select the most recent snapshot on the target volume to do an incremental snapshot of the most recent snapshot found in the target destination: + + $ ./zfsbackup send --encryptTo user@domain.com --signFrom user@domain.com --publicKeyRingPath pubring.gpg.asc --secretKeyRingPath secring.gpg.asc --increment Tank/Dataset gs://backup-bucket-target,s3://another-backup-target + +Use the `--fullIfOlderThan` option to auto select the most recent snapshot on the target volume to do an incremental snapshot of the most recent snapshot found in the target destination, unless that snapshot is older than the given duration, in which case a full backup is taken: + + $ ./zfsbackup send --encryptTo user@domain.com --signFrom user@domain.com 
--publicKeyRingPath pubring.gpg.asc --secretKeyRingPath secring.gpg.asc --fullIfOlderThan 720h Tank/Dataset gs://backup-bucket-target,s3://another-backup-target + +### Manual Options: + Full backup example: $ ./zfsbackup send --encryptTo user@domain.com --signFrom user@domain.com --publicKeyRingPath pubring.gpg.asc --secretKeyRingPath secring.gpg.asc Tank/Dataset@snapshot-20170101 gs://backup-bucket-target @@ -57,8 +83,9 @@ Notes: * PGP Passphrase will be prompted during execution if it is not found in the PGP_PASSPHRASE environmental variable. * `--maxFileBuffer=0` will disable parallel processing, chunking, multiple destinations, and upload hash verification but will use virtually no disk space. * For S3: Specify Standard/Bulk/Expedited in the AWS_S3_GLACIER_RESTORE_TIER environmental variable to change Glacier restore option (default: Bulk) +* A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". -Example output for top level command: +Help Output: ```shell $ ./zfsbackup @@ -74,12 +101,12 @@ Usage: zfsbackup [command] Available Commands: - clean Clean will delete any objects in the destination that are not found in the manifest files found in the destination. + clean Clean will delete any objects in the target that are not found in the manifest files found in the target. help Help about any command - list List all backup sets found at the provided destination. + list List all backup sets found at the provided target. receive receive will restore a snapshot of a ZFS volume similar to how the "zfs recv" command works. send send will backup of a ZFS volume similar to how the "zfs send" command works. 
- verify Verify will ensure that the backupset for the given snapshot exists in the destination + verify Verify will ensure that the backupset for the given snapshot exists in the target version Print the version of zfsbackup in use and relevant compile information Flags: @@ -93,9 +120,49 @@ Flags: --secretKeyRingPath string the path to the PGP secret key ring --signFrom string the email of the user to sign on behalf of from the provided private keyring. --workingDirectory string the working directory path for zfsbackup. (default "~/.zfsbackup") + --zfsPath string the path to the zfs executable. (default "zfs") Use "zfsbackup [command] --help" for more information about a command. ``` +Send Options: + +```shell +$ ./zfsbackup send +Usage: + zfsbackup send [flags] filesystem|volume|snapshot uri(s) + +Flags: + --compressionLevel int the compression level to use with the compressor. Valid values are between 1-9. (default 6) + -D, --deduplication See the -D flag for zfs send for more information. + --full set this flag to take a full backup of the specified volume using the most recent snapshot. + --fullIfOlderThan duration set this flag to do an incremental backup of the most recent snapshot from the most recent snapshot found in the target unless the it's been greater than the time specified in this flag, then do a full backup. (default -1m0s) + -h, --help help for send + --increment set this flag to do an incremental backup of the most recent snapshot from the most recent snapshot found in the target. + -i, --incremental string See the -i flag on zfs send for more information + -I, --intermediary string See the -I flag on zfs send for more information + --maxBackoffTime duration the maximum delay you'd want a worker to sleep before retrying an upload. (default 30m0s) + --maxFileBuffer int the maximum number of files to have active during the upload process. Should be set to at least the number of max parallel uploads. 
Set to 0 to bypass local storage and upload straight to your destination - this will limit you to a single destination and disable any hash checks for the upload where available. (default 5) + --maxParallelUploads int the maximum number of uploads to run in parallel. (default 4) + --maxRetryTime duration the maximum time that can elapse when retrying a failed upload. Use 0 for no limit. (default 12h0m0s) + --maxUploadSpeed uint the maximum upload speed (in KB/s) the program should use between all upload workers. Use 0 for no limit + -p, --properties See the -p flag on zfs send for more information. + -R, --replication See the -R flag on zfs send for more information + --resume set this flag to true when you want to try and resume a previously cancled or failed backup. It is up to the caller to ensure the same command line arguements are provided between the original backup and the resumed one. + --separator string the separator to use between object component names. (default "|") + --volsize uint the maximum size (in MiB) a volume should be before splitting to a new volume. Note: zfsbackup will try its best to stay close/under this limit but it is not garaunteed. (default 200) + +Global Flags: + --compressor string specify to use the internal (parallel) gzip implementation or an external binary (e.g. gzip, bzip2, pigz, lzma, xz, etc. Syntax must be similiar to the gzip compression tool) to compress the stream for storage. Please take into consideration time, memory, and CPU usage for any of the compressors used. (default "internal") + --encryptTo string the email of the user to encrypt the data to from the provided public keyring. + --logLevel string this controls the verbosity level of logging. Possible values are critical, error, warning, notice, info, debug. (default "notice") + --manifestPrefix string the prefix to use for all manifest files. (default "manifests") + --numCores int number of CPU cores to utilize. 
Do not exceed the number of CPU cores on the system. (default 2) + --publicKeyRingPath string the path to the PGP public key ring + --secretKeyRingPath string the path to the PGP secret key ring + --signFrom string the email of the user to sign on behalf of from the provided private keyring. + --workingDirectory string the working directory path for zfsbackup. (default "~/.zfsbackup") + --zfsPath string the path to the zfs executable. (default "zfs") +``` ## TODOs: * Make PGP cipher configurable. @@ -107,5 +174,3 @@ Use "zfsbackup [command] --help" for more information about a command. * Fix error handling (at least omit panic dumps!) * Add delete feature * Appease linters - - diff --git a/backup/backup.go b/backup/backup.go index 844462c..f0f9e52 100644 --- a/backup/backup.go +++ b/backup/backup.go @@ -42,6 +42,115 @@ import ( "github.com/someone1/zfsbackup-go/helpers" ) +// ProcessSmartOptions will compute the snapshots to use +func ProcessSmartOptions(jobInfo *helpers.JobInfo) error { + snapshots, err := helpers.GetSnapshots(context.Background(), jobInfo.VolumeName) + if err != nil { + return err + } + jobInfo.BaseSnapshot = snapshots[0] + if jobInfo.Full { + // TODO: Check if we already have a full backup for this snapshot in the destination(s) + return nil + } + lastComparableSnapshots := make([]*helpers.SnapshotInfo, len(jobInfo.Destinations)) + lastBackup := make([]*helpers.SnapshotInfo, len(jobInfo.Destinations)) + for idx := range jobInfo.Destinations { + destBackups, derr := getBackupsForTarget(context.Background(), jobInfo.VolumeName, jobInfo.Destinations[idx], jobInfo) + if derr != nil { + return derr + } + if len(destBackups) == 0 { + continue + } + lastBackup[idx] = &destBackups[0].BaseSnapshot + if jobInfo.Incremental { + lastComparableSnapshots[idx] = &destBackups[0].BaseSnapshot + } + if jobInfo.FullIfOlderThan != -1*time.Minute { + for _, bkp := range destBackups { + if bkp.IncrementalSnapshot.Name == "" { + lastComparableSnapshots[idx] = 
&bkp.BaseSnapshot + break + } + } + } + } + + var lastNotEqual bool + // Verify that all "comparable" snapshots are the same across destinations + for i := 1; i < len(lastComparableSnapshots); i++ { + if !lastComparableSnapshots[i-1].Equal(lastComparableSnapshots[i]) { + return fmt.Errorf("destinations are out of sync, cannot continue with smart option") + } + + if !lastNotEqual && !lastBackup[i-1].Equal(lastBackup[i]) { + lastNotEqual = true + } + } + + // Now select the proper job options and continue + if jobInfo.Incremental { + if lastComparableSnapshots[0] == nil { + return fmt.Errorf("no snapshot to increment from - try doing a full backup instead") + } + if lastComparableSnapshots[0].Equal(&snapshots[0]) { + return fmt.Errorf("no new snapshot to sync") + } + jobInfo.IncrementalSnapshot = *lastComparableSnapshots[0] + } + + if jobInfo.FullIfOlderThan != -1*time.Minute { + if lastComparableSnapshots[0] == nil { + // No previous full backup, so do one + helpers.AppLogger.Infof("No previous full backup found, performing full backup.") + return nil + } + if snapshots[0].CreationTime.Sub(lastComparableSnapshots[0].CreationTime) > jobInfo.FullIfOlderThan { + // Been more than the allotted time, do a full backup + helpers.AppLogger.Infof("Last Full backup was %v and is more than %v before the most recent snapshot, performing full backup.", lastComparableSnapshots[0].CreationTime, jobInfo.FullIfOlderThan) + return nil + } + if lastNotEqual { + return fmt.Errorf("want to do an incremental backup but last incremental backup at destinations do not match") + } + if lastBackup[0].Equal(&snapshots[0]) { + return fmt.Errorf("no new snapshot to sync") + } + jobInfo.IncrementalSnapshot = *lastBackup[0] + } + return nil +} + +func getBackupsForTarget(ctx context.Context, volume, target string, jobInfo *helpers.JobInfo) ([]*helpers.JobInfo, error) { + // Prepare the backend client + backend := prepareBackend(ctx, jobInfo, target, nil) + + // Get the local cache dir + 
localCachePath := getCacheDir(target) + + // Sync the local cache + safeManifests, _ := syncCache(ctx, jobInfo, localCachePath, backend) + + // Read in Manifests and display + decodedManifests := make([]*helpers.JobInfo, 0, len(safeManifests)) + for _, manifest := range safeManifests { + manifestPath := filepath.Join(localCachePath, manifest) + decodedManifest, oerr := readManifest(ctx, manifestPath, jobInfo) + if oerr != nil { + return nil, oerr + } + if strings.Compare(decodedManifest.VolumeName, volume) == 0 { + decodedManifests = append(decodedManifests, decodedManifest) + } + } + + sort.SliceStable(decodedManifests, func(i, j int) bool { + return decodedManifests[i].BaseSnapshot.CreationTime.After(decodedManifests[j].BaseSnapshot.CreationTime) + }) + return decodedManifests, nil +} + // Backup will iniate a backup with the provided configuration. func Backup(jobInfo *helpers.JobInfo) { defer helpers.HandleExit() diff --git a/backup/list.go b/backup/list.go index e57ff20..4f5b92d 100644 --- a/backup/list.go +++ b/backup/list.go @@ -63,13 +63,6 @@ func List(jobInfo *helpers.JobInfo) { sort.SliceStable(decodedManifests, func(i, j int) bool { cmp := strings.Compare(decodedManifests[i].VolumeName, decodedManifests[j].VolumeName) if cmp == 0 { - if decodedManifests[i].IncrementalSnapshot.Name != "" && decodedManifests[j].IncrementalSnapshot.Name != "" { - return decodedManifests[i].IncrementalSnapshot.CreationTime.Before(decodedManifests[j].IncrementalSnapshot.CreationTime) - } else if decodedManifests[i].IncrementalSnapshot.Name != "" { - return decodedManifests[i].IncrementalSnapshot.CreationTime.Before(decodedManifests[j].BaseSnapshot.CreationTime) - } else if decodedManifests[j].IncrementalSnapshot.Name != "" { - return decodedManifests[i].BaseSnapshot.CreationTime.Before(decodedManifests[j].IncrementalSnapshot.CreationTime) - } return decodedManifests[i].BaseSnapshot.CreationTime.Before(decodedManifests[j].BaseSnapshot.CreationTime) } return cmp < 0 diff --git 
a/cmd/root.go b/cmd/root.go index d073854..786486c 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -79,6 +79,7 @@ func init() { RootCmd.PersistentFlags().StringVar(&jobInfo.Compressor, "compressor", "internal", "specify to use the internal (parallel) gzip implementation or an external binary (e.g. gzip, bzip2, pigz, lzma, xz, etc. Syntax must be similiar to the gzip compression tool) to compress the stream for storage. Please take into consideration time, memory, and CPU usage for any of the compressors used.") RootCmd.PersistentFlags().StringVar(&jobInfo.EncryptTo, "encryptTo", "", "the email of the user to encrypt the data to from the provided public keyring.") RootCmd.PersistentFlags().StringVar(&jobInfo.SignFrom, "signFrom", "", "the email of the user to sign on behalf of from the provided private keyring.") + RootCmd.PersistentFlags().StringVar(&helpers.ZFSPath, "zfsPath", "zfs", "the path to the zfs executable.") passphrase = []byte(os.Getenv("PGP_PASSPHRASE")) } diff --git a/cmd/send.go b/cmd/send.go index e5c0b7d..49dce61 100644 --- a/cmd/send.go +++ b/cmd/send.go @@ -79,6 +79,9 @@ func init() { sendCmd.Flags().Uint64Var(&jobInfo.VolumeSize, "volsize", 200, "the maximum size (in MiB) a volume should be before splitting to a new volume. Note: zfsbackup will try its best to stay close/under this limit but it is not garaunteed.") sendCmd.Flags().IntVar(&jobInfo.CompressionLevel, "compressionLevel", 6, "the compression level to use with the compressor. Valid values are between 1-9.") sendCmd.Flags().BoolVar(&jobInfo.Resume, "resume", false, "set this flag to true when you want to try and resume a previously cancled or failed backup. 
It is up to the caller to ensure the same command line arguements are provided between the original backup and the resumed one.") + sendCmd.Flags().BoolVar(&jobInfo.Full, "full", false, "set this flag to take a full backup of the specified volume using the most recent snapshot.") + sendCmd.Flags().BoolVar(&jobInfo.Incremental, "increment", false, "set this flag to do an incremental backup of the most recent snapshot from the most recent snapshot found in the target.") + sendCmd.Flags().DurationVar(&jobInfo.FullIfOlderThan, "fullIfOlderThan", -1*time.Minute, "set this flag to do an incremental backup of the most recent snapshot from the most recent snapshot found in the target unless the it's been greater than the time specified in this flag, then do a full backup.") sendCmd.Flags().IntVar(&jobInfo.MaxFileBuffer, "maxFileBuffer", 5, "the maximum number of files to have active during the upload process. Should be set to at least the number of max parallel uploads. Set to 0 to bypass local storage and upload straight to your destination - this will limit you to a single destination and disable any hash checks for the upload where available.") sendCmd.Flags().IntVar(&jobInfo.MaxParallelUploads, "maxParallelUploads", 4, "the maximum number of uploads to run in parallel.") @@ -98,18 +101,7 @@ func updateJobInfo(args []string) { } parts := strings.Split(args[0], "@") - if len(parts) != 2 { - helpers.AppLogger.Errorf("Invalid base snapshot provided. 
Expected format @, got %s instead", args[0]) - panic(helpers.Exit{Code: 10}) - } jobInfo.VolumeName = parts[0] - jobInfo.BaseSnapshot = helpers.SnapshotInfo{Name: parts[1]} - creationTime, err := helpers.GetCreationDate(context.TODO(), args[0]) - if err != nil { - helpers.AppLogger.Errorf("Error trying to get creation date of specified base snapshot - %v", err) - panic(helpers.Exit{Code: 10}) - } - jobInfo.BaseSnapshot.CreationTime = creationTime jobInfo.Destinations = strings.Split(args[1], ",") if len(jobInfo.Destinations) > 1 && jobInfo.MaxFileBuffer == 0 { @@ -117,18 +109,6 @@ func updateJobInfo(args []string) { panic(helpers.Exit{Code: 10}) } - if jobInfo.IncrementalSnapshot.Name != "" { - jobInfo.IncrementalSnapshot.Name = strings.TrimPrefix(jobInfo.IncrementalSnapshot.Name, jobInfo.VolumeName) - jobInfo.IncrementalSnapshot.Name = strings.TrimPrefix(jobInfo.IncrementalSnapshot.Name, "@") - - creationTime, err = helpers.GetCreationDate(context.TODO(), fmt.Sprintf("%s@%s", jobInfo.VolumeName, jobInfo.IncrementalSnapshot.Name)) - if err != nil { - helpers.AppLogger.Errorf("Error trying to get creation date of specified incremental snapshot - %v", err) - panic(helpers.Exit{Code: 10}) - } - jobInfo.IncrementalSnapshot.CreationTime = creationTime - } - for _, destination := range jobInfo.Destinations { prefix := strings.Split(destination, "://") if len(prefix) < 2 { @@ -141,6 +121,58 @@ func updateJobInfo(args []string) { panic(helpers.Exit{Code: 10}) } } + + // If we aren't using a "smart" option, rely on the user to provide the snapshots to use! + if !jobInfo.Full && !jobInfo.Incremental && jobInfo.FullIfOlderThan == -1*time.Minute { + if len(parts) != 2 { + helpers.AppLogger.Errorf("Invalid base snapshot provided. 
Expected format @, got %s instead", args[0]) + panic(helpers.Exit{Code: 10}) + } + jobInfo.BaseSnapshot = helpers.SnapshotInfo{Name: parts[1]} + creationTime, err := helpers.GetCreationDate(context.TODO(), args[0]) + if err != nil { + helpers.AppLogger.Errorf("Error trying to get creation date of specified base snapshot - %v", err) + panic(helpers.Exit{Code: 10}) + } + jobInfo.BaseSnapshot.CreationTime = creationTime + + if jobInfo.IncrementalSnapshot.Name != "" { + jobInfo.IncrementalSnapshot.Name = strings.TrimPrefix(jobInfo.IncrementalSnapshot.Name, jobInfo.VolumeName) + jobInfo.IncrementalSnapshot.Name = strings.TrimPrefix(jobInfo.IncrementalSnapshot.Name, "@") + + creationTime, err = helpers.GetCreationDate(context.TODO(), fmt.Sprintf("%s@%s", jobInfo.VolumeName, jobInfo.IncrementalSnapshot.Name)) + if err != nil { + helpers.AppLogger.Errorf("Error trying to get creation date of specified incremental snapshot - %v", err) + panic(helpers.Exit{Code: 10}) + } + jobInfo.IncrementalSnapshot.CreationTime = creationTime + } + } else { + // Some basic checks here + onlyOneCheck := 0 + if jobInfo.Full { + onlyOneCheck++ + } + if jobInfo.Incremental { + onlyOneCheck++ + } + if jobInfo.FullIfOlderThan != -1*time.Minute { + onlyOneCheck++ + } + if onlyOneCheck > 1 { + helpers.AppLogger.Errorf("Please specify only one \"smart\" option at a time") + panic(helpers.Exit{Code: 11}) + } + if len(parts) != 1 { + helpers.AppLogger.Errorf("When using a smart option, please only specify the volume to backup, do not include any snapshot information.") + panic(helpers.Exit{Code: 10}) + } + if err := backup.ProcessSmartOptions(&jobInfo); err != nil { + helpers.AppLogger.Errorf("Error while trying to process smart option - %v", err) + panic(helpers.Exit{Code: 10}) + } + helpers.AppLogger.Debugf("Utilizing smart option.") + } } func validateSendFlags(cmd *cobra.Command, args []string) { diff --git a/helpers/jobinfo.go b/helpers/jobinfo.go index 7d10e26..43cb6f8 100644 --- 
a/helpers/jobinfo.go +++ b/helpers/jobinfo.go @@ -56,6 +56,10 @@ type JobInfo struct { Properties bool IntermediaryIncremental bool Resume bool `json:"-"` + // "Smart" Options + Full bool `json:"-"` + Incremental bool `json:"-"` + FullIfOlderThan time.Duration `json:"-"` // ZFS Receive options Force bool `json:"-"` @@ -82,6 +86,13 @@ type SnapshotInfo struct { Name string } +func (s *SnapshotInfo) Equal(t *SnapshotInfo) bool { + if s == nil || t == nil { + return s == t + } + return strings.Compare(s.Name, t.Name) == 0 && s.CreationTime.Equal(t.CreationTime) +} + // TotalBytesWritten will sum up the size of all underlying Volumes to give a total // that represents how many bytes have been written. func (j *JobInfo) TotalBytesWritten() uint64 { diff --git a/helpers/zfs.go b/helpers/zfs.go index 993a0f6..5abe677 100644 --- a/helpers/zfs.go +++ b/helpers/zfs.go @@ -30,6 +30,9 @@ import ( "time" ) +// ZFSPath is the path to the zfs binary +var ZFSPath = "zfs" + // GetCreationDate will use the zfs command to get and parse the creation datetime // of the specified volume/snapshot func GetCreationDate(ctx context.Context, target string) (time.Time, error) { @@ -44,12 +47,43 @@ func GetCreationDate(ctx context.Context, target string) (time.Time, error) { return time.Unix(epochTime, 0), nil } +// GetSnapshots will retrieve all snapshots for the given target +func GetSnapshots(ctx context.Context, target string) ([]SnapshotInfo, error) { + errB := new(bytes.Buffer) + cmd := exec.CommandContext(ctx, ZFSPath, "list", "-H", "-d", "1", "-p", "-t", "snapshot", "-r", "-o", "name,creation", "-S", "creation", target) + AppLogger.Debugf("Getting ZFS Snapshots with command \"%s\"", strings.Join(cmd.Args, " ")) + cmd.Stderr = errB + rpipe, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + err = cmd.Start() + if err != nil { + return nil, fmt.Errorf("%s (%v)", strings.TrimSpace(errB.String()), err) + } + var snapshots []SnapshotInfo + for { + snapInfo := SnapshotInfo{} + 
var creation int64 + n, nerr := fmt.Fscanln(rpipe, &snapInfo.Name, &creation) + if n == 0 || nerr != nil { + break + } + snapInfo.CreationTime = time.Unix(creation, 0) + snapInfo.Name = strings.TrimPrefix(snapInfo.Name, target) + snapInfo.Name = strings.TrimPrefix(snapInfo.Name, "@") + snapshots = append(snapshots, snapInfo) + } + err = cmd.Wait() + return snapshots, err +} + // GetZFSProperty will return the raw value returned by the "zfs get" command for // the given property on the given target. func GetZFSProperty(ctx context.Context, prop, target string) (string, error) { b := new(bytes.Buffer) errB := new(bytes.Buffer) - cmd := exec.CommandContext(ctx, "zfs", "get", "-H", "-p", "-o", "value", prop, target) + cmd := exec.CommandContext(ctx, ZFSPath, "get", "-H", "-p", "-o", "value", prop, target) AppLogger.Debugf("Getting ZFS Property with command \"%s\"", strings.Join(cmd.Args, " ")) cmd.Stdout = b cmd.Stderr = errB @@ -92,7 +126,7 @@ func GetZFSSendCommand(ctx context.Context, j *JobInfo) *exec.Cmd { } zfsArgs = append(zfsArgs, fmt.Sprintf("%s@%s", j.VolumeName, j.BaseSnapshot.Name)) - cmd := exec.CommandContext(ctx, "zfs", zfsArgs...) + cmd := exec.CommandContext(ctx, ZFSPath, zfsArgs...) return cmd } @@ -129,7 +163,7 @@ func GetZFSReceiveCommand(ctx context.Context, j *JobInfo) *exec.Cmd { } zfsArgs = append(zfsArgs, j.LocalVolume) - cmd := exec.CommandContext(ctx, "zfs", zfsArgs...) + cmd := exec.CommandContext(ctx, ZFSPath, zfsArgs...) return cmd }