Skip to content

Commit

Permalink
Initial ZFS clones support
Browse files Browse the repository at this point in the history
  • Loading branch information
tuffnatty committed Jun 22, 2021
1 parent c5363a1 commit 19de921
Show file tree
Hide file tree
Showing 3 changed files with 55 additions and 26 deletions.
8 changes: 6 additions & 2 deletions zfs_autobackup/ZfsAutobackup.py
Original file line number Diff line number Diff line change
Expand Up @@ -350,6 +350,9 @@ def sync_datasets(self, source_node, source_datasets, target_node):
:type source_node: ZfsNode
"""

def make_target_name(source_dataset):
    """Map a source dataset to its corresponding dataset name on the target.

    Strips the configured number of path components from the source
    dataset's name and prefixes the result with the target path.
    (Closure: reads ``self.args`` from the enclosing ``sync_datasets``.)
    """
    stripped_name = source_dataset.lstrip_path(self.args.strip_path)
    return "{}/{}".format(self.args.target_path, stripped_name)

send_pipes=self.get_send_pipes(source_node.verbose)
recv_pipes=self.get_recv_pipes(target_node.verbose)

Expand All @@ -365,7 +368,7 @@ def sync_datasets(self, source_node, source_datasets, target_node):

try:
# determine corresponding target_dataset
target_name = self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
target_name = make_target_name(source_dataset)
target_dataset = ZfsDataset(target_node, target_name)
target_datasets.append(target_dataset)

Expand All @@ -391,7 +394,8 @@ def sync_datasets(self, source_node, source_datasets, target_node):
no_send=self.args.no_send,
destroy_incompatible=self.args.destroy_incompatible,
send_pipes=send_pipes, recv_pipes=recv_pipes,
decrypt=self.args.decrypt, encrypt=self.args.encrypt, zfs_compressed=self.args.zfs_compressed )
decrypt=self.args.decrypt, encrypt=self.args.encrypt, zfs_compressed=self.args.zfs_compressed,
make_target_name=make_target_name)
except Exception as e:
fail_count = fail_count + 1
source_dataset.error("FAILED: " + str(e))
Expand Down
26 changes: 23 additions & 3 deletions zfs_autobackup/ZfsDataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -204,7 +204,12 @@ def find_next_snapshot(self, snapshot, also_other_snapshots=False):
if self.is_snapshot:
raise (Exception("Please call this on a dataset."))

index = self.find_snapshot_index(snapshot)
if snapshot.name == self.properties.get("origin"):
# Special case: the start snapshot belongs to a different filesystem (it is a clone's origin snapshot)
index = -1
else:
index = self.find_snapshot_index(snapshot)

while index is not None and index < len(self.snapshots) - 1:
index = index + 1
if also_other_snapshots or self.snapshots[index].is_ours():
Expand Down Expand Up @@ -553,7 +558,10 @@ def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, s

# incremental?
if prev_snapshot:
cmd.extend(["-i", "@" + prev_snapshot.snapshot_name])
if self.filesystem_name == prev_snapshot.filesystem_name:
cmd.extend(["-i", "@" + prev_snapshot.snapshot_name])
else:
cmd.extend(["-i", prev_snapshot.name])

cmd.append(self.name)

Expand Down Expand Up @@ -762,6 +770,10 @@ def find_common_snapshot(self, target_dataset):
"""
if not target_dataset.snapshots:
# target has nothing yet
origin = self.properties.get("origin")
if origin:
# We are a clone. The origin has an earlier creation time and thus must already have been synced.
return ZfsDataset(self.zfs_node, origin)
return None
else:
# snapshot=self.find_snapshot(target_dataset.snapshots[-1].snapshot_name)
Expand Down Expand Up @@ -968,7 +980,7 @@ def handle_incompatible_snapshots(self, incompatible_target_snapshots, destroy_i

def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed):
no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, make_target_name):
"""sync this dataset's snapshots to target_dataset, while also thinning
out old snapshots along the way.
Expand All @@ -993,6 +1005,14 @@ def sync_snapshots(self, target_dataset, features, show_progress, filter_propert
incompatible_target_snapshots) = \
self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots)

if not target_dataset.exists and common_snapshot and common_snapshot.filesystem_name != target_dataset.filesystem_name:
target_origin = ZfsDataset(target_dataset.zfs_node, make_target_name(common_snapshot))
if not target_origin.exists:
raise Exception("Origin {} for clone {} does not exist on target.{}"
.format(target_origin.name, target_dataset.name,
("" if also_other_snapshots
else " You may want to retransfer {} with --other-snapshots.".format(common_snapshot.filesystem_name))))

# NOTE: we do this because we don't want filesystems to fill up when backups keep failing.
# Also useful with no_send to still clean up stuff.
self._pre_clean(
Expand Down
47 changes: 26 additions & 21 deletions zfs_autobackup/ZfsNode.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
# python 2 compatibility
from __future__ import print_function
from operator import attrgetter
import re
import shlex
import subprocess
Expand Down Expand Up @@ -219,15 +220,15 @@ def consistent_snapshot(self, datasets, snapshot_name, min_changed_bytes, pre_sn
def selected_datasets(self, exclude_received, exclude_paths):
"""determine filesystems that should be backed up by looking at the special autobackup-property, systemwide
returns: list of ZfsDataset
returns: list of ZfsDataset sorted by creation time
"""

self.debug("Getting selected datasets")

# get all source filesystems that have the backup property
lines = self.run(tab_split=True, readonly=True, cmd=[
"zfs", "get", "-t", "volume,filesystem", "-o", "name,value,source", "-H",
"autobackup:" + self.backup_name
"zfs", "get", "-t", "volume,filesystem", "-Hp",
"autobackup:{},creation".format(self.backup_name)
])

# The returnlist of selected ZfsDataset's:
Expand All @@ -237,21 +238,25 @@ def selected_datasets(self, exclude_received, exclude_paths):
sources = {}

for line in lines:
(name, value, raw_source) = line
dataset = ZfsDataset(self, name)

# "resolve" inherited sources
sources[name] = raw_source
if raw_source.find("inherited from ") == 0:
inherited = True
inherited_from = re.sub("^inherited from ", "", raw_source)
source = sources[inherited_from]
else:
inherited = False
source = raw_source

# determine it
if dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received, exclude_paths=exclude_paths):
selected_filesystems.append(dataset)

return selected_filesystems
(name, prop_name, value, raw_source) = line
if prop_name.startswith("autobackup:"):
dataset = ZfsDataset(self, name)

# "resolve" inherited sources
sources[name] = raw_source
if raw_source.find("inherited from ") == 0:
inherited = True
inherited_from = re.sub("^inherited from ", "", raw_source)
source = sources[inherited_from]
else:
inherited = False
source = raw_source

# determine it
if dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received, exclude_paths=exclude_paths):
selected_filesystems.append(dataset)
elif prop_name == "creation":
# creation date for the last dataset
if selected_filesystems and selected_filesystems[-1].name == name:
selected_filesystems[-1].creation = int(value)
return sorted(selected_filesystems, key=attrgetter("creation"))

0 comments on commit 19de921

Please sign in to comment.