feat(osm45): wip on ovh3 backups
More work needed: replication on the OVH cluster is getting in the way of sanoid/syncoid
alexgarel authored and root committed Oct 14, 2024
1 parent 64af1ca commit fa8278d
Showing 4 changed files with 84 additions and 0 deletions.
30 changes: 30 additions & 0 deletions confs/osm45/sanoid/sanoid.conf
@@ -23,6 +23,13 @@
use_template=local_data
recursive=yes

# backups of ovh3
[hdd-zfs/off-backups/ovh3-rpool]
use_template=synced_replicate
process_children_only=yes
recursive=yes


# Template to regularly snapshot
[template_local_data]
# How often sub-hourly snapshots should be taken (period in minutes)
@@ -105,3 +112,26 @@
autosnap = no
# Should old snapshots be pruned
autoprune = yes

# template for data synced from a replication dataset
[template_synced_replicate]
# How often sub-hourly snapshots should be taken (period in minutes)
frequent_period=60
# What to keep
# number of sub-hourly snapshots
frequently = 0
# number of hourly snapshots
hourly = 10
# number of daily snapshots
daily = 10
# number of monthly snapshots
monthly = 6
# number of yearly snapshots
yearly = 0
# whether snapshots should be taken automatically
autosnap = no
# Should old snapshots be pruned
autoprune = yes
# We need to remove replication snapshots
post_snapshot_script = /opt/openfoodfacts-infrastructure/scripts/zfs/sanoid_post_remove_replication_snapshots.sh

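Before letting a timer run this, the template can be exercised without touching any data. A minimal check, assuming the repository is checked out at /opt/openfoodfacts-infrastructure (as the post_snapshot_script path above suggests) and using sanoid's standard flags:

sanoid --configdir=/opt/openfoodfacts-infrastructure/confs/osm45/sanoid --readonly --verbose --cron
# list the child datasets that process_children_only will manage
zfs list -r -o name hdd-zfs/off-backups/ovh3-rpool

--readonly simulates the run, reporting which snapshots would be taken or pruned without creating or destroying anything.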
32 changes: 32 additions & 0 deletions confs/osm45/sanoid/syncoid-args.conf
@@ -0,0 +1,32 @@

# saving ovh3 volumes
# they are all at the root, so we have no choice but to list them all
# we also use a sync snap because we keep few snapshots on ovh3, and syncing without one would be complicated
--no-privilege-elevation [email protected]:rpool/subvol-100-disk-0 hdd-zfs/off-backups/ovh3-rpool/subvol-100-disk-0
--no-privilege-elevation [email protected]:rpool/subvol-101-disk-0 hdd-zfs/off-backups/ovh3-rpool/subvol-101-disk-0
--no-privilege-elevation [email protected]:rpool/subvol-102-disk-0 hdd-zfs/off-backups/ovh3-rpool/subvol-102-disk-0
--no-privilege-elevation [email protected]:rpool/subvol-103-disk-0 hdd-zfs/off-backups/ovh3-rpool/subvol-103-disk-0
--no-privilege-elevation [email protected]:rpool/subvol-104-disk-0 hdd-zfs/off-backups/ovh3-rpool/subvol-104-disk-0
--no-privilege-elevation [email protected]:rpool/subvol-105-disk-0 hdd-zfs/off-backups/ovh3-rpool/subvol-105-disk-0
--no-privilege-elevation [email protected]:rpool/subvol-106-disk-0 hdd-zfs/off-backups/ovh3-rpool/subvol-106-disk-0
--no-privilege-elevation [email protected]:rpool/subvol-107-disk-0 hdd-zfs/off-backups/ovh3-rpool/subvol-107-disk-0
--no-privilege-elevation [email protected]:rpool/subvol-108-disk-0 hdd-zfs/off-backups/ovh3-rpool/subvol-108-disk-0
--no-privilege-elevation [email protected]:rpool/subvol-110-disk-0 hdd-zfs/off-backups/ovh3-rpool/subvol-110-disk-0
--no-privilege-elevation [email protected]:rpool/subvol-113-disk-0 hdd-zfs/off-backups/ovh3-rpool/subvol-113-disk-0
--no-privilege-elevation [email protected]:rpool/subvol-115-disk-0 hdd-zfs/off-backups/ovh3-rpool/subvol-115-disk-0
--no-privilege-elevation [email protected]:rpool/subvol-130-disk-0 hdd-zfs/off-backups/ovh3-rpool/subvol-130-disk-0
--no-privilege-elevation [email protected]:rpool/subvol-140-disk-0 hdd-zfs/off-backups/ovh3-rpool/subvol-140-disk-0
--no-privilege-elevation [email protected]:rpool/subvol-141-disk-0 hdd-zfs/off-backups/ovh3-rpool/subvol-141-disk-0
--no-privilege-elevation [email protected]:rpool/subvol-142-disk-0 hdd-zfs/off-backups/ovh3-rpool/subvol-142-disk-0
--no-privilege-elevation [email protected]:rpool/subvol-150-disk-0 hdd-zfs/off-backups/ovh3-rpool/subvol-150-disk-0
--no-privilege-elevation [email protected]:rpool/subvol-150-disk-1 hdd-zfs/off-backups/ovh3-rpool/subvol-150-disk-1
--no-privilege-elevation [email protected]:rpool/vm-200-disk-0 hdd-zfs/off-backups/ovh3-rpool/vm-200-disk-0
--no-privilege-elevation [email protected]:rpool/vm-200-disk-2 hdd-zfs/off-backups/ovh3-rpool/vm-200-disk-2
--no-privilege-elevation [email protected]:rpool/vm-201-disk-0 hdd-zfs/off-backups/ovh3-rpool/vm-201-disk-0
--no-privilege-elevation [email protected]:rpool/vm-201-disk-1 hdd-zfs/off-backups/ovh3-rpool/vm-201-disk-1
--no-privilege-elevation [email protected]:rpool/vm-202-disk-0 hdd-zfs/off-backups/ovh3-rpool/vm-202-disk-0
--no-privilege-elevation [email protected]:rpool/vm-203-disk-0 hdd-zfs/off-backups/ovh3-rpool/vm-203-disk-0

# pulling ovh3 backups - not recursive because it contains data that is not useful to us
# we need a sync snap because, for the moment, we don't run sanoid there
--no-privilege-elevation [email protected]:rpool/backups hdd-zfs/off-backups/ovh3-rpool/backups
1 change: 1 addition & 0 deletions confs/osm45/systemd/system/syncoid.service
21 changes: 21 additions & 0 deletions scripts/zfs/sanoid_post_remove_replication_snapshots.sh
@@ -0,0 +1,21 @@
#!/usr/bin/env bash

# SANOID_TARGETS is an environment variable set by sanoid before calling this script

# This script removes any replication snapshots that were synced
# over but should not be kept
#
# Use this only if the server is not a replication target!
readarray -d "," DATASETS <<< "$SANOID_TARGETS"
for DATASET in "${DATASETS[@]}"
do
    # strip line returns left over from the comma-separated list
    DATASET=$(echo "$DATASET" | tr -d '\r\n')
    if [[ -n "$DATASET" ]] && zfs list -t snap "$DATASET" | grep -q "__replicate_"
    then
        for REPLICATION_SNAPSHOT in $( zfs list -t snap "$DATASET" -o name | grep "@__replicate_" )
        do
            zfs destroy "$REPLICATION_SNAPSHOT"
        done
    fi
done
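A hand-run example, assuming sanoid passes SANOID_TARGETS as the comma-separated list the readarray -d "," above expects (dataset names taken from the syncoid-args.conf in this commit):

SANOID_TARGETS="hdd-zfs/off-backups/ovh3-rpool/subvol-100-disk-0,hdd-zfs/off-backups/ovh3-rpool/subvol-101-disk-0" \
    /opt/openfoodfacts-infrastructure/scripts/zfs/sanoid_post_remove_replication_snapshots.sh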
