-
Notifications
You must be signed in to change notification settings - Fork 15
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
4 changed files
with
277 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,256 @@ | ||
//! `lock` subcommand | ||
use std::collections::BTreeSet; | ||
|
||
use chrono::{DateTime, Local}; | ||
use derive_setters::Setters; | ||
use log::error; | ||
use rayon::ThreadPoolBuilder; | ||
|
||
use crate::{ | ||
backend::{ | ||
decrypt::{DecryptReadBackend, DecryptWriteBackend}, | ||
node::NodeType, | ||
FileType, | ||
}, | ||
blob::{tree::TreeStreamerOnce, BlobType}, | ||
error::{CommandErrorKind, RepositoryErrorKind, RusticResult}, | ||
id::Id, | ||
index::{ | ||
binarysorted::{IndexCollector, IndexType}, | ||
indexer::Indexer, | ||
GlobalIndex, ReadGlobalIndex, | ||
}, | ||
progress::{Progress, ProgressBars}, | ||
repofile::{indexfile::LockOption, DeleteOption, IndexFile, SnapshotFile}, | ||
repository::{Open, Repository}, | ||
}; | ||
|
||
/// Constants used by the `lock` command.
pub(super) mod constants {
    /// The maximum number of reader threads to use for locking.
    pub(super) const MAX_LOCKER_THREADS_NUM: usize = 20;
}
|
||
#[cfg_attr(feature = "clap", derive(clap::Parser))]
#[derive(Debug, Clone, Copy, Setters)]
/// Options for the `lock` command
pub struct LockOptions {
    /// Extend locks even if the files are already locked long enough
    #[cfg_attr(feature = "clap", clap(long))]
    always_extend_lock: bool,

    /// Timestamp until which to lock; forwarded to the `set_from_until`
    /// constructors of the lock/delete options (presumably `None` means
    /// "lock forever" — confirm against `LockOption::set_from_until`).
    // NOTE(review): this field has no `clap` attribute — under the `clap`
    // feature it becomes a positional argument; confirm that is intended
    // (the sibling field uses `clap(long)`).
    until: Option<DateTime<Local>>,
}
|
||
impl LockOptions {
    /// Lock the given snapshots and all pack files needed to restore them.
    ///
    /// Reads all index files, determines the packs referenced by the
    /// snapshots' trees via [`find_needed_packs`], locks those packs
    /// (see [`Self::lock_packs`]) and finally locks the snapshot files
    /// themselves (see [`Self::lock_snapshots`]).
    ///
    /// # Errors
    ///
    /// Returns an error if reading/streaming the index files fails or if
    /// any of the pack/snapshot locking steps below fails.
    pub fn lock<P: ProgressBars, S: Open>(
        &self,
        repo: &Repository<P, S>,
        snapshots: &[SnapshotFile],
        now: DateTime<Local>,
    ) -> RusticResult<()> {
        let pb = &repo.pb;
        let be = repo.dbe();

        // Raw (id, IndexFile) pairs are kept as well: `lock_packs` needs
        // them to rewrite index files whose pack lock options change.
        let mut index_files = Vec::new();

        let p = pb.progress_counter("reading index...");
        let mut index_collector = IndexCollector::new(IndexType::OnlyTrees);

        for index in be.stream_all::<IndexFile>(&p)? {
            let (id, index) = index?;
            index_collector.extend(index.packs.clone());
            index_files.push((id, index));
        }
        p.finish();
        let index = GlobalIndex::new_from_index(index_collector.into_index());

        // Collect the root tree of every snapshot and resolve all packs
        // reachable from those trees.
        let snap_tress = snapshots.iter().map(|sn| sn.tree).collect();
        let packs = find_needed_packs(be, &index, snap_tress, pb)?;
        // Lock the packs first, then the snapshots referring to them.
        self.lock_packs(repo, index_files, packs)?;

        self.lock_snapshots(repo, snapshots, now)?;

        Ok(())
    }

    /// Lock the given snapshots according to these options.
    ///
    /// Sorts the snapshots into buckets:
    /// * snapshots whose delete option needs a lock update are re-saved
    ///   with the new option and the newly saved copies are locked,
    /// * snapshots needing no update are locked only when
    ///   `always_extend_lock` is set,
    /// * snapshots which don't have to be kept at `now` are deleted.
    ///
    /// NOTE(review): a snapshot can land both in `remove_snaps` and in
    /// `new_snaps`/`lock_snaps` (the conditions are not exclusive) —
    /// confirm this overlap is intended.
    ///
    /// # Errors
    ///
    /// Returns an error if saving/deleting snapshot files or locking the
    /// snapshot files fails.
    pub fn lock_snapshots<P: ProgressBars, S: Open>(
        &self,
        repo: &Repository<P, S>,
        snapshots: &[SnapshotFile],
        now: DateTime<Local>,
    ) -> RusticResult<()> {
        let mut new_snaps = Vec::new();
        let mut remove_snaps = Vec::new();
        let mut lock_snaps = Vec::new();

        let new_lock = DeleteOption::set_from_until(self.until);

        for snap in snapshots {
            if !snap.must_keep(now) {
                remove_snaps.push(snap.id);
            }

            if snap.delete.needs_lock_update(&new_lock) {
                // Re-save the snapshot with the updated delete option; the
                // new copy (new id) is what gets locked below.
                new_snaps.push(SnapshotFile {
                    delete: new_lock,
                    ..snap.clone()
                });
            } else if self.always_extend_lock {
                lock_snaps.push(snap.id);
            }
        }

        // save new snapshots
        let new_ids = repo.save_snapshots(new_snaps)?;
        lock_snaps.extend(new_ids);

        // remove old snapshots
        repo.delete_snapshots(&remove_snaps)?;

        // Do the actual locking
        lock_files(repo, FileType::Snapshot, &lock_snaps, self.until)?;

        Ok(())
    }

    /// Lock the given packs and rewrite the index files referencing them.
    ///
    /// For every index file containing a pack from `packs` whose lock has
    /// to be set or extended, a replacement index file with the updated
    /// lock options is written via the indexer; the superseded index files
    /// are deleted only after the new index is finalized. Finally the pack
    /// files themselves are locked.
    ///
    /// # Errors
    ///
    /// Returns an error if writing the new index, deleting the old index
    /// files, or locking the pack files fails.
    pub fn lock_packs<P: ProgressBars, S: Open>(
        &self,
        repo: &Repository<P, S>,
        index_files: Vec<(Id, IndexFile)>,
        packs: BTreeSet<Id>,
    ) -> RusticResult<()> {
        let mut lock_packs = Vec::new();
        let mut remove_index = Vec::new();

        // Check for indexfiles-to-modify and for packs to lock
        // Also already write the new index from the index files which are modified.
        let p = repo.pb.progress_counter("processing index files...");
        p.set_length(index_files.len().try_into().unwrap());
        let indexer = Indexer::new_unindexed(repo.dbe().clone()).into_shared();
        for (id, mut index) in index_files {
            let mut modified = false;
            for pack in &mut index.packs {
                if !packs.contains(&pack.id) {
                    continue;
                }
                if !pack.lock.is_locked(self.until) {
                    // Lock missing or too short: set it and remember to
                    // rewrite this index file.
                    pack.lock = LockOption::set_from_until(self.until);
                    modified = true;
                    lock_packs.push(pack.id);
                } else if self.always_extend_lock {
                    lock_packs.push(pack.id);
                }
            }
            if modified {
                // Write the whole (updated) index file content into the
                // new index and mark the old file for removal.
                for pack in index.packs {
                    indexer.write().unwrap().add(pack)?;
                }
                for pack_remove in index.packs_to_delete {
                    indexer.write().unwrap().add_remove(pack_remove)?;
                }
                remove_index.push(id);
            }
            p.inc(1);
        }
        indexer.write().unwrap().finalize()?;
        p.finish();

        // Remove old index files
        let p = repo.pb.progress_counter("removing old index files...");
        repo.dbe()
            .delete_list(FileType::Index, true, remove_index.iter(), p)?;

        // Do the actual locking
        lock_files(repo, FileType::Pack, &lock_packs, self.until)?;

        Ok(())
    }
}
|
||
fn lock_files<P: ProgressBars, S>( | ||
repo: &Repository<P, S>, | ||
file_type: FileType, | ||
ids: &[Id], | ||
until: Option<DateTime<Local>>, | ||
) -> RusticResult<()> { | ||
let pool = ThreadPoolBuilder::new() | ||
.num_threads(constants::MAX_LOCKER_THREADS_NUM) | ||
.build() | ||
.map_err(RepositoryErrorKind::FromThreadPoolbilderError)?; | ||
let progress_bar_ref = &repo.pb.progress_counter("locking {filetype:?} files.."); | ||
let backend = &repo.be; | ||
pool.in_place_scope(|scope| { | ||
for id in ids { | ||
scope.spawn(move |_| { | ||
if let Err(e) = backend.lock(file_type, id, until) { | ||
// FIXME: Use error handling | ||
error!("lock failed for {file_type:?} {id:?}. {e}"); | ||
}; | ||
progress_bar_ref.inc(1); | ||
}); | ||
} | ||
}); | ||
Ok(()) | ||
} | ||
|
||
/// Find packs which are needed for the given Trees | ||
/// | ||
/// # Arguments | ||
/// | ||
/// * `index` - The index to use | ||
/// * `trees` - The trees to consider | ||
/// * `pb` - The progress bars | ||
/// | ||
/// # Errors | ||
/// | ||
// TODO!: add errors! | ||
fn find_needed_packs( | ||
be: &impl DecryptReadBackend, | ||
index: &impl ReadGlobalIndex, | ||
trees: Vec<Id>, | ||
pb: &impl ProgressBars, | ||
) -> RusticResult<BTreeSet<Id>> { | ||
let p = pb.progress_counter("finding needed packs..."); | ||
|
||
let mut packs = BTreeSet::new(); | ||
|
||
for tree_id in &trees { | ||
_ = packs.insert( | ||
index | ||
.get_id(BlobType::Tree, tree_id) | ||
.ok_or_else(|| CommandErrorKind::IdNotFoundinIndex(*tree_id))? | ||
.pack, | ||
); | ||
} | ||
|
||
let mut tree_streamer = TreeStreamerOnce::new(be, index, trees, p)?; | ||
while let Some(item) = tree_streamer.next().transpose()? { | ||
let (_, tree) = item; | ||
for node in tree.nodes { | ||
match node.node_type { | ||
NodeType::File => { | ||
for id in node.content.iter().flatten() { | ||
_ = packs.insert( | ||
index | ||
.get_id(BlobType::Data, id) | ||
.ok_or_else(|| CommandErrorKind::IdNotFoundinIndex(*id))? | ||
.pack, | ||
); | ||
} | ||
} | ||
NodeType::Dir => { | ||
let id = &node.subtree.unwrap(); | ||
_ = packs.insert( | ||
index | ||
.get_id(BlobType::Tree, id) | ||
.ok_or_else(|| CommandErrorKind::IdNotFoundinIndex(*id))? | ||
.pack, | ||
); | ||
} | ||
_ => {} // nothing to do | ||
} | ||
} | ||
} | ||
|
||
Ok(packs) | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters