From f201cd0c60ddd9f5cb8564c52e3ee765722a3da1 Mon Sep 17 00:00:00 2001 From: David Arnold Date: Sat, 7 Aug 2021 13:45:56 -0500 Subject: [PATCH 1/2] Refactor data to settings (specificity) --- src/cli.rs | 16 ++++++++-------- src/lib.rs | 14 +++++++------- src/{data.rs => settings.rs} | 2 +- 3 files changed, 16 insertions(+), 16 deletions(-) rename src/{data.rs => settings.rs} (99%) diff --git a/src/cli.rs b/src/cli.rs index 61890e43..68ac6c97 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -10,7 +10,7 @@ use clap::{ArgMatches, Clap, FromArgMatches}; use crate as deploy; -use self::deploy::{DeployFlake, ParseFlakeError}; +use self::deploy::{DeployFlake, ParseFlakeError, settings}; use futures_util::stream::{StreamExt, TryStreamExt}; use log::{debug, error, info, warn}; use serde::Serialize; @@ -170,7 +170,7 @@ async fn get_deployment_data( supports_flakes: bool, flakes: &[deploy::DeployFlake<'_>], extra_build_args: &[String], -) -> Result, GetDeploymentDataError> { +) -> Result, GetDeploymentDataError> { futures_util::stream::iter(flakes).then(|flake| async move { info!("Evaluating flake in {}", flake.repo); @@ -389,14 +389,14 @@ pub enum RunDeployError { type ToDeploy<'a> = Vec<( &'a deploy::DeployFlake<'a>, - &'a deploy::data::Data, - (&'a str, &'a deploy::data::Node), - (&'a str, &'a deploy::data::Profile), + &'a settings::Root, + (&'a str, &'a settings::Node), + (&'a str, &'a settings::Profile), )>; async fn run_deploy( deploy_flakes: Vec>, - data: Vec, + data: Vec, supports_flakes: bool, check_sigs: bool, interactive: bool, @@ -437,7 +437,7 @@ async fn run_deploy( None => return Err(RunDeployError::NodeNotFound(node_name.clone())), }; - let mut profiles_list: Vec<(&str, &deploy::data::Profile)> = Vec::new(); + let mut profiles_list: Vec<(&str, &settings::Profile)> = Vec::new(); for profile_name in [ node.node_settings.profiles_order.iter().collect(), @@ -466,7 +466,7 @@ async fn run_deploy( let mut l = Vec::new(); for (node_name, node) in &data.nodes { - let mut profiles_list: Vec<(&str, &deploy::data::Profile)> = Vec::new(); + let mut profiles_list: Vec<(&str, &settings::Profile)> = Vec::new(); for profile_name in [ node.node_settings.profiles_order.iter().collect(), diff --git a/src/lib.rs b/src/lib.rs index 981ec1ed..630df179 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -145,7 +145,7 @@ pub fn init_logger( Ok(()) } -pub mod data; +pub mod settings; pub mod deploy; pub mod push; pub mod cli; @@ -312,13 +312,13 @@ fn test_parse_flake() { #[derive(Debug, Clone)] pub struct DeployData<'a> { pub node_name: &'a str, - pub node: &'a data::Node, + pub node: &'a settings::Node, pub profile_name: &'a str, - pub profile: &'a data::Profile, + pub profile: &'a settings::Profile, pub cmd_overrides: &'a CmdOverrides, - pub merged_settings: data::GenericSettings, + pub merged_settings: settings::GenericSettings, pub debug_logs: bool, pub log_dir: Option<&'a str>, @@ -395,10 +395,10 @@ impl<'a> DeployData<'a> { } pub fn make_deploy_data<'a, 's>( - top_settings: &'s data::GenericSettings, - node: &'a data::Node, + top_settings: &'s settings::GenericSettings, + node: &'a settings::Node, node_name: &'a str, - profile: &'a data::Profile, + profile: &'a settings::Profile, profile_name: &'a str, cmd_overrides: &'a CmdOverrides, debug_logs: bool, diff --git a/src/data.rs b/src/settings.rs similarity index 99% rename from src/data.rs rename to src/settings.rs index 6fe7f75f..9ce50a0f 100644 --- a/src/data.rs +++ b/src/settings.rs @@ -66,7 +66,7 @@ pub struct Node { } #[derive(Deserialize, Debug, Clone)] -pub 
struct Data { +pub struct Root { #[serde(flatten)] pub generic_settings: GenericSettings, pub nodes: HashMap, From c99adbde8c979953a8c5bebe2f811ff634c89787 Mon Sep 17 00:00:00 2001 From: David Arnold Date: Sat, 7 Aug 2021 14:33:52 -0500 Subject: [PATCH 2/2] Refactor data structures into thier own module - preparation for a more view based data access --- src/cli.rs | 44 ++++---- src/data.rs | 300 ++++++++++++++++++++++++++++++++++++++++++++++++++ src/deploy.rs | 16 +-- src/lib.rs | 296 +------------------------------------------------ src/push.rs | 6 +- 5 files changed, 335 insertions(+), 327 deletions(-) create mode 100644 src/data.rs diff --git a/src/cli.rs b/src/cli.rs index 68ac6c97..593ed06b 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -10,7 +10,7 @@ use clap::{ArgMatches, Clap, FromArgMatches}; use crate as deploy; -use self::deploy::{DeployFlake, ParseFlakeError, settings}; +use self::deploy::{data, settings}; use futures_util::stream::{StreamExt, TryStreamExt}; use log::{debug, error, info, warn}; use serde::Serialize; @@ -168,7 +168,7 @@ pub enum GetDeploymentDataError { /// Evaluates the Nix in the given `repo` and return the processed Data from it async fn get_deployment_data( supports_flakes: bool, - flakes: &[deploy::DeployFlake<'_>], + flakes: &[data::DeployFlake<'_>], extra_build_args: &[String], ) -> Result, GetDeploymentDataError> { futures_util::stream::iter(flakes).then(|flake| async move { @@ -272,9 +272,9 @@ struct PromptPart<'a> { fn print_deployment( parts: &[( - &deploy::DeployFlake<'_>, - deploy::DeployData, - deploy::DeployDefs, + &data::DeployFlake<'_>, + data::DeployData, + data::DeployDefs, )], ) -> Result<(), toml::ser::Error> { let mut part_map: HashMap> = HashMap::new(); @@ -315,9 +315,9 @@ pub enum PromptDeploymentError { fn prompt_deployment( parts: &[( - &deploy::DeployFlake<'_>, - deploy::DeployData, - deploy::DeployDefs, + &data::DeployFlake<'_>, + data::DeployData, + data::DeployDefs, )], ) -> Result<(), PromptDeploymentError> { print_deployment(parts)?; @@ -378,7 +378,7 @@ pub enum RunDeployError { #[error("Profile was provided without a node name")] ProfileWithoutNode, #[error("Error processing deployment definitions: {0}")] - DeployDataDefs(#[from] deploy::DeployDataDefsError), + InvalidDeployDataDefs(#[from] data::DeployDataDefsError), #[error("Failed to make printable TOML of deployment: {0}")] TomlFormat(#[from] toml::ser::Error), #[error("{0}")] @@ -388,19 +388,19 @@ pub enum RunDeployError { } type ToDeploy<'a> = Vec<( - &'a deploy::DeployFlake<'a>, + &'a data::DeployFlake<'a>, &'a settings::Root, (&'a str, &'a settings::Node), (&'a str, &'a settings::Profile), )>; async fn run_deploy( - deploy_flakes: Vec>, + deploy_flakes: Vec>, data: Vec, supports_flakes: bool, check_sigs: bool, interactive: bool, - cmd_overrides: &deploy::CmdOverrides, + cmd_overrides: &data::CmdOverrides, keep_result: bool, result_path: Option<&str>, extra_build_args: &[String], @@ -508,13 +508,13 @@ async fn run_deploy( .collect(); let mut parts: Vec<( - &deploy::DeployFlake<'_>, - deploy::DeployData, - deploy::DeployDefs, + &data::DeployFlake<'_>, + data::DeployData, + data::DeployDefs, )> = Vec::new(); for (deploy_flake, data, (node_name, node), (profile_name, profile)) in to_deploy { - let deploy_data = deploy::make_deploy_data( + let deploy_data = data::make_deploy_data( &data.generic_settings, node, node_name, @@ -550,7 +550,7 @@ async fn run_deploy( .await?; } - let mut succeeded: Vec<(&deploy::DeployData, &deploy::DeployDefs)> = vec![]; + let mut succeeded: 
Vec<(&data::DeployData, &data::DeployDefs)> = vec![]; // Run all deployments // In case of an error rollback any previoulsy made deployment. @@ -595,7 +595,7 @@ pub enum RunError { #[error("Failed to evaluate deployment data: {0}")] GetDeploymentData(#[from] GetDeploymentDataError), #[error("Error parsing flake: {0}")] - ParseFlake(#[from] deploy::ParseFlakeError), + ParseFlake(#[from] data::ParseFlakeError), #[error("Error initiating logger: {0}")] Logger(#[from] flexi_logger::FlexiLoggerError), #[error("{0}")] @@ -619,12 +619,12 @@ pub async fn run(args: Option<&ArgMatches>) -> Result<(), RunError> { .targets .unwrap_or_else(|| vec![opts.clone().target.unwrap_or_else(|| ".".to_string())]); - let deploy_flakes: Vec = deploys + let deploy_flakes: Vec = deploys .iter() - .map(|f| deploy::parse_flake(f.as_str())) - .collect::, ParseFlakeError>>()?; + .map(|f| data::parse_flake(f.as_str())) + .collect::, data::ParseFlakeError>>()?; - let cmd_overrides = deploy::CmdOverrides { + let cmd_overrides = data::CmdOverrides { ssh_user: opts.ssh_user, profile_user: opts.profile_user, ssh_opts: opts.ssh_opts, diff --git a/src/data.rs b/src/data.rs new file mode 100644 index 00000000..86e1b6cd --- /dev/null +++ b/src/data.rs @@ -0,0 +1,300 @@ +// SPDX-FileCopyrightText: 2020 Serokell +// SPDX-FileCopyrightText: 2021 Yannik Sander +// +// SPDX-License-Identifier: MPL-2.0 + +use rnix::{types::*, SyntaxKind::*}; +use merge::Merge; +use thiserror::Error; + +use crate::settings; + +#[derive(PartialEq, Debug)] +pub struct DeployFlake<'a> { + pub repo: &'a str, + pub node: Option, + pub profile: Option, +} + +#[derive(Error, Debug)] +pub enum ParseFlakeError { + #[error("The given path was too long, did you mean to put something in quotes?")] + PathTooLong, + #[error("Unrecognized node or token encountered")] + Unrecognized, +} + +pub fn parse_flake(flake: &str) -> Result { + let flake_fragment_start = flake.find('#'); + let (repo, maybe_fragment) = match flake_fragment_start { + Some(s) => (&flake[..s], Some(&flake[s + 1..])), + None => (flake, None), + }; + + let mut node: Option = None; + let mut profile: Option = None; + + if let Some(fragment) = maybe_fragment { + let ast = rnix::parse(fragment); + + let first_child = match ast.root().node().first_child() { + Some(x) => x, + None => { + return Ok(DeployFlake { + repo, + node: None, + profile: None, + }) + } + }; + + let mut node_over = false; + + for entry in first_child.children_with_tokens() { + let x: Option = match (entry.kind(), node_over) { + (TOKEN_DOT, false) => { + node_over = true; + None + } + (TOKEN_DOT, true) => { + return Err(ParseFlakeError::PathTooLong); + } + (NODE_IDENT, _) => Some(entry.into_node().unwrap().text().to_string()), + (TOKEN_IDENT, _) => Some(entry.into_token().unwrap().text().to_string()), + (NODE_STRING, _) => { + let c = entry + .into_node() + .unwrap() + .children_with_tokens() + .nth(1) + .unwrap(); + + Some(c.into_token().unwrap().text().to_string()) + } + _ => return Err(ParseFlakeError::Unrecognized), + }; + + if !node_over { + node = x; + } else { + profile = x; + } + } + } + + Ok(DeployFlake { + repo, + node, + profile, + }) +} + +#[test] +fn test_parse_flake() { + assert_eq!( + parse_flake("../deploy/examples/system").unwrap(), + DeployFlake { + repo: "../deploy/examples/system", + node: None, + profile: None, + } + ); + + assert_eq!( + parse_flake("../deploy/examples/system#").unwrap(), + DeployFlake { + repo: "../deploy/examples/system", + node: None, + profile: None, + } + ); + + assert_eq!( + 
parse_flake("../deploy/examples/system#computer.\"something.nix\"").unwrap(), + DeployFlake { + repo: "../deploy/examples/system", + node: Some("computer".to_string()), + profile: Some("something.nix".to_string()), + } + ); + + assert_eq!( + parse_flake("../deploy/examples/system#\"example.com\".system").unwrap(), + DeployFlake { + repo: "../deploy/examples/system", + node: Some("example.com".to_string()), + profile: Some("system".to_string()), + } + ); + + assert_eq!( + parse_flake("../deploy/examples/system#example").unwrap(), + DeployFlake { + repo: "../deploy/examples/system", + node: Some("example".to_string()), + profile: None + } + ); + + assert_eq!( + parse_flake("../deploy/examples/system#example.system").unwrap(), + DeployFlake { + repo: "../deploy/examples/system", + node: Some("example".to_string()), + profile: Some("system".to_string()) + } + ); + + assert_eq!( + parse_flake("../deploy/examples/system").unwrap(), + DeployFlake { + repo: "../deploy/examples/system", + node: None, + profile: None, + } + ); +} + +#[derive(Debug)] +pub struct CmdOverrides { + pub ssh_user: Option, + pub profile_user: Option, + pub ssh_opts: Option, + pub fast_connection: Option, + pub auto_rollback: Option, + pub hostname: Option, + pub magic_rollback: Option, + pub temp_path: Option, + pub confirm_timeout: Option, + pub dry_activate: bool, +} + +#[derive(Debug, Clone)] +pub struct DeployData<'a> { + pub node_name: &'a str, + pub node: &'a settings::Node, + pub profile_name: &'a str, + pub profile: &'a settings::Profile, + + pub cmd_overrides: &'a CmdOverrides, + + pub merged_settings: settings::GenericSettings, + + pub debug_logs: bool, + pub log_dir: Option<&'a str>, +} + +#[derive(Debug)] +pub struct DeployDefs { + pub ssh_user: String, + pub profile_user: String, + pub profile_path: String, + pub sudo: Option, +} + +#[derive(Error, Debug)] +pub enum DeployDataDefsError { + #[error("Neither `user` nor `sshUser` are set for profile {0} of node {1}")] + NoProfileUser(String, String), +} + +impl<'a> DeployData<'a> { + pub fn defs(&'a self) -> Result { + let ssh_user = match self.merged_settings.ssh_user { + Some(ref u) => u.clone(), + None => whoami::username(), + }; + + let profile_user = self.get_profile_user()?; + + let profile_path = self.get_profile_path()?; + + let sudo: Option = match self.merged_settings.user { + Some(ref user) if user != &ssh_user => Some(format!("sudo -u {}", user)), + _ => None, + }; + + Ok(DeployDefs { + ssh_user, + profile_user, + profile_path, + sudo, + }) + } + + pub fn get_profile_path(&'a self) -> Result { + let profile_user = self.get_profile_user()?; + let profile_path = match self.profile.profile_settings.profile_path { + None => match &profile_user[..] 
{ + "root" => format!("/nix/var/nix/profiles/{}", self.profile_name), + _ => format!( + "/nix/var/nix/profiles/per-user/{}/{}", + profile_user, self.profile_name + ), + }, + Some(ref x) => x.clone(), + }; + Ok(profile_path) + } + + pub fn get_profile_user(&'a self) -> Result { + let profile_user = match self.merged_settings.user { + Some(ref x) => x.clone(), + None => match self.merged_settings.ssh_user { + Some(ref x) => x.clone(), + None => { + return Err(DeployDataDefsError::NoProfileUser( + self.profile_name.to_owned(), + self.node_name.to_owned(), + )) + } + }, + }; + Ok(profile_user) + } +} + +pub fn make_deploy_data<'a, 's>( + top_settings: &'s settings::GenericSettings, + node: &'a settings::Node, + node_name: &'a str, + profile: &'a settings::Profile, + profile_name: &'a str, + cmd_overrides: &'a CmdOverrides, + debug_logs: bool, + log_dir: Option<&'a str>, +) -> DeployData<'a> { + let mut merged_settings = profile.generic_settings.clone(); + merged_settings.merge(node.generic_settings.clone()); + merged_settings.merge(top_settings.clone()); + + if cmd_overrides.ssh_user.is_some() { + merged_settings.ssh_user = cmd_overrides.ssh_user.clone(); + } + if cmd_overrides.profile_user.is_some() { + merged_settings.user = cmd_overrides.profile_user.clone(); + } + if let Some(ref ssh_opts) = cmd_overrides.ssh_opts { + merged_settings.ssh_opts = ssh_opts.split(' ').map(|x| x.to_owned()).collect(); + } + if let Some(fast_connection) = cmd_overrides.fast_connection { + merged_settings.fast_connection = Some(fast_connection); + } + if let Some(auto_rollback) = cmd_overrides.auto_rollback { + merged_settings.auto_rollback = Some(auto_rollback); + } + if let Some(magic_rollback) = cmd_overrides.magic_rollback { + merged_settings.magic_rollback = Some(magic_rollback); + } + + DeployData { + node_name, + node, + profile_name, + profile, + cmd_overrides, + merged_settings, + debug_logs, + log_dir, + } +} diff --git a/src/deploy.rs b/src/deploy.rs index f8fc2f90..7c1048ea 100644 --- a/src/deploy.rs +++ b/src/deploy.rs @@ -9,7 +9,7 @@ use std::borrow::Cow; use thiserror::Error; use tokio::process::Command; -use crate::DeployDataDefsError; +use crate::data; struct ActivateCommandData<'a> { sudo: &'a Option, @@ -207,8 +207,8 @@ pub enum ConfirmProfileError { } pub async fn confirm_profile( - deploy_data: &super::DeployData<'_>, - deploy_defs: &super::DeployDefs, + deploy_data: &data::DeployData<'_>, + deploy_defs: &data::DeployDefs, temp_path: Cow<'_, str>, ssh_addr: &str, ) -> Result<(), ConfirmProfileError> { @@ -267,8 +267,8 @@ pub enum DeployProfileError { } pub async fn deploy_profile( - deploy_data: &super::DeployData<'_>, - deploy_defs: &super::DeployDefs, + deploy_data: &data::DeployData<'_>, + deploy_defs: &data::DeployDefs, dry_activate: bool, ) -> Result<(), DeployProfileError> { if !dry_activate { @@ -415,11 +415,11 @@ pub enum RevokeProfileError { SSHRevokeExit(Option), #[error("Deployment data invalid: {0}")] - InvalidDeployDataDefs(#[from] DeployDataDefsError), + InvalidDeployDataDefs(#[from] data::DeployDataDefsError), } pub async fn revoke( - deploy_data: &crate::DeployData<'_>, - deploy_defs: &crate::DeployDefs, + deploy_data: &data::DeployData<'_>, + deploy_defs: &data::DeployDefs, ) -> Result<(), RevokeProfileError> { let self_revoke_command = build_revoke_command(&RevokeCommandData { sudo: &deploy_defs.sudo, diff --git a/src/lib.rs b/src/lib.rs index 630df179..5cd69f81 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -4,12 +4,6 @@ // // SPDX-License-Identifier: MPL-2.0 -use 
rnix::{types::*, SyntaxKind::*}; - -use merge::Merge; - -use thiserror::Error; - use flexi_logger::*; pub fn make_lock_path(temp_path: &str, closure: &str) -> String { @@ -146,295 +140,7 @@ pub fn init_logger( } pub mod settings; +pub mod data; pub mod deploy; pub mod push; pub mod cli; - -#[derive(Debug)] -pub struct CmdOverrides { - pub ssh_user: Option, - pub profile_user: Option, - pub ssh_opts: Option, - pub fast_connection: Option, - pub auto_rollback: Option, - pub hostname: Option, - pub magic_rollback: Option, - pub temp_path: Option, - pub confirm_timeout: Option, - pub dry_activate: bool, -} - -#[derive(PartialEq, Debug)] -pub struct DeployFlake<'a> { - pub repo: &'a str, - pub node: Option, - pub profile: Option, -} - -#[derive(Error, Debug)] -pub enum ParseFlakeError { - #[error("The given path was too long, did you mean to put something in quotes?")] - PathTooLong, - #[error("Unrecognized node or token encountered")] - Unrecognized, -} -pub fn parse_flake(flake: &str) -> Result { - let flake_fragment_start = flake.find('#'); - let (repo, maybe_fragment) = match flake_fragment_start { - Some(s) => (&flake[..s], Some(&flake[s + 1..])), - None => (flake, None), - }; - - let mut node: Option = None; - let mut profile: Option = None; - - if let Some(fragment) = maybe_fragment { - let ast = rnix::parse(fragment); - - let first_child = match ast.root().node().first_child() { - Some(x) => x, - None => { - return Ok(DeployFlake { - repo, - node: None, - profile: None, - }) - } - }; - - let mut node_over = false; - - for entry in first_child.children_with_tokens() { - let x: Option = match (entry.kind(), node_over) { - (TOKEN_DOT, false) => { - node_over = true; - None - } - (TOKEN_DOT, true) => { - return Err(ParseFlakeError::PathTooLong); - } - (NODE_IDENT, _) => Some(entry.into_node().unwrap().text().to_string()), - (TOKEN_IDENT, _) => Some(entry.into_token().unwrap().text().to_string()), - (NODE_STRING, _) => { - let c = entry - .into_node() - .unwrap() - .children_with_tokens() - .nth(1) - .unwrap(); - - Some(c.into_token().unwrap().text().to_string()) - } - _ => return Err(ParseFlakeError::Unrecognized), - }; - - if !node_over { - node = x; - } else { - profile = x; - } - } - } - - Ok(DeployFlake { - repo, - node, - profile, - }) -} - -#[test] -fn test_parse_flake() { - assert_eq!( - parse_flake("../deploy/examples/system").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", - node: None, - profile: None, - } - ); - - assert_eq!( - parse_flake("../deploy/examples/system#").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", - node: None, - profile: None, - } - ); - - assert_eq!( - parse_flake("../deploy/examples/system#computer.\"something.nix\"").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", - node: Some("computer".to_string()), - profile: Some("something.nix".to_string()), - } - ); - - assert_eq!( - parse_flake("../deploy/examples/system#\"example.com\".system").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", - node: Some("example.com".to_string()), - profile: Some("system".to_string()), - } - ); - - assert_eq!( - parse_flake("../deploy/examples/system#example").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", - node: Some("example".to_string()), - profile: None - } - ); - - assert_eq!( - parse_flake("../deploy/examples/system#example.system").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", - node: Some("example".to_string()), - profile: Some("system".to_string()) - } - ); - - assert_eq!( - 
parse_flake("../deploy/examples/system").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", - node: None, - profile: None, - } - ); -} - -#[derive(Debug, Clone)] -pub struct DeployData<'a> { - pub node_name: &'a str, - pub node: &'a settings::Node, - pub profile_name: &'a str, - pub profile: &'a settings::Profile, - - pub cmd_overrides: &'a CmdOverrides, - - pub merged_settings: settings::GenericSettings, - - pub debug_logs: bool, - pub log_dir: Option<&'a str>, -} - -#[derive(Debug)] -pub struct DeployDefs { - pub ssh_user: String, - pub profile_user: String, - pub profile_path: String, - pub sudo: Option, -} - -#[derive(Error, Debug)] -pub enum DeployDataDefsError { - #[error("Neither `user` nor `sshUser` are set for profile {0} of node {1}")] - NoProfileUser(String, String), -} - -impl<'a> DeployData<'a> { - pub fn defs(&'a self) -> Result { - let ssh_user = match self.merged_settings.ssh_user { - Some(ref u) => u.clone(), - None => whoami::username(), - }; - - let profile_user = self.get_profile_user()?; - - let profile_path = self.get_profile_path()?; - - let sudo: Option = match self.merged_settings.user { - Some(ref user) if user != &ssh_user => Some(format!("sudo -u {}", user)), - _ => None, - }; - - Ok(DeployDefs { - ssh_user, - profile_user, - profile_path, - sudo, - }) - } - - fn get_profile_path(&'a self) -> Result { - let profile_user = self.get_profile_user()?; - let profile_path = match self.profile.profile_settings.profile_path { - None => match &profile_user[..] { - "root" => format!("/nix/var/nix/profiles/{}", self.profile_name), - _ => format!( - "/nix/var/nix/profiles/per-user/{}/{}", - profile_user, self.profile_name - ), - }, - Some(ref x) => x.clone(), - }; - Ok(profile_path) - } - - fn get_profile_user(&'a self) -> Result { - let profile_user = match self.merged_settings.user { - Some(ref x) => x.clone(), - None => match self.merged_settings.ssh_user { - Some(ref x) => x.clone(), - None => { - return Err(DeployDataDefsError::NoProfileUser( - self.profile_name.to_owned(), - self.node_name.to_owned(), - )) - } - }, - }; - Ok(profile_user) - } -} - -pub fn make_deploy_data<'a, 's>( - top_settings: &'s settings::GenericSettings, - node: &'a settings::Node, - node_name: &'a str, - profile: &'a settings::Profile, - profile_name: &'a str, - cmd_overrides: &'a CmdOverrides, - debug_logs: bool, - log_dir: Option<&'a str>, -) -> DeployData<'a> { - let mut merged_settings = profile.generic_settings.clone(); - merged_settings.merge(node.generic_settings.clone()); - merged_settings.merge(top_settings.clone()); - - if cmd_overrides.ssh_user.is_some() { - merged_settings.ssh_user = cmd_overrides.ssh_user.clone(); - } - if cmd_overrides.profile_user.is_some() { - merged_settings.user = cmd_overrides.profile_user.clone(); - } - if let Some(ref ssh_opts) = cmd_overrides.ssh_opts { - merged_settings.ssh_opts = ssh_opts.split(' ').map(|x| x.to_owned()).collect(); - } - if let Some(fast_connection) = cmd_overrides.fast_connection { - merged_settings.fast_connection = Some(fast_connection); - } - if let Some(auto_rollback) = cmd_overrides.auto_rollback { - merged_settings.auto_rollback = Some(auto_rollback); - } - if let Some(magic_rollback) = cmd_overrides.magic_rollback { - merged_settings.magic_rollback = Some(magic_rollback); - } - - DeployData { - node_name, - node, - profile_name, - profile, - cmd_overrides, - merged_settings, - debug_logs, - log_dir, - } -} diff --git a/src/push.rs b/src/push.rs index 69eba0db..ee55a123 100644 --- a/src/push.rs +++ b/src/push.rs @@ 
-9,6 +9,8 @@ use std::process::Stdio;
 use thiserror::Error;
 use tokio::process::Command;
 
+use crate::data;
+
 #[derive(Error, Debug)]
 pub enum PushProfileError {
     #[error("Failed to run Nix show-derivation command: {0}")]
@@ -47,8 +49,8 @@ pub struct PushProfileData<'a> {
     pub supports_flakes: bool,
     pub check_sigs: bool,
     pub repo: &'a str,
-    pub deploy_data: &'a super::DeployData<'a>,
-    pub deploy_defs: &'a super::DeployDefs,
+    pub deploy_data: &'a data::DeployData<'a>,
+    pub deploy_defs: &'a data::DeployDefs,
    pub keep_result: bool,
    pub result_path: Option<&'a str>,
    pub extra_build_args: &'a [String],
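
Reviewer note (not part of the series): a minimal sketch of how the relocated API reads from inside the crate once both patches are applied. It only exercises `data::parse_flake` and the `DeployFlake` fields introduced in src/data.rs above; the wrapping function and its name are illustrative scaffolding, not code from the patches.

// A hedged usage sketch, assuming the post-refactor module layout from this
// series: `crate::data` owns DeployFlake / parse_flake / DeployData, while the
// node and profile configuration types live in `crate::settings`.
use crate::data;

// Hypothetical helper mirroring the parse_flake test cases moved into
// src/data.rs by the second patch.
fn parse_target_example() -> Result<(), data::ParseFlakeError> {
    let flake = data::parse_flake("../deploy/examples/system#example.system")?;
    assert_eq!(flake.repo, "../deploy/examples/system");
    assert_eq!(flake.node, Some("example".to_string()));
    assert_eq!(flake.profile, Some("system".to_string()));
    Ok(())
}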