feat!: dry-run behaviour of commands `add nodes to group` and `remove nodes to group` inverted
feat!: remove feature to create HSM group in command `add nodes to group`
feat!: remove feature to clean HSM group in command `remove nodes from group`
feat: update mesa
feat: command `add nodes to group` now accepts regex
feat: command `remove nodes from group` now accepts regex
feat!: command `add nodes to group` now shows a dialog asking the user for confirmation
feat!: command `remove nodes from group` now shows a dialog asking the user for confirmation
refactor: JWT operations
refactor: clean code
Manuel Sopena Ballesteros committed Oct 21, 2024
1 parent b8b4503 commit 70b916d
Showing 13 changed files with 276 additions and 328 deletions.
2 changes: 1 addition & 1 deletion Cargo.toml
@@ -16,7 +16,7 @@ publish = false # cargo
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
mesa = "0.41.20"
mesa = "0.41.21"
# mesa = { path = "../mesa" } # Only for development purposes
hostlist-parser = "0.1.6"
strum = "0.25.0"
14 changes: 7 additions & 7 deletions src/cli/build.rs
@@ -182,17 +182,17 @@ pub fn build_cli() -> Command {
.visible_aliases(["ag"])
.about("Add nodes to a list of groups")
.arg(arg!(-n --nodes <VALUE> "Comma separated list of nodes"))
.arg(arg!(-g --groups <VALUE> "Comma separated list of groups"))
.arg(arg!(-c --create "Create the group if missing"))
.arg(arg!(-g --group <VALUE> "HSM group to assign the nodes to"))
.arg(arg!(-r --"regex" "Input nodes in regex format.").action(ArgAction::SetTrue))
.arg(arg!(-d --"dry-run" "Simulates the execution of the command without making any actual changes.").action(ArgAction::SetTrue))
)
.subcommand(Command::new("delete-nodes-from-groups")
.visible_aliases(["dg"])
.subcommand(Command::new("remove-nodes-from-groups")
.visible_aliases(["rg"])
.about("Remove nodes from groups")
.arg(arg!(-n --nodes <VALUE> "Comma separated list of nodes"))
.arg(arg!(-g --groups <VALUE> "Comma separated list of groups"))
.arg(arg!(-c --clean "Delete the group if empty"))
.arg(arg!(-d --"dry-run" "Simulates the execution of the command without making any actual changes.").action(ArgAction::SetTrue))
.arg(arg!(-g --group <VALUE> "HSM group to remove the nodes from"))
.arg(arg!(-r --"regex" "Input nodes in regex format.").action(ArgAction::SetTrue))
.arg(arg!(-d --"dry-run" "Simulates the execution of the command without making any actual changes.").action(ArgAction::SetTrue))
)
}
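Not part of this diff, but for context: a minimal sketch of how the dispatch side could read the new options, assuming clap 4's ArgMatches accessors; the handler name and printout below are illustrative only, not the actual command handler.

// Illustrative sketch (not in this commit): reading the new options
// declared above with clap 4's ArgMatches accessors.
use clap::ArgMatches;

fn handle_add_nodes_to_groups(matches: &ArgMatches) {
    // --nodes and --group carry plain string values.
    let nodes = matches.get_one::<String>("nodes");
    let group = matches.get_one::<String>("group");

    // --regex and --dry-run were declared with ArgAction::SetTrue,
    // so get_flag() reports whether the user passed them.
    let is_regex = matches.get_flag("regex");
    let dry_run = matches.get_flag("dry-run");

    println!("group={:?} nodes={:?} regex={} dry-run={}", group, nodes, is_regex, dry_run);
}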

139 changes: 75 additions & 64 deletions src/cli/commands/add_nodes_to_hsm_groups.rs
@@ -1,38 +1,45 @@
use std::collections::HashMap;

use dialoguer::{theme::ColorfulTheme, Confirm};

use crate::common;

/// Add/assign a list of xnames to a list of HSM groups
pub async fn exec(
shasta_token: &str,
shasta_base_url: &str,
shasta_root_cert: &[u8],
target_hsm_name_vec: Vec<String>,
xname_requested_hostlist: &str,
nodryrun: bool,
create_hsm_group_if_does_not_exists: bool,
target_hsm_name: &String,
is_regex: bool,
xname_requested: &str,
dryrun: bool,
) {
// Filter xnames to the ones members to HSM groups the user has access to
//
// Get HashMap with HSM groups and members curated for this request.
// NOTE: the list of HSM groups are the ones the user has access to and containing nodes within
// the hostlist input. Also, each HSM group member list is also curated so xnames not in
// hostlist have been removed
let mut hsm_group_summary: HashMap<String, Vec<String>> =
crate::common::node_ops::get_curated_hsm_group_from_hostlist(
let hsm_group_summary: HashMap<String, Vec<String>> = if is_regex {
common::node_ops::get_curated_hsm_group_from_hostregex(
shasta_token,
shasta_base_url,
shasta_root_cert,
xname_requested_hostlist,
xname_requested,
)
.await;

// Keep HSM groups based on list of target HSM groups provided
hsm_group_summary.retain(|hsm_name, _xname_vec| target_hsm_name_vec.contains(hsm_name));
.await
} else {
// Get HashMap with HSM groups and members curated for this request.
// NOTE: the list of HSM groups are the ones the user has access to and containing nodes within
// the hostlist input. Also, each HSM group member list is also curated so xnames not in
// hostlist have been removed
common::node_ops::get_curated_hsm_group_from_hostlist(
shasta_token,
shasta_base_url,
shasta_root_cert,
xname_requested,
)
.await
};

// Get list of xnames available
let mut xname_to_move_vec: Vec<&String> = hsm_group_summary
.iter()
.flat_map(|(_hsm_group_name, hsm_group_members)| hsm_group_members)
.collect();
let mut xname_to_move_vec: Vec<&String> = hsm_group_summary.values().flatten().collect();

xname_to_move_vec.sort();
xname_to_move_vec.dedup();
@@ -43,53 +50,57 @@ pub async fn exec(
std::process::exit(0);
}

for target_hsm_name in target_hsm_name_vec {
if mesa::hsm::group::http_client::get(
shasta_token,
shasta_base_url,
shasta_root_cert,
Some(&target_hsm_name),
)
.await
.is_ok()
{
log::debug!("The HSM group {} exists, good.", target_hsm_name);
} else {
if create_hsm_group_if_does_not_exists {
log::info!(
"HSM group {} does not exist, it will be created",
target_hsm_name
);
} else {
log::error!("HSM group {} does not exist, but the option to create the group was NOT specificied, cannot continue.", target_hsm_name);
std::process::exit(1);
}
}
if Confirm::with_theme(&ColorfulTheme::default())
.with_prompt(format!(
"{:?}\nThe nodes above will be added to HSM group '{}'. Do you want to proceed?",
xname_to_move_vec, target_hsm_name
))
.interact()
.unwrap()
{
log::info!("Continue",);
} else {
println!("Cancelled by user. Aborting.");
std::process::exit(0);
}

let target_hsm_group_vec = mesa::hsm::group::http_client::get(
shasta_token,
shasta_base_url,
shasta_root_cert,
Some(&target_hsm_name),
)
.await
.expect("ERROR - Could not get target HSM group");

if target_hsm_group_vec.is_empty() {
eprintln!(
"Target HSM group {} does not exist, Nothing to do. Exit",
target_hsm_name
);
}

for (target_hsm_name, xname_to_move_vec) in &hsm_group_summary {
let node_migration_rslt = mesa::hsm::group::utils::add_hsm_members(
shasta_token,
shasta_base_url,
shasta_root_cert,
&target_hsm_name,
xname_to_move_vec
.iter()
.map(|xname| xname.as_str())
.collect(),
nodryrun,
)
.await;
let node_migration_rslt = mesa::hsm::group::utils::add_hsm_members(
shasta_token,
shasta_base_url,
shasta_root_cert,
&target_hsm_name,
xname_to_move_vec
.iter()
.map(|xname| xname.as_str())
.collect(),
dryrun,
)
.await;

match node_migration_rslt {
Ok(mut target_hsm_group_member_vec) => {
target_hsm_group_member_vec.sort();
println!(
"HSM '{}' members: {:?}",
target_hsm_name, target_hsm_group_member_vec
);
}
Err(e) => eprintln!("{}", e),
}
match node_migration_rslt {
Ok(mut target_hsm_group_member_vec) => {
target_hsm_group_member_vec.sort();
println!(
"HSM '{}' members: {:?}",
target_hsm_name, target_hsm_group_member_vec
);
}
Err(e) => eprintln!("{}", e),
}
}
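The confirmation prompt added in this file follows the standard dialoguer pattern; below is a self-contained sketch of that pattern, with a hypothetical helper name (confirm_or_abort is not part of the codebase) and the same prompt wording as the diff.

// Illustrative sketch (not in this commit): the dialoguer confirmation
// pattern used above, isolated as a helper.
use dialoguer::{theme::ColorfulTheme, Confirm};

fn confirm_or_abort(xnames: &[&String], group: &str) {
    let proceed = Confirm::with_theme(&ColorfulTheme::default())
        .with_prompt(format!(
            "{:?}\nThe nodes above will be added to HSM group '{}'. Do you want to proceed?",
            xnames, group
        ))
        .interact()
        .unwrap();

    if !proceed {
        println!("Cancelled by user. Aborting.");
        std::process::exit(0);
    }
}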
11 changes: 2 additions & 9 deletions src/cli/commands/apply_session.rs
@@ -4,10 +4,7 @@ use dialoguer::{theme::ColorfulTheme, Confirm};
use futures::TryStreamExt;
use mesa::{
cfs::{self, session::mesa::r#struct::v3::CfsSessionPostRequest},
common::{
jwt_ops::get_claims_from_jwt_token, kubernetes,
vault::http_client::fetch_shasta_k8s_secrets,
},
common::{jwt_ops, kubernetes, vault::http_client::fetch_shasta_k8s_secrets},
error::Error,
node::utils::validate_xnames,
};
@@ -199,11 +196,7 @@ pub async fn exec(
// * End Create CFS session

// Audit
let jwt_claims = get_claims_from_jwt_token(shasta_token).unwrap();
/* println!("jwt_claims:\n{:#?}", jwt_claims);
println!("Name: {}", jwt_claims["name"]); */

log::info!(target: "app::audit", "User: {} ({}) ; Operation: Apply session", jwt_claims["name"].as_str().unwrap(), jwt_claims["preferred_username"].as_str().unwrap());
log::info!(target: "app::audit", "User: {} ({}) ; Operation: Apply session", jwt_ops::get_name(shasta_token).unwrap(), jwt_ops::get_preferred_username(shasta_token).unwrap());

(cfs_configuration_name, cfs_session_name)
}
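The same audit-log refactor repeats in the power commands below; here is a standalone sketch of the pattern, assuming the jwt_ops::get_name and jwt_ops::get_preferred_username helpers from the mesa 0.41.21 bump behave as the .unwrap() calls in the diff suggest.

// Illustrative sketch (not in this commit): the audit-log pattern after
// the jwt_ops refactor, with the raw JWT-claim handling hidden in mesa.
use mesa::common::jwt_ops;

fn audit(shasta_token: &str, operation: &str) {
    log::info!(
        target: "app::audit",
        "User: {} ({}) ; Operation: {}",
        jwt_ops::get_name(shasta_token).unwrap(),
        jwt_ops::get_preferred_username(shasta_token).unwrap(),
        operation
    );
}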
13 changes: 3 additions & 10 deletions src/cli/commands/config_set_hsm.rs
@@ -1,7 +1,7 @@
use std::{fs, io::Write, path::PathBuf};

use directories::ProjectDirs;
use mesa::common::jwt_ops::get_claims_from_jwt_token;
use mesa::common::jwt_ops;
use toml_edit::{value, Document};

pub async fn exec(
@@ -36,15 +36,8 @@ pub async fn exec(
.parse::<Document>()
.expect("ERROR: could not parse configuration file to TOML");

let mut settings_hsm_available_vec = get_claims_from_jwt_token(shasta_token)
.unwrap()
.pointer("/realm_access/roles")
.unwrap_or(&serde_json::json!([]))
.as_array()
.unwrap()
.iter()
.map(|role_value| role_value.as_str().unwrap().to_string())
.collect::<Vec<String>>();
let mut settings_hsm_available_vec =
jwt_ops::get_hsm_name_available(shasta_token).unwrap_or_default();

settings_hsm_available_vec
.retain(|role| !role.eq("offline_access") && !role.eq("uma_authorization"));
3 changes: 2 additions & 1 deletion src/cli/commands/config_show.rs
@@ -77,6 +77,7 @@ pub async fn get_hsm_name_available_from_jwt_or_all(
shasta_base_url: &str,
shasta_root_cert: &[u8],
) -> Vec<String> {
// FIXME: stop calling `mesa::jwt_ops::get_claims_from_jwt_token` and use `mesa::jwt_ops::get_hsm_name_available` instead
let mut realm_access_role_vec = get_claims_from_jwt_token(shasta_token)
.unwrap()
.pointer("/realm_access/roles")
@@ -91,7 +92,7 @@
.retain(|role| !role.eq("offline_access") && !role.eq("uma_authorization"));

if !realm_access_role_vec.is_empty() {
//TODO: Get rid of this by making sure CSM admins don't create HSM groups for system
//FIXME: Get rid of this by making sure CSM admins don't create HSM groups for system
//wide operations instead of using roles
let mut realm_access_role_filtered_vec =
mesa::hsm::group::hacks::filter_system_hsm_group_names(realm_access_role_vec);
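The FIXME above points at the same simplification already applied in config_set_hsm.rs; below is a hedged sketch of what that follow-up could look like, covering only the JWT branch and assuming jwt_ops::get_hsm_name_available returns the role list the manual /realm_access/roles traversal produces today.

// Illustrative sketch (not in this commit): possible follow-up to the
// FIXME, mirroring the refactor already done in config_set_hsm.rs.
use mesa::common::jwt_ops;

fn hsm_names_from_jwt(shasta_token: &str) -> Vec<String> {
    let mut realm_access_role_vec =
        jwt_ops::get_hsm_name_available(shasta_token).unwrap_or_default();

    // Keycloak roles that are not HSM group names are still filtered out.
    realm_access_role_vec
        .retain(|role| !role.eq("offline_access") && !role.eq("uma_authorization"));

    realm_access_role_vec
}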
31 changes: 2 additions & 29 deletions src/cli/commands/power_off_nodes.rs
@@ -1,5 +1,5 @@
use mesa::{
common::jwt_ops::get_claims_from_jwt_token,
common::jwt_ops,
error::Error,
pcs::{self, transitions::r#struct::Location},
};
@@ -64,32 +64,5 @@ pub async fn exec(
common::pcs_utils::print_summary_table(power_mgmt_summary, output);

// Audit
let jwt_claims = get_claims_from_jwt_token(shasta_token).unwrap();

log::info!(target: "app::audit", "User: {} ({}) ; Operation: Power off nodes {:?}", jwt_claims["name"].as_str().unwrap(), jwt_claims["preferred_username"].as_str().unwrap(), xname_vec);

/* // Check Nodes are shutdown
let _ = capmc::http_client::node_power_status::post(
shasta_token,
shasta_base_url,
shasta_root_cert,
&xname_vec,
)
.await
.unwrap();
// Audit
let jwt_claims = get_claims_from_jwt_token(shasta_token).unwrap();
log::info!(target: "app::audit", "User: {} ({}) ; Operation: Power off nodes {:?}", jwt_claims["name"].as_str().unwrap(), jwt_claims["preferred_username"].as_str().unwrap(), xname_vec);
let _ = wait_nodes_to_power_off(
shasta_token,
shasta_base_url,
shasta_root_cert,
xname_vec,
reason_opt,
force,
)
.await; */
log::info!(target: "app::audit", "User: {} ({}) ; Operation: Power off nodes {:?}", jwt_ops::get_name(shasta_token).unwrap(), jwt_ops::get_preferred_username(shasta_token).unwrap(), xname_vec);
}
10 changes: 6 additions & 4 deletions src/cli/commands/power_on_cluster.rs
@@ -1,4 +1,8 @@
use mesa::{common::jwt_ops::get_claims_from_jwt_token, error::Error, pcs};
use mesa::{
common::jwt_ops::{self},
error::Error,
pcs,
};

use crate::common;

@@ -63,7 +67,5 @@ pub async fn exec(
common::pcs_utils::print_summary_table(power_mgmt_summary, output);

// Audit
let jwt_claims = get_claims_from_jwt_token(shasta_token).unwrap();

log::info!(target: "app::audit", "User: {} ({}) ; Operation: Power on cluster {}", jwt_claims["name"].as_str().unwrap(), jwt_claims["preferred_username"].as_str().unwrap(), hsm_group_name_arg_opt);
log::info!(target: "app::audit", "User: {} ({}) ; Operation: Power on cluster {}", jwt_ops::get_name(shasta_token).unwrap(), jwt_ops::get_preferred_username(shasta_token).unwrap(), hsm_group_name_arg_opt);
}
30 changes: 2 additions & 28 deletions src/cli/commands/power_on_nodes.rs
@@ -1,4 +1,4 @@
use mesa::{common::jwt_ops::get_claims_from_jwt_token, error::Error, pcs};
use mesa::{common::jwt_ops, error::Error, pcs};

use crate::common;

@@ -47,31 +47,5 @@ pub async fn exec(
common::pcs_utils::print_summary_table(power_mgmt_summary, output);

// Audit
let jwt_claims = get_claims_from_jwt_token(shasta_token).unwrap();

log::info!(target: "app::audit", "User: {} ({}) ; Operation: Power on nodes {:?}", jwt_claims["name"].as_str().unwrap(), jwt_claims["preferred_username"].as_str().unwrap(), xname_vec);

/* // Check Nodes are shutdown
let _ = capmc::http_client::node_power_status::post(
shasta_token,
shasta_base_url,
shasta_root_cert,
&xname_vec,
)
.await
.unwrap();
// Audit
let jwt_claims = get_claims_from_jwt_token(shasta_token).unwrap();
log::info!(target: "app::audit", "User: {} ({}) ; Operation: Power on nodes {:?}", jwt_claims["name"].as_str().unwrap(), jwt_claims["preferred_username"].as_str().unwrap(), xname_vec);
let _ = wait_nodes_to_power_on(
shasta_token,
shasta_base_url,
shasta_root_cert,
xname_vec,
reason_opt,
)
.await; */
log::info!(target: "app::audit", "User: {} ({}) ; Operation: Power on nodes {:?}", jwt_ops::get_name(shasta_token).unwrap(), jwt_ops::get_preferred_username(shasta_token).unwrap(), xname_vec);
}
