From 33d1e98e753bea3e33530b13afa82bb63abf96d3 Mon Sep 17 00:00:00 2001
From: Philippe Karan
Date: Thu, 15 Feb 2024 15:25:30 -0500
Subject: [PATCH] Uniformize verbose level in modules

---
 scilpy/image/volume_operations.py           | 22 ++++-------
 scilpy/reconst/fodf.py                      |  4 +-
 scilpy/reconst/frf.py                       | 14 +++----
 scilpy/stats/matrix_stats.py                | 18 ++++-----
 scilpy/stats/stats.py                       | 26 ++++++------
 scilpy/stats/utils.py                       | 32 ++++++++-------
 scilpy/tracking/tracker.py                  |  4 +-
 scilpy/tractanalysis/features.py            |  6 +--
 scilpy/tractograms/streamline_operations.py | 44 ++++++++++-----------
 scilpy/tractograms/tractogram_operations.py |  6 +--
 scripts/scil_dwi_compute_snr.py             |  3 +-
 11 files changed, 87 insertions(+), 92 deletions(-)

diff --git a/scilpy/image/volume_operations.py b/scilpy/image/volume_operations.py
index 16fd8b04f..2643e14e7 100644
--- a/scilpy/image/volume_operations.py
+++ b/scilpy/image/volume_operations.py
@@ -241,7 +241,7 @@ def register_image(static, static_grid2world, moving, moving_grid2world,
 
 def compute_snr(dwi, bval, bvec, b0_thr, mask,
                 noise_mask=None, noise_map=None, split_shells=False,
-                basename=None, verbose=False):
+                basename=None):
     """
     Compute snr
 
@@ -264,16 +264,10 @@ def compute_snr(dwi, bval, bvec, b0_thr, mask,
     basename: string
         Basename used for naming all output files.
 
-    verbose: boolean
-        Set to use logging
-
     Return
     ------
     Dictionary of values (bvec, bval, mean, std, snr) for all volumes.
     """
-    if verbose:
-        logging.getLogger().setLevel(logging.INFO)
-
     data = dwi.get_fdata(dtype=np.float32)
     affine = dwi.affine
     mask = get_data_as_mask(mask, dtype=bool)
@@ -416,17 +410,17 @@ def resample_volume(img, ref=None, res=None, iso_min=False, zoom=None,
     if interp not in interp_choices:
         raise ValueError("interp must be one of 'nn', 'lin', 'quad', 'cubic'.")
 
-    logging.debug('Data shape: %s', data.shape)
-    logging.debug('Data affine: %s', affine)
-    logging.debug('Data affine setup: %s', nib.aff2axcodes(affine))
-    logging.debug('Resampling data to %s with mode %s', new_zooms, interp)
+    logging.info('Data shape: %s', data.shape)
+    logging.info('Data affine: %s', affine)
+    logging.info('Data affine setup: %s', nib.aff2axcodes(affine))
+    logging.info('Resampling data to %s with mode %s', new_zooms, interp)
 
     data2, affine2 = reslice(data, affine, original_zooms, new_zooms,
                              _interp_code_to_order(interp))
 
-    logging.debug('Resampled data shape: %s', data2.shape)
-    logging.debug('Resampled data affine: %s', affine2)
-    logging.debug('Resampled data affine setup: %s', nib.aff2axcodes(affine2))
+    logging.info('Resampled data shape: %s', data2.shape)
+    logging.info('Resampled data affine: %s', affine2)
+    logging.info('Resampled data affine setup: %s', nib.aff2axcodes(affine2))
 
     if enforce_dimensions:
         if ref is None:
diff --git a/scilpy/reconst/fodf.py b/scilpy/reconst/fodf.py
index 3cb1b614e..a64206a0c 100644
--- a/scilpy/reconst/fodf.py
+++ b/scilpy/reconst/fodf.py
@@ -91,13 +91,13 @@ def get_ventricles_max_fodf(data, fa, md, zoom, args):
                 count += 1
                 mask[i, j, k] = 1
 
-    logging.debug('Number of voxels detected: {}'.format(count))
+    logging.info('Number of voxels detected: {}'.format(count))
     if count == 0:
         logging.warning('No voxels found for evaluation! Change your fa '
                         'and/or md thresholds')
         return 0, mask
 
-    logging.debug('Average max fodf value: {}'.format(sum_of_max / count))
+    logging.info('Average max fodf value: {}'.format(sum_of_max / count))
     return sum_of_max / count, mask
 
 
diff --git a/scilpy/reconst/frf.py b/scilpy/reconst/frf.py
index d40524926..f9c2a3137 100644
--- a/scilpy/reconst/frf.py
+++ b/scilpy/reconst/frf.py
@@ -107,7 +107,7 @@ def compute_ssst_frf(data, bvals, bvecs, mask=None, mask_wm=None,
         nvox = np.sum(mask)
         response, ratio = response_from_mask_ssst(gtab, data, mask)
 
-        logging.debug(
+        logging.info(
             "Number of indices is {:d} with threshold of {:.2f}".format(
                 nvox, fa_thresh))
         fa_thresh -= 0.05
@@ -117,14 +117,14 @@ def compute_ssst_frf(data, bvals, bvecs, mask=None, mask_wm=None,
             "Could not find at least {:d} voxels with sufficient FA "
             "to estimate the FRF!".format(min_nvox))
 
-    logging.debug(
+    logging.info(
         "Found {:d} voxels with FA threshold {:.2f} for "
         "FRF estimation".format(nvox, fa_thresh + 0.05))
-    logging.debug("FRF eigenvalues: {}".format(str(response[0])))
-    logging.debug("Ratio for smallest to largest eigen value "
-                  "is {:.3f}".format(ratio))
-    logging.debug("Mean of the b=0 signal for voxels used "
-                  "for FRF: {}".format(response[1]))
+    logging.info("FRF eigenvalues: {}".format(str(response[0])))
+    logging.info("Ratio for smallest to largest eigen value "
+                 "is {:.3f}".format(ratio))
+    logging.info("Mean of the b=0 signal for voxels used "
+                 "for FRF: {}".format(response[1]))
 
     full_response = np.array([response[0][0], response[0][1],
                               response[0][2], response[1]])
diff --git a/scilpy/stats/matrix_stats.py b/scilpy/stats/matrix_stats.py
index 8f1767d1e..d77b5e1fc 100644
--- a/scilpy/stats/matrix_stats.py
+++ b/scilpy/stats/matrix_stats.py
@@ -67,8 +67,8 @@ def ttest_two_matrices(matrices_g1, matrices_g2, paired, tail, fdr,
 
     sum_both_groups = np.sum(matrices_g1, axis=2) + np.sum(matrices_g2, axis=2)
     nbr_non_zeros = np.count_nonzero(np.triu(sum_both_groups))
-    logging.debug('The provided matrices contain {} non zeros elements.'
-                  .format(nbr_non_zeros))
+    logging.info('The provided matrices contain {} non zeros elements.'
+                 .format(nbr_non_zeros))
 
     matrices_g1 = matrices_g1.reshape((np.prod(matrix_shape), nb_group_g1))
     matrices_g2 = matrices_g2.reshape((np.prod(matrix_shape), nb_group_g2))
@@ -76,10 +76,10 @@ def ttest_two_matrices(matrices_g1, matrices_g2, paired, tail, fdr,
     matrix_pval = np.ones(np.prod(matrix_shape)) * -0.000001
 
     text = ' paired' if paired else ''
-    logging.debug('Performing{} t-test with "{}" hypothesis.'
-                  .format(text, tail))
-    logging.debug('Data has dimensions {}x{} with {} and {} observations.'
-                  .format(matrix_shape[0], matrix_shape[1],
+    logging.info('Performing{} t-test with "{}" hypothesis.'
+                 .format(text, tail))
+    logging.info('Data has dimensions {}x{} with {} and {} observations.'
+                 .format(matrix_shape[0], matrix_shape[1],
                          nb_group_g1, nb_group_g2))
 
     # For conversion to p-values
@@ -105,7 +105,7 @@ def ttest_two_matrices(matrices_g1, matrices_g2, paired, tail, fdr,
     corr_matrix_pval = matrix_pval.reshape(matrix_shape)
 
     if fdr:
-        logging.debug('Using FDR, the results will be q-values.')
+        logging.info('Using FDR, the results will be q-values.')
         corr_matrix_pval = np.triu(corr_matrix_pval)
         corr_matrix_pval[corr_matrix_pval > 0] = multipletests(
             corr_matrix_pval[corr_matrix_pval > 0], 0, method='fdr_bh')[1]
@@ -158,8 +158,8 @@ def omega_sigma(matrix):
     transitivity_latt_list = []
     path_length_rand_list = []
     for i in range(10):
-        logging.debug('Generating random and lattice matrices, '
-                      'iteration #{}.'.format(i))
+        logging.info('Generating random and lattice matrices, '
+                     'iteration #{}.'.format(i))
         random = bct.randmio_und(matrix, 10)[0]
         lattice = bct.latmio_und(matrix, 10)[1]
 
diff --git a/scilpy/stats/stats.py b/scilpy/stats/stats.py
index 50ff83faa..f02246664 100644
--- a/scilpy/stats/stats.py
+++ b/scilpy/stats/stats.py
@@ -32,10 +32,10 @@ def verify_normality(data, alpha=0.05):
     # First, we verify if sample pass Shapiro-Wilk test
     W, p_value = scipy.stats.shapiro(data)
 
     if p_value < alpha and len(data) < 30:
-        logging.debug('The data sample can not be considered normal')
+        logging.info('The data sample can not be considered normal')
         normality = False
     else:
-        logging.debug('The data sample pass the normality assumption.')
+        logging.info('The data sample pass the normality assumption.')
         normality = True
     return normality, p_value
 
@@ -76,12 +76,12 @@ def verify_homoscedasticity(data_by_group, normality=False, alpha=0.05):
     else:
         test = 'Levene'
         W, p_value = scipy.stats.levene(*data_by_group)
-    logging.debug('Test name: {}'.format(test))
+    logging.info('Test name: {}'.format(test))
 
     if p_value < alpha and mean_nb < 30:
-        logging.debug('The sample didnt pass the equal variance assumption')
+        logging.info('The sample didnt pass the equal variance assumption')
         homoscedasticity = False
     else:
-        logging.debug('The sample pass the equal variance assumption')
+        logging.info('The sample pass the equal variance assumption')
         homoscedasticity = True
     return test, homoscedasticity, p_value
 
@@ -145,12 +145,12 @@ def verify_group_difference(data_by_group, normality=False,
         test = 'Kruskalwallis'
         T, p_value = scipy.stats.kruskal(*data_by_group)
 
-    logging.debug('Test name: {}'.format(test))
+    logging.info('Test name: {}'.format(test))
 
     if p_value < alpha:
-        logging.debug('There is a difference between groups')
+        logging.info('There is a difference between groups')
         difference = True
     else:
-        logging.debug('We are not able to detect difference between the groups.')
+        logging.info('We are not able to detect difference between the groups.')
         difference = False
     return test, difference, p_value
 
@@ -191,9 +191,9 @@ def verify_post_hoc(data_by_group, groups_list, test,
     test : string
         Name of the test done to verify group difference
     """
-    logging.debug('We need to do a post-hoc analysis since '
-                  'there is a difference')
-    logging.debug('Post-hoc: {} pairwise'.format(test))
+    logging.info('We need to do a post-hoc analysis since '
+                 'there is a difference')
+    logging.info('Post-hoc: {} pairwise'.format(test))
 
     differences = []
     nb_group = len(groups_list)
@@ -214,7 +214,7 @@ def verify_post_hoc(data_by_group, groups_list, test,
                     data_by_group[x], data_by_group[y])
             differences.append((groups_list[x], groups_list[y],
                                 p_value < alpha, p_value))
-    logging.debug('Result:')
-    logging.debug(differences)
+    logging.info('Result:')
+    logging.info(differences)
 
     return test, differences
diff --git a/scilpy/stats/utils.py b/scilpy/stats/utils.py
index 6c6c837c5..973128a6a 100644
--- a/scilpy/stats/utils.py
+++ b/scilpy/stats/utils.py
@@ -51,8 +51,8 @@ def __init__(self, json_file, participants):
                     self.data_dictionnary[participant['participant_id']]\
                         [variable] = participant[variable]
 
-        logging.debug('Data_dictionnary')
-        logging.debug(self.data_dictionnary[self.get_first_participant()])
+        logging.info('Data_dictionnary')
+        logging.info(self.data_dictionnary[self.get_first_participant()])
 
         with open('data.json', 'w') as fp:
             json.dump(self.data_dictionnary, fp, indent=4)
@@ -64,15 +64,15 @@ def validation_participant_id(self, json_info, participants_info):
 
         # Create the list of participants id from the json dictionnary
        participants_from_json = list(json_info.keys())
-        logging.debug('participant list from json dictionnary:')
-        logging.debug(participants_from_json)
+        logging.info('participant list from json dictionnary:')
+        logging.info(participants_from_json)
 
         # Create the list of participants id from the tsv list of dictionnary
         participants_from_tsv = []
         for participant in participants_info:
             participants_from_tsv.append(participant['participant_id'])
-        logging.debug('participant list from tsv file:')
-        logging.debug(participants_from_tsv)
+        logging.info('participant list from tsv file:')
+        logging.info(participants_from_tsv)
 
         # Compare the two list
         participants_from_json.sort()
@@ -80,15 +80,17 @@ def validation_participant_id(self, json_info, participants_info):
 
         if not participants_from_json == participants_from_tsv:
             if not len(participants_from_json) == len(participants_from_tsv):
-                logging.debug('The number of participants from json file is not the same '
-                              'as the one in the tsv file.')
+                logging.info('The number of participants from json file is '
+                             'not the same as the one in the tsv file.')
             is_in_tsv = np.in1d(participants_from_json, participants_from_tsv)
             is_in_json = np.in1d(participants_from_tsv, participants_from_json)
-            logging.debug('participants list from json file missing in tsv file :')
-            logging.debug(np.asarray(participants_from_json)[~is_in_tsv])
-            logging.debug('participants list from tsv file missing in json file :')
-            logging.debug(np.asarray(participants_from_tsv)[~is_in_json])
+            logging.info('participants list from json file missing in tsv '
+                         'file :')
+            logging.info(np.asarray(participants_from_json)[~is_in_tsv])
+            logging.info('participants list from tsv file missing in json '
+                         'file :')
+            logging.info(np.asarray(participants_from_tsv)[~is_in_json])
 
             logging.error('The subjects from the json file does not fit '
                           'with the subjects of the tsv file. '
                           'Impossible to build the data_for_stat object')
@@ -96,7 +98,7 @@ def validation_participant_id(self, json_info, participants_info):
                                 'with the subjects of the tsv file. '
                                 'Impossible to build the data_for_stat object')
         else:
-            logging.debug('The json and the tsv are compatible')
+            logging.info('The json and the tsv are compatible')
 
     def get_participants_list(self):
         # Construct the list of participant_id from the data_dictionnary
@@ -492,6 +494,6 @@ def visualise_distribution(data_by_group, participants_id, bundle, metric,
     fig.savefig(os.path.join(oFolder, 'Graph', bundle, metric))
 
-    logging.debug('outliers:[(id, group)]')
-    logging.debug(outliers)
+    logging.info('outliers:[(id, group)]')
+    logging.info(outliers)
 
     return outliers
diff --git a/scilpy/tracking/tracker.py b/scilpy/tracking/tracker.py
index d81a7c902..208a33fc6 100644
--- a/scilpy/tracking/tracker.py
+++ b/scilpy/tracking/tracker.py
@@ -179,8 +179,8 @@ def _set_nbr_processes(self, nbr_processes):
 
         if nbr_processes > self.nbr_seeds:
             nbr_processes = self.nbr_seeds
-            logging.debug("Setting number of processes to {} since there were "
-                          "less seeds than processes.".format(nbr_processes))
+            logging.info("Setting number of processes to {} since there were "
+                         "less seeds than processes.".format(nbr_processes))
         return nbr_processes
 
     def _prepare_multiprocessing_pool(self, tmpdir):
diff --git a/scilpy/tractanalysis/features.py b/scilpy/tractanalysis/features.py
index a49724548..267f0611c 100644
--- a/scilpy/tractanalysis/features.py
+++ b/scilpy/tractanalysis/features.py
@@ -115,9 +115,9 @@ def remove_loops_and_sharp_turns(streamlines,
             if tm.mean_curvature(clusters.centroids[i]) <= mean_curvature:
                 ids.extend(clusters[i].indices)
     else:
-        logging.debug("Impossible to use the use_qb option because " +
-                      "not more than one streamline left from the\n" +
-                      "input file.")
+        logging.info("Impossible to use the use_qb option because " +
+                     "not more than one streamline left from the\n" +
+                     "input file.")
     return ids
 
 
diff --git a/scilpy/tractograms/streamline_operations.py b/scilpy/tractograms/streamline_operations.py
index 18d247df1..dacc79db1 100644
--- a/scilpy/tractograms/streamline_operations.py
+++ b/scilpy/tractograms/streamline_operations.py
@@ -176,19 +176,19 @@ def filter_streamlines_by_total_length_per_dim(
     total_per_orientation = np.abs(np.asarray(
         [np.sum(d, axis=0) for d in all_dirs]))
 
-    logging.debug("Total length per orientation is:\n"
-                  "Average: x: {:.2f}, y: {:.2f}, z: {:.2f} \n"
-                  "Min: x: {:.2f}, y: {:.2f}, z: {:.2f} \n"
-                  "Max: x: {:.2f}, y: {:.2f}, z: {:.2f} \n"
-                  .format(np.mean(total_per_orientation[:, 0]),
-                          np.mean(total_per_orientation[:, 1]),
-                          np.mean(total_per_orientation[:, 2]),
-                          np.min(total_per_orientation[:, 0]),
-                          np.min(total_per_orientation[:, 1]),
-                          np.min(total_per_orientation[:, 2]),
-                          np.max(total_per_orientation[:, 0]),
-                          np.max(total_per_orientation[:, 1]),
-                          np.max(total_per_orientation[:, 2])))
+    logging.info("Total length per orientation is:\n"
+                 "Average: x: {:.2f}, y: {:.2f}, z: {:.2f} \n"
+                 "Min: x: {:.2f}, y: {:.2f}, z: {:.2f} \n"
+                 "Max: x: {:.2f}, y: {:.2f}, z: {:.2f} \n"
+                 .format(np.mean(total_per_orientation[:, 0]),
+                         np.mean(total_per_orientation[:, 1]),
+                         np.mean(total_per_orientation[:, 2]),
+                         np.min(total_per_orientation[:, 0]),
+                         np.min(total_per_orientation[:, 1]),
+                         np.min(total_per_orientation[:, 2]),
+                         np.max(total_per_orientation[:, 0]),
+                         np.max(total_per_orientation[:, 1]),
+                         np.max(total_per_orientation[:, 2])))
 
     # Find good ids
     mask_good_x = np.logical_and(limits_x[0] < total_per_orientation[:, 0],
@@ -264,11 +264,11 @@ def resample_streamlines_step_size(sft, step_size):
     if step_size == 0:
         raise ValueError("Step size can't be 0!")
     elif step_size < 0.1:
-        logging.debug("The value of your step size seems suspiciously low. "
-                      "Please check.")
+        logging.info("The value of your step size seems suspiciously low. "
+                     "Please check.")
     elif step_size > np.max(sft.voxel_sizes):
-        logging.debug("The value of your step size seems suspiciously high. "
-                      "Please check.")
+        logging.info("The value of your step size seems suspiciously high. "
+                     "Please check.")
 
     # Make sure we are in world space
     orig_space = sft.space
@@ -299,9 +299,9 @@ def _warn_and_save(new_streamlines, sft):
     Warn that we loose data_per_point, then create resampled SFT."""
 
     if sft.data_per_point is not None and sft.data_per_point.keys():
-        logging.debug("Initial StatefulTractogram contained data_per_point. "
-                      "This information will not be carried in the final "
-                      "tractogram.")
+        logging.info("Initial StatefulTractogram contained data_per_point. "
+                     "This information will not be carried in the final "
+                     "tractogram.")
     new_sft = StatefulTractogram.from_sft(
         new_streamlines, sft, data_per_streamline=sft.data_per_streamline)
 
@@ -328,7 +328,7 @@ def smooth_line_gaussian(streamline, sigma):
         raise ValueError('Cant have a 0 sigma with gaussian.')
 
     if length(streamline) < 1:
-        logging.debug('Streamline shorter than 1mm, corner cases possible.')
+        logging.info('Streamline shorter than 1mm, corner cases possible.')
 
     # Smooth each dimension separately
     x, y, z = streamline.T
@@ -367,7 +367,7 @@ def smooth_line_spline(streamline, smoothing_parameter, nb_ctrl_points):
         raise ValueError('Cant have a 0 sigma with spline.')
 
     if length(streamline) < 1:
-        logging.debug('Streamline shorter than 1mm, corner cases possible.')
+        logging.info('Streamline shorter than 1mm, corner cases possible.')
 
     if nb_ctrl_points < 3:
         nb_ctrl_points = 3
diff --git a/scilpy/tractograms/tractogram_operations.py b/scilpy/tractograms/tractogram_operations.py
index e896f2300..2a517f55d 100644
--- a/scilpy/tractograms/tractogram_operations.py
+++ b/scilpy/tractograms/tractogram_operations.py
@@ -798,12 +798,12 @@ def split_sft_randomly_per_cluster(orig_sft, chunk_sizes, seed, thresholds):
     nb_chunks = len(chunk_sizes)
     percent_kept_per_chunk = [nb / len(orig_sft) for nb in chunk_sizes]
 
-    logging.debug("Computing QBx")
+    logging.info("Computing QBx")
     clusters = qbx_and_merge(orig_sft.streamlines, thresholds,
                              nb_pts=20, verbose=False)
 
-    logging.debug("Done. Now getting list of indices in each of the {} "
-                  "cluster.".format(len(clusters)))
+    logging.info("Done. Now getting list of indices in each of the {} "
+                 "cluster.".format(len(clusters)))
     total_indices = [[] for _ in range(nb_chunks + 1)]
     for cluster in clusters:
         if len(cluster.indices) > 1:
diff --git a/scripts/scil_dwi_compute_snr.py b/scripts/scil_dwi_compute_snr.py
index 72bea436c..892338957 100755
--- a/scripts/scil_dwi_compute_snr.py
+++ b/scripts/scil_dwi_compute_snr.py
@@ -122,8 +122,7 @@ def main():
                          noise_mask=noise_mask,
                          noise_map=noise_map,
                          split_shells=args.split_shells,
-                         basename=basename,
-                         verbose=args.verbose)
+                         basename=basename)
 
     df = pd.DataFrame.from_dict(values).T
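
Reviewer note (not part of the patch itself): with the logging.debug calls
promoted to logging.info and the per-function verbose flag removed from
compute_snr, verbosity is meant to be configured once, at the script level,
so that every logging.info() call inside the library is gated by the root
logger. The snippet below is a minimal, hypothetical sketch of that calling
convention; the --verbose flag and the WARNING default are assumptions for
illustration, not code taken from this patch:

    import argparse
    import logging

    # Hypothetical script skeleton: configure the root logger once; the
    # library functions touched by this patch then log through it and no
    # longer need a verbose parameter of their own.
    parser = argparse.ArgumentParser(description='Example scilpy-style script.')
    parser.add_argument('--verbose', action='store_true',
                        help='Show INFO-level messages from library modules.')
    args = parser.parse_args()

    # INFO messages (the ones switched from debug above) are emitted only
    # when requested; WARNING and ERROR messages are always shown.
    logging.getLogger().setLevel(
        logging.INFO if args.verbose else logging.WARNING)

Under this convention, scripts/scil_dwi_compute_snr.py keeps its verbose
argument (args.verbose still appears in the removed call above) and would
apply it once in main() as sketched, rather than forwarding it into
compute_snr().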