Uniformize verbose level in modules
karanphil committed Feb 15, 2024
1 parent 81ca6ab commit 33d1e98
Showing 11 changed files with 87 additions and 92 deletions.
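
The change is the same across all files: the verbose argument of compute_snr is dropped, and messages formerly emitted with logging.debug are promoted to logging.info, so verbosity is controlled once through the standard logging level of the calling script instead of per-function flags. A minimal sketch of the caller-side pattern (the script context and level choice are illustrative, not part of this commit):

    import logging

    # Instead of passing verbose=True to individual scilpy functions,
    # a calling script now raises the logger's level before using them.
    logging.getLogger().setLevel(logging.INFO)   # or logging.DEBUG for more detail
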
22 changes: 8 additions & 14 deletions scilpy/image/volume_operations.py
@@ -241,7 +241,7 @@ def register_image(static, static_grid2world, moving, moving_grid2world,
 def compute_snr(dwi, bval, bvec, b0_thr, mask,
                 noise_mask=None, noise_map=None,
                 split_shells=False,
-                basename=None, verbose=False):
+                basename=None):
     """
     Compute snr
@@ -264,16 +264,10 @@ def compute_snr(dwi, bval, bvec, b0_thr, mask,
     basename: string
         Basename used for naming all output files.
-    verbose: boolean
-        Set to use logging
     Return
     ------
     Dictionary of values (bvec, bval, mean, std, snr) for all volumes.
     """
-    if verbose:
-        logging.getLogger().setLevel(logging.INFO)
-
     data = dwi.get_fdata(dtype=np.float32)
     affine = dwi.affine
     mask = get_data_as_mask(mask, dtype=bool)
@@ -416,17 +410,17 @@ def resample_volume(img, ref=None, res=None, iso_min=False, zoom=None,
     if interp not in interp_choices:
         raise ValueError("interp must be one of 'nn', 'lin', 'quad', 'cubic'.")

-    logging.debug('Data shape: %s', data.shape)
-    logging.debug('Data affine: %s', affine)
-    logging.debug('Data affine setup: %s', nib.aff2axcodes(affine))
-    logging.debug('Resampling data to %s with mode %s', new_zooms, interp)
+    logging.info('Data shape: %s', data.shape)
+    logging.info('Data affine: %s', affine)
+    logging.info('Data affine setup: %s', nib.aff2axcodes(affine))
+    logging.info('Resampling data to %s with mode %s', new_zooms, interp)

     data2, affine2 = reslice(data, affine, original_zooms, new_zooms,
                              _interp_code_to_order(interp))

-    logging.debug('Resampled data shape: %s', data2.shape)
-    logging.debug('Resampled data affine: %s', affine2)
-    logging.debug('Resampled data affine setup: %s', nib.aff2axcodes(affine2))
+    logging.info('Resampled data shape: %s', data2.shape)
+    logging.info('Resampled data affine: %s', affine2)
+    logging.info('Resampled data affine setup: %s', nib.aff2axcodes(affine2))

     if enforce_dimensions:
         if ref is None:
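
For compute_snr specifically, a caller that previously passed verbose=True now enables INFO logging itself before the call. A sketch of the updated usage (file names, argument values and the bval/bvec argument format are assumptions for illustration; the function's docstring remains the reference for the exact expected types):

    import logging
    import nibabel as nib
    from scilpy.image.volume_operations import compute_snr

    logging.getLogger().setLevel(logging.INFO)   # replaces the old verbose=True

    dwi = nib.load('dwi.nii.gz')                 # hypothetical input files
    mask = nib.load('brain_mask.nii.gz')
    noise_mask = nib.load('noise_mask.nii.gz')

    # Returns a dictionary of (bvec, bval, mean, std, snr) per volume.
    values = compute_snr(dwi, 'dwi.bval', 'dwi.bvec', b0_thr=20, mask=mask,
                         noise_mask=noise_mask, basename='snr')
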
4 changes: 2 additions & 2 deletions scilpy/reconst/fodf.py
@@ -91,13 +91,13 @@ def get_ventricles_max_fodf(data, fa, md, zoom, args):
                     count += 1
                     mask[i, j, k] = 1

-    logging.debug('Number of voxels detected: {}'.format(count))
+    logging.info('Number of voxels detected: {}'.format(count))
     if count == 0:
         logging.warning('No voxels found for evaluation! Change your fa '
                         'and/or md thresholds')
         return 0, mask

-    logging.debug('Average max fodf value: {}'.format(sum_of_max / count))
+    logging.info('Average max fodf value: {}'.format(sum_of_max / count))
     return sum_of_max / count, mask

14 changes: 7 additions & 7 deletions scilpy/reconst/frf.py
@@ -107,7 +107,7 @@ def compute_ssst_frf(data, bvals, bvecs, mask=None, mask_wm=None,
         nvox = np.sum(mask)
         response, ratio = response_from_mask_ssst(gtab, data, mask)

-        logging.debug(
+        logging.info(
             "Number of indices is {:d} with threshold of {:.2f}".format(
                 nvox, fa_thresh))
         fa_thresh -= 0.05
@@ -117,14 +117,14 @@ def compute_ssst_frf(data, bvals, bvecs, mask=None, mask_wm=None,
             "Could not find at least {:d} voxels with sufficient FA "
             "to estimate the FRF!".format(min_nvox))

-    logging.debug(
+    logging.info(
         "Found {:d} voxels with FA threshold {:.2f} for "
         "FRF estimation".format(nvox, fa_thresh + 0.05))
-    logging.debug("FRF eigenvalues: {}".format(str(response[0])))
-    logging.debug("Ratio for smallest to largest eigen value "
-                  "is {:.3f}".format(ratio))
-    logging.debug("Mean of the b=0 signal for voxels used "
-                  "for FRF: {}".format(response[1]))
+    logging.info("FRF eigenvalues: {}".format(str(response[0])))
+    logging.info("Ratio for smallest to largest eigen value "
+                 "is {:.3f}".format(ratio))
+    logging.info("Mean of the b=0 signal for voxels used "
+                 "for FRF: {}".format(response[1]))

     full_response = np.array([response[0][0], response[0][1],
                               response[0][2], response[1]])
18 changes: 9 additions & 9 deletions scilpy/stats/matrix_stats.py
@@ -67,19 +67,19 @@ def ttest_two_matrices(matrices_g1, matrices_g2, paired, tail, fdr,
     sum_both_groups = np.sum(matrices_g1, axis=2) + np.sum(matrices_g2, axis=2)
     nbr_non_zeros = np.count_nonzero(np.triu(sum_both_groups))

-    logging.debug('The provided matrices contain {} non zeros elements.'
-                  .format(nbr_non_zeros))
+    logging.info('The provided matrices contain {} non zeros elements.'
+                 .format(nbr_non_zeros))

     matrices_g1 = matrices_g1.reshape((np.prod(matrix_shape), nb_group_g1))
     matrices_g2 = matrices_g2.reshape((np.prod(matrix_shape), nb_group_g2))
     # Negative epsilon, to differentiate from null p-values
     matrix_pval = np.ones(np.prod(matrix_shape)) * -0.000001

     text = ' paired' if paired else ''
-    logging.debug('Performing{} t-test with "{}" hypothesis.'
-                  .format(text, tail))
-    logging.debug('Data has dimensions {}x{} with {} and {} observations.'
-                  .format(matrix_shape[0], matrix_shape[1],
+    logging.info('Performing{} t-test with "{}" hypothesis.'
+                 .format(text, tail))
+    logging.info('Data has dimensions {}x{} with {} and {} observations.'
+                 .format(matrix_shape[0], matrix_shape[1],
                           nb_group_g1, nb_group_g2))

     # For conversion to p-values
@@ -105,7 +105,7 @@ def ttest_two_matrices(matrices_g1, matrices_g2, paired, tail, fdr,

     corr_matrix_pval = matrix_pval.reshape(matrix_shape)
     if fdr:
-        logging.debug('Using FDR, the results will be q-values.')
+        logging.info('Using FDR, the results will be q-values.')
         corr_matrix_pval = np.triu(corr_matrix_pval)
         corr_matrix_pval[corr_matrix_pval > 0] = multipletests(
             corr_matrix_pval[corr_matrix_pval > 0], 0, method='fdr_bh')[1]
@@ -158,8 +158,8 @@ def omega_sigma(matrix):
     transitivity_latt_list = []
     path_length_rand_list = []
     for i in range(10):
-        logging.debug('Generating random and lattice matrices, '
-                      'iteration #{}.'.format(i))
+        logging.info('Generating random and lattice matrices, '
+                     'iteration #{}.'.format(i))
         random = bct.randmio_und(matrix, 10)[0]
         lattice = bct.latmio_und(matrix, 10)[1]

26 changes: 13 additions & 13 deletions scilpy/stats/stats.py
@@ -32,10 +32,10 @@ def verify_normality(data, alpha=0.05):
     # First, we verify if sample pass Shapiro-Wilk test
     W, p_value = scipy.stats.shapiro(data)
     if p_value < alpha and len(data) < 30:
-        logging.debug('The data sample can not be considered normal')
+        logging.info('The data sample can not be considered normal')
         normality = False
     else:
-        logging.debug('The data sample pass the normality assumption.')
+        logging.info('The data sample pass the normality assumption.')
         normality = True
     return normality, p_value

@@ -76,12 +76,12 @@ def verify_homoscedasticity(data_by_group, normality=False, alpha=0.05):
     else:
         test = 'Levene'
         W, p_value = scipy.stats.levene(*data_by_group)
-    logging.debug('Test name: {}'.format(test))
+    logging.info('Test name: {}'.format(test))
     if p_value < alpha and mean_nb < 30:
-        logging.debug('The sample didnt pass the equal variance assumption')
+        logging.info('The sample didnt pass the equal variance assumption')
         homoscedasticity = False
     else:
-        logging.debug('The sample pass the equal variance assumption')
+        logging.info('The sample pass the equal variance assumption')
         homoscedasticity = True

     return test, homoscedasticity, p_value
@@ -145,12 +145,12 @@ def verify_group_difference(data_by_group, normality=False,
         test = 'Kruskalwallis'
         T, p_value = scipy.stats.kruskal(*data_by_group)

-    logging.debug('Test name: {}'.format(test))
+    logging.info('Test name: {}'.format(test))
     if p_value < alpha:
-        logging.debug('There is a difference between groups')
+        logging.info('There is a difference between groups')
         difference = True
     else:
-        logging.debug('We are not able to detect difference between the groups.')
+        logging.info('We are not able to detect difference between the groups.')
         difference = False

     return test, difference, p_value
@@ -191,9 +191,9 @@ def verify_post_hoc(data_by_group, groups_list, test,
     test : string
         Name of the test done to verify group difference
     """
-    logging.debug('We need to do a post-hoc analysis since '
-                  'there is a difference')
-    logging.debug('Post-hoc: {} pairwise'.format(test))
+    logging.info('We need to do a post-hoc analysis since '
+                 'there is a difference')
+    logging.info('Post-hoc: {} pairwise'.format(test))
     differences = []
     nb_group = len(groups_list)

@@ -214,7 +214,7 @@ def verify_post_hoc(data_by_group, groups_list, test,
                 data_by_group[x], data_by_group[y])
             differences.append((groups_list[x], groups_list[y],
                                 p_value < alpha, p_value))
-    logging.debug('Result:')
-    logging.debug(differences)
+    logging.info('Result:')
+    logging.info(differences)

     return test, differences
32 changes: 17 additions & 15 deletions scilpy/stats/utils.py
@@ -51,8 +51,8 @@ def __init__(self, json_file, participants):
                 self.data_dictionnary[participant['participant_id']]\
                     [variable] = participant[variable]

-        logging.debug('Data_dictionnary')
-        logging.debug(self.data_dictionnary[self.get_first_participant()])
+        logging.info('Data_dictionnary')
+        logging.info(self.data_dictionnary[self.get_first_participant()])

         with open('data.json', 'w') as fp:
             json.dump(self.data_dictionnary, fp, indent=4)
@@ -64,31 +64,33 @@ def validation_participant_id(self, json_info, participants_info):
         # Create the list of participants id from the json dictionnary

         participants_from_json = list(json_info.keys())
-        logging.debug('participant list from json dictionnary:')
-        logging.debug(participants_from_json)
+        logging.info('participant list from json dictionnary:')
+        logging.info(participants_from_json)

         # Create the list of participants id from the tsv list of dictionnary
         participants_from_tsv = []
         for participant in participants_info:
             participants_from_tsv.append(participant['participant_id'])
-        logging.debug('participant list from tsv file:')
-        logging.debug(participants_from_tsv)
+        logging.info('participant list from tsv file:')
+        logging.info(participants_from_tsv)

         # Compare the two list
         participants_from_json.sort()
         participants_from_tsv.sort()

         if not participants_from_json == participants_from_tsv:
             if not len(participants_from_json) == len(participants_from_tsv):
-                logging.debug('The number of participants from json file is not the same '
-                              'as the one in the tsv file.')
+                logging.info('The number of participants from json file is '
+                             'not the same as the one in the tsv file.')
             is_in_tsv = np.in1d(participants_from_json, participants_from_tsv)
             is_in_json = np.in1d(participants_from_tsv, participants_from_json)

-            logging.debug('participants list from json file missing in tsv file :')
-            logging.debug(np.asarray(participants_from_json)[~is_in_tsv])
-            logging.debug('participants list from tsv file missing in json file :')
-            logging.debug(np.asarray(participants_from_tsv)[~is_in_json])
+            logging.info('participants list from json file missing in tsv '
+                         'file :')
+            logging.info(np.asarray(participants_from_json)[~is_in_tsv])
+            logging.info('participants list from tsv file missing in json '
+                         'file :')
+            logging.info(np.asarray(participants_from_tsv)[~is_in_json])

             logging.error('The subjects from the json file does not fit '
                           'with the subjects of the tsv file. '
@@ -97,7 +99,7 @@ def validation_participant_id(self, json_info, participants_info):
                           'with the subjects of the tsv file. '
                           'Impossible to build the data_for_stat object')
         else:
-            logging.debug('The json and the tsv are compatible')
+            logging.info('The json and the tsv are compatible')

     def get_participants_list(self):
         # Construct the list of participant_id from the data_dictionnary
@@ -492,6 +494,6 @@ def visualise_distribution(data_by_group, participants_id, bundle, metric,

     fig.savefig(os.path.join(oFolder, 'Graph', bundle, metric))

-    logging.debug('outliers:[(id, group)]')
-    logging.debug(outliers)
+    logging.info('outliers:[(id, group)]')
+    logging.info(outliers)
     return outliers
4 changes: 2 additions & 2 deletions scilpy/tracking/tracker.py
@@ -179,8 +179,8 @@ def _set_nbr_processes(self, nbr_processes):

         if nbr_processes > self.nbr_seeds:
             nbr_processes = self.nbr_seeds
-            logging.debug("Setting number of processes to {} since there were "
-                          "less seeds than processes.".format(nbr_processes))
+            logging.info("Setting number of processes to {} since there were "
+                         "less seeds than processes.".format(nbr_processes))
         return nbr_processes

     def _prepare_multiprocessing_pool(self, tmpdir):
6 changes: 3 additions & 3 deletions scilpy/tractanalysis/features.py
@@ -115,9 +115,9 @@ def remove_loops_and_sharp_turns(streamlines,
             if tm.mean_curvature(clusters.centroids[i]) <= mean_curvature:
                 ids.extend(clusters[i].indices)
     else:
-        logging.debug("Impossible to use the use_qb option because " +
-                      "not more than one streamline left from the\n" +
-                      "input file.")
+        logging.info("Impossible to use the use_qb option because " +
+                     "not more than one streamline left from the\n" +
+                     "input file.")
     return ids

44 changes: 22 additions & 22 deletions scilpy/tractograms/streamline_operations.py
@@ -176,19 +176,19 @@ def filter_streamlines_by_total_length_per_dim(
     total_per_orientation = np.abs(np.asarray(
         [np.sum(d, axis=0) for d in all_dirs]))

-    logging.debug("Total length per orientation is:\n"
-                  "Average: x: {:.2f}, y: {:.2f}, z: {:.2f} \n"
-                  "Min: x: {:.2f}, y: {:.2f}, z: {:.2f} \n"
-                  "Max: x: {:.2f}, y: {:.2f}, z: {:.2f} \n"
-                  .format(np.mean(total_per_orientation[:, 0]),
-                          np.mean(total_per_orientation[:, 1]),
-                          np.mean(total_per_orientation[:, 2]),
-                          np.min(total_per_orientation[:, 0]),
-                          np.min(total_per_orientation[:, 1]),
-                          np.min(total_per_orientation[:, 2]),
-                          np.max(total_per_orientation[:, 0]),
-                          np.max(total_per_orientation[:, 1]),
-                          np.max(total_per_orientation[:, 2])))
+    logging.info("Total length per orientation is:\n"
+                 "Average: x: {:.2f}, y: {:.2f}, z: {:.2f} \n"
+                 "Min: x: {:.2f}, y: {:.2f}, z: {:.2f} \n"
+                 "Max: x: {:.2f}, y: {:.2f}, z: {:.2f} \n"
+                 .format(np.mean(total_per_orientation[:, 0]),
+                         np.mean(total_per_orientation[:, 1]),
+                         np.mean(total_per_orientation[:, 2]),
+                         np.min(total_per_orientation[:, 0]),
+                         np.min(total_per_orientation[:, 1]),
+                         np.min(total_per_orientation[:, 2]),
+                         np.max(total_per_orientation[:, 0]),
+                         np.max(total_per_orientation[:, 1]),
+                         np.max(total_per_orientation[:, 2])))

     # Find good ids
     mask_good_x = np.logical_and(limits_x[0] < total_per_orientation[:, 0],
@@ -264,11 +264,11 @@ def resample_streamlines_step_size(sft, step_size):
     if step_size == 0:
         raise ValueError("Step size can't be 0!")
     elif step_size < 0.1:
-        logging.debug("The value of your step size seems suspiciously low. "
-                      "Please check.")
+        logging.info("The value of your step size seems suspiciously low. "
+                     "Please check.")
     elif step_size > np.max(sft.voxel_sizes):
-        logging.debug("The value of your step size seems suspiciously high. "
-                      "Please check.")
+        logging.info("The value of your step size seems suspiciously high. "
+                     "Please check.")

     # Make sure we are in world space
     orig_space = sft.space
@@ -299,9 +299,9 @@ def _warn_and_save(new_streamlines, sft):
     Warn that we loose data_per_point, then create resampled SFT."""

     if sft.data_per_point is not None and sft.data_per_point.keys():
-        logging.debug("Initial StatefulTractogram contained data_per_point. "
-                      "This information will not be carried in the final "
-                      "tractogram.")
+        logging.info("Initial StatefulTractogram contained data_per_point. "
+                     "This information will not be carried in the final "
+                     "tractogram.")
     new_sft = StatefulTractogram.from_sft(
         new_streamlines, sft, data_per_streamline=sft.data_per_streamline)

@@ -328,7 +328,7 @@ def smooth_line_gaussian(streamline, sigma):
         raise ValueError('Cant have a 0 sigma with gaussian.')

     if length(streamline) < 1:
-        logging.debug('Streamline shorter than 1mm, corner cases possible.')
+        logging.info('Streamline shorter than 1mm, corner cases possible.')

     # Smooth each dimension separately
     x, y, z = streamline.T
@@ -367,7 +367,7 @@ def smooth_line_spline(streamline, smoothing_parameter, nb_ctrl_points):
         raise ValueError('Cant have a 0 sigma with spline.')

     if length(streamline) < 1:
-        logging.debug('Streamline shorter than 1mm, corner cases possible.')
+        logging.info('Streamline shorter than 1mm, corner cases possible.')

     if nb_ctrl_points < 3:
         nb_ctrl_points = 3
