From 9f28bc8b0c3e70665a7abdd4fa0fd20ee772acfe Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:47:08 +0200 Subject: [PATCH 1/9] STY: Apply ruff/pyupgrade preview rule UP031 UP031 Use format specifiers instead of percent format Co-authored-by: Chris Markiewicz --- nibabel/analyze.py | 6 +++--- nibabel/cifti2/cifti2_axes.py | 5 ++--- nibabel/cmdline/dicomfs.py | 12 ++++++------ nibabel/cmdline/diff.py | 4 ++-- nibabel/cmdline/ls.py | 10 +++++----- nibabel/dft.py | 2 +- nibabel/ecat.py | 4 ++-- nibabel/fileslice.py | 2 +- nibabel/freesurfer/io.py | 8 ++++---- nibabel/freesurfer/mghformat.py | 2 +- nibabel/gifti/gifti.py | 2 +- nibabel/gifti/parse_gifti_fast.py | 4 ++-- nibabel/nicom/csareader.py | 2 +- nibabel/nicom/dicomreaders.py | 4 ++-- nibabel/nifti1.py | 10 +++++----- nibabel/orientations.py | 2 +- nibabel/spatialimages.py | 2 +- nibabel/tests/test_funcs.py | 2 +- 18 files changed, 41 insertions(+), 42 deletions(-) diff --git a/nibabel/analyze.py b/nibabel/analyze.py index 34597319d..d02363c79 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -699,7 +699,7 @@ def set_zooms(self, zooms): ndim = dims[0] zooms = np.asarray(zooms) if len(zooms) != ndim: - raise HeaderDataError('Expecting %d zoom values for ndim %d' % (ndim, ndim)) + raise HeaderDataError(f'Expecting {ndim} zoom values for ndim {ndim}') if np.any(zooms < 0): raise HeaderDataError('zooms must be positive') pixdims = hdr['pixdim'] @@ -818,11 +818,11 @@ def _chk_datatype(klass, hdr, fix=False): dtype = klass._data_type_codes.dtype[code] except KeyError: rep.problem_level = 40 - rep.problem_msg = 'data code %d not recognized' % code + rep.problem_msg = f'data code {code} not recognized' else: if dtype.itemsize == 0: rep.problem_level = 40 - rep.problem_msg = 'data code %d not supported' % code + rep.problem_msg = f'data code {code} not supported' else: return hdr, rep if fix: diff --git a/nibabel/cifti2/cifti2_axes.py b/nibabel/cifti2/cifti2_axes.py index af7c63bea..32914be1b 100644 --- a/nibabel/cifti2/cifti2_axes.py +++ b/nibabel/cifti2/cifti2_axes.py @@ -373,7 +373,7 @@ def from_mask(cls, mask, name='other', affine=None): else: raise ValueError( 'Mask should be either 1-dimensional (for surfaces) or ' - '3-dimensional (for volumes), not %i-dimensional' % mask.ndim + f'3-dimensional (for volumes), not {mask.ndim}-dimensional' ) @classmethod @@ -1519,7 +1519,6 @@ def get_element(self, index): index = self.size + index if index >= self.size or index < 0: raise IndexError( - 'index %i is out of range for SeriesAxis with size %i' - % (original_index, self.size) + f'index {original_index} is out of range for SeriesAxis with size {self.size}' ) return self.start + self.step * index diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py index afd994b15..07aa51e2d 100644 --- a/nibabel/cmdline/dicomfs.py +++ b/nibabel/cmdline/dicomfs.py @@ -51,7 +51,7 @@ def __init__(self, fno): self.direct_io = False def __str__(self): - return 'FileHandle(%d)' % self.fno + return f'FileHandle({self.fno})' class DICOMFS(fuse.Fuse): @@ -85,11 +85,11 @@ def get_paths(self): series_info += f'UID: {series.uid}\n' series_info += f'number: {series.number}\n' series_info += f'description: {series.description}\n' - series_info += 'rows: %d\n' % series.rows - series_info += 'columns: %d\n' % series.columns - series_info += 'bits allocated: %d\n' % series.bits_allocated - series_info += 'bits stored: %d\n' % series.bits_stored - series_info += 'storage 
instances: %d\n' % len(series.storage_instances) + series_info += f'rows: {series.rows}\n' + series_info += f'columns: {series.columns}\n' + series_info += f'bits allocated: {series.bits_allocated}\n' + series_info += f'bits stored: {series.bits_stored}\n' + series_info += f'storage instances: {len(series.storage_instances)}\n' d[series.number] = { 'INFO': series_info.encode('ascii', 'replace'), f'{series.number}.nii': (series.nifti_size, series.as_nifti), diff --git a/nibabel/cmdline/diff.py b/nibabel/cmdline/diff.py index 36760f7eb..55f827e97 100755 --- a/nibabel/cmdline/diff.py +++ b/nibabel/cmdline/diff.py @@ -266,7 +266,7 @@ def get_data_diff(files, max_abs=0, max_rel=0, dtype=np.float64): diffs1.append({'CMP': 'incompat'}) if any(diffs1): - diffs['DATA(diff %d:)' % (i + 1)] = diffs1 + diffs[f'DATA(diff {i + 1}:)'] = diffs1 return diffs @@ -293,7 +293,7 @@ def display_diff(files, diff): output += field_width.format('Field/File') for i, f in enumerate(files, 1): - output += '%d:%s' % (i, filename_width.format(os.path.basename(f))) + output += f'{i}:{filename_width.format(os.path.basename(f))}' output += '\n' diff --git a/nibabel/cmdline/ls.py b/nibabel/cmdline/ls.py index f79c27f0c..72fb22768 100755 --- a/nibabel/cmdline/ls.py +++ b/nibabel/cmdline/ls.py @@ -73,7 +73,7 @@ def get_opt_parser(): action='store_true', dest='all_counts', default=False, - help='Output all counts, even if number of unique values > %d' % MAX_UNIQUE, + help=f'Output all counts, even if number of unique values > {MAX_UNIQUE}', ), Option( '-z', @@ -117,7 +117,7 @@ def proc_file(f, opts): row += [''] if hasattr(h, 'extensions') and len(h.extensions): - row += ['@l#exts: %d' % len(h.extensions)] + row += [f'@l#exts: {len(h.extensions)}'] else: row += [''] @@ -166,16 +166,16 @@ def proc_file(f, opts): d = d.reshape(-1) if opts.stats: # just # of elements - row += ['@l[%d]' % np.prod(d.shape)] + row += [f'@l[{np.prod(d.shape)}]'] # stats row += [f'@l[{np.min(d):.2g}, {np.max(d):.2g}]' if len(d) else '-'] if opts.counts: items, inv = np.unique(d, return_inverse=True) if len(items) > 1000 and not opts.all_counts: - counts = _err('%d uniques. Use --all-counts' % len(items)) + counts = _err(f'{len(items)} uniques. 
Use --all-counts') else: freq = np.bincount(inv) - counts = ' '.join('%g:%d' % (i, f) for i, f in zip(items, freq)) + counts = ' '.join(f'{i:g}:{f}' for i, f in zip(items, freq)) row += ['@l' + counts] except OSError as e: verbose(2, f'Failed to obtain stats/counts -- {e}') diff --git a/nibabel/dft.py b/nibabel/dft.py index e63c9c479..23108895b 100644 --- a/nibabel/dft.py +++ b/nibabel/dft.py @@ -162,7 +162,7 @@ def as_nifti(self): for i, si in enumerate(self.storage_instances): if i + 1 != si.instance_number: raise InstanceStackError(self, i, si) - logger.info('reading %d/%d' % (i + 1, len(self.storage_instances))) + logger.info(f'reading {i + 1}/{len(self.storage_instances)}') d = self.storage_instances[i].dicom() data[i, :, :] = d.pixel_array diff --git a/nibabel/ecat.py b/nibabel/ecat.py index c4b55624f..f634bcd8a 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -309,14 +309,14 @@ def get_patient_orient(self): """ code = self._structarr['patient_orientation'].item() if code not in self._patient_orient_codes: - raise KeyError('Ecat Orientation CODE %d not recognized' % code) + raise KeyError(f'Ecat Orientation CODE {code} not recognized') return self._patient_orient_codes[code] def get_filetype(self): """Type of ECAT Matrix File from code stored in header""" code = self._structarr['file_type'].item() if code not in self._ft_codes: - raise KeyError('Ecat Filetype CODE %d not recognized' % code) + raise KeyError(f'Ecat Filetype CODE {code} not recognized') return self._ft_codes[code] @classmethod diff --git a/nibabel/fileslice.py b/nibabel/fileslice.py index 816f1cdaf..91ed1f70a 100644 --- a/nibabel/fileslice.py +++ b/nibabel/fileslice.py @@ -127,7 +127,7 @@ def canonical_slicers(sliceobj, shape, check_inds=True): if slicer < 0: slicer = dim_len + slicer elif check_inds and slicer >= dim_len: - raise ValueError('Integer index %d to large' % slicer) + raise ValueError(f'Integer index {slicer} too large') can_slicers.append(slicer) # Fill out any missing dimensions if n_real < n_dim: diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index 74bc05fc3..31745df72 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -427,7 +427,7 @@ def _read_annot_ctab_old_format(fobj, n_entries): for i in range(n_entries): # structure name length + string name_length = np.fromfile(fobj, dt, 1)[0] - name = np.fromfile(fobj, '|S%d' % name_length, 1)[0] + name = np.fromfile(fobj, f'|S{name_length}', 1)[0] names.append(name) # read RGBT for this entry ctab[i, :4] = np.fromfile(fobj, dt, 4) @@ -471,7 +471,7 @@ def _read_annot_ctab_new_format(fobj, ctab_version): ctab = np.zeros((max_index, 5), dt) # orig_tab string length + string length = np.fromfile(fobj, dt, 1)[0] - np.fromfile(fobj, '|S%d' % length, 1)[0] # Orig table path + np.fromfile(fobj, f'|S{length}', 1)[0] # Orig table path # number of LUT entries present in the file entries_to_read = np.fromfile(fobj, dt, 1)[0] names = list() @@ -480,7 +480,7 @@ def _read_annot_ctab_new_format(fobj, ctab_version): idx = np.fromfile(fobj, dt, 1)[0] # structure name length + string name_length = np.fromfile(fobj, dt, 1)[0] - name = np.fromfile(fobj, '|S%d' % name_length, 1)[0] + name = np.fromfile(fobj, f'|S{name_length}', 1)[0] names.append(name) # RGBT ctab[idx, :4] = np.fromfile(fobj, dt, 4) @@ -525,7 +525,7 @@ def write(num, dtype=dt): def write_string(s): s = (s if isinstance(s, bytes) else s.encode()) + b'\x00' write(len(s)) - write(s, dtype='|S%d' % len(s)) + write(s, dtype=f'|S{len(s)}') # Generate annotation values for each 
ctab entry if fill_ctab: diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 6efa67ffa..0adcb88e2 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -281,7 +281,7 @@ def set_zooms(self, zooms): zooms = np.asarray(zooms) ndims = self._ndims() if len(zooms) > ndims: - raise HeaderDataError('Expecting %d zoom values' % ndims) + raise HeaderDataError(f'Expecting {ndims} zoom values') if np.any(zooms[:3] <= 0): raise HeaderDataError( f'Spatial (first three) zooms must be positive; got {tuple(zooms[:3])}' diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index c983a14df..76fcc4a45 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -522,7 +522,7 @@ def _to_xml_element(self): }, ) for di, dn in enumerate(self.dims): - data_array.attrib['Dim%d' % di] = str(dn) + data_array.attrib[f'Dim{di}'] = str(dn) if self.meta is not None: data_array.append(self.meta._to_xml_element()) diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py index ccd608324..5bcd8c8c3 100644 --- a/nibabel/gifti/parse_gifti_fast.py +++ b/nibabel/gifti/parse_gifti_fast.py @@ -284,8 +284,8 @@ def EndElementHandler(self, name): if name == 'GIFTI': if hasattr(self, 'expected_numDA') and self.expected_numDA != self.img.numDA: warnings.warn( - 'Actual # of data arrays does not match ' - '# expected: %d != %d.' % (self.expected_numDA, self.img.numDA) + 'Actual # of data arrays does not match # expected: ' + f'{self.expected_numDA} != {self.img.numDA}.' ) # remove last element of the list self.fsm_state.pop() diff --git a/nibabel/nicom/csareader.py b/nibabel/nicom/csareader.py index df379e0be..b98dae740 100644 --- a/nibabel/nicom/csareader.py +++ b/nibabel/nicom/csareader.py @@ -179,7 +179,7 @@ def get_vector(csa_dict, tag_name, n): if len(items) == 0: return None if len(items) != n: - raise ValueError('Expecting %d vector' % n) + raise ValueError(f'Expecting {n} vector') return np.array(items) diff --git a/nibabel/nicom/dicomreaders.py b/nibabel/nicom/dicomreaders.py index 5892bb8db..07362ee47 100644 --- a/nibabel/nicom/dicomreaders.py +++ b/nibabel/nicom/dicomreaders.py @@ -131,7 +131,7 @@ def slices_to_series(wrappers): break else: # no match in current volume lists volume_lists.append([dw]) - print('We appear to have %d Series' % len(volume_lists)) + print(f'We appear to have {len(volume_lists)} Series') # second pass out_vol_lists = [] for vol_list in volume_lists: @@ -143,7 +143,7 @@ def slices_to_series(wrappers): out_vol_lists += _third_pass(vol_list) continue out_vol_lists.append(vol_list) - print('We have %d volumes after second pass' % len(out_vol_lists)) + print(f'We have {len(out_vol_lists)} volumes after second pass') # final pass check for vol_list in out_vol_lists: zs = [s.slice_indicator for s in vol_list] diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 180f67cca..b9c78c81b 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -1559,7 +1559,7 @@ def get_intent(self, code_repr='label'): else: raise TypeError('repr can be "label" or "code"') n_params = len(recoder.parameters[code]) if known_intent else 0 - params = (float(hdr['intent_p%d' % (i + 1)]) for i in range(n_params)) + params = (float(hdr[f'intent_p{i}']) for i in range(1, n_params + 1)) name = hdr['intent_name'].item().decode('latin-1') return label, tuple(params), name @@ -1632,8 +1632,8 @@ def set_intent(self, code, params=(), name='', allow_unknown=False): hdr['intent_name'] = name all_params = [0] * 3 all_params[: 
len(params)] = params[:] - for i, param in enumerate(all_params): - hdr['intent_p%d' % (i + 1)] = param + for i, param in enumerate(all_params, start=1): + hdr[f'intent_p{i}'] = param def get_slice_duration(self): """Get slice duration @@ -1911,7 +1911,7 @@ def _chk_offset(hdr, fix=False): return hdr, rep if magic == hdr.single_magic and offset < hdr.single_vox_offset: rep.problem_level = 40 - rep.problem_msg = 'vox offset %d too low for single file nifti1' % offset + rep.problem_msg = f'vox offset {int(offset)} too low for single file nifti1' if fix: hdr['vox_offset'] = hdr.single_vox_offset rep.fix_msg = f'setting to minimum value of {hdr.single_vox_offset}' @@ -1943,7 +1943,7 @@ def _chk_xform_code(klass, code_type, hdr, fix): if code in recoder.value_set(): return hdr, rep rep.problem_level = 30 - rep.problem_msg = '%s %d not valid' % (code_type, code) + rep.problem_msg = f'{code_type} {code} not valid' if fix: hdr[code_type] = 0 rep.fix_msg = 'setting to 0' diff --git a/nibabel/orientations.py b/nibabel/orientations.py index 7265bf56f..12e414def 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -124,7 +124,7 @@ def ornt_transform(start_ornt, end_ornt): result[start_in_idx, :] = [end_in_idx, flip] break else: - raise ValueError('Unable to find out axis %d in start_ornt' % end_out_idx) + raise ValueError(f'Unable to find out axis {end_out_idx} in start_ornt') return result diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index ce8ee3c6e..19677c1a7 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -267,7 +267,7 @@ def set_zooms(self, zooms: Sequence[float]) -> None: shape = self.get_data_shape() ndim = len(shape) if len(zooms) != ndim: - raise HeaderDataError('Expecting %d zoom values for ndim %d' % (ndim, ndim)) + raise HeaderDataError(f'Expecting {ndim} zoom values for ndim {ndim}') if any(z < 0 for z in zooms): raise HeaderDataError('zooms must be positive') self._zooms = zooms diff --git a/nibabel/tests/test_funcs.py b/nibabel/tests/test_funcs.py index 5e59bc63b..866640616 100644 --- a/nibabel/tests/test_funcs.py +++ b/nibabel/tests/test_funcs.py @@ -23,7 +23,7 @@ def _as_fname(img): global _counter - fname = 'img%3d.nii' % _counter + fname = f'img{_counter:3d}.nii' _counter = _counter + 1 save(img, fname) return fname From 95cc728dd0c49245373d928f73c263a7ca7f7813 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 1 Oct 2024 20:03:23 +0200 Subject: [PATCH 2/9] =?UTF-8?q?MNT:=20Python=203=20string=20formatting:=20?= =?UTF-8?q?%i=20=E2=86=92=20%d?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Chris Markiewicz --- nibabel/freesurfer/io.py | 2 +- nibabel/gifti/util.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index 31745df72..5b3f6a366 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -465,7 +465,7 @@ def _read_annot_ctab_new_format(fobj, ctab_version): dt = _ANNOT_DT # This code works with a file version == 2, nothing else if ctab_version != 2: - raise Exception('Unrecognised .annot file version (%i)', ctab_version) + raise Exception(f'Unrecognised .annot file version ({ctab_version})') # maximum LUT index present in the file max_index = np.fromfile(fobj, dt, 1)[0] ctab = np.zeros((max_index, 5), dt) diff --git a/nibabel/gifti/util.py b/nibabel/gifti/util.py index 939329201..791f13302 100644 --- 
a/nibabel/gifti/util.py +++ b/nibabel/gifti/util.py @@ -10,7 +10,7 @@ from ..volumeutils import Recoder # Translate dtype.kind char codes to XML text output strings -KIND2FMT = {'i': '%i', 'u': '%i', 'f': '%10.6f', 'c': '%10.6f', 'V': ''} +KIND2FMT = {'i': '%d', 'u': '%d', 'f': '%10.6f', 'c': '%10.6f', 'V': ''} array_index_order_codes = Recoder( ( From 5daffcce1ed1f6c399d9ed057a32c038a0f87a25 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:49:36 +0200 Subject: [PATCH 3/9] STY: Apply ruff/refurb preview rule FURB145 FURB145 Prefer `copy` method over slicing --- nibabel/tests/test_nifti1.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 8eae0410e..f0029681b 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -578,12 +578,12 @@ def test_slice_times(self): with pytest.raises(HeaderDataError): # all None hdr.set_slice_times((None,) * len(times)) - n_mid_times = times[:] + n_mid_times = times.copy() n_mid_times[3] = None with pytest.raises(HeaderDataError): # None in middle hdr.set_slice_times(n_mid_times) - funny_times = times[:] + funny_times = times.copy() funny_times[3] = 0.05 with pytest.raises(HeaderDataError): # can't get single slice duration From 4810cd78bd7d21b9e9f8754bb0a7bd4a86235c49 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:52:29 +0200 Subject: [PATCH 4/9] STY: Apply ruff/refurb preview rule FURB148 FURB148 `enumerate` index is unused, use `for x in y` instead --- nibabel/cifti2/tests/test_cifti2io_header.py | 2 +- nibabel/tests/test_round_trip.py | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/nibabel/cifti2/tests/test_cifti2io_header.py b/nibabel/cifti2/tests/test_cifti2io_header.py index 1c37cfe0e..ecdf0c69a 100644 --- a/nibabel/cifti2/tests/test_cifti2io_header.py +++ b/nibabel/cifti2/tests/test_cifti2io_header.py @@ -72,7 +72,7 @@ def test_read_and_proxies(): @needs_nibabel_data('nitest-cifti2') def test_version(): - for i, dat in enumerate(datafiles): + for dat in datafiles: img = nib.load(dat) assert Version(img.header.version) == Version('2') diff --git a/nibabel/tests/test_round_trip.py b/nibabel/tests/test_round_trip.py index 07783fe55..6daf960aa 100644 --- a/nibabel/tests/test_round_trip.py +++ b/nibabel/tests/test_round_trip.py @@ -108,15 +108,15 @@ def test_round_trip(): iuint_types = [t for t in iuint_types if t in nifti_supported] f_types = [np.float32, np.float64] # Expanding standard deviations - for i, sd_10 in enumerate(sd_10s): + for sd_10 in sd_10s: sd = 10.0**sd_10 V_in = rng.normal(0, sd, size=(N, 1)) - for j, in_type in enumerate(f_types): - for k, out_type in enumerate(iuint_types): + for in_type in f_types: + for out_type in iuint_types: check_arr(sd_10, V_in, in_type, out_type, scaling_type) # Spread integers across range - for i, sd in enumerate(np.linspace(0.05, 0.5, 5)): - for j, in_type in enumerate(iuint_types): + for sd in np.linspace(0.05, 0.5, 5): + for in_type in iuint_types: info = np.iinfo(in_type) mn, mx = info.min, info.max type_range = mx - mn @@ -124,7 +124,7 @@ def test_round_trip(): # float(sd) because type_range can be type 'long' width = type_range * float(sd) V_in = rng.normal(center, width, size=(N, 1)) - for k, out_type in enumerate(iuint_types): + for out_type in iuint_types: check_arr(sd, V_in, in_type, out_type, 
scaling_type) From 02b7b0e308b594f730cd139448fbc3e9a0fc4b47 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:55:39 +0200 Subject: [PATCH 5/9] STY: Apply ruff/refurb preview rule FURB157 FURB157 Verbose expression in `Decimal` constructor --- nibabel/nicom/tests/test_dicomwrappers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index db3f66751..aefb35e89 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -991,8 +991,8 @@ def test_scale_data(self): assert_array_equal(data * 3 - 2, MFW(fake_mf)._scale_data(data)) # Decimals are OK for frame in frames: - frame.PixelValueTransformationSequence[0].RescaleSlope = Decimal('3') - frame.PixelValueTransformationSequence[0].RescaleIntercept = Decimal('-2') + frame.PixelValueTransformationSequence[0].RescaleSlope = Decimal(3) + frame.PixelValueTransformationSequence[0].RescaleIntercept = Decimal(-2) assert_array_equal(data * 3 - 2, MFW(fake_mf)._scale_data(data)) # A per-frame RWV scaling takes precedence over per-frame PixelValueTransformation for frame in frames: From 8c2a501de8c7a1d278634f00320acbfb22355799 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:56:25 +0200 Subject: [PATCH 6/9] STY: Apply ruff/refurb preview rule FURB192 FURB192 Prefer `min` over `sorted()` to compute the minimum value in a sequence --- nibabel/nicom/dicomwrappers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 009880e49..64b2b4a96 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -565,7 +565,7 @@ def applies(self, dcm_wrp) -> bool: warnings.warn( 'A multi-stack file was passed without an explicit filter, just using lowest StackID' ) - self._selected = sorted(stack_ids)[0] + self._selected = min(stack_ids) return True return False From 73bae7e98c4d86492f266adfad38febf41107a4a Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:59:16 +0200 Subject: [PATCH 7/9] STY: Apply ruff/flake8-comprehensions preview rule C409 C409 Unnecessary list comprehension passed to `tuple()` (rewrite as a generator) --- nibabel/streamlines/tests/test_array_sequence.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/streamlines/tests/test_array_sequence.py b/nibabel/streamlines/tests/test_array_sequence.py index a06b2c45d..96e66b44c 100644 --- a/nibabel/streamlines/tests/test_array_sequence.py +++ b/nibabel/streamlines/tests/test_array_sequence.py @@ -79,7 +79,7 @@ def test_creating_arraysequence_from_list(self): # List of ndarrays. 
N = 5 for ndim in range(1, N + 1): - common_shape = tuple([SEQ_DATA['rng'].randint(1, 10) for _ in range(ndim - 1)]) + common_shape = tuple(SEQ_DATA['rng'].randint(1, 10) for _ in range(ndim - 1)) data = generate_data(nb_arrays=5, common_shape=common_shape, rng=SEQ_DATA['rng']) check_arr_seq(ArraySequence(data), data) From b33bcde28337707fcd71dbddf69d8d1bc52a75ca Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 1 Oct 2024 19:00:30 +0200 Subject: [PATCH 8/9] STY: Apply ruff/flake8-comprehensions preview rule C419 C419 Unnecessary list comprehension --- nibabel/orientations.py | 2 +- nibabel/tests/test_volumeutils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nibabel/orientations.py b/nibabel/orientations.py index 12e414def..b620fff02 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -322,7 +322,7 @@ def axcodes2ornt(axcodes, labels=None): [ 2., 1.]]) """ labels = list(zip('LPI', 'RAS')) if labels is None else labels - allowed_labels = sum([list(L) for L in labels], []) + [None] + allowed_labels = sum((list(L) for L in labels), []) + [None] if len(allowed_labels) != len(set(allowed_labels)): raise ValueError(f'Duplicate labels in {allowed_labels}') if not set(axcodes).issubset(allowed_labels): diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index 9d321f07e..1bd44cbd0 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -607,7 +607,7 @@ def test_a2f_nanpos(): def test_a2f_bad_scaling(): # Test that pathological scalers raise an error - NUMERICAL_TYPES = sum([sctypes[key] for key in ['int', 'uint', 'float', 'complex']], []) + NUMERICAL_TYPES = sum((sctypes[key] for key in ['int', 'uint', 'float', 'complex']), []) for in_type, out_type, slope, inter in itertools.product( NUMERICAL_TYPES, NUMERICAL_TYPES, From ec15839f8141745600e40ce1b737ba768d33d2fe Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Wed, 2 Oct 2024 19:05:00 +0200 Subject: [PATCH 9/9] MNT: better way to normalize sequences to lists and flatten Co-authored-by: Chris Markiewicz --- nibabel/orientations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/orientations.py b/nibabel/orientations.py index b620fff02..f1cdd228b 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -322,7 +322,7 @@ def axcodes2ornt(axcodes, labels=None): [ 2., 1.]]) """ labels = list(zip('LPI', 'RAS')) if labels is None else labels - allowed_labels = sum((list(L) for L in labels), []) + [None] + allowed_labels = sum(map(list, labels), [None]) if len(allowed_labels) != len(set(allowed_labels)): raise ValueError(f'Duplicate labels in {allowed_labels}') if not set(axcodes).issubset(allowed_labels):
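
---

Reviewer note (not part of the patches above): for readers without the repository checked out, the short sketch below illustrates, in plain Python with no nibabel dependencies, the two most common rewrites in this series — UP031 (percent formatting replaced by f-strings with equivalent format specifiers) and the final patch's use of `sum(map(list, ...), [...])` to normalize label pairs to lists and flatten them. The variable names (`ndim`, `counter`, `labels`) are illustrative only and are not taken from nibabel.

```python
# Illustrative sketch only -- demonstrates the rewrites applied in this series.

# UP031: percent formatting replaced by f-strings with format specifiers.
ndim = 3
old_msg = 'Expecting %d zoom values for ndim %d' % (ndim, ndim)
new_msg = f'Expecting {ndim} zoom values for ndim {ndim}'
assert old_msg == new_msg

# A field width in percent formatting ('%3d') maps to the same spec in an f-string.
counter = 7
assert 'img%3d.nii' % counter == f'img{counter:3d}.nii'

# PATCH 9/9: normalize each label pair to a list and flatten, seeding the
# result with [None] instead of appending it afterwards (C419 flagged the
# list comprehension passed to sum()).
labels = list(zip('LPI', 'RAS'))
allowed_old = sum([list(L) for L in labels], []) + [None]
allowed_new = sum(map(list, labels), [None])  # ordering differs: None leads
assert set(allowed_old) == set(allowed_new)
```

The asserts pass as written, so the rewritten forms are behavior-preserving for these cases; the only observable difference is the position of `None` in the flattened label list, which does not matter because the result is only used for membership and duplicate checks.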