From 43379e038c5db3f917f45cc69889de0dcb6caa35 Mon Sep 17 00:00:00 2001 From: Philipp A Date: Fri, 28 Aug 2020 15:00:56 +0200 Subject: [PATCH] Utilize magic comma in new black update (#1394) --- .travis.yml | 3 ++ docs/extensions/function_images.py | 2 +- docs/extensions/github_links.py | 2 +- pyproject.toml | 1 - scanpy/cli.py | 10 ++-- scanpy/datasets/_ebi_expression_atlas.py | 6 ++- scanpy/plotting/_anndata.py | 15 +++--- scanpy/plotting/_baseplot_class.py | 8 ++- scanpy/plotting/_dotplot.py | 4 +- scanpy/plotting/_stacked_violin.py | 6 +-- scanpy/plotting/_tools/scatterplots.py | 17 +++--- scanpy/plotting/_utils.py | 55 +++++++++---------- scanpy/preprocessing/_combat.py | 6 +-- scanpy/preprocessing/_deprecated/__init__.py | 4 +- scanpy/preprocessing/_recipes.py | 2 +- scanpy/readwrite.py | 28 +++++----- scanpy/tests/test_embedding.py | 4 +- scanpy/tests/test_embedding_plots.py | 15 +++--- scanpy/tests/test_plotting.py | 56 +++++++++++--------- scanpy/tests/test_read_10x.py | 6 ++- scanpy/tools/_ingest.py | 4 +- scanpy/tools/_leiden.py | 5 +- setup.py | 2 +- 23 files changed, 129 insertions(+), 132 deletions(-) diff --git a/.travis.yml b/.travis.yml index 8af485c598..d5be1c9bea 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,8 @@ dist: xenial language: python +branches: + only: + - master # All other branches should become (draft) PRs and be build that way matrix: include: - name: "static analysis" diff --git a/docs/extensions/function_images.py b/docs/extensions/function_images.py index cae6ba21e8..42aac73c26 100644 --- a/docs/extensions/function_images.py +++ b/docs/extensions/function_images.py @@ -7,7 +7,7 @@ def insert_function_images( - app: Sphinx, what: str, name: str, obj: Any, options: Options, lines: List[str], + app: Sphinx, what: str, name: str, obj: Any, options: Options, lines: List[str] ): path = app.config.api_dir / f'{name}.png' if what != 'function' or not path.is_file(): diff --git a/docs/extensions/github_links.py b/docs/extensions/github_links.py index ada695d286..a2863627c0 100644 --- a/docs/extensions/github_links.py +++ b/docs/extensions/github_links.py @@ -26,7 +26,7 @@ def __call__( ): url = self.url_template.format(text) title = self.title_template.format(text) - options = {**dict(classes=[self.class_name],), **options} + options = {**dict(classes=[self.class_name]), **options} node = nodes.reference(rawtext, title, refuri=url, **options) return [node], [] diff --git a/pyproject.toml b/pyproject.toml index 696073c789..465e689cce 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,7 +48,6 @@ exclude = ''' |_paga |_umap |_utils - |_leiden |_louvain |_tsne_fix |_top_genes diff --git a/scanpy/cli.py b/scanpy/cli.py index bb748fb6e2..9a90cebd31 100644 --- a/scanpy/cli.py +++ b/scanpy/cli.py @@ -22,9 +22,7 @@ class _DelegatingSubparsersAction(_SubParsersAction): """Like a normal subcommand action, but uses a delegator for more choices""" - def __init__( - self, *args, _command: str, _runargs: Dict[str, Any], **kwargs, - ): + def __init__(self, *args, _command: str, _runargs: Dict[str, Any], **kwargs): super().__init__(*args, **kwargs) self.command = _command self._name_parser_map = self.choices = _CommandDelegator( @@ -38,9 +36,7 @@ class _CommandDelegator(cabc.MutableMapping): but don’t calculate the whole list until necessary """ - def __init__( - self, command: str, action: _DelegatingSubparsersAction, **runargs, - ): + def __init__(self, command: str, action: _DelegatingSubparsersAction, **runargs): self.command = command self.action = action 
self.parser_map = {} @@ -120,7 +116,7 @@ def _cmd_settings() -> None: def main( - argv: Optional[Sequence[str]] = None, *, check: bool = True, **runargs, + argv: Optional[Sequence[str]] = None, *, check: bool = True, **runargs ) -> Optional[CompletedProcess]: """\ Run a builtin scanpy command or a scanpy-* subcommand. diff --git a/scanpy/datasets/_ebi_expression_atlas.py b/scanpy/datasets/_ebi_expression_atlas.py index af6d4a8f01..013f0aa804 100644 --- a/scanpy/datasets/_ebi_expression_atlas.py +++ b/scanpy/datasets/_ebi_expression_atlas.py @@ -43,10 +43,12 @@ def download_experiment(accession: str): experiment_dir.mkdir(parents=True, exist_ok=True) _download( - design_url + "experiment-design", experiment_dir / "experimental_design.tsv", + design_url + "experiment-design", + experiment_dir / "experimental_design.tsv", ) _download( - mtx_url + "quantification-raw", experiment_dir / "expression_archive.zip", + mtx_url + "quantification-raw", + experiment_dir / "expression_archive.zip", ) diff --git a/scanpy/plotting/_anndata.py b/scanpy/plotting/_anndata.py index e3f696e08b..c2fcec7e5b 100755 --- a/scanpy/plotting/_anndata.py +++ b/scanpy/plotting/_anndata.py @@ -420,7 +420,12 @@ def add_centroid(centroids, name, Y, mask): if projection == '3d': data.append(Y[mask_remaining, 2]) axs[ikey].scatter( - *data, marker='.', c='lightgrey', s=size, edgecolors='none', zorder=-1, + *data, + marker='.', + c='lightgrey', + s=size, + edgecolors='none', + zorder=-1, ) legend = None if legend_loc.startswith('on data'): @@ -737,9 +742,7 @@ def violin( y = ys[0] g = sns.FacetGrid(obs_tidy, col=x, col_order=keys, sharey=False) # don't really know why this gives a warning without passing `order` - g = g.map( - sns.violinplot, y, orient='vertical', scale=scale, order=keys, **kwds, - ) + g = g.map(sns.violinplot, y, orient='vertical', scale=scale, order=keys, **kwds) if stripplot: g = g.map( sns.stripplot, @@ -1116,7 +1119,7 @@ def heatmap( if dendrogram: dendro_ax = fig.add_subplot(axs[1, 2], sharey=heatmap_ax) _plot_dendrogram( - dendro_ax, adata, groupby, ticks=ticks, dendrogram_key=dendrogram, + dendro_ax, adata, groupby, ticks=ticks, dendrogram_key=dendrogram ) # plot group legends on top of heatmap_ax (if given) @@ -1191,7 +1194,7 @@ def heatmap( if categorical: groupby_ax = fig.add_subplot(axs[2, 0]) ticks, labels, groupby_cmap, norm = _plot_categories_as_colorblocks( - groupby_ax, obs_tidy, colors=groupby_colors, orientation='bottom', + groupby_ax, obs_tidy, colors=groupby_colors, orientation='bottom' ) # add lines to main heatmap line_positions = ( diff --git a/scanpy/plotting/_baseplot_class.py b/scanpy/plotting/_baseplot_class.py index 41bc1fcca6..7d474c0836 100644 --- a/scanpy/plotting/_baseplot_class.py +++ b/scanpy/plotting/_baseplot_class.py @@ -27,7 +27,7 @@ cmap String denoting matplotlib color map. standard_scale - Whether or not to standardize the given dimension between 0 and 1, meaning for + Whether or not to standardize the given dimension between 0 and 1, meaning for each variable or group, subtract the minimum and divide each by its maximum. swap_axes By default, the x axis contains `var_names` (e.g. 
genes) and the y axis @@ -515,7 +515,7 @@ def _plot_legend(self, legend_ax, return_ax_dict, normalize): legend_height, ] fig, legend_gs = make_grid_spec( - legend_ax, nrows=2, ncols=1, height_ratios=height_ratios, + legend_ax, nrows=2, ncols=1, height_ratios=height_ratios ) color_legend_ax = fig.add_subplot(legend_gs[1]) @@ -792,9 +792,7 @@ def savefig(self, filename: str, bbox_inches: Optional[str] = 'tight', **kwargs) self.make_figure() pl.savefig(filename, bbox_inches=bbox_inches, **kwargs) - def _reorder_categories_after_dendrogram( - self, dendrogram, - ): + def _reorder_categories_after_dendrogram(self, dendrogram): """\ Function used by plotting functions that need to reorder the the groupby observations based on the dendrogram results. diff --git a/scanpy/plotting/_dotplot.py b/scanpy/plotting/_dotplot.py index 454a3ba945..f5f984453e 100644 --- a/scanpy/plotting/_dotplot.py +++ b/scanpy/plotting/_dotplot.py @@ -484,7 +484,7 @@ def _plot_legend(self, legend_ax, return_ax_dict, normalize): cbar_legend_height, ] fig, legend_gs = make_grid_spec( - legend_ax, nrows=4, ncols=1, height_ratios=height_ratios, + legend_ax, nrows=4, ncols=1, height_ratios=height_ratios ) if self.show_size_legend: @@ -943,7 +943,7 @@ def dotplot( dot_min=dot_min, smallest_dot=smallest_dot, dot_edge_lw=kwds.pop('linewidth', DotPlot.DEFAULT_DOT_EDGELW), - ).legend(colorbar_title=colorbar_title, size_title=size_title,) + ).legend(colorbar_title=colorbar_title, size_title=size_title) if return_fig: return dp diff --git a/scanpy/plotting/_stacked_violin.py b/scanpy/plotting/_stacked_violin.py index b27aec3733..16abbaeeb9 100644 --- a/scanpy/plotting/_stacked_violin.py +++ b/scanpy/plotting/_stacked_violin.py @@ -524,9 +524,7 @@ def _setup_violin_axes_ticks(self, row_ax, num_cols): row_ax.axis('off') # remove labels row_ax.set_yticklabels([]) - row_ax.tick_params( - axis='y', left=False, right=False, - ) + row_ax.tick_params(axis='y', left=False, right=False) row_ax.set_ylabel('') @@ -534,7 +532,7 @@ def _setup_violin_axes_ticks(self, row_ax, num_cols): row_ax.set_xticklabels([]) row_ax.tick_params( - axis='x', bottom=False, top=False, labeltop=False, labelbottom=False, + axis='x', bottom=False, top=False, labeltop=False, labelbottom=False ) diff --git a/scanpy/plotting/_tools/scatterplots.py b/scanpy/plotting/_tools/scatterplots.py index e5fb3c5b10..cd28057378 100644 --- a/scanpy/plotting/_tools/scatterplots.py +++ b/scanpy/plotting/_tools/scatterplots.py @@ -219,9 +219,7 @@ def embedding( if ( size is not None - and isinstance( - size, (cabc.Sequence, pandas.core.series.Series, np.ndarray,) - ) + and isinstance(size, (cabc.Sequence, pandas.core.series.Series, np.ndarray)) and len(size) == adata.shape[0] ): size = np.array(size, dtype=float) @@ -425,7 +423,7 @@ def embedding( if legend_fontoutline is not None: path_effect = [ - patheffects.withStroke(linewidth=legend_fontoutline, foreground='w',) + patheffects.withStroke(linewidth=legend_fontoutline, foreground='w') ] else: path_effect = None @@ -681,7 +679,7 @@ def diffmap(adata, **kwargs) -> Union[Axes, List[Axes], None]: show_save_ax=doc_show_save_ax, ) def draw_graph( - adata: AnnData, *, layout: Optional[_IGraphLayout] = None, **kwargs, + adata: AnnData, *, layout: Optional[_IGraphLayout] = None, **kwargs ) -> Union[Axes, List[Axes], None]: """\ Scatter plot in graph-drawing basis. 
@@ -786,7 +784,8 @@ def spatial( spatial_data = adata.uns['spatial'][library_id] if img_key is _empty: img_key = next( - (k for k in ['hires', 'lowres'] if k in spatial_data['images']), None, + (k for k in ['hires', 'lowres'] if k in spatial_data['images']), + None, ) if size is None: @@ -919,7 +918,7 @@ def _get_data_points( if f"tissue_{img_key}_scalef" in spatial_data['scalefactors'].keys(): scalef_key = f"tissue_{img_key}_scalef" data_points[0] = np.multiply( - data_points[0], spatial_data['scalefactors'][scalef_key], + data_points[0], spatial_data['scalefactors'][scalef_key] ) else: raise KeyError( @@ -999,7 +998,7 @@ def _add_categorical_legend( def _get_color_source_vector( - adata, value_to_plot, use_raw=False, gene_symbols=None, layer=None, groups=None, + adata, value_to_plot, use_raw=False, gene_symbols=None, layer=None, groups=None ): """ Get array from adata that colors will be based on. @@ -1024,7 +1023,7 @@ def _get_color_source_vector( else: values = adata.obs_vector(value_to_plot, layer=layer) if groups and is_categorical_dtype(values): - values = values.replace(values.categories.difference(groups), np.nan,) + values = values.replace(values.categories.difference(groups), np.nan) return values diff --git a/scanpy/plotting/_utils.py b/scanpy/plotting/_utils.py index 705c048b1c..7aec9ae6fe 100644 --- a/scanpy/plotting/_utils.py +++ b/scanpy/plotting/_utils.py @@ -155,7 +155,7 @@ def timeseries_subplot( def timeseries_as_heatmap( - X: np.ndarray, var_names: Collection[str] = (), highlights_x=(), color_map=None, + X: np.ndarray, var_names: Collection[str] = (), highlights_x=(), color_map=None ): """\ Plot timeseries as heatmap. @@ -357,7 +357,7 @@ def _validate_palette(adata, key): def _set_colors_for_categorical_obs( - adata, value_to_plot, palette: Union[str, Sequence[str], Cycler], + adata, value_to_plot, palette: Union[str, Sequence[str], Cycler] ): """ Sets the adata.uns[value_to_plot + '_colors'] according to the given palette @@ -541,8 +541,7 @@ def plot_arrows(axs, adata, basis, arrows_kwds=None): def scatter_group(ax, key, imask, adata, Y, projection='2d', size=3, alpha=None): - """Scatter of group using representation of data Y. - """ + """Scatter of group using representation of data Y.""" mask = adata.obs[key].cat.categories[imask] == adata.obs[key].values color = adata.uns[key + '_colors'][imask] if not isinstance(color[0], str): @@ -576,8 +575,7 @@ def setup_axes( projection: Literal['2d', '3d'] = '2d', show_ticks=False, ): - """Grid of axes for plotting, legends and colorbars. - """ + """Grid of axes for plotting, legends and colorbars.""" make_projection_available(projection) if left_margin is not None: raise NotImplementedError('We currently don’t support `left_margin`.') @@ -738,7 +736,7 @@ def scatter_base( fig = pl.gcf() ax_cb = fig.add_axes(rectangle) cb = pl.colorbar( - sct, format=ticker.FuncFormatter(ticks_formatter), cax=ax_cb, + sct, format=ticker.FuncFormatter(ticks_formatter), cax=ax_cb ) # set the title if title is not None: @@ -814,9 +812,7 @@ def scatter_single(ax: Axes, Y: np.ndarray, *args, **kwargs): ax.set_yticks([]) -def arrows_transitions( - ax: Axes, X: np.ndarray, indices: Sequence[int], weight=None, -): +def arrows_transitions(ax: Axes, X: np.ndarray, indices: Sequence[int], weight=None): """ Plot arrows of transitions in data matrix. @@ -876,14 +872,12 @@ def ticks_formatter(x, pos): def pimp_axis(x_or_y_ax): - """Remove trailing zeros. 
- """ + """Remove trailing zeros.""" x_or_y_ax.set_major_formatter(ticker.FuncFormatter(ticks_formatter)) def scale_to_zero_one(x): - """Take some 1d data and scale it so that min matches 0 and max 1. - """ + """Take some 1d data and scale it so that min matches 0 and max 1.""" xscaled = x - np.min(x) xscaled /= np.max(xscaled) return xscaled @@ -892,28 +886,27 @@ def scale_to_zero_one(x): def hierarchy_pos(G, root, levels=None, width=1.0, height=1.0): """Tree layout for networkx graph. - See https://stackoverflow.com/questions/29586520/can-one-get-hierarchical-graphs-from-networkx-with-python-3 - answer by burubum. - - If there is a cycle that is reachable from root, then this will see - infinite recursion. - - Parameters - ---------- - G: the graph - root: the root node - levels: a dictionary - key: level number (starting from 0) - value: number of nodes in this level - width: horizontal space allocated for drawing - height: vertical space allocated for drawing + See https://stackoverflow.com/questions/29586520/can-one-get-hierarchical-graphs-from-networkx-with-python-3 + answer by burubum. + + If there is a cycle that is reachable from root, then this will see + infinite recursion. + + Parameters + ---------- + G: the graph + root: the root node + levels: a dictionary + key: level number (starting from 0) + value: number of nodes in this level + width: horizontal space allocated for drawing + height: vertical space allocated for drawing """ TOTAL = "total" CURRENT = "current" def make_levels(levels, node=root, currentLevel=0, parent=None): - """Compute the number of nodes for each level - """ + """Compute the number of nodes for each level""" if currentLevel not in levels: levels[currentLevel] = {TOTAL: 0, CURRENT: 0} levels[currentLevel][TOTAL] += 1 diff --git a/scanpy/preprocessing/_combat.py b/scanpy/preprocessing/_combat.py index b6e03d2ddf..e2d8140bca 100644 --- a/scanpy/preprocessing/_combat.py +++ b/scanpy/preprocessing/_combat.py @@ -11,7 +11,7 @@ def _design_matrix( - model: pd.DataFrame, batch_key: str, batch_levels: Collection[str], + model: pd.DataFrame, batch_key: str, batch_levels: Collection[str] ) -> pd.DataFrame: """\ Computes a simple design matrix. @@ -63,7 +63,7 @@ def _design_matrix( def _standardize_data( - model: pd.DataFrame, data: pd.DataFrame, batch_key: str, + model: pd.DataFrame, data: pd.DataFrame, batch_key: str ) -> Tuple[pd.DataFrame, pd.DataFrame, np.ndarray, np.ndarray]: """\ Standardizes the data per gene. @@ -190,7 +190,7 @@ def combat( X = adata.X.A.T else: X = adata.X.T - data = pd.DataFrame(data=X, index=adata.var_names, columns=adata.obs_names,) + data = pd.DataFrame(data=X, index=adata.var_names, columns=adata.obs_names) sanitize_anndata(adata) diff --git a/scanpy/preprocessing/_deprecated/__init__.py b/scanpy/preprocessing/_deprecated/__init__.py index f3893be9e9..2bb8730540 100644 --- a/scanpy/preprocessing/_deprecated/__init__.py +++ b/scanpy/preprocessing/_deprecated/__init__.py @@ -3,7 +3,9 @@ def normalize_per_cell_weinreb16_deprecated( - X: np.ndarray, max_fraction: float = 1, mult_with_mean: bool = False, + X: np.ndarray, + max_fraction: float = 1, + mult_with_mean: bool = False, ) -> np.ndarray: """\ Normalize each cell [Weinreb17]_. 
diff --git a/scanpy/preprocessing/_recipes.py b/scanpy/preprocessing/_recipes.py index 40090e19a0..d211bcc20a 100644 --- a/scanpy/preprocessing/_recipes.py +++ b/scanpy/preprocessing/_recipes.py @@ -64,7 +64,7 @@ def recipe_weinreb17( def recipe_seurat( - adata: AnnData, log: bool = True, plot: bool = False, copy: bool = False, + adata: AnnData, log: bool = True, plot: bool = False, copy: bool = False ) -> Optional[AnnData]: """\ Normalization and filtering as of Seurat [Satija15]_. diff --git a/scanpy/readwrite.py b/scanpy/readwrite.py index 29bcc4d69d..b78edda728 100644 --- a/scanpy/readwrite.py +++ b/scanpy/readwrite.py @@ -174,7 +174,7 @@ def read_10x_h5( Feature types """ start = logg.info(f'reading {filename}') - is_present = _check_datafile_present_and_download(filename, backup_url=backup_url,) + is_present = _check_datafile_present_and_download(filename, backup_url=backup_url) if not is_present: logg.debug(f'... did not find original file {filename}') with tables.open_file(str(filename), 'r') as f: @@ -233,7 +233,8 @@ def _read_legacy_10x_h5(filename, *, genome=None, start=None): data = dsets['data'].view('float32') data[:] = dsets['data'] matrix = csr_matrix( - (data, dsets['indices'], dsets['indptr']), shape=(N, M), + (data, dsets['indices'], dsets['indptr']), + shape=(N, M), ) # the csc matrix is automatically the transposed csr matrix # as scanpy expects it, so, no need for a further transpostion @@ -268,7 +269,8 @@ def _read_v3_10x_h5(filename, *, start=None): data = dsets['data'].view('float32') data[:] = dsets['data'] matrix = csr_matrix( - (data, dsets['indices'], dsets['indptr']), shape=(N, M), + (data, dsets['indices'], dsets['indptr']), + shape=(N, M), ) adata = AnnData( matrix, @@ -498,7 +500,9 @@ def _read_legacy_10x_mtx( """ path = Path(path) adata = read( - path / f'{prefix}matrix.mtx', cache=cache, cache_compression=cache_compression, + path / f'{prefix}matrix.mtx', + cache=cache, + cache_compression=cache_compression, ).T # transpose the data genes = pd.read_csv(path / f'{prefix}genes.tsv', header=None, sep='\t') if var_names == 'gene_symbols': @@ -610,7 +614,7 @@ def write( def read_params( - filename: Union[Path, str], asheader: bool = False, + filename: Union[Path, str], asheader: bool = False ) -> Dict[str, Union[int, float, bool, str, None]]: """\ Read parameter dictionary from text file. @@ -691,7 +695,7 @@ def _read( ) else: ext = is_valid_filename(filename, return_ext=True) - is_present = _check_datafile_present_and_download(filename, backup_url=backup_url,) + is_present = _check_datafile_present_and_download(filename, backup_url=backup_url) if not is_present: logg.debug(f'... did not find original file {filename}') # read hdf5 files @@ -849,8 +853,7 @@ def is_float(string: str) -> float: def is_int(string: str) -> bool: - """Check whether string is integer. - """ + """Check whether string is integer.""" try: int(string) return True @@ -859,8 +862,7 @@ def is_int(string: str) -> bool: def convert_bool(string: str) -> Tuple[bool, bool]: - """Check whether string is boolean. - """ + """Check whether string is boolean.""" if string == 'True': return True, True elif string == 'False': @@ -870,8 +872,7 @@ def convert_bool(string: str) -> Tuple[bool, bool]: def convert_string(string: str) -> Union[int, float, bool, str, None]: - """Convert string to int, float or bool. 
- """ + """Convert string to int, float or bool.""" if is_int(string): return int(string) elif is_float(string): @@ -952,8 +953,7 @@ def _download(url: str, path: Path): def _check_datafile_present_and_download(path, backup_url=None): - """Check whether the file is present, otherwise download. - """ + """Check whether the file is present, otherwise download.""" path = Path(path) if path.is_file(): return True diff --git a/scanpy/tests/test_embedding.py b/scanpy/tests/test_embedding.py index 7f563c5b72..b617ae75e6 100644 --- a/scanpy/tests/test_embedding.py +++ b/scanpy/tests/test_embedding.py @@ -21,9 +21,7 @@ def test_umap_init_dtype(): needs_fa2 = pytest.mark.skipif(not find_spec("fa2"), reason="needs module `fa2`") -@pytest.mark.parametrize( - "layout", [pytest.param("fa", marks=needs_fa2), "fr"], -) +@pytest.mark.parametrize("layout", [pytest.param("fa", marks=needs_fa2), "fr"]) def test_umap_init_paga(layout): pbmc = sc.datasets.pbmc68k_reduced() pbmc = pbmc[:100, :].copy() diff --git a/scanpy/tests/test_embedding_plots.py b/scanpy/tests/test_embedding_plots.py index 47a6e2757a..a525afce58 100644 --- a/scanpy/tests/test_embedding_plots.py +++ b/scanpy/tests/test_embedding_plots.py @@ -71,7 +71,7 @@ def adata(): @pytest.fixture def fixture_request(request): - """Returns a Request object. + """Returns a Request object. Allows you to access names of parameterized tests from within a test. """ @@ -79,7 +79,8 @@ def fixture_request(request): @pytest.fixture( - params=[(0, 0, 0, 1), None], ids=["na_color.black_tup", "na_color.default"], + params=[(0, 0, 0, 1), None], + ids=["na_color.black_tup", "na_color.default"], ) def na_color(request): return request.param @@ -142,15 +143,13 @@ def test_missing_values_categorical( kwargs["na_color"] = na_color kwargs["na_in_legend"] = na_in_legend - plotfunc( - adata, color=["label", "label_missing"], **kwargs, - ) + plotfunc(adata, color=["label", "label_missing"], **kwargs) save_and_compare_images(base_name) def test_missing_values_continuous( - fixture_request, image_comparer, adata, plotfunc, na_color, legend_loc, vbounds, + fixture_request, image_comparer, adata, plotfunc, na_color, legend_loc, vbounds ): save_and_compare_images = image_comparer( MISSING_VALUES_ROOT, MISSING_VALUES_FIGS, tol=15 @@ -164,8 +163,6 @@ def test_missing_values_continuous( if na_color is not None: kwargs["na_color"] = na_color - plotfunc( - adata, color=["1", "1_missing"], **kwargs, - ) + plotfunc(adata, color=["1", "1_missing"], **kwargs) save_and_compare_images(base_name) diff --git a/scanpy/tests/test_plotting.py b/scanpy/tests/test_plotting.py index 7698806192..236ca9bc9b 100644 --- a/scanpy/tests/test_plotting.py +++ b/scanpy/tests/test_plotting.py @@ -37,7 +37,12 @@ def test_heatmap(image_comparer): adata = sc.datasets.krumsiek11() sc.pl.heatmap( - adata, adata.var_names, 'cell_type', use_raw=False, show=False, dendrogram=True, + adata, + adata.var_names, + 'cell_type', + use_raw=False, + show=False, + dendrogram=True, ) save_and_compare_images('master_heatmap') @@ -606,11 +611,11 @@ def test_rank_genes_groups(image_comparer, name, fn): @pytest.mark.parametrize( "id,fn", [ - ("heatmap", sc.pl.heatmap,), - ("dotplot", sc.pl.dotplot,), - ("matrixplot", sc.pl.matrixplot,), - ("stacked_violin", sc.pl.stacked_violin,), - ("tracksplot", sc.pl.tracksplot,), + ("heatmap", sc.pl.heatmap), + ("dotplot", sc.pl.dotplot), + ("matrixplot", sc.pl.matrixplot), + ("stacked_violin", sc.pl.stacked_violin), + ("tracksplot", sc.pl.tracksplot), ], ) def 
test_genes_symbols(image_comparer, id, fn): @@ -639,11 +644,11 @@ def pbmc_scatterplots(): @pytest.mark.parametrize( - "id,fn", + 'id,fn', [ - ("pca", partial(sc.pl.pca, color='bulk_labels')), + ('pca', partial(sc.pl.pca, color='bulk_labels')), ( - "pca_with_fonts", + 'pca_with_fonts', partial( sc.pl.pca, color=['bulk_labels', 'louvain'], @@ -654,10 +659,11 @@ def pbmc_scatterplots(): ), ), pytest.param( - "3dprojection", partial(sc.pl.pca, color='bulk_labels', projection='3d'), + '3dprojection', + partial(sc.pl.pca, color='bulk_labels', projection='3d'), ), ( - "multipanel", + 'multipanel', partial( sc.pl.pca, color=['CD3D', 'CD79A'], @@ -669,21 +675,19 @@ def pbmc_scatterplots(): ), ), ( - "pca_sparse_layer", - partial( - sc.pl.pca, color=['CD3D', 'CD79A'], layer="sparse", cmap='viridis', - ), + 'pca_sparse_layer', + partial(sc.pl.pca, color=['CD3D', 'CD79A'], layer='sparse', cmap='viridis'), ), pytest.param( - "tsne", + 'tsne', partial(sc.pl.tsne, color=['CD3D', 'louvain']), marks=pytest.mark.xfail( - reason="slight differences even after setting random_state." + reason='slight differences even after setting random_state.' ), ), - ("umap_nocolor", sc.pl.umap), + ('umap_nocolor', sc.pl.umap), ( - "umap", + 'umap', partial( sc.pl.umap, color=['louvain'], @@ -692,7 +696,7 @@ def pbmc_scatterplots(): ), ), ( - "umap_gene_expr", + 'umap_gene_expr', partial( sc.pl.umap, color=np.array(['LYZ', 'CD79A']), @@ -703,7 +707,7 @@ def pbmc_scatterplots(): ), ), ( - "umap_layer", + 'umap_layer', partial( sc.pl.umap, color=np.array(['LYZ', 'CD79A']), @@ -716,13 +720,13 @@ def pbmc_scatterplots(): ), ), ( - "umap_with_edges", + 'umap_with_edges', partial(sc.pl.umap, color='louvain', edges=True, edges_width=0.1, s=50), ), - # ("diffmap", partial(sc.pl.diffmap, components='all', color=['CD3D'])), + # ('diffmap', partial(sc.pl.diffmap, components='all', color=['CD3D'])), ( - "umap_symbols", - partial(sc.pl.umap, color=['1', '2', '3'], gene_symbols="numbers"), + 'umap_symbols', + partial(sc.pl.umap, color=['1', '2', '3'], gene_symbols='numbers'), ), ], ) @@ -967,7 +971,7 @@ def test_visium_default(image_comparer): adata = sc.read_visium(HERE / '_data' / 'visium_data' / '1.0.0') adata.obs = adata.obs.astype({'array_row': 'str'}) - sc.pl.spatial(adata,) + sc.pl.spatial(adata) save_and_compare_images('master_spatial_visium_default') diff --git a/scanpy/tests/test_read_10x.py b/scanpy/tests/test_read_10x.py index f633ddf86f..6ed125ab4c 100644 --- a/scanpy/tests/test_read_10x.py +++ b/scanpy/tests/test_read_10x.py @@ -64,7 +64,8 @@ def test_read_10x(tmp_path, mtx_path, h5_path, prefix): def test_read_10x_h5_v1(): spec_genome_v1 = sc.read_10x_h5( - ROOT / '1.2.0' / 'filtered_gene_bc_matrices_h5.h5', genome='hg19_chr21', + ROOT / '1.2.0' / 'filtered_gene_bc_matrices_h5.h5', + genome='hg19_chr21', ) nospec_genome_v1 = sc.read_10x_h5( ROOT / '1.2.0' / 'filtered_gene_bc_matrices_h5.h5' @@ -74,7 +75,8 @@ def test_read_10x_h5_v1(): def test_read_10x_h5(): spec_genome_v3 = sc.read_10x_h5( - ROOT / '3.0.0' / 'filtered_feature_bc_matrix.h5', genome='GRCh38_chr21', + ROOT / '3.0.0' / 'filtered_feature_bc_matrix.h5', + genome='GRCh38_chr21', ) nospec_genome_v3 = sc.read_10x_h5(ROOT / '3.0.0' / 'filtered_feature_bc_matrix.h5') assert_anndata_equal(spec_genome_v3, nospec_genome_v3) diff --git a/scanpy/tools/_ingest.py b/scanpy/tools/_ingest.py index 90a0ba55c7..ceb105349a 100644 --- a/scanpy/tools/_ingest.py +++ b/scanpy/tools/_ingest.py @@ -419,11 +419,11 @@ def neighbors(self, k=None, queue_size=5, random_state=0): k = 
self._n_neighbors init = self._initialise_search( - self._rp_forest, train, test, int(k * queue_size), rng_state=rng_state, + self._rp_forest, train, test, int(k * queue_size), rng_state=rng_state ) result = self._search( - train, self._search_graph.indptr, self._search_graph.indices, init, test, + train, self._search_graph.indptr, self._search_graph.indices, init, test ) indices, dists = deheap_sort(result) self._indices, self._distances = indices[:, :k], dists[:, :k] diff --git a/scanpy/tools/_leiden.py b/scanpy/tools/_leiden.py index ba0a304e98..d6b59a059c 100644 --- a/scanpy/tools/_leiden.py +++ b/scanpy/tools/_leiden.py @@ -14,7 +14,10 @@ try: from leidenalg.VertexPartition import MutableVertexPartition except ImportError: - class MutableVertexPartition: pass + + class MutableVertexPartition: + pass + MutableVertexPartition.__module__ = 'leidenalg.VertexPartition' diff --git a/setup.py b/setup.py index 0e87acb829..9a352db78d 100644 --- a/setup.py +++ b/setup.py @@ -37,7 +37,7 @@ magic=['magic-impute>=2.0'], skmisc=['scikit-misc>=0.1.3'], harmony=['harmonypy'], - dev=['setuptools_scm', 'pytoml', 'black'], + dev=['setuptools_scm', 'pytoml', 'black>=20.8b1'], doc=[ 'sphinx<3.1, >3', 'sphinx_rtd_theme>=0.3.1',
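For reference, the behaviour this patch adopts is black's "magic trailing comma", available from black 20.8b1 (hence the `black>=20.8b1` pin added to the `dev` extra in `setup.py`): a call or collection written without a trailing comma is collapsed onto a single line when it fits, while a trailing comma after the last element keeps it exploded, one element per line. The sketch below is illustrative and not part of the patch; it reproduces the rule with black's public `format_str` API, and the snippets being formatted are made up for the example.

    import black  # assumes black >= 20.8b1, where the magic trailing comma is active

    # Without a trailing comma after the last argument, a short call is
    # collapsed back onto one line.
    collapsed_src = "d = dict(\n    a=1,\n    b=2\n)\n"
    print(black.format_str(collapsed_src, mode=black.FileMode()))
    # -> d = dict(a=1, b=2)

    # With a trailing comma after the last argument, black keeps the call
    # exploded, one argument per line, even though it would fit on one line.
    exploded_src = "d = dict(a=1, b=2,)\n"
    print(black.format_str(exploded_src, mode=black.FileMode()))
    # -> d = dict(
    #        a=1,
    #        b=2,
    #    )

This is why the diff both removes trailing commas (to let black join short calls, e.g. in `scanpy/cli.py`) and adds them (to keep multi-line calls such as the `_download(...)` calls in `scanpy/datasets/_ebi_expression_atlas.py` exploded).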