diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 5e3e7f64..e47ae07b 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -11,6 +11,7 @@ jobs:
         tox-action:
           - lint
          - pytest
+          - mypy
     steps:
       - uses: actions/checkout@v4
        with:
diff --git a/Makefile b/Makefile
index e7f6d6b4..ab106366 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,4 @@
 # Default tests run with make test and make quick-tests
-NOSE_TESTS?=tests ephemeris
 # Default environment for make tox
 ENV?=py37
 # Extra arguments supplied to tox command
@@ -72,17 +71,17 @@ setup-git-hook-lint:
 setup-git-hook-lint-and-test:
 	cp $(BUILD_SCRIPTS_DIR)/pre-commit-lint-and-test .git/hooks/pre-commit
 
-flake8:
-	$(IN_VENV) flake8 --max-complexity 15 $(SOURCE_DIR) $(TEST_DIR)
+format:
+	$(IN_VENV) isort $(SOURCE_DIR) $(TEST_DIR) && black $(SOURCE_DIR) $(TEST_DIR) && ruff --fix $(SOURCE_DIR) $(TEST_DIR)
 
 lint:
-	$(IN_VENV) tox -e py27-lint && tox -e py34-lint
+	$(IN_VENV) tox -e py38-lint
 
 lint-readme:
 	$(IN_VENV) python setup.py check -r -s
 
 test:
-	$(IN_VENV) nosetests $(NOSE_TESTS)
+	$(IN_VENV) pytest $(TEST_DIR)
 
 tox:
 	$(IN_VENV) tox -e $(ENV) -- $(ARGS)
diff --git a/dev-requirements.txt b/dev-requirements.txt
index ae6e7192..ac858691 100644
--- a/dev-requirements.txt
+++ b/dev-requirements.txt
@@ -15,6 +15,9 @@ CommonMark
 pyflakes
 flake8
 flake8-import-order
+isort
+black
+ruff
 
 # For release
 wheel
diff --git a/docs/conf.py b/docs/conf.py
index c7a3b65c..f2768b0e 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -13,19 +13,19 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.
 
-import sys
 import os
+import sys
 
 # If extensions (or modules to document with autodoc) are in another
 # directory, add these directories to sys.path here. If the directory is
 # relative to the documentation root, use os.path.abspath to make it
 # absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
 
 # Get the project root dir, which is the parent dir of this
 cwd = os.getcwd()
 project_root = os.path.dirname(cwd)
-source = (os.path.join(project_root, 'src'))
+source = os.path.join(project_root, "src")
 
 # Insert the project root dir as the first element in the PYTHONPATH.
 # This lets us ensure that the source package is imported, and that its
@@ -39,27 +39,27 @@
 # -- General configuration ---------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
 
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinxarg.ext']
+extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode", "sphinxarg.ext"]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix of source filenames.
-source_suffix = ['.rst', '.md']
+source_suffix = [".rst", ".md"]
 
 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'Ephemeris'
-copyright = u'2017'
+project = "Ephemeris"
+copyright = "2017"
 
 # The version info for the project you're documenting, acts as replacement
 # for |version| and |release|, also used in various other places throughout
@@ -72,42 +72,42 @@
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-#language = None
+# language = None
 
 # There are two options for replacing |today|: either, you set today to
 # some non-false value, then it is used:
-#today = ''
+# today = ''
 
 # Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build', '_*']
+exclude_patterns = ["_build", "_*"]
 
 # The reST default role (used for this markup: `text`) to use for all
 # documents.
-#default_role = None
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-#show_authors = False
+# show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
 
 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
 
 # If true, keep warnings as "system message" paragraphs in the built
 # documents.
-#keep_warnings = False
+# keep_warnings = False
 
 
 # -- Options for HTML output -------------------------------------------
 
@@ -120,79 +120,79 @@
 # Theme options are theme-specific and customize the look and feel of a
 # theme further. For a list of options available for each theme, see the
 # documentation.
-#html_theme_options = {}
+# html_theme_options = {}
 
 # Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
+# html_theme_path = []
 
 # The name for this set of Sphinx documents. If None, it defaults to
 # "<project> v<release> documentation".
-#html_title = None
+# html_title = None
 
 # A shorter title for the navigation bar. Default is the same as
 # html_title.
-#html_short_title = None
+# html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the
 # top of the sidebar.
-#html_logo = None
+# html_logo = None
 
 # The name of an image file (within the static path) to use as favicon
 # of the docs. This file should be a Windows icon file (.ico) being
 # 16x16 or 32x32 pixels large.
-#html_favicon = None
+# html_favicon = None
 
 # Add any paths that contain custom static files (such as style sheets)
 # here, relative to this directory. They are copied after the builtin
 # static files, so a file named "default.css" will overwrite the builtin
 # "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page
 # bottom, using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
 
 # Additional templates that should be rendered to pages, maps page names
 # to template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
 
 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
 
 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
 
 # If true, "Created using Sphinx" is shown in the HTML footer.
 # Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
 
 # If true, "(C) Copyright ..." is shown in the HTML footer.
 # Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
 
 # If true, an OpenSearch description file will be output, and all pages
 # will contain a <link> tag referring to it. The value of this option
 # must be the base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'ephemerisdoc'
+htmlhelp_basename = "ephemerisdoc"
 
 
 # -- Options for LaTeX output ------------------------------------------
 
@@ -200,10 +200,8 @@
 latex_elements = {
     # The paper size ('letterpaper' or 'a4paper').
     #'papersize': 'letterpaper',
-
     # The font size ('10pt', '11pt' or '12pt').
     #'pointsize': '10pt',
-
     # Additional stuff for the LaTeX preamble.
     #'preamble': '',
 }
@@ -212,30 +210,34 @@
 # (source start file, target name, title, author, documentclass
 # [howto/manual]).
 latex_documents = [
-    ('index', 'ephemeris.tex',
-     u'Ephemeris Documentation',
-     u'Galaxy Project and Community', 'manual'),
+    (
+        "index",
+        "ephemeris.tex",
+        "Ephemeris Documentation",
+        "Galaxy Project and Community",
+        "manual",
+    ),
 ]
 
 # The name of an image file (relative to this directory) to place at
 # the top of the title page.
-#latex_logo = None
+# latex_logo = None
 
 # For "manual" documents, if this is true, then toplevel headings
 # are parts, not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
 
 # If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
 
 # If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
 
 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
 
 # If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
 
 
 # -- Options for manual page output ------------------------------------
 
@@ -243,13 +245,17 @@
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
 man_pages = [
-    ('index', 'ephemeris',
-     u'Ephemris Documentation',
-     [u'Galaxy Project and Community'], 1)
+    (
+        "index",
+        "ephemeris",
+        "Ephemeris Documentation",
+        ["Galaxy Project and Community"],
+        1,
+    )
 ]
 
 # If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
 
 
 # -- Options for Texinfo output ----------------------------------------
 
@@ -258,22 +264,25 @@
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-    ('index', 'ephemeris',
-     u'Ephemeris Documentation',
-     u'Galaxy Project and Community',
-     'ephemeris',
-     'One line description of project.',
-     'Miscellaneous'),
+    (
+        "index",
+        "ephemeris",
+        "Ephemeris Documentation",
+        "Galaxy Project and Community",
+        "ephemeris",
+        "One line description of project.",
+        "Miscellaneous",
+    ),
 ]
 
 # Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []
 
 # If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True
 
 # How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'
 
 # If true, do not generate a @detailmenu in the "Top" node's menu.
-#texinfo_no_detailmenu = False
+# texinfo_no_detailmenu = False
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..0de1521d
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,28 @@
+[tool.black]
+line-length = 120
+target-version = ["py37"]
+
+[tool.isort]
+combine_as_imports = true
+force_alphabetical_sort_within_sections = true
+# Override force_grid_wrap value from profile=black, but black is still happy
+force_grid_wrap = 2
+# Same line length as for black
+line_length = 120
+no_lines_before = "LOCALFOLDER"
+profile = "black"
+reverse_relative = true
+skip_gitignore = true
+
+[tool.ruff]
+# Enable: pycodestyle errors (E), Pyflakes (F), flake8-bugbear (B),
+# flake8-logging-format (G) and pyupgrade (UP)
+select = ["E", "F", "B", "G", "UP"]
+target-version = "py38"
+# Exceptions:
+# B008 Do not perform function calls in argument defaults (for FastAPI Depends and Body)
+# B9 flake8-bugbear opinionated warnings
+# E402 module level import not at top of file  # TODO, we would like to improve this.
+# E501 is line length (delegated to black)
+# G* are TODOs
+ignore = ["B008", "B9", "E402", "E501", "G001", "G002", "G004"]
diff --git a/scripts/bootstrap_history.py b/scripts/bootstrap_history.py
index a7b38839..b0622c93 100644
--- a/scripts/bootstrap_history.py
+++ b/scripts/bootstrap_history.py
@@ -3,6 +3,7 @@
 # pull message down and embed, use arg parse, handle multiple, etc...
 import os
 import sys
+
 try:
     import requests
 except ImportError:
@@ -46,7 +47,7 @@ def extend(from_str, line):
             message = commit["message"]
             message = get_first_sentence(message)
         elif requests is not None and ident.startswith("pr"):
-            pull_request = ident[len("pr"):]
+            pull_request = ident[len("pr") :]
             api_url = urljoin(PROJECT_API, "pulls/%s" % pull_request)
             req = requests.get(api_url).json()
             print(api_url)
@@ -56,7 +57,7 @@ def extend(from_str, line):
             message = message.rstrip(".")
             message += " (thanks to `@%s`_)." % req["user"]["login"]
         elif requests is not None and ident.startswith("issue"):
-            issue = ident[len("issue"):]
+            issue = ident[len("issue") :]
             api_url = urljoin(PROJECT_API, "issues/%s" % issue)
             req = requests.get(api_url).json()
             message = req["title"]
@@ -66,12 +67,12 @@ def extend(from_str, line):
 
     to_doc = message + " "
     if ident.startswith("pr"):
-        pull_request = ident[len("pr"):]
+        pull_request = ident[len("pr") :]
         text = ".. _Pull Request {0}: {1}/pull/{0}".format(pull_request, PROJECT_URL)
         history = extend(".. github_links", text)
github_links", text) to_doc += "`Pull Request {0}`_".format(pull_request) elif ident.startswith("issue"): - issue = ident[len("issue"):] + issue = ident[len("issue") :] text = ".. _Issue {0}: {1}/issues/{0}".format(issue, PROJECT_URL) history = extend(".. github_links", text) to_doc += "`Issue {0}`_".format(issue) @@ -94,7 +95,7 @@ def get_first_sentence(message): def wrap(message): wrapper = textwrap.TextWrapper(initial_indent="* ") - wrapper.subsequent_indent = ' ' + wrapper.subsequent_indent = " " wrapper.width = 78 return "\n".join(wrapper.wrap(message)) diff --git a/scripts/commit_version.py b/scripts/commit_version.py index fa33fd37..6051c967 100644 --- a/scripts/commit_version.py +++ b/scripts/commit_version.py @@ -6,7 +6,6 @@ import subprocess import sys - PROJECT_DIRECTORY = os.path.join(os.path.dirname(__file__), "..") @@ -17,7 +16,7 @@ def main(argv): with open(history_path, "r") as f: history = f.read() today = datetime.datetime.today() - today_str = today.strftime('%Y-%m-%d') + today_str = today.strftime("%Y-%m-%d") history = history.replace(".dev0", " (%s)" % today_str) with open(history_path, "w") as f: f.write(history) @@ -25,13 +24,19 @@ def main(argv): source_mod_path = os.path.join(PROJECT_DIRECTORY, source_dir, "__init__.py") with open(source_mod_path, "r") as f: mod = f.read() - mod = re.sub("__version__ = '[\d\.]*\.dev0'", - "__version__ = '%s'" % version, - mod) + mod = re.sub("__version__ = '[\d\.]*\.dev0'", "__version__ = '%s'" % version, mod) with open(source_mod_path, "w") as f: mod = f.write(mod) - shell(["git", "commit", "-m", "Version %s" % version, - "HISTORY.rst", "%s/__init__.py" % source_dir]) + shell( + [ + "git", + "commit", + "-m", + "Version %s" % version, + "HISTORY.rst", + "%s/__init__.py" % source_dir, + ] + ) shell(["git", "tag", version]) diff --git a/scripts/new_version.py b/scripts/new_version.py index 9ac9dbb5..f7d63cd9 100644 --- a/scripts/new_version.py +++ b/scripts/new_version.py @@ -6,7 +6,6 @@ import sys from distutils.version import StrictVersion - PROJECT_DIRECTORY = os.path.join(os.path.dirname(__file__), "..") @@ -33,27 +32,37 @@ def main(argv): def extend(from_str, line): from_str += "\n" - return history.replace(from_str, from_str + line + "\n" ) + return history.replace(from_str, from_str + line + "\n") - history = extend(".. to_doc", """ + history = extend( + ".. 
to_doc", + """ --------------------- %s.dev0 --------------------- - """ % new_version) + """ + % new_version, + ) with open(history_path, "w") as f: f.write(history) source_mod_path = os.path.join(PROJECT_DIRECTORY, source_dir, "__init__.py") with open(source_mod_path, "r") as f: mod = f.read() - mod = re.sub("__version__ = '[\d\.]+'", - "__version__ = '%s.dev0'" % new_version, - mod, 1) + mod = re.sub("__version__ = '[\d\.]+'", "__version__ = '%s.dev0'" % new_version, mod, 1) with open(source_mod_path, "w") as f: mod = f.write(mod) - shell(["git", "commit", "-m", "Starting work on %s" % new_version, - "HISTORY.rst", "%s/__init__.py" % source_dir]) + shell( + [ + "git", + "commit", + "-m", + "Starting work on %s" % new_version, + "HISTORY.rst", + "%s/__init__.py" % source_dir, + ] + ) def shell(cmds, **kwds): diff --git a/scripts/print_version_for_release.py b/scripts/print_version_for_release.py index 7ee52cc1..289e3cb9 100644 --- a/scripts/print_version_for_release.py +++ b/scripts/print_version_for_release.py @@ -1,16 +1,16 @@ from __future__ import print_function -from distutils.version import LooseVersion + import ast import re import sys +from distutils.version import LooseVersion source_dir = sys.argv[1] -_version_re = re.compile(r'__version__\s+=\s+(.*)') +_version_re = re.compile(r"__version__\s+=\s+(.*)") -with open('%s/__init__.py' % source_dir, 'rb') as f: - version = str(ast.literal_eval(_version_re.search( - f.read().decode('utf-8')).group(1))) +with open("%s/__init__.py" % source_dir, "rb") as f: + version = str(ast.literal_eval(_version_re.search(f.read().decode("utf-8")).group(1))) # Strip .devN version_tuple = LooseVersion(version).version[0:3] diff --git a/setup.py b/setup.py index 19506109..5b1f7bc8 100644 --- a/setup.py +++ b/setup.py @@ -4,18 +4,22 @@ import ast import os import re -from setuptools import setup, find_packages + +from setuptools import ( + find_packages, + setup, +) SOURCE_DIR = "src/ephemeris" -_version_re = re.compile(r'__version__\s+=\s+(.*)') +_version_re = re.compile(r"__version__\s+=\s+(.*)") -with open('%s/__init__.py' % SOURCE_DIR, 'rb') as f: - init_contents = f.read().decode('utf-8') +with open("%s/__init__.py" % SOURCE_DIR, "rb") as f: + init_contents = f.read().decode("utf-8") def get_var(var_name): - pattern = re.compile(r'%s\s+=\s+(.*)' % var_name) + pattern = re.compile(r"%s\s+=\s+(.*)" % var_name) match = pattern.search(init_contents).group(1) return str(ast.literal_eval(match)) @@ -25,9 +29,9 @@ def get_var(var_name): PROJECT_AUTHOR = get_var("PROJECT_AUTHOR") PROJECT_EMAIL = get_var("PROJECT_EMAIL") -TEST_DIR = 'tests' -PROJECT_DESCRIPTION = 'Ephemeris is an opinionated library and set of scripts for managing the bootstrapping of Galaxy project plugins - tools, index data, and workflows.' -ENTRY_POINTS = ''' +TEST_DIR = "tests" +PROJECT_DESCRIPTION = "Ephemeris is an opinionated library and set of scripts for managing the bootstrapping of Galaxy project plugins - tools, index data, and workflows." +ENTRY_POINTS = """ [console_scripts] get-tool-list=ephemeris.get_tool_list_from_galaxy:main shed-tools=ephemeris.shed_tools:main @@ -39,7 +43,7 @@ def get_var(var_name): install_tool_deps=ephemeris.install_tool_deps:main install-tool-deps=ephemeris.install_tool_deps:main set-library-permissions=ephemeris.set_library_permissions:main -''' +""" PACKAGE_DATA = { # Be sure to update MANIFEST.in for source dist. 
 }
 
@@ -47,8 +51,8 @@ def get_var(var_name):
     SOURCE_DIR: SOURCE_DIR,
 }
 
-readme = open('README.rst').read()
-history = open('HISTORY.rst').read().replace('.. :changelog:', '')
+readme = open("README.rst").read()
+history = open("HISTORY.rst").read().replace(".. :changelog:", "")
 
 if os.path.exists("requirements.txt"):
     requirements = open("requirements.txt").read().split("\n")
@@ -66,36 +70,36 @@ def get_var(var_name):
     name=PROJECT_NAME,
     version=version,
     description=PROJECT_DESCRIPTION,
-    long_description=readme + '\n\n' + history,
+    long_description=readme + "\n\n" + history,
     author=PROJECT_AUTHOR,
     author_email=PROJECT_EMAIL,
     url=PROJECT_URL,
-    packages=find_packages('src'),
+    packages=find_packages("src"),
     entry_points=ENTRY_POINTS,
     package_data=PACKAGE_DATA,
-    package_dir={'': 'src'},
+    package_dir={"": "src"},
     include_package_data=True,
     install_requires=requirements,
     license="AFL",
     zip_safe=False,
     python_requires=">=3.7",
-    keywords='galaxy',
+    keywords="galaxy",
     classifiers=[
-        'Development Status :: 5 - Production/Stable',
-        'Intended Audience :: Developers',
-        'Environment :: Console',
-        'License :: OSI Approved :: Academic Free License (AFL)',
-        'Operating System :: POSIX',
-        'Topic :: Software Development',
-        'Topic :: Software Development :: Code Generators',
-        'Topic :: Software Development :: Testing',
-        'Natural Language :: English',
+        "Development Status :: 5 - Production/Stable",
+        "Intended Audience :: Developers",
+        "Environment :: Console",
+        "License :: OSI Approved :: Academic Free License (AFL)",
+        "Operating System :: POSIX",
+        "Topic :: Software Development",
+        "Topic :: Software Development :: Code Generators",
+        "Topic :: Software Development :: Testing",
+        "Natural Language :: English",
         "Programming Language :: Python :: 3",
-        'Programming Language :: Python :: 3.7',
-        'Programming Language :: Python :: 3.8',
-        'Programming Language :: Python :: 3.9',
-        'Programming Language :: Python :: 3.10',
+        "Programming Language :: Python :: 3.7",
+        "Programming Language :: Python :: 3.8",
+        "Programming Language :: Python :: 3.9",
+        "Programming Language :: Python :: 3.10",
     ],
     test_suite=TEST_DIR,
-    tests_require=test_requirements
+    tests_require=test_requirements,
 )
diff --git a/src/ephemeris/__init__.py b/src/ephemeris/__init__.py
index 51910180..2a1d461d 100644
--- a/src/ephemeris/__init__.py
+++ b/src/ephemeris/__init__.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
 import yaml
 from bioblend import galaxy
 
@@ -10,18 +8,13 @@
 PROJECT_URL = "https://github.com/galaxyproject/ephemeris"
 PROJECT_AUTHOR = "Galaxy Project and Community"
 PROJECT_EMAIL = "jmchilton@gmail.com"
-RAW_CONTENT_URL = "https://raw.github.com/%s/%s/master/" % (
-    PROJECT_USERAME,
-    PROJECT_NAME,
-)
+RAW_CONTENT_URL = f"https://raw.github.com/{PROJECT_USERAME}/{PROJECT_NAME}/master/"
 
 
 def check_url(url, log=None):
     if not url.startswith("http"):
         if log:
-            log.warning(
-                "URL should start with http:// or https://. https:// chosen by default."
-            )
+            log.warning("URL should start with http:// or https://. https:// chosen by default.")
https:// chosen by default.") url = "https://" + url return url @@ -42,24 +35,20 @@ def get_galaxy_connection(args, file=None, log=None, login_required=True): api_key = args.api_key or file_content.get("api_key") if args.user and args.password: - return galaxy.GalaxyInstance( - url=galaxy_url, email=args.user, password=args.password - ) + return galaxy.GalaxyInstance(url=galaxy_url, email=args.user, password=args.password) elif api_key: return galaxy.GalaxyInstance(url=galaxy_url, key=api_key) elif not login_required: return galaxy.GalaxyInstance(url=galaxy_url) else: - raise ValueError( - "Missing api key or user & password combination, in order to make a galaxy connection." - ) + raise ValueError("Missing api key or user & password combination, in order to make a galaxy connection.") def load_yaml_file(filename): """ Load YAML from the `tool_list_file` and return a dict with the content. """ - with open(filename, "r") as f: + with open(filename) as f: dictionary = yaml.safe_load(f) return dictionary diff --git a/src/ephemeris/common_parser.py b/src/ephemeris/common_parser.py index aa69ad54..cf56a753 100644 --- a/src/ephemeris/common_parser.py +++ b/src/ephemeris/common_parser.py @@ -16,23 +16,22 @@ class RawDescriptionHideUnderscoresHelpFormatter(HideUnderscoresHelpFormatter, a pass -class ArgumentDefaultsHideUnderscoresHelpFormatter(HideUnderscoresHelpFormatter, argparse.ArgumentDefaultsHelpFormatter): +class ArgumentDefaultsHideUnderscoresHelpFormatter( + HideUnderscoresHelpFormatter, argparse.ArgumentDefaultsHelpFormatter +): pass def get_common_args(login_required=True, log_file=False): parser = argparse.ArgumentParser(add_help=False) general_group = parser.add_argument_group("General options") - general_group.add_argument( - "-v", "--verbose", help="Increase output verbosity.", action="store_true" - ) + general_group.add_argument("-v", "--verbose", help="Increase output verbosity.", action="store_true") if log_file: general_group.add_argument( "--log-file", "--log_file", dest="log_file", - help="Where the log file should be stored. " - "Default is a file in your system's temp folder", + help="Where the log file should be stored. " "Default is a file in your system's temp folder", default=None, ) diff --git a/src/ephemeris/ephemeris_log.py b/src/ephemeris/ephemeris_log.py index c5d8f2b7..626ee1a7 100644 --- a/src/ephemeris/ephemeris_log.py +++ b/src/ephemeris/ephemeris_log.py @@ -59,5 +59,5 @@ def setup_global_logger(name, log_file=None): log_file = temp.name file_handler = logging.FileHandler(log_file) logger.addHandler(file_handler) - logger.info("Storing log file in: {0}".format(log_file)) + logger.info(f"Storing log file in: {log_file}") return logger diff --git a/src/ephemeris/generate_tool_list_from_ga_workflow_files.py b/src/ephemeris/generate_tool_list_from_ga_workflow_files.py index 7d1dc0f9..8f1c3bb2 100644 --- a/src/ephemeris/generate_tool_list_from_ga_workflow_files.py +++ b/src/ephemeris/generate_tool_list_from_ga_workflow_files.py @@ -49,24 +49,19 @@ def _parser(): "--panel_label", dest="panel_label", default="Tools from workflows", - help="The name of the panel where the tools will show up in Galaxy." - 'If not specified: "Tools from workflows"', + help="The name of the panel where the tools will show up in Galaxy." 
     )
     return parser
 
 
 def get_workflow_dictionary(json_file):
-    with open(json_file, "r") as File:
+    with open(json_file) as File:
         mydict = json.load(File)
     return mydict
 
 
-def translate_workflow_dictionary_to_tool_list(
-    workflow_dictionary, panel_label: str
-) -> List[InstallRepoDict]:
-    starting_tool_list = extract_tool_shed_repositories_from_workflow_dict(
-        workflow_dictionary
-    )
+def translate_workflow_dictionary_to_tool_list(workflow_dictionary, panel_label: str) -> List[InstallRepoDict]:
+    starting_tool_list = extract_tool_shed_repositories_from_workflow_dict(workflow_dictionary)
     tool_list: List[InstallRepoDict] = []
     for tool in starting_tool_list:
         sub_dic: InstallRepoDict = {
@@ -85,9 +80,7 @@ def extract_tool_shed_repositories_from_workflow_dict(workflow_dictionary):
     for step in workflow_dictionary["steps"].values():
         subworkflow = step.get("subworkflow")
         if subworkflow:
-            tool_list.extend(
-                extract_tool_shed_repositories_from_workflow_dict(subworkflow)
-            )
+            tool_list.extend(extract_tool_shed_repositories_from_workflow_dict(subworkflow))
         tsr = step.get("tool_shed_repository")
         if tsr:
             tool_list.append(tsr)
@@ -119,8 +112,7 @@ def reduce_tool_list(tool_list: List[InstallRepoDict]) -> List[InstallRepoDict]:
             if (
                 tool["name"] == current_tool["name"]
                 and tool["owner"] == current_tool["owner"]
-                and tool["tool_panel_section_label"]
-                == current_tool["tool_panel_section_label"]
+                and tool["tool_panel_section_label"] == current_tool["tool_panel_section_label"]
                 and tool["tool_shed_url"] == current_tool["tool_shed_url"]
             ):
                 current_tool["revisions"].extend(tool["revisions"])
@@ -129,38 +121,26 @@ def reduce_tool_list(tool_list: List[InstallRepoDict]) -> List[InstallRepoDict]:
     return tool_list
 
 
-def generate_repo_list_from_workflow(
-    workflow_files: Iterable[str], panel_label: str
-) -> List[InstallRepoDict]:
+def generate_repo_list_from_workflow(workflow_files: Iterable[str], panel_label: str) -> List[InstallRepoDict]:
     intermediate_tool_list: List[InstallRepoDict] = []
     for workflow in workflow_files:
         workflow_dictionary = get_workflow_dictionary(workflow)
-        intermediate_tool_list += translate_workflow_dictionary_to_tool_list(
-            workflow_dictionary, panel_label
-        )
+        intermediate_tool_list += translate_workflow_dictionary_to_tool_list(workflow_dictionary, panel_label)
     return reduce_tool_list(intermediate_tool_list)
 
 
-def generate_tool_list_from_workflow(
-    workflow_files: Iterable[str], panel_label: str, output_file: str
-):
+def generate_tool_list_from_workflow(workflow_files: Iterable[str], panel_label: str, output_file: str):
     """
     :rtype: object
     """
-    convert_dict = {
-        "tools": generate_repo_list_from_workflow(
-            workflow_files=workflow_files, panel_label=panel_label
-        )
-    }
+    convert_dict = {"tools": generate_repo_list_from_workflow(workflow_files=workflow_files, panel_label=panel_label)}
     print_yaml_tool_list(convert_dict, output_file)
 
 
 def main(argv=None):
     options = _parser().parse_args(argv)
-    generate_tool_list_from_workflow(
-        options.workflow_files, options.panel_label, options.output_file
-    )
+    generate_tool_list_from_workflow(options.workflow_files, options.panel_label, options.output_file)
 
 
 if __name__ == "__main__":
diff --git a/src/ephemeris/get_tool_list_from_galaxy.py b/src/ephemeris/get_tool_list_from_galaxy.py
index d92b25e6..4d8578bf 100644
--- a/src/ephemeris/get_tool_list_from_galaxy.py
+++ b/src/ephemeris/get_tool_list_from_galaxy.py
@@ -41,9 +41,7 @@ def handle_tool(tool_elem):
         if tsr["name"] != name or tsr["owner"] != owner:
             return
-        if tool_shed_url and format_tool_shed_url(
-            tsr["tool_shed"]
-        ) != format_tool_shed_url(tool_shed_url):
+        if tool_shed_url and format_tool_shed_url(tsr["tool_shed"]) != format_tool_shed_url(tool_shed_url):
             return
 
         if changeset_revision and changeset_revision != tsr["changeset_revision"]:
@@ -135,15 +133,9 @@ def record_repo(tool_elem):
                     tool_panel_section_id = None
                     tool_panel_section_label = None
                     for repo_with_panel in tools_with_panel:
-                        if the_same_repository(
-                            repo_with_panel, repo, check_revision=False
-                        ):
-                            tool_panel_section_id = repo_with_panel.get(
-                                "tool_panel_section_id"
-                            )
-                            tool_panel_section_label = repo_with_panel.get(
-                                "tool_panel_section_label"
-                            )
+                        if the_same_repository(repo_with_panel, repo, check_revision=False):
+                            tool_panel_section_id = repo_with_panel.get("tool_panel_section_id")
+                            tool_panel_section_label = repo_with_panel.get("tool_panel_section_label")
                             break
                 repositories.append(
                     dict(
@@ -189,17 +181,11 @@ def the_same_repository(repo_1_info, repo_2_info, check_revision=True):
     (either `tool_shed` or `tool_shed_url`).
     """
     # Sort from most unique to least unique for fast comparison.
-    if not check_revision or repo_1_info.get("changeset_revision") == repo_2_info.get(
-        "changeset_revision"
-    ):
+    if not check_revision or repo_1_info.get("changeset_revision") == repo_2_info.get("changeset_revision"):
         if repo_1_info.get("name") == repo_2_info.get("name"):
             if repo_1_info.get("owner") == repo_2_info.get("owner"):
-                t1ts = repo_1_info.get(
-                    "tool_shed", repo_1_info.get("tool_shed_url", None)
-                )
-                t2ts = repo_2_info.get(
-                    "tool_shed", repo_2_info.get("tool_shed_url", None)
-                )
+                t1ts = repo_1_info.get("tool_shed", repo_1_info.get("tool_shed_url", None))
+                t2ts = repo_2_info.get("tool_shed", repo_2_info.get("tool_shed_url", None))
                 if t1ts in t2ts or t2ts in t1ts:
                     return True
     return False
@@ -244,9 +230,7 @@ def merge_repository_changeset_revisions(repository_list):
 def _parser():
     """Creates the parser object."""
     parent = get_common_args(login_required=True)
-    parser = ArgumentParser(
-        parents=[parent], formatter_class=ArgumentDefaultsHideUnderscoresHelpFormatter
-    )
+    parser = ArgumentParser(parents=[parent], formatter_class=ArgumentDefaultsHideUnderscoresHelpFormatter)
     parser.add_argument(
         "-o",
         "--output-file",
@@ -286,8 +270,7 @@ def _parser():
         "--get-all-tools",
         "--get_all_tools",
         action="store_true",
-        help="Get all tools and revisions, not just those which are present on the web ui."
-        "Requires login details.",
+        help="Get all tools and revisions, not just those which are present on the web ui." "Requires login details.",
"Requires login details.", ) return parser diff --git a/src/ephemeris/install_tool_deps.py b/src/ephemeris/install_tool_deps.py index b1f632d4..8e776e13 100644 --- a/src/ephemeris/install_tool_deps.py +++ b/src/ephemeris/install_tool_deps.py @@ -27,9 +27,7 @@ def _parser(): help="Path to a tool file, tool_conf file, or yaml file containing a sequence of tool ids", nargs="*", ) - parser.add_argument( - "-i", "--id", help="Space-separated list of tool ids", nargs="*" - ) + parser.add_argument("-i", "--id", help="Space-separated list of tool ids", nargs="*") return parser @@ -70,35 +68,24 @@ def main(argv=None): os.path.abspath(os.path.dirname(tool_conf_path)), ) if tool_path: - log.info("Searching for tools relative to " + tool_path) + log.info("Searching for tools relative to %s", tool_path) tools = root.findall(".//tool[@file]") if len(tools) == 0: log.warning("No tools found in tool_conf") continue for tool in tools: - tool_id = ( - ET.ElementTree( - file=os.path.join(tool_path, tool.get("file")) - ) - .getroot() - .get("id") - ) + tool_id = ET.ElementTree(file=os.path.join(tool_path, tool.get("file"))).getroot().get("id") if tool_id: log.info( - "Installing tool dependencies for " - + tool_id - + " from: " - + tool.get("file") + "Installing tool dependencies for %s from: %s", + tool_id, + tool.get("file"), ) _install(tool_client, tool_id) elif root.tag == "tool" and root.get("id"): # Install from single tool file - log.info( - "Tool xml found. Installing " - + root.get("id") - + " dependencies.." - ) + log.info("Tool xml found. Installing %s dependencies", root.get("id")) _install(tool_client, root.get("id")) else: log.info("YAML tool list found, parsing..") @@ -106,12 +93,12 @@ def main(argv=None): tool_ids = yaml.safe_load(fh) for tool_id in tool_ids: # Install from yaml file - log.info("Installing " + tool_id + " dependencies..") + log.info("Installing %s dependencies..", tool_id) _install(tool_client, tool_id) if args.id: for tool_id in args.id: # type: str - log.info("Installing " + tool_id + " dependencies..") + log.info("Installing %s dependencies..", tool_id) _install(tool_client, tool_id.strip()) diff --git a/src/ephemeris/run_data_managers.py b/src/ephemeris/run_data_managers.py index dd101662..d89268ff 100644 --- a/src/ephemeris/run_data_managers.py +++ b/src/ephemeris/run_data_managers.py @@ -81,11 +81,7 @@ def wait(gi, job_list, log): job_hid=job_hid, **job_details ) ) - log.debug( - "Job {job_hid}: Tool '{tool_id}' stdout: {stdout}".format( - job_hid=job_hid, **job_details - ) - ) + log.debug("Job {job_hid}: Tool '{tool_id}' stdout: {stdout}".format(job_hid=job_hid, **job_details)) failed_jobs.append(job) finished_jobs.append(job) else: @@ -197,9 +193,7 @@ def data_table_entry_exists(self, data_table_name, entry, column="value"): try: column_index = data_table_content.get("columns").index(column) except IndexError: - raise IndexError( - 'Column "%s" does not exist in %s' % (column, data_table_name) - ) + raise IndexError(f'Column "{column}" does not exist in {data_table_name}') for field in data_table_content.get("fields"): if field[column_index] == entry: @@ -220,14 +214,10 @@ def input_entries_exist_in_data_tables(self, data_tables, input_dict): # Return False as soon as entry is not present for data_table in data_tables: if value_entry: - if not self.data_table_entry_exists( - data_table, value_entry, column="value" - ): + if not self.data_table_entry_exists(data_table, value_entry, column="value"): return False if name_entry: - if not self.data_table_entry_exists( - 
data_table, name_entry, column="name" - ): + if not self.data_table_entry_exists(data_table, name_entry, column="name"): return False # If all checks are passed the entries are present in the database tables. return True @@ -266,15 +256,13 @@ def run_jobs(jobs, skipped_jobs): for skipped_job in skipped_jobs: if overwrite: log.info( - "%s already run for %s. Entry will be overwritten." - % (skipped_job["tool_id"], skipped_job["inputs"]) + "{} already run for {}. Entry will be overwritten.".format( + skipped_job["tool_id"], skipped_job["inputs"] + ) ) jobs.append(skipped_job) else: - log.info( - "%s already run for %s. Skipping." - % (skipped_job["tool_id"], skipped_job["inputs"]) - ) + log.info("{} already run for {}. Skipping.".format(skipped_job["tool_id"], skipped_job["inputs"])) all_skipped_jobs.append(skipped_job) for job in jobs: started_job = self.tool_client.run_tool( @@ -296,10 +284,7 @@ def run_jobs(jobs, skipped_jobs): all_succesful_jobs.extend(successful_jobs) all_failed_jobs.extend(failed_jobs) - log.info( - "Running data managers that populate the following source data tables: %s" - % self.source_tables - ) + log.info("Running data managers that populate the following source data tables: %s" % self.source_tables) run_jobs(self.fetch_jobs, self.skipped_fetch_jobs) log.info("Running data managers that index sequences.") run_jobs(self.index_jobs, self.skipped_index_jobs) @@ -308,9 +293,7 @@ def run_jobs(jobs, skipped_jobs): log.info("Successful jobs: %i " % len(all_succesful_jobs)) log.info("Skipped jobs: %i " % len(all_skipped_jobs)) log.info("Failed jobs: %i " % len(all_failed_jobs)) - InstallResults = namedtuple( - "InstallResults", ["successful_jobs", "failed_jobs", "skipped_jobs"] - ) + InstallResults = namedtuple("InstallResults", ["successful_jobs", "failed_jobs", "skipped_jobs"]) return InstallResults( successful_jobs=all_succesful_jobs, failed_jobs=all_failed_jobs, diff --git a/src/ephemeris/set_library_permissions.py b/src/ephemeris/set_library_permissions.py index bd198bb0..23492877 100644 --- a/src/ephemeris/set_library_permissions.py +++ b/src/ephemeris/set_library_permissions.py @@ -43,9 +43,7 @@ def set_permissions(gi, library_id, role_ids, auto): ) if auto: for current in range(total): - log.debug( - "Processing dataset %d of %d, ID=%s", current, total, datasets[current] - ) + log.debug("Processing dataset %d of %d, ID=%s", current, total, datasets[current]) gi.libraries.set_dataset_permissions( dataset_id=datasets[current], access_in=role_ids, @@ -80,12 +78,10 @@ def _parser(): parser = argparse.ArgumentParser( parents=[parent], formatter_class=HideUnderscoresHelpFormatter, - description="Populate the Galaxy data library with data." + description="Populate the Galaxy data library with data.", ) parser.add_argument("library", help="Specify the data library ID") - parser.add_argument( - "--roles", nargs="+", help="Specify a list of comma separated role IDs" - ) + parser.add_argument("--roles", nargs="+", help="Specify a list of comma separated role IDs") parser.add_argument( "-y", "--yes", @@ -106,15 +102,11 @@ def _parser(): def main(argv=None): args = _parser().parse_args(argv) if args.user and args.password: - gi = galaxy.GalaxyInstance( - url=args.galaxy, email=args.user, password=args.password - ) + gi = galaxy.GalaxyInstance(url=args.galaxy, email=args.user, password=args.password) elif args.api_key: gi = galaxy.GalaxyInstance(url=args.galaxy, key=args.api_key) else: - sys.exit( - "Please specify either a valid Galaxy username/password or an API key." 
-        )
+        sys.exit("Please specify either a valid Galaxy username/password or an API key.")
 
     if args.verbose:
         log.basicConfig(level=log.DEBUG)
@@ -126,9 +118,7 @@ def main(argv=None):
     if args.roles and args.library:
         args.roles = [r.strip() for r in args.roles.split(",")]
     else:
-        sys.exit(
-            "Specify library ID (--library myLibraryID) and (list of) role(s) (--roles roleId1,roleId2)"
-        )
+        sys.exit("Specify library ID (--library myLibraryID) and (list of) role(s) (--roles roleId1,roleId2)")
     set_permissions(gi, library_id=args.library, role_ids=args.roles, auto=args.yes)
     log.info(
         "\nThis script uses bioblend to update ALL permissions of ALL datasets in a"
diff --git a/src/ephemeris/setup_data_libraries.py b/src/ephemeris/setup_data_libraries.py
index e8066694..68225fe4 100644
--- a/src/ephemeris/setup_data_libraries.py
+++ b/src/ephemeris/setup_data_libraries.py
@@ -18,9 +18,7 @@ def create_legacy(gi, desc):
     destination = desc["destination"]
     if destination["type"] != "library":
-        raise Exception(
-            "Only libraries may be created with pre-18.05 Galaxies using this script."
-        )
+        raise Exception("Only libraries may be created with pre-18.05 Galaxies using this script.")
     library_name = destination.get("name")
     library_description = destination.get("description")
     library_synopsis = destination.get("synopsis")
@@ -42,9 +40,7 @@ def create_legacy(gi, desc):
         print("Library already exists! id: " + str(lib_id))
         folder_id = gi.libraries.show_library(lib_id)["root_folder_id"]
     else:
-        lib = gi.libraries.create_library(
-            library_name, library_description, library_synopsis
-        )
+        lib = gi.libraries.create_library(library_name, library_description, library_synopsis)
         lib_id = lib["id"]
         folder_id = lib["root_folder_id"]
 
@@ -64,21 +60,15 @@ def populate_items(base_folder_id, has_items):
             if rmt_folder_list:
                 folder_id = rmt_folder_list[0]["id"]
             else:
-                folder = gi.libraries.create_folder(
-                    lib_id, name, description, base_folder_id=base_folder_id
-                )
+                folder = gi.libraries.create_folder(lib_id, name, description, base_folder_id=base_folder_id)
                 folder_id = folder[0]["id"]
             for item in has_items["items"]:
                 populate_items(folder_id, item)
         else:
             src = has_items["src"]
             if src != "url":
-                raise Exception(
-                    "For pre-18.05 Galaxies only support URLs src items are supported."
-                )
-            rmt_library_files = gi.folders.show_folder(base_folder_id, contents=True)[
-                "folder_contents"
-            ]
+                raise Exception("For pre-18.05 Galaxies only support URLs src items are supported.")
+            rmt_library_files = gi.folders.show_folder(base_folder_id, contents=True)["folder_contents"]
             file_names = []
             for item in rmt_library_files:
                 if item["type"] == "file":
@@ -163,9 +153,7 @@ def normalize_items(has_items):
 
     if training:
         destination["name"] = destination.get("name", "Training Data")
-        destination["description"] = destination.get(
-            "description", "Data pulled from online archives."
-        )
+        destination["description"] = destination.get("description", "Data pulled from online archives.")
     else:
         destination["name"] = destination.get("name", "New Data Library")
         destination["description"] = destination.get("description", "")
@@ -191,17 +179,10 @@ def normalize_items(has_items):
                     job_ids.append(subjob["id"])
 
         while True:
-            job_states = [
-                jc.get_state(job) in ("ok", "error", "deleted") for job in job_ids
-            ]
+            job_states = [jc.get_state(job) in ("ok", "error", "deleted") for job in job_ids]
             log.debug(
                 "Job states: %s"
-                % ",".join(
-                    [
-                        "%s=%s" % (job_id, job_state)
-                        for (job_id, job_state) in zip(job_ids, job_states)
-                    ]
-                )
+                % ",".join([f"{job_id}={job_state}" for (job_id, job_state) in zip(job_ids, job_states)])
             )
 
             if all(job_states):
@@ -217,7 +198,7 @@ def _parser():
     parser = argparse.ArgumentParser(
         parents=[parent],
         formatter_class=HideUnderscoresHelpFormatter,
-        description="Populate the Galaxy data library with data."
+        description="Populate the Galaxy data library with data.",
     )
     parser.add_argument("-i", "--infile", required=True, type=argparse.FileType("r"))
     parser.add_argument(
@@ -238,15 +219,11 @@ def _parser():
 def main(argv=None):
     args = _parser().parse_args(argv)
     if args.user and args.password:
-        gi = galaxy.GalaxyInstance(
-            url=args.galaxy, email=args.user, password=args.password
-        )
+        gi = galaxy.GalaxyInstance(url=args.galaxy, email=args.user, password=args.password)
     elif args.api_key:
         gi = galaxy.GalaxyInstance(url=args.galaxy, key=args.api_key)
     else:
-        sys.exit(
-            "Please specify either a valid Galaxy username/password or an API key."
-        )
+        sys.exit("Please specify either a valid Galaxy username/password or an API key.")
 
     if args.verbose:
         log.basicConfig(level=log.DEBUG)
diff --git a/src/ephemeris/shed_tools.py b/src/ephemeris/shed_tools.py
index 9754c03d..8807fc07 100644
--- a/src/ephemeris/shed_tools.py
+++ b/src/ephemeris/shed_tools.py
@@ -136,9 +136,7 @@ def installed_repositories(self) -> List[InstallRepoDict]:
             get_all_tools=True,
         ).tool_list.get("tools")
 
-    def filter_installed_repos(
-        self, repos: Iterable[InstallRepoDict], check_revision: bool = True
-    ) -> FilterResults:
+    def filter_installed_repos(self, repos: Iterable[InstallRepoDict], check_revision: bool = True) -> FilterResults:
         """This filters a list of repositories"""
         not_installed_repos: List[InstallRepoDict] = []
         already_installed_repos: List[InstallRepoDict] = []
@@ -185,11 +183,7 @@ def install_repositories(
             for key in repo.keys():
                 if key not in VALID_KEYS and key != "revisions":
                     if log:
-                        log.warning(
-                            "'{0}' not a valid key. Will be skipped during parsing".format(
-                                key
-                            )
-                        )
+                        log.warning(f"'{key}' not a valid key. Will be skipped during parsing")
Will be skipped during parsing") # Start by flattening the repo list per revision flattened_repos = flatten_repo_info(repositories) @@ -221,9 +215,7 @@ def install_repositories( for skipped_repo in filtered_repos.already_installed_repos: counter += 1 if log: - log_repository_install_skip( - skipped_repo, counter, total_num_repositories, log - ) + log_repository_install_skip(skipped_repo, counter, total_num_repositories, log) skipped_repositories.append(skipped_repo) # Install repos @@ -248,36 +240,25 @@ def install_repositories( # Log results if log: log.info( - "Installed repositories ({0}): {1}".format( + "Installed repositories ({}): {}".format( len(installed_repositories), - [ - (t["name"], t.get("changeset_revision")) - for t in installed_repositories - ], + [(t["name"], t.get("changeset_revision")) for t in installed_repositories], ) ) log.info( - "Skipped repositories ({0}): {1}".format( + "Skipped repositories ({}): {}".format( len(skipped_repositories), - [ - (t["name"], t.get("changeset_revision")) - for t in skipped_repositories - ], + [(t["name"], t.get("changeset_revision")) for t in skipped_repositories], ) ) log.info( - "Errored repositories ({0}): {1}".format( + "Errored repositories ({}): {}".format( len(errored_repositories), - [ - (t["name"], t.get("changeset_revision", "")) - for t in errored_repositories - ], + [(t["name"], t.get("changeset_revision", "")) for t in errored_repositories], ) ) log.info("All repositories have been installed.") - log.info( - "Total run time: {0}".format(dt.datetime.now() - installation_start) - ) + log.info(f"Total run time: {dt.datetime.now() - installation_start}") return InstallResults( installed_repositories=installed_repositories, skipped_repositories=skipped_repositories, @@ -288,20 +269,16 @@ def update_repositories(self, repositories=None, log=None, **kwargs): if not repositories: # Repositories None or empty list repositories = self.installed_repositories() else: - filtered_repos = self.filter_installed_repos( - repositories, check_revision=False - ) + filtered_repos = self.filter_installed_repos(repositories, check_revision=False) if filtered_repos.not_installed_repos: if log: log.warning( - "The following tools are not installed and will not be upgraded: {0}".format( + "The following tools are not installed and will not be upgraded: {}".format( filtered_repos.not_installed_repos ) ) repositories = filtered_repos.already_installed_repos - return self.install_repositories( - repositories, force_latest_revision=True, log=log, **kwargs - ) + return self.install_repositories(repositories, force_latest_revision=True, log=log, **kwargs) def test_tools( self, @@ -329,26 +306,20 @@ def test_tools( installed_tools = [] for target_repository in target_repositories: - repo_tools = tools_for_repository( - self.gi, target_repository, all_tools=test_all_versions - ) + repo_tools = tools_for_repository(self.gi, target_repository, all_tools=test_all_versions) installed_tools.extend(repo_tools) all_test_results = [] galaxy_interactor = self._get_interactor(test_user, test_user_api_key) if client_test_config_path is not None: - with open(client_test_config_path, "r") as f: + with open(client_test_config_path) as f: client_test_config_dict = yaml.full_load(f) - client_test_config = DictClientTestConfig( - client_test_config_dict.get("tools") - ) + client_test_config = DictClientTestConfig(client_test_config_dict.get("tools")) else: client_test_config = None if test_history_name: - for history in self.gi.histories.get_histories( - 
-                name=test_history_name, deleted=False
-            ):
+            for history in self.gi.histories.get_histories(name=test_history_name, deleted=False):
                 test_history = history["id"]
                 log.debug(
                     "Using existing history with id '%s', last updated: %s",
                     test_history,
                     history["update_time"],
                 )
                 break
             else:
-                test_history = galaxy_interactor.new_history(
-                    history_name=test_history_name
-                )
+                test_history = galaxy_interactor.new_history(history_name=test_history_name)
         else:
             test_history = galaxy_interactor.new_history()
@@ -401,21 +370,9 @@ def test_tools(
                 json.dump(report_obj, f)
         if log:
             log.info("Report written to '%s'", os.path.abspath(test_json))
-            log.info(
-                "Passed tool tests ({0}): {1}".format(
-                    n_passed, [t for t in tests_passed]
-                )
-            )
-            log.info(
-                "Failed tool tests ({0}): {1}".format(
-                    n_failed, [t[0] for t in test_exceptions]
-                )
-            )
-            log.info(
-                "Total tool test time: {0}".format(
-                    dt.datetime.now() - tool_test_start
-                )
-            )
+            log.info(f"Passed tool tests ({n_passed}): {[t for t in tests_passed]}")
+            log.info(f"Failed tool tests ({n_failed}): {[t[0] for t in test_exceptions]}")
+            log.info(f"Total tool test time: {dt.datetime.now() - tool_test_start}")
 
     def _get_interactor(self, test_user, test_user_api_key):
         if test_user_api_key is None:
@@ -458,9 +415,7 @@ def _test_tool(
         if tool_version:
             label_base += "/" + str(tool_version)
         try:
-            tool_test_dicts = galaxy_interactor.get_tool_tests(
-                tool_id, tool_version=tool_version
-            )
+            tool_test_dicts = galaxy_interactor.get_tool_tests(tool_id, tool_version=tool_version)
         except Exception as e:
             if log:
                 log.warning(
                     exc_info=True,
                 )
             test_exceptions.append((label_base, e))
-            Results = namedtuple(
-                "Results", ["tool_test_results", "tests_passed", "test_exceptions"]
-            )
+            Results = namedtuple("Results", ["tool_test_results", "tests_passed", "test_exceptions"])
             return Results(
                 tool_test_results=tool_test_results,
                 tests_passed=tests_passed,
@@ -516,10 +469,7 @@ def register(job_data):
                 executor.submit(run_test, test_index, test_id)
 
     def install_repository_revision(self, repository: InstallRepoDict, log):
-        default_err_msg = (
-            "All repositories that you are attempting to install "
-            "have been previously installed."
-        )
+        default_err_msg = "All repositories that you are attempting to install " "have been previously installed."
         start = dt.datetime.now()
         try:
             response = self.tool_shed_client.install_repository_revision(
@@ -541,23 +491,18 @@ def install_repository_revision(self, repository: InstallRepoDict, log):
                 # installed, possibly because the selected repository has
                 # already been installed.'}
                 if log:
-                    log.debug(
-                        "\tRepository {0} is already installed.".format(
-                            repository["name"]
-                        )
-                    )
+                    log.debug("\tRepository {} is already installed.".format(repository["name"]))
             if log:
-                log_repository_install_success(
-                    repository=repository, start=start, log=log
-                )
+                log_repository_install_success(repository=repository, start=start, log=log)
             return "installed"
         except (ConnectionError, requests.exceptions.ConnectionError) as e:
             if default_err_msg in unicodify(e):
                 # THIS SHOULD NOT HAPPEN DUE TO THE CHECKS EARLIER
                 if log:
                     log.debug(
-                        "\tRepository %s already installed (at revision %s)"
-                        % (repository["name"], repository["changeset_revision"])
+                        "\tRepository {} already installed (at revision {})".format(
+                            repository["name"], repository["changeset_revision"]
+                        )
                     )
                 return "skipped"
             elif "504" in unicodify(e) or "Connection aborted" in unicodify(e):
@@ -566,25 +511,27 @@ def install_repository_revision(self, repository: InstallRepoDict, log):
                     "Timeout during install of %s, extending wait to 1h",
                     repository["name"],
                 )
-                success = self.wait_for_install(
-                    repository=repository, log=log, timeout=3600
-                )
+                success = self.wait_for_install(repository=repository, log=log, timeout=3600)
                 if success:
                     if log:
-                        log_repository_install_success(
-                            repository=repository, start=start, log=log
-                        )
+                        log_repository_install_success(repository=repository, start=start, log=log)
                     return "installed"
                 else:
                     if log:
                         log_repository_install_error(
-                            repository=repository, start=start, msg=getattr(e, "body", unicodify(e)), log=log
+                            repository=repository,
+                            start=start,
+                            msg=getattr(e, "body", unicodify(e)),
+                            log=log,
                         )
                     return "error"
             else:
                 if log:
                     log_repository_install_error(
-                        repository=repository, start=start, msg=getattr(e, "body", unicodify(e)), log=log
+                        repository=repository,
+                        start=start,
+                        msg=getattr(e, "body", unicodify(e)),
+                        log=log,
                     )
                 return "error"
 
@@ -601,15 +548,8 @@ def wait_for_install(self, repository, log=None, timeout=3600):
         owner = repository["owner"]
         changeset_revision = repository["changeset_revision"]
         installed_repos = self.tool_shed_client.get_repositories()
-        filtered_repos = [
-            r for r in installed_repos if r["name"] == name and r["owner"] == owner
-        ]
-        assert (
-            filtered_repos
-        ), "Repository '%s' from owner '%s' not in list of repositories." % (
-            name,
-            owner,
-        )
+        filtered_repos = [r for r in installed_repos if r["name"] == name and r["owner"] == owner]
+        assert filtered_repos, f"Repository '{name}' from owner '{owner}' not in list of repositories."
         # Check if exact repository revision in filtered_repos
         installing_repo_id = None
         for repo in filtered_repos:
             if repo["changeset_revision"] == changeset_revision:
                 installing_repo_id = repo["id"]
                 break
         else:
             # Galaxy may have decided to install a newer repository revision. We now try to guess which repository that is.
-            non_terminal = [
-                r
-                for r in filtered_repos
-                if r["status"] in NON_TERMINAL_REPOSITORY_STATES
-            ]
+            non_terminal = [r for r in filtered_repos if r["status"] in NON_TERMINAL_REPOSITORY_STATES]
             if len(non_terminal) == 1:
                 # Unambiguous, we wait for this repo
                 installing_repo_id = non_terminal[0]["id"]
@@ -633,15 +569,11 @@ def wait_for_install(self, repository, log=None, timeout=3600):
                 # Raise an exception and continue with the remaining repos.
                 msg = "Could not track repository for name '%s', owner '%s', revision '%s'. "
                 msg += "Please uninstall all non-terminal repositories and ensure revision '%s' is installable."
-                raise AssertionError(
-                    msg % (name, owner, changeset_revision, changeset_revision)
-                )
+                raise AssertionError(msg % (name, owner, changeset_revision, changeset_revision))
         start = dt.datetime.now()
         while (dt.datetime.now() - start) < dt.timedelta(seconds=timeout):
             try:
-                installed_repo = self.tool_shed_client.show_repository(
-                    installing_repo_id
-                )
+                installed_repo = self.tool_shed_client.show_repository(installing_repo_id)
                 status = installed_repo["status"]
                 if status == "Installed":
                     return True
                 elif status == "Error":
                     return False
                 elif status in NON_TERMINAL_REPOSITORY_STATES:
                     time.sleep(10)
                 else:
-                    raise AssertionError(
-                        "Repository name '%s', owner '%s' in unknown status '%s'"
-                        % (name, owner, status)
-                    )
+                    raise AssertionError(f"Repository name '{name}', owner '{owner}' in unknown status '{status}'")
             except ConnectionError as e:
                 if log:
                     log.warning("Failed to get repositories list: %s", unicodify(e))
@@ -667,9 +596,7 @@ def log_repository_install_error(repository, start, msg, log):
     """
     end = dt.datetime.now()
     log.error(
-        "\t* Error installing a repository (after %s seconds)! Name: %s,"
-        "owner: %s, "
-        "revision: %s, error: %s",
+        "\t* Error installing a repository (after %s seconds)! Name: %s," "owner: %s, " "revision: %s, error: %s",
         str(end - start),
         repository.get("name", ""),
         repository.get("owner", ""),
@@ -685,14 +612,15 @@ def log_repository_install_success(repository, start, log):
     """
     end = dt.datetime.now()
     log.debug(
-        "\trepository %s installed successfully (in %s) at revision %s"
-        % (repository["name"], str(end - start), repository["changeset_revision"])
+        "\trepository {} installed successfully (in {}) at revision {}".format(
+            repository["name"], str(end - start), repository["changeset_revision"]
+        )
     )
 
 
 def log_repository_install_skip(repository, counter, total_num_repositories, log):
     log.debug(
-        "({0}/{1}) repository {2} already installed at revision {3}. Skipping.".format(
Skipping.".format( counter, total_num_repositories, repository["name"], @@ -702,17 +630,19 @@ def log_repository_install_skip(repository, counter, total_num_repositories, log def log_repository_install_start( - repository: InstallRepoDict, counter, total_num_repositories, installation_start, log + repository: InstallRepoDict, + counter, + total_num_repositories, + installation_start, + log, ): log.debug( - '(%s/%s) Installing repository %s from %s to section "%s" at revision %s (TRT: %s)' - % ( + '({}/{}) Installing repository {} from {} to section "{}" at revision {} (TRT: {})'.format( counter, total_num_repositories, repository["name"], repository["owner"], - repository.get("tool_panel_section_id") - or repository.get("tool_panel_section_label"), + repository.get("tool_panel_section_id") or repository.get("tool_panel_section_label"), repository.get("changeset_revision"), dt.datetime.now() - installation_start, ) @@ -745,9 +675,7 @@ def main(argv=None): disable_external_library_logging() args = parser().parse_args(argv) log = setup_global_logger(name=__name__, log_file=args.log_file) - gi = get_galaxy_connection( - args, file=args.tool_list_file, log=log, login_required=True - ) + gi = get_galaxy_connection(args, file=args.tool_list_file, log=log, login_required=True) install_repository_manager = InstallRepositoryManager(gi) repos = args_to_repos(args) @@ -761,13 +689,9 @@ def main(argv=None): kwargs = dict( default_install_tool_dependencies=tool_list.get("install_tool_dependencies") or getattr(args, "install_tool_dependencies", False), - default_install_repository_dependencies=tool_list.get( - "install_repository_dependencies" - ) + default_install_repository_dependencies=tool_list.get("install_repository_dependencies") or getattr(args, "install_repository_dependencies", False), - default_install_resolver_dependencies=tool_list.get( - "install_resolver_dependencies" - ) + default_install_resolver_dependencies=tool_list.get("install_resolver_dependencies") or getattr(args, "install_resolver_dependencies", False), ) @@ -775,9 +699,7 @@ def main(argv=None): # Or do testing if the action is `test` install_results = None if args.action == "update": - install_results = install_repository_manager.update_repositories( - repositories=repos, log=log, **kwargs - ) + install_results = install_repository_manager.update_repositories(repositories=repos, log=log, **kwargs) elif args.action == "install": install_results = install_repository_manager.install_repositories( repos, log=log, force_latest_revision=args.force_latest_revision, **kwargs @@ -795,9 +717,7 @@ def main(argv=None): client_test_config_path=args.client_test_config, ) else: - raise NotImplementedError( - "This point in the code should not be reached. Please contact the developers." - ) + raise NotImplementedError("This point in the code should not be reached. Please contact the developers.") # Run tests on the install results if required. if install_results and args.test or args.test_existing: diff --git a/src/ephemeris/shed_tools_args.py b/src/ephemeris/shed_tools_args.py index db51904a..18ec43cb 100644 --- a/src/ephemeris/shed_tools_args.py +++ b/src/ephemeris/shed_tools_args.py @@ -42,8 +42,7 @@ def parser(): # SUBPARSERS install_command_parser = subparsers.add_parser( "install", - help="This installs tools in Galaxy from the Tool Shed." - "Use shed-tools install --help for more information", + help="This installs tools in Galaxy from the Tool Shed." 
"Use shed-tools install --help for more information", formatter_class=HideUnderscoresHelpFormatter, parents=[common_arguments], ) @@ -57,8 +56,7 @@ def parser(): test_command_parser = subparsers.add_parser( "test", - help="This tests the supplied list of tools in Galaxy. " - "Use shed-tools test --help for more information", + help="This tests the supplied list of tools in Galaxy. " "Use shed-tools test --help for more information", formatter_class=HideUnderscoresHelpFormatter, parents=[common_arguments], ) @@ -91,13 +89,11 @@ def parser(): ) command_parser.add_argument( "--name", - help="The name of the tool to install (only applicable " - "if the tools file is not provided).", + help="The name of the tool to install (only applicable " "if the tools file is not provided).", ) command_parser.add_argument( "--owner", - help="The owner of the tool to install (only applicable " - "if the tools file is not provided).", + help="The owner of the tool to install (only applicable " "if the tools file is not provided).", ) command_parser.add_argument( "--revisions", @@ -295,8 +291,7 @@ def parser(): "--client-test-config", "--client_test_config", dest="client_test_config", - help="Annotate expectations about tools in client testing YAML " - "configuration file.", + help="Annotate expectations about tools in client testing YAML " "configuration file.", ) return shed_parser diff --git a/src/ephemeris/shed_tools_methods.py b/src/ephemeris/shed_tools_methods.py index 0995afb2..b1429726 100644 --- a/src/ephemeris/shed_tools_methods.py +++ b/src/ephemeris/shed_tools_methods.py @@ -31,9 +31,7 @@ def complete_repo_information( default_install_resolver_dependencies: bool, force_latest_revision, ) -> "InstallRepoDict": - tool["tool_shed_url"] = format_tool_shed_url( - tool.get("tool_shed_url") or default_toolshed_url - ) + tool["tool_shed_url"] = format_tool_shed_url(tool.get("tool_shed_url") or default_toolshed_url) tool = get_changeset_revisions(tool, force_latest_revision=force_latest_revision) repo: "InstallRepoDict" = dict( name=tool["name"], @@ -44,8 +42,7 @@ def complete_repo_information( or default_install_repository_dependencies, install_resolver_dependencies=tool.get("install_resolver_dependencies") or default_install_resolver_dependencies, - install_tool_dependencies=tool.get("install_tool_dependencies") - or default_install_tool_dependencies, + install_tool_dependencies=tool.get("install_tool_dependencies") or default_install_tool_dependencies, ) # We need those values. 
Throw a KeyError when not present tool_panel_section_label = tool.get("tool_panel_section_label") @@ -66,9 +63,7 @@ def format_tool_shed_url(tool_shed_url: str) -> str: return formatted_tool_shed_url -def get_changeset_revisions( - repository: "InstallRepoDict", force_latest_revision: bool = False -): +def get_changeset_revisions(repository: "InstallRepoDict", force_latest_revision: bool = False): """ Select the correct changeset revision for a repository, and make sure the repository exists @@ -83,9 +78,7 @@ def get_changeset_revisions( repository["name"], repository["owner"] ) if not installable_revisions: # - raise LookupError( - "Repo does not exist in tool shed: {0}".format(repository) - ) + raise LookupError(f"Repo does not exist in tool shed: {repository}") repository["changeset_revision"] = installable_revisions[-1] return repository diff --git a/src/ephemeris/sleep.py b/src/ephemeris/sleep.py index 0b78b9a4..aa3912b4 100755 --- a/src/ephemeris/sleep.py +++ b/src/ephemeris/sleep.py @@ -22,14 +22,10 @@ DEFAULT_SLEEP_WAIT = 1 MESSAGE_KEY_NOT_YET_VALID = "[%02d] Provided key not (yet) valid... %s\n" MESSAGE_INVALID_JSON = "[%02d] No valid json returned... %s\n" -MESSAGE_FETCHING_USER = ( - "[%02d] Connection error fetching user details, exiting with error code. %s\n" -) +MESSAGE_FETCHING_USER = "[%02d] Connection error fetching user details, exiting with error code. %s\n" MESSAGE_KEY_NOT_YET_ADMIN = "[%02d] Provided key not (yet) admin... %s\n" MESSAGE_GALAXY_NOT_YET_UP = "[%02d] Galaxy not up yet... %s\n" -MESSAGE_TIMEOUT = ( - "Failed to contact Galaxy within timeout (%s), exiting with error code.\n" -) +MESSAGE_TIMEOUT = "Failed to contact Galaxy within timeout (%s), exiting with error code.\n" def _parser(): @@ -48,13 +44,17 @@ def _parser(): help="Galaxy startup timeout in seconds. The default value of 0 waits forever", ) parser.add_argument( - "-a", "--api-key", "--api_key", dest="api_key", help="Sleep until key becomes available." + "-a", + "--api-key", + "--api_key", + dest="api_key", + help="Sleep until key becomes available.", ) parser.add_argument("--ensure-admin", "--ensure_admin", default=False, action="store_true") return parser -class SleepCondition(object): +class SleepCondition: def __init__(self): self.sleep = True @@ -72,18 +72,15 @@ def galaxy_wait( ): """Pass user_key to ensure it works before returning.""" if verbose: - sys.stdout.write( - "calling galaxy_wait with timeout=%s ensure_admin=%s\n\n\n" - % (timeout, ensure_admin) - ) + sys.stdout.write(f"calling galaxy_wait with timeout={timeout} ensure_admin={ensure_admin}\n\n\n") sys.stdout.flush() version_url = galaxy_url + "/api/version" if api_key: # adding the key to the URL will ensure Galaxy returns invalid responses until # the key is available. 
-        version_url = "%s?key=%s" % (version_url, api_key)
-        current_user_url = "%s/api/users/current?key=%s" % (galaxy_url, api_key)
+        version_url = f"{version_url}?key={api_key}"
+        current_user_url = f"{galaxy_url}/api/users/current?key={api_key}"
     else:
         assert not ensure_admin
 
@@ -99,24 +96,18 @@ def galaxy_wait(
             result = requests.get(version_url)
             if result.status_code == 403:
                 if verbose:
-                    sys.stdout.write(
-                        MESSAGE_KEY_NOT_YET_VALID % (count, result.__str__())
-                    )
+                    sys.stdout.write(MESSAGE_KEY_NOT_YET_VALID % (count, result.__str__()))
                     sys.stdout.flush()
             else:
                 try:
                     result = result.json()
                     if verbose:
-                        sys.stdout.write(
-                            "Galaxy Version: %s\n" % result["version_major"]
-                        )
+                        sys.stdout.write("Galaxy Version: %s\n" % result["version_major"])
                         sys.stdout.flush()
                     version_obtained = True
                 except ValueError:
                     if verbose:
-                        sys.stdout.write(
-                            MESSAGE_INVALID_JSON % (count, result.__str__())
-                        )
+                        sys.stdout.write(MESSAGE_INVALID_JSON % (count, result.__str__()))
                         sys.stdout.flush()
 
             if version_obtained:
@@ -124,9 +115,7 @@ def galaxy_wait(
                 result = requests.get(current_user_url)
                 if result.status_code != 200:
                     if verbose:
-                        sys.stdout.write(
-                            MESSAGE_FETCHING_USER % (count, result.__str__())
-                        )
+                        sys.stdout.write(MESSAGE_FETCHING_USER % (count, result.__str__()))
                         sys.stdout.flush()
                     return False
 
@@ -139,17 +128,13 @@ def galaxy_wait(
                         break
                     else:
                         if verbose:
-                            sys.stdout.write(
-                                MESSAGE_KEY_NOT_YET_ADMIN % (count, result.__str__())
-                            )
+                            sys.stdout.write(MESSAGE_KEY_NOT_YET_ADMIN % (count, result.__str__()))
                             sys.stdout.flush()
                 else:
                     break
         except requests.exceptions.ConnectionError as e:
             if verbose:
-                sys.stdout.write(
-                    MESSAGE_GALAXY_NOT_YET_UP % (count, unicodify(e)[:100])
-                )
+                sys.stdout.write(MESSAGE_GALAXY_NOT_YET_UP % (count, unicodify(e)[:100]))
                 sys.stdout.flush()
 
         count += 1
diff --git a/src/ephemeris/workflow_install.py b/src/ephemeris/workflow_install.py
index ab573250..2e670f76 100644
--- a/src/ephemeris/workflow_install.py
+++ b/src/ephemeris/workflow_install.py
@@ -16,11 +16,9 @@ def import_workflow(gi, path, publish_wf=False):
     Given a connection to a Galaxy Instance (gi)
     and a path to a Galaxy workflow file,
     this function will import the worklfow into Galaxy.
""" - with open(path, "r") as wf_file: + with open(path) as wf_file: import_uuid = json.load(wf_file).get("uuid") - existing_uuids = [ - d.get("latest_workflow_uuid") for d in gi.workflows.get_workflows() - ] + existing_uuids = [d.get("latest_workflow_uuid") for d in gi.workflows.get_workflows()] if import_uuid not in existing_uuids: gi.workflows.import_workflow_from_local_path(path, publish=publish_wf) diff --git a/tests/conftest.py b/tests/conftest.py index 3f2a4c30..0ff220f5 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -5,9 +5,7 @@ from galaxy.tool_util.verify.interactor import GalaxyInteractorApi from galaxy_test.driver.driver_util import GalaxyTestDriver -GalaxyContainer = namedtuple( - "GalaxyContainer", ["url", "gi", "password", "username", "api_key"] -) +GalaxyContainer = namedtuple("GalaxyContainer", ["url", "gi", "password", "username", "api_key"]) @pytest.fixture(scope="class") @@ -42,9 +40,7 @@ def start_container(tmpdir_factory): port = server_wrapper.port prefix = server_wrapper.prefix or "" url = f"http://{host}:{port}{prefix.rstrip('/')}/" - interactor = GalaxyInteractorApi( - galaxy_url=url, master_api_key="123456789", test_user="test@bx.psu.edu" - ) + interactor = GalaxyInteractorApi(galaxy_url=url, master_api_key="123456789", test_user="test@bx.psu.edu") gi = GalaxyInstance(url, key=interactor.api_key, password="testpass") try: yield GalaxyContainer( diff --git a/tests/test_run_data_managers.py b/tests/test_run_data_managers.py index 4e9ee824..d12f5272 100644 --- a/tests/test_run_data_managers.py +++ b/tests/test_run_data_managers.py @@ -14,7 +14,7 @@ AUTH_BY = "key" -class TestRunDataManagers(object): +class TestRunDataManagers: """This class tests run-data-managers""" def test_install_data_managers(self, start_container): @@ -43,9 +43,7 @@ def test_run_data_managers(self, start_container): ) else: argv.extend(["-a", container.api_key]) - argv.extend( - ["-g", container.url, "--config", "tests/run_data_managers.yaml.test"] - ) + argv.extend(["-g", container.url, "--config", "tests/run_data_managers.yaml.test"]) sys.argv = argv run_data_managers.main() diff --git a/tests/test_setup_data_libraries_cli.py b/tests/test_setup_data_libraries_cli.py index cc0ab25a..12f19b5e 100644 --- a/tests/test_setup_data_libraries_cli.py +++ b/tests/test_setup_data_libraries_cli.py @@ -5,9 +5,7 @@ from ephemeris.setup_data_libraries import main as setup_data_libraries_cli LIBRARY_DATA_EXAMPLE = pathlib.Path(__file__).parent / "library_data_example.yaml" -LIBRARY_DATA_LEGACY_EXAMPLE = ( - pathlib.Path(__file__).parent / "library_data_example_legacy.yaml" -) +LIBRARY_DATA_LEGACY_EXAMPLE = pathlib.Path(__file__).parent / "library_data_example_legacy.yaml" def test_setup_data_libraries_with_username_and_password( diff --git a/tests/test_shed_tools.py b/tests/test_shed_tools.py index 90d80042..459f84c3 100644 --- a/tests/test_shed_tools.py +++ b/tests/test_shed_tools.py @@ -16,7 +16,7 @@ # The start_container fixture has the "class" scope. -class TestMiscellaneous(object): +class TestMiscellaneous: """This class is for miscellaneous tests that can use the same galaxy container""" def test_invalid_keys_in_repo_list(self, caplog, start_container): @@ -34,10 +34,7 @@ def test_invalid_keys_in_repo_list(self, caplog, start_container): ], log=logging.getLogger(), ) - assert ( - "'sesame_ouvre_toi' not a valid key. Will be skipped during parsing" - in caplog.text - ) + assert "'sesame_ouvre_toi' not a valid key. 
Will be skipped during parsing" in caplog.text @pytest.mark.parametrize("parallel_tests", [1, 2]) def test_tool_tests(self, caplog, start_container, parallel_tests): diff --git a/tests/test_shed_tools_cli.py b/tests/test_shed_tools_cli.py index 64da4ad2..88780bff 100644 --- a/tests/test_shed_tools_cli.py +++ b/tests/test_shed_tools_cli.py @@ -31,9 +31,7 @@ def install_old_cdhit(start_container: GalaxyContainer): def get_tool_list(start_container: GalaxyContainer, *extra_args): with tempfile.NamedTemporaryFile(mode="r") as tool_list_file: - get_tool_list_cli( - ["-g", start_container.url, "-o", tool_list_file.name, *extra_args] - ) + get_tool_list_cli(["-g", start_container.url, "-o", tool_list_file.name, *extra_args]) return tool_list_file.read() diff --git a/tests/test_shed_tools_methods.py b/tests/test_shed_tools_methods.py index 76cc018f..5ec8e9a1 100644 --- a/tests/test_shed_tools_methods.py +++ b/tests/test_shed_tools_methods.py @@ -11,9 +11,7 @@ def test_flatten_repo_info(): tool_panel_section_label="NGS: Alignment", revisions=["1", "2"], ), - dict( - name="bowtie2", owner="devteam", tool_panel_section_label="NGS: Alignment" - ), + dict(name="bowtie2", owner="devteam", tool_panel_section_label="NGS: Alignment"), ] flattened_repos = flatten_repo_info(test_repositories) assert flattened_repos == [ @@ -29,7 +27,5 @@ def test_flatten_repo_info(): tool_panel_section_label="NGS: Alignment", changeset_revision="2", ), - dict( - name="bowtie2", owner="devteam", tool_panel_section_label="NGS: Alignment" - ), + dict(name="bowtie2", owner="devteam", tool_panel_section_label="NGS: Alignment"), ] diff --git a/tox.ini b/tox.ini index 6a8c85bd..9afd6851 100644 --- a/tox.ini +++ b/tox.ini @@ -5,9 +5,16 @@ source_dir = src/ephemeris test_dir = tests [testenv:py38-lint] -commands = flake8 {[tox]source_dir} {[tox]test_dir} +commands = + black --check {[tox]source_dir} {[tox]test_dir} + isort --check-only {[tox]source_dir} {[tox]test_dir} + ruff {[tox]source_dir} {[tox]test_dir} + flake8 {[tox]source_dir} {[tox]test_dir} skip_install = True deps = + black + isort + ruff flake8 flake8-import-order @@ -23,3 +30,11 @@ commands = # This is needed for codacy to understand which files have coverage testing # Unfortunately this has to run in the tox env to have access to envsitepackagesdir sed -i 's|{envsitepackagesdir}|src|' coverage.xml + +[testenv:mypy] +commands = + mypy -p ephemeris +deps = + mypy + types-PyYAML + types-requests