diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 00000000..4ecfbfe3 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,28 @@ +{ + "name": "nfcore", + "image": "nfcore/gitpod:latest", + "remoteUser": "gitpod", + "runArgs": ["--privileged"], + + // Configure tool-specific properties. + "customizations": { + // Configure properties specific to VS Code. + "vscode": { + // Set *default* container specific settings.json values on container create. + "settings": { + "python.defaultInterpreterPath": "/opt/conda/bin/python", + "python.linting.enabled": true, + "python.linting.pylintEnabled": true, + "python.formatting.autopep8Path": "/opt/conda/bin/autopep8", + "python.formatting.yapfPath": "/opt/conda/bin/yapf", + "python.linting.flake8Path": "/opt/conda/bin/flake8", + "python.linting.pycodestylePath": "/opt/conda/bin/pycodestyle", + "python.linting.pydocstylePath": "/opt/conda/bin/pydocstyle", + "python.linting.pylintPath": "/opt/conda/bin/pylint" + }, + + // Add the IDs of extensions you want installed when the container is created. + "extensions": ["ms-python.python", "ms-python.vscode-pylance", "nf-core.nf-core-extensionpack"] + } + } +} diff --git a/.editorconfig b/.editorconfig index 95549501..b6b31907 100644 --- a/.editorconfig +++ b/.editorconfig @@ -8,12 +8,9 @@ trim_trailing_whitespace = true indent_size = 4 indent_style = space -[*.{yml,yaml}] +[*.{md,yml,yaml,html,css,scss,js}] indent_size = 2 -[*.json] -insert_final_newline = unset - # These files are edited and tested upstream in nf-core/modules [/modules/nf-core/**] charset = unset diff --git a/.gitattributes b/.gitattributes index 7fe55006..7a2dabc2 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1 +1,4 @@ *.config linguist-language=nextflow +*.nf.test linguist-language=nextflow +modules/nf-core/** linguist-generated +subworkflows/nf-core/** linguist-generated diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 42b647d0..a46c780a 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -9,14 +9,15 @@ Please use the pre-filled template to save time. However, don't be put off by this template - other more general issues and suggestions are welcome! Contributions to the code are even more welcome ;) -> If you need help using or modifying nf-core/bacass then the best place to ask is on the nf-core Slack [#bacass](https://nfcore.slack.com/channels/bacass) channel ([join our Slack here](https://nf-co.re/join/slack)). +:::info +If you need help using or modifying nf-core/bacass then the best place to ask is on the nf-core Slack [#bacass](https://nfcore.slack.com/channels/bacass) channel ([join our Slack here](https://nf-co.re/join/slack)). +::: ## Contribution workflow If you'd like to write some code for nf-core/bacass, the standard workflow is as follows: -1. Check that there isn't already an issue about your idea in the [nf-core/bacass issues](https://github.com/nf-core/bacass/issues) to avoid duplicating work - * If there isn't one already, please create one so that others know you're working on this +1. Check that there isn't already an issue about your idea in the [nf-core/bacass issues](https://github.com/nf-core/bacass/issues) to avoid duplicating work. If there isn't one already, please create one so that others know you're working on this 2. [Fork](https://help.github.com/en/github/getting-started-with-github/fork-a-repo) the [nf-core/bacass repository](https://github.com/nf-core/bacass) to your GitHub account 3. 
Make the necessary changes / additions within your forked repository following [Pipeline conventions](#pipeline-contribution-conventions) 4. Use `nf-core schema build` and add any new parameters to the pipeline JSON schema (requires [nf-core tools](https://github.com/nf-core/tools) >= 1.10). @@ -49,9 +50,9 @@ These tests are run both with the latest available version of `Nextflow` and als :warning: Only in the unlikely and regretful event of a release happening with a bug. -* On your own fork, make a new branch `patch` based on `upstream/master`. -* Fix the bug, and bump version (X.Y.Z+1). -* A PR should be made on `master` from patch to directly this particular bug. +- On your own fork, make a new branch `patch` based on `upstream/master`. +- Fix the bug, and bump version (X.Y.Z+1). +- A PR should be made on `master` from `patch` to directly fix this particular bug. ## Getting help @@ -68,16 +69,13 @@ If you wish to contribute a new step, please use the following coding standards: 1. Define the corresponding input channel into your new process from the expected previous process channel 2. Write the process block (see below). 3. Define the output channel if needed (see below). -4. Add any new flags/options to `nextflow.config` with a default (see below). -5. Add any new flags/options to `nextflow_schema.json` with help text (with `nf-core schema build`). -6. Add any new flags/options to the help message (for integer/text parameters, print to help the corresponding `nextflow.config` parameter). -7. Add sanity checks for all relevant parameters. -8. Add any new software to the `scrape_software_versions.py` script in `bin/` and the version command to the `scrape_software_versions` process in `main.nf`. -9. Do local tests that the new code works properly and as expected. -10. Add a new test command in `.github/workflow/ci.yml`. -11. If applicable add a [MultiQC](https://https://multiqc.info/) module. -12. Update MultiQC config `assets/multiqc_config.yaml` so relevant suffixes, name clean up, General Statistics Table column order, and module figures are in the right order. -13. Optional: Add any descriptions of MultiQC report sections and output files to `docs/output.md`. +4. Add any new parameters to `nextflow.config` with a default (see below). +5. Add any new parameters to `nextflow_schema.json` with help text (via the `nf-core schema build` tool). +6. Add sanity checks and validation for all relevant parameters. +7. Perform local tests to validate that the new code works as expected. +8. If applicable, add a new test command in `.github/workflow/ci.yml`. +9. Update the MultiQC config `assets/multiqc_config.yml` so relevant suffixes, file name clean-up and module plots are in the appropriate order. If applicable, add a [MultiQC](https://multiqc.info/) module. +10. Add a description of the output files and, if relevant, any appropriate images from the MultiQC report to `docs/output.md`. ### Default values @@ -95,34 +93,28 @@ The process resources can be passed on to the tool dynamically within the proces Please use the following naming schemes, to make it easy to understand what is going where. -* initial process channel: `ch_output_from_` -* intermediate and terminal channels: `ch__for_` +- initial process channel: `ch_output_from_` +- intermediate and terminal channels: `ch__for_` ### Nextflow version bumping If you are using a new feature from core Nextflow, you may bump the minimum required version of nextflow in the pipeline with: `nf-core bump-version --nextflow . 
[min-nf-version]` -### Software version reporting - -If you add a new tool to the pipeline, please ensure you add the information of the tool to the `get_software_version` process. - -Add to the script block of the process, something like the following: +### Images and figures -```bash - --version &> v_.txt 2>&1 || true -``` +For overview images and other documents we follow the nf-core [style guidelines and examples](https://nf-co.re/developers/design_guidelines). -or +## GitHub Codespaces -```bash - --help | head -n 1 &> v_.txt 2>&1 || true -``` +This repo includes a devcontainer configuration which will create a GitHub Codespaces environment for Nextflow development! This is an online developer environment that runs in your browser, complete with VSCode and a terminal. -You then need to edit the script `bin/scrape_software_versions.py` to: +To get started: -1. Add a Python regex for your tool's `--version` output (as in stored in the `v_.txt` file), to ensure the version is reported as a `v` and the version number e.g. `v2.1.1` -2. Add a HTML entry to the `OrderedDict` for formatting in MultiQC. +- Open the repo in [Codespaces](https://github.com/nf-core/bacass/codespaces) +- Tools installed + - nf-core + - Nextflow -### Images and figures +Devcontainer specs: -For overview images and other documents we follow the nf-core [style guidelines and examples](https://nf-co.re/developers/design_guidelines). +- [DevContainer config](.devcontainer/devcontainer.json) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 8e0a6155..00000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -name: Bug report -about: Report something that is broken or incorrect -labels: bug ---- - - - -## Check Documentation - -I have checked the following places for your error: - -- [ ] [nf-core website: troubleshooting](https://nf-co.re/usage/troubleshooting) -- [ ] [nf-core/bacass pipeline documentation](https://nf-co.re/bacass/usage) - -## Description of the bug - - - -## Steps to reproduce - -Steps to reproduce the behaviour: - -1. Command line: -2. See error: - -## Expected behaviour - - - -## Log files - -Have you provided the following extra information/files: - -- [ ] The command used to run the pipeline -- [ ] The `.nextflow.log` file - -## System - -- Hardware: -- Executor: -- OS: -- Version - -## Nextflow Installation - -- Version: - -## Container engine - -- Engine: -- version: - -## Additional context - - diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 00000000..8f319341 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,50 @@ +name: Bug report +description: Report something that is broken or incorrect +labels: bug +body: + - type: markdown + attributes: + value: | + Before you post this issue, please check the documentation: + + - [nf-core website: troubleshooting](https://nf-co.re/usage/troubleshooting) + - [nf-core/bacass pipeline documentation](https://nf-co.re/bacass/usage) + + - type: textarea + id: description + attributes: + label: Description of the bug + description: A clear and concise description of what the bug is. + validations: + required: true + + - type: textarea + id: command_used + attributes: + label: Command used and terminal output + description: Steps to reproduce the behaviour. Please paste the command you used to launch the pipeline and the output from your terminal. 
+ render: console + placeholder: | + $ nextflow run ... + + Some output where something broke + + - type: textarea + id: files + attributes: + label: Relevant files + description: | + Please drag and drop the relevant files here. Create a `.zip` archive if the extension is not allowed. + Your verbose log file `.nextflow.log` is often useful _(this is a hidden file in the directory where you launched the pipeline)_ as well as custom Nextflow configuration files. + + - type: textarea + id: system + attributes: + label: System information + description: | + * Nextflow version _(eg. 23.04.0)_ + * Hardware _(eg. HPC, Desktop, Cloud)_ + * Executor _(eg. slurm, local, awsbatch)_ + * Container engine: _(e.g. Docker, Singularity, Conda, Podman, Shifter, Charliecloud, or Apptainer)_ + * OS _(eg. CentOS Linux, macOS, Linux Mint)_ + * Version of nf-core/bacass _(eg. 1.1, 1.5, 1.8.2)_ diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index eab75cde..02fa8097 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,4 +1,3 @@ -blank_issues_enabled: false contact_links: - name: Join nf-core url: https://nf-co.re/join diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 29121dbd..00000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for the nf-core/bacass pipeline -labels: enhancement ---- - - - -## Is your feature request related to a problem? Please describe - - - - - -## Describe the solution you'd like - - - -## Describe alternatives you've considered - - - -## Additional context - - diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 00000000..86e8463b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,11 @@ +name: Feature request +description: Suggest an idea for the nf-core/bacass pipeline +labels: enhancement +body: + - type: textarea + id: description + attributes: + label: Description of feature + description: Please describe your suggestion for a new feature. It might help to describe a problem or use case, plus any alternatives that you have considered. + validations: + required: true diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index dcccebd8..73441108 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -10,16 +10,15 @@ Remember that PRs should be made against the dev branch, unless you're preparing Learn more about contributing: [CONTRIBUTING.md](https://github.com/nf-core/bacass/tree/master/.github/CONTRIBUTING.md) --> - ## PR checklist - [ ] This comment contains a description of changes (with reason). - [ ] If you've fixed a bug or added code that should be tested, add tests! - - [ ] If you've added a new tool - have you followed the pipeline conventions in the [contribution docs](https://github.com/nf-core/bacass/tree/master/.github/CONTRIBUTING.md) - - [ ] If necessary, also make a PR on the nf-core/bacass _branch_ on the [nf-core/test-datasets](https://github.com/nf-core/test-datasets) repository. 
+- [ ] If you've added a new tool - have you followed the pipeline conventions in the [contribution docs](https://github.com/nf-core/bacass/tree/master/.github/CONTRIBUTING.md) +- [ ] If necessary, also make a PR on the nf-core/bacass _branch_ on the [nf-core/test-datasets](https://github.com/nf-core/test-datasets) repository. - [ ] Make sure your code lints (`nf-core lint`). -- [ ] Ensure the test suite passes (`nextflow run . -profile test,docker`). +- [ ] Ensure the test suite passes (`nextflow run . -profile test,docker --outdir `). - [ ] Usage Documentation in `docs/usage.md` is updated. - [ ] Output Documentation in `docs/output.md` is updated. - [ ] `CHANGELOG.md` is updated. diff --git a/.github/workflows/awsfulltest.yml b/.github/workflows/awsfulltest.yml index 5e94f7df..50399323 100644 --- a/.github/workflows/awsfulltest.yml +++ b/.github/workflows/awsfulltest.yml @@ -14,20 +14,25 @@ jobs: runs-on: ubuntu-latest steps: - name: Launch workflow via tower - uses: nf-core/tower-action@master + uses: seqeralabs/action-tower-launch@v2 # Add full size test data (but still relatively small datasets for few samples) # on the `test_full.config` test runs with only one set of parameters - with: workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }} - bearer_token: ${{ secrets.TOWER_BEARER_TOKEN }} + access_token: ${{ secrets.TOWER_ACCESS_TOKEN }} compute_env: ${{ secrets.TOWER_COMPUTE_ENV }} - pipeline: ${{ github.repository }} revision: ${{ github.sha }} workdir: s3://${{ secrets.AWS_S3_BUCKET }}/work/bacass/work-${{ github.sha }} parameters: | { + "hook_url": "${{ secrets.MEGATESTS_ALERTS_SLACK_HOOK_URL }}", "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/bacass/results-${{ github.sha }}" } - profiles: '[ "test_full", "aws_tower" ]' + profiles: test_full + - uses: actions/upload-artifact@v3 + with: + name: Tower debug log file + path: | + tower_action_*.log + tower_action_*.json diff --git a/.github/workflows/awstest.yml b/.github/workflows/awstest.yml index 56b6ad8a..5ff8ad1a 100644 --- a/.github/workflows/awstest.yml +++ b/.github/workflows/awstest.yml @@ -10,19 +10,24 @@ jobs: if: github.repository == 'nf-core/bacass' runs-on: ubuntu-latest steps: + # Launch workflow using Tower CLI tool action - name: Launch workflow via tower - uses: nf-core/tower-action@master - + uses: seqeralabs/action-tower-launch@v2 with: workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }} - bearer_token: ${{ secrets.TOWER_BEARER_TOKEN }} + access_token: ${{ secrets.TOWER_ACCESS_TOKEN }} compute_env: ${{ secrets.TOWER_COMPUTE_ENV }} - pipeline: ${{ github.repository }} revision: ${{ github.sha }} workdir: s3://${{ secrets.AWS_S3_BUCKET }}/work/bacass/work-${{ github.sha }} parameters: | { - "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/bacass/results-${{ github.sha }}" + "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/bacass/results-test-${{ github.sha }}" } - profiles: '[ "test", "aws_tower" ]' + profiles: test + - uses: actions/upload-artifact@v3 + with: + name: Tower debug log file + path: | + tower_action_*.log + tower_action_*.json diff --git a/.github/workflows/branch.yml b/.github/workflows/branch.yml index 453a14f4..b711a8eb 100644 --- a/.github/workflows/branch.yml +++ b/.github/workflows/branch.yml @@ -13,8 +13,7 @@ jobs: - name: Check PRs if: github.repository == 'nf-core/bacass' run: | - { [[ ${{github.event.pull_request.head.repo.full_name }} == nf-core/bacass ]] && [[ $GITHUB_HEAD_REF = "dev" ]]; } || [[ $GITHUB_HEAD_REF == "patch" ]] - + { [[ ${{github.event.pull_request.head.repo.full_name }} == nf-core/bacass ]] 
&& [[ $GITHUB_HEAD_REF == "dev" ]]; } || [[ $GITHUB_HEAD_REF == "patch" ]] # If the above check failed, post a comment on the PR explaining the failure # NOTE - this doesn't currently work if the PR is coming from a fork, due to limitations in GitHub actions secrets @@ -43,4 +42,3 @@ jobs: Thanks again for your contribution! repo-token: ${{ secrets.GITHUB_TOKEN }} allow-repeats: false - diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7f5d08e6..bf037552 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -8,38 +8,38 @@ on: release: types: [published] -# Uncomment if we need an edge release of Nextflow again -# env: NXF_EDGE: 1 +env: + NXF_ANSI_LOG: false + +concurrency: + group: "${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}" + cancel-in-progress: true jobs: test: - name: Run workflow tests + name: Run pipeline with test data # Only run on push if this is the nf-core dev branch (merged PRs) - if: ${{ github.event_name != 'push' || (github.event_name == 'push' && github.repository == 'nf-core/bacass') }} + if: "${{ github.event_name != 'push' || (github.event_name == 'push' && github.repository == 'nf-core/bacass') }}" runs-on: ubuntu-latest - env: - NXF_VER: ${{ matrix.nxf_ver }} - NXF_ANSI_LOG: false strategy: matrix: - # Nextflow versions: check pipeline minimum and current latest - nxf_ver: ['21.04.0', ''] + NXF_VER: + - "23.04.0" + - "latest-everything" steps: - name: Check out pipeline code - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Install Nextflow - env: - CAPSULE_LOG: none - run: | - wget -qO- get.nextflow.io | bash - sudo mv nextflow /usr/local/bin/ + uses: nf-core/setup-nextflow@v1 + with: + version: "${{ matrix.NXF_VER }}" - name: Run pipeline with test data # For example: adding multiple test runs with different parameters # Remember that you can parallelise this by using strategy.matrix run: | - nextflow run ${GITHUB_WORKSPACE} -profile test,docker + nextflow run ${GITHUB_WORKSPACE} -profile test,docker --outdir results profiles: name: Run workflow profile @@ -47,7 +47,7 @@ jobs: if: ${{ github.event_name != 'push' || (github.event_name == 'push' && github.repository == 'nf-core/bacass') }} runs-on: ubuntu-latest env: - NXF_VER: '21.04.0' + NXF_VER: "23.04.0" NXF_ANSI_LOG: false strategy: matrix: @@ -65,4 +65,4 @@ jobs: sudo mv nextflow /usr/local/bin/ - name: Run pipeline with ${{ matrix.profile }} test profile run: | - nextflow run ${GITHUB_WORKSPACE} -profile ${{ matrix.profile }},docker + nextflow run ${GITHUB_WORKSPACE} -profile ${{ matrix.profile }},docker --outdir results diff --git a/.github/workflows/clean-up.yml b/.github/workflows/clean-up.yml new file mode 100644 index 00000000..694e90ec --- /dev/null +++ b/.github/workflows/clean-up.yml @@ -0,0 +1,24 @@ +name: "Close user-tagged issues and PRs" +on: + schedule: + - cron: "0 0 * * 0" # Once a week + +jobs: + clean-up: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + - uses: actions/stale@v7 + with: + stale-issue-message: "This issue has been tagged as awaiting-changes or awaiting-feedback by an nf-core contributor. Remove stale label or add a comment otherwise this issue will be closed in 20 days." + stale-pr-message: "This PR has been tagged as awaiting-changes or awaiting-feedback by an nf-core contributor. Remove stale label or add a comment if it is still useful." 
+ close-issue-message: "This issue was closed because it has been tagged as awaiting-changes or awaiting-feedback by an nf-core contributor and then staled for 20 days with no activity." + days-before-stale: 30 + days-before-close: 20 + days-before-pr-close: -1 + any-of-labels: "awaiting-changes,awaiting-feedback" + exempt-issue-labels: "WIP" + exempt-pr-labels: "WIP" + repo-token: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.github/workflows/fix-linting.yml b/.github/workflows/fix-linting.yml new file mode 100644 index 00000000..474f2f76 --- /dev/null +++ b/.github/workflows/fix-linting.yml @@ -0,0 +1,55 @@ +name: Fix linting from a comment +on: + issue_comment: + types: [created] + +jobs: + deploy: + # Only run if comment is on a PR with the main repo, and if it contains the magic keywords + if: > + contains(github.event.comment.html_url, '/pull/') && + contains(github.event.comment.body, '@nf-core-bot fix linting') && + github.repository == 'nf-core/bacass' + runs-on: ubuntu-latest + steps: + # Use the @nf-core-bot token to check out so we can push later + - uses: actions/checkout@v3 + with: + token: ${{ secrets.nf_core_bot_auth_token }} + + # Action runs on the issue comment, so we don't get the PR by default + # Use the gh cli to check out the PR + - name: Checkout Pull Request + run: gh pr checkout ${{ github.event.issue.number }} + env: + GITHUB_TOKEN: ${{ secrets.nf_core_bot_auth_token }} + + - uses: actions/setup-node@v3 + + - name: Install Prettier + run: npm install -g prettier @prettier/plugin-php + + # Check that we actually need to fix something + - name: Run 'prettier --check' + id: prettier_status + run: | + if prettier --check ${GITHUB_WORKSPACE}; then + echo "result=pass" >> $GITHUB_OUTPUT + else + echo "result=fail" >> $GITHUB_OUTPUT + fi + + - name: Run 'prettier --write' + if: steps.prettier_status.outputs.result == 'fail' + run: prettier --write ${GITHUB_WORKSPACE} + + - name: Commit & push changes + if: steps.prettier_status.outputs.result == 'fail' + run: | + git config user.email "core@nf-co.re" + git config user.name "nf-core-bot" + git config push.default upstream + git add . + git status + git commit -m "[automated] Fix linting with Prettier" + git push diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index 3b448773..b8bdd214 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -1,77 +1,49 @@ name: nf-core linting # This workflow is triggered on pushes and PRs to the repository. -# It runs the `nf-core lint` and markdown lint tests to ensure that the code meets the nf-core guidelines +# It runs the `nf-core lint` and markdown lint tests to ensure +# that the code meets the nf-core guidelines. on: push: + branches: + - dev pull_request: release: types: [published] jobs: - Markdown: + EditorConfig: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - uses: actions/setup-node@v1 - with: - node-version: '10' - - name: Install markdownlint - run: npm install -g markdownlint-cli - - name: Run Markdownlint - run: markdownlint . - - # If the above check failed, post a comment on the PR explaining the failure - - name: Post PR comment - if: failure() - uses: mshick/add-pr-comment@v1 - with: - message: | - ## Markdown linting is failing - - To keep the code consistent with lots of contributors, we run automated code consistency checks. 
- To fix this CI test, please run: + - uses: actions/checkout@v3 - * Install `markdownlint-cli` - * On Mac: `brew install markdownlint-cli` - * Everything else: [Install `npm`](https://www.npmjs.com/get-npm) then [install `markdownlint-cli`](https://www.npmjs.com/package/markdownlint-cli) (`npm install -g markdownlint-cli`) - * Fix the markdown errors - * Automatically: `markdownlint . --fix` - * Manually resolve anything left from `markdownlint .` + - uses: actions/setup-node@v3 - Once you push these changes the test should pass, and you can hide this comment :+1: - - We highly recommend setting up markdownlint in your code editor so that this formatting is done automatically on save. Ask about it on Slack for help! + - name: Install editorconfig-checker + run: npm install -g editorconfig-checker - Thanks again for your contribution! - repo-token: ${{ secrets.GITHUB_TOKEN }} - allow-repeats: false + - name: Run ECLint check + run: editorconfig-checker -exclude README.md $(find .* -type f | grep -v '.git\|.py\|.md\|json\|yml\|yaml\|html\|css\|work\|.nextflow\|build\|nf_core.egg-info\|log.txt\|Makefile') - EditorConfig: + Prettier: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - - uses: actions/setup-node@v1 - with: - node-version: '10' + - uses: actions/setup-node@v3 - - name: Install editorconfig-checker - run: npm install -g editorconfig-checker + - name: Install Prettier + run: npm install -g prettier - - name: Run ECLint check - run: editorconfig-checker -exclude README.md $(git ls-files | grep -v test) + - name: Run Prettier --check + run: prettier --check ${GITHUB_WORKSPACE} - YAML: + PythonBlack: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 - - uses: actions/setup-node@v1 - with: - node-version: '10' - - name: Install yaml-lint - run: npm install -g yaml-lint - - name: Run yaml-lint - run: yamllint $(find ${GITHUB_WORKSPACE} -type f -name "*.yml" -o -name "*.yaml") + - uses: actions/checkout@v3 + + - name: Check code lints with Black + uses: psf/black@stable # If the above check failed, post a comment on the PR explaining the failure - name: Post PR comment @@ -79,20 +51,17 @@ jobs: uses: mshick/add-pr-comment@v1 with: message: | - ## YAML linting is failing + ## Python linting (`black`) is failing To keep the code consistent with lots of contributors, we run automated code consistency checks. To fix this CI test, please run: - * Install `yaml-lint` - * [Install `npm`](https://www.npmjs.com/get-npm) then [install `yaml-lint`](https://www.npmjs.com/package/yaml-lint) (`npm install -g yaml-lint`) - * Fix the markdown errors - * Run the test locally: `yamllint $(find . -type f -name "*.yml" -o -name "*.yaml")` - * Fix any reported errors in your YAML files + * Install [`black`](https://black.readthedocs.io/en/stable/): `pip install black` + * Fix formatting errors in your pipeline: `black .` Once you push these changes the test should pass, and you can hide this comment :+1: - We highly recommend setting up yaml-lint in your code editor so that this formatting is done automatically on save. Ask about it on Slack for help! + We highly recommend setting up Black in your code editor so that this formatting is done automatically on save. Ask about it on Slack for help! Thanks again for your contribution! 
repo-token: ${{ secrets.GITHUB_TOKEN }} @@ -101,21 +70,16 @@ jobs: nf-core: runs-on: ubuntu-latest steps: - - name: Check out pipeline code - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Install Nextflow - env: - CAPSULE_LOG: none - run: | - wget -qO- get.nextflow.io | bash - sudo mv nextflow /usr/local/bin/ + uses: nf-core/setup-nextflow@v1 - - uses: actions/setup-python@v1 + - uses: actions/setup-python@v4 with: - python-version: '3.6' - architecture: 'x64' + python-version: "3.11" + architecture: "x64" - name: Install dependencies run: | @@ -135,11 +99,10 @@ - name: Upload linting log file artifact if: ${{ always() }} - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: linting-logs path: | lint_log.txt lint_results.md PR_number.txt - diff --git a/.github/workflows/linting_comment.yml b/.github/workflows/linting_comment.yml index 90f03c6f..0bbcd30f 100644 --- a/.github/workflows/linting_comment.yml +++ b/.github/workflows/linting_comment.yml @@ -1,4 +1,3 @@ - name: nf-core linting comment # This workflow is triggered after the linting action is complete # It posts an automated comment to the PR, even if the PR is coming from a fork @@ -15,10 +14,11 @@ jobs: uses: dawidd6/action-download-artifact@v2 with: workflow: linting.yml + workflow_conclusion: completed - name: Get PR number id: pr_number - run: echo "::set-output name=pr_number::$(cat linting-logs/PR_number.txt)" + run: echo "pr_number=$(cat linting-logs/PR_number.txt)" >> $GITHUB_OUTPUT - name: Post PR comment uses: marocchino/sticky-pull-request-comment@v2 @@ -26,4 +26,3 @@ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} number: ${{ steps.pr_number.outputs.pr_number }} path: linting-logs/lint_results.md - diff --git a/.github/workflows/release-announcments.yml b/.github/workflows/release-announcments.yml new file mode 100644 index 00000000..6ad33927 --- /dev/null +++ b/.github/workflows/release-announcments.yml @@ -0,0 +1,68 @@ +name: release-announcements +# Automatic release toot and tweet announcements +on: + release: + types: [published] + workflow_dispatch: + +jobs: + toot: + runs-on: ubuntu-latest + steps: + - uses: rzr/fediverse-action@master + with: + access-token: ${{ secrets.MASTODON_ACCESS_TOKEN }} + host: "mstdn.science" # custom host if not "mastodon.social" (default) + # GitHub event payload + # https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#release + message: | + Pipeline release! ${{ github.repository }} v${{ github.event.release.tag_name }} - ${{ github.event.release.name }}! + + Please see the changelog: ${{ github.event.release.html_url }} + + send-tweet: + runs-on: ubuntu-latest + + steps: + - uses: actions/setup-python@v4 + with: + python-version: "3.10" + - name: Install dependencies + run: pip install tweepy==4.14.0 + - name: Send tweet + shell: python + run: | + import os + import tweepy + + client = tweepy.Client( + access_token=os.getenv("TWITTER_ACCESS_TOKEN"), + access_token_secret=os.getenv("TWITTER_ACCESS_TOKEN_SECRET"), + consumer_key=os.getenv("TWITTER_CONSUMER_KEY"), + consumer_secret=os.getenv("TWITTER_CONSUMER_SECRET"), + ) + tweet = os.getenv("TWEET") + client.create_tweet(text=tweet) + env: + TWEET: | + Pipeline release! ${{ github.repository }} v${{ github.event.release.tag_name }} - ${{ github.event.release.name }}! 
+ + Please see the changelog: ${{ github.event.release.html_url }} + TWITTER_CONSUMER_KEY: ${{ secrets.TWITTER_CONSUMER_KEY }} + TWITTER_CONSUMER_SECRET: ${{ secrets.TWITTER_CONSUMER_SECRET }} + TWITTER_ACCESS_TOKEN: ${{ secrets.TWITTER_ACCESS_TOKEN }} + TWITTER_ACCESS_TOKEN_SECRET: ${{ secrets.TWITTER_ACCESS_TOKEN_SECRET }} + + bsky-post: + runs-on: ubuntu-latest + steps: + - uses: zentered/bluesky-post-action@v0.0.2 + with: + post: | + Pipeline release! ${{ github.repository }} v${{ github.event.release.tag_name }} - ${{ github.event.release.name }}! + + Please see the changelog: ${{ github.event.release.html_url }} + env: + BSKY_IDENTIFIER: ${{ secrets.BSKY_IDENTIFIER }} + BSKY_PASSWORD: ${{ secrets.BSKY_PASSWORD }} + # diff --git a/.gitpod.yml b/.gitpod.yml new file mode 100644 index 00000000..25488dcc --- /dev/null +++ b/.gitpod.yml @@ -0,0 +1,19 @@ +image: nfcore/gitpod:latest +tasks: + - name: Update Nextflow and setup pre-commit + command: | + pre-commit install --install-hooks + nextflow self-update + +vscode: + extensions: # based on nf-core.nf-core-extensionpack + - codezombiech.gitignore # Language support for .gitignore files + # - cssho.vscode-svgviewer # SVG viewer + - esbenp.prettier-vscode # Markdown/CommonMark linting and style checking for Visual Studio Code + - eamodio.gitlens # Quickly glimpse into whom, why, and when a line or code block was changed + - EditorConfig.EditorConfig # override user/workspace settings with settings found in .editorconfig files + - Gruntfuggly.todo-tree # Display TODO and FIXME in a tree view in the activity bar + - mechatroner.rainbow-csv # Highlight columns in csv files in different colors + # - nextflow.nextflow # Nextflow syntax highlighting + - oderwat.indent-rainbow # Highlight indentation level + - streetsidesoftware.code-spell-checker # Spelling checker for source code diff --git a/.markdownlint.yml b/.markdownlint.yml deleted file mode 100644 index 9e605fcf..00000000 --- a/.markdownlint.yml +++ /dev/null @@ -1,14 +0,0 @@ -# Markdownlint configuration file -default: true -line-length: false -ul-indent: - indent: 4 -no-duplicate-header: - siblings_only: true -no-inline-html: - allowed_elements: - - img - - p - - kbd - - details - - summary diff --git a/.nf-core.yml b/.nf-core.yml new file mode 100644 index 00000000..778ae193 --- /dev/null +++ b/.nf-core.yml @@ -0,0 +1,4 @@ +repository_type: pipeline +lint: + files_exist: + - conf/igenomes.config diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..0c31cdb9 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,5 @@ +repos: + - repo: https://github.com/pre-commit/mirrors-prettier + rev: "v2.7.1" + hooks: + - id: prettier diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 00000000..437d763d --- /dev/null +++ b/.prettierignore @@ -0,0 +1,12 @@ +email_template.html +adaptivecard.json +slackreport.json +.nextflow* +work/ +data/ +results/ +.DS_Store +testing/ +testing* +*.pyc +bin/ diff --git a/.prettierrc.yml b/.prettierrc.yml new file mode 100644 index 00000000..c81f9a76 --- /dev/null +++ b/.prettierrc.yml @@ -0,0 +1 @@ +printWidth: 120 diff --git a/CHANGELOG.md b/CHANGELOG.md index d9b44c37..9d3b4376 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,37 +3,101 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## v2.1.0 nf-core/bacass: "Navy Steel Swordfish" - 2023/10/20 + +This version merges the nf-core template updates of v2.9 and v2.10, and updates modules or dependencies to ensure compatibility with the new template. Additionally, new modules have been added to process short reads and perform gene annotation with Bakta. + +### `Changed` + +- [#86](https://github.com/nf-core/bacass/pull/86) - Update nf-core/bacass to the new nf-core 2.9 `TEMPLATE`. +- [#61](https://github.com/nf-core/bacass/issues/61) - Update local/modules to nf-core/modules (detailed below). +- [#91](https://github.com/nf-core/bacass/pull/91) - Update nf-core/bacass to the new nf-core 2.10 `TEMPLATE`. +- [#95](https://github.com/nf-core/bacass/pull/95) - Update MultiQC module to v1.17. + +### `Added` + +- [#86](https://github.com/nf-core/bacass/pull/86) - Added nf-core subworkflow for trimming and QC of short reads [nf-core/fastq_trim_fastp_fastqc](https://github.com/nf-core/modules/tree/master/subworkflows/nf-core/fastq_trim_fastp_fastqc). +- [#88](https://github.com/nf-core/bacass/pull/88) - Added nf-validation on the samplesheet. +- [#93](https://github.com/nf-core/bacass/pull/93) - Added missing module outputs to MultiQC (Fastp, PycoQC, Porechop, Quast, Kraken2, and Prokka). +- [#95](https://github.com/nf-core/bacass/pull/95) - Added subworkflow for gene annotation with Bakta. + +### `Fixed` + +- Fixed modules + - Medaka: the latest Medaka version (see version update below) doesn't allow gzip-compressed files. Use bgzip compression instead. + - Dfast: fixed overwriting issues detected when copying sample files from `work/` to `results/`. + +### `Dependencies` + +- [#61](https://github.com/nf-core/bacass/issues/61) - Update local/modules to nf-core/modules plus version updates. + +| Tool | Previous version | New version | | -------- | ---------------- | ----------- | | Canu | 2.1.1 | 2.2 | | Minimap2 | 2.21 | 2.2 | | Miniasm | 0.3 | - | | Racon | 1.4.20-1 | - | + +- Updated modules already sourced from nf-core + +| Tool | Previous version | New version | | -------- | ---------------- | ----------- | | Fastqc | 0.11.9 | - | | Samtools | 1.13 | 2.1.2 | | Kraken2 | 2.1.1 | 2.1.2 | | Quast | 5.0.2 | 5.2.0 | | Prokka | 1.14.6 | - | | Multiqc | 1.10.1 | 1.15 | + +- Refactored `local/modules`, making them follow the nf-core v2.9 structure and conventions. + +| Tool | Previous version | New version | | ---------- | ---------------- | ----------- | | Dfast | 1.2.14 | - | | Medaka | 1.4.3-0 | - | | Nanoplot | 1.38.0 | 1.41.6 | | Nanopolish | 0.13.2-5 | 0.14.0 | | Pycoqc | 2.5.2 | - | | Unicycler | 0.4.8 | - | + +### `Deprecated` + +- [#86](https://github.com/nf-core/bacass/pull/86) - Replaced deprecated modules with nf-core/modules. + + - Replace `local/get_software_versions.nf` with `nf-core/custom/dumpsoftwareversions.nf` + - Replace `local/skewer` with `nf-core/fastp` and wrap fastqc plus fastp into `subworkflows/nf-core/fastq_trim_fastp_fastqc` + ## v2.0.0 nf-core/bacass: "Navy Steel Swordfish" 2021/08/27 ### `Changed` -* [#56](https://github.com/nf-core/bacass/pull/56) - Switched to DSL2 & update to new nf-core 2.1 `TEMPLATE` -* [#56](https://github.com/nf-core/bacass/pull/56) - `--krakendb` now expects a `.tar.gz`/`.tgz` (compressed tar archive) directly from `https://benlangmead.github.io/aws-indexes/k2` instead of an uncompressed folder. 
+- [#56](https://github.com/nf-core/bacass/pull/56) - Switched to DSL2 & update to new nf-core 2.1 `TEMPLATE` +- [#56](https://github.com/nf-core/bacass/pull/56) - `--krakendb` now expects a `.tar.gz`/`.tgz` (compressed tar archive) directly from `https://benlangmead.github.io/aws-indexes/k2` instead of an uncompressed folder. ### `Added` -* [#56](https://github.com/nf-core/bacass/pull/56) - Added full size test dataset, two Zetaproteobacteria sequenced with Illumina MiSeq Reagent Kit V2, PE250, 3 to 4 million read pairs. +- [#56](https://github.com/nf-core/bacass/pull/56) - Added full size test dataset, two Zetaproteobacteria sequenced with Illumina MiSeq Reagent Kit V2, PE250, 3 to 4 million read pairs. ### `Fixed` -* [#51](https://github.com/nf-core/bacass/issues/51) - Fixed Unicycler +- [#51](https://github.com/nf-core/bacass/issues/51) - Fixed Unicycler ### `Dependencies` -* [#56](https://github.com/nf-core/bacass/pull/56) - Updated a bunch of dependencies (unchanged: FastQC, Miniasm, Prokka, Porechop, QUAST) - * Unicycler from 0.4.4 to 0.4.8 - * Kraken2 from 2.0.9beta to 2.1.1 - * MultiQC from 1.9 to 1.10.1 - * PYCOQC from 2.5.0.23 to 2.5.2 - * Samtools from 1.11 to 1.13 - * Canu from 2.0 to 2.1.1-2 - * dfast from 1.2.10 to 1.2.14 - * Medaka from 1.1.2 to 1.4.3-0 - * Minimap 2 from 2.17 to 2.21 - * Nanoplot from 1.32.1 to 1.38.0 - * Nanopolish from 0.13.2 to 0.13.2-5 - * Racon from 1.4.13 to 1.4.20-1 - * Skewer from 0.2.2 to 0.2.2-3 +- [#56](https://github.com/nf-core/bacass/pull/56) - Updated a bunch of dependencies (unchanged: FastQC, Miniasm, Prokka, Porechop, QUAST) + - Unicycler from 0.4.4 to 0.4.8 + - Kraken2 from 2.0.9beta to 2.1.1 + - MultiQC from 1.9 to 1.10.1 + - PYCOQC from 2.5.0.23 to 2.5.2 + - Samtools from 1.11 to 1.13 + - Canu from 2.0 to 2.1.1-2 + - dfast from 1.2.10 to 1.2.14 + - Medaka from 1.1.2 to 1.4.3-0 + - Minimap 2 from 2.17 to 2.21 + - Nanoplot from 1.32.1 to 1.38.0 + - Nanopolish from 0.13.2 to 0.13.2-5 + - Racon from 1.4.13 to 1.4.20-1 + - Skewer from 0.2.2 to 0.2.2-3 ### `Deprecated` @@ -41,48 +105,48 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 This is basically a maintenance update that includes template updates, fixed environments and some minor bugfixes. 
-* Merged in nf-core/tools template v 1.10.2 -* Updated dependencies - * fastqc=0.11.8, 0.11.9 - * multiqc=1.8, 1.9 - * kraken2=2.0.8_beta, 2.0.9beta - * prokka=1.14.5, 1.14.6 - * nanopolish=0.11.2, 0.13.2 - * parallel=20191122, 20200922 - * racon=1.4.10, 1.4.13 - * canu=1.9, 2.0 - * samtools=1.9, 1.11 - * nanoplot=1.28.1, 1.32.1 - * pycoqc=2.5.0.3, 2.5.0.23 -* Switched out containers for many tools to make DSLv2 transition easier (escape from dependency hell) +- Merged in nf-core/tools template v 1.10.2 +- Updated dependencies + - fastqc=0.11.8, 0.11.9 + - multiqc=1.8, 1.9 + - kraken2=2.0.8_beta, 2.0.9beta + - prokka=1.14.5, 1.14.6 + - nanopolish=0.11.2, 0.13.2 + - parallel=20191122, 20200922 + - racon=1.4.10, 1.4.13 + - canu=1.9, 2.0 + - samtools=1.9, 1.11 + - nanoplot=1.28.1, 1.32.1 + - pycoqc=2.5.0.3, 2.5.0.23 +- Switched out containers for many tools to make DSLv2 transition easier (escape from dependency hell) ## v1.1.0 nf-core/bacass: "Green Aluminium Shark" 2019/12/13 -* Added support for hybrid assembly using Nanopore and Illumina Short Reads -* Added methods for long-read Nanopore data - * Nanopolish, for polishing of Nanopore data with Illumina reads - * Medaka, as alternative assembly polishing method - * PoreChop, for quality trimming of Nanopore data - * Nanoplot, for plotting quality metrics of Nanopore data - * PycoQC, to QC Nanopore data -* Added multiple tools to assemble long-reads - * Miniasm + Racon - * Canu Assembler - * Unicycler in Long read Mode -* Add alternative assembly annotation using DFAST -* Add social preview image +- Added support for hybrid assembly using Nanopore and Illumina Short Reads +- Added methods for long-read Nanopore data + - Nanopolish, for polishing of Nanopore data with Illumina reads + - Medaka, as alternative assembly polishing method + - PoreChop, for quality trimming of Nanopore data + - Nanoplot, for plotting quality metrics of Nanopore data + - PycoQC, to QC Nanopore data +- Added multiple tools to assemble long-reads + - Miniasm + Racon + - Canu Assembler + - Unicycler in Long read Mode +- Add alternative assembly annotation using DFAST +- Add social preview image ### Dependency updates -* Bumped Nextflow Version to 19.10.0 +- Bumped Nextflow Version to 19.10.0 ## Added tools -* DFAST -* PycoQC -* Nanoplot -* PoreChop -* Nanopolish +- DFAST +- PycoQC +- Nanoplot +- PoreChop +- Nanopolish ## v1.0.0 nf-core/bacass: "Green Tin Ant" diff --git a/CITATIONS.md b/CITATIONS.md index dfec0ec6..9d2ab889 100644 --- a/CITATIONS.md +++ b/CITATIONS.md @@ -10,65 +10,84 @@ ## Pipeline tools -* [FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/) +- [FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/) -* [Skewer](https://pubmed.ncbi.nlm.nih.gov/24925680/) - > Jiang H, Lei R, Ding SW, Zhu S. Skewer: a fast and accurate adapter trimmer for next-generation sequencing paired-end reads. BMC Bioinformatics. 2014 Jun 12;15:182. doi: 10.1186/1471-2105-15-182. PMID: 24925680; PMCID: PMC4074385. + > Andrews, S. (2010). FastQC: A Quality Control Tool for High Throughput Sequence Data [Online]. -* [Porechop](https://github.com/rrwick/Porechop) +- [FastP](https://github.com/OpenGene/fastp) -* [NanoPlot](https://doi.org/10.1093/bioinformatics/bty149) - > De Coster, W., D’Hert, S., Schultz, D. T., Cruts, M., & Van Broeckhoven, C. (2018). NanoPack: visualizing and processing long-read sequencing data. Bioinformatics, 34(15), 2666-2669. doi: 10.1093/bioinformatics/bty149. + > Chen S, Zhou Y, Chen Y, Gu J. 
fastp: an ultra-fast all-in-one FASTQ preprocessor. Bioinformatics. 2018 Sep 1;34(17):i884-i890. doi: 10.1093/bioinformatics/bty560. PMID: 30423086; PMCID: PMC6129281. -* [pycoQC](https://github.com/tleonardi/pycoQC) +- [Porechop](https://github.com/rrwick/Porechop) -* [Unicycler](https://pubmed.ncbi.nlm.nih.gov/28594827/) - > Wick RR, Judd LM, Gorrie CL, Holt KE. Unicycler: Resolving bacterial genome assemblies from short and long sequencing reads. PLoS Comput Biol. 2017 Jun 8;13(6):e1005595. doi: 10.1371/journal.pcbi.1005595. PMID: 28594827; PMCID: PMC5481147. +- [NanoPlot](https://doi.org/10.1093/bioinformatics/bty149) -* [Miniasm](https://github.com/lh3/miniasm) with [Racon](https://github.com/isovic/racon) + > De Coster, W., D’Hert, S., Schultz, D. T., Cruts, M., & Van Broeckhoven, C. (2018). NanoPack: visualizing and processing long-read sequencing data. Bioinformatics, 34(15), 2666-2669. doi: 10.1093/bioinformatics/bty149. -* [Canu](https://pubmed.ncbi.nlm.nih.gov/28298431/) - > Koren S, Walenz BP, Berlin K, Miller JR, Bergman NH, Phillippy AM. Canu: scalable and accurate long-read assembly via adaptive k-mer weighting and repeat separation. Genome Res. 2017 May;27(5):722-736. doi: 10.1101/gr.215087.116. Epub 2017 Mar 15. PMID: 28298431; PMCID: PMC5411767. +- [pycoQC](https://github.com/tleonardi/pycoQC) -* [QUAST](https://pubmed.ncbi.nlm.nih.gov/23422339/) - > Gurevich A, Saveliev V, Vyahhi N, Tesler G. QUAST: quality assessment tool for genome assemblies. Bioinformatics. 2013 Apr 15;29(8):1072-5. doi: 10.1093/bioinformatics/btt086. Epub 2013 Feb 19. PMID: 23422339; PMCID: PMC3624806. +- [Unicycler](https://pubmed.ncbi.nlm.nih.gov/28594827/) -* [Prokka](https://pubmed.ncbi.nlm.nih.gov/24642063/) - > Seemann T. Prokka: rapid prokaryotic genome annotation. Bioinformatics. 2014 Jul 15;30(14):2068-9. doi: 10.1093/bioinformatics/btu153. Epub 2014 Mar 18. PMID: 24642063. + > Wick RR, Judd LM, Gorrie CL, Holt KE. Unicycler: Resolving bacterial genome assemblies from short and long sequencing reads. PLoS Comput Biol. 2017 Jun 8;13(6):e1005595. doi: 10.1371/journal.pcbi.1005595. PMID: 28594827; PMCID: PMC5481147. -* [DFAST](https://pubmed.ncbi.nlm.nih.gov/29106469/) - > Tanizawa Y, Fujisawa T, Nakamura Y. DFAST: a flexible prokaryotic genome annotation pipeline for faster genome publication. Bioinformatics. 2018 Mar 15;34(6):1037-1039. doi: 10.1093/bioinformatics/btx713. PMID: 29106469; PMCID: PMC5860143. +- [Miniasm](https://github.com/lh3/miniasm) with [Racon](https://github.com/isovic/racon) -* [Medaka](https://github.com/nanoporetech/medaka) + > Li H. Minimap and miniasm: fast mapping and de novo assembly for noisy long sequences. Bioinformatics. 2016 Jul 15;32(14):2103-10. doi: 10.1093/bioinformatics/btw152. Epub 2016 Mar 19. PMID: 27153593; PMCID: PMC4937194. -* [Nanopolish](https://github.com/jts/nanopolish) +- [Canu](https://pubmed.ncbi.nlm.nih.gov/28298431/) -* [SAMtools](https://doi.org/10.1093/bioinformatics/btp352) - > Li, H., Handsaker, B., Wysoker, A., Fennell, T., Ruan, J., Homer, N., … 1000 Genome Project Data Processing Subgroup. (2009). The Sequence Alignment/Map format and SAMtools. Bioinformatics , 25(16), 2078–2079. doi: 10.1093/bioinformatics/btp352. + > Koren S, Walenz BP, Berlin K, Miller JR, Bergman NH, Phillippy AM. Canu: scalable and accurate long-read assembly via adaptive k-mer weighting and repeat separation. Genome Res. 2017 May;27(5):722-736. doi: 10.1101/gr.215087.116. Epub 2017 Mar 15. PMID: 28298431; PMCID: PMC5411767. 
-* [Kraken2](https://doi.org/10.1186/s13059-019-1891-0) - > Wood, D et al., 2019. Improved metagenomic analysis with Kraken 2. Genome Biology volume 20, Article number: 257. doi: 10.1186/s13059-019-1891-0. +- [QUAST](https://pubmed.ncbi.nlm.nih.gov/23422339/) -* [MultiQC](https://www.ncbi.nlm.nih.gov/pubmed/27312411/) - > Ewels P, Magnusson M, Lundin S, Käller M. MultiQC: summarize analysis results for multiple tools and samples in a single report. Bioinformatics. 2016 Oct 1;32(19):3047-8. doi: 10.1093/bioinformatics/btw354. Epub 2016 Jun 16. PubMed PMID: 27312411; PubMed Central PMCID: PMC5039924 + > Gurevich A, Saveliev V, Vyahhi N, Tesler G. QUAST: quality assessment tool for genome assemblies. Bioinformatics. 2013 Apr 15;29(8):1072-5. doi: 10.1093/bioinformatics/btt086. Epub 2013 Feb 19. PMID: 23422339; PMCID: PMC3624806. + +- [Prokka](https://pubmed.ncbi.nlm.nih.gov/24642063/) + + > Seemann T. Prokka: rapid prokaryotic genome annotation. Bioinformatics. 2014 Jul 15;30(14):2068-9. doi: 10.1093/bioinformatics/btu153. Epub 2014 Mar 18. PMID: 24642063. + +- [DFAST](https://pubmed.ncbi.nlm.nih.gov/29106469/) + + > Tanizawa Y, Fujisawa T, Nakamura Y. DFAST: a flexible prokaryotic genome annotation pipeline for faster genome publication. Bioinformatics. 2018 Mar 15;34(6):1037-1039. doi: 10.1093/bioinformatics/btx713. PMID: 29106469; PMCID: PMC5860143. + +- [Medaka](https://github.com/nanoporetech/medaka) + +- [Nanopolish](https://github.com/jts/nanopolish) + +- [SAMtools](https://doi.org/10.1093/bioinformatics/btp352) + + > Li, H., Handsaker, B., Wysoker, A., Fennell, T., Ruan, J., Homer, N., … 1000 Genome Project Data Processing Subgroup. (2009). The Sequence Alignment/Map format and SAMtools. Bioinformatics , 25(16), 2078–2079. doi: 10.1093/bioinformatics/btp352. + +- [Kraken2](https://doi.org/10.1186/s13059-019-1891-0) + + > Wood, D et al., 2019. Improved metagenomic analysis with Kraken 2. Genome Biology volume 20, Article number: 257. doi: 10.1186/s13059-019-1891-0. + +- [MultiQC](https://www.ncbi.nlm.nih.gov/pubmed/27312411/) + > Ewels P, Magnusson M, Lundin S, Käller M. MultiQC: summarize analysis results for multiple tools and samples in a single report. Bioinformatics. 2016 Oct 1;32(19):3047-8. doi: 10.1093/bioinformatics/btw354. Epub 2016 Jun 16. PubMed PMID: 27312411; PubMed Central PMCID: PMC5039924 ## Data -* [Full-size test data](https://pubmed.ncbi.nlm.nih.gov/32561582/) - > Blackwell N, Bryce C, Straub D, Kappler A, Kleindienst S. Genomic Insights into Two Novel Fe(II)-Oxidizing Zetaproteobacteria Isolates Reveal Lifestyle Adaption to Coastal Marine Sediments. Appl Environ Microbiol. 2020 Aug 18;86(17):e01160-20. doi: 10.1128/AEM.01160-20. PMID: 32561582; PMCID: PMC7440796. +- [Full-size test data](https://pubmed.ncbi.nlm.nih.gov/32561582/) + > Blackwell N, Bryce C, Straub D, Kappler A, Kleindienst S. Genomic Insights into Two Novel Fe(II)-Oxidizing Zetaproteobacteria Isolates Reveal Lifestyle Adaption to Coastal Marine Sediments. Appl Environ Microbiol. 2020 Aug 18;86(17):e01160-20. doi: 10.1128/AEM.01160-20. PMID: 32561582; PMCID: PMC7440796. ## Software packaging/containerisation tools -* [Anaconda](https://anaconda.com) - > Anaconda Software Distribution. Computer software. Vers. 2-2.4.0. Anaconda, Nov. 2016. Web. +- [Anaconda](https://anaconda.com) + + > Anaconda Software Distribution. Computer software. Vers. 2-2.4.0. Anaconda, Nov. 2016. Web. 
+ +- [Bioconda](https://pubmed.ncbi.nlm.nih.gov/29967506/) + + > Grüning B, Dale R, Sjödin A, Chapman BA, Rowe J, Tomkins-Tinch CH, Valieris R, Köster J; Bioconda Team. Bioconda: sustainable and comprehensive software distribution for the life sciences. Nat Methods. 2018 Jul;15(7):475-476. doi: 10.1038/s41592-018-0046-7. PubMed PMID: 29967506. + +- [BioContainers](https://pubmed.ncbi.nlm.nih.gov/28379341/) + + > da Veiga Leprevost F, Grüning B, Aflitos SA, Röst HL, Uszkoreit J, Barsnes H, Vaudel M, Moreno P, Gatto L, Weber J, Bai M, Jimenez RC, Sachsenberg T, Pfeuffer J, Alvarez RV, Griss J, Nesvizhskii AI, Perez-Riverol Y. BioContainers: an open-source and community-driven framework for software standardization. Bioinformatics. 2017 Aug 15;33(16):2580-2582. doi: 10.1093/bioinformatics/btx192. PubMed PMID: 28379341; PubMed Central PMCID: PMC5870671. -* [Bioconda](https://pubmed.ncbi.nlm.nih.gov/29967506/) - > Grüning B, Dale R, Sjödin A, Chapman BA, Rowe J, Tomkins-Tinch CH, Valieris R, Köster J; Bioconda Team. Bioconda: sustainable and comprehensive software distribution for the life sciences. Nat Methods. 2018 Jul;15(7):475-476. doi: 10.1038/s41592-018-0046-7. PubMed PMID: 29967506. +- [Docker](https://dl.acm.org/doi/10.5555/2600239.2600241) -* [BioContainers](https://pubmed.ncbi.nlm.nih.gov/28379341/) - > da Veiga Leprevost F, Grüning B, Aflitos SA, Röst HL, Uszkoreit J, Barsnes H, Vaudel M, Moreno P, Gatto L, Weber J, Bai M, Jimenez RC, Sachsenberg T, Pfeuffer J, Alvarez RV, Griss J, Nesvizhskii AI, Perez-Riverol Y. BioContainers: an open-source and community-driven framework for software standardization. Bioinformatics. 2017 Aug 15;33(16):2580-2582. doi: 10.1093/bioinformatics/btx192. PubMed PMID: 28379341; PubMed Central PMCID: PMC5870671. + > Merkel, D. (2014). Docker: lightweight linux containers for consistent development and deployment. Linux Journal, 2014(239), 2. doi: 10.5555/2600239.2600241. -* [Docker](https://dl.acm.org/doi/10.5555/2600239.2600241) +- [Singularity](https://pubmed.ncbi.nlm.nih.gov/28494014/) -* [Singularity](https://pubmed.ncbi.nlm.nih.gov/28494014/) - > Kurtzer GM, Sochat V, Bauer MW. Singularity: Scientific containers for mobility of compute. PLoS One. 2017 May 11;12(5):e0177459. doi: 10.1371/journal.pone.0177459. eCollection 2017. PubMed PMID: 28494014; PubMed Central PMCID: PMC5426675. + > Kurtzer GM, Sochat V, Bauer MW. Singularity: Scientific containers for mobility of compute. PLoS One. 2017 May 11;12(5):e0177459. doi: 10.1371/journal.pone.0177459. eCollection 2017. PubMed PMID: 28494014; PubMed Central PMCID: PMC5426675. 
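A practical consequence of the CI and PR-template changes above is that every pipeline invocation now needs an explicit `--outdir`. A minimal local smoke test mirroring the updated `ci.yml` job might look like the sketch below, assuming Docker and a Nextflow at or above the new 23.04.0 minimum are installed; `results` is an arbitrary output directory:

```bash
# Run the bundled test profile with Docker, writing to an explicit
# output directory as the updated PR checklist and CI job now require.
nextflow run . -profile test,docker --outdir results
```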
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index f4fd052f..c089ec78 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,18 +1,20 @@ -# Code of Conduct at nf-core (v1.0) +# Code of Conduct at nf-core (v1.4) ## Our Pledge -In the interest of fostering an open, collaborative, and welcoming environment, we as contributors and maintainers of nf-core, pledge to making participation in our projects and community a harassment-free experience for everyone, regardless of: +In the interest of fostering an open, collaborative, and welcoming environment, we as contributors and maintainers of nf-core pledge to making participation in our projects and community a harassment-free experience for everyone, regardless of: - Age +- Ability - Body size +- Caste - Familial status - Gender identity and expression - Geographical location - Level of experience - Nationality and national origins - Native language -- Physical and neurological ability +- Neurodiversity - Race or ethnicity - Religion - Sexual identity and orientation @@ -22,80 +24,133 @@ Please note that the list above is alphabetised and is therefore not ranked in a ## Preamble -> Note: This Code of Conduct (CoC) has been drafted by the nf-core Safety Officer and been edited after input from members of the nf-core team and others. "We", in this document, refers to the Safety Officer and members of the nf-core core team, both of whom are deemed to be members of the nf-core community and are therefore required to abide by this Code of Conduct. This document will amended periodically to keep it up-to-date, and in case of any dispute, the most current version will apply. +:::note +This Code of Conduct (CoC) has been drafted by Renuka Kudva, Cris Tuñí, and Michael Heuer, with input from the nf-core Core Team and Susanna Marquez from the nf-core community. "We", in this document, refers to the Safety Officers and members of the nf-core Core Team, both of whom are deemed to be members of the nf-core community and are therefore required to abide by this Code of Conduct. This document will be amended periodically to keep it up-to-date. In case of any dispute, the most current version will apply. +::: -An up-to-date list of members of the nf-core core team can be found [here](https://nf-co.re/about). Our current safety officer is Renuka Kudva. +An up-to-date list of members of the nf-core core team can be found [here](https://nf-co.re/about). + +Our Safety Officers are Saba Nafees, Cris Tuñí, and Michael Heuer. nf-core is a young and growing community that welcomes contributions from anyone with a shared vision for [Open Science Policies](https://www.fosteropenscience.eu/taxonomy/term/8). Open science policies encompass inclusive behaviours and we strive to build and maintain a safe and inclusive environment for all individuals. -We have therefore adopted this code of conduct (CoC), which we require all members of our community and attendees in nf-core events to adhere to in all our workspaces at all times. Workspaces include but are not limited to Slack, meetings on Zoom, Jitsi, YouTube live etc. +We have therefore adopted this CoC, which we require all members of our community and attendees of nf-core events to adhere to in all our workspaces at all times. Workspaces include, but are not limited to, Slack, meetings on Zoom, gather.town, YouTube live etc. -Our CoC will be strictly enforced and the nf-core team reserve the right to exclude participants who do not comply with our guidelines from our workspaces and future nf-core activities. 
+Our CoC will be strictly enforced and the nf-core team reserves the right to exclude participants who do not comply with our guidelines from our workspaces and future nf-core activities. -We ask all members of our community to help maintain a supportive and productive workspace and to avoid behaviours that can make individuals feel unsafe or unwelcome. Please help us maintain and uphold this CoC. +We ask all members of our community to help maintain supportive and productive workspaces and to avoid behaviours that can make individuals feel unsafe or unwelcome. Please help us maintain and uphold this CoC. -Questions, concerns or ideas on what we can include? Contact safety [at] nf-co [dot] re +Questions, concerns, or ideas on what we can include? Contact members of the Safety Team on Slack or email safety [at] nf-co [dot] re. ## Our Responsibilities -The safety officer is responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behaviour. +Members of the Safety Team (the Safety Officers) are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behaviour. -The safety officer in consultation with the nf-core core team have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. +The Safety Team, in consultation with the nf-core core team, have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this CoC, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. -Members of the core team or the safety officer who violate the CoC will be required to recuse themselves pending investigation. They will not have access to any reports of the violations and be subject to the same actions as others in violation of the CoC. +Members of the core team or the Safety Team who violate the CoC will be required to recuse themselves pending investigation. They will not have access to any reports of the violations and will be subject to the same actions as others in violation of the CoC. -## When are where does this Code of Conduct apply? +## When and where does this Code of Conduct apply? -Participation in the nf-core community is contingent on following these guidelines in all our workspaces and events. This includes but is not limited to the following listed alphabetically and therefore in no order of preference: +Participation in the nf-core community is contingent on following these guidelines in all our workspaces and events, such as hackathons, workshops, bytesize, and collaborative workspaces on gather.town. These guidelines include, but are not limited to, the following (listed alphabetically and therefore in no order of preference): - Communicating with an official project email address. - Communicating with community members within the nf-core Slack channel. - Participating in hackathons organised by nf-core (both online and in-person events). 
-- Participating in collaborative work on GitHub, Google Suite, community calls, mentorship meetings, email correspondence. -- Participating in workshops, training, and seminar series organised by nf-core (both online and in-person events). This applies to events hosted on web-based platforms such as Zoom, Jitsi, YouTube live etc. +- Participating in collaborative work on GitHub, Google Suite, community calls, mentorship meetings, email correspondence, and on the nf-core gather.town workspace. +- Participating in workshops, training, and seminar series organised by nf-core (both online and in-person events). This applies to events hosted on web-based platforms such as Zoom, gather.town, Jitsi, YouTube live etc. - Representing nf-core on social media. This includes both official and personal accounts. ## nf-core cares 😊 -nf-core's CoC and expectations of respectful behaviours for all participants (including organisers and the nf-core team) include but are not limited to the following (listed in alphabetical order): +nf-core's CoC and expectations of respectful behaviours for all participants (including organisers and the nf-core team) include, but are not limited to, the following (listed in alphabetical order): - Ask for consent before sharing another community member’s personal information (including photographs) on social media. - Be respectful of differing viewpoints and experiences. We are all here to learn from one another and a difference in opinion can present a good learning opportunity. -- Celebrate your accomplishments at events! (Get creative with your use of emojis 🎉 🥳 💯 🙌 !) +- Celebrate your accomplishments! (Get creative with your use of emojis 🎉 🥳 💯 🙌 !) - Demonstrate empathy towards other community members. (We don’t all have the same amount of time to dedicate to nf-core. If tasks are pending, don’t hesitate to gently remind members of your team. If you are leading a task, ask for help if you feel overwhelmed.) - Engage with and enquire after others. (This is especially important given the geographically remote nature of the nf-core community, so let’s do this the best we can) - Focus on what is best for the team and the community. (When in doubt, ask) -- Graciously accept constructive criticism, yet be unafraid to question, deliberate, and learn. +- Accept feedback, yet be unafraid to question, deliberate, and learn. - Introduce yourself to members of the community. (We’ve all been outsiders and we know that talking to strangers can be hard for some, but remember we’re interested in getting to know you and your visions for open science!) -- Show appreciation and **provide clear feedback**. (This is especially important because we don’t see each other in person and it can be harder to interpret subtleties. Also remember that not everyone understands a certain language to the same extent as you do, so **be clear in your communications to be kind.**) +- Show appreciation and **provide clear feedback**. (This is especially important because we don’t see each other in person and it can be harder to interpret subtleties. Also remember that not everyone understands a certain language to the same extent as you do, so **be clear in your communication to be kind.**) - Take breaks when you feel like you need them. -- Using welcoming and inclusive language. (Participants are encouraged to display their chosen pronouns on Zoom or in communication on Slack.) +- Use welcoming and inclusive language. 
(Participants are encouraged to display their chosen pronouns on Zoom or in communication on Slack) ## nf-core frowns on 😕 -The following behaviours from any participants within the nf-core community (including the organisers) will be considered unacceptable under this code of conduct. Engaging or advocating for any of the following could result in expulsion from nf-core workspaces. +The following behaviours from any participants within the nf-core community (including the organisers) will be considered unacceptable under this CoC. Engaging or advocating for any of the following could result in expulsion from nf-core workspaces: - Deliberate intimidation, stalking or following and sustained disruption of communication among participants of the community. This includes hijacking shared screens through actions such as using the annotate tool in conferencing software such as Zoom. - “Doxing” i.e. posting (or threatening to post) another person’s personal identifying information online. - Spamming or trolling of individuals on social media. -- Use of sexual or discriminatory imagery, comments, or jokes and unwelcome sexual attention. -- Verbal and text comments that reinforce social structures of domination related to gender, gender identity and expression, sexual orientation, ability, physical appearance, body size, race, age, religion or work experience. +- Use of sexual or discriminatory imagery, comments, jokes, or unwelcome sexual attention. +- Verbal and text comments that reinforce social structures of domination related to gender, gender identity and expression, sexual orientation, ability, physical appearance, body size, race, age, religion, or work experience. ### Online Trolling -The majority of nf-core interactions and events are held online. Unfortunately, holding events online comes with the added issue of online trolling. This is unacceptable, reports of such behaviour will be taken very seriously, and perpetrators will be excluded from activities immediately. +The majority of nf-core interactions and events are held online. Unfortunately, holding events online comes with the risk of online trolling. This is unacceptable — reports of such behaviour will be taken very seriously and perpetrators will be excluded from activities immediately. -All community members are required to ask members of the group they are working within for explicit consent prior to taking screenshots of individuals during video calls. +All community members are **required** to ask members of the group they are working with for explicit consent prior to taking screenshots of individuals during video calls. -## Procedures for Reporting CoC violations +## Procedures for reporting CoC violations If someone makes you feel uncomfortable through their behaviours or actions, report it as soon as possible. -You can reach out to members of the [nf-core core team](https://nf-co.re/about) and they will forward your concerns to the safety officer(s). +You can reach out to members of the Safety Team (Saba Nafees, Cris Tuñí, and Michael Heuer) on Slack. Alternatively, contact a member of the nf-core core team [nf-core core team](https://nf-co.re/about), and they will forward your concerns to the Safety Team. + +Issues directly concerning members of the Core Team or the Safety Team will be dealt with by other members of the core team and the safety manager — possible conflicts of interest will be taken into account. nf-core is also in discussions about having an ombudsperson and details will be shared in due course. 
+ +All reports will be handled with the utmost discretion and confidentiality. + +You can also report any CoC violations to safety [at] nf-co [dot] re. In your email report, please do your best to include: + +- Your contact information. +- Identifying information (e.g. names, nicknames, pseudonyms) of the participant who has violated the Code of Conduct. +- The behaviour that was in violation and the circumstances surrounding the incident. +- The approximate time of the behaviour (if different than the time the report was made). +- Other people involved in the incident, if applicable. +- If you believe the incident is ongoing. +- If there is a publicly available record (e.g. mailing list record, a screenshot). +- Any additional information. + +After you file a report, one or more members of our Safety Team will contact you to follow up on your report. + +## Who will read and handle reports + +All reports will be read and handled by the members of the Safety Team at nf-core. + +If members of the Safety Team are deemed to have a conflict of interest with a report, they will be required to recuse themselves as per our Code of Conduct and will not have access to any follow-ups. + +To keep this first report confidential from any of the Safety Team members, please submit your first report by direct messaging on Slack/direct email to any of the nf-core members you are comfortable disclosing the information to, and be explicit about which member(s) you do not consent to sharing the information with. + +## Reviewing reports + +After receiving the report, members of the Safety Team will review the incident report to determine whether immediate action is required, for example, whether there is immediate threat to participants’ safety. + +The Safety Team, in consultation with members of the nf-core core team, will assess the information to determine whether the report constitutes a Code of Conduct violation, for them to decide on a course of action. + +In the case of insufficient information, one or more members of the Safety Team may contact the reporter, the reportee, or any other attendees to obtain more information. -Issues directly concerning members of the core team will be dealt with by other members of the core team and the safety manager, and possible conflicts of interest will be taken into account. nf-core is also in discussions about having an ombudsperson, and details will be shared in due course. +Once additional information is gathered, the Safety Team will collectively review and decide on the best course of action to take, if any. The Safety Team reserves the right to not act on a report. -All reports will be handled with utmost discretion and confidentially. +## Confidentiality + +All reports, and any additional information included, are only shared with the team of safety officers (and possibly members of the core team, in case the safety officer is in violation of the CoC). We will respect confidentiality requests for the purpose of protecting victims of abuse. + +We will not name harassment victims, beyond discussions between the safety officer and members of the nf-core team, without the explicit consent of the individuals involved. + +## Enforcement + +Actions taken by the nf-core’s Safety Team may include, but are not limited to: + +- Asking anyone to stop a behaviour. +- Asking anyone to leave the event and online spaces either temporarily, for the remainder of the event, or permanently. +- Removing access to the gather.town and Slack, either temporarily or permanently. 
+- Communicating to all participants to reinforce our expectations for conduct and remind what is unacceptable behaviour; this may be public for practical reasons. +- Communicating to all participants that an incident has taken place and how we will act or have acted — this may be for the purpose of letting event participants know we are aware of and dealing with the incident. +- Banning anyone from participating in nf-core-managed spaces, future events, and activities, either temporarily or permanently. +- No action. ## Attribution and Acknowledgements @@ -106,6 +161,22 @@ All reports will be handled with utmost discretion and confidentially. ## Changelog -### v1.0 - March 12th, 2021 +### v1.4 - February 8th, 2022 + +- Included a new member of the Safety Team. Corrected a typographical error in the text. + +### v1.3 - December 10th, 2021 + +- Added a statement that the CoC applies to nf-core gather.town workspaces. Corrected typographical errors in the text. + +### v1.2 - November 12th, 2021 + +- Removed information specific to reporting CoC violations at the Hackathon in October 2021. + +### v1.1 - October 14th, 2021 + +- Updated with names of new Safety Officers and specific information for the hackathon in October 2021. + +### v1.0 - March 15th, 2021 - Complete rewrite from original [Contributor Covenant](http://contributor-covenant.org/) CoC. diff --git a/README.md b/README.md index 3865ec47..2a10a542 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,17 @@ -# ![nf-core/bacass](docs/images/nf-core-bacass_logo.png) +# ![nf-core/bacass](docs/images/nf-core-bacass_logo_light.png#gh-light-mode-only) ![nf-core/bacass](docs/images/nf-core-bacass_logo_dark.png#gh-dark-mode-only) [![GitHub Actions CI Status](https://github.com/nf-core/bacass/workflows/nf-core%20CI/badge.svg)](https://github.com/nf-core/bacass/actions?query=workflow%3A%22nf-core+CI%22) [![GitHub Actions Linting Status](https://github.com/nf-core/bacass/workflows/nf-core%20linting/badge.svg)](https://github.com/nf-core/bacass/actions?query=workflow%3A%22nf-core+linting%22) [![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/bacass/results) [![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.2669428-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.2669428) -[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A521.04.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/) +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A523.04.0-23aa62.svg)](https://www.nextflow.io/) [![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/) [![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/) [![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/) +[![Launch on Nextflow Tower](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Nextflow%20Tower-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/nf-core/bacass) -[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23bacass-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/bacass) -[![Follow on Twitter](http://img.shields.io/badge/twitter-%40nf__core-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/nf_core) -[![Watch on 
YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core) +[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23bacass-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/bacass)[![Follow on Twitter](http://img.shields.io/badge/twitter-%40nf__core-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/nf_core)[![Follow on Mastodon](https://img.shields.io/badge/mastodon-nf__core-6364ff?labelColor=FFFFFF&logo=mastodon)](https://mstdn.science/@nf_core)[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core) ## Introduction @@ -26,7 +25,7 @@ On release, automated continuous integration tests run the pipeline on a full-si ### Short Read Assembly -This pipeline is primarily for bacterial assembly of next-generation sequencing reads. It can be used to quality trim your reads using [Skewer](https://github.com/relipmoc/skewer) and performs basic sequencing QC using [FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/). Afterwards, the pipeline performs read assembly using [Unicycler](https://github.com/rrwick/Unicycler). Contamination of the assembly is checked using [Kraken2](https://ccb.jhu.edu/software/kraken2/) to verify sample purity. +This pipeline is primarily for bacterial assembly of next-generation sequencing reads. It can be used to quality trim your reads using [FastP](https://github.com/OpenGene/fastp) and performs basic sequencing QC using [FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/). Afterwards, the pipeline performs read assembly using [Unicycler](https://github.com/rrwick/Unicycler). Contamination of the assembly is checked using [Kraken2](https://ccb.jhu.edu/software/kraken2/) to verify sample purity. ### Long Read Assembly @@ -39,45 +38,66 @@ For users specifying both short read and long read (NanoPore) data, the pipeline ### Assembly QC and annotation -In all cases, the assembly is assessed using [QUAST](http://bioinf.spbau.ru/quast). The resulting bacterial assembly is furthermore annotated using [Prokka](https://github.com/tseemann/prokka) or [DFAST](https://github.com/nigyta/dfast_core). +In all cases, the assembly is assessed using [QUAST](http://bioinf.spbau.ru/quast). The resulting bacterial assembly is furthermore annotated using [Prokka](https://github.com/tseemann/prokka), [Bakta](https://github.com/oschwengers/bakta) or [DFAST](https://github.com/nigyta/dfast_core). -## Quick Start +## Usage -1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`) +:::note +If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how +to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline) +with `-profile test` before running the workflow on actual data. +::: -2. 
Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_ +First, prepare a samplesheet with your input data that looks as follows: -3. Download the pipeline and test it on a minimal dataset with a single command: +`samplesheet.tsv`: - ```console - nextflow run nf-core/bacass -profile test, - ``` +```tsv +ID R1 R2 LongFastQ Fast5 GenomeSize +shortreads ./data/S1_R1.fastq.gz ./data/S1_R2.fastq.gz NA NA NA +longreads NA NA ./data/S1_long_fastq.gz ./data/FAST5 2.8m +shortNlong ./data/S1_R1.fastq.gz ./data/S1_R2.fastq.gz ./data/S1_long_fastq.gz ./data/FAST5 2.8m - > * Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment. - > * If you are using `singularity` then the pipeline will auto-detect this and attempt to download the Singularity images directly as opposed to performing a conversion from Docker images. If you are persistently observing issues downloading Singularity images directly due to timeout or network issues then please use the `--singularity_pull_docker_container` parameter to pull and convert the Docker image instead. Alternatively, it is highly recommended to use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to pre-download all of the required containers before running the pipeline and to set the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options to be able to store and re-use the images from a central location for future pipeline runs. - > * If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs. +``` -4. Start running your own analysis! +Each row represents a fastq file (single-end) or a pair of fastq files (paired end). 
- Default: Short read assembly with Unicycler, `--kraken2db` can be any [compressed database (`.tar.gz`/`.tgz`)](https://benlangmead.github.io/aws-indexes/k2): +Default: Short read assembly with Unicycler, `--kraken2db` can be any [compressed database (`.tar.gz`/`.tgz`)](https://benlangmead.github.io/aws-indexes/k2): - ```console - nextflow run nf-core/bacass -profile --input samplesheet.tsv --kraken2db "https://genome-idx.s3.amazonaws.com/kraken/k2_standard_8gb_20210517.tar.gz" - ``` +```console +nextflow run nf-core/bacass -profile --input samplesheet.tsv --kraken2db "https://genome-idx.s3.amazonaws.com/kraken/k2_standard_8gb_20210517.tar.gz" +``` - Long read assembly with Miniasm: +Long read assembly with Miniasm: - ```console - nextflow run nf-core/bacass -profile --input samplesheet.tsv --assembly_type 'long' --assembler 'miniasm' --kraken2db "https://genome-idx.s3.amazonaws.com/kraken/k2_standard_8gb_20210517.tar.gz" - ``` +```console +nextflow run nf-core/bacass -profile --input samplesheet.tsv --assembly_type 'long' --assembler 'miniasm' --kraken2db "https://genome-idx.s3.amazonaws.com/kraken/k2_standard_8gb_20210517.tar.gz" +``` -## Documentation +```bash +nextflow run nf-core/bacass \ + -profile \ + --input samplesheet.tsv \ + --outdir +``` -The nf-core/bacass pipeline comes with documentation about the pipeline [usage](https://nf-co.re/bacass/usage), [parameters](https://nf-co.re/bacass/parameters) and [output](https://nf-co.re/bacass/output). +:::warning +Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. Custom config files including those +provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_; +see [docs](https://nf-co.re/usage/configuration#custom-configuration-files). +::: + +For more details and further functionality, please refer to the [usage documentation](https://nf-co.re/bacass/usage) and the [parameter documentation](https://nf-co.re/bacass/parameters). + +## Pipeline output + +To see the results of an example test run with a full size dataset refer to the [results](https://nf-co.re/bacass/results) tab on the nf-core website pipeline page. +For more details about the output files and reports, please refer to the +[output documentation](https://nf-co.re/bacass/output). ## Credits -nf-core/bacass was initiated by [Andreas Wilm](https://github.com/andreas-wilm), originally written by [Alex Peltzer](https://github.com/apeltzer) (DSL1) and rewritten by [Daniel Straub](https://github.com/d4straub) (DSL2). +nf-core/bacass was initiated by [Andreas Wilm](https://github.com/andreas-wilm), originally written by [Alex Peltzer](https://github.com/apeltzer) (DSL1), rewritten by [Daniel Straub](https://github.com/d4straub) (DSL2) and maintained by [Daniel Valle-Millares](https://github.com/Daniel-VM). ## Contributions and Support @@ -87,7 +107,7 @@ For further information or help, don't hesitate to get in touch on the [Slack `# ## Citations -If you use nf-core/bacass for your analysis, please cite it using the following doi: [10.5281/zenodo.2669428](https://doi.org/10.5281/zenodo.2669428) +If you use nf-core/bacass for your analysis, please cite it using the following doi: [10.5281/zenodo.2669428](https://doi.org/10.5281/zenodo.2669428) An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file. 
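For reference, a hybrid run combining the short-read and long-read examples above can be sketched as follows. This is an illustrative command rather than part of the diff: the `docker` profile and `results` output directory are placeholder choices, while `--assembly_type 'hybrid'` mirrors the setting used in the bundled `conf/test_hybrid.config`.

```console
nextflow run nf-core/bacass \
    -profile docker \
    --input samplesheet.tsv \
    --outdir results \
    --assembly_type 'hybrid' \
    --kraken2db "https://genome-idx.s3.amazonaws.com/kraken/k2_standard_8gb_20210517.tar.gz"
```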
diff --git a/assets/adaptivecard.json b/assets/adaptivecard.json new file mode 100644 index 00000000..52b0afd4 --- /dev/null +++ b/assets/adaptivecard.json @@ -0,0 +1,67 @@ +{ + "type": "message", + "attachments": [ + { + "contentType": "application/vnd.microsoft.card.adaptive", + "contentUrl": null, + "content": { + "\$schema": "http://adaptivecards.io/schemas/adaptive-card.json", + "msteams": { + "width": "Full" + }, + "type": "AdaptiveCard", + "version": "1.2", + "body": [ + { + "type": "TextBlock", + "size": "Large", + "weight": "Bolder", + "color": "<% if (success) { %>Good<% } else { %>Attention<%} %>", + "text": "nf-core/bacass v${version} - ${runName}", + "wrap": true + }, + { + "type": "TextBlock", + "spacing": "None", + "text": "Completed at ${dateComplete} (duration: ${duration})", + "isSubtle": true, + "wrap": true + }, + { + "type": "TextBlock", + "text": "<% if (success) { %>Pipeline completed successfully!<% } else { %>Pipeline completed with errors. The full error message was: ${errorReport}.<% } %>", + "wrap": true + }, + { + "type": "TextBlock", + "text": "The command used to launch the workflow was as follows:", + "wrap": true + }, + { + "type": "TextBlock", + "text": "${commandLine}", + "isSubtle": true, + "wrap": true + } + ], + "actions": [ + { + "type": "Action.ShowCard", + "title": "Pipeline Configuration", + "card": { + "type": "AdaptiveCard", + "\$schema": "http://adaptivecards.io/schemas/adaptive-card.json", + "body": [ + { + "type": "FactSet", + "facts": [<% out << summary.collect{ k,v -> "{\"title\": \"$k\", \"value\" : \"$v\"}"}.join(",\n") %> + ] + } + ] + } + } + ] + } + } + ] +} diff --git a/assets/email_template.txt b/assets/email_template.txt index ae71982d..6b645199 100644 --- a/assets/email_template.txt +++ b/assets/email_template.txt @@ -6,7 +6,6 @@ `._,._,' nf-core/bacass v${version} ---------------------------------------------------- - Run Name: $runName <% if (success){ diff --git a/assets/methods_description_template.yml b/assets/methods_description_template.yml new file mode 100644 index 00000000..c6aa4279 --- /dev/null +++ b/assets/methods_description_template.yml @@ -0,0 +1,28 @@ +id: "nf-core-bacass-methods-description" +description: "Suggested text and references to use when describing pipeline usage within the methods section of a publication." +section_name: "nf-core/bacass Methods Description" +section_href: "https://github.com/nf-core/bacass" +plot_type: "html" +## You inject any metadata in the Nextflow '${workflow}' object +data: | +

+  <h4>Methods</h4>
+  <p>Data was processed using nf-core/bacass v${workflow.manifest.version} ${doi_text} of the nf-core collection of workflows (Ewels et al., 2020), utilising reproducible software environments from the Bioconda (Grüning et al., 2018) and Biocontainers (da Veiga Leprevost et al., 2017) projects.</p>
+  <p>The pipeline was executed with Nextflow v${workflow.nextflow.version} (Di Tommaso et al., 2017) with the following command:</p>
+  <pre><code>${workflow.commandLine}</code></pre>
+  <p>${tool_citations}</p>
+  <h4>References</h4>
+  <ul>
+    <li>Di Tommaso, P., Chatzou, M., Floden, E. W., Barja, P. P., Palumbo, E., & Notredame, C. (2017). Nextflow enables reproducible computational workflows. Nature Biotechnology, 35(4), 316-319. doi: 10.1038/nbt.3820</li>
+    <li>Ewels, P. A., Peltzer, A., Fillinger, S., Patel, H., Alneberg, J., Wilm, A., Garcia, M. U., Di Tommaso, P., & Nahnsen, S. (2020). The nf-core framework for community-curated bioinformatics pipelines. Nature Biotechnology, 38(3), 276-278. doi: 10.1038/s41587-020-0439-x</li>
+    <li>Grüning, B., Dale, R., Sjödin, A., Chapman, B. A., Rowe, J., Tomkins-Tinch, C. H., Valieris, R., Köster, J., & Bioconda Team. (2018). Bioconda: sustainable and comprehensive software distribution for the life sciences. Nature Methods, 15(7), 475–476. doi: 10.1038/s41592-018-0046-7</li>
+    <li>da Veiga Leprevost, F., Grüning, B. A., Alves Aflitos, S., Röst, H. L., Uszkoreit, J., Barsnes, H., Vaudel, M., Moreno, P., Gatto, L., Weber, J., Bai, M., Jimenez, R. C., Sachsenberg, T., Pfeuffer, J., Vera Alvarez, R., Griss, J., Nesvizhskii, A. I., & Perez-Riverol, Y. (2017). BioContainers: an open-source and community-driven framework for software standardization. Bioinformatics (Oxford, England), 33(16), 2580–2582. doi: 10.1093/bioinformatics/btx192</li>
+    ${tool_bibliography}
+  </ul>
+  <div class="alert alert-info">
+    <h5>Notes:</h5>
+    <ul>
+      ${nodoi_text}
+      <li>The command above does not include parameters contained in any configs or profiles that may have been used. Ensure the config file is also uploaded with your publication!</li>
+      <li>You should also cite all software used within this run. Check the "Software Versions" of this report to get version information.</li>
+    </ul>
+  </div>
diff --git a/assets/multiqc_config.yaml b/assets/multiqc_config.yaml deleted file mode 100644 index 0f3cd94d..00000000 --- a/assets/multiqc_config.yaml +++ /dev/null @@ -1,11 +0,0 @@ -report_comment: > - This report has been generated by the nf-core/bacass - analysis pipeline. For information about how to interpret these results, please see the - documentation. -report_section_order: - software_versions: - order: -1000 - nf-core-bacass-summary: - order: -1001 - -export_plots: true diff --git a/assets/multiqc_config.yml b/assets/multiqc_config.yml new file mode 100644 index 00000000..d0641e6a --- /dev/null +++ b/assets/multiqc_config.yml @@ -0,0 +1,13 @@ +report_comment: > + This report has been generated by the nf-core/bacass + analysis pipeline. For information about how to interpret these results, please see the + documentation. +report_section_order: + "nf-core-bacass-methods-description": + order: -1000 + software_versions: + order: -1001 + "nf-core-bacass-summary": + order: -1002 + +export_plots: true diff --git a/assets/nf-core-bacass_logo.png b/assets/nf-core-bacass_logo.png deleted file mode 100644 index 75f92635..00000000 Binary files a/assets/nf-core-bacass_logo.png and /dev/null differ diff --git a/assets/nf-core-bacass_logo_light.png b/assets/nf-core-bacass_logo_light.png new file mode 100644 index 00000000..6a16bb92 Binary files /dev/null and b/assets/nf-core-bacass_logo_light.png differ diff --git a/assets/samplesheet.csv b/assets/samplesheet.tsv similarity index 100% rename from assets/samplesheet.csv rename to assets/samplesheet.tsv diff --git a/assets/schema_input.json b/assets/schema_input.json index 9d286b07..a34ad666 100644 --- a/assets/schema_input.json +++ b/assets/schema_input.json @@ -7,22 +7,76 @@ "items": { "type": "object", "properties": { - "sample": { + "ID": { "type": "string", "pattern": "^\\S+$", - "errorMessage": "Sample name must be provided and cannot contain spaces" + "unique": true, + "errorMessage": "Sample name must be provided and cannot contain spaces", + "meta": ["id"] }, - "fastq_1": { - "type": "string", - "pattern": "^\\S+\\.f(ast)?q\\.gz$", - "errorMessage": "FastQ file for reads 1 must be provided, cannot contain spaces and must have extension '.fq.gz' or '.fastq.gz'" + "R1": { + "errorMessage": "FastQ file for reads 1 must be provided, cannot contain spaces and must have extension '.fq.gz' or '.fastq.gz'", + "anyOf": [ + { + "type": ["string", "null"], + "exists": true, + "pattern": "^(\\S+\\.f(ast)?q\\.gz|NA)$" + }, + { + "type": "string", + "maxLength": 0 + } + ] }, - "fastq_2": { + "R2": { "errorMessage": "FastQ file for reads 2 cannot contain spaces and must have extension '.fq.gz' or '.fastq.gz'", "anyOf": [ + { + "type": ["string", "null"], + "exists": true, + "pattern": "^(\\S+\\.f(ast)?q\\.gz|NA)$" + }, { "type": "string", - "pattern": "^\\S+\\.f(ast)?q\\.gz$" + "maxLength": 0 + } + ] + }, + "LongFastQ": { + "errorMessage": "FastQ file for long reads cannot contain spaces and must have extension '.fq.gz' or '.fastq.gz'", + "anyOf": [ + { + "type": ["string", "null"], + "exists": true, + "pattern": "^(\\S+\\.f(ast)?q\\.gz|NA)$" + }, + { + "type": "string", + "maxLength": 0 + } + ] + }, + "Fast5": { + "errorMessage": "A valid path to Fast5 files. 
Example: ./data/FAST5", + "anyOf": [ + { + "type": ["string", "null"], + "format": "directory-path", + "exists": true, + "pattern": "^(\\/[\\S\\s]*|NA)$" + }, + { + "type": "string", + "maxLength": 0 + } + ] + }, + "GenomeSize": { + "errorMessage": "A number (including decimals) ending with 'm', representing genome size. No spaces allowed.", + "anyOf": [ + { + "type": ["string", "null"], + "pattern": "(\\b\\d+\\.\\d+m\\b|NA)" }, { "type": "string", @@ -31,9 +85,6 @@ ] } }, - "required": [ - "sample", - "fastq_1" - ] + "required": ["ID"] } } diff --git a/assets/sendmail_template.txt b/assets/sendmail_template.txt index 8f0cfd4c..746626db 100644 --- a/assets/sendmail_template.txt +++ b/assets/sendmail_template.txt @@ -12,9 +12,9 @@ $email_html Content-Type: image/png;name="nf-core-bacass_logo.png" Content-Transfer-Encoding: base64 Content-ID: -Content-Disposition: inline; filename="nf-core-bacass_logo.png" +Content-Disposition: inline; filename="nf-core-bacass_logo_light.png" -<% out << new File("$projectDir/assets/nf-core-bacass_logo.png"). +<% out << new File("$projectDir/assets/nf-core-bacass_logo_light.png"). bytes. encodeBase64(). toString(). diff --git a/assets/slackreport.json b/assets/slackreport.json new file mode 100644 index 00000000..b36b9c71 --- /dev/null +++ b/assets/slackreport.json @@ -0,0 +1,34 @@ +{ + "attachments": [ + { + "fallback": "Plain-text summary of the attachment.", + "color": "<% if (success) { %>good<% } else { %>danger<%} %>", + "author_name": "nf-core/bacass v${version} - ${runName}", + "author_icon": "https://www.nextflow.io/docs/latest/_static/favicon.ico", + "text": "<% if (success) { %>Pipeline completed successfully!<% } else { %>Pipeline completed with errors<% } %>", + "fields": [ + { + "title": "Command used to launch the workflow", + "value": "```${commandLine}```", + "short": false + } + <% + if (!success) { %> + , + { + "title": "Full error message", + "value": "```${errorReport}```", + "short": false + }, + { + "title": "Pipeline configuration", + "value": "<% out << summary.collect{ k,v -> k == "hook_url" ? "_${k}_: (_hidden_)" : ( ( v.class.toString().contains('Path') || ( v.class.toString().contains('String') && v.contains('/') ) ) ? "_${k}_: `${v}`" : (v.class.toString().contains('DateTime') ? ("_${k}_: " + v.format(java.time.format.DateTimeFormatter.ofLocalizedDateTime(java.time.format.FormatStyle.MEDIUM))) : "_${k}_: ${v}") ) }.join(",\n") %>", + "short": false + } + <% } + %> + ], + "footer": "Completed at <% out << dateComplete.format(java.time.format.DateTimeFormatter.ofLocalizedDateTime(java.time.format.FormatStyle.MEDIUM)) %> (duration: ${duration})" + } + ] +} diff --git a/assets/test_config_dfast.py b/assets/test_config_dfast.py index 234b0bdc..0dc915ef 100644 --- a/assets/test_config_dfast.py +++ b/assets/test_config_dfast.py @@ -18,17 +18,15 @@ class Config: # Otherwise, sequences will be renamed as 'Sequence001, Sequence002, ...'. # If 'sort_by_length' is set to True, sequences will be sorted so that longer sequences come first. # In a draft genome, sequences shorter than 'minimum_length' will be eliminated. - "complete": False, - "use_original_name": False, # If set to True, the first word in the Fasta header line will be used as a sequence name. + "use_original_name": False, # If set to True, the first word in the Fasta header line will be used as a sequence name. 
"sort_by_length": True, - "minimum_length": 200 + "minimum_length": 200, } GENOME_SOURCE_INFORMATION = { # These attributes are reflected in the source feature, # and do not affect the annotation results. - "organism": "", "strain": "", "seq_names": "", @@ -40,17 +38,15 @@ class Config: LOCUS_TAG_SETTINGS = { "locus_tag_prefix": "LOCUS", "step": 10, - "use_separate_tags": True, # If set to `True`, locus_tags are assigned separately according to feature type. + "use_separate_tags": True, # If set to `True`, locus_tags are assigned separately according to feature type. "symbols": {"CDS": "", "rRNA": "r", "tRNA": "t", "tmRNA": "tm"} # "symbols": {"CDS": "", "rRNA": "r", "tRNA": "t", "tmRNA": "tm", "nc_rna": "nc", "misc_rna": "misc"} } FEATURE_ADJUSTMENT = { "remove_partial_features": True, # True: enabled, False: disabled - "remove_overlapping_features": True, # True: enabled, False: disabled "feature_type_priority": ["assembly_gap", "CRISPR", ("tmRNA", "tRNA", "rRNA"), "CDS"], - "merge_cds": False, # True: enabled, False: disabled "tool_type_priority": {"MGA": 0, "Prodigal": 1}, } @@ -60,11 +56,7 @@ class Config: "verbosity": 3 # 1: minimum, 2: standard, 3: rich } - DDBJ_SUBMISSION = { - "enabled": True, - "output_verbosity": 1, - "metadata_file": None - } + DDBJ_SUBMISSION = {"enabled": True, "output_verbosity": 1, "metadata_file": None} GENBANK_SUBMISSION = { "enabled": True, @@ -72,7 +64,6 @@ class Config: "output_verbosity": 1, } - STRUCTURAL_ANNOTATION = [ { # GAP is a Gap Annotation Process that identifies gap regions (N's or n's runs) in the sequence. @@ -81,7 +72,7 @@ class Config: "options": { "len_cutoff": 5, # Gaps shorter than len_cutoff are ignored. "linkage_evidence": "paired-ends", # You can change this as you like. - "gap_type": "within scaffold" # You can change this as you like. + "gap_type": "within scaffold", # You can change this as you like. }, }, { @@ -104,30 +95,24 @@ class Config: # Please insall tRNAscan-SE and put it in your PATH to enable this. "tool_name": "tRNAscan", "enabled": False, - "options": { - "model": "--bact", # --bact, --arch, --organ, --general - "cmd_options": "" - }, + "options": {"model": "--bact", "cmd_options": ""}, # --bact, --arch, --organ, --general }, { # Barrnap for rRNA prediction "tool_name": "Barrnap", - "enabled": False, - "options": { - # Currently, Barrnap will run with default settings. - # You can set parameters such as --reject and --lencutoff to cmd_options. - # "cmd_options": "--reject 0.4 --lencutoff 0.6" - }, + "enabled": False, + "options": { + # Currently, Barrnap will run with default settings. + # You can set parameters such as --reject and --lencutoff to cmd_options. + # "cmd_options": "--reject 0.4 --lencutoff 0.6" + }, }, { # RNAmmer for rRNA prediction. By default, this is disabled. # Please insall RNAmmer and put it in your PATH to enable this. "tool_name": "RNAmmer", "enabled": False, - "options": { - "model": "bac", # arc/bac/euk - "cmd_options": "" - }, + "options": {"model": "bac", "cmd_options": ""}, # arc/bac/euk }, { # CRT for CRISPR detection @@ -151,17 +136,16 @@ class Config: "transl_table": 11, "cmd_options": "", }, - }, + }, ] FUNCTIONAL_ANNOTATION = [ # Fucntional annotation steps will be conducted in the order specified in this list. # You can swithch enabled/disabled, change the order, or add new steps. - { # OrthoSearch (All-vs-all pairwise alignment between each reference genome to assign orthologous genes) # Normally, this should be run before other annotation steps. 
- # In the default workflow, it is disabled. You can enable this by using the "--references" option. + # In the default workflow, it is disabled. You can enable this by using the "--references" option. "component_name": "OrthoSearch", "enabled": False, "options": { @@ -171,12 +155,11 @@ class Config: "scov_cutoff": 75, "aligner": "ghostx", # ghostx, ghostz, or blastp "aligner_options": {}, # Normally, leave this empty. (Current version does not use this option.) - "references": [ - ] + "references": [], }, }, { - # By default, this is disabled. + # By default, this is disabled. # If you want to add your original databases to be searched in prior to default DB, # set 'enabled' to True and specify 'database' # The database file must be in a DFAST reference format, @@ -254,7 +237,7 @@ class Config: "skipAnnotatedFeatures": False, "evalue_cutoff": 1e-6, "db_name": "", # eg 'Pfam', - "database": "" # eg '@@APP_ROOT@@/db/hmm/Pfam-A.hmm' + "database": "", # eg '@@APP_ROOT@@/db/hmm/Pfam-A.hmm' }, }, { @@ -280,5 +263,4 @@ class Config: "rpsbproc_data": "@@APP_ROOT@@/bin/common/rpsbproc_data", # Do not change this. }, }, - ] diff --git a/bin/scrape_software_versions.py b/bin/scrape_software_versions.py deleted file mode 100755 index df04fa4a..00000000 --- a/bin/scrape_software_versions.py +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env python -from __future__ import print_function -import os - -results = {} -version_files = [x for x in os.listdir(".") if x.endswith(".version.txt")] -for version_file in version_files: - - software = version_file.replace(".version.txt", "") - if software == "pipeline": - software = "nf-core/bacass" - - with open(version_file) as fin: - version = fin.read().strip() - results[software] = version - -# Dump to YAML -print( - """ -id: 'software_versions' -section_name: 'nf-core/bacass Software Versions' -section_href: 'https://github.com/nf-core/bacass' -plot_type: 'html' -description: 'are collected at run time from the software output.' -data: | -
-    <dl class="dl-horizontal">
-"""
-)
-for k, v in sorted(results.items()):
-    print("        <dt>{}</dt><dd><samp>{}</samp></dd>".format(k, v))
-print("    </dl>
") - -# Write out as tsv file: -with open("software_versions.tsv", "w") as f: - for k, v in sorted(results.items()): - f.write("{}\t{}\n".format(k, v)) diff --git a/conf/base.config b/conf/base.config index 842ca37c..69fd3dcb 100644 --- a/conf/base.config +++ b/conf/base.config @@ -1,7 +1,7 @@ /* -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ nf-core/bacass Nextflow base config file -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A 'blank slate' config file, appropriate for general use on most high performance compute environments. Assumes that all software is installed and available on the PATH. Runs in `local` mode - all jobs will be run on the logged in environment. @@ -14,7 +14,7 @@ process { memory = { check_max( 1.GB * task.attempt, 'memory' ) } time = { check_max( 1.h * task.attempt, 'time' ) } - errorStrategy = { task.exitStatus in [143,137,104,134,139] ? 'retry' : 'finish' } + errorStrategy = { task.exitStatus in ((130..145) + 104) ? 'retry' : 'finish' } maxRetries = 1 maxErrors = '-1' @@ -24,6 +24,11 @@ process { // If possible, it would be nice to keep the same label naming convention when // adding in your local modules too. // See https://www.nextflow.io/docs/latest/config.html#config-process-selectors + withLabel:process_single { + cpus = { check_max( 1 , 'cpus' ) } + memory = { check_max( 6.GB * task.attempt, 'memory' ) } + time = { check_max( 4.h * task.attempt, 'time' ) } + } withLabel:process_low { cpus = { check_max( 2 * task.attempt, 'cpus' ) } memory = { check_max( 1.GB * task.attempt, 'memory' ) } @@ -52,4 +57,7 @@ process { errorStrategy = 'retry' maxRetries = 3 } + withName:CUSTOM_DUMPSOFTWAREVERSIONS { + cache = false + } } diff --git a/conf/modules.config b/conf/modules.config index 17944342..702ce9e8 100644 --- a/conf/modules.config +++ b/conf/modules.config @@ -1,134 +1,271 @@ /* -======================================================================================== - Config file for defining DSL2 per module options -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + Config file for defining DSL2 per module options and publishing paths +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Available keys to override module options: - args = Additional arguments appended to command in module. - args2 = Second set of arguments appended to command in module (multi-tool modules). - args3 = Third set of arguments appended to command in module (multi-tool modules). - publish_dir = Directory to publish results. 
- publish_by_meta = Groovy list of keys available in meta map to append as directories to "publish_dir" path - If publish_by_meta = true - Value of ${meta['id']} is appended as a directory to "publish_dir" path - If publish_by_meta = ['id', 'custompath'] - If "id" is in meta map and "custompath" isn't then "${meta['id']}/custompath/" - is appended as a directory to "publish_dir" path - If publish_by_meta = false / null - No directories are appended to "publish_dir" path - publish_files = Groovy map where key = "file_ext" and value = "directory" to publish results for that file extension - The value of "directory" is appended to the standard "publish_dir" path as defined above. - If publish_files = null (unspecified) - All files are published. - If publish_files = false - No files are published. - suffix = File name suffix for output files. + ext.args = Additional arguments appended to command in module. + ext.args2 = Second set of arguments appended to command in module (multi-tool modules). + ext.args3 = Third set of arguments appended to command in module (multi-tool modules). + ext.prefix = File name prefix for output files. ---------------------------------------------------------------------------------------- */ -params { - modules { - 'fastqc' { - args = "--quiet" - publish_by_meta = ['id', 'FastQC'] - publish_dir = "." - } - 'skewer' { - args = "-m pe -q 3 -n --quiet" - publish_by_meta = ['id', 'trimming/shortreads'] - publish_dir = "." - } - 'nanoplot' { - args = "" - publish_by_meta = ['id', 'QC_longreads/NanoPlot'] - publish_dir = "." - } - 'pycoqc' { - args = "" - publish_files = [ '.html':'', '.json':'' ] - publish_by_meta = ['id', 'QC_longreads/PycoQC'] - publish_dir = "." - } - 'porechop' { - args = "" - publish_by_meta = ['id', 'trimming/longreads'] - publish_dir = "." - } - 'unicycler' { - args = "" - publish_by_meta = ['id', 'Unicycler'] - publish_dir = "." - } - 'canu' { - args = "" - publish_by_meta = ['id', 'Canu'] - publish_dir = "." - } - 'minimap_align' { - args = "-x ava-ont" - publish_files = false - publish_by_meta = ['id', 'minimap_align'] - publish_dir = "." - } - 'minimap_consensus' { - args = "-x map-ont" - publish_files = false - publish_by_meta = ['id', 'minimap_consensus'] - publish_dir = "." - } - 'minimap_polish' { - args = "-ax map-ont" - publish_files = false - publish_by_meta = ['id', 'minimap_polish'] - publish_dir = "." - } - 'miniasm' { - args = "" - publish_files = [ '_assembly.fasta':'' ] - publish_by_meta = ['id', 'Miniasm'] - publish_dir = "." - } - 'racon' { - args = "" - publish_files = [ '_assembly_consensus.fasta':'' ] - publish_by_meta = ['id', 'Miniasm'] - publish_dir = "." - } - 'medaka' { - args = "" - publish_by_meta = ['id', 'Medaka'] - publish_dir = "." - } - 'nanopolish' { - args = "" - publish_by_meta = ['id', 'Nanopolish'] - publish_dir = "." - } - 'kraken2' { - args = "" - publish_files = [ 'report.txt':'' ] - publish_by_meta = ['id', 'Kraken2'] - publish_dir = "." +// Let modules.config to store module configurations that share both shortreads and longreads +process { + + withName: 'NANOPLOT' { + ext.args = '' + publishDir = [ + path: { "${params.outdir}/QC_longreads/NanoPlot" }, + mode: params.publish_dir_mode, + saveAs: { filename -> filename.equals('versions.yml') ? null : filename } + ] + } + + withName: 'PYCOQC' { + ext.args = '' + publishDir = [ + path: { "${params.outdir}/QC_longreads/PycoQC" }, + mode: params.publish_dir_mode, + saveAs: { filename -> filename.equals('versions.yml') ? 
null : filename } + ] + } + + withName: 'PORECHOP_PORECHOP' { + ext.args = '' + publishDir = [ + path: { "${params.outdir}/trimming/longreads" }, + mode: params.publish_dir_mode, + saveAs: { filename -> filename.equals('versions.yml') ? null : filename } + ] + } + + withName: 'UNICYCLER' { + ext.args = params.unicycler_args ? "${params.unicycler_args}" : '' + publishDir = [ + path: { "${params.outdir}/Unicycler" }, + mode: params.publish_dir_mode, + saveAs: { filename -> filename.equals('versions.yml') ? null : filename } + ] + } + + withName: 'CANU' { + ext.args = { + [ params.canu_args ? "${params.canu_args}" : '', + "merylMemory=${task.memory.toGiga()}G", + "merylThreads=$task.cpus", + "hapThreads=${task.cpus}", + "batMemory=${task.memory.toGiga()}G", + "redMemory=${task.memory.toGiga()}G", + "redThreads=${task.cpus}", + "oeaMemory=${task.memory.toGiga()}G", + "oeaThreads=${task.cpus}", + "corMemory=${task.memory.toGiga()}G", + "corThreads=${task.cpus}" + ].join(' ').trim() } - 'kraken2_long' { - args = "" - suffix = "_longreads" - publish_files = [ 'report.txt':'' ] - publish_by_meta = ['id', 'Kraken2'] - publish_dir = "." + publishDir = [ + path: { "${params.outdir}/Canu" }, + mode: params.publish_dir_mode, + pattern: "*.{contigs.fasta.gz,report}", + saveAs: { filename -> filename.equals('versions.yml') ? null : filename } + ] + } + + withName: 'MINIMAP2_ALIGN' { + ext.args = '-x ava-ont' + } + + withName: 'MINIMAP2_CONSENSUS' { + ext.args = '-x map-ont' + } + + withName: 'MINIMAP2_POLISH' { + ext.args = '-x map-ont' + } + + withName: 'MINIASM' { + ext.args = '' + publishDir = [ + path: { "${params.outdir}/Miniasm" }, + mode: params.publish_dir_mode, + pattern: '*.fasta.gz', + saveAs: { filename -> filename.equals('versions.yml') ? null : filename } + ] + } + + withName: 'RACON' { + ext.args = '' + publishDir = [ + path: { "${params.outdir}/Miniasm" }, + mode: params.publish_dir_mode, + saveAs: { filename -> filename.equals('versions.yml') ? null : filename } + ] + } + + withName: 'MEDAKA' { + ext.args = '' + publishDir = [ + path: { "${params.outdir}/Medaka" }, + mode: params.publish_dir_mode, + saveAs: { filename -> filename.equals('versions.yml') ? null : filename } + ] + } + + withName: 'NANOPOLISH' { + ext.args = '' + publishDir = [ + path: { "${params.outdir}/Nanopolish" }, + mode: params.publish_dir_mode, + saveAs: { filename -> filename.equals('versions.yml') ? null : filename } + ] + } + + withName: 'KRAKEN2' { + ext.args = '' + publishDir = [ + path: { "${params.outdir}/Kraken2" }, + mode: params.publish_dir_mode, + pattern: "*report.txt", + saveAs: { filename -> filename.equals('versions.yml') ? null : filename } + ] + } + + withName: 'KRAKEN2_LONG' { + ext.args = '' + publishDir = [ + path: { "${params.outdir}/kraken2" }, + mode: params.publish_dir_mode, + pattern: "*report.txt", + saveAs: { filename -> + if (filename.equals('versions.yml')) { + null + } else { + "${filename.replaceFirst('\\..+$', '')}_longreads${filename.substring(filename.lastIndexOf('.'))}" + } + } + ] + } + + withName: 'QUAST' { + ext.args = '' + publishDir = [ + path: { "${params.outdir}/QUAST" }, + mode: params.publish_dir_mode, + saveAs: { filename -> filename.equals('versions.yml') ? null : filename } + ] + } + + withName: 'PROKKA' { + ext.args = params.prokka_args ? "${params.prokka_args}" : '' + publishDir = [ + path: { "${params.outdir}/Prokka" }, + mode: params.publish_dir_mode, + saveAs: { filename -> filename.equals('versions.yml') ? 
null : filename } + ] + } + + withName: 'DFAST' { + ext.args = '' + ext.args2 = '' + publishDir = [ + path: { "${params.outdir}/DFAST" }, + mode: params.publish_dir_mode, + saveAs: { filename -> filename.equals('versions.yml') ? null : filename } + ] + } + + withName: 'MULTIQC' { + ext.args = '' + publishDir = [ + path: { "${params.outdir}/multiqc" }, + mode: params.publish_dir_mode, + saveAs: { filename -> filename.equals('versions.yml') ? null : filename } + ] + } + + withName: 'CUSTOM_DUMPSOFTWAREVERSIONS' { + publishDir = [ + path: { "${params.outdir}/pipeline_info" }, + mode: params.publish_dir_mode, + pattern: '*_versions.yml' + ] + } +} + +if (!params.skip_fastqc) { + process { + withName: '.*:.*:FASTQ_TRIM_FASTP_FASTQC:FASTQC_RAW' { + ext.args = '--quiet' + publishDir = [ + path: { "${params.outdir}/FastQC/raw" }, + mode: params.publish_dir_mode, + saveAs: { filename -> filename.equals('versions.yml') ? null : filename } + ] } - 'quast' { - args = "" - publish_by_meta = false //the module allows no meta, it collects all assemblies! - publish_dir = "./QUAST" - suffix = "other_files" + } +} +if (!params.skip_fastp) { + process { + withName: '.*:.*:FASTQ_TRIM_FASTP_FASTQC:FASTP' { + ext.args = '' + publishDir = [ + [ + path: { "${params.outdir}/trimming/shortreads" }, + mode: params.publish_dir_mode, + pattern: "*.fastp.fastq.gz", + saveAs: { filename -> filename.equals('versions.yml') ? null : filename } + ], + [ + path: { "${params.outdir}/trimming/shortreads/json_html" }, + mode: params.publish_dir_mode, + pattern: "*.{json,html}" + ], + [ + path: { "${params.outdir}/trimming/shortreads/log" }, + mode: params.publish_dir_mode, + pattern: "*.log" + ], + [ + path: { "${params.outdir}/trimming/shortreads" }, + mode: params.publish_dir_mode, + pattern: "*.fail.fastq.gz", + enabled: params.save_trimmed_fail + ] + ] } - 'prokka' { - args = "" - publish_by_meta = ['id', 'Prokka'] - publish_dir = "." + } + if (!params.skip_fastqc) { + process { + withName: '.*:.*:FASTQ_TRIM_FASTP_FASTQC:FASTQC_TRIM' { + ext.args = '--quiet' + publishDir = [ + path: { "${params.outdir}/FastQC/trim" }, + mode: params.publish_dir_mode, + pattern: "*.{json,html}", + saveAs: { filename -> filename.equals('versions.yml') ? null : filename } + ] + } } - 'dfast' { - args = "" - publish_by_meta = ['id', 'DFAST'] - publish_dir = "." + } +} + +if (params.annotation_tool == 'bakta') { + if (params.baktadb_download == true) { + process { + withName: '.*:.*:BAKTA_DBDOWNLOAD_RUN:BAKTA_BAKTADBDOWNLOAD' { + ext.args = params.baktadb_download_args ? params.baktadb_download_args : '' + } } - 'multiqc' { - args = "" + } + process { + withName: '.*:.*:BAKTA_DBDOWNLOAD_RUN:BAKTA_BAKTA' { + ext.args = '' + publishDir = [ + path: { "${params.outdir}/Bakta/${meta.id}" }, + mode: params.publish_dir_mode, + saveAs: { filename -> filename.equals('versions.yml') ? null : filename } + ] } } } diff --git a/conf/test.config b/conf/test.config index 38cb8622..c827fd2d 100644 --- a/conf/test.config +++ b/conf/test.config @@ -1,11 +1,11 @@ /* -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Nextflow config file for running minimal tests -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Defines input files and everything required to run a fast and simple pipeline test. 
Use as follows: - nextflow run nf-core/bacass -profile test, + nextflow run nf-core/bacass -profile test, --outdir ---------------------------------------------------------------------------------------- */ @@ -16,11 +16,11 @@ params { // Limit resources so that this can run on GitHub Actions max_cpus = 2 - max_memory = 6.GB - max_time = 6.h + max_memory = '6.GB' + max_time = '6.h' // Input data - input = 'https://raw.githubusercontent.com/nf-core/test-datasets/bacass/bacass_short.csv' + input = 'https://raw.githubusercontent.com/nf-core/test-datasets/bacass/bacass_short.tsv' // some extra args to speed tests up unicycler_args="--no_correct --no_pilon" diff --git a/conf/test_dfast.config b/conf/test_dfast.config index 756554d9..b1b02c4b 100644 --- a/conf/test_dfast.config +++ b/conf/test_dfast.config @@ -20,12 +20,12 @@ params { max_time = 6.h // Input data - input = 'https://raw.githubusercontent.com/nf-core/test-datasets/bacass/bacass_short.csv' + input = 'https://raw.githubusercontent.com/nf-core/test-datasets/bacass/bacass_short.tsv' // some extra args to speed tests up unicycler_args="--no_correct --no_pilon" annotation_tool = 'dfast' assembly_type = 'short' skip_pycoqc = true - skip_kraken2 = true -} \ No newline at end of file + skip_kraken2 = true +} diff --git a/conf/test_full.config b/conf/test_full.config index 298a4447..9432d763 100644 --- a/conf/test_full.config +++ b/conf/test_full.config @@ -1,11 +1,11 @@ /* -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Nextflow config file for running full-size tests -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Defines input files and everything required to run a full size pipeline test. 
Use as follows: - nextflow run nf-core/bacass -profile test_full, + nextflow run nf-core/bacass -profile test_full, --outdir ---------------------------------------------------------------------------------------- */ @@ -15,6 +15,6 @@ params { config_profile_description = 'Full test dataset to check pipeline function' // Input data for full size test - input = 'https://raw.githubusercontent.com/nf-core/test-datasets/bacass/bacass_full.csv' + input = 'https://raw.githubusercontent.com/nf-core/test-datasets/bacass/bacass_full.tsv' kraken2db = 'https://genome-idx.s3.amazonaws.com/kraken/k2_standard_8gb_20210517.tar.gz' } diff --git a/conf/test_hybrid.config b/conf/test_hybrid.config index cd93e699..c27563a8 100644 --- a/conf/test_hybrid.config +++ b/conf/test_hybrid.config @@ -20,7 +20,7 @@ params { max_time = 6.h // Input data - input = 'https://raw.githubusercontent.com/nf-core/test-datasets/bacass/bacass_hybrid.csv' + input = 'https://raw.githubusercontent.com/nf-core/test-datasets/bacass/bacass_hybrid.tsv' // some extra args to speed tests up assembly_type='hybrid' diff --git a/conf/test_long.config b/conf/test_long.config index be225894..e722aae8 100644 --- a/conf/test_long.config +++ b/conf/test_long.config @@ -20,7 +20,7 @@ params { max_time = 6.h // Input data - input = 'https://raw.githubusercontent.com/nf-core/test-datasets/bacass/bacass_long_miniasm.csv' + input = 'https://raw.githubusercontent.com/nf-core/test-datasets/bacass/bacass_long_miniasm.tsv' // some extra args to speed tests up prokka_args = " --fast" diff --git a/conf/test_long_miniasm.config b/conf/test_long_miniasm.config index a68d3124..07af1a2c 100644 --- a/conf/test_long_miniasm.config +++ b/conf/test_long_miniasm.config @@ -20,7 +20,7 @@ params { max_time = 6.h // Input data - input = 'https://raw.githubusercontent.com/nf-core/test-datasets/bacass/bacass_long_miniasm.csv' + input = 'https://raw.githubusercontent.com/nf-core/test-datasets/bacass/bacass_long_miniasm.tsv' // some extra args to speed tests up prokka_args = " --fast" diff --git a/docs/README.md b/docs/README.md index 5fbf9904..cfb27a10 100644 --- a/docs/README.md +++ b/docs/README.md @@ -2,9 +2,9 @@ The nf-core/bacass documentation is split into the following pages: -* [Usage](usage.md) - * An overview of how the pipeline works, how to run it and a description of all of the different command-line flags. -* [Output](output.md) - * An overview of the different results produced by the pipeline and how to interpret them. +- [Usage](usage.md) + - An overview of how the pipeline works, how to run it and a description of all of the different command-line flags. +- [Output](output.md) + - An overview of the different results produced by the pipeline and how to interpret them. 
You can find a lot more documentation about installing, configuring and running nf-core pipelines on the website: [https://nf-co.re](https://nf-co.re) diff --git a/docs/images/nf-core-bacass_logo.png b/docs/images/nf-core-bacass_logo.png deleted file mode 100644 index d37cb23f..00000000 Binary files a/docs/images/nf-core-bacass_logo.png and /dev/null differ diff --git a/docs/images/nf-core-bacass_logo_dark.png b/docs/images/nf-core-bacass_logo_dark.png new file mode 100644 index 00000000..6afe4e9c Binary files /dev/null and b/docs/images/nf-core-bacass_logo_dark.png differ diff --git a/docs/images/nf-core-bacass_logo_light.png b/docs/images/nf-core-bacass_logo_light.png new file mode 100644 index 00000000..6a16bb92 Binary files /dev/null and b/docs/images/nf-core-bacass_logo_light.png differ diff --git a/docs/output.md b/docs/output.md index 974f9e22..4c58ec20 100644 --- a/docs/output.md +++ b/docs/output.md @@ -10,18 +10,18 @@ The directories listed below will be created in the results directory after the The pipeline is built using [Nextflow](https://www.nextflow.io/) and processes data using the following steps: -* [Quality trimming and QC](#quality-trimming-and-qc) - * [Short Read Trimming](#short-read-trimming) - * [Short Read RAW QC](#short-read-raw-qc) - * [Long Read Trimming](#long-read-trimming) - * [Long Read RAW QC](#long-read-raw-qc) -* [Taxonomic classification](#taxonomic-classification) -* [Assembly Output](#assembly-output) - * [Polished assemblies](#polished-assemblies) -* [Assembly QC with QUAST](#assembly-qc-with-quast) -* [Annotation](#annotation) -* [Report](#report) -* [Pipeline information](#pipeline-information) - Report metrics generated during the workflow execution +- [Quality trimming and QC](#quality-trimming-and-qc) + - [Short Read Trimming](#short-read-trimming) + - [Short Read RAW QC](#short-read-raw-qc) + - [Long Read Trimming](#long-read-trimming) + - [Long Read RAW QC](#long-read-raw-qc) +- [Taxonomic classification](#taxonomic-classification) +- [Assembly Output](#assembly-output) + - [Polished assemblies](#polished-assemblies) +- [Assembly QC with QUAST](#assembly-qc-with-quast) +- [Annotation](#annotation) +- [Report](#report) +- [Pipeline information](#pipeline-information) - Report metrics generated during the workflow execution ## Quality trimming and QC @@ -33,8 +33,8 @@ combines reads coming from multiple sequencing runs.
Output files -* `{sample_id}/trimming/shortreads/` - * `*.fastq.gz`: Trimmed (and combined reads) +- `trimming/shortreads/` + - `*.fastp.fastq.gz`: The trimmed/modified/unmerged fastq reads
@@ -42,14 +42,22 @@ combines reads coming from multiple sequencing runs.

[FastQC](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/) gives general quality metrics about your sequenced reads. It provides information about the quality score distribution across your reads, per base sequence content (%A/T/G/C), adapter contamination and overrepresented sequences. For further reading and documentation see the [FastQC help pages](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/).

-> **NB:** The FastQC plots displayed in the MultiQC report shows _untrimmed_ reads. They may contain adapter sequence and potentially regions with low quality.
+![MultiQC - FastQC sequence counts plot](images/mqc_fastqc_counts.png)
+
+![MultiQC - FastQC mean quality scores plot](images/mqc_fastqc_quality.png)
+
+![MultiQC - FastQC adapter content plot](images/mqc_fastqc_adapter.png)
+
+:::note
+The FastQC plots displayed in the MultiQC report show _untrimmed_ reads. They may contain adapter sequence and potentially regions with low quality.
+:::
Output files -* `{sample_id}/FastQC/` - * `*_fastqc.html`: FastQC report containing quality metrics. - * `*_fastqc.zip`: Zip archive containing the FastQC report, tab-delimited data file and plot images. +- `FastQC/` + - `*.html`: FastQC report containing quality metrics. + - `*.zip`: Zip archive containing the FastQC report, tab-delimited data file and plot images. ![FastQC report](images/fastqc.png) @@ -62,8 +70,9 @@ This step performs long read trimming on Nanopore input (if provided).
Output files -* `{sample_id}/trimming/longreads/` - * `trimmed.fastq.gz`: The trimmed FASTQ file +- `trimming/longreads/` + - `*.fastq.gz`: The trimmed FASTQ file + - `*.log*`: Log file
@@ -76,11 +85,11 @@ Please refer to the documentation of [NanoPlot](https://github.com/wdecoster/Nan
Output files -* `{sample_id}/QC_Longreads/NanoPlot`: Various plots in HTML and PNG format +- `QC_Longreads/NanoPlot`: Various plots in HTML and PNG format -* `{sample_id}/QC_Longreads/PycoQC` - * `{sample_id}_pycoqc.html`: QC report in HTML format - * `{sample_id}_pycoqc.json`: QC report in JSON format +- `QC_Longreads/PycoQC` + - `*_pycoqc.html`: QC report in HTML format + - `*_pycoqc.json`: QC report in JSON format Example plot from Nanoplot: @@ -98,9 +107,9 @@ multiple species. If you like to visualize the report, try
Output files -* `{sample}/Kraken2` - * `{sample}.kraken2.report.txt`: Classification of short reads in the Kraken(1) report format. - * `{sample}_longreads.kraken2.report.txt`: Classification of long reads in the Kraken(1) report format. +- `Kraken2/` + - `*.kraken2.report.txt`: Classification of short reads in the Kraken(1) report format. + - `*_longreads.kraken2.report.txt`: Classification of long reads in the Kraken(1) report format. See [webpage](http://ccb.jhu.edu/software/kraken/MANUAL.html#sample-reports) for more details. @@ -118,22 +127,22 @@ Unicycler is a pipeline on its own, which at least for Illumina reads mainly act
Output files -* `{sample_id}/Unicycler` - * `{sample}.scaffolds.fa`: Final assembly in fasta format - * `{sample}.assembly.gfa`: Final assembly in Graphical Fragment Assembly (GFA) format - * `{sample}.unicycler.log`: Log file summarizing steps and intermediate results on the Unicycler execution +- `Unicycler/` + - `*.scaffolds.fa`: Final assembly in fasta format + - `*.assembly.gfa`: Final assembly in Graphical Fragment Assembly (GFA) format + - `*.unicycler.log`: Log file summarizing steps and intermediate results on the Unicycler execution Check out the [Unicycler documentation](https://github.com/rrwick/Unicycler) for more information on Unicycler output. -* `{sample_id}/Canu` - * `{sample}_assembly.fasta`: Final assembly in fasta format - * `{sample}_assembly.report`: Log file +- `Canu/` + - `*.contigs.fasta.gz`: Final assembly in fasta format + - `*.report`: Log file summarizing steps and intermediate results Check out the [Canu documentation](https://canu.readthedocs.io/en/latest/index.html) for more information on Canu output. -* `{sample_id}/Miniasm` - * `{sample}_assembly.fasta`: Assembly in fasta format - * `{sample}_assembly_consensus.fasta`: Consensus assembly in fasta format (polished by Racon) +- `Miniasm/` + - `*.fasta.gz`: Assembly in Fasta format + - `*_assembly_consensus.fasta.gz`: Consensus assembly in fasta format (polished by Racon) Check out the [Miniasm documentation](https://github.com/lh3/miniasm) for more information on Miniasm output. @@ -146,15 +155,16 @@ Long reads assemblies can be polished using [Medaka](https://github.com/nanopore
Output files -* `{sample_id}/Medaka/{sample_id}_polished_genome.fa` - * `consensus.fasta`: Polished consensus assembly in fasta format - * `calls_to_draft.bam`: Alignment in bam format - * `calls_to_draft.bam.bai`: Index of alignment - * `consensus.fasta.gaps_in_draft_coords.bed` - * `consensus_probs.hdf` +- `Medaka/*_polished_genome.fa` -* `{sample_id}/Nanopolish` - * `polished_genome.fa`: Polished consensus assembly in fasta format + - `*_polished_genome.fa`: Polished consensus assembly in fasta format + - `calls_to_draft.bam`: Alignment in bam format + - `calls_to_draft.bam.bai`: Index of alignment + - `consensus.fasta.gaps_in_draft_coords.bed` + - `consensus_probs.hdf` + +- `Nanopolish/` + - `polished_genome.fa`: Polished consensus assembly in fasta format
@@ -165,12 +175,12 @@ The assembly QC is performed with [QUAST](http://quast.sourceforge.net/quast) fo
Output files

-* `QUAST`
-  * `report.tsv`: QUAST's report in text format
-* `QUAST/other_files`
-  * `icarus.html`: QUAST's contig browser as HTML
-  * `report.html`: QUAST assembly QC as HTML report
-  * `report.pdf`: QUAST assembly QC as pdf
+- `QUAST`
+  - `report.tsv`: QUAST's report in text format
+- `QUAST/report`
+  - `icarus.html`: QUAST's contig browser as HTML
+  - `report.html`: QUAST assembly QC as HTML report
+  - `report.pdf`: QUAST assembly QC as pdf

![QUAST QC](images/quast.png)

@@ -180,24 +190,31 @@ The assembly QC is performed with [QUAST](http://quast.sourceforge.net/quast) fo

## Annotation

-By default, the assembly is annotated with [Prokka](https://github.com/tseemann/prokka) which acts as frontend for several annotation tools and includes rRNA and ORF predictions. Alternatively, on request, the assembly is annotated with [DFAST](https://github.com/nigyta/dfast_core).
+By default, the assembly is annotated with [Prokka](https://github.com/tseemann/prokka), which acts as a frontend for several annotation tools and includes rRNA and ORF predictions. Alternatively, on request, the assembly is annotated with [Bakta](https://github.com/oschwengers/bakta) or [DFAST](https://github.com/nigyta/dfast_core).
Output files

-* `{sample_id}/Prokka/{sample_id}`
-  * `{sample_id}.gff`: Annotation in gff format
-  * `{sample_id}.txt`: Annotation in text format
-  * `{sample_id}.faa`: Protein sequences in fasta format
+- `Prokka/{ID}/`
+  - `*.gff`: Annotation in gff format
+  - `*.txt`: Annotation in text format
+  - `*.faa`: Protein sequences in fasta format

See [Prokka's documentation](https://github.com/tseemann/prokka#output-files) for a full description of all output files.

![Prokka annotation](images/prokka.png)

-* `{sample_id}/DFAST/RESULT_{dfast_profile_name}`
-  * `genome.gff`: Annotation in gff format
-  * `statistics.txt`: Annotation statistics in text format
-  * `protein.faa`: Protein sequences in fasta format
+- `Bakta/{ID}/`
+  - `*.gff3`: Annotations in gff3 format
+  - `*.txt`: Summary in txt format
+  - `*.faa`: CDS/sORF amino acid sequences in fasta format
+
+See [Bakta's documentation](https://github.com/oschwengers/bakta#output) for a full description of all output files.
+
+- `DFAST/{ID}_results/`
+  - `genome.gff`: Annotation in gff format
+  - `statistics.txt`: Annotation statistics in text format
+  - `protein.faa`: Protein sequences in fasta format
@@ -214,10 +231,10 @@ Results generated by MultiQC collate pipeline QC from supported tools e.g. FastQ
Output files -* `multiqc/` - * `multiqc_report.html`: a standalone HTML file that can be viewed in your web browser. - * `multiqc_data/`: directory containing parsed statistics from the different tools used in the pipeline. - * `multiqc_plots/`: directory containing static images from the report in various formats. +- `multiqc/` + - `multiqc_report.html`: a standalone HTML file that can be viewed in your web browser. + - `multiqc_data/`: directory containing parsed statistics from the different tools used in the pipeline. + - `multiqc_plots/`: directory containing static images from the report in various formats.
@@ -228,9 +245,10 @@ Results generated by MultiQC collate pipeline QC from supported tools e.g. FastQ
Output files

-* `pipeline_info/`
-  * Reports generated by Nextflow: `execution_report.html`, `execution_timeline.html`, `execution_trace.txt` and `pipeline_dag.dot`/`pipeline_dag.svg`.
-  * Reports generated by the pipeline: `pipeline_report.html`, `pipeline_report.txt` and `software_versions.tsv`.
-  * Reformatted samplesheet files used as input to the pipeline: `samplesheet.valid.csv`.
+- `pipeline_info/`
+  - Reports generated by Nextflow: `execution_report.html`, `execution_timeline.html`, `execution_trace.txt` and `pipeline_dag.dot`/`pipeline_dag.svg`.
+  - Reports generated by the pipeline: `pipeline_report.html`, `pipeline_report.txt` and `software_versions.yml`. The `pipeline_report*` files will only be present if the `--email` / `--email_on_fail` parameters are used when running the pipeline.
+  - Reformatted samplesheet files used as input to the pipeline: `samplesheet.valid.csv`.
+  - Parameters used by the pipeline run: `params.json`.
diff --git a/docs/usage.md b/docs/usage.md index a2cab431..e26ef5a0 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -8,7 +8,7 @@ You will need to create a samplesheet with information about the samples you would like to analyse before running the pipeline. Use this parameter to specify its location. It has to be a comma-separated file with 3 columns, and a header row as shown in the examples below. -```console +```bash --input '[path to samplesheet file]' ``` @@ -27,41 +27,65 @@ shortNlong ./data/S1_R1.fastq.gz ./data/S1_R2.fastq.gz ./data/S1_long_f > **NB:** `./data/FAST5` points at a folder containing all (i.e. one or mutiple) fast5 files that correspond to the long reads. `NA` indicates that the file is missing. -| Column | Description | -|-|-| -| `sample` | Custom sample name. May not contain spaces. | -| `R1` | Full path to FastQ file for Illumina short reads 1. File has to be gzipped and have the extension ".fastq.gz" or ".fq.gz". `NA` indicates that the file is missing. | -| `R2` | Full path to FastQ file for Illumina short reads 2. File has to be gzipped and have the extension ".fastq.gz" or ".fq.gz". `NA` indicates that the file is missing. | -| `LongFastQ` | Full path to FastQ file for ONT long reads. File has to be gzipped and have the extension ".fastq.gz" or ".fq.gz". `NA` indicates that the file is missing. | -| `Fast5` | Full path to a folder containing Fast5 file(s) for ONT long reads. `NA` indicates that there are no Fast5 files available. | +| Column | Description | +| ------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `sample` | Custom sample name. May not contain spaces. | +| `R1` | Full path to FastQ file for Illumina short reads 1. File has to be gzipped and have the extension ".fastq.gz" or ".fq.gz". `NA` indicates that the file is missing. | +| `R2` | Full path to FastQ file for Illumina short reads 2. File has to be gzipped and have the extension ".fastq.gz" or ".fq.gz". `NA` indicates that the file is missing. | +| `LongFastQ` | Full path to FastQ file for ONT long reads. File has to be gzipped and have the extension ".fastq.gz" or ".fq.gz". `NA` indicates that the file is missing. | +| `Fast5` | Full path to a folder containing Fast5 file(s) for ONT long reads. `NA` indicates that there are no Fast5 files available. | | `GenomeSize` | Expected genome size. For example, `2.8m` means 2.8 million basepairs genome size expected. This is only used by Canu assembler. `NA` indicates that this value is unknown. | -An [example samplesheet](../assets/samplesheet.csv) has been provided with the pipeline. +An [example samplesheet](../assets/samplesheet.tsv) has been provided with the pipeline. ## Running the pipeline The typical command for running the pipeline is as follows: ```console -nextflow run nf-core/bacass --input samplesheet.csv -profile docker --skip_kraken2 +nextflow run nf-core/bacass --input samplesheet.tsv -profile docker --skip_kraken2 ``` This will launch the pipeline with the `docker` configuration profile. See below for more information about profiles. 
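For illustration, a minimal samplesheet following the tab-separated layout documented above might look like this (sample names, paths and genome size are hypothetical, and the columns are separated by tabs):

```console
sample     R1                     R2                     LongFastQ                Fast5         GenomeSize
shortonly  ./data/S1_R1.fastq.gz  ./data/S1_R2.fastq.gz  NA                       NA            NA
hybrid     ./data/S2_R1.fastq.gz  ./data/S2_R2.fastq.gz  ./data/S2_long.fastq.gz  ./data/FAST5  2.8m
```

The `GenomeSize` value is only consumed by the Canu assembler, so it can safely be left as `NA` for short-read-only samples.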
Note that the pipeline will create the following files in your working directory:

-```console
-work            # Directory containing the nextflow working files
-results         # Finished results (configurable, see below)
-.nextflow_log   # Log file from Nextflow
+```bash
+work                # Directory containing the nextflow working files
+<OUTDIR>            # Finished results in specified location (defined with --outdir)
+.nextflow_log       # Log file from Nextflow
 # Other nextflow hidden files, eg. history of pipeline runs and old logs.
 ```

+If you wish to repeatedly use the same parameters for multiple runs, rather than specifying each flag in the command, you can specify these in a params file.
+
+Pipeline settings can be provided in a `yaml` or `json` file via `-params-file <file>`.
+
+:::warning
+Do not use `-c <file>` to specify parameters as this will result in errors. Custom config files specified with `-c` must only be used for [tuning process resource specifications](https://nf-co.re/docs/usage/configuration#tuning-workflow-resources), other infrastructural tweaks (such as output directories), or module arguments (args).
+:::
+
+The above pipeline run specified with a params file in yaml format:
+
+```bash
+nextflow run nf-core/bacass -profile docker -params-file params.yaml
+```
+
+with `params.yaml` containing:
+
+```yaml
+input: './samplesheet.tsv'
+outdir: './results/'
+<...>
+```
+
+You can also generate such `YAML`/`JSON` files via [nf-core/launch](https://nf-co.re/launch).
+
 ### Updating the pipeline

 When you run the above command, Nextflow automatically pulls the pipeline code from GitHub and stores it as a cached version. When running the pipeline after this, it will always use the cached version if available - even if the pipeline has been updated since. To make sure that you're running the latest version of the pipeline, make sure that you regularly update the cached version of the pipeline:

-```console
+```bash
 nextflow pull nf-core/bacass
 ```

@@ -69,48 +93,60 @@ nextflow pull nf-core/bacass

 It is a good idea to specify a pipeline version when running the pipeline on your data. This ensures that a specific version of the pipeline code and software are used when you run your pipeline. If you keep using the same tag, you'll be running the same version of the pipeline, even if there have been changes to the code since.

-First, go to the [nf-core/bacass releases page](https://github.com/nf-core/bacass/releases) and find the latest version number - numeric only (eg. `1.3.1`). Then specify this when running the pipeline with `-r` (one hyphen) - eg. `-r 1.3.1`.
+First, go to the [nf-core/bacass releases page](https://github.com/nf-core/bacass/releases) and find the latest pipeline version - numeric only (eg. `1.3.1`). Then specify this when running the pipeline with `-r` (one hyphen) - eg. `-r 1.3.1`.

 Of course, you can switch to another version by changing the number after the `-r` flag.

-This version number will be logged in reports when you run the pipeline, so that you'll know what you used when you look back in the future.
+This version number will be logged in reports when you run the pipeline, so that you'll know what you used when you look back in the future. For example, at the bottom of the MultiQC reports.
+
+To further assist in reproducibility, you can share and re-use [parameter files](#running-the-pipeline) to repeat pipeline runs with the same settings without having to write out a command with every single parameter.
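To make the params-file pattern above more concrete, a slightly fuller `params.yaml` for a short-read run could look like the following sketch (the values are illustrative; `assembly_type`, `annotation_tool` and `skip_kraken2` are existing pipeline parameters used here as examples):

```yaml
input: './samplesheet.tsv'
outdir: './results/'
assembly_type: 'short'
annotation_tool: 'prokka'
skip_kraken2: true
```

Launching then reduces to `nextflow run nf-core/bacass -profile docker -params-file params.yaml`, which keeps the full parameter set under version control alongside your analysis.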
+
+:::tip
+If you wish to share such a profile (such as uploading it as supplementary material for academic publications), make sure to NOT include cluster-specific paths to files, nor institution-specific profiles.
+:::

## Core Nextflow arguments

-> **NB:** These options are part of Nextflow and use a _single_ hyphen (pipeline parameters use a double-hyphen).
+:::note
+These options are part of Nextflow and use a _single_ hyphen (pipeline parameters use a double-hyphen).
+:::

### `-profile`

Use this parameter to choose a configuration profile. Profiles can give configuration presets for different compute environments.

-Several generic profiles are bundled with the pipeline which instruct the pipeline to use software packaged using different methods (Docker, Singularity, Podman, Shifter, Charliecloud, Conda) - see below. When using Biocontainers, most of these software packaging methods pull Docker containers from quay.io e.g [FastQC](https://quay.io/repository/biocontainers/fastqc) except for Singularity which directly downloads Singularity images via https hosted by the [Galaxy project](https://depot.galaxyproject.org/singularity/) and Conda which downloads and installs software locally from [Bioconda](https://bioconda.github.io/).
+Several generic profiles are bundled with the pipeline which instruct the pipeline to use software packaged using different methods (Docker, Singularity, Podman, Shifter, Charliecloud, Apptainer, Conda) - see below.

-> We highly recommend the use of Docker or Singularity containers for full pipeline reproducibility, however when this is not possible, Conda is also supported.
+:::info
+We highly recommend the use of Docker or Singularity containers for full pipeline reproducibility, however when this is not possible, Conda is also supported.
+:::

The pipeline also dynamically loads configurations from [https://github.com/nf-core/configs](https://github.com/nf-core/configs) when it runs, making multiple config profiles for various institutional clusters available at run time. For more information and to see if your system is available in these configs please see the [nf-core/configs documentation](https://github.com/nf-core/configs#documentation).

Note that multiple profiles can be loaded, for example: `-profile test,docker` - the order of arguments is important! They are loaded in sequence, so later profiles can overwrite earlier profiles.

-If `-profile` is not specified, the pipeline will run locally and expect all software to be installed and available on the `PATH`. This is _not_ recommended.
-
-* `docker`
-  * A generic configuration profile to be used with [Docker](https://docker.com/)
-* `singularity`
-  * A generic configuration profile to be used with [Singularity](https://sylabs.io/docs/)
-* `podman`
-  * A generic configuration profile to be used with [Podman](https://podman.io/)
-* `shifter`
-  * A generic configuration profile to be used with [Shifter](https://nersc.gitlab.io/development/shifter/how-to-use/)
-* `charliecloud`
-  * A generic configuration profile to be used with [Charliecloud](https://hpc.github.io/charliecloud/)
-* `conda`
-  * A generic configuration profile to be used with [Conda](https://conda.io/docs/). Please only use Conda as a last resort i.e. when it's not possible to run the pipeline with Docker, Singularity, Podman, Shifter or Charliecloud.
-* `test`
-  * A profile with a complete configuration for automated testing
-  * Includes links to test data so needs no other parameters
+If `-profile` is not specified, the pipeline will run locally and expect all software to be installed and available on the `PATH`. This is _not_ recommended, since it can lead to different results on different machines dependent on the computer environment.
+
+- `test`
+  - A profile with a complete configuration for automated testing
+  - Includes links to test data so needs no other parameters
+- `docker`
+  - A generic configuration profile to be used with [Docker](https://docker.com/)
+- `singularity`
+  - A generic configuration profile to be used with [Singularity](https://sylabs.io/docs/)
+- `podman`
+  - A generic configuration profile to be used with [Podman](https://podman.io/)
+- `shifter`
+  - A generic configuration profile to be used with [Shifter](https://nersc.gitlab.io/development/shifter/how-to-use/)
+- `charliecloud`
+  - A generic configuration profile to be used with [Charliecloud](https://hpc.github.io/charliecloud/)
+- `apptainer`
+  - A generic configuration profile to be used with [Apptainer](https://apptainer.org/)
+- `conda`
+  - A generic configuration profile to be used with [Conda](https://conda.io/docs/). Please only use Conda as a last resort i.e. when it's not possible to run the pipeline with Docker, Singularity, Podman, Shifter, Charliecloud, or Apptainer.

### `-resume`

-Specify this when restarting a pipeline. Nextflow will used cached results from any pipeline steps where the inputs are the same, continuing from where it got to previously.
+Specify this when restarting a pipeline. Nextflow will use cached results from any pipeline steps where the inputs are the same, continuing from where it got to previously. For input to be considered the same, not only the names must be identical but the files' contents as well. For more info about this parameter, see [this blog post](https://www.nextflow.io/blog/2019/demystifying-nextflow-resume.html).

You can also supply a run name to resume a specific run: `-resume [run-name]`. Use the `nextflow log` command to show previous run names.

@@ -124,124 +160,19 @@ Specify the path to a specific config file (this is a core Nextflow command). Se

Whilst the default requirements set within the pipeline will hopefully work for most people and with most input data, you may find that you want to customise the compute resources that the pipeline requests. Each step in the pipeline has a default set of requirements for number of CPUs, memory and time. For most of the steps in the pipeline, if the job exits with any of the error codes specified [here](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/conf/base.config#L18) it will automatically be resubmitted with higher requests (2 x original, then 3 x original). If it still fails after the third attempt then the pipeline execution is stopped.
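For instance, a minimal custom config raising the resources for this pipeline's Unicycler assembly step might look like the sketch below (the process name and values are assumptions; substitute the process reported in your error message and limits that fit your machine):

```nextflow
process {
    withName: 'UNICYCLER' {
        cpus   = 8
        memory = 64.GB
        time   = 12.h
    }
}
```

Supplied via `-c custom.config`, this only overrides the resource requests and leaves all other pipeline settings untouched.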
-For example, if the nf-core/rnaseq pipeline is failing after multiple re-submissions of the `STAR_ALIGN` process due to an exit code of `137` this would indicate that there is an out of memory issue:
-
-```console
-[62/149eb0] NOTE: Process `RNASEQ:ALIGN_STAR:STAR_ALIGN (WT_REP1)` terminated with an error exit status (137) -- Execution is retried (1)
-Error executing process > 'RNASEQ:ALIGN_STAR:STAR_ALIGN (WT_REP1)'
-
-Caused by:
-    Process `RNASEQ:ALIGN_STAR:STAR_ALIGN (WT_REP1)` terminated with an error exit status (137)
-
-Command executed:
-    STAR \
-        --genomeDir star \
-        --readFilesIn WT_REP1_trimmed.fq.gz \
-        --runThreadN 2 \
-        --outFileNamePrefix WT_REP1. \
-
+To change the resource requests, please see the [max resources](https://nf-co.re/docs/usage/configuration#max-resources) and [tuning workflow resources](https://nf-co.re/docs/usage/configuration#tuning-workflow-resources) sections of the nf-core website.
-Command exit status:
-    137
+### Custom Containers
-Command output:
-    (empty)
-
-Command error:
-    .command.sh: line 9:  30 Killed    STAR --genomeDir star --readFilesIn WT_REP1_trimmed.fq.gz --runThreadN 2 --outFileNamePrefix WT_REP1.
-Work dir:
-    /home/pipelinetest/work/9d/172ca5881234073e8d76f2a19c88fb
-
-Tip: you can replicate the issue by changing to the process work dir and entering the command `bash .command.run`
-```
-
-To bypass this error you would need to find exactly which resources are set by the `STAR_ALIGN` process. The quickest way is to search for `process STAR_ALIGN` in the [nf-core/rnaseq Github repo](https://github.com/nf-core/rnaseq/search?q=process+STAR_ALIGN). We have standardised the structure of Nextflow DSL2 pipelines such that all module files will be present in the `modules/` directory and so based on the search results the file we want is `modules/nf-core/software/star/align/main.nf`. If you click on the link to that file you will notice that there is a `label` directive at the top of the module that is set to [`label process_high`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/modules/nf-core/software/star/align/main.nf#L9). The [Nextflow `label`](https://www.nextflow.io/docs/latest/process.html#label) directive allows us to organise workflow processes in separate groups which can be referenced in a configuration file to select and configure subset of processes having similar computing requirements. The default values for the `process_high` label are set in the pipeline's [`base.config`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/conf/base.config#L33-L37) which in this case is defined as 72GB. Providing you haven't set any other standard nf-core parameters to __cap__ the [maximum resources](https://nf-co.re/usage/configuration#max-resources) used by the pipeline then we can try and bypass the `STAR_ALIGN` process failure by creating a custom config file that sets at least 72GB of memory, in this case increased to 100GB. The custom config below can then be provided to the pipeline via the [`-c`](#-c) parameter as highlighted in previous sections.
-
-```nextflow
-process {
-    withName: STAR_ALIGN {
-        memory = 100.GB
-    }
-}
-```
+In some cases you may wish to change which container or conda environment a step of the pipeline uses for a particular tool. By default nf-core pipelines use containers and software from the [biocontainers](https://biocontainers.pro/) or [bioconda](https://bioconda.github.io/) projects. However, in some cases the pipeline-specified version may be out of date.
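As a sketch of such an override (the process name comes from this pipeline's annotation step, but the container tag is hypothetical - check quay.io/biocontainers for the exact version and build string before using it):

```nextflow
process {
    withName: 'PROKKA' {
        // Hypothetical tag - look up the current version/build on quay.io/biocontainers
        container = 'quay.io/biocontainers/prokka:1.14.6--pl5321hdfd78af_4'
    }
}
```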
-> **NB:** We specify just the process name i.e. `STAR_ALIGN` in the config file and not the full task name string that is printed to screen in the error message or on the terminal whilst the pipeline is running i.e. `RNASEQ:ALIGN_STAR:STAR_ALIGN`. You may get a warning suggesting that the process selector isn't recognised but you can ignore that if the process name has been specified correctly. This is something that needs to be fixed upstream in core Nextflow.
+To use a different container from the default container or conda environment specified in a pipeline, please see the [updating tool versions](https://nf-co.re/docs/usage/configuration#updating-tool-versions) section of the nf-core website.

-### Tool-specific options
+### Custom Tool Arguments

-For the ultimate flexibility, we have implemented and are using Nextflow DSL2 modules in a way where it is possible for both developers and users to change tool-specific command-line arguments (e.g. providing an additional command-line argument to the `STAR_ALIGN` process) as well as publishing options (e.g. saving files produced by the `STAR_ALIGN` process that aren't saved by default by the pipeline). In the majority of instances, as a user you won't have to change the default options set by the pipeline developer(s), however, there may be edge cases where creating a simple custom config file can improve the behaviour of the pipeline if for example it is failing due to a weird error that requires setting a tool-specific parameter to deal with smaller / larger genomes.
+A pipeline might not always support every possible argument or option of a particular tool used in the pipeline. Fortunately, nf-core pipelines provide some freedom for users to insert additional parameters that the pipeline does not include by default.

-The command-line arguments passed to STAR in the `STAR_ALIGN` module are a combination of:
-
-* Mandatory arguments or those that need to be evaluated within the scope of the module, as supplied in the [`script`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/modules/nf-core/software/star/align/main.nf#L49-L55) section of the module file.
-
-* An [`options.args`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/modules/nf-core/software/star/align/main.nf#L56) string of non-mandatory parameters that is set to be empty by default in the module but can be overwritten when including the module in the sub-workflow / workflow context via the `addParams` Nextflow option.
-
-The nf-core/rnaseq pipeline has a sub-workflow (see [terminology](https://github.com/nf-core/modules#terminology)) specifically to align reads with STAR and to sort, index and generate some basic stats on the resulting BAM files using SAMtools. At the top of this file we import the `STAR_ALIGN` module via the Nextflow [`include`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/subworkflows/nf-core/align_star.nf#L10) keyword and by default the options passed to the module via the `addParams` option are set as an empty Groovy map [here](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/subworkflows/nf-core/align_star.nf#L5); this in turn means `options.args` will be set to empty by default in the module file too.
This is an intentional design choice and allows us to implement well-written sub-workflows composed of a chain of tools that by default run with the bare minimum parameter set for any given tool in order to make it much easier to share across pipelines and to provide the flexibility for users and developers to customise any non-mandatory arguments. - -When including the sub-workflow above in the main pipeline workflow we use the same `include` statement, however, we now have the ability to overwrite options for each of the tools in the sub-workflow including the [`align_options`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/workflows/rnaseq.nf#L225) variable that will be used specifically to overwrite the optional arguments passed to the `STAR_ALIGN` module. In this case, the options to be provided to `STAR_ALIGN` have been assigned sensible defaults by the developer(s) in the pipeline's [`modules.config`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/conf/modules.config#L70-L74) and can be accessed and customised in the [workflow context](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/workflows/rnaseq.nf#L201-L204) too before eventually passing them to the sub-workflow as a Groovy map called `star_align_options`. These options will then be propagated from `workflow -> sub-workflow -> module`. - -As mentioned at the beginning of this section it may also be necessary for users to overwrite the options passed to modules to be able to customise specific aspects of the way in which a particular tool is executed by the pipeline. Given that all of the default module options are stored in the pipeline's `modules.config` as a [`params` variable](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/conf/modules.config#L24-L25) it is also possible to overwrite any of these options via a custom config file. - -Say for example we want to append an additional, non-mandatory parameter (i.e. `--outFilterMismatchNmax 16`) to the arguments passed to the `STAR_ALIGN` module. Firstly, we need to copy across the default `args` specified in the [`modules.config`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/conf/modules.config#L71) and create a custom config file that is a composite of the default `args` as well as the additional options you would like to provide. This is very important because Nextflow will overwrite the default value of `args` that you provide via the custom config. - -As you will see in the example below, we have: - -* appended `--outFilterMismatchNmax 16` to the default `args` used by the module. -* changed the default `publish_dir` value to where the files will eventually be published in the main results directory. -* appended `'bam':''` to the default value of `publish_files` so that the BAM files generated by the process will also be saved in the top-level results directory for the module. Note: `'out':'log'` means any file/directory ending in `out` will now be saved in a separate directory called `my_star_directory/log/`. 
- -```nextflow -params { - modules { - 'star_align' { - args = "--quantMode TranscriptomeSAM --twopassMode Basic --outSAMtype BAM Unsorted --readFilesCommand zcat --runRNGseed 0 --outFilterMultimapNmax 20 --alignSJDBoverhangMin 1 --outSAMattributes NH HI AS NM MD --quantTranscriptomeBan Singleend --outFilterMismatchNmax 16" - publish_dir = "my_star_directory" - publish_files = ['out':'log', 'tab':'log', 'bam':''] - } - } -} -``` - -### Updating containers - -The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. If for some reason you need to use a different version of a particular tool with the pipeline then you just need to identify the `process` name and override the Nextflow `container` definition for that process using the `withName` declaration. For example, in the [nf-core/viralrecon](https://nf-co.re/viralrecon) pipeline a tool called [Pangolin](https://github.com/cov-lineages/pangolin) has been used during the COVID-19 pandemic to assign lineages to SARS-CoV-2 genome sequenced samples. Given that the lineage assignments change quite frequently it doesn't make sense to re-release the nf-core/viralrecon everytime a new version of Pangolin has been released. However, you can override the default container used by the pipeline by creating a custom config file and passing it as a command-line argument via `-c custom.config`. - -1. Check the default version used by the pipeline in the module file for [Pangolin](https://github.com/nf-core/viralrecon/blob/a85d5969f9025409e3618d6c280ef15ce417df65/modules/nf-core/software/pangolin/main.nf#L14-L19) -2. Find the latest version of the Biocontainer available on [Quay.io](https://quay.io/repository/biocontainers/pangolin?tag=latest&tab=tags) -3. Create the custom config accordingly: - - * For Docker: - - ```nextflow - process { - withName: PANGOLIN { - container = 'quay.io/biocontainers/pangolin:3.0.5--pyhdfd78af_0' - } - } - ``` - - * For Singularity: - - ```nextflow - process { - withName: PANGOLIN { - container = 'https://depot.galaxyproject.org/singularity/pangolin:3.0.5--pyhdfd78af_0' - } - } - ``` - - * For Conda: - - ```nextflow - process { - withName: PANGOLIN { - conda = 'bioconda::pangolin=3.0.5' - } - } - ``` - -> **NB:** If you wish to periodically update individual tool-specific results (e.g. Pangolin) generated by the pipeline then you must ensure to keep the `work/` directory otherwise the `-resume` ability of the pipeline will be compromised and it will restart from scratch. +To learn how to provide additional arguments to a particular tool of the pipeline, please see the [customising tool arguments](https://nf-co.re/docs/usage/configuration#customising-tool-arguments) section of the nf-core website. ### nf-core/configs @@ -251,6 +182,14 @@ See the main [Nextflow documentation](https://www.nextflow.io/docs/latest/config If you have any questions or issues please send us a message on [Slack](https://nf-co.re/join/slack) on the [`#configs` channel](https://nfcore.slack.com/channels/configs). +## Azure Resource Requests + +To be used with the `azurebatch` profile by specifying the `-profile azurebatch`. +We recommend providing a compute `params.vm_type` of `Standard_D16_v3` VMs by default but these options can be changed if required. + +Note that the choice of VM size depends on your quota and the overall workload during the analysis. 
+For a thorough list, please refer the [Azure Sizes for virtual machines in Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes). + ## Running in the background Nextflow handles job submissions and supervises the running jobs. The Nextflow process must run until the pipeline is finished. @@ -265,6 +204,6 @@ Some HPC setups also allow you to run nextflow within a cluster job submitted yo In some cases, the Nextflow Java virtual machines can start to request a large amount of memory. We recommend adding the following line to your environment to limit this (typically in `~/.bashrc` or `~./bash_profile`): -```console +```bash NXF_OPTS='-Xms1g -Xmx4g' ``` diff --git a/lib/NfcoreSchema.groovy b/lib/NfcoreSchema.groovy deleted file mode 100755 index 8d6920dd..00000000 --- a/lib/NfcoreSchema.groovy +++ /dev/null @@ -1,517 +0,0 @@ -// -// This file holds several functions used to perform JSON parameter validation, help and summary rendering for the nf-core pipeline template. -// - -import org.everit.json.schema.Schema -import org.everit.json.schema.loader.SchemaLoader -import org.everit.json.schema.ValidationException -import org.json.JSONObject -import org.json.JSONTokener -import org.json.JSONArray -import groovy.json.JsonSlurper -import groovy.json.JsonBuilder - -class NfcoreSchema { - - // - // Resolve Schema path relative to main workflow directory - // - public static String getSchemaPath(workflow, schema_filename='nextflow_schema.json') { - return "${workflow.projectDir}/${schema_filename}" - } - - // - // Function to loop over all parameters defined in schema and check - // whether the given parameters adhere to the specifications - // - /* groovylint-disable-next-line UnusedPrivateMethodParameter */ - public static void validateParameters(workflow, params, log, schema_filename='nextflow_schema.json') { - def has_error = false - //=====================================================================// - // Check for nextflow core params and unexpected params - def json = new File(getSchemaPath(workflow, schema_filename=schema_filename)).text - def Map schemaParams = (Map) new JsonSlurper().parseText(json).get('definitions') - def nf_params = [ - // Options for base `nextflow` command - 'bg', - 'c', - 'C', - 'config', - 'd', - 'D', - 'dockerize', - 'h', - 'log', - 'q', - 'quiet', - 'syslog', - 'v', - 'version', - - // Options for `nextflow run` command - 'ansi', - 'ansi-log', - 'bg', - 'bucket-dir', - 'c', - 'cache', - 'config', - 'dsl2', - 'dump-channels', - 'dump-hashes', - 'E', - 'entry', - 'latest', - 'lib', - 'main-script', - 'N', - 'name', - 'offline', - 'params-file', - 'pi', - 'plugins', - 'poll-interval', - 'pool-size', - 'profile', - 'ps', - 'qs', - 'queue-size', - 'r', - 'resume', - 'revision', - 'stdin', - 'stub', - 'stub-run', - 'test', - 'w', - 'with-charliecloud', - 'with-conda', - 'with-dag', - 'with-docker', - 'with-mpi', - 'with-notification', - 'with-podman', - 'with-report', - 'with-singularity', - 'with-timeline', - 'with-tower', - 'with-trace', - 'with-weblog', - 'without-docker', - 'without-podman', - 'work-dir' - ] - def unexpectedParams = [] - - // Collect expected parameters from the schema - def expectedParams = [] - for (group in schemaParams) { - for (p in group.value['properties']) { - expectedParams.push(p.key) - } - } - - for (specifiedParam in params.keySet()) { - // nextflow params - if (nf_params.contains(specifiedParam)) { - log.error "ERROR: You used a core Nextflow option with two hyphens: '--${specifiedParam}'. 
Please resubmit with '-${specifiedParam}'" - has_error = true - } - // unexpected params - def params_ignore = params.schema_ignore_params.split(',') + 'schema_ignore_params' - def expectedParamsLowerCase = expectedParams.collect{ it.replace("-", "").toLowerCase() } - def specifiedParamLowerCase = specifiedParam.replace("-", "").toLowerCase() - def isCamelCaseBug = (specifiedParam.contains("-") && !expectedParams.contains(specifiedParam) && expectedParamsLowerCase.contains(specifiedParamLowerCase)) - if (!expectedParams.contains(specifiedParam) && !params_ignore.contains(specifiedParam) && !isCamelCaseBug) { - // Temporarily remove camelCase/camel-case params #1035 - def unexpectedParamsLowerCase = unexpectedParams.collect{ it.replace("-", "").toLowerCase()} - if (!unexpectedParamsLowerCase.contains(specifiedParamLowerCase)){ - unexpectedParams.push(specifiedParam) - } - } - } - - //=====================================================================// - // Validate parameters against the schema - InputStream input_stream = new File(getSchemaPath(workflow, schema_filename=schema_filename)).newInputStream() - JSONObject raw_schema = new JSONObject(new JSONTokener(input_stream)) - - // Remove anything that's in params.schema_ignore_params - raw_schema = removeIgnoredParams(raw_schema, params) - - Schema schema = SchemaLoader.load(raw_schema) - - // Clean the parameters - def cleanedParams = cleanParameters(params) - - // Convert to JSONObject - def jsonParams = new JsonBuilder(cleanedParams) - JSONObject params_json = new JSONObject(jsonParams.toString()) - - // Validate - try { - schema.validate(params_json) - } catch (ValidationException e) { - println '' - log.error 'ERROR: Validation of pipeline parameters failed!' - JSONObject exceptionJSON = e.toJSON() - printExceptions(exceptionJSON, params_json, log) - println '' - has_error = true - } - - // Check for unexpected parameters - if (unexpectedParams.size() > 0) { - Map colors = NfcoreTemplate.logColours(params.monochrome_logs) - println '' - def warn_msg = 'Found unexpected parameters:' - for (unexpectedParam in unexpectedParams) { - warn_msg = warn_msg + "\n* --${unexpectedParam}: ${params[unexpectedParam].toString()}" - } - log.warn warn_msg - log.info "- ${colors.dim}Ignore this warning: params.schema_ignore_params = \"${unexpectedParams.join(',')}\" ${colors.reset}" - println '' - } - - if (has_error) { - System.exit(1) - } - } - - // - // Beautify parameters for --help - // - public static String paramsHelp(workflow, params, command, schema_filename='nextflow_schema.json') { - Map colors = NfcoreTemplate.logColours(params.monochrome_logs) - Integer num_hidden = 0 - String output = '' - output += 'Typical pipeline command:\n\n' - output += " ${colors.cyan}${command}${colors.reset}\n\n" - Map params_map = paramsLoad(getSchemaPath(workflow, schema_filename=schema_filename)) - Integer max_chars = paramsMaxChars(params_map) + 1 - Integer desc_indent = max_chars + 14 - Integer dec_linewidth = 160 - desc_indent - for (group in params_map.keySet()) { - Integer num_params = 0 - String group_output = colors.underlined + colors.bold + group + colors.reset + '\n' - def group_params = params_map.get(group) // This gets the parameters of that particular group - for (param in group_params.keySet()) { - if (group_params.get(param).hidden && !params.show_hidden_params) { - num_hidden += 1 - continue; - } - def type = '[' + group_params.get(param).type + ']' - def description = group_params.get(param).description - def defaultValue = 
group_params.get(param).default ? " [default: " + group_params.get(param).default.toString() + "]" : '' - def description_default = description + colors.dim + defaultValue + colors.reset - // Wrap long description texts - // Loosely based on https://dzone.com/articles/groovy-plain-text-word-wrap - if (description_default.length() > dec_linewidth){ - List olines = [] - String oline = "" // " " * indent - description_default.split(" ").each() { wrd -> - if ((oline.size() + wrd.size()) <= dec_linewidth) { - oline += wrd + " " - } else { - olines += oline - oline = wrd + " " - } - } - olines += oline - description_default = olines.join("\n" + " " * desc_indent) - } - group_output += " --" + param.padRight(max_chars) + colors.dim + type.padRight(10) + colors.reset + description_default + '\n' - num_params += 1 - } - group_output += '\n' - if (num_params > 0){ - output += group_output - } - } - if (num_hidden > 0){ - output += colors.dim + "!! Hiding $num_hidden params, use --show_hidden_params to show them !!\n" + colors.reset - } - output += NfcoreTemplate.dashedLine(params.monochrome_logs) - return output - } - - // - // Groovy Map summarising parameters/workflow options used by the pipeline - // - public static LinkedHashMap paramsSummaryMap(workflow, params, schema_filename='nextflow_schema.json') { - // Get a selection of core Nextflow workflow options - def Map workflow_summary = [:] - if (workflow.revision) { - workflow_summary['revision'] = workflow.revision - } - workflow_summary['runName'] = workflow.runName - if (workflow.containerEngine) { - workflow_summary['containerEngine'] = workflow.containerEngine - } - if (workflow.container) { - workflow_summary['container'] = workflow.container - } - workflow_summary['launchDir'] = workflow.launchDir - workflow_summary['workDir'] = workflow.workDir - workflow_summary['projectDir'] = workflow.projectDir - workflow_summary['userName'] = workflow.userName - workflow_summary['profile'] = workflow.profile - workflow_summary['configFiles'] = workflow.configFiles.join(', ') - - // Get pipeline parameters defined in JSON Schema - def Map params_summary = [:] - def blacklist = ['hostnames'] - def params_map = paramsLoad(getSchemaPath(workflow, schema_filename=schema_filename)) - for (group in params_map.keySet()) { - def sub_params = new LinkedHashMap() - def group_params = params_map.get(group) // This gets the parameters of that particular group - for (param in group_params.keySet()) { - if (params.containsKey(param) && !blacklist.contains(param)) { - def params_value = params.get(param) - def schema_value = group_params.get(param).default - def param_type = group_params.get(param).type - if (schema_value != null) { - if (param_type == 'string') { - if (schema_value.contains('$projectDir') || schema_value.contains('${projectDir}')) { - def sub_string = schema_value.replace('\$projectDir', '') - sub_string = sub_string.replace('\${projectDir}', '') - if (params_value.contains(sub_string)) { - schema_value = params_value - } - } - if (schema_value.contains('$params.outdir') || schema_value.contains('${params.outdir}')) { - def sub_string = schema_value.replace('\$params.outdir', '') - sub_string = sub_string.replace('\${params.outdir}', '') - if ("${params.outdir}${sub_string}" == params_value) { - schema_value = params_value - } - } - } - } - - // We have a default in the schema, and this isn't it - if (schema_value != null && params_value != schema_value) { - sub_params.put(param, params_value) - } - // No default in the schema, and this isn't 
empty - else if (schema_value == null && params_value != "" && params_value != null && params_value != false) { - sub_params.put(param, params_value) - } - } - } - params_summary.put(group, sub_params) - } - return [ 'Core Nextflow options' : workflow_summary ] << params_summary - } - - // - // Beautify parameters for summary and return as string - // - public static String paramsSummaryLog(workflow, params) { - Map colors = NfcoreTemplate.logColours(params.monochrome_logs) - String output = '' - def params_map = paramsSummaryMap(workflow, params) - def max_chars = paramsMaxChars(params_map) - for (group in params_map.keySet()) { - def group_params = params_map.get(group) // This gets the parameters of that particular group - if (group_params) { - output += colors.bold + group + colors.reset + '\n' - for (param in group_params.keySet()) { - output += " " + colors.blue + param.padRight(max_chars) + ": " + colors.green + group_params.get(param) + colors.reset + '\n' - } - output += '\n' - } - } - output += "!! Only displaying parameters that differ from the pipeline defaults !!\n" - output += NfcoreTemplate.dashedLine(params.monochrome_logs) - return output - } - - // - // Loop over nested exceptions and print the causingException - // - private static void printExceptions(ex_json, params_json, log) { - def causingExceptions = ex_json['causingExceptions'] - if (causingExceptions.length() == 0) { - def m = ex_json['message'] =~ /required key \[([^\]]+)\] not found/ - // Missing required param - if (m.matches()) { - log.error "* Missing required parameter: --${m[0][1]}" - } - // Other base-level error - else if (ex_json['pointerToViolation'] == '#') { - log.error "* ${ex_json['message']}" - } - // Error with specific param - else { - def param = ex_json['pointerToViolation'] - ~/^#\// - def param_val = params_json[param].toString() - log.error "* --${param}: ${ex_json['message']} (${param_val})" - } - } - for (ex in causingExceptions) { - printExceptions(ex, params_json, log) - } - } - - // - // Remove an element from a JSONArray - // - private static JSONArray removeElement(json_array, element) { - def list = [] - int len = json_array.length() - for (int i=0;i - if(raw_schema.keySet().contains('definitions')){ - raw_schema.definitions.each { definition -> - for (key in definition.keySet()){ - if (definition[key].get("properties").keySet().contains(ignore_param)){ - // Remove the param to ignore - definition[key].get("properties").remove(ignore_param) - // If the param was required, change this - if (definition[key].has("required")) { - def cleaned_required = removeElement(definition[key].required, ignore_param) - definition[key].put("required", cleaned_required) - } - } - } - } - } - if(raw_schema.keySet().contains('properties') && raw_schema.get('properties').keySet().contains(ignore_param)) { - raw_schema.get("properties").remove(ignore_param) - } - if(raw_schema.keySet().contains('required') && raw_schema.required.contains(ignore_param)) { - def cleaned_required = removeElement(raw_schema.required, ignore_param) - raw_schema.put("required", cleaned_required) - } - } - return raw_schema - } - - // - // Clean and check parameters relative to Nextflow native classes - // - private static Map cleanParameters(params) { - def new_params = params.getClass().newInstance(params) - for (p in params) { - // remove anything evaluating to false - if (!p['value']) { - new_params.remove(p.key) - } - // Cast MemoryUnit to String - if (p['value'].getClass() == nextflow.util.MemoryUnit) { - 
new_params.replace(p.key, p['value'].toString()) - } - // Cast Duration to String - if (p['value'].getClass() == nextflow.util.Duration) { - new_params.replace(p.key, p['value'].toString().replaceFirst(/d(?!\S)/, "day")) - } - // Cast LinkedHashMap to String - if (p['value'].getClass() == LinkedHashMap) { - new_params.replace(p.key, p['value'].toString()) - } - } - return new_params - } - - // - // This function tries to read a JSON params file - // - private static LinkedHashMap paramsLoad(String json_schema) { - def params_map = new LinkedHashMap() - try { - params_map = paramsRead(json_schema) - } catch (Exception e) { - println "Could not read parameters settings from JSON. $e" - params_map = new LinkedHashMap() - } - return params_map - } - - // - // Method to actually read in JSON file using Groovy. - // Group (as Key), values are all parameters - // - Parameter1 as Key, Description as Value - // - Parameter2 as Key, Description as Value - // .... - // Group - // - - private static LinkedHashMap paramsRead(String json_schema) throws Exception { - def json = new File(json_schema).text - def Map schema_definitions = (Map) new JsonSlurper().parseText(json).get('definitions') - def Map schema_properties = (Map) new JsonSlurper().parseText(json).get('properties') - /* Tree looks like this in nf-core schema - * definitions <- this is what the first get('definitions') gets us - group 1 - title - description - properties - parameter 1 - type - description - parameter 2 - type - description - group 2 - title - description - properties - parameter 1 - type - description - * properties <- parameters can also be ungrouped, outside of definitions - parameter 1 - type - description - */ - - // Grouped params - def params_map = new LinkedHashMap() - schema_definitions.each { key, val -> - def Map group = schema_definitions."$key".properties // Gets the property object of the group - def title = schema_definitions."$key".title - def sub_params = new LinkedHashMap() - group.each { innerkey, value -> - sub_params.put(innerkey, value) - } - params_map.put(title, sub_params) - } - - // Ungrouped params - def ungrouped_params = new LinkedHashMap() - schema_properties.each { innerkey, value -> - ungrouped_params.put(innerkey, value) - } - params_map.put("Other parameters", ungrouped_params) - - return params_map - } - - // - // Get maximum number of characters across all parameter names - // - private static Integer paramsMaxChars(params_map) { - Integer max_chars = 0 - for (group in params_map.keySet()) { - def group_params = params_map.get(group) // This gets the parameters of that particular group - for (param in group_params.keySet()) { - if (param.size() > max_chars) { - max_chars = param.size() - } - } - } - return max_chars - } -} diff --git a/lib/NfcoreTemplate.groovy b/lib/NfcoreTemplate.groovy index 44551e0a..01b8653d 100755 --- a/lib/NfcoreTemplate.groovy +++ b/lib/NfcoreTemplate.groovy @@ -3,6 +3,7 @@ // import org.yaml.snakeyaml.Yaml +import groovy.json.JsonOutput class NfcoreTemplate { @@ -19,28 +20,36 @@ class NfcoreTemplate { } // - // Check params.hostnames + // Warn if a -profile or Nextflow config has not been provided to run the pipeline // - public static void hostName(workflow, params, log) { - Map colors = logColours(params.monochrome_logs) - if (params.hostnames) { - try { - def hostname = "hostname".execute().text.trim() - params.hostnames.each { prof, hnames -> - hnames.each { hname -> - if (hostname.contains(hname) && !workflow.profile.contains(prof)) { - log.info 
"=${colors.yellow}====================================================${colors.reset}=\n" + - "${colors.yellow}WARN: You are running with `-profile $workflow.profile`\n" + - " but your machine hostname is ${colors.white}'$hostname'${colors.reset}.\n" + - " ${colors.yellow_bold}Please use `-profile $prof${colors.reset}`\n" + - "=${colors.yellow}====================================================${colors.reset}=" - } - } - } - } catch (Exception e) { - log.warn "[$workflow.manifest.name] Could not determine 'hostname' - skipping check. Reason: ${e.message}." - } + public static void checkConfigProvided(workflow, log) { + if (workflow.profile == 'standard' && workflow.configFiles.size() <= 1) { + log.warn "[$workflow.manifest.name] You are attempting to run the pipeline without any custom configuration!\n\n" + + "This will be dependent on your local compute environment but can be achieved via one or more of the following:\n" + + " (1) Using an existing pipeline profile e.g. `-profile docker` or `-profile singularity`\n" + + " (2) Using an existing nf-core/configs for your Institution e.g. `-profile crick` or `-profile uppmax`\n" + + " (3) Using your own local custom config e.g. `-c /path/to/your/custom.config`\n\n" + + "Please refer to the quick start section and usage docs for the pipeline.\n " + } + } + + // + // Generate version string + // + public static String version(workflow) { + String version_string = "" + + if (workflow.manifest.version) { + def prefix_v = workflow.manifest.version[0] != 'v' ? 'v' : '' + version_string += "${prefix_v}${workflow.manifest.version}" + } + + if (workflow.commitId) { + def git_shortsha = workflow.commitId.substring(0, 7) + version_string += "-g${git_shortsha}" } + + return version_string } // @@ -72,7 +81,7 @@ class NfcoreTemplate { misc_fields['Nextflow Compile Timestamp'] = workflow.nextflow.timestamp def email_fields = [:] - email_fields['version'] = workflow.manifest.version + email_fields['version'] = NfcoreTemplate.version(workflow) email_fields['runName'] = workflow.runName email_fields['success'] = workflow.success email_fields['dateComplete'] = workflow.complete @@ -120,7 +129,7 @@ class NfcoreTemplate { def email_html = html_template.toString() // Render the sendmail template - def max_multiqc_email_size = params.max_multiqc_email_size as nextflow.util.MemoryUnit + def max_multiqc_email_size = (params.containsKey('max_multiqc_email_size') ? params.max_multiqc_email_size : 0) as nextflow.util.MemoryUnit def smail_fields = [ email: email_address, subject: subject, email_txt: email_txt, email_html: email_html, projectDir: "$projectDir", mqcFile: mqc_report, mqcMaxSize: max_multiqc_email_size.toBytes() ] def sf = new File("$projectDir/assets/sendmail_template.txt") def sendmail_template = engine.createTemplate(sf).make(smail_fields) @@ -156,6 +165,79 @@ class NfcoreTemplate { output_tf.withWriter { w -> w << email_txt } } + // + // Construct and send a notification to a web server as JSON + // e.g. 
Microsoft Teams and Slack + // + public static void IM_notification(workflow, params, summary_params, projectDir, log) { + def hook_url = params.hook_url + + def summary = [:] + for (group in summary_params.keySet()) { + summary << summary_params[group] + } + + def misc_fields = [:] + misc_fields['start'] = workflow.start + misc_fields['complete'] = workflow.complete + misc_fields['scriptfile'] = workflow.scriptFile + misc_fields['scriptid'] = workflow.scriptId + if (workflow.repository) misc_fields['repository'] = workflow.repository + if (workflow.commitId) misc_fields['commitid'] = workflow.commitId + if (workflow.revision) misc_fields['revision'] = workflow.revision + misc_fields['nxf_version'] = workflow.nextflow.version + misc_fields['nxf_build'] = workflow.nextflow.build + misc_fields['nxf_timestamp'] = workflow.nextflow.timestamp + + def msg_fields = [:] + msg_fields['version'] = NfcoreTemplate.version(workflow) + msg_fields['runName'] = workflow.runName + msg_fields['success'] = workflow.success + msg_fields['dateComplete'] = workflow.complete + msg_fields['duration'] = workflow.duration + msg_fields['exitStatus'] = workflow.exitStatus + msg_fields['errorMessage'] = (workflow.errorMessage ?: 'None') + msg_fields['errorReport'] = (workflow.errorReport ?: 'None') + msg_fields['commandLine'] = workflow.commandLine.replaceFirst(/ +--hook_url +[^ ]+/, "") + msg_fields['projectDir'] = workflow.projectDir + msg_fields['summary'] = summary << misc_fields + + // Render the JSON template + def engine = new groovy.text.GStringTemplateEngine() + // Different JSON depending on the service provider + // Defaults to "Adaptive Cards" (https://adaptivecards.io), except Slack which has its own format + def json_path = hook_url.contains("hooks.slack.com") ? "slackreport.json" : "adaptivecard.json" + def hf = new File("$projectDir/assets/${json_path}") + def json_template = engine.createTemplate(hf).make(msg_fields) + def json_message = json_template.toString() + + // POST + def post = new URL(hook_url).openConnection(); + post.setRequestMethod("POST") + post.setDoOutput(true) + post.setRequestProperty("Content-Type", "application/json") + post.getOutputStream().write(json_message.getBytes("UTF-8")); + def postRC = post.getResponseCode(); + if (! 
postRC.equals(200)) { + log.warn(post.getErrorStream().getText()); + } + } + + // + // Dump pipeline parameters in a json file + // + public static void dump_parameters(workflow, params) { + def output_d = new File("${params.outdir}/pipeline_info/") + if (!output_d.exists()) { + output_d.mkdirs() + } + + def timestamp = new java.util.Date().format( 'yyyy-MM-dd_HH-mm-ss') + def output_pf = new File(output_d, "params_${timestamp}.json") + def jsonStr = JsonOutput.toJson(params) + output_pf.text = JsonOutput.prettyPrint(jsonStr) + } + // // Print pipeline summary on completion // @@ -165,10 +247,9 @@ class NfcoreTemplate { if (workflow.stats.ignoredCount == 0) { log.info "-${colors.purple}[$workflow.manifest.name]${colors.green} Pipeline completed successfully${colors.reset}-" } else { - log.info "-${colors.purple}[$workflow.manifest.name]${colors.red} Pipeline completed successfully, but with errored process(es) ${colors.reset}-" + log.info "-${colors.purple}[$workflow.manifest.name]${colors.yellow} Pipeline completed successfully, but with errored process(es) ${colors.reset}-" } } else { - hostName(workflow, params, log) log.info "-${colors.purple}[$workflow.manifest.name]${colors.red} Pipeline completed with errors${colors.reset}-" } } @@ -254,6 +335,7 @@ class NfcoreTemplate { // public static String logo(workflow, monochrome_logs) { Map colors = logColours(monochrome_logs) + String workflow_version = NfcoreTemplate.version(workflow) String.format( """\n ${dashedLine(monochrome_logs)} @@ -262,7 +344,7 @@ class NfcoreTemplate { ${colors.blue} |\\ | |__ __ / ` / \\ |__) |__ ${colors.yellow}} {${colors.reset} ${colors.blue} | \\| | \\__, \\__/ | \\ |___ ${colors.green}\\`-._,-`-,${colors.reset} ${colors.green}`._,._,\'${colors.reset} - ${colors.purple} ${workflow.manifest.name} v${workflow.manifest.version}${colors.reset} + ${colors.purple} ${workflow.manifest.name} ${workflow_version}${colors.reset} ${dashedLine(monochrome_logs)} """.stripIndent() ) diff --git a/lib/Utils.groovy b/lib/Utils.groovy old mode 100755 new mode 100644 index 18173e98..8d030f4e --- a/lib/Utils.groovy +++ b/lib/Utils.groovy @@ -21,27 +21,27 @@ class Utils { } // Check that all channels are present - def required_channels = ['conda-forge', 'bioconda', 'defaults'] - def conda_check_failed = !required_channels.every { ch -> ch in channels } + // This channel list is ordered by required channel priority. 
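For reference, the rewritten channel check that follows boils down to two tests over the user's Conda channel list: a set difference to catch missing channels, and a pairwise indexOf comparison to enforce priority order. A minimal standalone Groovy sketch of that logic (the function name and sample lists are illustrative, not part of the pipeline):

    // Sketch only: mirrors the subset-and-order test used by Utils.checkCondaChannels()
    def channelsConfigured(List channels) {
        def required = ['conda-forge', 'bioconda', 'defaults']
        def missing  = (required as Set) - (channels as Set)   // any required channel absent?
        def ordered  = (0..<required.size() - 1).every { i ->
            // each required channel must appear before the next one in the user's list
            channels.indexOf(required[i]) < channels.indexOf(required[i + 1])
        }
        return missing.isEmpty() && ordered
    }

    assert channelsConfigured(['conda-forge', 'bioconda', 'defaults'])
    assert !channelsConfigured(['bioconda', 'conda-forge', 'defaults'])  // right channels, wrong order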
+ def required_channels_in_order = ['conda-forge', 'bioconda', 'defaults'] + def channels_missing = ((required_channels_in_order as Set) - (channels as Set)) as Boolean // Check that they are in the right order - conda_check_failed |= !(channels.indexOf('conda-forge') < channels.indexOf('bioconda')) - conda_check_failed |= !(channels.indexOf('bioconda') < channels.indexOf('defaults')) + def channel_priority_violation = false + def n = required_channels_in_order.size() + for (int i = 0; i < n - 1; i++) { + channel_priority_violation |= !(channels.indexOf(required_channels_in_order[i]) < channels.indexOf(required_channels_in_order[i+1])) + } - if (conda_check_failed) { - log.warn "=============================================================================\n" + + if (channels_missing | channel_priority_violation) { + log.warn "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n" + " There is a problem with your Conda configuration!\n\n" + " You will need to set-up the conda-forge and bioconda channels correctly.\n" + - " Please refer to https://bioconda.github.io/user/install.html#set-up-channels\n" + - " NB: The order of the channels matters!\n" + - "===================================================================================" + " Please refer to https://bioconda.github.io/\n" + + " The observed channel order is \n" + + " ${channels}\n" + + " but the following channel order is required:\n" + + " ${required_channels_in_order}\n" + + "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" } } - - // - // Join module args with appropriate spacing - // - public static String joinModuleArgs(args_list) { - return ' ' + args_list.join(' ') - } } diff --git a/lib/WorkflowBacass.groovy b/lib/WorkflowBacass.groovy index 4b99b4dc..a7b07355 100755 --- a/lib/WorkflowBacass.groovy +++ b/lib/WorkflowBacass.groovy @@ -2,6 +2,9 @@ // This file holds several functions specific to the workflow/bacass.nf in the nf-core/bacass pipeline // +import nextflow.Nextflow +import groovy.text.SimpleTemplateEngine + class WorkflowBacass { // @@ -9,8 +12,7 @@ class WorkflowBacass { // public static void initialise(params, log) { if(("${params.assembler}" == 'canu' || "${params.assembler}" == 'miniasm') && ("${params.assembly_type}" == 'short' || "${params.assembly_type}" == 'hybrid')){ - log.error "Canu and Miniasm can only be used for long read assembly and neither for Hybrid nor Shortread assembly!" - System.exit(1) + exit 1, "Canu and Miniasm can only be used for long-read assembly, not for hybrid or short-read assembly!" } } @@ -40,4 +42,76 @@ class WorkflowBacass { yaml_file_text += "${summary_section}" return yaml_file_text } + + // + // Generate methods description for MultiQC + // + + public static String toolCitationText(params) { + + // Can use ternary operators to dynamically construct the text based on conditions, e.g. params["run_xyz"] ? "Tool (Foo et al. 2023)" : "", + // Uncomment function in methodsDescriptionText to render in MultiQC report + def citation_text = [ + "Tools used in the workflow included:", + "FastQC (Andrews 2010),", + "MultiQC (Ewels et al. 2016)", + "." + ].join(' ').trim() + + return citation_text + } + + public static String toolBibliographyText(params) { + + // TODO Optionally add bibliographic entries to this list. + // Can use ternary operators to dynamically construct the text based on conditions, e.g. params["run_xyz"] ? "
<li>Author (2023) Pub name, Journal, DOI</li>" : "", + // Uncomment function in methodsDescriptionText to render in MultiQC report + def reference_text = [ + "<li>Andrews S, (2010) FastQC, URL: https://www.bioinformatics.babraham.ac.uk/projects/fastqc/</li>", + "<li>Ewels, P., Magnusson, M., Lundin, S., & Käller, M. (2016). MultiQC: summarize analysis results for multiple tools and samples in a single report. Bioinformatics, 32(19), 3047–3048. doi: 10.1093/bioinformatics/btw354</li>" + ].join(' ').trim() + + return reference_text + } + + public static String methodsDescriptionText(run_workflow, mqc_methods_yaml, params) { + // Convert to a named map so it can be used with the familiar NXF ${workflow} variable syntax in the MultiQC YML file + def meta = [:] + meta.workflow = run_workflow.toMap() + meta["manifest_map"] = run_workflow.manifest.toMap() + + // Pipeline DOI + meta["doi_text"] = meta.manifest_map.doi ? "(doi: ${meta.manifest_map.doi})" : "" + meta["nodoi_text"] = meta.manifest_map.doi ? "": "<li>If available, make sure to update the text to include the Zenodo DOI of the version of the pipeline used.</li>" + + // Tool references + meta["tool_citations"] = "" + meta["tool_bibliography"] = "" + + // TODO Only uncomment below if logic in toolCitationText/toolBibliographyText has been filled! + //meta["tool_citations"] = toolCitationText(params).replaceAll(", \\.", ".").replaceAll("\\. \\.", ".").replaceAll(", \\.", ".") + //meta["tool_bibliography"] = toolBibliographyText(params) + + + def methods_text = mqc_methods_yaml.text + + def engine = new SimpleTemplateEngine() + def description_html = engine.createTemplate(methods_text).make(meta) + + return description_html + } + + // + // Exit pipeline if incorrect --genome key provided + // + private static void genomeExistsError(params, log) { + if (params.genomes && params.genome && !params.genomes.containsKey(params.genome)) { + def error_string = "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n" + + " Genome '${params.genome}' not found in any config files provided to the pipeline.\n" + + " Currently, the available genome keys are:\n" + + " ${params.genomes.keySet().join(", ")}\n" + + "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" + Nextflow.error(error_string) + } + } } diff --git a/lib/WorkflowMain.groovy b/lib/WorkflowMain.groovy index 049df694..51da9fea 100755 --- a/lib/WorkflowMain.groovy +++ b/lib/WorkflowMain.groovy @@ -2,6 +2,8 @@ // This file holds several functions specific to the main.nf workflow in the nf-core/bacass pipeline // +import nextflow.Nextflow + class WorkflowMain { // @@ -17,64 +19,44 @@ class WorkflowMain { " https://github.com/${workflow.manifest.name}/blob/master/CITATIONS.md" } - // - // Print help to screen if required - // - public static String help(workflow, params, log) { - def command = "nextflow run ${workflow.manifest.name} --input samplesheet.csv --genome GRCh37 -profile docker" - def help_string = '' - help_string += NfcoreTemplate.logo(workflow, params.monochrome_logs) - help_string += NfcoreSchema.paramsHelp(workflow, params, command) - help_string += '\n' + citation(workflow) + '\n' - help_string += NfcoreTemplate.dashedLine(params.monochrome_logs) - return help_string - } - - // - // Print parameter summary log to screen - // - public static String paramsSummaryLog(workflow, params, log) { - def summary_log = '' - summary_log += NfcoreTemplate.logo(workflow, params.monochrome_logs) - summary_log += NfcoreSchema.paramsSummaryLog(workflow, params) - summary_log += '\n' + citation(workflow) + '\n' - summary_log += NfcoreTemplate.dashedLine(params.monochrome_logs) - return summary_log - } // // Validate parameters and print summary to screen // public static void initialise(workflow, params, log) { - // Print help to screen if required - if (params.help) { - log.info help(workflow, params, log) - System.exit(0) - } - // Validate workflow parameters via the JSON schema - if (params.validate_params) { - NfcoreSchema.validateParameters(workflow, params, log) + // Print workflow version and exit on --version + if (params.version) { + String workflow_version = NfcoreTemplate.version(workflow) + log.info "${workflow.manifest.name} ${workflow_version}" + System.exit(0) } - // Print parameter summary log to screen - log.info paramsSummaryLog(workflow, params, log) + // Check that a -profile or Nextflow config has been provided to run the pipeline + NfcoreTemplate.checkConfigProvided(workflow, log) // Check that conda channels are set-up correctly - if (params.enable_conda) { + if (workflow.profile.tokenize(',').intersect(['conda',
'mamba']).size() >= 1) { Utils.checkCondaChannels(log) } // Check AWS batch settings NfcoreTemplate.awsBatch(workflow, params) - // Check the hostnames against configured profiles - NfcoreTemplate.hostName(workflow, params, log) - // Check input has been provided if (!params.input) { - log.error "Please provide an input samplesheet to the pipeline e.g. '--input samplesheet.csv'" - System.exit(1) + Nextflow.error("Please provide an input samplesheet to the pipeline e.g. '--input samplesheet.csv'") + } + } + // + // Get attribute from genome config file e.g. fasta + // + public static Object getGenomeAttribute(params, attribute) { + if (params.genomes && params.genome && params.genomes.containsKey(params.genome)) { + if (params.genomes[ params.genome ].containsKey(attribute)) { + return params.genomes[ params.genome ][ attribute ] + } } + return null } } diff --git a/main.nf b/main.nf index 2ce50b0f..f72ff4c3 100644 --- a/main.nf +++ b/main.nf @@ -1,8 +1,8 @@ #!/usr/bin/env nextflow /* -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ nf-core/bacass -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Github : https://github.com/nf-core/bacass Website: https://nf-co.re/bacass Slack : https://nfcore.slack.com/channels/bacass @@ -12,17 +12,33 @@ nextflow.enable.dsl = 2 /* -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ VALIDATE & PRINT PARAMETER SUMMARY -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ +include { validateParameters; paramsHelp } from 'plugin/nf-validation' + +// Print help message if needed +if (params.help) { + def logo = NfcoreTemplate.logo(workflow, params.monochrome_logs) + def citation = '\n' + WorkflowMain.citation(workflow) + '\n' + def String command = "nextflow run ${workflow.manifest.name} --input samplesheet.csv -profile docker" + log.info logo + paramsHelp(command) + citation + NfcoreTemplate.dashedLine(params.monochrome_logs) + System.exit(0) +} + +// Validate input parameters +if (params.validate_params) { + validateParameters() +} + WorkflowMain.initialise(workflow, params, log) /* -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NAMED WORKFLOW FOR PIPELINE -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ include { BACASS } from './workflows/bacass' @@ -35,9 +51,9 @@ workflow NFCORE_BACASS { } /* -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ RUN ALL WORKFLOWS -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ // @@ -49,7 +65,7 @@ workflow { } /* 
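Note how main.nf now fronts the run with the nf-validation plugin: paramsHelp() renders schema-derived help text and validateParameters() enforces nextflow_schema.json before WorkflowMain.initialise() is reached. A stripped-down sketch of the same wiring (assumes the plugin is enabled in nextflow.config via plugins { id 'nf-validation' }; the pipeline name and command string are placeholders):

    #!/usr/bin/env nextflow
    nextflow.enable.dsl = 2

    include { validateParameters; paramsHelp } from 'plugin/nf-validation'

    // Print schema-generated help and exit cleanly on --help
    if (params.help) {
        log.info paramsHelp("nextflow run my-org/my-pipeline --input samplesheet.csv -profile docker")
        System.exit(0)
    }

    // Fail fast on any parameter that violates nextflow_schema.json
    if (params.validate_params) {
        validateParameters()
    }

    workflow { }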
-======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ THE END -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ diff --git a/modules.json b/modules.json index 2ef7020a..87f35378 100644 --- a/modules.json +++ b/modules.json @@ -2,28 +2,117 @@ "name": "nf-core/bacass", "homePage": "https://github.com/nf-core/bacass", "repos": { - "nf-core/modules": { - "fastqc": { - "git_sha": "e937c7950af70930d1f34bb961403d9d2aa81c7d" + "https://github.com/nf-core/modules.git": { + "modules": { + "nf-core": { + "bakta/bakta": { + "branch": "master", + "git_sha": "f05fa7c6753f92be861d606378860dcd5c828880", + "installed_by": ["modules"] + }, + "bakta/baktadbdownload": { + "branch": "master", + "git_sha": "f05fa7c6753f92be861d606378860dcd5c828880", + "installed_by": ["modules"], + "patch": "modules/nf-core/bakta/baktadbdownload/bakta-baktadbdownload.diff" + }, + "canu": { + "branch": "master", + "git_sha": "911696ea0b62df80e900ef244d7867d177971f73", + "installed_by": ["modules"] + }, + "custom/dumpsoftwareversions": { + "branch": "master", + "git_sha": "05c280924b6c768d484c7c443dad5e605c4ff4b4", + "installed_by": ["modules"] + }, + "fastp": { + "branch": "master", + "git_sha": "d497a4868ace3302016ea8ed4b395072d5e833cd", + "installed_by": ["fastq_trim_fastp_fastqc", "modules"] + }, + "fastqc": { + "branch": "master", + "git_sha": "9a4517e720bc812e95b56d23d15a1653b6db4f53", + "installed_by": ["modules", "fastq_trim_fastp_fastqc"] + }, + "gunzip": { + "branch": "master", + "git_sha": "e06548bfa36ee31869b81041879dd6b3a83b1d57", + "installed_by": ["modules"] + }, + "kraken2/kraken2": { + "branch": "master", + "git_sha": "603ecbd9f45300c9788f197d2a15a005685b4220", + "installed_by": ["modules"] + }, + "miniasm": { + "branch": "master", + "git_sha": "911696ea0b62df80e900ef244d7867d177971f73", + "installed_by": ["modules"] + }, + "minimap2/align": { + "branch": "master", + "git_sha": "603ecbd9f45300c9788f197d2a15a005685b4220", + "installed_by": ["modules"] + }, + "multiqc": { + "branch": "master", + "git_sha": "c4e79dd48ab2cedea2d7d525582bea061c241e0f", + "installed_by": ["modules"] + }, + "nanoplot": { + "branch": "master", + "git_sha": "911696ea0b62df80e900ef244d7867d177971f73", + "installed_by": ["modules"], + "patch": "modules/nf-core/nanoplot/nanoplot.diff" + }, + "porechop/porechop": { + "branch": "master", + "git_sha": "911696ea0b62df80e900ef244d7867d177971f73", + "installed_by": ["modules"] + }, + "prokka": { + "branch": "master", + "git_sha": "911696ea0b62df80e900ef244d7867d177971f73", + "installed_by": ["modules"] + }, + "quast": { + "branch": "master", + "git_sha": "344638191a5d6b3526556410819dfcf24e98039e", + "installed_by": ["modules"] + }, + "racon": { + "branch": "master", + "git_sha": "911696ea0b62df80e900ef244d7867d177971f73", + "installed_by": ["modules"] + }, + "samtools/index": { + "branch": "master", + "git_sha": "911696ea0b62df80e900ef244d7867d177971f73", + "installed_by": ["modules"] + }, + "samtools/sort": { + "branch": "master", + "git_sha": "a0f7be95788366c1923171e358da7d049eb440f9", + "installed_by": ["modules"] + }, + "untar": { + "branch": "master", + "git_sha": "d0b4fc03af52a1cc8c6fb4493b921b57352b1dd8", + "installed_by": ["modules"] + } + } }, - "kraken2/kraken2": { - "git_sha": "e937c7950af70930d1f34bb961403d9d2aa81c7d" - }, - 
"multiqc": { - "git_sha": "e937c7950af70930d1f34bb961403d9d2aa81c7d" - }, - "prokka": { - "git_sha": "e937c7950af70930d1f34bb961403d9d2aa81c7d" - }, - "quast": { - "git_sha": "e937c7950af70930d1f34bb961403d9d2aa81c7d" - }, - "samtools/index": { - "git_sha": "c5235a983d454787fa0c3247b02086969217163b" - }, - "samtools/sort": { - "git_sha": "c5235a983d454787fa0c3247b02086969217163b" + "subworkflows": { + "nf-core": { + "fastq_trim_fastp_fastqc": { + "branch": "master", + "git_sha": "bd448d5c60454640610681515538be3ec66036e4", + "installed_by": ["subworkflows"] + } + } } } } -} \ No newline at end of file +} diff --git a/modules/local/canu.nf b/modules/local/canu.nf deleted file mode 100644 index 09afecf6..00000000 --- a/modules/local/canu.nf +++ /dev/null @@ -1,50 +0,0 @@ -// Import generic module functions -include { initOptions; saveFiles; getSoftwareName } from './functions' - -params.options = [:] -options = initOptions(params.options) - -process CANU { - tag "$meta.id" - label 'process_high' - label 'process_long' - label 'process_high_memory' - label 'error_retry' - publishDir "${params.outdir}", - mode: params.publish_dir_mode, - saveAs: { filename -> saveFiles(filename:filename, options:params.options, publish_dir:getSoftwareName(task.process), meta:meta, publish_by_meta:['id']) } - - conda (params.enable_conda ? 'canu=2.1.1-2' : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container "https://depot.galaxyproject.org/singularity/canu:2.1.1--h1b792b2_2" - } else { - container "quay.io/biocontainers/canu:2.1.1--h1b792b2_2" - } - - input: - tuple val(meta), val(reads), file(longreads) - - output: - tuple val(meta), path('*_assembly.fasta') , emit: assembly - tuple val(meta), path('*_assembly.report'), emit: log - path '*.version.txt' , emit: version - - script: - def software = getSoftwareName(task.process) - def prefix = options.suffix ? "${meta.id}${options.suffix}" : "${meta.id}" - def genomeSize = meta.genome_size == 'NA' ? "5m" : "${meta.genome_size}" - """ - canu -p assembly -d canu_out \ - ${options.args} \ - genomeSize="${genomeSize}" -nanopore "${longreads}" \ - maxThreads="${task.cpus}" merylMemory="${task.memory.toGiga()}G" \ - merylThreads="${task.cpus}" hapThreads="${task.cpus}" batMemory="${task.memory.toGiga()}G" \ - redMemory="${task.memory.toGiga()}G" redThreads="${task.cpus}" \ - oeaMemory="${task.memory.toGiga()}G" oeaThreads="${task.cpus}" \ - corMemory="${task.memory.toGiga()}G" corThreads="${task.cpus}" - mv canu_out/assembly.contigs.fasta ${prefix}_assembly.fasta - mv canu_out/assembly.report ${prefix}_assembly.report - - echo \$(canu --version 2>&1) | sed -e 's/Canu //g' > ${software}.version.txt - """ -} diff --git a/modules/local/dfast.nf b/modules/local/dfast.nf index bd515040..db7be3da 100644 --- a/modules/local/dfast.nf +++ b/modules/local/dfast.nf @@ -1,36 +1,43 @@ -// Import generic module functions -include { initOptions; saveFiles; getSoftwareName } from './functions' - -params.options = [:] -options = initOptions(params.options) - process DFAST { tag "$meta.id" label 'process_medium' - publishDir "${params.outdir}", - mode: params.publish_dir_mode, - saveAs: { filename -> saveFiles(filename:filename, options:params.options, publish_dir:getSoftwareName(task.process), meta:meta, publish_by_meta:['id']) } - conda (params.enable_conda ? 
"dfast=1.2.14" : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container "https://depot.galaxyproject.org/singularity/dfast:1.2.14--h2e03b76_0" - } else { - container "quay.io/biocontainers/dfast:1.2.14--h2e03b76_0" - } + conda "bioconda::dfast=1.2.20" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://depot.galaxyproject.org/singularity/dfast:1.2.20--h43eeafb_0' : + 'biocontainers/dfast:1.2.20--h43eeafb_0' }" input: tuple val(meta), path(fasta) file (config) output: - tuple val(meta), path("RESULT*"), emit: reads - path "*.version.txt" , emit: version + tuple val(meta), path("*_results"), emit: annotation + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when script: - def software = getSoftwareName(task.process) + def args = task.ext.args ?: '' + def args2 = task.ext.args2 ?: '' + def prefix = task.ext.prefix ?: "${meta.id}" """ - dfast_file_downloader.py --protein dfast --dbroot . - dfast --genome ${fasta} --config $config - dfast --version | sed -e "s/DFAST ver. //g" > "${software}.version.txt" + dfast_file_downloader.py \\ + $args \\ + --protein dfast \\ + --dbroot . + + dfast \\ + $args2 \\ + --genome ${fasta} \\ + --config $config + + mv RESULT_TEST/ ${prefix}_results/ + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + dfast: \$( dfast --version | sed -e "s/DFAST ver. //g" ) + END_VERSIONS """ } diff --git a/modules/local/functions.nf b/modules/local/functions.nf deleted file mode 100644 index da9da093..00000000 --- a/modules/local/functions.nf +++ /dev/null @@ -1,68 +0,0 @@ -// -// Utility functions used in nf-core DSL2 module files -// - -// -// Extract name of software tool from process name using $task.process -// -def getSoftwareName(task_process) { - return task_process.tokenize(':')[-1].tokenize('_')[0].toLowerCase() -} - -// -// Function to initialise default values and to generate a Groovy Map of available options for nf-core modules -// -def initOptions(Map args) { - def Map options = [:] - options.args = args.args ?: '' - options.args2 = args.args2 ?: '' - options.args3 = args.args3 ?: '' - options.publish_by_meta = args.publish_by_meta ?: [] - options.publish_dir = args.publish_dir ?: '' - options.publish_files = args.publish_files - options.suffix = args.suffix ?: '' - return options -} - -// -// Tidy up and join elements of a list to return a path string -// -def getPathFromList(path_list) { - def paths = path_list.findAll { item -> !item?.trim().isEmpty() } // Remove empty entries - paths = paths.collect { it.trim().replaceAll("^[/]+|[/]+\$", "") } // Trim whitespace and trailing slashes - return paths.join('/') -} - -// -// Function to save/publish module results -// -def saveFiles(Map args) { - if (!args.filename.endsWith('.version.txt')) { - def ioptions = initOptions(args.options) - def path_list = [ ioptions.publish_dir ?: args.publish_dir ] - if (ioptions.publish_by_meta) { - def key_list = ioptions.publish_by_meta instanceof List ? ioptions.publish_by_meta : args.publish_by_meta - for (key in key_list) { - if (args.meta && key instanceof String) { - def path = key - if (args.meta.containsKey(key)) { - path = args.meta[key] instanceof Boolean ? "${key}_${args.meta[key]}".toString() : args.meta[key] - } - path = path instanceof String ? 
path : '' - path_list.add(path) - } - } - } - if (ioptions.publish_files instanceof Map) { - for (ext in ioptions.publish_files) { - if (args.filename.endsWith(ext.key)) { - def ext_list = path_list.collect() - ext_list.add(ext.value) - return "${getPathFromList(ext_list)}/$args.filename" - } - } - } else if (ioptions.publish_files == null) { - return "${getPathFromList(path_list)}/$args.filename" - } - } -} diff --git a/modules/local/get_software_versions.nf b/modules/local/get_software_versions.nf deleted file mode 100644 index d7a9a92e..00000000 --- a/modules/local/get_software_versions.nf +++ /dev/null @@ -1,33 +0,0 @@ -// Import generic module functions -include { saveFiles } from './functions' - -params.options = [:] - -process GET_SOFTWARE_VERSIONS { - publishDir "${params.outdir}", - mode: params.publish_dir_mode, - saveAs: { filename -> saveFiles(filename:filename, options:params.options, publish_dir:'pipeline_info', meta:[:], publish_by_meta:[]) } - - conda (params.enable_conda ? "conda-forge::python=3.8.3" : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container "https://depot.galaxyproject.org/singularity/python:3.8.3" - } else { - container "quay.io/biocontainers/python:3.8.3" - } - - cache false - - input: - path versions - - output: - path "software_versions.tsv" , emit: tsv - path 'software_versions_mqc.yaml', emit: yaml - - script: // This script is bundled with the pipeline, in nf-core/bacass/bin/ - """ - echo $workflow.manifest.version > pipeline.version.txt - echo $workflow.nextflow.version > nextflow.version.txt - scrape_software_versions.py &> software_versions_mqc.yaml - """ -} diff --git a/modules/local/kraken2_db_preparation.nf b/modules/local/kraken2_db_preparation.nf index 559f4b2a..d2af01b6 100644 --- a/modules/local/kraken2_db_preparation.nf +++ b/modules/local/kraken2_db_preparation.nf @@ -1,19 +1,11 @@ -// Import generic module functions -include { initOptions; saveFiles; getSoftwareName } from './functions' - -params.options = [:] -options = initOptions(params.options) - process KRAKEN2_DB_PREPARATION { tag "${db.simpleName}" label 'process_low' - conda (params.enable_conda ? "conda-forge::sed=4.7" : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container "https://containers.biocontainers.pro/s3/SingImgsRepo/biocontainers/v1.2.0_cv1/biocontainers_v1.2.0_cv1.img" - } else { - container "biocontainers/biocontainers:v1.2.0_cv1" - } + conda 'conda-forge::sed=4.7' + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
+ 'https://containers.biocontainers.pro/s3/SingImgsRepo/biocontainers/v1.2.0_cv1/biocontainers_v1.2.0_cv1.img' : + 'docker.io/biocontainers/biocontainers:v1.2.0_cv1' }" input: path db @@ -21,6 +13,9 @@ process KRAKEN2_DB_PREPARATION { output: tuple val("${db.simpleName}"), path("database"), emit: db + when: + task.ext.when == null || task.ext.when + script: """ mkdir db_tmp diff --git a/modules/local/medaka.nf b/modules/local/medaka.nf index 7af1e028..e57666a8 100644 --- a/modules/local/medaka.nf +++ b/modules/local/medaka.nf @@ -1,43 +1,44 @@ -// Import generic module functions -include { initOptions; saveFiles; getSoftwareName } from './functions' - -params.options = [:] -options = initOptions(params.options) - process MEDAKA { tag "$meta.id" label 'process_high' - label 'process_long' - label 'process_high_memory' - label 'error_retry' - publishDir "${params.outdir}", - mode: params.publish_dir_mode, - saveAs: { filename -> saveFiles(filename:filename, options:params.options, publish_dir:getSoftwareName(task.process), meta:meta, publish_by_meta:['id']) } - - conda (params.enable_conda ? 'medaka=1.4.3-0' : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container "https://depot.galaxyproject.org/singularity/medaka:1.4.3--py38h130def0_0" - } else { - container "quay.io/biocontainers/medaka:1.4.3--py38h130def0_0" - } + + conda 'medaka=1.4.3-0' + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://depot.galaxyproject.org/singularity/medaka:1.4.3--py38h130def0_0' : + 'biocontainers/medaka:1.4.3--py38h130def0_0' }" input: - tuple val(meta), file(assembly), val(reads), file(longreads) + tuple val(meta), file(longreads), file(assembly) output: - tuple val(meta), path('*_polished_genome.fa'), emit: assembly - path '*.version.txt' , emit: version + tuple val(meta), path('*_polished_genome.fa') , emit: assembly + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when script: - def software = getSoftwareName(task.process) - def prefix = options.suffix ? "${meta.id}${options.suffix}" : "${meta.id}" + def args = task.ext.args ?: '' + def prefix = task.ext.prefix ?: "${meta.id}" + def reads_bgzip_command = ("$longreads".endsWith('.gz')) ? "zcat $longreads | bgzip -c > ${prefix}.fastq.bgz" : '' + def assembly_bgzip_command = ("$assembly".endsWith('.gz')) ? 
"zcat $assembly | bgzip -c > ${prefix}.fasta.bgz" : '' + if ("$longreads".endsWith('.gz')) { reads_bgzip_out = "${prefix}.fastq.bgz"} else { reads_bgzip_out = null } + if ("$assembly".endsWith('.gz')) { assembly_bgzip_out = "${prefix}.fasta.bgz"} else { assembly_bgzip_out = null } + """ - medaka_consensus ${options.args} \ - -i ${longreads} \ - -d ${assembly} \ + # Recompress with bgzip + $reads_bgzip_command + $assembly_bgzip_command + + medaka_consensus $args \ + -i ${ reads_bgzip_out ?: longreads } \ + -d ${ assembly_bgzip_out ?: assembly } \ -o "${prefix}_polished_genome.fa" \ - -t ${task.cpus} + -t $task.cpus - echo \$(medaka --version 2>&1) | sed -e 's/medaka //g' > ${software}.version.txt + cat <<-END_VERSIONS > versions.yml + "${task.process}": + medaka: \$( medaka --version 2>&1 | sed 's/medaka //g' ) + END_VERSIONS """ } diff --git a/modules/local/miniasm.nf b/modules/local/miniasm.nf deleted file mode 100644 index 5a3cda7d..00000000 --- a/modules/local/miniasm.nf +++ /dev/null @@ -1,41 +0,0 @@ -// Import generic module functions -include { initOptions; saveFiles; getSoftwareName } from './functions' - -params.options = [:] -options = initOptions(params.options) - -process MINIASM { - tag "$meta.id" - label 'process_high' - label 'process_long' - label 'process_high_memory' - label 'error_retry' - publishDir "${params.outdir}", - mode: params.publish_dir_mode, - saveAs: { filename -> saveFiles(filename:filename, options:params.options, publish_dir:getSoftwareName(task.process), meta:meta, publish_by_meta:['id']) } - - conda (params.enable_conda ? 'bioconda::miniasm=0.3_r179' : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container "https://depot.galaxyproject.org/singularity/miniasm:0.3_r179--h5bf99c6_2" - } else { - container "quay.io/biocontainers/miniasm:0.3_r179--h5bf99c6_2" - } - - input: - tuple val(meta), val(reads), file(longreads), file(assembly), path(paf) - - output: - tuple val(meta), path('*_assembly.fasta') , emit: assembly - tuple val(meta), val(reads), file(longreads), path('*_assembly.fasta') , emit: all - path '*.version.txt' , emit: version - - script: - def software = getSoftwareName(task.process) - def prefix = options.suffix ? "${meta.id}${options.suffix}" : "${meta.id}" - """ - miniasm -f "${longreads}" "${paf}" > "${longreads}.gfa" - awk '/^S/{print ">"\$2"\\n"\$3}' "${longreads}.gfa" | fold > ${prefix}_assembly.fasta - - echo \$(miniasm -V 2>&1) > ${software}.version.txt - """ -} diff --git a/modules/local/minimap_align.nf b/modules/local/minimap_align.nf deleted file mode 100644 index 89af8661..00000000 --- a/modules/local/minimap_align.nf +++ /dev/null @@ -1,41 +0,0 @@ -// Import generic module functions -include { initOptions; saveFiles; getSoftwareName } from './functions' - -params.options = [:] -options = initOptions(params.options) - -process MINIMAP2_ALIGN { - tag "$meta.id" - label 'process_medium' - publishDir "${params.outdir}", - mode: params.publish_dir_mode, - saveAs: { filename -> saveFiles(filename:filename, options:params.options, publish_dir:getSoftwareName(task.process), meta:meta, publish_by_meta:['id']) } - - conda (params.enable_conda ? 
'bioconda::minimap2=2.21' : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container "https://depot.galaxyproject.org/singularity/minimap2:2.21--h5bf99c6_0" - } else { - container "quay.io/biocontainers/minimap2:2.21--h5bf99c6_0" - } - - input: - tuple val(meta), val(reads), file(longreads), file('reference') - - output: - tuple val(meta), val(reads), file(longreads), file('reference'), path("*.paf"), emit: paf - path "*.version.txt", emit: version - - script: - def software = getSoftwareName(task.process) - def prefix = options.suffix ? "${meta.id}${options.suffix}" : "${meta.id}" - """ - minimap2 \\ - $options.args \\ - -t $task.cpus \\ - reference \\ - $longreads \\ - > ${prefix}.paf - - echo \$(minimap2 --version 2>&1) > ${software}.version.txt - """ -} diff --git a/modules/local/nanoplot.nf b/modules/local/nanoplot.nf deleted file mode 100644 index a0540aba..00000000 --- a/modules/local/nanoplot.nf +++ /dev/null @@ -1,41 +0,0 @@ -// Import generic module functions -include { initOptions; saveFiles; getSoftwareName } from './functions' - -params.options = [:] -options = initOptions(params.options) - -process NANOPLOT { - tag "$meta.id" - label 'process_low' - publishDir "${params.outdir}", - mode: params.publish_dir_mode, - saveAs: { filename -> saveFiles(filename:filename, options:params.options, publish_dir:getSoftwareName(task.process), meta:meta, publish_by_meta:['id']) } - - conda (params.enable_conda ? 'bioconda::nanoplot=1.38.0' : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container "https://depot.galaxyproject.org/singularity/nanoplot:1.38.0--pyhdfd78af_0" - } else { - container "quay.io/biocontainers/nanoplot:1.38.0--pyhdfd78af_0" - } - - input: - tuple val(meta), path(ontfile) - - output: - tuple val(meta), path("*.html"), emit: html - tuple val(meta), path("*.png") , emit: png - tuple val(meta), path("*.txt") , emit: txt - tuple val(meta), path("*.log") , emit: log - path "*.version.txt" , emit: version - - script: - def software = getSoftwareName(task.process) - def input_file = "--fastq ${ontfile}" - """ - NanoPlot \\ - $options.args \\ - -t $task.cpus \\ - $input_file - echo \$(NanoPlot --version 2>&1) | sed 's/^.*NanoPlot //; s/ .*\$//' > ${software}.version.txt - """ -} diff --git a/modules/local/nanopolish.nf b/modules/local/nanopolish.nf index c1e6c943..34ca78d3 100644 --- a/modules/local/nanopolish.nf +++ b/modules/local/nanopolish.nf @@ -1,36 +1,25 @@ -// Import generic module functions -include { initOptions; saveFiles; getSoftwareName } from './functions' - -params.options = [:] -options = initOptions(params.options) - process NANOPOLISH { tag "$meta.id" label 'process_high' - label 'process_long' - label 'process_high_memory' - label 'error_retry' - publishDir "${params.outdir}", - mode: params.publish_dir_mode, - saveAs: { filename -> saveFiles(filename:filename, options:params.options, publish_dir:getSoftwareName(task.process), meta:meta, publish_by_meta:['id']) } - - conda (params.enable_conda ? 'nanopolish=0.13.2-5' : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container "https://depot.galaxyproject.org/singularity/nanopolish:0.13.2--h8cec615_5" - } else { - container "quay.io/biocontainers/nanopolish:0.13.2--h8cec615_5" - } + + conda "bioconda::nanopolish=0.14.0" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
+ 'https://depot.galaxyproject.org/singularity/nanopolish:0.14.0--h773013f_3' : + 'biocontainers/nanopolish:0.14.0--h773013f_3' }" input: tuple val(meta), val(reads), file(longreads), file(assembly), file(bam), file(bai), file(fast5) output: - tuple val(meta), file('polished_genome.fa'), emit: assembly - path "*.version.txt", emit: version + tuple val(meta), file('polished_genome.fa') , emit: assembly + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when script: - def software = getSoftwareName(task.process) - def prefix = options.suffix ? "${meta.id}${options.suffix}" : "${meta.id}" + def args = task.ext.args ?: '' + def prefix = task.ext.prefix ?: "${meta.id}" """ nanopolish index -d "${fast5}" "${longreads}" @@ -41,10 +30,14 @@ process NANOPOLISH { -b "${bam}" \ -g "${assembly}" \ -t "${task.cpus}" \ - --min-candidate-frequency 0.1 + --min-candidate-frequency 0.1 \ + $args nanopolish vcf2fasta -g "${assembly}" polished.vcf > polished_genome.fa - nanopolish --version | sed -e "s/nanopolish version //g" | head -n 1 > ${software}.version.txt + cat <<-END_VERSIONS > versions.yml + "${task.process}": + nanopolish: \$( nanopolish --version | sed -e "s/nanopolish version //g" | head -n 1 ) + END_VERSIONS """ } diff --git a/modules/local/porechop.nf b/modules/local/porechop.nf deleted file mode 100644 index 57a2cc2f..00000000 --- a/modules/local/porechop.nf +++ /dev/null @@ -1,34 +0,0 @@ -// Import generic module functions -include { initOptions; saveFiles; getSoftwareName } from './functions' - -params.options = [:] -options = initOptions(params.options) - -process PORECHOP { - tag "$meta.id" - label 'process_medium' - publishDir "${params.outdir}", - mode: params.publish_dir_mode, - saveAs: { filename -> saveFiles(filename:filename, options:params.options, publish_dir:getSoftwareName(task.process), meta:meta, publish_by_meta:['id']) } - - conda (params.enable_conda ? "porechop=0.2.4" : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container "https://depot.galaxyproject.org/singularity/porechop:0.2.4--py38hed8969a_1" - } else { - container "quay.io/biocontainers/porechop:0.2.4--py38hed8969a_1" - } - - input: - tuple val(meta), path(reads) - - output: - tuple val(meta), path('trimmed.fastq.gz'), emit: reads - path "*.version.txt" , emit: version - - script: - def software = getSoftwareName(task.process) - """ - porechop $options.args -i "${reads}" -t "${task.cpus}" -o trimmed.fastq.gz - porechop --version > "${software}.version.txt" - """ -} diff --git a/modules/local/pycoqc.nf b/modules/local/pycoqc.nf index 8c0de668..c3edb434 100644 --- a/modules/local/pycoqc.nf +++ b/modules/local/pycoqc.nf @@ -1,52 +1,41 @@ -// Import generic module functions -include { initOptions; saveFiles; getSoftwareName } from './functions' - -params.options = [:] -options = initOptions(params.options) - process PYCOQC { - tag "$meta.id" label 'process_medium' - publishDir "${params.outdir}", - mode: params.publish_dir_mode, - saveAs: { filename -> saveFiles(filename:filename, options:params.options, publish_dir:getSoftwareName(task.process), meta:meta, publish_by_meta:['id']) } - conda (params.enable_conda ? 
"bioconda::pycoqc=2.5.2" : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container "https://depot.galaxyproject.org/singularity/pycoqc:2.5.2--py_0" - } else { - container "quay.io/biocontainers/pycoqc:2.5.2--py_0" - } + conda "bioconda::pycoqc=2.5.2" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://depot.galaxyproject.org/singularity/pycoqc:2.5.2--py_0' : + 'biocontainers/pycoqc:2.5.2--py_0' }" input: tuple val(meta), path(fast5) output: - tuple val(meta), path('sequencing_summary.txt'), emit: summary - path "*.html" , emit: html - path "*.json" , emit: json - path "*.version.txt", emit: version + tuple val(meta), path("*.html"), emit: html + tuple val(meta), path("*.json"), emit: json + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when script: - def software = getSoftwareName(task.process) - //Find out whether the sequencing_summary already exists - if(file("${fast5}/sequencing_summary.txt").exists()){ - run_summary = "cp ${fast5}/sequencing_summary.txt ./sequencing_summary.txt" - } else { - run_summary = "Fast5_to_seq_summary -f $fast5 -t ${task.cpus} -s './sequencing_summary.txt' --verbose_level 2" - } - //Barcodes available? - barcode_me = file("${fast5}/barcoding_sequencing.txt").exists() ? "-b ${fast5}/barcoding_sequencing.txt" : '' + def args = task.ext.args ?: '' + def prefix = task.ext.prefix ?: "${meta.id}" + def run_summary = file("${fast5}/sequencing_summary.txt").exists() ? "cp ${fast5}/sequencing_summary.txt ./sequencing_summary.txt" : "Fast5_to_seq_summary -f $fast5 -t ${task.cpus} -s './sequencing_summary.txt' --verbose_level 2" + def barcode_me = file("${fast5}/barcoding_sequencing.txt").exists() ? "-b ${fast5}/barcoding_sequencing.txt" : '' + """ $run_summary pycoQC \\ - $options.args \\ + $args \\ -f "sequencing_summary.txt" \\ $barcode_me \\ - -o ${meta.id}_pycoqc.html \\ - -j ${meta.id}_pycoqc.json + -o ${prefix}.html \\ + -j ${prefix}.json - echo \$(pycoQC --version 2>&1) | sed 's/^.*pycoQC v//; s/ .*\$//' > ${software}.version.txt + cat <<-END_VERSIONS > versions.yml + "${task.process}": + pycoqc: \$(pycoQC --version 2>&1 | sed 's/^.*pycoQC v//; s/ .*\$//') + END_VERSIONS """ } diff --git a/modules/local/racon.nf b/modules/local/racon.nf deleted file mode 100644 index 28973921..00000000 --- a/modules/local/racon.nf +++ /dev/null @@ -1,39 +0,0 @@ -// Import generic module functions -include { initOptions; saveFiles; getSoftwareName } from './functions' - -params.options = [:] -options = initOptions(params.options) - -process RACON { - tag "$meta.id" - label 'process_high' - label 'process_long' - label 'process_high_memory' - label 'error_retry' - publishDir "${params.outdir}", - mode: params.publish_dir_mode, - saveAs: { filename -> saveFiles(filename:filename, options:params.options, publish_dir:getSoftwareName(task.process), meta:meta, publish_by_meta:['id']) } - - conda (params.enable_conda ? 
'racon=1.4.20-1' : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container "https://depot.galaxyproject.org/singularity/racon:1.4.20--h9a82719_1" - } else { - container "quay.io/biocontainers/racon:1.4.20--h9a82719_1" - } - - input: - tuple val(meta), val(reads), file(longreads), path('assembly.fasta'), path(paf) - - output: - tuple val(meta), path('*_assembly_consensus.fasta') , emit: assembly - path '*.version.txt' , emit: version - - script: - def software = getSoftwareName(task.process) - def prefix = options.suffix ? "${meta.id}${options.suffix}" : "${meta.id}" - """ - racon -t "${task.cpus}" "${longreads}" "${paf}" "assembly.fasta" > ${prefix}_assembly_consensus.fasta - - echo \$(racon --version 2>&1) | sed 's/^.*v//' > ${software}.version.txt - """ -} diff --git a/modules/local/skewer.nf b/modules/local/skewer.nf deleted file mode 100644 index ac983b3b..00000000 --- a/modules/local/skewer.nf +++ /dev/null @@ -1,46 +0,0 @@ -// Import generic module functions -include { initOptions; saveFiles; getSoftwareName } from './functions' - -params.options = [:] -options = initOptions(params.options) - -process SKEWER { - tag "$meta.id" - label 'process_medium' - publishDir "${params.outdir}", - mode: params.publish_dir_mode, - saveAs: { filename -> saveFiles(filename:filename, options:params.options, publish_dir:getSoftwareName(task.process), meta:meta, publish_by_meta:['id']) } - - conda (params.enable_conda ? "skewer=0.2.2-3" : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container "https://depot.galaxyproject.org/singularity/skewer:0.2.2--hc9558a2_3" - } else { - container "quay.io/biocontainers/skewer:0.2.2--hc9558a2_3" - } - - input: - tuple val(meta), path(reads) - - output: - tuple val(meta), path("*_trm-cmb.R{1,2}.fastq.gz"), emit: reads - path("*.log") , emit: log - path "*.version.txt" , emit: version - - script: - def software = getSoftwareName(task.process) - """ - # loop over readunits in pairs per sample - pairno=0 - echo "${reads[0]} ${reads[1]}" | xargs -n2 | while read fq1 fq2; do - skewer $options.args -t ${task.cpus} \$fq1 \$fq2; - done - - # gzip, because skewer's -z returns an error - gzip *.fastq - - cat \$(ls *trimmed-pair1.fastq.gz | sort) >> ${meta.id}_trm-cmb.R1.fastq.gz - cat \$(ls *trimmed-pair2.fastq.gz | sort) >> ${meta.id}_trm-cmb.R2.fastq.gz - - echo \$(skewer --version 2>&1) | sed 's/^.*skewer version: //; s/ .*//' > ${software}.version.txt - """ -} diff --git a/modules/local/unicycler.nf b/modules/local/unicycler.nf index 83ce4703..8a045827 100644 --- a/modules/local/unicycler.nf +++ b/modules/local/unicycler.nf @@ -1,56 +1,50 @@ -// Import generic module functions -include { initOptions; saveFiles; getSoftwareName } from './functions' - -params.options = [:] -options = initOptions(params.options) - process UNICYCLER { tag "$meta.id" label 'process_high' - label 'process_long' - label 'process_high_memory' - label 'error_retry' - publishDir "${params.outdir}", - mode: params.publish_dir_mode, - saveAs: { filename -> saveFiles(filename:filename, options:params.options, publish_dir:getSoftwareName(task.process), meta:meta, publish_by_meta:['id']) } - conda (params.enable_conda ? 
'bioconda::unicycler=0.4.8' : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container "https://depot.galaxyproject.org/singularity/unicycler:0.4.8--py38h8162308_3" - } else { - container "quay.io/biocontainers/unicycler:0.4.8--py38h8162308_3" - } + conda "bioconda::unicycler=0.4.8" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://depot.galaxyproject.org/singularity/unicycler:0.4.8--py38h8162308_3' : + 'biocontainers/unicycler:0.4.8--py38h8162308_3' }" input: - tuple val(meta), file(reads), file(longreads) + tuple val(meta), path(shortreads), path(longreads) output: - tuple val(meta), path('*.scaffolds.fa'), emit: scaffolds - tuple val(meta), path('*.assembly.gfa'), emit: gfa - tuple val(meta), path('*.log') , emit: log - path '*.version.txt' , emit: version + tuple val(meta), path('*.scaffolds.fa.gz'), emit: scaffolds + tuple val(meta), path('*.assembly.gfa.gz'), emit: gfa + tuple val(meta), path('*.log') , emit: log + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when script: - def software = getSoftwareName(task.process) - def prefix = options.suffix ? "${meta.id}${options.suffix}" : "${meta.id}" + def args = task.ext.args ?: '' + def prefix = task.ext.prefix ?: "${meta.id}" if(params.assembly_type == 'long'){ input_reads = "-l $longreads" } else if (params.assembly_type == 'short'){ - input_reads = "-1 ${reads[0]} -2 ${reads[1]}" + input_reads = "-1 ${shortreads[0]} -2 ${shortreads[1]}" } else if (params.assembly_type == 'hybrid'){ - input_reads = "-1 ${reads[0]} -2 ${reads[1]} -l $longreads" + input_reads = "-1 ${shortreads[0]} -2 ${shortreads[1]} -l $longreads" } """ unicycler \\ --threads $task.cpus \\ - $options.args \\ + $args \\ $input_reads \\ --out ./ mv assembly.fasta ${prefix}.scaffolds.fa + gzip -n ${prefix}.scaffolds.fa mv assembly.gfa ${prefix}.assembly.gfa + gzip -n ${prefix}.assembly.gfa mv unicycler.log ${prefix}.unicycler.log - echo \$(unicycler --version 2>&1) | sed 's/^.*Unicycler v//; s/ .*\$//' > ${software}.version.txt + cat <<-END_VERSIONS > versions.yml + "${task.process}": + unicycler: \$(echo \$(unicycler --version 2>&1) | sed 's/^.*Unicycler v//; s/ .*\$//') + END_VERSIONS """ } diff --git a/modules/nf-core/bakta/bakta/main.nf b/modules/nf-core/bakta/bakta/main.nf new file mode 100644 index 00000000..fd0b76f2 --- /dev/null +++ b/modules/nf-core/bakta/bakta/main.nf @@ -0,0 +1,72 @@ +process BAKTA_BAKTA { + tag "$meta.id" + label 'process_medium' + + conda "bioconda::bakta=1.8.2" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
+ 'https://depot.galaxyproject.org/singularity/bakta:1.8.2--pyhdfd78af_0' : + 'biocontainers/bakta:1.8.2--pyhdfd78af_0' }" + + input: + tuple val(meta), path(fasta) + path db + path proteins + path prodigal_tf + + output: + tuple val(meta), path("${prefix}.embl") , emit: embl + tuple val(meta), path("${prefix}.faa") , emit: faa + tuple val(meta), path("${prefix}.ffn") , emit: ffn + tuple val(meta), path("${prefix}.fna") , emit: fna + tuple val(meta), path("${prefix}.gbff") , emit: gbff + tuple val(meta), path("${prefix}.gff3") , emit: gff + tuple val(meta), path("${prefix}.hypotheticals.tsv"), emit: hypotheticals_tsv + tuple val(meta), path("${prefix}.hypotheticals.faa"), emit: hypotheticals_faa + tuple val(meta), path("${prefix}.tsv") , emit: tsv + tuple val(meta), path("${prefix}.txt") , emit: txt + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + prefix = task.ext.prefix ?: "${meta.id}" + def proteins_opt = proteins ? "--proteins ${proteins[0]}" : "" + def prodigal_tf = prodigal_tf ? "--prodigal-tf ${prodigal_tf[0]}" : "" + """ + bakta \\ + $fasta \\ + $args \\ + --threads $task.cpus \\ + --prefix $prefix \\ + $proteins_opt \\ + $prodigal_tf \\ + --db $db + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + bakta: \$(echo \$(bakta --version) 2>&1 | cut -f '2' -d ' ') + END_VERSIONS + """ + + stub: + prefix = task.ext.prefix ?: "${meta.id}" + """ + touch ${prefix}.embl + touch ${prefix}.faa + touch ${prefix}.ffn + touch ${prefix}.fna + touch ${prefix}.gbff + touch ${prefix}.gff3 + touch ${prefix}.hypotheticals.tsv + touch ${prefix}.hypotheticals.faa + touch ${prefix}.tsv + touch ${prefix}.txt + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + bakta: \$(echo \$(bakta --version) 2>&1 | cut -f '2' -d ' ') + END_VERSIONS + """ +} diff --git a/modules/nf-core/bakta/bakta/meta.yml b/modules/nf-core/bakta/bakta/meta.yml new file mode 100644 index 00000000..0dfa07e2 --- /dev/null +++ b/modules/nf-core/bakta/bakta/meta.yml @@ -0,0 +1,91 @@ +name: bakta_bakta +description: Annotation of bacterial genomes (isolates, MAGs) and plasmids +keywords: + - annotation + - fasta + - bacteria +tools: + - bakta: + description: Rapid & standardized annotation of bacterial genomes, MAGs & plasmids. + homepage: https://github.com/oschwengers/bakta + documentation: https://github.com/oschwengers/bakta + tool_dev_url: https://github.com/oschwengers/bakta + doi: "10.1099/mgen.0.000685" + licence: ["GPL v3"] + +input: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - fasta: + type: file + description: | + FASTA file to be annotated. Has to contain at least a non-empty string dummy value. + - db: + type: file + description: | + Path to the Bakta database. Must have amrfinderplus database directory already installed within it (in a directory called 'amrfinderplus-db/'). + - proteins: + type: file + description: FASTA/GenBank file of trusted proteins to first annotate from (optional) + - prodigal_tf: + type: file + description: Training file to use for Prodigal (optional) + +output: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. 
[ id:'test', single_end:false ] + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" + - txt: + type: file + description: genome statistics and annotation summary + pattern: "*.txt" + - tsv: + type: file + description: annotations as simple human readable tab separated values + pattern: "*.tsv" + - gff: + type: file + description: annotations & sequences in GFF3 format + pattern: "*.gff3" + - gbff: + type: file + description: annotations & sequences in (multi) GenBank format + pattern: "*.gbff" + - embl: + type: file + description: annotations & sequences in (multi) EMBL format + pattern: "*.embl" + - fna: + type: file + description: replicon/contig DNA sequences as FASTA + pattern: "*.fna" + - faa: + type: file + description: CDS/sORF amino acid sequences as FASTA + pattern: "*.faa" + - ffn: + type: file + description: feature nucleotide sequences as FASTA + pattern: "*.ffn" + - hypotheticals_tsv: + type: file + description: additional information on hypothetical protein CDS as simple human readable tab separated values + pattern: "*.hypotheticals.tsv" + - hypotheticals_faa: + type: file + description: hypothetical protein CDS amino acid sequences as FASTA + pattern: "*.hypotheticals.faa" + +authors: + - "@rpetit3" + - "@oschwengers" + - "@jfy133" diff --git a/modules/nf-core/bakta/baktadbdownload/bakta-baktadbdownload.diff b/modules/nf-core/bakta/baktadbdownload/bakta-baktadbdownload.diff new file mode 100644 index 00000000..7e2459d7 --- /dev/null +++ b/modules/nf-core/bakta/baktadbdownload/bakta-baktadbdownload.diff @@ -0,0 +1,19 @@ +Changes in module 'nf-core/bakta/baktadbdownload' +--- modules/nf-core/bakta/baktadbdownload/main.nf ++++ modules/nf-core/bakta/baktadbdownload/main.nf +@@ -1,10 +1,10 @@ + process BAKTA_BAKTADBDOWNLOAD { + label 'process_single' + +- conda "bioconda::bakta=1.8.2" ++ conda "bioconda::bakta=1.8.1" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? +- 'https://depot.galaxyproject.org/singularity/bakta:1.8.2--pyhdfd78af_0' : +- 'biocontainers/bakta:1.8.2--pyhdfd78af_0' }" ++ 'https://depot.galaxyproject.org/singularity/bakta:1.8.1--pyhdfd78af_0' : ++ 'biocontainers/bakta:1.8.1--pyhdfd78af_0' }" + + output: + path "db*" , emit: db + +************************************************************ diff --git a/modules/nf-core/bakta/baktadbdownload/main.nf b/modules/nf-core/bakta/baktadbdownload/main.nf new file mode 100644 index 00000000..5cf0bbb3 --- /dev/null +++ b/modules/nf-core/bakta/baktadbdownload/main.nf @@ -0,0 +1,43 @@ +process BAKTA_BAKTADBDOWNLOAD { + label 'process_single' + + conda "bioconda::bakta=1.8.1" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+ 'https://depot.galaxyproject.org/singularity/bakta:1.8.1--pyhdfd78af_0' : + 'biocontainers/bakta:1.8.1--pyhdfd78af_0' }" + + output: + path "db*" , emit: db + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + """ + bakta_db \\ + download \\ + $args + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + bakta: \$(echo \$(bakta_db --version) 2>&1 | cut -f '2' -d ' ') + END_VERSIONS + """ + + stub: + def args = task.ext.args ?: '' + """ + echo "bakta_db \\ + download \\ + $args" + + mkdir db + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + bakta: \$(echo \$(bakta_db --version) 2>&1 | cut -f '2' -d ' ') + END_VERSIONS + """ +} diff --git a/modules/nf-core/bakta/baktadbdownload/meta.yml b/modules/nf-core/bakta/baktadbdownload/meta.yml new file mode 100644 index 00000000..996f5b0c --- /dev/null +++ b/modules/nf-core/bakta/baktadbdownload/meta.yml @@ -0,0 +1,31 @@ +name: "bakta_baktadbdownload" +description: Downloads BAKTA database from Zenodo +keywords: + - bakta + - annotation + - fasta + - bacteria + - database + - download +tools: + - bakta: + description: Rapid & standardized annotation of bacterial genomes, MAGs & plasmids + homepage: https://github.com/oschwengers/bakta + documentation: https://github.com/oschwengers/bakta + tool_dev_url: https://github.com/oschwengers/bakta + doi: "10.1099/mgen.0.000685" + licence: ["GPL v3"] + +output: + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" + - db: + type: directory + description: BAKTA database directory + pattern: "db*/" + +authors: + - "@jfy133" + - "@jasmezz" diff --git a/modules/nf-core/canu/main.nf b/modules/nf-core/canu/main.nf new file mode 100644 index 00000000..84d2e0cd --- /dev/null +++ b/modules/nf-core/canu/main.nf @@ -0,0 +1,50 @@ +process CANU { + tag "$meta.id" + label 'process_high' + + conda "bioconda::canu=2.2" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://depot.galaxyproject.org/singularity/canu:2.2--ha47f30e_0': + 'biocontainers/canu:2.2--ha47f30e_0' }" + + input: + tuple val(meta), path(reads) + val mode + val genomesize + + output: + tuple val(meta), path("*.report") , emit: report + tuple val(meta), path("*.contigs.fasta.gz") , emit: assembly , optional: true + tuple val(meta), path("*.unassembled.fasta.gz") , emit: contigs + tuple val(meta), path("*.correctedReads.fasta.gz") , emit: corrected_reads , optional: true + tuple val(meta), path("*.trimmedReads.fasta.gz") , emit: corrected_trimmed_reads , optional: true + tuple val(meta), path("*.contigs.layout") , emit: metadata , optional: true + tuple val(meta), path("*.contigs.layout.readToTig") , emit: contig_position , optional: true + tuple val(meta), path("*.contigs.layout.tigInfo") , emit: contig_info , optional: true + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + def prefix = task.ext.prefix ?: "${meta.id}" + def valid_mode = ["-pacbio", "-nanopore", "-pacbio-hifi"] + if ( !valid_mode.contains(mode) ) { error "Unrecognised mode to run Canu. 
diff --git a/modules/nf-core/canu/main.nf b/modules/nf-core/canu/main.nf
new file mode 100644
index 00000000..84d2e0cd
--- /dev/null
+++ b/modules/nf-core/canu/main.nf
@@ -0,0 +1,50 @@
+process CANU {
+    tag "$meta.id"
+    label 'process_high'
+
+    conda "bioconda::canu=2.2"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/canu:2.2--ha47f30e_0':
+        'biocontainers/canu:2.2--ha47f30e_0' }"
+
+    input:
+    tuple val(meta), path(reads)
+    val mode
+    val genomesize
+
+    output:
+    tuple val(meta), path("*.report")                   , emit: report
+    tuple val(meta), path("*.contigs.fasta.gz")         , emit: assembly                , optional: true
+    tuple val(meta), path("*.unassembled.fasta.gz")     , emit: contigs
+    tuple val(meta), path("*.correctedReads.fasta.gz")  , emit: corrected_reads         , optional: true
+    tuple val(meta), path("*.trimmedReads.fasta.gz")    , emit: corrected_trimmed_reads , optional: true
+    tuple val(meta), path("*.contigs.layout")           , emit: metadata                , optional: true
+    tuple val(meta), path("*.contigs.layout.readToTig") , emit: contig_position         , optional: true
+    tuple val(meta), path("*.contigs.layout.tigInfo")   , emit: contig_info             , optional: true
+    path "versions.yml"                                 , emit: versions
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def args = task.ext.args ?: ''
+    def prefix = task.ext.prefix ?: "${meta.id}"
+    def valid_mode = ["-pacbio", "-nanopore", "-pacbio-hifi"]
+    if ( !valid_mode.contains(mode) ) { error "Unrecognised mode to run Canu. Options: ${valid_mode.join(', ')}" }
+    """
+    canu \\
+        -p ${prefix} \\
+        $mode \\
+        genomeSize=${genomesize} \\
+        $args \\
+        maxThreads=$task.cpus \\
+        $reads
+
+    gzip *.fasta
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        canu: \$(echo \$(canu --version 2>&1) | sed 's/^.*canu //; s/Using.*\$//' )
+    END_VERSIONS
+    """
+}
diff --git a/modules/nf-core/canu/meta.yml b/modules/nf-core/canu/meta.yml
new file mode 100644
index 00000000..ee01e889
--- /dev/null
+++ b/modules/nf-core/canu/meta.yml
@@ -0,0 +1,80 @@
+name: "canu"
+description: Accurate assembly of segmental duplications, satellites, and allelic variants from high-fidelity long reads.
+keywords:
+  - assembly
+  - pacbio
+  - hifi
+  - nanopore
+tools:
+  - "canu":
+      description: "Canu is a fork of the Celera Assembler designed for high-noise single-molecule sequencing."
+      homepage: "https://canu.readthedocs.io/en/latest/index.html#"
+      documentation: "https://canu.readthedocs.io/en/latest/tutorial.html"
+      tool_dev_url: "https://github.com/marbl/canu"
+      doi: "10.1101/gr.215087.116"
+      licence: "['GPL v2 and others']"
+
+input:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:true ]
+  - reads:
+      type: file
+      description: fasta/fastq file
+      pattern: "*.{fasta,fastq}"
+  - mode:
+      type: value
+      description: Canu mode depending on the input data (source and error rate)
+      pattern: "-pacbio|-nanopore|-pacbio-hifi"
+  - genomesize:
+      type: value
+      description: An estimate of the size of the genome. Common suffixes are allowed, for example, 3.7m or 2.8g
+      pattern: "[g|m|k]"
+
+output:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - versions:
+      type: file
+      description: File containing software versions
+      pattern: "versions.yml"
+  - report:
+      type: file
+      description: Most of the analysis reported during assembly
+      pattern: "*.report"
+  - assembly:
+      type: file
+      description: Everything which could be assembled and is the full assembly, including both unique, repetitive, and bubble elements.
+      pattern: "*.contigs.fasta.gz"
+  - contigs:
+      type: file
+      description: Reads and low-coverage contigs which could not be incorporated into the primary assembly.
+      pattern: "*.unassembled.fasta.gz"
+  - corrected_reads:
+      type: file
+      description: The reads after correction.
+      pattern: "*.correctedReads.fasta.gz"
+  - corrected_trimmed_reads:
+      type: file
+      description: The corrected reads after overlap-based trimming
+      pattern: "*.trimmedReads.fasta.gz"
+  - metadata:
+      type: file
+      description: (undocumented)
+      pattern: "*.contigs.layout"
+  - contig_position:
+      type: file
+      description: The position of each read in a contig
+      pattern: "*.contigs.layout.readToTig"
+  - contig_info:
+      type: file
+      description: A list of the contigs, lengths, coverage, number of reads and other metadata. Essentially the same information provided in the FASTA header line.
+      pattern: "*.contigs.layout.tigInfo"
+
+authors:
+  - "@scorreard"
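Editor's note: since `mode` and `genomesize` are plain values rather than channels, a call to this module looks like the following minimal sketch; the file name and the 4.6m genome size are placeholders, and `mode` must be one of the `valid_mode` strings checked above:

```groovy
// Illustrative call only: assemble Nanopore reads with the CANU module above.
include { CANU } from '../modules/nf-core/canu/main'

workflow test_canu {
    ch_reads = Channel.of([ [ id:'sample1', single_end:true ],
                            file('sample1.nanopore.fastq.gz') ])
    CANU(ch_reads, '-nanopore', '4.6m')
    CANU.out.assembly.view()
}
```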
+ pattern: "*.contigs.layout.tigInfo" + +authors: + - "@scorreard" diff --git a/modules/nf-core/custom/dumpsoftwareversions/main.nf b/modules/nf-core/custom/dumpsoftwareversions/main.nf new file mode 100644 index 00000000..c9d014b1 --- /dev/null +++ b/modules/nf-core/custom/dumpsoftwareversions/main.nf @@ -0,0 +1,24 @@ +process CUSTOM_DUMPSOFTWAREVERSIONS { + label 'process_single' + + // Requires `pyyaml` which does not have a dedicated container but is in the MultiQC container + conda "bioconda::multiqc=1.15" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://depot.galaxyproject.org/singularity/multiqc:1.15--pyhdfd78af_0' : + 'biocontainers/multiqc:1.15--pyhdfd78af_0' }" + + input: + path versions + + output: + path "software_versions.yml" , emit: yml + path "software_versions_mqc.yml", emit: mqc_yml + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + template 'dumpsoftwareversions.py' +} diff --git a/modules/nf-core/custom/dumpsoftwareversions/meta.yml b/modules/nf-core/custom/dumpsoftwareversions/meta.yml new file mode 100644 index 00000000..c32657de --- /dev/null +++ b/modules/nf-core/custom/dumpsoftwareversions/meta.yml @@ -0,0 +1,36 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/yaml-schema.json +name: custom_dumpsoftwareversions +description: Custom module used to dump software versions within the nf-core pipeline template +keywords: + - custom + - dump + - version +tools: + - custom: + description: Custom module used to dump software versions within the nf-core pipeline template + homepage: https://github.com/nf-core/tools + documentation: https://github.com/nf-core/tools + licence: ["MIT"] +input: + - versions: + type: file + description: YML file containing software versions + pattern: "*.yml" + +output: + - yml: + type: file + description: Standard YML file containing software versions + pattern: "software_versions.yml" + - mqc_yml: + type: file + description: MultiQC custom content YML file containing software versions + pattern: "software_versions_mqc.yml" + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" + +authors: + - "@drpatelh" + - "@grst" diff --git a/modules/nf-core/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py b/modules/nf-core/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py new file mode 100755 index 00000000..da033408 --- /dev/null +++ b/modules/nf-core/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python + + +"""Provide functions to merge multiple versions.yml files.""" + + +import yaml +import platform +from textwrap import dedent + + +def _make_versions_html(versions): + """Generate a tabular HTML output of all versions for MultiQC.""" + html = [ + dedent( + """\\ + + + + + + + + + + """ + ) + ] + for process, tmp_versions in sorted(versions.items()): + html.append("") + for i, (tool, version) in enumerate(sorted(tmp_versions.items())): + html.append( + dedent( + f"""\\ + + + + + + """ + ) + ) + html.append("") + html.append("
    Process Name Software Version
    {process if (i == 0) else ''}{tool}{version}
    ") + return "\\n".join(html) + + +def main(): + """Load all version files and generate merged output.""" + versions_this_module = {} + versions_this_module["${task.process}"] = { + "python": platform.python_version(), + "yaml": yaml.__version__, + } + + with open("$versions") as f: + versions_by_process = yaml.load(f, Loader=yaml.BaseLoader) | versions_this_module + + # aggregate versions by the module name (derived from fully-qualified process name) + versions_by_module = {} + for process, process_versions in versions_by_process.items(): + module = process.split(":")[-1] + try: + if versions_by_module[module] != process_versions: + raise AssertionError( + "We assume that software versions are the same between all modules. " + "If you see this error-message it means you discovered an edge-case " + "and should open an issue in nf-core/tools. " + ) + except KeyError: + versions_by_module[module] = process_versions + + versions_by_module["Workflow"] = { + "Nextflow": "$workflow.nextflow.version", + "$workflow.manifest.name": "$workflow.manifest.version", + } + + versions_mqc = { + "id": "software_versions", + "section_name": "${workflow.manifest.name} Software Versions", + "section_href": "https://github.com/${workflow.manifest.name}", + "plot_type": "html", + "description": "are collected at run time from the software output.", + "data": _make_versions_html(versions_by_module), + } + + with open("software_versions.yml", "w") as f: + yaml.dump(versions_by_module, f, default_flow_style=False) + with open("software_versions_mqc.yml", "w") as f: + yaml.dump(versions_mqc, f, default_flow_style=False) + + with open("versions.yml", "w") as f: + yaml.dump(versions_this_module, f, default_flow_style=False) + + +if __name__ == "__main__": + main() diff --git a/modules/nf-core/fastp/main.nf b/modules/nf-core/fastp/main.nf new file mode 100644 index 00000000..831b7f12 --- /dev/null +++ b/modules/nf-core/fastp/main.nf @@ -0,0 +1,102 @@ +process FASTP { + tag "$meta.id" + label 'process_medium' + + conda "bioconda::fastp=0.23.4" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://depot.galaxyproject.org/singularity/fastp:0.23.4--h5f740d0_0' : + 'biocontainers/fastp:0.23.4--h5f740d0_0' }" + + input: + tuple val(meta), path(reads) + path adapter_fasta + val save_trimmed_fail + val save_merged + + output: + tuple val(meta), path('*.fastp.fastq.gz') , optional:true, emit: reads + tuple val(meta), path('*.json') , emit: json + tuple val(meta), path('*.html') , emit: html + tuple val(meta), path('*.log') , emit: log + path "versions.yml" , emit: versions + tuple val(meta), path('*.fail.fastq.gz') , optional:true, emit: reads_fail + tuple val(meta), path('*.merged.fastq.gz'), optional:true, emit: reads_merged + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + def prefix = task.ext.prefix ?: "${meta.id}" + def adapter_list = adapter_fasta ? "--adapter_fasta ${adapter_fasta}" : "" + def fail_fastq = save_trimmed_fail && meta.single_end ? "--failed_out ${prefix}.fail.fastq.gz" : save_trimmed_fail && !meta.single_end ? "--unpaired1 ${prefix}_1.fail.fastq.gz --unpaired2 ${prefix}_2.fail.fastq.gz" : '' + // Added soft-links to original fastqs for consistent naming in MultiQC + // Use single ended for interleaved. Add --interleaved_in in config. + if ( task.ext.args?.contains('--interleaved_in') ) { + """ + [ ! 
diff --git a/modules/nf-core/fastp/main.nf b/modules/nf-core/fastp/main.nf
new file mode 100644
index 00000000..831b7f12
--- /dev/null
+++ b/modules/nf-core/fastp/main.nf
@@ -0,0 +1,102 @@
+process FASTP {
+    tag "$meta.id"
+    label 'process_medium'
+
+    conda "bioconda::fastp=0.23.4"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/fastp:0.23.4--h5f740d0_0' :
+        'biocontainers/fastp:0.23.4--h5f740d0_0' }"
+
+    input:
+    tuple val(meta), path(reads)
+    path  adapter_fasta
+    val   save_trimmed_fail
+    val   save_merged
+
+    output:
+    tuple val(meta), path('*.fastp.fastq.gz') , optional:true, emit: reads
+    tuple val(meta), path('*.json')           , emit: json
+    tuple val(meta), path('*.html')           , emit: html
+    tuple val(meta), path('*.log')            , emit: log
+    path "versions.yml"                       , emit: versions
+    tuple val(meta), path('*.fail.fastq.gz')  , optional:true, emit: reads_fail
+    tuple val(meta), path('*.merged.fastq.gz'), optional:true, emit: reads_merged
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def args = task.ext.args ?: ''
+    def prefix = task.ext.prefix ?: "${meta.id}"
+    def adapter_list = adapter_fasta ? "--adapter_fasta ${adapter_fasta}" : ""
+    def fail_fastq = save_trimmed_fail && meta.single_end ? "--failed_out ${prefix}.fail.fastq.gz" : save_trimmed_fail && !meta.single_end ? "--unpaired1 ${prefix}_1.fail.fastq.gz --unpaired2 ${prefix}_2.fail.fastq.gz" : ''
+    // Added soft-links to original fastqs for consistent naming in MultiQC
+    // Use single ended for interleaved. Add --interleaved_in in config.
+    if ( task.ext.args?.contains('--interleaved_in') ) {
+        """
+        [ ! -f ${prefix}.fastq.gz ] && ln -sf $reads ${prefix}.fastq.gz
+
+        fastp \\
+            --stdout \\
+            --in1 ${prefix}.fastq.gz \\
+            --thread $task.cpus \\
+            --json ${prefix}.fastp.json \\
+            --html ${prefix}.fastp.html \\
+            $adapter_list \\
+            $fail_fastq \\
+            $args \\
+            2> ${prefix}.fastp.log \\
+        | gzip -c > ${prefix}.fastp.fastq.gz
+
+        cat <<-END_VERSIONS > versions.yml
+        "${task.process}":
+            fastp: \$(fastp --version 2>&1 | sed -e "s/fastp //g")
+        END_VERSIONS
+        """
+    } else if (meta.single_end) {
+        """
+        [ ! -f ${prefix}.fastq.gz ] && ln -sf $reads ${prefix}.fastq.gz
+
+        fastp \\
+            --in1 ${prefix}.fastq.gz \\
+            --out1 ${prefix}.fastp.fastq.gz \\
+            --thread $task.cpus \\
+            --json ${prefix}.fastp.json \\
+            --html ${prefix}.fastp.html \\
+            $adapter_list \\
+            $fail_fastq \\
+            $args \\
+            2> ${prefix}.fastp.log
+
+        cat <<-END_VERSIONS > versions.yml
+        "${task.process}":
+            fastp: \$(fastp --version 2>&1 | sed -e "s/fastp //g")
+        END_VERSIONS
+        """
+    } else {
+        def merge_fastq = save_merged ? "-m --merged_out ${prefix}.merged.fastq.gz" : ''
+        """
+        [ ! -f ${prefix}_1.fastq.gz ] && ln -sf ${reads[0]} ${prefix}_1.fastq.gz
+        [ ! -f ${prefix}_2.fastq.gz ] && ln -sf ${reads[1]} ${prefix}_2.fastq.gz
+        fastp \\
+            --in1 ${prefix}_1.fastq.gz \\
+            --in2 ${prefix}_2.fastq.gz \\
+            --out1 ${prefix}_1.fastp.fastq.gz \\
+            --out2 ${prefix}_2.fastp.fastq.gz \\
+            --json ${prefix}.fastp.json \\
+            --html ${prefix}.fastp.html \\
+            $adapter_list \\
+            $fail_fastq \\
+            $merge_fastq \\
+            --thread $task.cpus \\
+            --detect_adapter_for_pe \\
+            $args \\
+            2> ${prefix}.fastp.log
+
+        cat <<-END_VERSIONS > versions.yml
+        "${task.process}":
+            fastp: \$(fastp --version 2>&1 | sed -e "s/fastp //g")
+        END_VERSIONS
+        """
+    }
+}
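Editor's note: a minimal sketch of calling this module for paired-end data; the empty list stands in for the optional `adapter_fasta` input, the two booleans map to `save_trimmed_fail` and `save_merged`, and the sample file names are placeholders:

```groovy
// Illustrative call only (not part of this diff):
include { FASTP } from '../modules/nf-core/fastp/main'

workflow test_fastp {
    ch_reads = Channel.of([ [ id:'sample1', single_end:false ],
                            [ file('sample1_R1.fastq.gz'), file('sample1_R2.fastq.gz') ] ])
    FASTP(ch_reads, [], true, true)
    FASTP.out.json.view()
}
```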
diff --git a/modules/nf-core/fastp/meta.yml b/modules/nf-core/fastp/meta.yml
new file mode 100644
index 00000000..197ea7ca
--- /dev/null
+++ b/modules/nf-core/fastp/meta.yml
@@ -0,0 +1,73 @@
+name: fastp
+description: Perform adapter/quality trimming on sequencing reads
+keywords:
+  - trimming
+  - quality control
+  - fastq
+tools:
+  - fastp:
+      description: |
+        A tool designed to provide fast all-in-one preprocessing for FastQ files. This tool is developed in C++ with multithreading supported to afford high performance.
+      documentation: https://github.com/OpenGene/fastp
+      doi: 10.1093/bioinformatics/bty560
+      licence: ["MIT"]
+input:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information. Use 'single_end: true' to specify single ended or interleaved FASTQs. Use 'single_end: false' for paired-end reads.
+        e.g. [ id:'test', single_end:false ]
+  - reads:
+      type: file
+      description: |
+        List of input FastQ files of size 1 and 2 for single-end and paired-end data,
+        respectively. If you wish to run interleaved paired-end data, supply as single-end data
+        but with `--interleaved_in` in your `modules.conf`'s `ext.args` for the module.
+  - adapter_fasta:
+      type: file
+      description: File in FASTA format containing possible adapters to remove.
+      pattern: "*.{fasta,fna,fas,fa}"
+  - save_trimmed_fail:
+      type: boolean
+      description: Specify true to save files that failed to pass trimming thresholds ending in `*.fail.fastq.gz`
+  - save_merged:
+      type: boolean
+      description: Specify true to save all merged reads to a file ending in `*.merged.fastq.gz`
+
+output:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - reads:
+      type: file
+      description: The trimmed/modified/unmerged fastq reads
+      pattern: "*fastp.fastq.gz"
+  - json:
+      type: file
+      description: Results in JSON format
+      pattern: "*.json"
+  - html:
+      type: file
+      description: Results in HTML format
+      pattern: "*.html"
+  - log:
+      type: file
+      description: fastp log file
+      pattern: "*.log"
+  - versions:
+      type: file
+      description: File containing software versions
+      pattern: "versions.yml"
+  - reads_fail:
+      type: file
+      description: Reads that failed the preprocessing
+      pattern: "*fail.fastq.gz"
+  - reads_merged:
+      type: file
+      description: Reads that were successfully merged
+      pattern: "*.merged.fastq.gz"
+authors:
+  - "@drpatelh"
+  - "@kevinmenden"
diff --git a/modules/nf-core/fastqc/main.nf b/modules/nf-core/fastqc/main.nf
new file mode 100644
index 00000000..249f9064
--- /dev/null
+++ b/modules/nf-core/fastqc/main.nf
@@ -0,0 +1,55 @@
+process FASTQC {
+    tag "$meta.id"
+    label 'process_medium'
+
+    conda "bioconda::fastqc=0.11.9"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/fastqc:0.11.9--0' :
+        'biocontainers/fastqc:0.11.9--0' }"
+
+    input:
+    tuple val(meta), path(reads)
+
+    output:
+    tuple val(meta), path("*.html"), emit: html
+    tuple val(meta), path("*.zip") , emit: zip
+    path  "versions.yml"           , emit: versions
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def args = task.ext.args ?: ''
+    def prefix = task.ext.prefix ?: "${meta.id}"
+    // Make list of old name and new name pairs to use for renaming in the bash while loop
+    def old_new_pairs = reads instanceof Path || reads.size() == 1 ? [[ reads, "${prefix}.${reads.extension}" ]] : reads.withIndex().collect { entry, index -> [ entry, "${prefix}_${index + 1}.${entry.extension}" ] }
+    def rename_to = old_new_pairs*.join(' ').join(' ')
+    def renamed_files = old_new_pairs.collect{ old_name, new_name -> new_name }.join(' ')
+    """
+    printf "%s %s\\n" $rename_to | while read old_name new_name; do
+        [ -f "\${new_name}" ] || ln -s \$old_name \$new_name
+    done
+
+    fastqc \\
+        $args \\
+        --threads $task.cpus \\
+        $renamed_files
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        fastqc: \$( fastqc --version | sed -e "s/FastQC v//g" )
+    END_VERSIONS
+    """
+
+    stub:
+    def prefix = task.ext.prefix ?: "${meta.id}"
+    """
+    touch ${prefix}.html
+    touch ${prefix}.zip
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        fastqc: \$( fastqc --version | sed -e "s/FastQC v//g" )
+    END_VERSIONS
+    """
+}
diff --git a/modules/nf-core/fastqc/meta.yml b/modules/nf-core/fastqc/meta.yml
new file mode 100644
index 00000000..4da5bb5a
--- /dev/null
+++ b/modules/nf-core/fastqc/meta.yml
@@ -0,0 +1,52 @@
+name: fastqc
+description: Run FastQC on sequenced reads
+keywords:
+  - quality control
+  - qc
+  - adapters
+  - fastq
+tools:
+  - fastqc:
+      description: |
+        FastQC gives general quality metrics about your reads.
+        It provides information about the quality score distribution
+        across your reads, the per base sequence content (%A/C/G/T).
+        You get information about adapter contamination and other
+        overrepresented sequences.
+      homepage: https://www.bioinformatics.babraham.ac.uk/projects/fastqc/
+      documentation: https://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/
+      licence: ["GPL-2.0-only"]
+input:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g.
[ id:'test', single_end:false ] + - reads: + type: file + description: | + List of input FastQ files of size 1 and 2 for single-end and paired-end data, + respectively. +output: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - html: + type: file + description: FastQC report + pattern: "*_{fastqc.html}" + - zip: + type: file + description: FastQC report archive + pattern: "*_{fastqc.zip}" + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" +authors: + - "@drpatelh" + - "@grst" + - "@ewels" + - "@FelixKrueger" diff --git a/modules/nf-core/fastqc/tests/main.nf.test b/modules/nf-core/fastqc/tests/main.nf.test new file mode 100644 index 00000000..3961de60 --- /dev/null +++ b/modules/nf-core/fastqc/tests/main.nf.test @@ -0,0 +1,32 @@ +nextflow_process { + + name "Test Process FASTQC" + script "modules/nf-core/fastqc/main.nf" + process "FASTQC" + tag "fastqc" + + test("Single-Read") { + + when { + process { + """ + input[0] = [ + [ id: 'test', single_end:true ], + [ + file(params.test_data['sarscov2']['illumina']['test_1_fastq_gz'], checkIfExists: true) + ] + ] + """ + } + } + + then { + assert process.success + assert process.out.html.get(0).get(1) ==~ ".*/test_fastqc.html" + assert path(process.out.html.get(0).get(1)).getText().contains("File typeConventional base calls") + assert process.out.zip.get(0).get(1) ==~ ".*/test_fastqc.zip" + } + + } + +} diff --git a/modules/nf-core/gunzip/main.nf b/modules/nf-core/gunzip/main.nf new file mode 100644 index 00000000..73bf08cd --- /dev/null +++ b/modules/nf-core/gunzip/main.nf @@ -0,0 +1,48 @@ +process GUNZIP { + tag "$archive" + label 'process_single' + + conda "conda-forge::sed=4.7" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://depot.galaxyproject.org/singularity/ubuntu:20.04' : + 'nf-core/ubuntu:20.04' }" + + input: + tuple val(meta), path(archive) + + output: + tuple val(meta), path("$gunzip"), emit: gunzip + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + gunzip = archive.toString() - '.gz' + """ + # Not calling gunzip itself because it creates files + # with the original group ownership rather than the + # default one for that user / the work directory + gzip \\ + -cd \\ + $args \\ + $archive \\ + > $gunzip + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + gunzip: \$(echo \$(gunzip --version 2>&1) | sed 's/^.*(gzip) //; s/ Copyright.*\$//') + END_VERSIONS + """ + + stub: + gunzip = archive.toString() - '.gz' + """ + touch $gunzip + cat <<-END_VERSIONS > versions.yml + "${task.process}": + gunzip: \$(echo \$(gunzip --version 2>&1) | sed 's/^.*(gzip) //; s/ Copyright.*\$//') + END_VERSIONS + """ +} diff --git a/modules/nf-core/gunzip/meta.yml b/modules/nf-core/gunzip/meta.yml new file mode 100644 index 00000000..4cdcdf4c --- /dev/null +++ b/modules/nf-core/gunzip/meta.yml @@ -0,0 +1,35 @@ +name: gunzip +description: Compresses and decompresses files. +keywords: + - gunzip + - compression + - decompression +tools: + - gunzip: + description: | + gzip is a file format and a software application used for file compression and decompression. + documentation: https://www.gnu.org/software/gzip/manual/gzip.html + licence: ["GPL-3.0-or-later"] +input: + - meta: + type: map + description: | + Optional groovy Map containing meta information + e.g. 
[ id:'test', single_end:false ] + - archive: + type: file + description: File to be compressed/uncompressed + pattern: "*.*" +output: + - gunzip: + type: file + description: Compressed/uncompressed file + pattern: "*.*" + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" +authors: + - "@joseespinosa" + - "@drpatelh" + - "@jfy133" diff --git a/modules/nf-core/kraken2/kraken2/main.nf b/modules/nf-core/kraken2/kraken2/main.nf new file mode 100644 index 00000000..da8d8c6d --- /dev/null +++ b/modules/nf-core/kraken2/kraken2/main.nf @@ -0,0 +1,58 @@ +process KRAKEN2_KRAKEN2 { + tag "$meta.id" + label 'process_high' + + conda "bioconda::kraken2=2.1.2 conda-forge::pigz=2.6" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://depot.galaxyproject.org/singularity/mulled-v2-5799ab18b5fc681e75923b2450abaa969907ec98:87fc08d11968d081f3e8a37131c1f1f6715b6542-0' : + 'biocontainers/mulled-v2-5799ab18b5fc681e75923b2450abaa969907ec98:87fc08d11968d081f3e8a37131c1f1f6715b6542-0' }" + + input: + tuple val(meta), path(reads) + path db + val save_output_fastqs + val save_reads_assignment + + output: + tuple val(meta), path('*.classified{.,_}*') , optional:true, emit: classified_reads_fastq + tuple val(meta), path('*.unclassified{.,_}*') , optional:true, emit: unclassified_reads_fastq + tuple val(meta), path('*classifiedreads.txt') , optional:true, emit: classified_reads_assignment + tuple val(meta), path('*report.txt') , emit: report + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + def prefix = task.ext.prefix ?: "${meta.id}" + def paired = meta.single_end ? "" : "--paired" + def classified = meta.single_end ? "${prefix}.classified.fastq" : "${prefix}.classified#.fastq" + def unclassified = meta.single_end ? "${prefix}.unclassified.fastq" : "${prefix}.unclassified#.fastq" + def classified_option = save_output_fastqs ? "--classified-out ${classified}" : "" + def unclassified_option = save_output_fastqs ? "--unclassified-out ${unclassified}" : "" + def readclassification_option = save_reads_assignment ? "--output ${prefix}.kraken2.classifiedreads.txt" : "--output /dev/null" + def compress_reads_command = save_output_fastqs ? 
"pigz -p $task.cpus *.fastq" : "" + + """ + kraken2 \\ + --db $db \\ + --threads $task.cpus \\ + --report ${prefix}.kraken2.report.txt \\ + --gzip-compressed \\ + $unclassified_option \\ + $classified_option \\ + $readclassification_option \\ + $paired \\ + $args \\ + $reads + + $compress_reads_command + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + kraken2: \$(echo \$(kraken2 --version 2>&1) | sed 's/^.*Kraken version //; s/ .*\$//') + pigz: \$( pigz --version 2>&1 | sed 's/pigz //g' ) + END_VERSIONS + """ +} diff --git a/modules/nf-core/modules/kraken2/kraken2/meta.yml b/modules/nf-core/kraken2/kraken2/meta.yml similarity index 62% rename from modules/nf-core/modules/kraken2/kraken2/meta.yml rename to modules/nf-core/kraken2/kraken2/meta.yml index cb1ec0de..4721f45b 100644 --- a/modules/nf-core/modules/kraken2/kraken2/meta.yml +++ b/modules/nf-core/kraken2/kraken2/meta.yml @@ -12,6 +12,7 @@ tools: homepage: https://ccb.jhu.edu/software/kraken2/ documentation: https://github.com/DerrickWood/kraken2/wiki/Manual doi: 10.1186/s13059-019-1891-0 + licence: ["MIT"] input: - meta: type: map @@ -26,34 +27,49 @@ input: - db: type: directory description: Kraken2 database + - save_output_fastqs: + type: string + description: | + If true, optional commands are added to save classified and unclassified reads + as fastq files + - save_reads_assignment: + type: string + description: | + If true, an optional command is added to save a file reporting the taxonomic + classification of each input read output: - meta: type: map description: | Groovy Map containing sample information e.g. [ id:'test', single_end:false ] - - classified: + - classified_reads_fastq: type: file description: | - Reads classified to belong to any of the taxa + Reads classified as belonging to any of the taxa on the Kraken2 database. pattern: "*{fastq.gz}" - - unclassified: + - unclassified_reads_fastq: type: file description: | - Reads not classified to belong to any of the taxa + Reads not classified to any of the taxa on the Kraken2 database. pattern: "*{fastq.gz}" - - txt: + - classified_reads_assignment: + type: file + description: | + Kraken2 output file indicating the taxonomic assignment of + each input read + - report: type: file description: | Kraken2 report containing stats about classified and not classifed reads. pattern: "*.{report.txt}" - - version: + - versions: type: file - description: File containing software version - pattern: "*.{version.txt}" + description: File containing software versions + pattern: "versions.yml" authors: - "@joseespinosa" - "@drpatelh" diff --git a/modules/nf-core/miniasm/main.nf b/modules/nf-core/miniasm/main.nf new file mode 100644 index 00000000..99b44992 --- /dev/null +++ b/modules/nf-core/miniasm/main.nf @@ -0,0 +1,41 @@ +process MINIASM { + tag "$meta.id" + label 'process_high' + + conda "bioconda::miniasm=0.3_r179" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
+ 'https://depot.galaxyproject.org/singularity/miniasm:0.3_r179--h5bf99c6_2' : + 'biocontainers/miniasm:0.3_r179--h5bf99c6_2' }" + + input: + tuple val(meta), path(reads), path(paf) + + output: + tuple val(meta), path("*.gfa.gz") , emit: gfa + tuple val(meta), path("*.fasta.gz"), emit: assembly + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + def prefix = task.ext.prefix ?: "${meta.id}" + """ + miniasm \\ + $args \\ + -f $reads \\ + $paf > \\ + ${prefix}.gfa + + awk '/^S/{print ">"\$2"\\n"\$3}' "${prefix}.gfa" | fold > ${prefix}.fasta + + gzip -n ${prefix}.gfa + gzip -n ${prefix}.fasta + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + miniasm: \$( miniasm -V 2>&1 ) + END_VERSIONS + """ +} diff --git a/modules/nf-core/miniasm/meta.yml b/modules/nf-core/miniasm/meta.yml new file mode 100644 index 00000000..59865945 --- /dev/null +++ b/modules/nf-core/miniasm/meta.yml @@ -0,0 +1,51 @@ +name: miniasm +description: A very fast OLC-based de novo assembler for noisy long reads +keywords: + - assembly + - pacbio + - nanopore +tools: + - miniasm: + description: Ultrafast de novo assembly for long noisy reads (though having no consensus step) + homepage: https://github.com/lh3/miniasm + documentation: https://github.com/lh3/miniasm + tool_dev_url: https://github.com/lh3/miniasm + doi: "10.1093/bioinformatics/btw152" + licence: ["MIT"] + +input: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - reads: + type: file + description: List of input PacBio/ONT FastQ files. + pattern: "*.{fastq,fastq.gz,fq,fq.gz}" + - paf: + type: file + description: Alignment in PAF format + pattern: "*{.paf,.paf.gz}" + +output: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" + - gfa: + type: file + description: Assembly graph + pattern: "*.gfa.gz" + - assembly: + type: file + description: Genome assembly + pattern: "*.fasta.gz" + +authors: + - "@avantonder" diff --git a/modules/nf-core/minimap2/align/main.nf b/modules/nf-core/minimap2/align/main.nf new file mode 100644 index 00000000..4da47c18 --- /dev/null +++ b/modules/nf-core/minimap2/align/main.nf @@ -0,0 +1,48 @@ +process MINIMAP2_ALIGN { + tag "$meta.id" + label 'process_medium' + + // Note: the versions here need to match the versions used in the mulled container below and minimap2/index + conda "bioconda::minimap2=2.24 bioconda::samtools=1.14" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://depot.galaxyproject.org/singularity/mulled-v2-66534bcbb7031a148b13e2ad42583020b9cd25c4:1679e915ddb9d6b4abda91880c4b48857d471bd8-0' : + 'biocontainers/mulled-v2-66534bcbb7031a148b13e2ad42583020b9cd25c4:1679e915ddb9d6b4abda91880c4b48857d471bd8-0' }" + + input: + tuple val(meta), path(reads) + path reference + val bam_format + val cigar_paf_format + val cigar_bam + + output: + tuple val(meta), path("*.paf"), optional: true, emit: paf + tuple val(meta), path("*.bam"), optional: true, emit: bam + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + def prefix = task.ext.prefix ?: "${meta.id}" + def bam_output = bam_format ? 
"-a | samtools sort | samtools view -@ ${task.cpus} -b -h -o ${prefix}.bam" : "-o ${prefix}.paf" + def cigar_paf = cigar_paf_format && !bam_format ? "-c" : '' + def set_cigar_bam = cigar_bam && bam_format ? "-L" : '' + """ + minimap2 \\ + $args \\ + -t $task.cpus \\ + "${reference ?: reads}" \\ + "$reads" \\ + $cigar_paf \\ + $set_cigar_bam \\ + $bam_output + + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + minimap2: \$(minimap2 --version 2>&1) + END_VERSIONS + """ +} diff --git a/modules/nf-core/minimap2/align/meta.yml b/modules/nf-core/minimap2/align/meta.yml new file mode 100644 index 00000000..991b39a0 --- /dev/null +++ b/modules/nf-core/minimap2/align/meta.yml @@ -0,0 +1,65 @@ +name: minimap2_align +description: A versatile pairwise aligner for genomic and spliced nucleotide sequences +keywords: + - align + - fasta + - fastq + - genome + - paf + - reference +tools: + - minimap2: + description: | + A versatile pairwise aligner for genomic and spliced nucleotide sequences. + homepage: https://github.com/lh3/minimap2 + documentation: https://github.com/lh3/minimap2#uguide + licence: ["MIT"] +input: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - reads: + type: file + description: | + List of input FASTA or FASTQ files of size 1 and 2 for single-end + and paired-end data, respectively. + - reference: + type: file + description: | + Reference database in FASTA format. + - bam_format: + type: boolean + description: Specify that output should be in BAM format + - cigar_paf_format: + type: boolean + description: Specify that output CIGAR should be in PAF format + - cigar_bam: + type: boolean + description: | + Write CIGAR with >65535 ops at the CG tag. This is recommended when + doing XYZ (https://github.com/lh3/minimap2#working-with-65535-cigar-operations) +output: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. 
[ id:'test', single_end:false ] + - paf: + type: file + description: Alignment in PAF format + pattern: "*.paf" + - bam: + type: file + description: Alignment in BAM format + pattern: "*.bam" + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" +authors: + - "@heuermh" + - "@sofstam" + - "@sateeshperi" + - "@jfy133" diff --git a/modules/nf-core/modules/fastqc/functions.nf b/modules/nf-core/modules/fastqc/functions.nf deleted file mode 100644 index da9da093..00000000 --- a/modules/nf-core/modules/fastqc/functions.nf +++ /dev/null @@ -1,68 +0,0 @@ -// -// Utility functions used in nf-core DSL2 module files -// - -// -// Extract name of software tool from process name using $task.process -// -def getSoftwareName(task_process) { - return task_process.tokenize(':')[-1].tokenize('_')[0].toLowerCase() -} - -// -// Function to initialise default values and to generate a Groovy Map of available options for nf-core modules -// -def initOptions(Map args) { - def Map options = [:] - options.args = args.args ?: '' - options.args2 = args.args2 ?: '' - options.args3 = args.args3 ?: '' - options.publish_by_meta = args.publish_by_meta ?: [] - options.publish_dir = args.publish_dir ?: '' - options.publish_files = args.publish_files - options.suffix = args.suffix ?: '' - return options -} - -// -// Tidy up and join elements of a list to return a path string -// -def getPathFromList(path_list) { - def paths = path_list.findAll { item -> !item?.trim().isEmpty() } // Remove empty entries - paths = paths.collect { it.trim().replaceAll("^[/]+|[/]+\$", "") } // Trim whitespace and trailing slashes - return paths.join('/') -} - -// -// Function to save/publish module results -// -def saveFiles(Map args) { - if (!args.filename.endsWith('.version.txt')) { - def ioptions = initOptions(args.options) - def path_list = [ ioptions.publish_dir ?: args.publish_dir ] - if (ioptions.publish_by_meta) { - def key_list = ioptions.publish_by_meta instanceof List ? ioptions.publish_by_meta : args.publish_by_meta - for (key in key_list) { - if (args.meta && key instanceof String) { - def path = key - if (args.meta.containsKey(key)) { - path = args.meta[key] instanceof Boolean ? "${key}_${args.meta[key]}".toString() : args.meta[key] - } - path = path instanceof String ? path : '' - path_list.add(path) - } - } - } - if (ioptions.publish_files instanceof Map) { - for (ext in ioptions.publish_files) { - if (args.filename.endsWith(ext.key)) { - def ext_list = path_list.collect() - ext_list.add(ext.value) - return "${getPathFromList(ext_list)}/$args.filename" - } - } - } else if (ioptions.publish_files == null) { - return "${getPathFromList(path_list)}/$args.filename" - } - } -} diff --git a/modules/nf-core/modules/fastqc/main.nf b/modules/nf-core/modules/fastqc/main.nf deleted file mode 100644 index 39c327b2..00000000 --- a/modules/nf-core/modules/fastqc/main.nf +++ /dev/null @@ -1,47 +0,0 @@ -// Import generic module functions -include { initOptions; saveFiles; getSoftwareName } from './functions' - -params.options = [:] -options = initOptions(params.options) - -process FASTQC { - tag "$meta.id" - label 'process_medium' - publishDir "${params.outdir}", - mode: params.publish_dir_mode, - saveAs: { filename -> saveFiles(filename:filename, options:params.options, publish_dir:getSoftwareName(task.process), meta:meta, publish_by_meta:['id']) } - - conda (params.enable_conda ? 
"bioconda::fastqc=0.11.9" : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container "https://depot.galaxyproject.org/singularity/fastqc:0.11.9--0" - } else { - container "quay.io/biocontainers/fastqc:0.11.9--0" - } - - input: - tuple val(meta), path(reads) - - output: - tuple val(meta), path("*.html"), emit: html - tuple val(meta), path("*.zip") , emit: zip - path "*.version.txt" , emit: version - - script: - // Add soft-links to original FastQs for consistent naming in pipeline - def software = getSoftwareName(task.process) - def prefix = options.suffix ? "${meta.id}${options.suffix}" : "${meta.id}" - if (meta.single_end) { - """ - [ ! -f ${prefix}.fastq.gz ] && ln -s $reads ${prefix}.fastq.gz - fastqc $options.args --threads $task.cpus ${prefix}.fastq.gz - fastqc --version | sed -e "s/FastQC v//g" > ${software}.version.txt - """ - } else { - """ - [ ! -f ${prefix}_1.fastq.gz ] && ln -s ${reads[0]} ${prefix}_1.fastq.gz - [ ! -f ${prefix}_2.fastq.gz ] && ln -s ${reads[1]} ${prefix}_2.fastq.gz - fastqc $options.args --threads $task.cpus ${prefix}_1.fastq.gz ${prefix}_2.fastq.gz - fastqc --version | sed -e "s/FastQC v//g" > ${software}.version.txt - """ - } -} diff --git a/modules/nf-core/modules/fastqc/meta.yml b/modules/nf-core/modules/fastqc/meta.yml deleted file mode 100644 index 8eb9953d..00000000 --- a/modules/nf-core/modules/fastqc/meta.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: fastqc -description: Run FastQC on sequenced reads -keywords: - - quality control - - qc - - adapters - - fastq -tools: - - fastqc: - description: | - FastQC gives general quality metrics about your reads. - It provides information about the quality score distribution - across your reads, the per base sequence content (%A/C/G/T). - You get information about adapter contamination and other - overrepresented sequences. - homepage: https://www.bioinformatics.babraham.ac.uk/projects/fastqc/ - documentation: https://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/ -input: - - meta: - type: map - description: | - Groovy Map containing sample information - e.g. [ id:'test', single_end:false ] - - reads: - type: file - description: | - List of input FastQ files of size 1 and 2 for single-end and paired-end data, - respectively. -output: - - meta: - type: map - description: | - Groovy Map containing sample information - e.g. 
[ id:'test', single_end:false ] - - html: - type: file - description: FastQC report - pattern: "*_{fastqc.html}" - - zip: - type: file - description: FastQC report archive - pattern: "*_{fastqc.zip}" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" -authors: - - "@drpatelh" - - "@grst" - - "@ewels" - - "@FelixKrueger" diff --git a/modules/nf-core/modules/kraken2/kraken2/functions.nf b/modules/nf-core/modules/kraken2/kraken2/functions.nf deleted file mode 100644 index da9da093..00000000 --- a/modules/nf-core/modules/kraken2/kraken2/functions.nf +++ /dev/null @@ -1,68 +0,0 @@ -// -// Utility functions used in nf-core DSL2 module files -// - -// -// Extract name of software tool from process name using $task.process -// -def getSoftwareName(task_process) { - return task_process.tokenize(':')[-1].tokenize('_')[0].toLowerCase() -} - -// -// Function to initialise default values and to generate a Groovy Map of available options for nf-core modules -// -def initOptions(Map args) { - def Map options = [:] - options.args = args.args ?: '' - options.args2 = args.args2 ?: '' - options.args3 = args.args3 ?: '' - options.publish_by_meta = args.publish_by_meta ?: [] - options.publish_dir = args.publish_dir ?: '' - options.publish_files = args.publish_files - options.suffix = args.suffix ?: '' - return options -} - -// -// Tidy up and join elements of a list to return a path string -// -def getPathFromList(path_list) { - def paths = path_list.findAll { item -> !item?.trim().isEmpty() } // Remove empty entries - paths = paths.collect { it.trim().replaceAll("^[/]+|[/]+\$", "") } // Trim whitespace and trailing slashes - return paths.join('/') -} - -// -// Function to save/publish module results -// -def saveFiles(Map args) { - if (!args.filename.endsWith('.version.txt')) { - def ioptions = initOptions(args.options) - def path_list = [ ioptions.publish_dir ?: args.publish_dir ] - if (ioptions.publish_by_meta) { - def key_list = ioptions.publish_by_meta instanceof List ? ioptions.publish_by_meta : args.publish_by_meta - for (key in key_list) { - if (args.meta && key instanceof String) { - def path = key - if (args.meta.containsKey(key)) { - path = args.meta[key] instanceof Boolean ? "${key}_${args.meta[key]}".toString() : args.meta[key] - } - path = path instanceof String ? 
path : '' - path_list.add(path) - } - } - } - if (ioptions.publish_files instanceof Map) { - for (ext in ioptions.publish_files) { - if (args.filename.endsWith(ext.key)) { - def ext_list = path_list.collect() - ext_list.add(ext.value) - return "${getPathFromList(ext_list)}/$args.filename" - } - } - } else if (ioptions.publish_files == null) { - return "${getPathFromList(path_list)}/$args.filename" - } - } -} diff --git a/modules/nf-core/modules/kraken2/kraken2/main.nf b/modules/nf-core/modules/kraken2/kraken2/main.nf deleted file mode 100644 index 0fa86579..00000000 --- a/modules/nf-core/modules/kraken2/kraken2/main.nf +++ /dev/null @@ -1,55 +0,0 @@ -// Import generic module functions -include { initOptions; saveFiles; getSoftwareName } from './functions' - -params.options = [:] -options = initOptions(params.options) - -process KRAKEN2_KRAKEN2 { - tag "$meta.id" - label 'process_high' - label 'process_long' - label 'process_high_memory' - publishDir "${params.outdir}", - mode: params.publish_dir_mode, - saveAs: { filename -> saveFiles(filename:filename, options:params.options, publish_dir:getSoftwareName(task.process), meta:meta, publish_by_meta:['id']) } - - conda (params.enable_conda ? 'bioconda::kraken2=2.1.1 conda-forge::pigz=2.6' : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container 'https://depot.galaxyproject.org/singularity/mulled-v2-5799ab18b5fc681e75923b2450abaa969907ec98:941789bd7fe00db16531c26de8bf3c5c985242a5-0' - } else { - container 'quay.io/biocontainers/mulled-v2-5799ab18b5fc681e75923b2450abaa969907ec98:941789bd7fe00db16531c26de8bf3c5c985242a5-0' - } - - input: - tuple val(meta), path(reads) - path db - - output: - tuple val(meta), path('*classified*') , emit: classified - tuple val(meta), path('*unclassified*'), emit: unclassified - tuple val(meta), path('*report.txt') , emit: txt - path '*.version.txt' , emit: version - - script: - def software = getSoftwareName(task.process) - def prefix = options.suffix ? "${meta.id}${options.suffix}" : "${meta.id}" - def paired = meta.single_end ? "" : "--paired" - def classified = meta.single_end ? "${prefix}.classified.fastq" : "${prefix}.classified#.fastq" - def unclassified = meta.single_end ? 
"${prefix}.unclassified.fastq" : "${prefix}.unclassified#.fastq" - """ - kraken2 \\ - --db $db \\ - --threads $task.cpus \\ - --unclassified-out $unclassified \\ - --classified-out $classified \\ - --report ${prefix}.kraken2.report.txt \\ - --gzip-compressed \\ - $paired \\ - $options.args \\ - $reads - - pigz -p $task.cpus *.fastq - - echo \$(kraken2 --version 2>&1) | sed 's/^.*Kraken version //; s/ .*\$//' > ${software}.version.txt - """ -} diff --git a/modules/nf-core/modules/multiqc/functions.nf b/modules/nf-core/modules/multiqc/functions.nf deleted file mode 100644 index da9da093..00000000 --- a/modules/nf-core/modules/multiqc/functions.nf +++ /dev/null @@ -1,68 +0,0 @@ -// -// Utility functions used in nf-core DSL2 module files -// - -// -// Extract name of software tool from process name using $task.process -// -def getSoftwareName(task_process) { - return task_process.tokenize(':')[-1].tokenize('_')[0].toLowerCase() -} - -// -// Function to initialise default values and to generate a Groovy Map of available options for nf-core modules -// -def initOptions(Map args) { - def Map options = [:] - options.args = args.args ?: '' - options.args2 = args.args2 ?: '' - options.args3 = args.args3 ?: '' - options.publish_by_meta = args.publish_by_meta ?: [] - options.publish_dir = args.publish_dir ?: '' - options.publish_files = args.publish_files - options.suffix = args.suffix ?: '' - return options -} - -// -// Tidy up and join elements of a list to return a path string -// -def getPathFromList(path_list) { - def paths = path_list.findAll { item -> !item?.trim().isEmpty() } // Remove empty entries - paths = paths.collect { it.trim().replaceAll("^[/]+|[/]+\$", "") } // Trim whitespace and trailing slashes - return paths.join('/') -} - -// -// Function to save/publish module results -// -def saveFiles(Map args) { - if (!args.filename.endsWith('.version.txt')) { - def ioptions = initOptions(args.options) - def path_list = [ ioptions.publish_dir ?: args.publish_dir ] - if (ioptions.publish_by_meta) { - def key_list = ioptions.publish_by_meta instanceof List ? ioptions.publish_by_meta : args.publish_by_meta - for (key in key_list) { - if (args.meta && key instanceof String) { - def path = key - if (args.meta.containsKey(key)) { - path = args.meta[key] instanceof Boolean ? "${key}_${args.meta[key]}".toString() : args.meta[key] - } - path = path instanceof String ? path : '' - path_list.add(path) - } - } - } - if (ioptions.publish_files instanceof Map) { - for (ext in ioptions.publish_files) { - if (args.filename.endsWith(ext.key)) { - def ext_list = path_list.collect() - ext_list.add(ext.value) - return "${getPathFromList(ext_list)}/$args.filename" - } - } - } else if (ioptions.publish_files == null) { - return "${getPathFromList(path_list)}/$args.filename" - } - } -} diff --git a/modules/nf-core/modules/multiqc/main.nf b/modules/nf-core/modules/multiqc/main.nf deleted file mode 100644 index da780800..00000000 --- a/modules/nf-core/modules/multiqc/main.nf +++ /dev/null @@ -1,35 +0,0 @@ -// Import generic module functions -include { initOptions; saveFiles; getSoftwareName } from './functions' - -params.options = [:] -options = initOptions(params.options) - -process MULTIQC { - label 'process_medium' - publishDir "${params.outdir}", - mode: params.publish_dir_mode, - saveAs: { filename -> saveFiles(filename:filename, options:params.options, publish_dir:getSoftwareName(task.process), meta:[:], publish_by_meta:[]) } - - conda (params.enable_conda ? 
"bioconda::multiqc=1.10.1" : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container "https://depot.galaxyproject.org/singularity/multiqc:1.10.1--py_0" - } else { - container "quay.io/biocontainers/multiqc:1.10.1--py_0" - } - - input: - path multiqc_files - - output: - path "*multiqc_report.html", emit: report - path "*_data" , emit: data - path "*_plots" , optional:true, emit: plots - path "*.version.txt" , emit: version - - script: - def software = getSoftwareName(task.process) - """ - multiqc -f $options.args . - multiqc --version | sed -e "s/multiqc, version //g" > ${software}.version.txt - """ -} diff --git a/modules/nf-core/modules/multiqc/meta.yml b/modules/nf-core/modules/multiqc/meta.yml deleted file mode 100644 index 532a8bb1..00000000 --- a/modules/nf-core/modules/multiqc/meta.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: MultiQC -description: Aggregate results from bioinformatics analyses across many samples into a single report -keywords: - - QC - - bioinformatics tools - - Beautiful stand-alone HTML report -tools: - - multiqc: - description: | - MultiQC searches a given directory for analysis logs and compiles a HTML report. - It's a general use tool, perfect for summarising the output from numerous bioinformatics tools. - homepage: https://multiqc.info/ - documentation: https://multiqc.info/docs/ -input: - - multiqc_files: - type: file - description: | - List of reports / files recognised by MultiQC, for example the html and zip output of FastQC -output: - - report: - type: file - description: MultiQC report file - pattern: "multiqc_report.html" - - data: - type: dir - description: MultiQC data dir - pattern: "multiqc_data" - - plots: - type: file - description: Plots created by MultiQC - pattern: "*_data" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" -authors: - - "@abhi18av" - - "@bunop" - - "@drpatelh" diff --git a/modules/nf-core/modules/prokka/functions.nf b/modules/nf-core/modules/prokka/functions.nf deleted file mode 100644 index da9da093..00000000 --- a/modules/nf-core/modules/prokka/functions.nf +++ /dev/null @@ -1,68 +0,0 @@ -// -// Utility functions used in nf-core DSL2 module files -// - -// -// Extract name of software tool from process name using $task.process -// -def getSoftwareName(task_process) { - return task_process.tokenize(':')[-1].tokenize('_')[0].toLowerCase() -} - -// -// Function to initialise default values and to generate a Groovy Map of available options for nf-core modules -// -def initOptions(Map args) { - def Map options = [:] - options.args = args.args ?: '' - options.args2 = args.args2 ?: '' - options.args3 = args.args3 ?: '' - options.publish_by_meta = args.publish_by_meta ?: [] - options.publish_dir = args.publish_dir ?: '' - options.publish_files = args.publish_files - options.suffix = args.suffix ?: '' - return options -} - -// -// Tidy up and join elements of a list to return a path string -// -def getPathFromList(path_list) { - def paths = path_list.findAll { item -> !item?.trim().isEmpty() } // Remove empty entries - paths = paths.collect { it.trim().replaceAll("^[/]+|[/]+\$", "") } // Trim whitespace and trailing slashes - return paths.join('/') -} - -// -// Function to save/publish module results -// -def saveFiles(Map args) { - if (!args.filename.endsWith('.version.txt')) { - def ioptions = initOptions(args.options) - def path_list = [ ioptions.publish_dir ?: args.publish_dir ] - if (ioptions.publish_by_meta) { - def 
key_list = ioptions.publish_by_meta instanceof List ? ioptions.publish_by_meta : args.publish_by_meta - for (key in key_list) { - if (args.meta && key instanceof String) { - def path = key - if (args.meta.containsKey(key)) { - path = args.meta[key] instanceof Boolean ? "${key}_${args.meta[key]}".toString() : args.meta[key] - } - path = path instanceof String ? path : '' - path_list.add(path) - } - } - } - if (ioptions.publish_files instanceof Map) { - for (ext in ioptions.publish_files) { - if (args.filename.endsWith(ext.key)) { - def ext_list = path_list.collect() - ext_list.add(ext.value) - return "${getPathFromList(ext_list)}/$args.filename" - } - } - } else if (ioptions.publish_files == null) { - return "${getPathFromList(path_list)}/$args.filename" - } - } -} diff --git a/modules/nf-core/modules/quast/functions.nf b/modules/nf-core/modules/quast/functions.nf deleted file mode 100644 index da9da093..00000000 --- a/modules/nf-core/modules/quast/functions.nf +++ /dev/null @@ -1,68 +0,0 @@ -// -// Utility functions used in nf-core DSL2 module files -// - -// -// Extract name of software tool from process name using $task.process -// -def getSoftwareName(task_process) { - return task_process.tokenize(':')[-1].tokenize('_')[0].toLowerCase() -} - -// -// Function to initialise default values and to generate a Groovy Map of available options for nf-core modules -// -def initOptions(Map args) { - def Map options = [:] - options.args = args.args ?: '' - options.args2 = args.args2 ?: '' - options.args3 = args.args3 ?: '' - options.publish_by_meta = args.publish_by_meta ?: [] - options.publish_dir = args.publish_dir ?: '' - options.publish_files = args.publish_files - options.suffix = args.suffix ?: '' - return options -} - -// -// Tidy up and join elements of a list to return a path string -// -def getPathFromList(path_list) { - def paths = path_list.findAll { item -> !item?.trim().isEmpty() } // Remove empty entries - paths = paths.collect { it.trim().replaceAll("^[/]+|[/]+\$", "") } // Trim whitespace and trailing slashes - return paths.join('/') -} - -// -// Function to save/publish module results -// -def saveFiles(Map args) { - if (!args.filename.endsWith('.version.txt')) { - def ioptions = initOptions(args.options) - def path_list = [ ioptions.publish_dir ?: args.publish_dir ] - if (ioptions.publish_by_meta) { - def key_list = ioptions.publish_by_meta instanceof List ? ioptions.publish_by_meta : args.publish_by_meta - for (key in key_list) { - if (args.meta && key instanceof String) { - def path = key - if (args.meta.containsKey(key)) { - path = args.meta[key] instanceof Boolean ? "${key}_${args.meta[key]}".toString() : args.meta[key] - } - path = path instanceof String ? 
path : '' - path_list.add(path) - } - } - } - if (ioptions.publish_files instanceof Map) { - for (ext in ioptions.publish_files) { - if (args.filename.endsWith(ext.key)) { - def ext_list = path_list.collect() - ext_list.add(ext.value) - return "${getPathFromList(ext_list)}/$args.filename" - } - } - } else if (ioptions.publish_files == null) { - return "${getPathFromList(path_list)}/$args.filename" - } - } -} diff --git a/modules/nf-core/modules/quast/main.nf b/modules/nf-core/modules/quast/main.nf deleted file mode 100644 index 0b94c410..00000000 --- a/modules/nf-core/modules/quast/main.nf +++ /dev/null @@ -1,48 +0,0 @@ -// Import generic module functions -include { initOptions; saveFiles; getSoftwareName } from './functions' - -params.options = [:] -options = initOptions(params.options) - -process QUAST { - label 'process_medium' - publishDir "${params.outdir}", - mode: params.publish_dir_mode, - saveAs: { filename -> saveFiles(filename:filename, options:params.options, publish_dir:getSoftwareName(task.process), meta:[:], publish_by_meta:[]) } - - conda (params.enable_conda ? 'bioconda::quast=5.0.2' : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container 'https://depot.galaxyproject.org/singularity/quast:5.0.2--py37pl526hb5aa323_2' - } else { - container 'quay.io/biocontainers/quast:5.0.2--py37pl526hb5aa323_2' - } - - input: - path consensus - path fasta - path gff - val use_fasta - val use_gff - - output: - path "${prefix}" , emit: results - path '*.tsv' , emit: tsv - path '*.version.txt', emit: version - - script: - def software = getSoftwareName(task.process) - prefix = options.suffix ?: software - def features = use_gff ? "--features $gff" : '' - def reference = use_fasta ? "-r $fasta" : '' - """ - quast.py \\ - --output-dir $prefix \\ - $reference \\ - $features \\ - --threads $task.cpus \\ - $options.args \\ - ${consensus.join(' ')} - ln -s ${prefix}/report.tsv - echo \$(quast.py --version 2>&1) | sed 's/^.*QUAST v//; s/ .*\$//' > ${software}.version.txt - """ -} diff --git a/modules/nf-core/modules/quast/meta.yml b/modules/nf-core/modules/quast/meta.yml deleted file mode 100644 index cc79486e..00000000 --- a/modules/nf-core/modules/quast/meta.yml +++ /dev/null @@ -1,46 +0,0 @@ -name: quast -description: Quality Assessment Tool for Genome Assemblies -keywords: - - quast - - assembly - - quality -tools: - - quast: - description: | - QUAST calculates quality metrics for genome assemblies - homepage: http://bioinf.spbau.ru/quast - doi: -input: - - consensus: - type: file - description: | - Fasta file containing the assembly of interest - - fasta: - type: file - description: | - The genome assembly to be evaluated. Has to contain at least a non-empty string dummy value. - - use_fasta: - type: boolean - description: Whether to use the provided fasta reference genome file - - gff: - type: file - description: The genome GFF file. Has to contain at least a non-empty string dummy value. 
- - use_gff: - type: boolean - description: Whether to use the provided gff reference annotation file - -output: - - quast: - type: directory - description: Directory containing complete quast report - pattern: "{prefix}.lineage_report.csv" - - report: - - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" - -authors: - - "@drpatelh" - - "@kevinmenden" diff --git a/modules/nf-core/modules/samtools/index/functions.nf b/modules/nf-core/modules/samtools/index/functions.nf deleted file mode 100644 index da9da093..00000000 --- a/modules/nf-core/modules/samtools/index/functions.nf +++ /dev/null @@ -1,68 +0,0 @@ -// -// Utility functions used in nf-core DSL2 module files -// - -// -// Extract name of software tool from process name using $task.process -// -def getSoftwareName(task_process) { - return task_process.tokenize(':')[-1].tokenize('_')[0].toLowerCase() -} - -// -// Function to initialise default values and to generate a Groovy Map of available options for nf-core modules -// -def initOptions(Map args) { - def Map options = [:] - options.args = args.args ?: '' - options.args2 = args.args2 ?: '' - options.args3 = args.args3 ?: '' - options.publish_by_meta = args.publish_by_meta ?: [] - options.publish_dir = args.publish_dir ?: '' - options.publish_files = args.publish_files - options.suffix = args.suffix ?: '' - return options -} - -// -// Tidy up and join elements of a list to return a path string -// -def getPathFromList(path_list) { - def paths = path_list.findAll { item -> !item?.trim().isEmpty() } // Remove empty entries - paths = paths.collect { it.trim().replaceAll("^[/]+|[/]+\$", "") } // Trim whitespace and trailing slashes - return paths.join('/') -} - -// -// Function to save/publish module results -// -def saveFiles(Map args) { - if (!args.filename.endsWith('.version.txt')) { - def ioptions = initOptions(args.options) - def path_list = [ ioptions.publish_dir ?: args.publish_dir ] - if (ioptions.publish_by_meta) { - def key_list = ioptions.publish_by_meta instanceof List ? ioptions.publish_by_meta : args.publish_by_meta - for (key in key_list) { - if (args.meta && key instanceof String) { - def path = key - if (args.meta.containsKey(key)) { - path = args.meta[key] instanceof Boolean ? "${key}_${args.meta[key]}".toString() : args.meta[key] - } - path = path instanceof String ? path : '' - path_list.add(path) - } - } - } - if (ioptions.publish_files instanceof Map) { - for (ext in ioptions.publish_files) { - if (args.filename.endsWith(ext.key)) { - def ext_list = path_list.collect() - ext_list.add(ext.value) - return "${getPathFromList(ext_list)}/$args.filename" - } - } - } else if (ioptions.publish_files == null) { - return "${getPathFromList(path_list)}/$args.filename" - } - } -} diff --git a/modules/nf-core/modules/samtools/index/main.nf b/modules/nf-core/modules/samtools/index/main.nf deleted file mode 100644 index e1966fb3..00000000 --- a/modules/nf-core/modules/samtools/index/main.nf +++ /dev/null @@ -1,35 +0,0 @@ -// Import generic module functions -include { initOptions; saveFiles; getSoftwareName } from './functions' - -params.options = [:] -options = initOptions(params.options) - -process SAMTOOLS_INDEX { - tag "$meta.id" - label 'process_low' - publishDir "${params.outdir}", - mode: params.publish_dir_mode, - saveAs: { filename -> saveFiles(filename:filename, options:params.options, publish_dir:getSoftwareName(task.process), meta:meta, publish_by_meta:['id']) } - - conda (params.enable_conda ? 
'bioconda::samtools=1.13' : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container "https://depot.galaxyproject.org/singularity/samtools:1.13--h8c37831_0" - } else { - container "quay.io/biocontainers/samtools:1.13--h8c37831_0" - } - - input: - tuple val(meta), path(bam) - - output: - tuple val(meta), path("*.bai"), optional:true, emit: bai - tuple val(meta), path("*.csi"), optional:true, emit: csi - path "*.version.txt" , emit: version - - script: - def software = getSoftwareName(task.process) - """ - samtools index $options.args $bam - echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//' > ${software}.version.txt - """ -} diff --git a/modules/nf-core/modules/samtools/index/meta.yml b/modules/nf-core/modules/samtools/index/meta.yml deleted file mode 100644 index 5d076e3b..00000000 --- a/modules/nf-core/modules/samtools/index/meta.yml +++ /dev/null @@ -1,47 +0,0 @@ -name: samtools_index -description: Index SAM/BAM/CRAM file -keywords: - - index - - bam - - sam - - cram -tools: - - samtools: - description: | - SAMtools is a set of utilities for interacting with and post-processing - short DNA sequence read alignments in the SAM, BAM and CRAM formats, written by Heng Li. - These files are generated as output by short read aligners like BWA. - homepage: http://www.htslib.org/ - documentation: hhttp://www.htslib.org/doc/samtools.html - doi: 10.1093/bioinformatics/btp352 -input: - - meta: - type: map - description: | - Groovy Map containing sample information - e.g. [ id:'test', single_end:false ] - - bam: - type: file - description: BAM/CRAM/SAM file - pattern: "*.{bam,cram,sam}" -output: - - meta: - type: map - description: | - Groovy Map containing sample information - e.g. 
[ id:'test', single_end:false ] - - bai: - type: file - description: BAM/CRAM/SAM index file - pattern: "*.{bai,crai,sai}" - - csi: - type: file - description: CSI index file - pattern: "*.{csi}" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" -authors: - - "@drpatelh" - - "@ewels" diff --git a/modules/nf-core/modules/samtools/sort/functions.nf b/modules/nf-core/modules/samtools/sort/functions.nf deleted file mode 100644 index da9da093..00000000 --- a/modules/nf-core/modules/samtools/sort/functions.nf +++ /dev/null @@ -1,68 +0,0 @@ -// -// Utility functions used in nf-core DSL2 module files -// - -// -// Extract name of software tool from process name using $task.process -// -def getSoftwareName(task_process) { - return task_process.tokenize(':')[-1].tokenize('_')[0].toLowerCase() -} - -// -// Function to initialise default values and to generate a Groovy Map of available options for nf-core modules -// -def initOptions(Map args) { - def Map options = [:] - options.args = args.args ?: '' - options.args2 = args.args2 ?: '' - options.args3 = args.args3 ?: '' - options.publish_by_meta = args.publish_by_meta ?: [] - options.publish_dir = args.publish_dir ?: '' - options.publish_files = args.publish_files - options.suffix = args.suffix ?: '' - return options -} - -// -// Tidy up and join elements of a list to return a path string -// -def getPathFromList(path_list) { - def paths = path_list.findAll { item -> !item?.trim().isEmpty() } // Remove empty entries - paths = paths.collect { it.trim().replaceAll("^[/]+|[/]+\$", "") } // Trim whitespace and trailing slashes - return paths.join('/') -} - -// -// Function to save/publish module results -// -def saveFiles(Map args) { - if (!args.filename.endsWith('.version.txt')) { - def ioptions = initOptions(args.options) - def path_list = [ ioptions.publish_dir ?: args.publish_dir ] - if (ioptions.publish_by_meta) { - def key_list = ioptions.publish_by_meta instanceof List ? ioptions.publish_by_meta : args.publish_by_meta - for (key in key_list) { - if (args.meta && key instanceof String) { - def path = key - if (args.meta.containsKey(key)) { - path = args.meta[key] instanceof Boolean ? "${key}_${args.meta[key]}".toString() : args.meta[key] - } - path = path instanceof String ? path : '' - path_list.add(path) - } - } - } - if (ioptions.publish_files instanceof Map) { - for (ext in ioptions.publish_files) { - if (args.filename.endsWith(ext.key)) { - def ext_list = path_list.collect() - ext_list.add(ext.value) - return "${getPathFromList(ext_list)}/$args.filename" - } - } - } else if (ioptions.publish_files == null) { - return "${getPathFromList(path_list)}/$args.filename" - } - } -} diff --git a/modules/nf-core/modules/samtools/sort/main.nf b/modules/nf-core/modules/samtools/sort/main.nf deleted file mode 100644 index 0a6b7048..00000000 --- a/modules/nf-core/modules/samtools/sort/main.nf +++ /dev/null @@ -1,35 +0,0 @@ -// Import generic module functions -include { initOptions; saveFiles; getSoftwareName } from './functions' - -params.options = [:] -options = initOptions(params.options) - -process SAMTOOLS_SORT { - tag "$meta.id" - label 'process_medium' - publishDir "${params.outdir}", - mode: params.publish_dir_mode, - saveAs: { filename -> saveFiles(filename:filename, options:params.options, publish_dir:getSoftwareName(task.process), meta:meta, publish_by_meta:['id']) } - - conda (params.enable_conda ? 
'bioconda::samtools=1.13' : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container "https://depot.galaxyproject.org/singularity/samtools:1.13--h8c37831_0" - } else { - container "quay.io/biocontainers/samtools:1.13--h8c37831_0" - } - - input: - tuple val(meta), path(bam) - - output: - tuple val(meta), path("*.bam"), emit: bam - path "*.version.txt" , emit: version - - script: - def software = getSoftwareName(task.process) - def prefix = options.suffix ? "${meta.id}${options.suffix}" : "${meta.id}" - """ - samtools sort $options.args -@ $task.cpus -o ${prefix}.bam -T $prefix $bam - echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//' > ${software}.version.txt - """ -} diff --git a/modules/nf-core/modules/samtools/sort/meta.yml b/modules/nf-core/modules/samtools/sort/meta.yml deleted file mode 100644 index 704e8c1f..00000000 --- a/modules/nf-core/modules/samtools/sort/meta.yml +++ /dev/null @@ -1,43 +0,0 @@ -name: samtools_sort -description: Sort SAM/BAM/CRAM file -keywords: - - sort - - bam - - sam - - cram -tools: - - samtools: - description: | - SAMtools is a set of utilities for interacting with and post-processing - short DNA sequence read alignments in the SAM, BAM and CRAM formats, written by Heng Li. - These files are generated as output by short read aligners like BWA. - homepage: http://www.htslib.org/ - documentation: hhttp://www.htslib.org/doc/samtools.html - doi: 10.1093/bioinformatics/btp352 -input: - - meta: - type: map - description: | - Groovy Map containing sample information - e.g. [ id:'test', single_end:false ] - - bam: - type: file - description: BAM/CRAM/SAM file - pattern: "*.{bam,cram,sam}" -output: - - meta: - type: map - description: | - Groovy Map containing sample information - e.g. [ id:'test', single_end:false ] - - bam: - type: file - description: Sorted BAM/CRAM/SAM file - pattern: "*.{bam,cram,sam}" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" -authors: - - "@drpatelh" - - "@ewels" diff --git a/modules/nf-core/multiqc/main.nf b/modules/nf-core/multiqc/main.nf new file mode 100644 index 00000000..40a691eb --- /dev/null +++ b/modules/nf-core/multiqc/main.nf @@ -0,0 +1,53 @@ +process MULTIQC { + label 'process_single' + + conda "bioconda::multiqc=1.17" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://depot.galaxyproject.org/singularity/multiqc:1.17--pyhdfd78af_0' : + 'biocontainers/multiqc:1.17--pyhdfd78af_0' }" + + input: + path multiqc_files, stageAs: "?/*" + path(multiqc_config) + path(extra_multiqc_config) + path(multiqc_logo) + + output: + path "*multiqc_report.html", emit: report + path "*_data" , emit: data + path "*_plots" , optional:true, emit: plots + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + def config = multiqc_config ? "--config $multiqc_config" : '' + def extra_config = extra_multiqc_config ? "--config $extra_multiqc_config" : '' + """ + multiqc \\ + --force \\ + $args \\ + $config \\ + $extra_config \\ + . 
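+
+    ## NB: extra_config is passed after config, so sections defined in the optional
+    ## extra_multiqc_config override common sections of multiqc_config (see this
+    ## module's meta.yml); each --config flag is omitted when its input is empty.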
+ + cat <<-END_VERSIONS > versions.yml + "${task.process}": + multiqc: \$( multiqc --version | sed -e "s/multiqc, version //g" ) + END_VERSIONS + """ + + stub: + """ + touch multiqc_data + touch multiqc_plots + touch multiqc_report.html + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + multiqc: \$( multiqc --version | sed -e "s/multiqc, version //g" ) + END_VERSIONS + """ +} diff --git a/modules/nf-core/multiqc/meta.yml b/modules/nf-core/multiqc/meta.yml new file mode 100644 index 00000000..f93b5ee5 --- /dev/null +++ b/modules/nf-core/multiqc/meta.yml @@ -0,0 +1,56 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/yaml-schema.json +name: MultiQC +description: Aggregate results from bioinformatics analyses across many samples into a single report +keywords: + - QC + - bioinformatics tools + - Beautiful stand-alone HTML report +tools: + - multiqc: + description: | + MultiQC searches a given directory for analysis logs and compiles a HTML report. + It's a general use tool, perfect for summarising the output from numerous bioinformatics tools. + homepage: https://multiqc.info/ + documentation: https://multiqc.info/docs/ + licence: ["GPL-3.0-or-later"] + +input: + - multiqc_files: + type: file + description: | + List of reports / files recognised by MultiQC, for example the html and zip output of FastQC + - multiqc_config: + type: file + description: Optional config yml for MultiQC + pattern: "*.{yml,yaml}" + - extra_multiqc_config: + type: file + description: Second optional config yml for MultiQC. Will override common sections in multiqc_config. + pattern: "*.{yml,yaml}" + - multiqc_logo: + type: file + description: Optional logo file for MultiQC + pattern: "*.{png}" + +output: + - report: + type: file + description: MultiQC report file + pattern: "multiqc_report.html" + - data: + type: directory + description: MultiQC data dir + pattern: "multiqc_data" + - plots: + type: file + description: Plots created by MultiQC + pattern: "*_data" + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" +authors: + - "@abhi18av" + - "@bunop" + - "@drpatelh" + - "@jfy133" diff --git a/modules/nf-core/nanoplot/main.nf b/modules/nf-core/nanoplot/main.nf new file mode 100644 index 00000000..65ea9618 --- /dev/null +++ b/modules/nf-core/nanoplot/main.nf @@ -0,0 +1,40 @@ +process NANOPLOT { + tag "$meta.id" + label 'process_low' + + conda "bioconda::nanoplot=1.41.0" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://depot.galaxyproject.org/singularity/nanoplot:1.41.0--pyhdfd78af_0' : + 'biocontainers/nanoplot:1.41.0--pyhdfd78af_0' }" + + input: + tuple val(meta), path(ontfile) + + output: + tuple val(meta), path("*.html") , emit: html + tuple val(meta), path("*.png") , optional: true, emit: png + tuple val(meta), path("*.txt") , emit: txt + tuple val(meta), path("*.log") , emit: log + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + def prefix = task.ext.prefix ?: "${meta.id}" + def input_file = ("$ontfile".endsWith(".fastq.gz") || "$ontfile".endsWith(".fq.gz")) ? "--fastq ${ontfile}" : ("$ontfile".endsWith(".txt")) ? 
"--summary ${ontfile}" : '' + """ + NanoPlot \\ + $args \\ + -t $task.cpus \\ + $input_file + + mv NanoStats.txt ${prefix}.txt + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + nanoplot: \$(echo \$(NanoPlot --version 2>&1) | sed 's/^.*NanoPlot //; s/ .*\$//') + END_VERSIONS + """ +} diff --git a/modules/nf-core/nanoplot/meta.yml b/modules/nf-core/nanoplot/meta.yml new file mode 100644 index 00000000..28c8c2d3 --- /dev/null +++ b/modules/nf-core/nanoplot/meta.yml @@ -0,0 +1,59 @@ +name: nanoplot +description: Run NanoPlot on nanopore-sequenced reads +keywords: + - quality control + - qc + - fastq + - sequencing summary + - nanopore +tools: + - nanoplot: + description: | + NanoPlot is a tool for ploting long-read sequencing data and + alignment. + homepage: http://nanoplot.bioinf.be + documentation: https://github.com/wdecoster/NanoPlot + licence: ["GPL-3.0-or-later"] +input: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - fastq: + type: file + description: | + List of input basecalled-FastQ files. + - summary_txt: + type: file + description: | + List of sequencing_summary.txt files from running basecalling. +output: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - html: + type: file + description: NanoPlot report + pattern: "*{.html}" + - png: + type: file + description: Plots generated by NanoPlot + pattern: "*{.png}" + - txt: + type: file + description: Stats from NanoPlot + pattern: "*{.txt}" + - log: + type: file + description: log file of NanoPlot run + pattern: "*{.log}" + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" +authors: + - "@drpatelh" + - "@yuukiiwa" diff --git a/modules/nf-core/nanoplot/nanoplot.diff b/modules/nf-core/nanoplot/nanoplot.diff new file mode 100644 index 00000000..bb61f798 --- /dev/null +++ b/modules/nf-core/nanoplot/nanoplot.diff @@ -0,0 +1,24 @@ +Changes in module 'nf-core/nanoplot' +--- modules/nf-core/nanoplot/main.nf ++++ modules/nf-core/nanoplot/main.nf +@@ -22,13 +22,16 @@ + + script: + def args = task.ext.args ?: '' +- def input_file = ("$ontfile".endsWith(".fastq.gz")) ? "--fastq ${ontfile}" : +- ("$ontfile".endsWith(".txt")) ? "--summary ${ontfile}" : '' ++ def prefix = task.ext.prefix ?: "${meta.id}" ++ def input_file = ("$ontfile".endsWith(".fastq.gz") || "$ontfile".endsWith(".fq.gz")) ? "--fastq ${ontfile}" : ("$ontfile".endsWith(".txt")) ? "--summary ${ontfile}" : '' + """ + NanoPlot \\ + $args \\ + -t $task.cpus \\ + $input_file ++ ++ mv NanoStats.txt ${prefix}.txt ++ + cat <<-END_VERSIONS > versions.yml + "${task.process}": + nanoplot: \$(echo \$(NanoPlot --version 2>&1) | sed 's/^.*NanoPlot //; s/ .*\$//') + +************************************************************ diff --git a/modules/nf-core/porechop/porechop/main.nf b/modules/nf-core/porechop/porechop/main.nf new file mode 100644 index 00000000..8fe0dd2e --- /dev/null +++ b/modules/nf-core/porechop/porechop/main.nf @@ -0,0 +1,36 @@ +process PORECHOP_PORECHOP { + tag "$meta.id" + label 'process_medium' + + conda "bioconda::porechop=0.2.4" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
+ 'https://depot.galaxyproject.org/singularity/porechop:0.2.4--py39h7cff6ad_2' : + 'biocontainers/porechop:0.2.4--py39h7cff6ad_2' }" + + input: + tuple val(meta), path(reads) + + output: + tuple val(meta), path("*.fastq.gz"), emit: reads + tuple val(meta), path("*.log") , emit: log + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + def prefix = task.ext.prefix ?: "${meta.id}" + """ + porechop \\ + -i $reads \\ + -t $task.cpus \\ + $args \\ + -o ${prefix}.fastq.gz \\ + > ${prefix}.log + cat <<-END_VERSIONS > versions.yml + "${task.process}": + porechop: \$( porechop --version ) + END_VERSIONS + """ +} diff --git a/modules/nf-core/porechop/porechop/meta.yml b/modules/nf-core/porechop/porechop/meta.yml new file mode 100644 index 00000000..98b838f6 --- /dev/null +++ b/modules/nf-core/porechop/porechop/meta.yml @@ -0,0 +1,55 @@ +name: "porechop_porechop" +description: Adapter removal and demultiplexing of Oxford Nanopore reads +keywords: + - adapter + - nanopore + - demultiplexing +tools: + - porechop: + description: Adapter removal and demultiplexing of Oxford Nanopore reads + homepage: "https://github.com/rrwick/Porechop" + documentation: "https://github.com/rrwick/Porechop" + tool_dev_url: "https://github.com/rrwick/Porechop" + doi: "10.1099/mgen.0.000132" + licence: ["GPL v3"] + +input: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - reads: + type: file + description: fastq/fastq.gz file + pattern: "*.{fastq,fastq.gz,fq,fq.gz}" + +output: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" + - reads: + type: file + description: Demultiplexed and/or adapter-trimmed fastq.gz file + pattern: "*.{fastq.gz}" + - log: + type: file + description: Log file containing stdout information + pattern: "*.log" + +authors: + - "@ggabernet" + - "@jasmezz" + - "@d4straub" + - "@LaurenceKuhl" + - "@SusiJo" + - "@jonasscheid" + - "@jonoave" + - "@GokceOGUZ" + - "@jfy133" diff --git a/modules/nf-core/modules/prokka/main.nf b/modules/nf-core/prokka/main.nf similarity index 50% rename from modules/nf-core/modules/prokka/main.nf rename to modules/nf-core/prokka/main.nf index 1fa3f3d9..60fbe232 100644 --- a/modules/nf-core/modules/prokka/main.nf +++ b/modules/nf-core/prokka/main.nf @@ -1,21 +1,11 @@ -include { initOptions; saveFiles; getSoftwareName } from './functions' - -params.options = [:] -options = initOptions(params.options) - process PROKKA { tag "$meta.id" label 'process_low' - publishDir "${params.outdir}", - mode: params.publish_dir_mode, - saveAs: { filename -> saveFiles(filename:filename, options:params.options, publish_dir:getSoftwareName(task.process), meta:meta, publish_by_meta:['id']) } - conda (params.enable_conda ? "bioconda::prokka=1.14.6" : null) - if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) { - container "https://depot.galaxyproject.org/singularity/prokka:1.14.6--pl526_0" - } else { - container "quay.io/biocontainers/prokka:1.14.6--pl526_0" - } + conda "bioconda::prokka=1.14.6" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
+ 'https://depot.galaxyproject.org/singularity/prokka%3A1.14.6--pl5321hdfd78af_4' : + 'biocontainers/prokka:1.14.6--pl5321hdfd78af_4' }" input: tuple val(meta), path(fasta) @@ -35,22 +25,28 @@ process PROKKA { tuple val(meta), path("${prefix}/*.log"), emit: log tuple val(meta), path("${prefix}/*.txt"), emit: txt tuple val(meta), path("${prefix}/*.tsv"), emit: tsv - path "*.version.txt", emit: version + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when script: - def software = getSoftwareName(task.process) - prefix = options.suffix ? "${meta.id}${options.suffix}" : "${meta.id}" + def args = task.ext.args ?: '' + prefix = task.ext.prefix ?: "${meta.id}" def proteins_opt = proteins ? "--proteins ${proteins[0]}" : "" - def prodigal_opt = prodigal_tf ? "--prodigaltf ${prodigal_tf[0]}" : "" + def prodigal_tf = prodigal_tf ? "--prodigaltf ${prodigal_tf[0]}" : "" """ prokka \\ - $options.args \\ + $args \\ --cpus $task.cpus \\ --prefix $prefix \\ $proteins_opt \\ $prodigal_tf \\ $fasta - echo \$(prokka --version 2>&1) | sed 's/^.*prokka //' > ${software}.version.txt + cat <<-END_VERSIONS > versions.yml + "${task.process}": + prokka: \$(echo \$(prokka --version 2>&1) | sed 's/^.*prokka //') + END_VERSIONS """ } diff --git a/modules/nf-core/modules/prokka/meta.yml b/modules/nf-core/prokka/meta.yml similarity index 95% rename from modules/nf-core/modules/prokka/meta.yml rename to modules/nf-core/prokka/meta.yml index 4489b2fd..7fc9e185 100644 --- a/modules/nf-core/modules/prokka/meta.yml +++ b/modules/nf-core/prokka/meta.yml @@ -9,7 +9,7 @@ tools: description: Rapid annotation of prokaryotic genomes homepage: https://github.com/tseemann/prokka doi: "10.1093/bioinformatics/btu153" - licence: ['GPL v2'] + licence: ["GPL v2"] input: - meta: @@ -34,10 +34,10 @@ output: description: | Groovy Map containing sample information e.g. [ id:'test', single_end:false ] - - version: + - versions: type: file - description: File containing software version - pattern: "*.{version.txt}" + description: File containing software versions + pattern: "versions.yml" - gff: type: file description: annotation in GFF3 format, containing both sequences and annotations diff --git a/modules/nf-core/quast/main.nf b/modules/nf-core/quast/main.nf new file mode 100644 index 00000000..e265df73 --- /dev/null +++ b/modules/nf-core/quast/main.nf @@ -0,0 +1,136 @@ +process QUAST { + tag "$meta.id" + label 'process_medium' + + conda "bioconda::quast=5.2.0" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://depot.galaxyproject.org/singularity/quast:5.2.0--py39pl5321h2add14b_1' : + 'biocontainers/quast:5.2.0--py39pl5321h2add14b_1' }" + + input: + tuple val(meta) , path(consensus) + tuple val(meta2), path(fasta) + tuple val(meta3), path(gff) + + output: + tuple val(meta), path("${prefix}") , emit: results + tuple val(meta), path("${prefix}.tsv") , emit: tsv + tuple val(meta), path("${prefix}_transcriptome.tsv") , optional: true , emit: transcriptome + tuple val(meta), path("${prefix}_misassemblies.tsv") , optional: true , emit: misassemblies + tuple val(meta), path("${prefix}_unaligned.tsv") , optional: true , emit: unaligned + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + prefix = task.ext.prefix ?: "${meta.id}" + def features = gff ? "--features $gff" : '' + def reference = fasta ? 
"-r $fasta" : '' + """ + quast.py \\ + --output-dir $prefix \\ + $reference \\ + $features \\ + --threads $task.cpus \\ + $args \\ + ${consensus.join(' ')} + + ln -s ${prefix}/report.tsv ${prefix}.tsv + [ -f ${prefix}/contigs_reports/all_alignments_transcriptome.tsv ] && ln -s ${prefix}/contigs_reports/all_alignments_transcriptome.tsv ${prefix}_transcriptome.tsv + [ -f ${prefix}/contigs_reports/misassemblies_report.tsv ] && ln -s ${prefix}/contigs_reports/misassemblies_report.tsv ${prefix}_misassemblies.tsv + [ -f ${prefix}/contigs_reports/unaligned_report.tsv ] && ln -s ${prefix}/contigs_reports/unaligned_report.tsv ${prefix}_unaligned.tsv + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + quast: \$(quast.py --version 2>&1 | sed 's/^.*QUAST v//; s/ .*\$//') + END_VERSIONS + """ + + stub: + def args = task.ext.args ?: '' + prefix = task.ext.prefix ?: "${meta.id}" + def features = gff ? "--features $gff" : '' + def reference = fasta ? "-r $fasta" : '' + + """ + mkdir -p $prefix + touch $prefix/report.tsv + touch $prefix/report.html + touch $prefix/report.pdf + touch $prefix/quast.log + touch $prefix/transposed_report.txt + touch $prefix/transposed_report.tex + touch $prefix/icarus.html + touch $prefix/report.tex + touch $prefix/report.txt + + mkdir -p $prefix/basic_stats + touch $prefix/basic_stats/cumulative_plot.pdf + touch $prefix/basic_stats/Nx_plot.pdf + touch $prefix/basic_stats/genome_GC_content_plot.pdf + touch $prefix/basic_stats/GC_content_plot.pdf + + mkdir -p $prefix/icarus_viewers + touch $prefix/icarus_viewers/contig_size_viewer.html + + ln -s $prefix/report.tsv ${prefix}.tsv + + if [ $fasta ]; then + touch $prefix/basic_stats/NGx_plot.pdf + touch $prefix/basic_stats/gc.icarus.txt + + mkdir -p $prefix/aligned_stats + touch $prefix/aligned_stats/NAx_plot.pdf + touch $prefix/aligned_stats/NGAx_plot.pdf + touch $prefix/aligned_stats/cumulative_plot.pdf + + mkdir -p $prefix/contigs_reports + touch $prefix/contigs_reports/all_alignments_transcriptome.tsv + touch $prefix/contigs_reports/contigs_report_transcriptome.mis_contigs.info + touch $prefix/contigs_reports/contigs_report_transcriptome.stderr + touch $prefix/contigs_reports/contigs_report_transcriptome.stdout + touch $prefix/contigs_reports/contigs_report_transcriptome.unaligned.info + mkdir -p $prefix/contigs_reports/minimap_output + touch $prefix/contigs_reports/minimap_output/transcriptome.coords + touch $prefix/contigs_reports/minimap_output/transcriptome.coords.filtered + touch $prefix/contigs_reports/minimap_output/transcriptome.coords_tmp + touch $prefix/contigs_reports/minimap_output/transcriptome.sf + touch $prefix/contigs_reports/minimap_output/transcriptome.unaligned + touch $prefix/contigs_reports/minimap_output/transcriptome.used_snps + touch $prefix/contigs_reports/misassemblies_frcurve_plot.pdf + touch $prefix/contigs_reports/misassemblies_plot.pdf + touch $prefix/contigs_reports/misassemblies_report.tex + touch $prefix/contigs_reports/misassemblies_report.tsv + touch $prefix/contigs_reports/misassemblies_report.txt + touch $prefix/contigs_reports/transcriptome.mis_contigs.fa + touch $prefix/contigs_reports/transposed_report_misassemblies.tex + touch $prefix/contigs_reports/transposed_report_misassemblies.tsv + touch $prefix/contigs_reports/transposed_report_misassemblies.txt + touch $prefix/contigs_reports/unaligned_report.tex + touch $prefix/contigs_reports/unaligned_report.tsv + touch $prefix/contigs_reports/unaligned_report.txt + + mkdir -p $prefix/genome_stats + touch 
$prefix/genome_stats/genome_info.txt + touch $prefix/genome_stats/transcriptome_gaps.txt + touch $prefix/icarus_viewers/alignment_viewer.html + + ln -sf ${prefix}/contigs_reports/misassemblies_report.tsv ${prefix}_misassemblies.tsv + ln -sf ${prefix}/contigs_reports/unaligned_report.tsv ${prefix}_unaligned.tsv + ln -sf ${prefix}/contigs_reports/all_alignments_transcriptome.tsv ${prefix}_transcriptome.tsv + + fi + + if ([ $fasta ] && [ $gff ]); then + touch $prefix/genome_stats/features_cumulative_plot.pdf + touch $prefix/genome_stats/features_frcurve_plot.pdf + fi + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + quast: \$(quast.py --version 2>&1 | sed 's/^.*QUAST v//; s/ .*\$//') + END_VERSIONS + """ +} diff --git a/modules/nf-core/quast/meta.yml b/modules/nf-core/quast/meta.yml new file mode 100644 index 00000000..78e94f8e --- /dev/null +++ b/modules/nf-core/quast/meta.yml @@ -0,0 +1,60 @@ +name: quast +description: Quality Assessment Tool for Genome Assemblies +keywords: + - quast + - assembly + - quality + - contig + - scaffold +tools: + - quast: + description: | + QUAST calculates quality metrics for genome assemblies + homepage: http://bioinf.spbau.ru/quast + doi: 10.1093/bioinformatics/btt086 + licence: ["GPL-2.0-only"] +input: + - consensus: + type: file + description: | + Fasta file containing the assembly of interest + - fasta: + type: file + description: | + The genome assembly to be evaluated. Has to contain at least a non-empty string dummy value. + - gff: + type: file + description: The genome GFF file. Has to contain at least a non-empty string dummy value. + +output: + - quast: + type: directory + description: Directory containing complete quast report + pattern: "{prefix}/" + - report: + type: file + description: tab-separated version of the summary, suitable for spreadsheets and mqc + pattern: "${prefix}.tsv" + - misassemblies: + type: file + description: | + Report containing misassemblies, only when a reference fasta is provided + pattern: "${prefix}_misassemblies.tsv" + - transcriptome: + type: file + description: | + Report containing all the alignments of transcriptome to the assembly, only when a reference fasta is provided + pattern: "${prefix}_transcriptome.tsv" + - unaligned: + type: file + description: | + Report containing unaligned contigs, only when a reference fasta is provided + pattern: "${prefix}_unaligned.tsv" + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" + +authors: + - "@drpatelh" + - "@kevinmenden" diff --git a/modules/nf-core/racon/main.nf b/modules/nf-core/racon/main.nf new file mode 100644 index 00000000..6d0cceb2 --- /dev/null +++ b/modules/nf-core/racon/main.nf @@ -0,0 +1,38 @@ +process RACON { + tag "$meta.id" + label 'process_high' + + conda "bioconda::racon=1.4.20" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
+ 'https://depot.galaxyproject.org/singularity/racon:1.4.20--h9a82719_1' : + 'biocontainers/racon:1.4.20--h9a82719_1' }" + + input: + tuple val(meta), path(reads), path(assembly), path(paf) + + output: + tuple val(meta), path('*_assembly_consensus.fasta.gz') , emit: improved_assembly + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + def prefix = task.ext.prefix ?: "${meta.id}" + """ + racon -t "$task.cpus" \\ + "${reads}" \\ + "${paf}" \\ + $args \\ + "${assembly}" > \\ + ${prefix}_assembly_consensus.fasta + + gzip -n ${prefix}_assembly_consensus.fasta + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + racon: \$( racon --version 2>&1 | sed 's/^.*v//' ) + END_VERSIONS + """ +} diff --git a/modules/nf-core/racon/meta.yml b/modules/nf-core/racon/meta.yml new file mode 100644 index 00000000..2e7737d9 --- /dev/null +++ b/modules/nf-core/racon/meta.yml @@ -0,0 +1,52 @@ +name: racon +description: Consensus module for raw de novo DNA assembly of long uncorrected reads +keywords: + - assembly + - pacbio + - nanopore + - polish +tools: + - racon: + description: Ultrafast consensus module for raw de novo genome assembly of long uncorrected reads. + homepage: https://github.com/lbcb-sci/racon + documentation: https://github.com/lbcb-sci/racon + tool_dev_url: https://github.com/lbcb-sci/racon + doi: 10.1101/gr.214270.116 + licence: ["MIT"] + +input: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - reads: + type: file + description: List of input FastQ files. Racon expects single end reads + pattern: "*.{fastq,fastq.gz,fq,fq.gz}" + - assembly: + type: file + description: Genome assembly to be improved + pattern: "*.{fasta,fa}" + - paf: + type: file + description: Alignment in PAF format + pattern: "*.paf" + +output: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" + - improved_assembly: + type: file + description: Improved genome assembly + pattern: "*_assembly_consensus.fasta.gz" + +authors: + - "@avantonder" diff --git a/modules/nf-core/samtools/index/main.nf b/modules/nf-core/samtools/index/main.nf new file mode 100644 index 00000000..0b20aa4b --- /dev/null +++ b/modules/nf-core/samtools/index/main.nf @@ -0,0 +1,48 @@ +process SAMTOOLS_INDEX { + tag "$meta.id" + label 'process_low' + + conda "bioconda::samtools=1.17" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
+ 'https://depot.galaxyproject.org/singularity/samtools:1.17--h00cdaf9_0' : + 'biocontainers/samtools:1.17--h00cdaf9_0' }" + + input: + tuple val(meta), path(input) + + output: + tuple val(meta), path("*.bai") , optional:true, emit: bai + tuple val(meta), path("*.csi") , optional:true, emit: csi + tuple val(meta), path("*.crai"), optional:true, emit: crai + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + """ + samtools \\ + index \\ + -@ ${task.cpus-1} \\ + $args \\ + $input + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//') + END_VERSIONS + """ + + stub: + """ + touch ${input}.bai + touch ${input}.crai + touch ${input}.csi + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//') + END_VERSIONS + """ +} diff --git a/modules/nf-core/samtools/index/meta.yml b/modules/nf-core/samtools/index/meta.yml new file mode 100644 index 00000000..8bd2fa6f --- /dev/null +++ b/modules/nf-core/samtools/index/meta.yml @@ -0,0 +1,53 @@ +name: samtools_index +description: Index SAM/BAM/CRAM file +keywords: + - index + - bam + - sam + - cram +tools: + - samtools: + description: | + SAMtools is a set of utilities for interacting with and post-processing + short DNA sequence read alignments in the SAM, BAM and CRAM formats, written by Heng Li. + These files are generated as output by short read aligners like BWA. + homepage: http://www.htslib.org/ + documentation: http://www.htslib.org/doc/samtools.html + doi: 10.1093/bioinformatics/btp352 + licence: ["MIT"] +input: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - bam: + type: file + description: BAM/CRAM/SAM file + pattern: "*.{bam,cram,sam}" +output: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - bai: + type: file + description: BAM/CRAM/SAM index file + pattern: "*.{bai,crai,sai}" + - crai: + type: file + description: BAM/CRAM/SAM index file + pattern: "*.{bai,crai,sai}" + - csi: + type: file + description: CSI index file + pattern: "*.{csi}" + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" +authors: + - "@drpatelh" + - "@ewels" + - "@maxulysse" diff --git a/modules/nf-core/samtools/sort/main.nf b/modules/nf-core/samtools/sort/main.nf new file mode 100644 index 00000000..2b7753fd --- /dev/null +++ b/modules/nf-core/samtools/sort/main.nf @@ -0,0 +1,49 @@ +process SAMTOOLS_SORT { + tag "$meta.id" + label 'process_medium' + + conda "bioconda::samtools=1.17" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://depot.galaxyproject.org/singularity/samtools:1.17--h00cdaf9_0' : + 'biocontainers/samtools:1.17--h00cdaf9_0' }" + + input: + tuple val(meta), path(bam) + + output: + tuple val(meta), path("*.bam"), emit: bam + tuple val(meta), path("*.csi"), emit: csi, optional: true + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + def prefix = task.ext.prefix ?: "${meta.id}" + if ("$bam" == "${prefix}.bam") error "Input and output names are the same, use \"task.ext.prefix\" to disambiguate!" 
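+    // NB: the guard above stops samtools sort from overwriting its input in place.
+    // A distinct output name can be set via task.ext.prefix in conf/modules.config,
+    // e.g. (hypothetical snippet): withName: 'SAMTOOLS_SORT' { ext.prefix = { "${meta.id}.sorted" } }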
+ """ + samtools sort \\ + $args \\ + -@ $task.cpus \\ + -o ${prefix}.bam \\ + -T $prefix \\ + $bam + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//') + END_VERSIONS + """ + + stub: + def prefix = task.ext.prefix ?: "${meta.id}" + """ + touch ${prefix}.bam + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//') + END_VERSIONS + """ +} diff --git a/modules/nf-core/samtools/sort/meta.yml b/modules/nf-core/samtools/sort/meta.yml new file mode 100644 index 00000000..07328431 --- /dev/null +++ b/modules/nf-core/samtools/sort/meta.yml @@ -0,0 +1,48 @@ +name: samtools_sort +description: Sort SAM/BAM/CRAM file +keywords: + - sort + - bam + - sam + - cram +tools: + - samtools: + description: | + SAMtools is a set of utilities for interacting with and post-processing + short DNA sequence read alignments in the SAM, BAM and CRAM formats, written by Heng Li. + These files are generated as output by short read aligners like BWA. + homepage: http://www.htslib.org/ + documentation: http://www.htslib.org/doc/samtools.html + doi: 10.1093/bioinformatics/btp352 + licence: ["MIT"] +input: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - bam: + type: file + description: BAM/CRAM/SAM file + pattern: "*.{bam,cram,sam}" +output: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - bam: + type: file + description: Sorted BAM/CRAM/SAM file + pattern: "*.{bam,cram,sam}" + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" + - csi: + type: file + description: BAM index file (optional) + pattern: "*.csi" +authors: + - "@drpatelh" + - "@ewels" diff --git a/modules/nf-core/untar/main.nf b/modules/nf-core/untar/main.nf new file mode 100644 index 00000000..61461c39 --- /dev/null +++ b/modules/nf-core/untar/main.nf @@ -0,0 +1,63 @@ +process UNTAR { + tag "$archive" + label 'process_single' + + conda "conda-forge::sed=4.7 conda-forge::grep=3.11 conda-forge::tar=1.34" + container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? + 'https://depot.galaxyproject.org/singularity/ubuntu:20.04' : + 'nf-core/ubuntu:20.04' }" + + input: + tuple val(meta), path(archive) + + output: + tuple val(meta), path("$prefix"), emit: untar + path "versions.yml" , emit: versions + + when: + task.ext.when == null || task.ext.when + + script: + def args = task.ext.args ?: '' + def args2 = task.ext.args2 ?: '' + prefix = task.ext.prefix ?: ( meta.id ? "${meta.id}" : archive.baseName.toString().replaceFirst(/\.tar$/, "")) + + """ + mkdir $prefix + + ## Ensures --strip-components only applied when top level of tar contents is a directory + ## If just files or multiple directories, place all in prefix + if [[ \$(tar -taf ${archive} | grep -o -P "^.*?\\/" | uniq | wc -l) -eq 1 ]]; then + tar \\ + -C $prefix --strip-components 1 \\ + -xavf \\ + $args \\ + $archive \\ + $args2 + else + tar \\ + -C $prefix \\ + -xavf \\ + $args \\ + $archive \\ + $args2 + fi + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + untar: \$(echo \$(tar --version 2>&1) | sed 's/^.*(GNU tar) //; s/ Copyright.*\$//') + END_VERSIONS + """ + + stub: + prefix = task.ext.prefix ?: ( meta.id ? 
"${meta.id}" : archive.toString().replaceFirst(/\.[^\.]+(.gz)?$/, "")) + """ + mkdir $prefix + touch ${prefix}/file.txt + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + untar: \$(echo \$(tar --version 2>&1) | sed 's/^.*(GNU tar) //; s/ Copyright.*\$//') + END_VERSIONS + """ +} diff --git a/modules/nf-core/untar/meta.yml b/modules/nf-core/untar/meta.yml new file mode 100644 index 00000000..db241a6e --- /dev/null +++ b/modules/nf-core/untar/meta.yml @@ -0,0 +1,41 @@ +name: untar +description: Extract files. +keywords: + - untar + - uncompress + - extract +tools: + - untar: + description: | + Extract tar.gz files. + documentation: https://www.gnu.org/software/tar/manual/ + licence: ["GPL-3.0-or-later"] +input: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - archive: + type: file + description: File to be untar + pattern: "*.{tar}.{gz}" +output: + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - untar: + type: directory + description: Directory containing contents of archive + pattern: "*/" + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" +authors: + - "@joseespinosa" + - "@drpatelh" + - "@matthdsm" + - "@jfy133" diff --git a/nextflow.config b/nextflow.config index 3bce38bd..66e2cced 100644 --- a/nextflow.config +++ b/nextflow.config @@ -1,7 +1,7 @@ /* -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ nf-core/bacass Nextflow config file -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default config options for all compute environments ---------------------------------------------------------------------------------------- */ @@ -10,74 +10,87 @@ params { // Input options - input = null + input = null + + // QC and trimming options + save_trimmed_fail = false + save_merged = false // Contamination_screening - kraken2db = "" + kraken2db = "" // Assembly parameters - assembler = 'unicycler' //allowed are unicycler, canu, miniasm - assembly_type = 'short' //allowed are short, long, hybrid (hybrid works only with Unicycler) - unicycler_args = "" - canu_args = '' //Default no extra options, can be adjusted by the user + assembler = 'unicycler' //allowed are unicycler, canu, miniasm + assembly_type = 'short' //allowed are short, long, hybrid (hybrid works only with Unicycler) + unicycler_args = "" + canu_mode = '-nanopore' // allowed modes: ["-pacbio", "-nanopore", "-pacbio-hifi"] + canu_args = '' //Default no extra options, can be adjusted by the user // Assembly polishing - polish_method = 'medaka' + polish_method = 'medaka' // Annotation - annotation_tool = 'prokka' - prokka_args = "" - dfast_config = "$projectDir/assets/test_config_dfast.py" + annotation_tool = 'prokka' // allowed are 'prokka', 'bakta' or 'dfast' + prokka_args = "" + baktadb = '' + baktadb_download = false + baktadb_download_args = '--type light' // allowed: '--type light' or '--type full' + dfast_config = "$projectDir/assets/test_config_dfast.py" // Skipping options - skip_kraken2 = false - skip_pycoqc = false - skip_annotation = false - skip_polish = false + skip_fastqc = false + skip_fastp = false + skip_kraken2 = false + skip_pycoqc = false + 
skip_annotation              = false
+    skip_polish                  = false
+    skip_multiqc                 = false

     // MultiQC options
-    multiqc_config             = null
-    multiqc_title              = null
-    max_multiqc_email_size     = '25.MB'
+    multiqc_config               = null
+    multiqc_title                = null
+    multiqc_logo                 = null
+    max_multiqc_email_size       = '25.MB'
+    multiqc_methods_description  = null

     // Boilerplate options
-    outdir                     = './results'
-    tracedir                   = "${params.outdir}/pipeline_info"
-    publish_dir_mode           = 'copy'
-    email                      = null
-    email_on_fail              = null
-    plaintext_email            = false
-    monochrome_logs            = false
-    help                       = false
-    validate_params            = true
-    show_hidden_params         = false
-    schema_ignore_params       = 'modules,igenomes_base'
-    enable_conda               = false
-    singularity_pull_docker_container = false
+    outdir                       = null
+    publish_dir_mode             = 'copy'
+    email                        = null
+    email_on_fail                = null
+    plaintext_email              = false
+    monochrome_logs              = false
+    hook_url                     = null
+    help                         = false
+    validate_params              = true
+    schema_ignore_params         = 'modules,igenomes_base'
+    version                      = false

     // Config options
-    custom_config_version      = 'master'
-    custom_config_base         = "https://raw.githubusercontent.com/nf-core/configs/${params.custom_config_version}"
-    hostnames                  = [:]
-    config_profile_description = null
-    config_profile_contact     = null
-    config_profile_url         = null
-    config_profile_name        = null
+    config_profile_name          = null
+    config_profile_description   = null
+    custom_config_version        = 'master'
+    custom_config_base           = "https://raw.githubusercontent.com/nf-core/configs/${params.custom_config_version}"
+    config_profile_contact       = null
+    config_profile_url           = null

     // Max resource options
     // Defaults only, expecting to be overwritten
-    max_memory                 = '128.GB'
-    max_cpus                   = 16
-    max_time                   = '240.h'
+    max_memory                   = '128.GB'
+    max_cpus                     = 16
+    max_time                     = '240.h'
+
+    // Schema validation default options
+    validationFailUnrecognisedParams = false
+    validationLenientMode            = false
+    validationSchemaIgnoreParams     = 'genomes'
+    validationShowHiddenParams       = false
+    validate_params                  = true
 }

 // Load base.config by default for all pipelines
 includeConfig 'conf/base.config'

-// Load modules.config for DSL2 module specific options
-includeConfig 'conf/modules.config'
-
 // Load nf-core custom profiles from different Institutions
 try {
     includeConfig "${params.custom_config_base}/nfcore_custom.config"
@@ -85,52 +98,102 @@ try {
     System.err.println("WARNING: Could not load nf-core/config profiles: ${params.custom_config_base}/nfcore_custom.config")
 }

+// Load nf-core/bacass custom profiles from different institutions.
+// Warning: Uncomment only if a pipeline-specific institutional config already exists on nf-core/configs!
+// try { +// includeConfig "${params.custom_config_base}/pipeline/bacass.config" +// } catch (Exception e) { +// System.err.println("WARNING: Could not load nf-core/config/bacass profiles: ${params.custom_config_base}/pipeline/bacass.config") +// } profiles { - debug { process.beforeScript = 'echo $HOSTNAME' } + debug { + dumpHashes = true + process.beforeScript = 'echo $HOSTNAME' + cleanup = false + } conda { - params.enable_conda = true + conda.enabled = true docker.enabled = false singularity.enabled = false podman.enabled = false shifter.enabled = false charliecloud.enabled = false + apptainer.enabled = false + } + mamba { + conda.enabled = true + conda.useMamba = true + docker.enabled = false + singularity.enabled = false + podman.enabled = false + shifter.enabled = false + charliecloud.enabled = false + apptainer.enabled = false } docker { docker.enabled = true docker.userEmulation = true + conda.enabled = false singularity.enabled = false podman.enabled = false shifter.enabled = false charliecloud.enabled = false + apptainer.enabled = false + } + arm { + docker.runOptions = '-u $(id -u):$(id -g) --platform=linux/amd64' } singularity { singularity.enabled = true singularity.autoMounts = true + conda.enabled = false docker.enabled = false podman.enabled = false shifter.enabled = false charliecloud.enabled = false + apptainer.enabled = false } podman { podman.enabled = true + conda.enabled = false docker.enabled = false singularity.enabled = false shifter.enabled = false charliecloud.enabled = false + apptainer.enabled = false } shifter { shifter.enabled = true + conda.enabled = false docker.enabled = false singularity.enabled = false podman.enabled = false charliecloud.enabled = false + apptainer.enabled = false } charliecloud { charliecloud.enabled = true + conda.enabled = false + docker.enabled = false + singularity.enabled = false + podman.enabled = false + shifter.enabled = false + apptainer.enabled = false + } + apptainer { + apptainer.enabled = true + apptainer.autoMounts = true + conda.enabled = false docker.enabled = false singularity.enabled = false podman.enabled = false shifter.enabled = false + charliecloud.enabled = false + } + gitpod { + executor.name = 'local' + executor.cpus = 4 + executor.memory = 8.GB } test { includeConfig 'conf/test.config' } test_dfast { includeConfig 'conf/test_dfast.config' } @@ -140,44 +203,65 @@ profiles { test_full { includeConfig 'conf/test_full.config' } } +// Set default registry for Apptainer, Docker, Podman and Singularity independent of -profile +// Will not be used unless Apptainer / Docker / Podman / Singularity are enabled +// Set to your registry if you have a mirror of containers +apptainer.registry = 'quay.io' +docker.registry = 'quay.io' +podman.registry = 'quay.io' +singularity.registry = 'quay.io' + +// Nextflow plugins +plugins { + id 'nf-validation' // Validation of pipeline parameters and creation of an input channel from a sample sheet +} + // Export these variables to prevent local Python/R libraries from conflicting with those in the container +// The JULIA depot path has been adjusted to a fixed path `/usr/local/share/julia` that needs to be used for packages in the container. +// See https://apeltzer.github.io/post/03-julia-lang-nextflow/ for details on that. Once we have a common agreement on where to keep Julia packages, this is adjustable. 
+ env { PYTHONNOUSERSITE = 1 R_PROFILE_USER = "/.Rprofile" R_ENVIRON_USER = "/.Renviron" + JULIA_DEPOT_PATH = "/usr/local/share/julia" } // Capture exit codes from upstream processes when piping -process.shell = ['/bin/bash', '-euo', 'pipefail'] +process.shell = ['/bin/bash', '-euo', 'pipefail'] def trace_timestamp = new java.util.Date().format( 'yyyy-MM-dd_HH-mm-ss') timeline { enabled = true - file = "${params.tracedir}/execution_timeline_${trace_timestamp}.html" + file = "${params.outdir}/pipeline_info/execution_timeline_${trace_timestamp}.html" } report { enabled = true - file = "${params.tracedir}/execution_report_${trace_timestamp}.html" + file = "${params.outdir}/pipeline_info/execution_report_${trace_timestamp}.html" } trace { enabled = true - file = "${params.tracedir}/execution_trace_${trace_timestamp}.txt" + file = "${params.outdir}/pipeline_info/execution_trace_${trace_timestamp}.txt" } dag { enabled = true - file = "${params.tracedir}/pipeline_dag_${trace_timestamp}.svg" + file = "${params.outdir}/pipeline_info/pipeline_dag_${trace_timestamp}.html" } manifest { name = 'nf-core/bacass' - author = 'Andreas Wilm, Alexander Peltzer' + author = """Andreas Wilm, Alexander Peltzer""" homePage = 'https://github.com/nf-core/bacass' - description = 'Simple bacterial assembly and annotation' + description = """Simple bacterial assembly and annotation""" mainScript = 'main.nf' - nextflowVersion = '!>=21.04.0' - version = '2.0.0' + nextflowVersion = '!>=23.04.0' + version = '2.1.0' + doi = '10.5281/zenodo.2669428' } +// Load modules.config for DSL2 module specific options +includeConfig 'conf/modules.config' + // Function to ensure that resource requirements don't go beyond // a maximum limit def check_max(obj, type) { diff --git a/nextflow_schema.json b/nextflow_schema.json index 216c26fd..6b416b30 100644 --- a/nextflow_schema.json +++ b/nextflow_schema.json @@ -10,24 +10,21 @@ "type": "object", "fa_icon": "fas fa-terminal", "description": "Define where the pipeline should find input data and save output data.", - "required": [ - "input" - ], + "required": ["input", "outdir"], "properties": { "input": { "type": "string", - "format": "file-path", - "mimetype": "text/csv", - "pattern": "^\\S+\\.csv$", - "schema": "assets/schema_input.json", - "description": "Path to comma-separated file containing information about the samples in the experiment.", - "help_text": "You will need to create a design file with information about the samples in your experiment before running the pipeline. Use this parameter to specify its location. It has to be a tab-separated file with 6 columns, and a header row. See [usage docs](https://nf-co.re/bacass/usage#samplesheet-input).\n\nFor example:\n\n`--input 'design_hybrid.csv'`\n\nAn example of properly formatted input files can be found at the [nf-core/test-datasets](https://github.com/nf-core/test-datasets/tree/bacass). \n\nFor example, this is the input used for a hybrid assembly in testing:\nID R1 R2 LongFastQ Fast5 GenomeSize\nERR044595 https://github.com/nf-core/test-datasets/raw/bacass/ERR044595_1M_1.fastq.gz https://github.com/nf-core/test-datasets/raw/bacass/ERR044595_1M_2.fastq.gz https://github.com/nf-core/test-datasets/raw/bacass/nanopore/subset15000.fq.gz NA 2.8m\n\n* `ID`: The identifier to use for handling the dataset e.g. 
sample name\n* `R1`: The forward reads in case of available short-read data\n* `R2`: The reverse reads in case of available short-read data\n* `LongFastQ`: The long read FastQ file with reads in FASTQ format\n* `Fast5`: The folder containing the basecalled fast5 files\n* `GenomeSize`: The expected genome size of the assembly. Only used by the canu assembler.\n\nMissing values (e.g. Fast5 folder in case of short reads) can be omitted by using a `NA` in the TSV file. The pipeline will handle such cases appropriately then.",
-      "fa_icon": "fas fa-file-csv"
+      "exists": true,
+      "mimetype": "text/tsv",
+      "fa_icon": "fas fa-dna",
+      "description": "Path to tab-separated sample sheet",
+      "help_text": "Path to sample sheet, either tab-separated (.tsv), comma-separated (.csv), or in YAML format (.yml/.yaml), that points to compressed fastq files.\n\nThe sample sheet must have six tab-separated columns/entries with the following headers: \n- `ID` (required): Unique sample IDs, must start with a letter, and can only contain letters, numbers or underscores\n- `R1` (optional): Paths to (forward) reads zipped FastQ files\n- `R2` (optional): Paths to reverse reads zipped FastQ files, required if the data is paired-end\n- `LongFastQ` (optional): Paths to long reads zipped FastQ files\n- `Fast5` (optional): Paths to the directory containing FAST5 files\n- `GenomeSize` (optional): A number (including decimals) ending with 'm', representing genome size.\n\nPlease be aware that files will be required based on the chosen assembly type specified with the '--assembly_type' option, which can be set to one of the following values: ['short', 'long', 'hybrid'].",
+      "schema": "assets/schema_input.json"
     },
     "outdir": {
       "type": "string",
-      "description": "Path to the output directory where the results will be saved.",
-      "default": "./results",
+      "format": "directory-path",
+      "description": "The output directory where the results will be saved. You have to use absolute paths to storage on Cloud infrastructure.",
       "fa_icon": "fas fa-folder-open"
     },
     "email": {
@@ -39,6 +36,31 @@
       }
     }
   },
+  "qc_and_trim": {
+    "title": "QC and Trim",
+    "type": "object",
+    "description": "Parameters for QC and trimming of short reads",
+    "default": "",
+    "properties": {
+      "save_trimmed_fail": {
+        "type": "boolean",
+        "description": "Save files that failed to pass trimming thresholds, ending in `*.fail.fastq.gz`"
+      },
+      "save_merged": {
+        "type": "boolean",
+        "description": "Save all merged reads to a file ending in `*.merged.fastq.gz`"
+      },
+      "skip_fastqc": {
+        "type": "boolean",
+        "description": "Skip FastQC"
+      },
+      "skip_fastp": {
+        "type": "boolean",
+        "description": "Skip FastP"
+      }
+    }
+  },
   "contamination_screening": {
     "title": "Contamination Screening",
     "type": "object",
@@ -80,6 +102,11 @@
       "description": "Extra arguments for Unicycler",
       "help_text": "This advanced option allows you to pass extra arguments to Unicycler (e.g. `\"--mode conservative\"` or `\"--no_correct\"`). For this to work you need to quote the arguments and add at least one space."
}, + "canu_mode": { + "type": "string", + "enum": ["-pacbio", "-nanopore", "-pacbio-hifi", "null"], + "description": "Allowed technologies for long read assembly : [\"-pacbio\", \"-nanopore\", \"-pacbio-hifi\"]" + }, "canu_args": { "type": "string", "fa_icon": "fas fa-ship", @@ -106,20 +133,35 @@ "annotation": { "title": "Annotation", "type": "object", - "description": "", + "description": "Parameters for the annotation", "default": "", "fa_icon": "fas fa-align-left", "properties": { "annotation_tool": { "type": "string", "default": "prokka", - "description": "The annotation method to annotate the final assembly. Default choice is `prokka`, but the `dfast` tool is also available. For the latter, make sure to create your specific config if you're not happy with the default one provided. See [#dfast_config](#dfastconfig) to find out how." + "description": "The annotation method to annotate the final assembly. Default choice is `prokka`, but the `dfast` tool is also available. For the latter, make sure to create your specific config if you're not happy with the default one provided. See [#dfast_config](#dfastconfig) to find out how.", + "enum": ["prokka", "bakta", "dfast"] }, "prokka_args": { "type": "string", "description": "Extra arguments for prokka annotation tool.", "help_text": "This advanced option allows you to pass extra arguments to Prokka (e.g. `\" --rfam\"` or `\" --genus name\"`). For this to work you need to quote the arguments and add at least one space between the arguments. Example:\n\n```bash\n--prokka_args `--rfam --genus Escherichia Coli`\n```\n" }, + "baktadb": { + "type": "string", + "description": "Path to Bakta database" + }, + "baktadb_download": { + "type": "boolean", + "description": "Download Bakta database" + }, + "baktadb_download_args": { + "type": "string", + "default": "--type light", + "description": "This can be used to supply [extra options](https://github.com/oschwengers/bakta#database-download) to the Bakta download module", + "enum": ["--type light", "--type full"] + }, "dfast_config": { "type": "string", "default": "assets/test_config_dfast.py", @@ -154,6 +196,10 @@ "type": "boolean", "fa_icon": "fas fa-forward", "description": "Skip polishing the long-read assembly with fast5 input. Will not affect short/hybrid assemblies." + }, + "skip_multiqc": { + "type": "boolean", + "description": "Skip MultiQC" } } }, @@ -179,12 +225,6 @@ "help_text": "If you're running offline, Nextflow will not be able to fetch the institutional config files from the internet. If you don't need them, then this is not a problem. If you do need them, you should download the files from the repo and tell Nextflow where to find them with this parameter.", "fa_icon": "fas fa-users-cog" }, - "hostnames": { - "type": "string", - "description": "Institutional configs hostname.", - "hidden": true, - "fa_icon": "fas fa-users-cog" - }, "config_profile_name": { "type": "string", "description": "Institutional config name.", @@ -240,7 +280,7 @@ "description": "Maximum amount of time that can be requested for any single job.", "default": "240.h", "fa_icon": "far fa-clock", - "pattern": "^(\\d+\\.?\\s*(s|m|h|day)\\s*)+$", + "pattern": "^(\\d+\\.?\\s*(s|m|h|d|day)\\s*)+$", "hidden": true, "help_text": "Use to set an upper-limit for the time requirement for each process. Should be a string in the format integer-unit e.g. 
`--max_time '2.h'`"
                }
            },
@@ -259,20 +299,19 @@
                    "fa_icon": "fas fa-question-circle",
                    "hidden": true
                },
+                "version": {
+                    "type": "boolean",
+                    "description": "Display version and exit.",
+                    "fa_icon": "fas fa-question-circle",
+                    "hidden": true
+                },
                "publish_dir_mode": {
                    "type": "string",
                    "default": "copy",
                    "description": "Method used to save pipeline results to output directory.",
                    "help_text": "The Nextflow `publishDir` option specifies which intermediate files should be saved to the output directory. This option tells the pipeline what method should be used to move these files. See [Nextflow docs](https://www.nextflow.io/docs/latest/process.html#publishdir) for details.",
                    "fa_icon": "fas fa-copy",
-                    "enum": [
-                        "symlink",
-                        "rellink",
-                        "link",
-                        "copy",
-                        "copyNoFollow",
-                        "move"
-                    ],
+                    "enum": ["symlink", "rellink", "link", "copy", "copyNoFollow", "move"],
                    "hidden": true
                },
                "multiqc_title": {
@@ -308,19 +347,31 @@
                    "fa_icon": "fas fa-palette",
                    "hidden": true
                },
+                "hook_url": {
+                    "type": "string",
+                    "description": "Incoming hook URL for messaging service",
+                    "fa_icon": "fas fa-people-group",
+                    "help_text": "Incoming hook URL for messaging service. Currently, MS Teams and Slack are supported.",
+                    "hidden": true
+                },
                "multiqc_config": {
                    "type": "string",
+                    "format": "file-path",
                    "description": "Custom config file to supply to MultiQC.",
                    "fa_icon": "fas fa-cog",
                    "hidden": true
                },
-                "tracedir": {
+                "multiqc_logo": {
                    "type": "string",
-                    "description": "Directory to keep pipeline Nextflow logs and reports.",
-                    "default": "${params.outdir}/pipeline_info",
-                    "fa_icon": "fas fa-cogs",
+                    "description": "Custom logo file to supply to MultiQC. File name must also be set in the MultiQC config file",
+                    "fa_icon": "fas fa-image",
                    "hidden": true
                },
+                "multiqc_methods_description": {
+                    "type": "string",
+                    "description": "Custom MultiQC yaml file containing HTML including a methods description.",
+                    "fa_icon": "fas fa-cog"
+                },
                "validate_params": {
                    "type": "boolean",
                    "description": "Boolean whether to validate parameters against the schema at runtime",
@@ -328,25 +379,32 @@
                    "fa_icon": "fas fa-check-square",
                    "hidden": true
                },
-                "show_hidden_params": {
+                "validationShowHiddenParams": {
                    "type": "boolean",
                    "fa_icon": "far fa-eye-slash",
                    "description": "Show all params when using `--help`",
                    "hidden": true,
                    "help_text": "By default, parameters set as _hidden_ in the schema are not shown on the command line when a user runs with `--help`. Specifying this option will tell the pipeline to show all parameters."
                },
-                "enable_conda": {
+                "validationFailUnrecognisedParams": {
                    "type": "boolean",
-                    "description": "Run this workflow with Conda. You can also use '-profile conda' instead of providing this parameter.",
+                    "fa_icon": "far fa-check-circle",
+                    "description": "Validation of parameters fails when an unrecognised parameter is found.",
                    "hidden": true,
-                    "fa_icon": "fas fa-bacon"
+                    "help_text": "By default, when an unrecognised parameter is found, a warning is returned."
                },
-                "singularity_pull_docker_container": {
+                "validationLenientMode": {
                    "type": "boolean",
-                    "description": "Instead of directly downloading Singularity images for use with Singularity, force the workflow to pull and convert Docker containers instead.",
+                    "fa_icon": "far fa-check-circle",
+                    "description": "Validation of parameters in lenient mode.",
                    "hidden": true,
-                    "fa_icon": "fas fa-toolbox",
-                    "help_text": "This may be useful for example if you are unable to directly pull Singularity containers to run the pipeline due to http/https proxy issues."
+                    "help_text": "Allows string values that are parseable as numbers or booleans. For further information see [JSONSchema docs](https://github.com/everit-org/json-schema#lenient-mode)."
+                },
+                "schema_ignore_params": {
+                    "type": "string",
+                    "default": "modules,igenomes_base",
+                    "hidden": true,
+                    "description": "A comma-separated string of inputs the schema validation should ignore"
+                }
            }
        }
@@ -355,6 +413,9 @@
        {
            "$ref": "#/definitions/input_output_options"
        },
+        {
+            "$ref": "#/definitions/qc_and_trim"
+        },
        {
            "$ref": "#/definitions/contamination_screening"
        },
@@ -380,4 +441,4 @@
            "$ref": "#/definitions/generic_options"
        }
    ]
-}
\ No newline at end of file
+}
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..0d62beb6
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,10 @@
+# Config file for Python. Mostly used to configure linting of bin/check_samplesheet.py with Black.
+# Should be kept the same as nf-core/tools to avoid fighting with template synchronisation.
+[tool.black]
+line-length = 120
+target_version = ["py37", "py38", "py39", "py310"]
+
+[tool.isort]
+profile = "black"
+known_first_party = ["nf_core"]
+multi_line_output = 3
diff --git a/subworkflows/local/bakta_dbdownload_run.nf b/subworkflows/local/bakta_dbdownload_run.nf
new file mode 100644
index 00000000..675adcbe
--- /dev/null
+++ b/subworkflows/local/bakta_dbdownload_run.nf
@@ -0,0 +1,58 @@
+//
+// Annotation of Bacterial genomes with Bakta
+//
+
+include { BAKTA_BAKTADBDOWNLOAD } from '../../modules/nf-core/bakta/baktadbdownload/main'
+include { UNTAR } from '../../modules/nf-core/untar/main'
+include { BAKTA_BAKTA } from '../../modules/nf-core/bakta/bakta/main'
+
+
+workflow BAKTA_DBDOWNLOAD_RUN {
+    take:
+    ch_fasta // channel: [ val(meta), path(fasta) ]
+    ch_path_baktadb // channel: [ path(fasta) ]
+    val_baktadb_download // value: boolean
+
+    main:
+    ch_versions = Channel.empty()
+
+    //
+    // SUBWORKFLOW: Parse, download and/or untar Bakta database
+    //
+    if( ch_path_baktadb ){
+        if (ch_path_baktadb.endsWith('.tar.gz')){
+            ch_baktadb_tar = Channel.from(ch_path_baktadb).map{ db -> [ [id: 'baktadb'], db ]}
+
+            // MODULE: untar database
+            UNTAR( ch_baktadb_tar )
+            ch_baktadb = UNTAR.out.untar.map{ meta, db -> db }
+            ch_versions = ch_versions.mix(UNTAR.out.versions)
+        } else {
+            ch_baktadb = Channel.from(ch_path_baktadb).map{ db -> db }
+        }
+    } else if (!ch_path_baktadb && val_baktadb_download){
+        // MODULE: Download Bakta database from Zenodo
+        BAKTA_BAKTADBDOWNLOAD()
+        ch_baktadb = BAKTA_BAKTADBDOWNLOAD.out.db
+        ch_versions = ch_versions.mix(BAKTA_BAKTADBDOWNLOAD.out.versions)
+
+    } else if (!ch_path_baktadb && !val_baktadb_download ){
+        exit 1, "The Bakta database argument is missing. Please provide the database path with '--baktadb', or set '--baktadb_download true' to download the Bakta database."
+ } + + // + // MODULE: BAKTA, gene annotation + // + BAKTA_BAKTA ( + ch_fasta, + ch_baktadb, + [], + [] + ) + ch_bakta_txt_multiqc = BAKTA_BAKTA.out.txt + ch_versions = ch_versions.mix(BAKTA_BAKTA.out.versions) + + emit: + versions = ch_versions.ifEmpty(null) // channel: [ path(versions.yml) ] + bakta_txt_multiqc = ch_bakta_txt_multiqc // channel: [ meta, path(*.txt) ] +} diff --git a/subworkflows/local/input_check.nf b/subworkflows/local/input_check.nf deleted file mode 100644 index 7fb9249c..00000000 --- a/subworkflows/local/input_check.nf +++ /dev/null @@ -1,86 +0,0 @@ -// -// Check input samplesheet and get read channels -// - -params.options = [:] - -workflow INPUT_CHECK { - take: - samplesheet // file: /path/to/samplesheet.csv - - main: - Channel - .fromPath( samplesheet ) - .ifEmpty {exit 1, log.info "Cannot find path file ${tsvFile}"} - .splitCsv ( header:true, sep:'\t' ) - .map { create_fastq_channels(it) } - .set { reads } - - // reconfigure channels - reads - .map { meta, reads, long_fastq, fast5 -> [ meta, reads ] } - .filter{ meta, reads -> reads != 'NA' } - .filter{ meta, reads -> reads[0] != 'NA' && reads[1] != 'NA' } - .set { shortreads } - reads - .map { meta, reads, long_fastq, fast5 -> [ meta, long_fastq ] } - .filter{ meta, long_fastq -> long_fastq != 'NA' } - .set { longreads } - reads - .map { meta, reads, long_fastq, fast5 -> [ meta, fast5 ] } - .filter{ meta, fast5 -> fast5 != 'NA' } - .set { fast5 } - - emit: - reads // channel: [ val(meta), [ reads ], long_fastq, fast5 ] - shortreads // channel: [ val(meta), [ reads ] ] - longreads // channel: [ val(meta), long_fastq ] - fast5 // channel: [ val(meta), fast5 ] -} - -// Function to get list of [ meta, [ fastq_1, fastq_2 ], long_fastq, fast5 ] -def create_fastq_channels(LinkedHashMap row) { - def meta = [:] - meta.id = row.ID - meta.single_end = false - meta.genome_size = row.GenomeSize == null ? 'NA' : row.GenomeSize - - def array = [] - // check short reads - if ( !(row.R1 == 'NA') ) { - if ( !file(row.R1).exists() ) { - exit 1, "ERROR: Please check input samplesheet -> Read 1 FastQ file does not exist!\n${row.R1}" - } - fastq_1 = file(row.R1) - } else { fastq_1 = 'NA' } - if ( !(row.R2 == 'NA') ) { - if ( !file(row.R2).exists() ) { - exit 1, "ERROR: Please check input samplesheet -> Read 2 FastQ file does not exist!\n${row.R2}" - } - fastq_2 = file(row.R2) - } else { fastq_2 = 'NA' } - - // check long_fastq - if ( !(row.LongFastQ == 'NA') ) { - if ( !file(row.LongFastQ).exists() ) { - exit 1, "ERROR: Please check input samplesheet -> Long FastQ file does not exist!\n${row.R1}" - } - long_fastq = file(row.LongFastQ) - } else { long_fastq = 'NA' } - - // check long_fastq - if ( !(row.Fast5 == 'NA') ) { - if ( !file(row.Fast5).exists() ) { - exit 1, "ERROR: Please check input samplesheet -> Fast5 file does not exist!\n${row.R1}" - } - fast5 = file(row.Fast5) - } else { fast5 = 'NA' } - - // prepare output // currently does not allow single end data! 
- if ( meta.single_end ) { - array = [ meta, fastq_1 , long_fastq, fast5 ] - } else { - array = [ meta, [ fastq_1, fastq_2 ], long_fastq, fast5 ] - } - return array -} diff --git a/subworkflows/nf-core/fastq_trim_fastp_fastqc/main.nf b/subworkflows/nf-core/fastq_trim_fastp_fastqc/main.nf new file mode 100644 index 00000000..4f1c84fc --- /dev/null +++ b/subworkflows/nf-core/fastq_trim_fastp_fastqc/main.nf @@ -0,0 +1,103 @@ +// +// Read QC and trimming +// + +include { FASTQC as FASTQC_RAW } from '../../../modules/nf-core/fastqc/main' +include { FASTQC as FASTQC_TRIM } from '../../../modules/nf-core/fastqc/main' +include { FASTP } from '../../../modules/nf-core/fastp/main' + +// +// Function that parses fastp json output file to get total number of reads after trimming +// +import groovy.json.JsonSlurper + +def getFastpReadsAfterFiltering(json_file) { + def Map json = (Map) new JsonSlurper().parseText(json_file.text).get('summary') + return json['after_filtering']['total_reads'].toLong() +} + +workflow FASTQ_TRIM_FASTP_FASTQC { + take: + ch_reads // channel: [ val(meta), path(reads) ] + ch_adapter_fasta // channel: [ path(fasta) ] + val_save_trimmed_fail // value: boolean + val_save_merged // value: boolean + val_skip_fastp // value: boolean + val_skip_fastqc // value: boolean + + main: + + ch_versions = Channel.empty() + + ch_fastqc_raw_html = Channel.empty() + ch_fastqc_raw_zip = Channel.empty() + if (!val_skip_fastqc) { + FASTQC_RAW ( + ch_reads + ) + ch_fastqc_raw_html = FASTQC_RAW.out.html + ch_fastqc_raw_zip = FASTQC_RAW.out.zip + ch_versions = ch_versions.mix(FASTQC_RAW.out.versions.first()) + } + + ch_trim_reads = ch_reads + ch_trim_json = Channel.empty() + ch_trim_html = Channel.empty() + ch_trim_log = Channel.empty() + ch_trim_reads_fail = Channel.empty() + ch_trim_reads_merged = Channel.empty() + ch_fastqc_trim_html = Channel.empty() + ch_fastqc_trim_zip = Channel.empty() + if (!val_skip_fastp) { + FASTP ( + ch_reads, + ch_adapter_fasta, + val_save_trimmed_fail, + val_save_merged + ) + ch_trim_reads = FASTP.out.reads + ch_trim_json = FASTP.out.json + ch_trim_html = FASTP.out.html + ch_trim_log = FASTP.out.log + ch_trim_reads_fail = FASTP.out.reads_fail + ch_trim_reads_merged = FASTP.out.reads_merged + ch_versions = ch_versions.mix(FASTP.out.versions.first()) + + // + // Filter empty FastQ files after adapter trimming so FastQC doesn't fail + // + ch_trim_reads + .join(ch_trim_json) + .map { + meta, reads, json -> + if (getFastpReadsAfterFiltering(json) > 0) { + [ meta, reads ] + } + } + .set { ch_trim_reads } + + if (!val_skip_fastqc) { + FASTQC_TRIM ( + ch_trim_reads + ) + ch_fastqc_trim_html = FASTQC_TRIM.out.html + ch_fastqc_trim_zip = FASTQC_TRIM.out.zip + ch_versions = ch_versions.mix(FASTQC_TRIM.out.versions.first()) + } + } + + emit: + reads = ch_trim_reads // channel: [ val(meta), path(reads) ] + trim_json = ch_trim_json // channel: [ val(meta), path(json) ] + trim_html = ch_trim_html // channel: [ val(meta), path(html) ] + trim_log = ch_trim_log // channel: [ val(meta), path(log) ] + trim_reads_fail = ch_trim_reads_fail // channel: [ val(meta), path(fastq.gz) ] + trim_reads_merged = ch_trim_reads_merged // channel: [ val(meta), path(fastq.gz) ] + + fastqc_raw_html = ch_fastqc_raw_html // channel: [ val(meta), path(html) ] + fastqc_raw_zip = ch_fastqc_raw_zip // channel: [ val(meta), path(zip) ] + fastqc_trim_html = ch_fastqc_trim_html // channel: [ val(meta), path(html) ] + fastqc_trim_zip = ch_fastqc_trim_zip // channel: [ val(meta), path(zip) ] + + versions = 
ch_versions.ifEmpty(null) // channel: [ path(versions.yml) ]
+}
diff --git a/subworkflows/nf-core/fastq_trim_fastp_fastqc/meta.yml b/subworkflows/nf-core/fastq_trim_fastp_fastqc/meta.yml
new file mode 100644
index 00000000..506fca4f
--- /dev/null
+++ b/subworkflows/nf-core/fastq_trim_fastp_fastqc/meta.yml
@@ -0,0 +1,110 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/subworkflows/yaml-schema.json
+name: "fastq_trim_fastp_fastqc"
+description: Read QC, fastp trimming and post-trimming read QC
+keywords:
+  - qc
+  - quality_control
+  - adapters
+  - trimming
+  - fastq
+components:
+  - fastqc
+  - fastp
+
+input:
+  - ch_reads:
+      type: file
+      description: |
+        Structure: [ val(meta), path (reads) ]
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ], List of input FastQ files of size 1 and 2 for single-end and paired-end data,
+        respectively. If you wish to run interleaved paired-end data, supply as single-end data
+        but with `--interleaved_in` in your `modules.conf`'s `ext.args` for the module.
+  - ch_adapter_fasta:
+      type: file
+      description: |
+        Structure: path(adapter_fasta)
+        File in FASTA format containing possible adapters to remove.
+  - val_save_trimmed_fail:
+      type: boolean
+      description: |
+        Structure: val(save_trimmed_fail)
+        Specify true to save files that failed to pass trimming thresholds ending in `*.fail.fastq.gz`
+  - val_save_merged:
+      type: boolean
+      description: |
+        Structure: val(save_merged)
+        Specify true to save all merged reads to a file ending in `*.merged.fastq.gz`
+  - val_skip_fastqc:
+      type: boolean
+      description: |
+        Structure: val(skip_fastqc)
+        Skip the FastQC process if true
+  - val_skip_fastp:
+      type: boolean
+      description: |
+        Structure: val(skip_fastp)
+        Skip the fastp process if true
+
+output:
+  - meta:
+      type: value
+      description: Groovy Map containing sample information
+        e.g. 
[ id:'test', single_end:false ]
+  - reads:
+      type: file
+      description: |
+        Structure: [ val(meta), path(reads) ]
+        The trimmed/modified/unmerged fastq reads
+  - trim_json:
+      type: file
+      description: |
+        Structure: [ val(meta), path(trim_json) ]
+        Results in JSON format
+  - trim_html:
+      type: file
+      description: |
+        Structure: [ val(meta), path(trim_html) ]
+        Results in HTML format
+  - trim_log:
+      type: file
+      description: |
+        Structure: [ val(meta), path(trim_log) ]
+        fastp log file
+  - trim_reads_fail:
+      type: file
+      description: |
+        Structure: [ val(meta), path(trim_reads_fail) ]
+        Reads that failed the preprocessing
+  - trim_reads_merged:
+      type: file
+      description: |
+        Structure: [ val(meta), path(trim_reads_merged) ]
+        Reads that were successfully merged
+
+  - fastqc_raw_html:
+      type: file
+      description: |
+        Structure: [ val(meta), path(fastqc_raw_html) ]
+        Raw FastQC report
+  - fastqc_raw_zip:
+      type: file
+      description: |
+        Structure: [ val(meta), path(fastqc_raw_zip) ]
+        Raw FastQC report archive
+  - fastqc_trim_html:
+      type: file
+      description: |
+        Structure: [ val(meta), path(fastqc_trim_html) ]
+        Trimmed FastQC report
+  - fastqc_trim_zip:
+      type: file
+      description: |
+        Structure: [ val(meta), path(fastqc_trim_zip) ]
+        Trimmed FastQC report archive
+  - versions:
+      type: file
+      description: File containing software versions
+      pattern: "versions.yml"
+authors:
+  - "@Joon-Klaps"
diff --git a/tower.yml b/tower.yml
new file mode 100644
index 00000000..787aedfe
--- /dev/null
+++ b/tower.yml
@@ -0,0 +1,5 @@
+reports:
+  multiqc_report.html:
+    display: "MultiQC HTML report"
+  samplesheet.csv:
+    display: "Auto-created samplesheet with collated metadata and FASTQ paths"
diff --git a/workflows/bacass.nf b/workflows/bacass.nf
index 1a9aaaf7..4a6d03a9 100644
--- a/workflows/bacass.nf
+++ b/workflows/bacass.nf
@@ -1,21 +1,30 @@
 /*
-========================================================================================
-    VALIDATE INPUTS
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    PRINT PARAMS SUMMARY
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
-def summary_params = NfcoreSchema.paramsSummaryMap(workflow, params)
+include { paramsSummaryLog; paramsSummaryMap; fromSamplesheet } from 'plugin/nf-validation'
+
+def logo = NfcoreTemplate.logo(workflow, params.monochrome_logs)
+def citation = '\n' + WorkflowMain.citation(workflow) + '\n'
+def summary_params = paramsSummaryMap(workflow)
+
+// Print parameter summary log to screen
+log.info logo + paramsSummaryLog(workflow) + citation
+
+/*
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    VALIDATE INPUTS
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/

-// Validate input parameters
 WorkflowBacass.initialise(params, log)

 // Check input path parameters to see if they exist
 def checkPathParamList = [ params.input, params.multiqc_config, params.kraken2db, params.dfast_config ]
 for (param in checkPathParamList) { if (param) { file(param, checkIfExists: true) } }

-// Check mandatory parameters
-if (params.input) { ch_input = file(params.input) } else { exit 1, 'Input samplesheet not specified!' }
-
 // Check krakendb
 if(! params.skip_kraken2){
     if(params.kraken2db){
@@ -26,82 +35,73 @@ if(! 
params.skip_kraken2){ } /* -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONFIG FILES -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -ch_multiqc_config = file("$projectDir/assets/multiqc_config.yaml", checkIfExists: true) -ch_multiqc_custom_config = params.multiqc_config ? Channel.fromPath(params.multiqc_config) : Channel.empty() +ch_multiqc_config = Channel.fromPath("$projectDir/assets/multiqc_config.yml", checkIfExists: true) +ch_multiqc_custom_config = params.multiqc_config ? Channel.fromPath( params.multiqc_config, checkIfExists: true ) : Channel.empty() +ch_multiqc_logo = params.multiqc_logo ? Channel.fromPath( params.multiqc_logo, checkIfExists: true ) : Channel.empty() +ch_multiqc_custom_methods_description = params.multiqc_methods_description ? file(params.multiqc_methods_description, checkIfExists: true) : file("$projectDir/assets/methods_description_template.yml", checkIfExists: true) /* -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ IMPORT LOCAL MODULES/SUBWORKFLOWS -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -// Don't overwrite global params.modules, create a copy instead and use that within the main script. -def modules = params.modules.clone() - -def unicycler_options = modules['unicycler'] -unicycler_options.args += " $params.unicycler_args" - -def canu_options = modules['canu'] -canu_options.args += " $params.canu_args" - // // MODULE: Local to the pipeline // -include { GET_SOFTWARE_VERSIONS } from '../modules/local/get_software_versions' addParams( options: [publish_files : ['tsv':'']] ) -include { SKEWER } from '../modules/local/skewer' addParams( options: modules['skewer'] ) -include { NANOPLOT } from '../modules/local/nanoplot' addParams( options: modules['nanoplot'] ) -include { PYCOQC } from '../modules/local/pycoqc' addParams( options: modules['pycoqc'] ) -include { PORECHOP } from '../modules/local/porechop' addParams( options: modules['porechop'] ) -include { UNICYCLER } from '../modules/local/unicycler' addParams( options: unicycler_options ) -include { CANU } from '../modules/local/canu' addParams( options: canu_options ) -include { MINIMAP2_ALIGN } from '../modules/local/minimap_align' addParams( options: modules['minimap_align'] ) -include { MINIMAP2_ALIGN as MINIMAP2_CONSENSUS } from '../modules/local/minimap_align' addParams( options: modules['minimap_consensus']) -include { MINIMAP2_ALIGN as MINIMAP2_POLISH } from '../modules/local/minimap_align' addParams( options: modules['minimap_polish']) -include { MINIASM } from '../modules/local/miniasm' addParams( options: modules['miniasm'] ) -include { RACON } from '../modules/local/racon' addParams( options: modules['racon'] ) -include { MEDAKA } from '../modules/local/medaka' addParams( options: modules['medaka'] ) -include { NANOPOLISH } from '../modules/local/nanopolish' addParams( options: modules['nanopolish'] ) -include { KRAKEN2_DB_PREPARATION} from '../modules/local/kraken2_db_preparation' -include { DFAST } from '../modules/local/dfast' addParams( options: modules['dfast'] 
) +include { PYCOQC } from '../modules/local/pycoqc' +include { UNICYCLER } from '../modules/local/unicycler' +include { NANOPOLISH } from '../modules/local/nanopolish' +include { MEDAKA } from '../modules/local/medaka' +include { KRAKEN2_DB_PREPARATION } from '../modules/local/kraken2_db_preparation' +include { DFAST } from '../modules/local/dfast' // // SUBWORKFLOW: Consisting of a mix of local and nf-core/modules // -include { INPUT_CHECK } from '../subworkflows/local/input_check' addParams( options: [:] ) /* -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ IMPORT NF-CORE MODULES/SUBWORKFLOWS -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -def multiqc_options = modules['multiqc'] -multiqc_options.args += params.multiqc_title ? Utils.joinModuleArgs(["--title \"$params.multiqc_title\""]) : '' - -def prokka_options = modules['prokka'] -prokka_options.args += " $params.prokka_args" - // // MODULE: Installed directly from nf-core/modules // -include { FASTQC } from '../modules/nf-core/modules/fastqc/main' addParams( options: modules['fastqc'] ) -include { SAMTOOLS_SORT } from '../modules/nf-core/modules/samtools/sort/main' addParams( options: [publish_files : false] ) -include { SAMTOOLS_INDEX } from '../modules/nf-core/modules/samtools/index/main' addParams( options: [publish_files : false] ) -include { KRAKEN2_KRAKEN2 as KRAKEN2 } from '../modules/nf-core/modules/kraken2/kraken2/main' addParams( options: modules['kraken2'] ) -include { KRAKEN2_KRAKEN2 as KRAKEN2_LONG } from '../modules/nf-core/modules/kraken2/kraken2/main' addParams( options: modules['kraken2_long'] ) -include { QUAST } from '../modules/nf-core/modules/quast/main' addParams( options: modules['quast'] ) -include { PROKKA } from '../modules/nf-core/modules/prokka/main' addParams( options: prokka_options ) -include { MULTIQC } from '../modules/nf-core/modules/multiqc/main' addParams( options: multiqc_options ) +include { NANOPLOT } from '../modules/nf-core/nanoplot/main' +include { PORECHOP_PORECHOP } from '../modules/nf-core/porechop/porechop/main' +include { CANU } from '../modules/nf-core/canu/main' +include { MINIMAP2_ALIGN } from '../modules/nf-core/minimap2/align/main' +include { MINIMAP2_ALIGN as MINIMAP2_CONSENSUS } from '../modules/nf-core/minimap2/align/main' +include { MINIMAP2_ALIGN as MINIMAP2_POLISH } from '../modules/nf-core/minimap2/align/main' +include { MINIASM } from '../modules/nf-core/miniasm/main' +include { RACON } from '../modules/nf-core/racon/main' +include { SAMTOOLS_SORT } from '../modules/nf-core/samtools/sort/main' +include { SAMTOOLS_INDEX } from '../modules/nf-core/samtools/index/main' +include { KRAKEN2_KRAKEN2 as KRAKEN2 } from '../modules/nf-core/kraken2/kraken2/main' +include { KRAKEN2_KRAKEN2 as KRAKEN2_LONG } from '../modules/nf-core/kraken2/kraken2/main' +include { QUAST } from '../modules/nf-core/quast/main' +include { GUNZIP } from '../modules/nf-core/gunzip/main' +include { PROKKA } from '../modules/nf-core/prokka/main' +include { CUSTOM_DUMPSOFTWAREVERSIONS } from '../modules/nf-core/custom/dumpsoftwareversions/main' +include { MULTIQC } from '../modules/nf-core/multiqc/main' + +// +// SUBWORKFLOWS: Consisting of a mix of local and nf-core/modules +// +include { FASTQ_TRIM_FASTP_FASTQC } from 
'../subworkflows/nf-core/fastq_trim_fastp_fastqc/main' +include { BAKTA_DBDOWNLOAD_RUN } from '../subworkflows/local/bakta_dbdownload_run' /* -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ RUN MAIN WORKFLOW -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ // Info required for completion email and summary @@ -109,57 +109,84 @@ def multiqc_report = [] workflow BACASS { - ch_software_versions = Channel.empty() + ch_versions = Channel.empty() // // SUBWORKFLOW: Read in samplesheet, validate and stage input files // - INPUT_CHECK ( - ch_input - ) - - // - // MODULE: Run FastQC - // - FASTQC ( - INPUT_CHECK.out.shortreads - ) - ch_software_versions = ch_software_versions.mix(FASTQC.out.version.first().ifEmpty(null)) - - // - // MODULE: Skewer, trim and combine short read read-pairs per sample. - // - SKEWER ( - INPUT_CHECK.out.shortreads.dump(tag: 'shortreads') + def criteria = multiMapCriteria { + meta, fastq_1, fastq_2, long_fastq, fast5, genome_size -> + shortreads: fastq_1 != 'NA' ? tuple(tuple(meta, [fastq_1, fastq_2])) : null + longreads: long_fastq != 'NA' ? tuple(meta, long_fastq) : null + fast5: fast5 != 'NA' ? tuple(meta, fast5) : null + } + // See the documentation https://nextflow-io.github.io/nf-validation/samplesheets/fromSamplesheet/ + Channel + .fromSamplesheet('input') + .multiMap (criteria) + .set { ch_input } + // reconfigure channels + ch_input + .shortreads + .filter{ it != null } + .set { ch_shortreads } + ch_input + .longreads + .filter{ it != null } + .set { ch_longreads } + ch_input + .fast5 + .filter{ it != null } + .set { ch_fast5 } + + // + // SUBWORKFLOW: Short reads QC and trim adapters + // + FASTQ_TRIM_FASTP_FASTQC ( + ch_shortreads, + [], + params.save_trimmed_fail, + params.save_merged, + params.skip_fastp, + params.skip_fastqc ) - ch_software_versions = ch_software_versions.mix(SKEWER.out.version.first().ifEmpty(null)) + ch_fastqc_raw_multiqc = FASTQ_TRIM_FASTP_FASTQC.out.fastqc_raw_zip + ch_fastqc_trim_multiqc = FASTQ_TRIM_FASTP_FASTQC.out.fastqc_trim_zip + ch_trim_json_multiqc = FASTQ_TRIM_FASTP_FASTQC.out.trim_json + ch_versions = ch_versions.mix(FASTQ_TRIM_FASTP_FASTQC.out.versions.ifEmpty(null)) // // MODULE: Nanoplot, quality check for nanopore reads and Quality/Length Plots // NANOPLOT ( - INPUT_CHECK.out.longreads + ch_longreads ) - ch_software_versions = ch_software_versions.mix(NANOPLOT.out.version.first().ifEmpty(null)) + ch_nanoplot_txt_multiqc = NANOPLOT.out.txt + ch_versions = ch_versions.mix(NANOPLOT.out.versions.ifEmpty(null)) // // MODULE: PYCOQC, quality check for nanopore reads and Quality/Length Plots // + // TODO: Couldn't be tested. No configuration test available (lack of fast5 file or params.skip_pycoqc=false). 
+    ch_pycoqc_multiqc = Channel.empty()
     if ( !params.skip_pycoqc ) {
         PYCOQC (
-            INPUT_CHECK.out.fast5.dump(tag: 'fast5')
+            ch_fast5.dump(tag: 'fast5')
         )
-        ch_software_versions = ch_software_versions.mix(PYCOQC.out.version.first().ifEmpty(null))
+        ch_pycoqc_multiqc = PYCOQC.out.json
+        ch_versions = ch_versions.mix(PYCOQC.out.versions.ifEmpty(null))
     }

     //
-    // MODULE: PYCOQC, quality check for nanopore reads and Quality/Length Plots
+    // MODULE: PORECHOP, adapter trimming for nanopore reads
     //
+    ch_porechop_log_multiqc = Channel.empty()
     if ( params.assembly_type == 'hybrid' || params.assembly_type == 'long' && !('short' in params.assembly_type) ) {
-        PORECHOP (
-            INPUT_CHECK.out.longreads.dump(tag: 'longreads')
+        PORECHOP_PORECHOP (
+            ch_longreads.dump(tag: 'longreads')
         )
-        ch_software_versions = ch_software_versions.mix(PORECHOP.out.version.first().ifEmpty(null))
+        ch_porechop_log_multiqc = PORECHOP_PORECHOP.out.log
+        ch_versions = ch_versions.mix( PORECHOP_PORECHOP.out.versions.ifEmpty(null) )
     }

     //
@@ -167,27 +194,27 @@ workflow BACASS {
     // Prepare channel for Kraken2
     //
     if(params.assembly_type == 'hybrid'){
-        ch_for_kraken2_short = SKEWER.out.reads
-        ch_for_kraken2_long = PORECHOP.out.reads.dump(tag: 'porechop')
-        SKEWER.out.reads
-            .dump(tag: 'skewer')
-            .join(PORECHOP.out.reads)
+        ch_for_kraken2_short = FASTQ_TRIM_FASTP_FASTQC.out.reads
+        ch_for_kraken2_long = PORECHOP_PORECHOP.out.reads
+        FASTQ_TRIM_FASTP_FASTQC.out.reads
+            .dump(tag: 'fastp')
+            .join(PORECHOP_PORECHOP.out.reads)
            .dump(tag: 'ch_for_assembly')
             .set { ch_for_assembly }
     } else if ( params.assembly_type == 'short' ) {
-        ch_for_kraken2_short = SKEWER.out.reads
-        ch_for_kraken2_long = Channel.empty()
-        SKEWER.out.reads
-            .dump(tag: 'skewer')
-            .map{ meta,reads -> tuple(meta,reads,'NA') }
+        ch_for_kraken2_short = FASTQ_TRIM_FASTP_FASTQC.out.reads
+        ch_for_kraken2_long  = Channel.empty()
+        FASTQ_TRIM_FASTP_FASTQC.out.reads
+            .dump(tag: 'fastp')
+            .map{ meta,reads -> tuple(meta,reads,[]) }
             .dump(tag: 'ch_for_assembly')
             .set { ch_for_assembly }
     } else if ( params.assembly_type == 'long' ) {
-        ch_for_kraken2_short = Channel.empty()
-        ch_for_kraken2_long = PORECHOP.out.reads
-        PORECHOP.out.reads
+        ch_for_kraken2_short = Channel.empty()
+        ch_for_kraken2_long  = PORECHOP_PORECHOP.out.reads
+        PORECHOP_PORECHOP.out.reads
             .dump(tag: 'porechop')
-            .map{ meta,lr -> tuple(meta,'NA',lr) }
+            .map{ meta,lr -> tuple(meta,[],lr) }
             .dump(tag: 'ch_for_assembly')
             .set { ch_for_assembly }
     }
@@ -205,7 +232,7 @@ workflow BACASS {
         ch_for_assembly
     )
     ch_assembly = ch_assembly.mix( UNICYCLER.out.scaffolds.dump(tag: 'unicycler') )
-        ch_software_versions = ch_software_versions.mix(UNICYCLER.out.version.first().ifEmpty(null))
+        ch_versions = ch_versions.mix( UNICYCLER.out.versions.ifEmpty(null) )
     }

     //
@@ -213,32 +240,56 @@ workflow BACASS {
     //
     if ( params.assembler == 'canu' ) {
         CANU (
-            ch_for_assembly
+            ch_for_assembly.map { meta, reads, lr -> tuple( meta, lr ) },
+            params.canu_mode,
+            ch_for_assembly.map { meta, reads, lr -> meta.genome_size }
        )
         ch_assembly = ch_assembly.mix( CANU.out.assembly.dump(tag: 'canu') )
-        ch_software_versions = ch_software_versions.mix(CANU.out.version.first().ifEmpty(null))
+        ch_versions = ch_versions.mix(CANU.out.versions.ifEmpty(null))
    }

     //
     // MODULE: Miniasm, genome assembly, long reads
-    //
     if ( params.assembler == 'miniasm' ) {
         MINIMAP2_ALIGN (
-            ch_for_assembly.map{ meta,sr,lr -> tuple(meta,sr,lr,lr) }
+            ch_for_assembly.map{ meta,sr,lr -> tuple(meta,lr) },
+            [],
+            false,
+            false,
+            false
         )
- 
ch_software_versions = ch_software_versions.mix(MINIMAP2_ALIGN.out.version.first().ifEmpty(null)) + ch_versions = ch_versions.mix(MINIMAP2_ALIGN.out.versions.ifEmpty(null)) + + ch_for_assembly + .join(MINIMAP2_ALIGN.out.paf) + .map { meta, sr, lr, paf-> tuple(meta, lr, paf) } + .set { ch_for_miniasm } + MINIASM ( - MINIMAP2_ALIGN.out.paf.dump(tag: 'minimap2') + ch_for_miniasm ) - ch_software_versions = ch_software_versions.mix(MINIASM.out.version.first().ifEmpty(null)) + ch_versions = ch_versions.mix(MINIASM.out.versions.ifEmpty(null)) + MINIMAP2_CONSENSUS ( - MINIASM.out.all.dump(tag: 'miniasm') + ch_for_assembly.map{ meta,sr,lr -> tuple(meta,lr) }, + MINIASM.out.assembly.map { meta, assembly -> assembly }, + false, + false, + false ) + ch_versions = ch_versions.mix(MINIMAP2_CONSENSUS.out.versions.ifEmpty(null)) + + ch_for_assembly + .join(MINIASM.out.assembly) + .join(MINIMAP2_CONSENSUS.out.paf) + .map { meta, sr, lr, assembly, paf -> tuple(meta, lr, assembly, paf) } + .set{ ch_for_racon } + RACON ( - MINIMAP2_CONSENSUS.out.paf.dump(tag: 'minimap2_consensus') + ch_for_racon ) - ch_assembly = ch_assembly.mix( RACON.out.assembly.dump(tag: 'miniasm') ) - ch_software_versions = ch_software_versions.mix(RACON.out.version.first().ifEmpty(null)) + ch_assembly = ch_assembly.mix( RACON.out.improved_assembly.dump(tag: 'miniasm') ) + ch_versions = ch_versions.mix(RACON.out.versions.ifEmpty(null)) } // @@ -248,52 +299,65 @@ workflow BACASS { ch_for_assembly .join( ch_assembly ) .set { ch_for_polish } + MINIMAP2_POLISH ( - ch_for_polish.dump(tag: 'into_minimap2_polish') - ) - ch_software_versions = ch_software_versions.mix(MINIMAP2_POLISH.out.version.first().ifEmpty(null)) - SAMTOOLS_SORT ( - MINIMAP2_POLISH.out.paf.map{ meta,sr,lr,ref,paf -> tuple(meta,paf) }.dump(tag: 'minimap2_polish') + ch_for_polish.map { meta, sr, lr, fasta -> tuple(meta, lr) }, + ch_for_polish.map { meta, sr, lr, fasta -> fasta }, + true, + false, + false ) - ch_software_versions = ch_software_versions.mix(SAMTOOLS_SORT.out.version.first().ifEmpty(null)) + ch_versions = ch_versions.mix(MINIMAP2_POLISH.out.versions.ifEmpty(null)) + SAMTOOLS_INDEX ( - SAMTOOLS_SORT.out.bam.dump(tag: 'samtools_sort') + MINIMAP2_POLISH.out.bam.dump(tag: 'samtools_sort') ) - ch_software_versions = ch_software_versions.mix(SAMTOOLS_INDEX.out.version.first().ifEmpty(null)) - ch_for_polish //tuple val(meta), val(reads), file(longreads), file(assembly) - .join( SAMTOOLS_SORT.out.bam ) //tuple val(meta), file(bam) - .join( SAMTOOLS_INDEX.out.bai ) //tuple val(meta), file(bai) - .join( INPUT_CHECK.out.fast5 ) //tuple val(meta), file(fast5) - .set { ch_for_nanopolish } //tuple val(meta), val(reads), file(longreads), file(assembly), file(bam), file(bai), file(fast5) + ch_versions = ch_versions.mix(SAMTOOLS_INDEX.out.versions.ifEmpty(null)) + + ch_for_polish // tuple val(meta), val(reads), file(longreads), file(assembly) + .join( MINIMAP2_POLISH.out.bam ) // tuple val(meta), file(bam) + .join( SAMTOOLS_INDEX.out.bai ) // tuple val(meta), file(bai) + .join( ch_fast5 ) // tuple val(meta), file(fast5) + .set { ch_for_nanopolish } // tuple val(meta), val(reads), file(longreads), file(assembly), file(bam), file(bai), file(fast5) + + // TODO: 'nanopolish index' couldn't be tested. No fast5 provided in test datasets. 
NANOPOLISH ( ch_for_nanopolish.dump(tag: 'into_nanopolish') ) - ch_software_versions = ch_software_versions.mix(NANOPOLISH.out.version.first().ifEmpty(null)) + ch_versions = ch_versions.mix(NANOPOLISH.out.versions.ifEmpty(null)) } // // MODULE: Medaka, polishes assembly - should take either miniasm, canu, or unicycler consensus sequence // if ( !params.skip_polish && params.assembly_type == 'long' && params.polish_method == 'medaka' ) { - ch_assembly - .join( ch_for_assembly ) + ch_for_assembly + .join( ch_assembly ) + .map { meta, sr, lr, assembly -> tuple(meta, lr, assembly) } .set { ch_for_medaka } + MEDAKA ( ch_for_medaka.dump(tag: 'into_medaka') ) - ch_software_versions = ch_software_versions.mix(MEDAKA.out.version.first().ifEmpty(null)) + ch_versions = ch_versions.mix(MEDAKA.out.versions.ifEmpty(null)) } // // MODULE: Kraken2, QC for sample purity // + ch_kraken_short_multiqc = Channel.empty() + ch_kraken_long_multiqc = Channel.empty() if ( !params.skip_kraken2 ) { KRAKEN2_DB_PREPARATION ( kraken2db ) KRAKEN2 ( ch_for_kraken2_short.dump(tag: 'kraken2_short'), - KRAKEN2_DB_PREPARATION.out.db.map { info, db -> db }.dump(tag: 'kraken2_db_preparation') + KRAKEN2_DB_PREPARATION.out.db.map { info, db -> db }.dump(tag: 'kraken2_db_preparation'), + false, + false ) - ch_software_versions = ch_software_versions.mix(KRAKEN2.out.version.first().ifEmpty(null)) + ch_kraken_short_multiqc = KRAKEN2.out.report + ch_versions = ch_versions.mix(KRAKEN2.out.versions.ifEmpty(null)) + KRAKEN2_LONG ( ch_for_kraken2_long .map { meta, reads -> @@ -303,39 +367,67 @@ workflow BACASS { [ info, reads ] } .dump(tag: 'kraken2_long'), - KRAKEN2_DB_PREPARATION.out.db.map { info, db -> db }.dump(tag: 'kraken2_db_preparation') + KRAKEN2_DB_PREPARATION.out.db.map { info, db -> db }.dump(tag: 'kraken2_db_preparation'), + false, + false ) - ch_software_versions = ch_software_versions.mix(KRAKEN2_LONG.out.version.first().ifEmpty(null)) + ch_kraken_long_multiqc = KRAKEN2_LONG.out.report + ch_versions = ch_versions.mix(KRAKEN2_LONG.out.versions.ifEmpty(null)) } // // MODULE: QUAST, assembly QC // ch_assembly - .map { meta, fasta -> fasta } - .collect() + .collect{ it[1] } + .map { consensus_collect -> tuple([id: "report"], consensus_collect) } .set { ch_to_quast } + QUAST ( ch_to_quast, - [], - [], - false, - false + [[:],[]], + [[:],[]] ) - ch_software_versions = ch_software_versions.mix(QUAST.out.version.ifEmpty(null)) + ch_quast_multiqc = QUAST.out.tsv + ch_versions = ch_versions.mix(QUAST.out.versions.ifEmpty(null)) // // MODULE: PROKKA, gene annotation // + ch_prokka_txt_multiqc = Channel.empty() if ( !params.skip_annotation && params.annotation_tool == 'prokka' ) { + GUNZIP ( ch_assembly ) + ch_to_prokka = GUNZIP.out.gunzip + ch_versions = ch_versions.mix(GUNZIP.out.versions.ifEmpty(null)) + PROKKA ( - ch_assembly, + ch_to_prokka, [], [] ) - ch_software_versions = ch_software_versions.mix(PROKKA.out.version.first().ifEmpty(null)) + ch_prokka_txt_multiqc = PROKKA.out.txt.collect() + ch_versions = ch_versions.mix(PROKKA.out.versions.ifEmpty(null)) } + // + // MODULE: BAKTA, gene annotation + // + + ch_bakta_txt_multiqc = Channel.empty() + if ( !params.skip_annotation && params.annotation_tool == 'bakta' ) { + GUNZIP ( ch_assembly ) + ch_to_bakta = GUNZIP.out.gunzip + ch_versions = ch_versions.mix(GUNZIP.out.versions.ifEmpty(null)) + + BAKTA_DBDOWNLOAD_RUN ( + ch_to_bakta, + params.baktadb, + params.baktadb_download + ) + + ch_bakta_txt_multiqc = BAKTA_DBDOWNLOAD_RUN.out.bakta_txt_multiqc.collect() + ch_versions = 
ch_versions.mix(BAKTA_DBDOWNLOAD_RUN.out.versions) + } // // MODULE: DFAST, gene annotation // @@ -345,59 +437,70 @@ workflow BACASS { ch_assembly, Channel.value(params.dfast_config ? file(params.dfast_config) : "") ) - ch_software_versions = ch_software_versions.mix(DFAST.out.version.first().ifEmpty(null)) + ch_versions = ch_versions.mix(DFAST.out.versions.ifEmpty(null)) } // // MODULE: Pipeline reporting // - ch_software_versions - .map { it -> if (it) [ it.baseName, it ] } - .groupTuple() - .map { it[1][0] } - .flatten() - .collect() - .set { ch_software_versions } - - GET_SOFTWARE_VERSIONS ( - ch_software_versions.map { it }.collect() + CUSTOM_DUMPSOFTWAREVERSIONS ( + ch_versions.unique().collectFile(name: 'collated_versions.yml') ) // // MODULE: MultiQC // - workflow_summary = WorkflowBacass.paramsSummaryMultiqc(workflow, summary_params) - ch_workflow_summary = Channel.value(workflow_summary) - - ch_multiqc_files = Channel.empty() - ch_multiqc_files = ch_multiqc_files.mix(Channel.from(ch_multiqc_config)) - ch_multiqc_files = ch_multiqc_files.mix(ch_multiqc_custom_config.collect().ifEmpty([])) - ch_multiqc_files = ch_multiqc_files.mix(ch_workflow_summary.collectFile(name: 'workflow_summary_mqc.yaml')) - ch_multiqc_files = ch_multiqc_files.mix(GET_SOFTWARE_VERSIONS.out.yaml.collect()) - ch_multiqc_files = ch_multiqc_files.mix(FASTQC.out.zip.collect{it[1]}.ifEmpty([])) - - MULTIQC ( - ch_multiqc_files.collect() - ) - multiqc_report = MULTIQC.out.report.toList() - ch_software_versions = ch_software_versions.mix(MULTIQC.out.version.ifEmpty(null)) + if (!params.skip_multiqc){ + workflow_summary = WorkflowBacass.paramsSummaryMultiqc(workflow, summary_params) + ch_workflow_summary = Channel.value(workflow_summary) + methods_description = WorkflowBacass.methodsDescriptionText(workflow, ch_multiqc_custom_methods_description, params) + ch_methods_description = Channel.value(methods_description) + + ch_multiqc_files = Channel.empty() + ch_multiqc_files = ch_multiqc_files.mix(ch_workflow_summary.collectFile(name: 'workflow_summary_mqc.yaml')) + ch_multiqc_files = ch_multiqc_files.mix(ch_methods_description.collectFile(name: 'methods_description_mqc.yaml')) + ch_multiqc_files = ch_multiqc_files.mix(CUSTOM_DUMPSOFTWAREVERSIONS.out.mqc_yml.collect()) + ch_multiqc_files = ch_multiqc_files.mix(ch_fastqc_raw_multiqc.collect{it[1]}.ifEmpty([])) + ch_multiqc_files = ch_multiqc_files.mix(ch_fastqc_trim_multiqc.collect{it[1]}.ifEmpty([])) + ch_multiqc_files = ch_multiqc_files.mix(ch_trim_json_multiqc.collect{it[1]}.ifEmpty([])) + ch_multiqc_files = ch_multiqc_files.mix(ch_kraken_short_multiqc.collect{it[1]}.ifEmpty([])) + ch_multiqc_files = ch_multiqc_files.mix(ch_kraken_long_multiqc.collect{it[1]}.ifEmpty([])) + ch_multiqc_files = ch_multiqc_files.mix(ch_quast_multiqc.collect{it[1]}.ifEmpty([])) + ch_multiqc_files = ch_multiqc_files.mix(ch_prokka_txt_multiqc.collect{it[1]}.ifEmpty([])) + ch_multiqc_files = ch_multiqc_files.mix(ch_bakta_txt_multiqc.collect{it[1]}.ifEmpty([])) + ch_multiqc_files = ch_multiqc_files.mix(ch_nanoplot_txt_multiqc.collect{it[1]}.ifEmpty([])) + ch_multiqc_files = ch_multiqc_files.mix(ch_porechop_log_multiqc.collect{it[1]}.ifEmpty([])) + ch_multiqc_files = ch_multiqc_files.mix(ch_pycoqc_multiqc.collect{it[1]}.ifEmpty([])) + + MULTIQC ( + ch_multiqc_files.collect(), + ch_multiqc_config, + ch_multiqc_custom_config.collect().ifEmpty([]), + ch_multiqc_logo.collect().ifEmpty([]) + ) + multiqc_report = MULTIQC.out.report.toList() + } } /* 
-======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ COMPLETION EMAIL AND SUMMARY -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ workflow.onComplete { if (params.email || params.email_on_fail) { NfcoreTemplate.email(workflow, params, summary_params, projectDir, log, multiqc_report) } + NfcoreTemplate.dump_parameters(workflow, params) NfcoreTemplate.summary(workflow, params, log) + if (params.hook_url) { + NfcoreTemplate.IM_notification(workflow, params, summary_params, projectDir, log) + } } /* -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ THE END -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
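
For reference, a minimal sketch of how the options introduced in this diff fit together. The sample sheet contents, sample ID, and file paths below are hypothetical; the flags are those defined in the nextflow_schema.json changes above:

    # design.tsv -- six tab-separated columns; entries that do not apply are set to NA
    ID	R1	R2	LongFastQ	Fast5	GenomeSize
    sample1	/data/sample1_R1.fastq.gz	/data/sample1_R2.fastq.gz	NA	NA	2.8m

    # Short-read assembly annotated with Bakta, downloading the light database on the fly
    nextflow run nf-core/bacass \
        -profile docker \
        --input design.tsv \
        --outdir ./results \
        --assembly_type short \
        --annotation_tool bakta \
        --baktadb_download true \
        --baktadb_download_args '--type light'

For a long or hybrid assembly the LongFastQ (and optionally Fast5) columns would be populated instead, and a canu run would additionally consume --canu_mode (e.g. '-nanopore') together with the per-sample GenomeSize value, as wired into the CANU module call in workflows/bacass.nf above.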