diff --git a/.copier-answers.yml b/.copier-answers.yml index 11eb3df90..107af3b94 100644 --- a/.copier-answers.yml +++ b/.copier-answers.yml @@ -1,5 +1,5 @@ # Changes here will be overwritten by Copier -_commit: 1.0.0-8-g1361223 +_commit: 2.0.1 _src_path: gh:DiamondLightSource/python-copier-template author_email: tom.cobb@diamond.ac.uk author_name: Tom Cobb @@ -11,4 +11,6 @@ docs_type: sphinx git_platform: github.com github_org: PandABlocks package_name: pandablocks +pypi: true repo_name: PandABlocks-client +type_checker: mypy diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 96427ee45..79b85ff41 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -6,45 +6,41 @@ "target": "developer" }, "remoteEnv": { + // Allow X11 apps to run inside the container "DISPLAY": "${localEnv:DISPLAY}" }, - // Add the URLs of features you want added when the container is built. - "features": { - "ghcr.io/devcontainers/features/common-utils:1": { - "username": "none", - "upgradePackages": false - } - }, "customizations": { - // Set *default* container specific settings.json values on container create. - "settings": { - "python.defaultInterpreterPath": "/venv/bin/python" - }, "vscode": { + // Set *default* container specific settings.json values on container create. + "settings": { + "python.defaultInterpreterPath": "/venv/bin/python" + }, // Add the IDs of extensions you want installed when the container is created. 
"extensions": [ "ms-python.python", + "github.vscode-github-actions", "tamasfe.even-better-toml", "redhat.vscode-yaml", - "ryanluker.vscode-coverage-gutters" + "ryanluker.vscode-coverage-gutters", + "charliermarsh.ruff", + "ms-azuretools.vscode-docker" ] } }, - // Make sure the files we are mapping into the container exist on the host - "initializeCommand": "bash -c 'for i in $HOME/.inputrc; do [ -f $i ] || touch $i; done'", + "features": { + // Some default things like git config + "ghcr.io/devcontainers/features/common-utils:2": { + "upgradePackages": false + } + }, "runArgs": [ + // Allow the container to access the host X11 display and EPICS CA "--net=host", - "--security-opt=label=type:container_runtime_t" - ], - "mounts": [ - "source=${localEnv:HOME}/.ssh,target=/root/.ssh,type=bind", - "source=${localEnv:HOME}/.inputrc,target=/root/.inputrc,type=bind", - // map in home directory - not strictly necessary but useful - "source=${localEnv:HOME},target=${localEnv:HOME},type=bind,consistency=cached" + // Make sure SELinux does not disable with access to host filesystems like tmp + "--security-opt=label=disable" ], - // make the workspace folder the same inside and outside of the container - "workspaceMount": "source=${localWorkspaceFolder},target=${localWorkspaceFolder},type=bind", - "workspaceFolder": "${localWorkspaceFolder}", + // Mount the parent as /workspaces so we can pip install peers as editable + "workspaceMount": "source=${localWorkspaceFolder}/..,target=/workspaces,type=bind", // After the container is created, install the python project in editable form - "postCreateCommand": "pip install -e '.[dev]'" + "postCreateCommand": "pip install $([ -f dev-requirements.txt ] && echo '-c dev-requirements.txt') -e '.[dev]' && pre-commit install" } \ No newline at end of file diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100644 index 000000000..2ec8dfe69 --- /dev/null +++ b/.github/CONTRIBUTING.md @@ -0,0 +1,27 @@ +# Contribute to 
the project + +Contributions and issues are most welcome! All issues and pull requests are +handled through [GitHub](https://github.com/PandABlocks/PandABlocks-client/issues). Also, please check for any existing issues before +filing a new one. If you have a great idea but it involves big changes, please +file a ticket before making a pull request! We want to make sure you don't spend +your time coding something that might not fit the scope of the project. + +## Issue or Discussion? + +Github also offers [discussions](https://github.com/PandABlocks/PandABlocks-client/discussions) as a place to ask questions and share ideas. If +your issue is open ended and it is not obvious when it can be "closed", please +raise it as a discussion instead. + +## Code Coverage + +While 100% code coverage does not make a library bug-free, it significantly +reduces the number of easily caught bugs! Please make sure coverage remains the +same or is improved by a pull request! + +## Developer Information + +It is recommended that developers use a [vscode devcontainer](https://code.visualstudio.com/docs/devcontainers/containers). This repository contains configuration to set up a containerized development environment that suits its own needs. + +This project was created using the [Diamond Light Source Copier Template](https://github.com/DiamondLightSource/python-copier-template) for Python projects. + +For more information on common tasks like setting up a developer environment, running the tests, and setting a pre-commit hook, see the template's [How-to guides](https://diamondlightsource.github.io/python-copier-template/2.0.1/how-to.html). diff --git a/.github/CONTRIBUTING.rst b/.github/CONTRIBUTING.rst deleted file mode 100644 index b426da9a3..000000000 --- a/.github/CONTRIBUTING.rst +++ /dev/null @@ -1,78 +0,0 @@ -Contributing -============ - -Contributions and issues are most welcome! All issues and pull requests are -handled through github on the `PandABlocks-client repository`_. 
Also, please -check for any existing issues before filing a new one. If you have a great idea -but it involves big changes, please file a ticket before making a pull request! -We want to make sure you don't spend your time coding something that might not -fit the scope of the project. - -.. _PandABlocks-client repository: https://github.com/PandABlocks/PandABlocks-client/issues - -Running the tests ------------------ - -To get the source code and run the unit tests, run:: - - $ git clone git@github.com:PandABlocks/PandABlocks-client.git - $ cd PandABlocks-client - $ python3 -m venv /path/to/venv - $ source /path/to/venv/bin/activate - $ pytest - -While 100% code coverage does not make a library bug-free, it significantly -reduces the number of easily caught bugs! Please make sure coverage remains the -same or is improved by a pull request! - -Code Styling ------------- - -The code in this repository conforms to standards set by the following tools: - -- black_ for code formatting -- flake8_ for style checks -- isort_ for import ordering -- mypy_ for static type checking - -.. _black: https://github.com/psf/black -.. _flake8: http://flake8.pycqa.org/en/latest/ -.. _isort: https://github.com/timothycrosley/isort - -These tests will be run on code when running ``pytest`` and also -automatically at check in. Please read the tool documentation for details -on how to fix the errors it reports. - -Documentation -------------- - -Documentation is contained in the ``docs`` directory and extracted from -docstrings of the API. 
- -Docs follow the underlining convention:: - - Headling 1 (page title) - ======================= - - Heading 2 - --------- - - Heading 3 - ~~~~~~~~~ - - -You can build the docs from the project directory by running:: - - $ tox -e docs - $ firefox build/html/index.html - -Release Checklist ------------------ - -Before a new release, please go through the following checklist: - -- Choose a new PEP440 compliant release number -- Add a release note in CHANGELOG.rst -- Git tag the version with message from CHANGELOG -- Push to github and the actions will make a release on pypi -- Push to internal gitlab and do a dls-release.py of the tag diff --git a/.github/actions/install_requirements/action.yml b/.github/actions/install_requirements/action.yml index aab283a73..d33e08052 100644 --- a/.github/actions/install_requirements/action.yml +++ b/.github/actions/install_requirements/action.yml @@ -6,7 +6,7 @@ inputs: default: "dev" pip-install: description: Parameters to pass to pip install - default: "-e .[dev]" + default: "$([ -f dev-requirements.txt ] && echo '-c dev-requirements.txt') -e .[dev]" runs: using: composite diff --git a/.github/pages/make_switcher.py b/.github/pages/make_switcher.py index f4cffd1bc..e2c8e6f62 100755 --- a/.github/pages/make_switcher.py +++ b/.github/pages/make_switcher.py @@ -56,7 +56,7 @@ def get_versions(ref: str, add: Optional[str]) -> List[str]: def write_json(path: Path, repository: str, versions: str): org, repo_name = repository.split("/") struct = [ - dict(version=version, url=f"https://{org}.github.io/{repo_name}/{version}/") + {"version": version, "url": f"https://{org}.github.io/{repo_name}/{version}/"} for version in versions ] text = json.dumps(struct, indent=2) diff --git a/.github/workflows/_check.yml b/.github/workflows/_check.yml index b26d72a59..a6139c19f 100644 --- a/.github/workflows/_check.yml +++ b/.github/workflows/_check.yml @@ -1,15 +1,15 @@ on: workflow_call: outputs: - not-in-pr: + branch-pr: description: The PR number 
if the branch is in one - value: ${{ jobs.pr.outputs.not-in-pr }} + value: ${{ jobs.pr.outputs.branch-pr }} jobs: pr: runs-on: "ubuntu-latest" outputs: - not-in-pr: ${{ steps.script.outputs.result }} + branch-pr: ${{ steps.script.outputs.result }} steps: - uses: actions/github-script@v7 id: script @@ -23,6 +23,5 @@ jobs: }) if (prs.data.length) { console.log(`::notice ::Skipping CI on branch push as it is already run in PR #${prs.data[0]["number"]}`) - } else { - return "not-in-pr" + return prs.data[0]["number"] } diff --git a/.github/workflows/_docs.yml b/.github/workflows/_docs.yml index 4d9d7ad12..40446e332 100644 --- a/.github/workflows/_docs.yml +++ b/.github/workflows/_docs.yml @@ -6,6 +6,10 @@ jobs: runs-on: ubuntu-latest steps: + - name: Avoid git conflicts when tag and branch pushed at same time + if: github.ref_type == 'tag' + run: sleep 60 + - name: Checkout uses: actions/checkout@v4 with: @@ -24,23 +28,20 @@ jobs: - name: Remove environment.pickle run: rm build/html/.doctrees/environment.pickle - - name: Sanitize ref name for docs version - run: echo "DOCS_VERSION=${GITHUB_REF_NAME//[^A-Za-z0-9._-]/_}" >> $GITHUB_ENV - - - name: Move to versioned directory - run: mv build/html build/$DOCS_VERSION - - name: Upload built docs artifact uses: actions/upload-artifact@v4 with: name: docs path: build - - name: Add other static pages files - run: cp .github/pages/* build + - name: Sanitize ref name for docs version + run: echo "DOCS_VERSION=${GITHUB_REF_NAME//[^A-Za-z0-9._-]/_}" >> $GITHUB_ENV + + - name: Move to versioned directory + run: mv build/html .github/pages/$DOCS_VERSION - name: Write switcher.json - run: python .github/pages/make_switcher.py --add $DOCS_VERSION ${{ github.repository }} build/switcher.json + run: python .github/pages/make_switcher.py --add $DOCS_VERSION ${{ github.repository }} .github/pages/switcher.json - name: Publish Docs to gh-pages if: github.ref_type == 'tag' || github.ref_name == 'main' @@ -49,5 +50,5 @@ jobs: uses: 
peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3 with: github_token: ${{ secrets.GITHUB_TOKEN }} - publish_dir: build - keep_files: true + publish_dir: .github/pages + keep_files: true \ No newline at end of file diff --git a/.github/workflows/_pypi.yml b/.github/workflows/_pypi.yml index 69103d1ed..0c5258dbe 100644 --- a/.github/workflows/_pypi.yml +++ b/.github/workflows/_pypi.yml @@ -15,5 +15,3 @@ jobs: - name: Publish to PyPI using trusted publishing uses: pypa/gh-action-pypi-publish@release/v1 - with: - password: ${{ secrets.PYPI_TOKEN }} diff --git a/.github/workflows/_release.yml b/.github/workflows/_release.yml index 84f68ed68..b49fa7dca 100644 --- a/.github/workflows/_release.yml +++ b/.github/workflows/_release.yml @@ -8,15 +8,16 @@ jobs: steps: - name: Download artifacts uses: actions/download-artifact@v4 + with: + merge-multiple: true - - name: Prepare release files + - name: Zip up docs run: | - if [ -d docs ]; then - cd docs && zip -r docs.zip * - echo 'DOCS=docs/docs.zip' >> $GITHUB_ENV - fi - if [ -d dist ]; then - echo 'DIST=dist/*' >> $GITHUB_ENV + set -vxeuo pipefail + if [ -d html ]; then + mv html $GITHUB_REF_NAME + zip -r docs.zip $GITHUB_REF_NAME + rm -rf $GITHUB_REF_NAME fi - name: Create GitHub Release @@ -25,9 +26,7 @@ jobs: uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15 with: prerelease: ${{ contains(github.ref_name, 'a') || contains(github.ref_name, 'b') || contains(github.ref_name, 'rc') }} - files: | - ${{ env.DOCS }} - ${{ env.DIST }} + files: "*" generate_release_notes: true env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/_test.yml b/.github/workflows/_test.yml index 9f164afe5..f652d4145 100644 --- a/.github/workflows/_test.yml +++ b/.github/workflows/_test.yml @@ -9,6 +9,9 @@ on: type: string description: The runner to run this job on required: true + secrets: + CODECOV_TOKEN: + required: true env: # 
https://github.com/pytest-dev/pytest/issues/2042 @@ -29,6 +32,17 @@ jobs: name: Install dev versions of python packages uses: ./.github/actions/install_requirements + - if: inputs.python-version == 'dev' + name: Write the requirements as an artifact + run: pip freeze --exclude-editable > /tmp/dev-requirements.txt + + - if: inputs.python-version == 'dev' + name: Upload dev-requirements.txt + uses: actions/upload-artifact@v4 + with: + name: dev-requirements + path: /tmp/dev-requirements.txt + - if: inputs.python-version != 'dev' name: Install latest versions of python packages uses: ./.github/actions/install_requirements @@ -37,10 +51,12 @@ jobs: pip-install: ".[dev]" - name: Run tests - run: tox -e pytest + run: tox -e tests - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: name: ${{ inputs.python-version }}/${{ inputs.runs-on }} files: cov.xml + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5ba6f3a82..888eb4d80 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,14 +10,14 @@ jobs: lint: needs: check - if: needs.check.outputs.not-in-pr + if: needs.check.outputs.branch-pr == '' uses: ./.github/workflows/_tox.yml with: - tox: pre-commit,mypy + tox: pre-commit,type-checking test: needs: check - if: needs.check.outputs.not-in-pr + if: needs.check.outputs.branch-pr == '' strategy: matrix: runs-on: ["ubuntu-latest"] # can add windows-latest, macos-latest @@ -31,31 +31,33 @@ jobs: with: runs-on: ${{ matrix.runs-on }} python-version: ${{ matrix.python-version }} + secrets: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} container: needs: check - if: needs.check.outputs.not-in-pr + if: needs.check.outputs.branch-pr == '' uses: ./.github/workflows/_container.yml permissions: packages: write docs: needs: check - if: needs.check.outputs.not-in-pr + if: needs.check.outputs.branch-pr == '' uses: ./.github/workflows/_docs.yml 
dist: needs: check - if: needs.check.outputs.not-in-pr + if: needs.check.outputs.branch-pr == '' uses: ./.github/workflows/_dist.yml - + pypi: if: github.ref_type == 'tag' needs: dist uses: ./.github/workflows/_pypi.yml permissions: id-token: write - + release: if: github.ref_type == 'tag' needs: [dist, docs] diff --git a/.gitignore b/.gitignore index a37be99b3..2593ec752 100644 --- a/.gitignore +++ b/.gitignore @@ -8,7 +8,6 @@ __pycache__/ # Distribution / packaging .Python env/ -.venv build/ develop-eggs/ dist/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5bc9f001c..5a4cbf7b4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.5.0 hooks: - id: check-added-large-files - id: check-yaml @@ -8,16 +8,16 @@ repos: - repo: local hooks: - - id: black - name: Run black - stages: [commit] + - id: ruff + name: lint with ruff language: system - entry: black --check --diff + entry: ruff check --force-exclude types: [python] + require_serial: true - - id: ruff - name: Run ruff - stages: [commit] + - id: ruff-format + name: format with ruff language: system - entry: ruff + entry: ruff format --force-exclude types: [python] + require_serial: true diff --git a/.vscode/launch.json b/.vscode/launch.json index 3c8a2b5be..15b70fa13 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -17,7 +17,7 @@ }, { "name": "Debug Unit Test", - "type": "python", + "type": "debugpy", "request": "launch", "program": "${file}", "purpose": [ @@ -25,12 +25,9 @@ ], "console": "integratedTerminal", "env": { - // Cannot have coverage and debugging at the same time, - // and the default config in setup.cfg adds coverage - // https://github.com/microsoft/vscode-python/issues/693 - "PYTEST_ADDOPTS": "--no-cov" + // Enable break on exception when debugging tests (see: tests/conftest.py) + "PYTEST_RAISE": "1", }, - "justMyCode": false } ] -} +} \ No newline at 
end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index 8b5839135..c129d991b 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,29 +1,11 @@ { - "python.linting.pylintEnabled": false, - "python.linting.flake8Enabled": false, - "python.linting.mypyEnabled": true, - "python.linting.enabled": true, - "python.testing.pytestArgs": [ - "--cov=pandablocks", - "--cov-report", - "xml:cov.xml" - ], "python.testing.unittestEnabled": false, "python.testing.pytestEnabled": true, - "python.formatting.provider": "black", - "python.languageServer": "Pylance", "editor.formatOnSave": true, "editor.codeActionsOnSave": { "source.organizeImports": "explicit" }, - "[jsonc]": { - "editor.defaultFormatter": "vscode.json-language-features" - }, - "python.analysis.typeCheckingMode": "off", "[python]": { - "editor.codeActionsOnSave": { - "source.fixAll.ruff": "never", - "source.organizeImports.ruff": "explicit" - } - } -} + "editor.defaultFormatter": "charliermarsh.ruff", + }, +} \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 000000000..8ee45994f --- /dev/null +++ b/README.md @@ -0,0 +1,51 @@ +[![CI](https://github.com/PandABlocks/PandABlocks-client/actions/workflows/ci.yml/badge.svg)](https://github.com/PandABlocks/PandABlocks-client/actions/workflows/ci.yml) +[![Coverage](https://codecov.io/gh/PandABlocks/PandABlocks-client/branch/main/graph/badge.svg)](https://codecov.io/gh/PandABlocks/PandABlocks-client) +[![PyPI](https://img.shields.io/pypi/v/pandablocks.svg)](https://pypi.org/project/pandablocks) +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) + +# pandablocks + +A Python client to control and data ports of the PandABlocks TCP server + +Source | +:---: | :---: +PyPI | `pip install pandablocks` +Docker | `docker run ghcr.io/pandablocks/PandABlocks-client:latest` +Documentation | +Releases | + +Command line tool features an interactive 
console, load/save control, and HDF5 writing: + +```shell +$ pip install pandablocks + +$ pandablocks control +< PCAP. # Hit TAB key... +PCAP.ACTIVE PCAP.BITS1 PCAP.BITS3 PCAP.GATE PCAP.SAMPLES PCAP.TRIG PCAP.TS_END PCAP.TS_TRIG +PCAP.BITS0 PCAP.BITS2 PCAP.ENABLE PCAP.HEALTH PCAP.SHIFT_SUM PCAP.TRIG_EDGE PCAP.TS_START +< PCAP.ACTIVE? +OK =1 + +$ pandablocks hdf /tmp/panda-%d.h5 +INFO:Opened '/tmp/panda-1.h5' with 60 byte samples stored in 11 datasets +INFO:Closed '/tmp/panda-1.h5' after writing 50000000 samples. End reason is 'Disarmed' +``` + +Library features a Sans-IO core with both asyncio and blocking wrappers: + +```python +from pandablocks.blocking import BlockingClient +from pandablocks.commands import Get + +with BlockingClient("hostname-or-ip") as client: + # Commands sent to Control port + idn = client.send(Get("*IDN")) + print(f"Hello {idn}") + for data in client.data(): + # Data captured from Data port + print(f"I got some PCAP data {data}") +``` + + + +See https://pandablocks.github.io/PandABlocks-client for more detailed documentation. diff --git a/README.rst b/README.rst deleted file mode 100644 index cbcca3320..000000000 --- a/README.rst +++ /dev/null @@ -1,74 +0,0 @@ -PandABlocks Python Client -========================= - -|code_ci| |docs_ci| |coverage| |pypi_version| |license| - -A Python client which connects to the control and data ports of the PandABlocks TCP server. - -============== ============================================================== -PyPI ``pip install pandablocks`` -Source code https://github.com/PandABlocks/PandABlocks-client -Documentation https://PandABlocks.github.io/PandABlocks-client -Releases https://github.com/PandABlocks/PandABlocks-client/releases -============== ============================================================== - -Command line tool features an interactive console, load/save control, and HDF5 -writing: - -.. code:: - - $ pip install pandablocks - - $ pandablocks control - < PCAP. # Hit TAB key... 
- PCAP.ACTIVE PCAP.BITS1 PCAP.BITS3 PCAP.GATE PCAP.SAMPLES PCAP.TRIG PCAP.TS_END PCAP.TS_TRIG - PCAP.BITS0 PCAP.BITS2 PCAP.ENABLE PCAP.HEALTH PCAP.SHIFT_SUM PCAP.TRIG_EDGE PCAP.TS_START - < PCAP.ACTIVE? - OK =1 - - $ pandablocks hdf /tmp/panda-%d.h5 - INFO:Opened '/tmp/panda-1.h5' with 60 byte samples stored in 11 datasets - INFO:Closed '/tmp/panda-1.h5' after writing 50000000 samples. End reason is 'Disarmed' - -Library features a Sans-IO core with both asyncio and blocking wrappers: - -.. code:: python - - from pandablocks.blocking import BlockingClient - from pandablocks.commands import Get - - with BlockingClient("hostname-or-ip") as client: - # Commands sent to Control port - idn = client.send(Get("*IDN")) - print(f"Hello {idn}") - for data in client.data(): - # Data captured from Data port - print(f"I got some PCAP data {data}") - - -.. |code_ci| image:: https://github.com/PandABlocks/PandABlocks-client/actions/workflows/code.yml/badge.svg?branch=master - :target: https://github.com/PandABlocks/PandABlocks-client/actions/workflows/code.yml - :alt: Code CI - -.. |docs_ci| image:: https://github.com/PandABlocks/PandABlocks-client/actions/workflows/docs.yml/badge.svg?branch=master - :target: https://github.com/PandABlocks/PandABlocks-client/actions/workflows/docs.yml - :alt: Docs CI - -.. |coverage| image:: https://codecov.io/gh/PandABlocks/PandABlocks-client/branch/master/graph/badge.svg - :target: https://codecov.io/gh/PandABlocks/PandABlocks-client - :alt: Test Coverage - -.. |pypi_version| image:: https://img.shields.io/pypi/v/pandablocks.svg - :target: https://pypi.org/project/pandablocks - :alt: Latest PyPI version - -.. |license| image:: https://img.shields.io/badge/License-Apache%202.0-blue.svg - :target: https://opensource.org/licenses/Apache-2.0 - :alt: Apache License - - -.. 
- Anything below this line is used when viewing README.rst and will be replaced - when included in index.rst - -See https://pandablocks.github.io/PandABlocks-client for more detailed documentation. diff --git a/docs/CHANGELOG.rst b/docs/CHANGELOG.rst deleted file mode 100644 index 8bfb306de..000000000 --- a/docs/CHANGELOG.rst +++ /dev/null @@ -1,17 +0,0 @@ -Change Log -========== -All notable changes to this project will be documented in this file. -This project adheres to `Semantic Versioning `_. - -Unreleased_ ------------ - -- Nothing yet - - -0.1 - 2020-07-09 ----------------- - -- Initial release - -.. _Unreleased: https://github.com/PandABlocks/PandABlocks-client/compare/0.1...HEAD diff --git a/docs/conf.py b/docs/conf.py index 698ab94ed..5f555b79e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,8 +48,13 @@ "sphinx_copybutton", # For the card element "sphinx_design", + # So we can write markdown files + "myst_parser", ] +# So we can use the ::: syntax +myst_enable_extensions = ["colon_fence"] + # If true, Sphinx will warn about all references where the target cannot # be found. nitpicky = True @@ -68,6 +73,7 @@ ("py:class", "'id'"), ("py:class", "typing_extensions.Literal"), ("py:func", "int"), + ("py:class", "pandablocks.commands.T"), ] # Both the class’ and the __init__ method’s docstring are concatenated and @@ -87,9 +93,6 @@ # role, that is, for text marked up `like this` default_role = "any" -# The suffix of source filenames. -source_suffix = ".rst" - # The master toctree document. 
master_doc = "index" @@ -139,7 +142,7 @@ html_theme = "pydata_sphinx_theme" github_repo = "PandABlocks-client" github_user = "PandABlocks" -switcher_json = "https://PandABlocks.github.io/PandABlocks-client/switcher.json" +switcher_json = f"https://{github_user}.github.io/{github_repo}/switcher.json" switcher_exists = requests.get(switcher_json).ok if not switcher_exists: print( @@ -178,12 +181,6 @@ }, "check_switcher": False, "navbar_end": ["theme-switcher", "icon-links", "version-switcher"], - "external_links": [ - { - "name": "Release Notes", - "url": f"https://github.com/{github_user}/{github_repo}/releases", - } - ], "navigation_with_keys": False, } diff --git a/docs/developer/explanations/decisions.rst b/docs/developer/explanations/decisions.rst deleted file mode 100644 index 5841e6ea0..000000000 --- a/docs/developer/explanations/decisions.rst +++ /dev/null @@ -1,17 +0,0 @@ -.. This Source Code Form is subject to the terms of the Mozilla Public -.. License, v. 2.0. If a copy of the MPL was not distributed with this -.. file, You can obtain one at http://mozilla.org/MPL/2.0/. - -Architectural Decision Records -============================== - -We record major architectural decisions in Architecture Decision Records (ADRs), -as `described by Michael Nygard -`_. -Below is the list of our current ADRs. - -.. toctree:: - :maxdepth: 1 - :glob: - - decisions/* \ No newline at end of file diff --git a/docs/developer/explanations/decisions/0001-record-architecture-decisions.rst b/docs/developer/explanations/decisions/0001-record-architecture-decisions.rst deleted file mode 100644 index 0604062cc..000000000 --- a/docs/developer/explanations/decisions/0001-record-architecture-decisions.rst +++ /dev/null @@ -1,24 +0,0 @@ -1. Record architecture decisions -================================ - -Status ------- - -Accepted - -Context -------- - -We need to record the architectural decisions made on this project. 
- -Decision --------- - -We will use Architecture Decision Records, as `described by Michael Nygard -`_. - -Consequences ------------- - -See Michael Nygard's article, linked above. To create new ADRs we will copy and -paste from existing ones. diff --git a/docs/developer/explanations/decisions/0002-switched-to-pip-skeleton.rst b/docs/developer/explanations/decisions/0002-switched-to-pip-skeleton.rst deleted file mode 100644 index 07d02bd4a..000000000 --- a/docs/developer/explanations/decisions/0002-switched-to-pip-skeleton.rst +++ /dev/null @@ -1,33 +0,0 @@ -2. Adopt python_copier_template for project structure -===================================================== - -Status ------- - -Accepted - -Context -------- - -We should use the following `python_copier_template `_. -The template will ensure consistency in developer -environments and package management. - -Decision --------- - -We have switched to using the skeleton. - -Consequences ------------- - -This module will use a fixed set of tools as developed in python_copier_template -and can pull from this template to update the packaging to the latest techniques. - -As such, the developer environment may have changed, the following could be -different: - -- linting -- formatting -- pip venv setup -- CI/CD diff --git a/docs/developer/how-to/build-docs.rst b/docs/developer/how-to/build-docs.rst deleted file mode 100644 index 11a5e6386..000000000 --- a/docs/developer/how-to/build-docs.rst +++ /dev/null @@ -1,38 +0,0 @@ -Build the docs using sphinx -=========================== - -You can build the `sphinx`_ based docs from the project directory by running:: - - $ tox -e docs - -This will build the static docs on the ``docs`` directory, which includes API -docs that pull in docstrings from the code. - -.. 
seealso:: - - `documentation_standards` - -The docs will be built into the ``build/html`` directory, and can be opened -locally with a web browser:: - - $ firefox build/html/index.html - -Autobuild ---------- - -You can also run an autobuild process, which will watch your ``docs`` -directory for changes and rebuild whenever it sees changes, reloading any -browsers watching the pages:: - - $ tox -e docs autobuild - -You can view the pages at localhost:: - - $ firefox http://localhost:8000 - -If you are making changes to source code too, you can tell it to watch -changes in this directory too:: - - $ tox -e docs autobuild -- --watch src - -.. _sphinx: https://www.sphinx-doc.org/ diff --git a/docs/developer/how-to/contribute.rst b/docs/developer/how-to/contribute.rst deleted file mode 100644 index 65b992f08..000000000 --- a/docs/developer/how-to/contribute.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../../.github/CONTRIBUTING.rst diff --git a/docs/developer/how-to/lint.rst b/docs/developer/how-to/lint.rst deleted file mode 100644 index 2df258d8f..000000000 --- a/docs/developer/how-to/lint.rst +++ /dev/null @@ -1,39 +0,0 @@ -Run linting using pre-commit -============================ - -Code linting is handled by black_ and ruff_ run under pre-commit_. - -Running pre-commit ------------------- - -You can run the above checks on all files with this command:: - - $ tox -e pre-commit - -Or you can install a pre-commit hook that will run each time you do a ``git -commit`` on just the files that have changed:: - - $ pre-commit install - -It is also possible to `automatically enable pre-commit on cloned repositories `_. -This will result in pre-commits being enabled on every repo your user clones from now on. - -Fixing issues -------------- - -If black reports an issue you can tell it to reformat all the files in the -repository:: - - $ black . - -Likewise with ruff:: - - $ ruff --fix . 
- -Ruff may not be able to automatically fix all issues; in this case, you will have to fix those manually. - -VSCode support --------------- - -The ``.vscode/settings.json`` will run black formatting as well as -ruff checking on save. Issues will be highlighted in the editor window. diff --git a/docs/developer/how-to/make-release.rst b/docs/developer/how-to/make-release.rst deleted file mode 100644 index df24c3407..000000000 --- a/docs/developer/how-to/make-release.rst +++ /dev/null @@ -1,16 +0,0 @@ -Make a release -============== - -To make a new release, please follow this checklist: - -- Choose a new PEP440 compliant release number (see https://peps.python.org/pep-0440/) -- Go to the GitHub release_ page -- Choose ``Draft New Release`` -- Click ``Choose Tag`` and supply the new tag you chose (click create new tag) -- Click ``Generate release notes``, review and edit these notes -- Choose a title and click ``Publish Release`` - -Note that tagging and pushing to the main branch has the same effect except that -you will not get the option to edit the release notes. - -.. _release: https://github.com/PandABlocks/PandABlocks-client/releases diff --git a/docs/developer/how-to/pin-requirements.rst b/docs/developer/how-to/pin-requirements.rst deleted file mode 100644 index 89639623a..000000000 --- a/docs/developer/how-to/pin-requirements.rst +++ /dev/null @@ -1,74 +0,0 @@ -Pinning Requirements -==================== - -Introduction ------------- - -By design this project only defines dependencies in one place, i.e. in -the ``requires`` table in ``pyproject.toml``. - -In the ``requires`` table it is possible to pin versions of some dependencies -as needed. For library projects it is best to leave pinning to a minimum so -that your library can be used by the widest range of applications. - -When CI builds the project it will use the latest compatible set of -dependencies available (after applying your pins and any dependencies' pins). 
- -This approach means that there is a possibility that a future build may -break because an updated release of a dependency has made a breaking change. - -The correct way to fix such an issue is to work out the minimum pinning in -``requires`` that will resolve the problem. However this can be quite hard to -do and may be time consuming when simply trying to release a minor update. - -For this reason we provide a mechanism for locking all dependencies to -the same version as a previous successful release. This is a quick fix that -should guarantee a successful CI build. - -Finding the lock files ----------------------- - -Every release of the project will have a set of requirements files published -as release assets. - -For example take a look at the release page for python3-pip-skeleton-cli here: -https://github.com/DiamondLightSource/python3-pip-skeleton-cli/releases/tag/3.3.0 - -There is a list of requirements*.txt files showing as assets on the release. - -There is one file for each time the CI installed the project into a virtual -environment. There are multiple of these as the CI creates a number of -different environments. - -The files are created using ``pip freeze`` and will contain a full list -of the dependencies and sub-dependencies with pinned versions. - -You can download any of these files by clicking on them. It is best to use -the one that ran with the lowest Python version as this is more likely to -be compatible with all the versions of Python in the test matrix. -i.e. ``requirements-test-ubuntu-latest-3.8.txt`` in this example. - -Applying the lock file ----------------------- - -To apply a lockfile: - -- copy the requirements file you have downloaded to the root of your - repository -- rename it to requirements.txt -- commit it into the repo -- push the changes - -The CI looks for a requirements.txt in the root and will pass it to pip -when installing each of the test environments. 
pip will then install exactly -the same set of packages as the previous release. - -Removing dependency locking from CI ------------------------------------ - -Once the reasons for locking the build have been resolved it is a good idea -to go back to an unlocked build. This is because you get an early indication -of any incoming problems. - -To restore unlocked builds in CI simply remove requirements.txt from the root -of the repo and push. diff --git a/docs/developer/how-to/run-tests.rst b/docs/developer/how-to/run-tests.rst deleted file mode 100644 index d2e03644c..000000000 --- a/docs/developer/how-to/run-tests.rst +++ /dev/null @@ -1,12 +0,0 @@ -Run the tests using pytest -========================== - -Testing is done with pytest_. It will find functions in the project that `look -like tests`_, and run them to check for errors. You can run it with:: - - $ tox -e pytest - -It will also report coverage to the commandline and to ``cov.xml``. - -.. _pytest: https://pytest.org/ -.. _look like tests: https://docs.pytest.org/explanation/goodpractices.html#test-discovery diff --git a/docs/developer/how-to/static-analysis.rst b/docs/developer/how-to/static-analysis.rst deleted file mode 100644 index 065920e1c..000000000 --- a/docs/developer/how-to/static-analysis.rst +++ /dev/null @@ -1,8 +0,0 @@ -Run static analysis using mypy -============================== - -Static type analysis is done with mypy_. It checks type definition in source -files without running them, and highlights potential issues where types do not -match. You can run it with:: - - $ tox -e mypy diff --git a/docs/developer/how-to/test-container.rst b/docs/developer/how-to/test-container.rst deleted file mode 100644 index a4a43a6ff..000000000 --- a/docs/developer/how-to/test-container.rst +++ /dev/null @@ -1,25 +0,0 @@ -Container Local Build and Test -============================== - -CI builds a runtime container for the project. 
The local tests -checks available via ``tox -p`` do not verify this because not -all developers will have docker installed locally. - -If CI is failing to build the container, then it is best to fix and -test the problem locally. This would require that you have docker -or podman installed on your local workstation. - -In the following examples the command ``docker`` is interchangeable with -``podman`` depending on which container cli you have installed. - -To build the container and call it ``test``:: - - cd - docker build -t test . - -To verify that the container runs:: - - docker run -it test --help - -You can pass any other command line parameters to your application -instead of --help. diff --git a/docs/developer/how-to/update-tools.rst b/docs/developer/how-to/update-tools.rst deleted file mode 100644 index c1075ee8c..000000000 --- a/docs/developer/how-to/update-tools.rst +++ /dev/null @@ -1,16 +0,0 @@ -Update the tools -================ - -This module is merged with the python3-pip-skeleton_. This is a generic -Python project structure which provides a means to keep tools and -techniques in sync between multiple Python projects. To update to the -latest version of the skeleton, run:: - - $ git pull --rebase=false https://github.com/DiamondLightSource/python3-pip-skeleton - -Any merge conflicts will indicate an area where something has changed that -conflicts with the setup of the current module. Check the `closed pull requests -`_ -of the skeleton module for more details. - -.. _python3-pip-skeleton: https://DiamondLightSource.github.io/python3-pip-skeleton diff --git a/docs/developer/index.rst b/docs/developer/index.rst deleted file mode 100644 index 8a6369b9c..000000000 --- a/docs/developer/index.rst +++ /dev/null @@ -1,64 +0,0 @@ -Developer Guide -=============== - -Documentation is split into four categories, also accessible from links in the -side-bar. - -.. grid:: 2 - :gutter: 4 - - .. grid-item-card:: :material-regular:`directions_run;3em` - - .. 
toctree:: - :caption: Tutorials - :maxdepth: 1 - - tutorials/dev-install - - +++ - - Tutorials for getting up and running as a developer. - - .. grid-item-card:: :material-regular:`task;3em` - - .. toctree:: - :caption: How-to Guides - :maxdepth: 1 - - how-to/contribute - how-to/build-docs - how-to/run-tests - how-to/static-analysis - how-to/lint - how-to/update-tools - how-to/make-release - how-to/pin-requirements - how-to/test-container - - +++ - - Practical step-by-step guides for day-to-day dev tasks. - - .. grid-item-card:: :material-regular:`apartment;3em` - - .. toctree:: - :caption: Explanations - :maxdepth: 1 - - explanations/decisions - - +++ - - Explanations of how and why the architecture is why it is. - - .. grid-item-card:: :material-regular:`description;3em` - - .. toctree:: - :caption: Reference - :maxdepth: 1 - - reference/standards - - +++ - - Technical reference material on standards in use. diff --git a/docs/developer/reference/standards.rst b/docs/developer/reference/standards.rst deleted file mode 100644 index 5a1fd4782..000000000 --- a/docs/developer/reference/standards.rst +++ /dev/null @@ -1,63 +0,0 @@ -Standards -========= - -This document defines the code and documentation standards used in this -repository. - -Code Standards --------------- - -The code in this repository conforms to standards set by the following tools: - -- black_ for code formatting -- ruff_ for style checks -- mypy_ for static type checking - -.. seealso:: - - How-to guides `../how-to/lint` and `../how-to/static-analysis` - -.. _documentation_standards: - -Documentation Standards ------------------------ - -Docstrings are pre-processed using the Sphinx Napoleon extension. As such, -google-style_ is considered as standard for this repository. Please use type -hints in the function signature for types. For example: - -.. code:: python - - def func(arg1: str, arg2: int) -> bool: - """Summary line. - - Extended description of function. 
- - Args: - arg1: Description of arg1 - arg2: Description of arg2 - - Returns: - Description of return value - """ - return True - -.. _google-style: https://sphinxcontrib-napoleon.readthedocs.io/en/latest/index.html#google-vs-numpy - -Documentation is contained in the ``docs`` directory and extracted from -docstrings of the API. - -Docs follow the underlining convention:: - - Headling 1 (page title) - ======================= - - Heading 2 - --------- - - Heading 3 - ~~~~~~~~~ - -.. seealso:: - - How-to guide `../how-to/build-docs` diff --git a/docs/developer/tutorials/dev-install.rst b/docs/developer/tutorials/dev-install.rst deleted file mode 100644 index 49ecac74c..000000000 --- a/docs/developer/tutorials/dev-install.rst +++ /dev/null @@ -1,68 +0,0 @@ -Developer install -================= - -These instructions will take you through the minimal steps required to get a dev -environment setup, so you can run the tests locally. - -Clone the repository --------------------- - -First clone the repository locally using `Git -`_:: - - $ git clone git://github.com/PandABlocks/PandABlocks-client.git - -Install dependencies --------------------- - -You can choose to either develop on the host machine using a `venv` (which -requires python 3.8 or later) or to run in a container under `VSCode -`_ - -.. tab-set:: - - .. tab-item:: Local virtualenv - - .. code:: - - $ cd pandablocks - $ python3 -m venv venv - $ source venv/bin/activate - $ pip install -e '.[dev]' - - .. tab-item:: VSCode devcontainer - - .. code:: - - $ code pandablocks - # Click on 'Reopen in Container' when prompted - # Open a new terminal - - .. note:: - - See the epics-containers_ documentation for more complex - use cases, such as integration with podman. 
- -See what was installed ----------------------- - -To see a graph of the python package dependency tree type:: - - $ pipdeptree - -Build and test --------------- - -Now you have a development environment you can run the tests in a terminal:: - - $ tox -p - -This will run in parallel the following checks: - -- `../how-to/build-docs` -- `../how-to/run-tests` -- `../how-to/static-analysis` -- `../how-to/lint` - - -.. _epics-containers: https://epics-containers.github.io/main/user/tutorials/devcontainer.html diff --git a/docs/explanations.md b/docs/explanations.md new file mode 100644 index 000000000..73ab289b6 --- /dev/null +++ b/docs/explanations.md @@ -0,0 +1,10 @@ +# Explanations + +Explanations of how it works and why it works that way. + +```{toctree} +:maxdepth: 1 +:glob: + +explanations/* +``` diff --git a/docs/explanations/decisions.md b/docs/explanations/decisions.md new file mode 100644 index 000000000..0533b98d4 --- /dev/null +++ b/docs/explanations/decisions.md @@ -0,0 +1,12 @@ +# Architectural Decision Records + +Architectural decisions are made throughout a project's lifetime. As a way of keeping track of these decisions, we record these decisions in Architecture Decision Records (ADRs) listed below. + +```{toctree} +:glob: true +:maxdepth: 1 + +decisions/* +``` + +For more information on ADRs see this [blog by Michael Nygard](http://thinkrelevance.com/blog/2011/11/15/documenting-architecture-decisions). diff --git a/docs/explanations/decisions/0001-record-architecture-decisions.md b/docs/explanations/decisions/0001-record-architecture-decisions.md new file mode 100644 index 000000000..44d234efc --- /dev/null +++ b/docs/explanations/decisions/0001-record-architecture-decisions.md @@ -0,0 +1,18 @@ +# 1. Record architecture decisions + +## Status + +Accepted + +## Context + +We need to record the architectural decisions made on this project. 
+ +## Decision + +We will use Architecture Decision Records, as [described by Michael Nygard](http://thinkrelevance.com/blog/2011/11/15/documenting-architecture-decisions). + +## Consequences + +See Michael Nygard's article, linked above. To create new ADRs we will copy and +paste from existing ones. diff --git a/docs/explanations/decisions/0002-switched-to-python-copier-template.md b/docs/explanations/decisions/0002-switched-to-python-copier-template.md new file mode 100644 index 000000000..66fe5d8b2 --- /dev/null +++ b/docs/explanations/decisions/0002-switched-to-python-copier-template.md @@ -0,0 +1,28 @@ +# 2. Adopt python-copier-template for project structure + +## Status + +Accepted + +## Context + +We should use the following [python-copier-template](https://github.com/DiamondLightSource/python-copier-template). +The template will ensure consistency in developer +environments and package management. + +## Decision + +We have switched to using the template. + +## Consequences + +This module will use a fixed set of tools as developed in `python-copier-template` +and can pull from this template to update the packaging to the latest techniques. + +As such, the developer environment may have changed, the following could be +different: + +- linting +- formatting +- pip venv setup +- CI/CD diff --git a/docs/developer/explanations/decisions/0003-make-library-sans-io.rst b/docs/explanations/decisions/0003-make-library-sans-io.md similarity index 62% rename from docs/developer/explanations/decisions/0003-make-library-sans-io.rst rename to docs/explanations/decisions/0003-make-library-sans-io.md index a1a634ff3..ad9f2e5cb 100644 --- a/docs/developer/explanations/decisions/0003-make-library-sans-io.rst +++ b/docs/explanations/decisions/0003-make-library-sans-io.md @@ -1,24 +1,19 @@ -3. Sans-IO pandABlocks-client -============================= +# 3. 
Sans-IO pandABlocks-client Date: 2021-08-02 (ADR created retroactively) -Status ------- +## Status Accepted -Context -------- +## Context Ensure PandABlocks-client works sans-io. -Decision --------- +## Decision We will ensure pandablocks works sans-io `sans-io `. -Consequences ------------- +## Consequences -We have the option to use an asyncio client or a blocking client. \ No newline at end of file +We have the option to use an asyncio client or a blocking client. diff --git a/docs/explanations/decisions/COPYME b/docs/explanations/decisions/COPYME new file mode 100644 index 000000000..b466c7929 --- /dev/null +++ b/docs/explanations/decisions/COPYME @@ -0,0 +1,19 @@ +# 3. Short descriptive title + +Date: Today's date + +## Status + +Accepted + +## Context + +Background to allow us to make the decision, to show how we arrived at our conclusions. + +## Decision + +What decision we made. + +## Consequences + +What we will do as a result of this decision. diff --git a/docs/user/explanations/performance.rst b/docs/explanations/performance.md similarity index 77% rename from docs/user/explanations/performance.rst rename to docs/explanations/performance.md index f2952d96c..4e7a22f28 100644 --- a/docs/user/explanations/performance.rst +++ b/docs/explanations/performance.md @@ -1,14 +1,13 @@ -.. _performance: +(performance)= -How fast can we write HDF files? -================================ +# How fast can we write HDF files? There are many factors that affect the speed we can write HDF files. This article discusses how this library addresses them and what the maximum data rate of a PandA is. -Factors to consider -------------------- +## Factors to consider +```{eval-rst} .. 
list-table:: :widths: 10 50 @@ -32,50 +31,44 @@ Factors to consider or panda-webcontrol will reduce throughput * - Flush rate - Flushing data to disk to often will slow write speed +``` -Strategies to help ------------------- +## Strategies to help There are a number of strategies that help increase performance. These can be combined to give the greatest benefit -Average the data -~~~~~~~~~~~~~~~~ +### Average the data -Selecting the ``Mean`` capture mode will activate on-FPGA averaging of the -captured value. ``Min`` and ``Max`` can also be captured at the same time. -Capturing these rather than ``Value`` may allow you to lower the trigger +Selecting the `Mean` capture mode will activate on-FPGA averaging of the +captured value. `Min` and `Max` can also be captured at the same time. +Capturing these rather than `Value` may allow you to lower the trigger frequency while still providing enough information for data analysis -Scale the data on the client -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +### Scale the data on the client -`AsyncioClient.data` and `BlockingClient.data` accept a ``scaled`` argument. +`AsyncioClient.data` and `BlockingClient.data` accept a `scaled` argument. Setting this to False will transfer the raw unscaled data, allowing for up to 50% more data to be sent depending on the datatype of the field. You can use the `StartData.fields` information to scale the data on the client. The `write_hdf_files` function uses this approach. -Remove the panda-webcontrol package -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +### Remove the panda-webcontrol package The measures above should get you to about 50MBytes/s, but if more clients connect to the web GUI then this will drop. To increase the data rate to 60MBytes/s and improve stability you may want to remove the panda-webcontrol zpkg. -Flush about 1Hz -~~~~~~~~~~~~~~~ +### Flush about 1Hz -`AsyncioClient.data` accepts a ``flush_period`` argument. If given, it will +`AsyncioClient.data` accepts a `flush_period` argument. 
If given, it will squash intermediate data frames together until this period expires, and only then produce them. This means the numpy data blocks are larger and can be more efficiently written to disk then flushed. The `write_hdf_files` function uses this approach. - -Performance Achieved --------------------- +## Performance Achieved Tests were run with the following conditions: @@ -100,15 +93,14 @@ When panda-webcontrol was not installed, the following results were achieved: Increasing above these throughputs failed most scans with `DATA_OVERRUN`. -Data overruns -------------- +## Data overruns If there is a `DATA_OVERRUN`, the server will stop sending data. The most recently received `FrameData` from either `AsyncioClient.data` or `BlockingClient.data` may -be corrupt. This is the case if the ``scaled`` argument is set to False. The mechanism +be corrupt. This is the case if the `scaled` argument is set to False. The mechanism the server uses to send raw unscaled data is only able to detect the corrupt frame after it has already been sent. Conversely, the mechanism used to send scaled data aborts prior to sending a corrupt frame. -The `write_hdf_files` function uses ``scaled=False``, so your HDF file may include some +The `write_hdf_files` function uses `scaled=False`, so your HDF file may include some corrupt data in the event of an overrun. diff --git a/docs/user/explanations/sans-io.rst b/docs/explanations/sans-io.md similarity index 52% rename from docs/user/explanations/sans-io.rst rename to docs/explanations/sans-io.md index e8f759686..07c2357f6 100644 --- a/docs/user/explanations/sans-io.rst +++ b/docs/explanations/sans-io.md @@ -1,9 +1,8 @@ -.. _sans-io: +(sans-io)= -Why write a Sans-IO library? -============================ +# Why write a Sans-IO library? -As the reference_ says: *Reusability*. The protocol can be coded in a separate +As the [reference] says: *Reusability*. 
The protocol can be coded in a separate class to the I/O allowing integration into a number of different concurrency frameworks. @@ -12,69 +11,61 @@ coded the protocol in either of them it would not be usable in the other. Much better to put it in a separate class and feed it bytes off the wire. We call this protocol encapsulation a Connection. -Connections ------------ +## Connections The PandA TCP server exposes a Control port and a Data port, so there are -corresponding `ControlConnection` and `DataConnection` objects: +corresponding [](ControlConnection) and objects: -.. currentmodule:: pandablocks.connections +The [](ControlConnection) class has the following methods: -.. autoclass:: ControlConnection - :noindex: - - The :meth:`~ControlConnection.send` method takes a `Command` subclass and +- The [`send()`](ControlConnection.send) method takes a `Command` subclass and returns the bytes that should be sent to the PandA. Whenever bytes are - received from the socket they can be passed to - :meth:`~ControlConnection.receive_bytes` which will return any subsequent - bytes that should be send back. The :meth:`~ControlConnection.responses` - method returns an iterator of ``(command, response)`` tuples that have now + received from the socket they can be passed to this method which will return any subsequent + bytes that should be send back. +- The [`responses()`](ControlConnection.responses) method returns an iterator of ``(command, response)`` tuples that have now completed. The response type will depend on the command. For instance `Get` returns `bytes` or a `list` of `bytes` of the field value, and `GetFieldInfo` returns a `dict` mapping `str` field name to `FieldInfo`. -.. 
autoclass:: DataConnection - :noindex: +The [](DataConnection) class has the following methods: - The :meth:`~DataConnection.connect` method takes any connection arguments +- The [`connect()`](DataConnection.connect) method takes any connection arguments and returns the bytes that should be sent to the PandA to make the initial connection. Whenever bytes are received from the socket they can be passed - to :meth:`~DataConnection.receive_bytes` which will return an iterator of - `Data` objects. Intermediate `FrameData` can be squashed together by passing + to this method which will return an iterator of + `Data` objects. +- Intermediate `FrameData` can be squashed together by passing ``flush_every_frame=False``, then explicitly calling - :meth:`~DataConnection.flush` when they are required. + [`flush()`](DataConnection.flush) when they are required. -Wrappers --------- +## Wrappers Of course, these Connections are useless without connecting some I/O. To aid with this, wrappers are included for use in `asyncio ` and blocking programs. They expose slightly different APIs to make best use of the features of their respective concurrency frameworks. -For example, to send multiple commands in fields with the `blocking` wrapper:: +For example, to send multiple commands in fields with the `blocking` wrapper: - with BlockingClient("hostname") as client: - resp1, resp2 = client.send([cmd1, cmd2]) +``` +with BlockingClient("hostname") as client: + resp1, resp2 = client.send([cmd1, cmd2]) +``` -while with the `asyncio` wrapper we would:: +while with the `asyncio` wrapper we would: - async with AsyncioClient("hostname") as client: - resp1, resp2 = await asyncio.gather( - client.send(cmd1), - client.send(cmd2) - ) +``` +async with AsyncioClient("hostname") as client: + resp1, resp2 = await asyncio.gather( + client.send(cmd1), + client.send(cmd2) + ) +``` The first has the advantage of simplicity, but blocks while waiting for data. 
The second allows multiple co-routines to use the client at the same time at the expense of a more verbose API. -The wrappers do not guarantee feature parity, for instance the ``flush_period`` +The wrappers do not guarantee feature parity, for instance the `flush_period` option is only available in the asyncio wrapper. - - - - - - -.. _reference: https://sans-io.readthedocs.io/ \ No newline at end of file +[reference]: https://sans-io.readthedocs.io/ diff --git a/docs/genindex.md b/docs/genindex.md new file mode 100644 index 000000000..73f1191b0 --- /dev/null +++ b/docs/genindex.md @@ -0,0 +1,3 @@ +# Index + + diff --git a/docs/genindex.rst b/docs/genindex.rst deleted file mode 100644 index 93eb8b294..000000000 --- a/docs/genindex.rst +++ /dev/null @@ -1,5 +0,0 @@ -API Index -========= - -.. - https://stackoverflow.com/a/42310803 diff --git a/docs/how-to.md b/docs/how-to.md new file mode 100644 index 000000000..6b1614172 --- /dev/null +++ b/docs/how-to.md @@ -0,0 +1,10 @@ +# How-to Guides + +Practical step-by-step guides for the more experienced user. + +```{toctree} +:maxdepth: 1 +:glob: + +how-to/* +``` diff --git a/docs/how-to/contribute.md b/docs/how-to/contribute.md new file mode 100644 index 000000000..f9c4ca1d7 --- /dev/null +++ b/docs/how-to/contribute.md @@ -0,0 +1,2 @@ +```{include} ../../.github/CONTRIBUTING.md +``` \ No newline at end of file diff --git a/docs/user/how-to/introspect-panda.rst b/docs/how-to/introspect-panda.md similarity index 56% rename from docs/user/how-to/introspect-panda.rst rename to docs/how-to/introspect-panda.md index 2bd129147..64ca6cd58 100644 --- a/docs/user/how-to/introspect-panda.rst +++ b/docs/how-to/introspect-panda.md @@ -1,22 +1,21 @@ -How to introspect a PandA -=========================== +# How to introspect a PandA Using a combination of `commands ` it is straightforward to query the PandA -to list all blocks, and all fields inside each block, that exist. 
+to list all blocks, and all fields inside each block, that exist. Call the following script, with the address of the PandA as the first and only command line argument: +```{literalinclude} ../../examples/introspect_panda.py +``` -.. literalinclude:: ../../../examples/introspect_panda.py - -This script can be found in ``examples/introspect_panda.py``. +This script can be found in `examples/introspect_panda.py`. By examining the `BlockInfo` structure returned from `GetBlockInfo` for each Block the number and description may be acquired for every block. -By examining the `FieldInfo` structure (which is fully printed in this example) the ``type``, -``sub-type``, ``description`` and ``label`` may all be found for every field. +By examining the `FieldInfo` structure (which is fully printed in this example) the `type`, +`sub-type`, `description` and `label` may all be found for every field. -Lastly the complete list of every ``BITS`` field in the ``PCAP`` block are gathered and -printed. See the documentation in the `Field Types `_ +Lastly the complete list of every `BITS` field in the `PCAP` block are gathered and +printed. See the documentation in the [Field Types](https://pandablocks-server.readthedocs.io/en/latest/fields.html?#field-types) section of the PandA Server documentation. diff --git a/docs/user/how-to/library-hdf.rst b/docs/how-to/library-hdf.md similarity index 67% rename from docs/user/how-to/library-hdf.rst rename to docs/how-to/library-hdf.md index 54ebd97a0..b24befe87 100644 --- a/docs/user/how-to/library-hdf.rst +++ b/docs/how-to/library-hdf.md @@ -1,34 +1,34 @@ -.. _library-hdf: +(library-hdf)= -How to use the library to capture HDF files -=========================================== +# How to use the library to capture HDF files The `commandline-hdf` introduced how to use the commandline to capture HDF files. The `write_hdf_files` function that is called to do this can also be integrated into custom Python applications. 
This guide shows how to do this. -Approach 1: Call the function directly --------------------------------------- +## Approach 1: Call the function directly If you need a one-shot configure and run application, you can use the function directly: -.. literalinclude:: ../../../examples/arm_and_hdf.py +```{literalinclude} ../../examples/arm_and_hdf.py +``` With the `AsyncioClient` as a `Context Manager `, this code sets up some fields of a PandA before taking a single acquisition. The code in `write_hdf_files` is responsible for arming the PandA. -.. note:: +:::{note} +There are no log messages emitted like in `commandline-hdf`. This is because +we have not configured the logging framework in this example. You can get +these messages by adding a call to `logging.basicConfig` like this: - There are no log messages emitted like in `commandline-hdf`. This is because - we have not configured the logging framework in this example. You can get - these messages by adding a call to `logging.basicConfig` like this:: +``` +logging.basicConfig(level=logging.INFO) +``` +::: - logging.basicConfig(level=logging.INFO) - -Approach 2: Create the pipeline yourself ----------------------------------------- +## Approach 2: Create the pipeline yourself If you need more control over the pipeline, for instance to display progress, you can create the pipeline yourself, and feed it data from the PandA. This @@ -36,7 +36,8 @@ means you can make decisions about when to start and stop acquisitions based on the `Data` objects that go past. For example, if we want to make a progress bar we could: -.. literalinclude:: ../../../examples/hdf_queue_reporting.py +```{literalinclude} ../../examples/hdf_queue_reporting.py +``` This time, after setting up the PandA, we create the `AsyncioClient.data` iterator ourselves. Each `Data` object we get is queued on the first `Pipeline` @@ -46,9 +47,8 @@ update a progress bar, or return as acquisition is complete. 
In a `finally ` block we stop the pipeline, which will wait for all data to flow through the pipeline and close the HDF file. -Performance ------------ +## Performance The commandline client and both these approaches use the same core code, so will give the same performance. The steps to consider in optimising performance are -outlined in `performance` \ No newline at end of file +outlined in `performance` diff --git a/docs/how-to/poll-changes.md b/docs/how-to/poll-changes.md new file mode 100644 index 000000000..bfab9b0e7 --- /dev/null +++ b/docs/how-to/poll-changes.md @@ -0,0 +1,3 @@ +# How to efficiently poll for changes + +Write something about using `*CHANGES` like Malcolm does. diff --git a/docs/how-to/run-container.md b/docs/how-to/run-container.md new file mode 100644 index 000000000..5649331d3 --- /dev/null +++ b/docs/how-to/run-container.md @@ -0,0 +1,14 @@ +# Run in a container + +Pre-built containers with PandABlocks-client and its dependencies already +installed are available on [Github Container Registry](https://ghcr.io/PandABlocks/PandABlocks-client). + +## Starting the container + +To pull the container from github container registry and run: + +``` +$ docker run ghcr.io/pandablocks/PandABlocks-client:latest --version +``` + +To get a released version, use a numbered release instead of `latest`. diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 000000000..730b3fdc1 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,56 @@ +--- +html_theme.sidebar_secondary.remove: true +--- + +```{include} ../README.md +:end-before: + +::::{grid} 2 +:gutter: 4 + +:::{grid-item-card} {material-regular}`directions_walk;2em` +```{toctree} +:maxdepth: 2 +tutorials +``` ++++ +Tutorials for installation and typical usage. New users start here. +::: + +:::{grid-item-card} {material-regular}`directions;2em` +```{toctree} +:maxdepth: 2 +how-to +``` ++++ +Practical step-by-step guides for the more experienced user. 
+::: + +:::{grid-item-card} {material-regular}`info;2em` +```{toctree} +:maxdepth: 2 +explanations +``` ++++ +Explanations of how it works and why it works that way. +::: + +:::{grid-item-card} {material-regular}`menu_book;2em` +```{toctree} +:maxdepth: 2 +reference +``` ++++ +Technical reference material including APIs and release notes. +::: + +:::: diff --git a/docs/index.rst b/docs/index.rst deleted file mode 100644 index 0023bc4d4..000000000 --- a/docs/index.rst +++ /dev/null @@ -1,29 +0,0 @@ -:html_theme.sidebar_secondary.remove: - -.. include:: ../README.rst - :end-before: when included in index.rst - -How the documentation is structured ------------------------------------ - -The documentation is split into 2 sections: - -.. grid:: 2 - - .. grid-item-card:: :material-regular:`person;4em` - :link: user/index - :link-type: doc - - The User Guide contains documentation on how to install and use pandablocks. - - .. grid-item-card:: :material-regular:`code;4em` - :link: developer/index - :link-type: doc - - The Developer Guide contains documentation on how to develop and contribute changes back to pandablocks. - -.. toctree:: - :hidden: - - user/index - developer/index diff --git a/docs/reference.md b/docs/reference.md new file mode 100644 index 000000000..ac4016691 --- /dev/null +++ b/docs/reference.md @@ -0,0 +1,12 @@ +# Reference + +Technical reference material including APIs and release notes. + +```{toctree} +:maxdepth: 1 +:glob: + +reference/* +genindex +Release Notes +``` diff --git a/docs/user/reference/api.rst b/docs/reference/api.rst similarity index 99% rename from docs/user/reference/api.rst rename to docs/reference/api.rst index b9071f801..9b3957400 100644 --- a/docs/user/reference/api.rst +++ b/docs/reference/api.rst @@ -89,4 +89,4 @@ from code: Utilities --------- - This package contains general methods for working with pandablocks. + This package contains general methods for working with pandablocks. 
\ No newline at end of file diff --git a/docs/tutorials.md b/docs/tutorials.md new file mode 100644 index 000000000..1fe66c541 --- /dev/null +++ b/docs/tutorials.md @@ -0,0 +1,10 @@ +# Tutorials + +Tutorials for installation and typical usage. New users start here. + +```{toctree} +:maxdepth: 1 +:glob: + +tutorials/* +``` diff --git a/docs/tutorials/commandline-hdf.md b/docs/tutorials/commandline-hdf.md new file mode 100644 index 000000000..7c835620c --- /dev/null +++ b/docs/tutorials/commandline-hdf.md @@ -0,0 +1,144 @@ +(commandline-hdf)= + +# Commandline Capture of HDF Files Tutorial + +This tutorial shows how to use the commandline tool to save an HDF file from the PandA +for each PCAP acquisition. It assumes that you have followed the `tutorial-load-save` tutorial +to setup the PandA. + +## Capturing some data + +In one terminal launch the HDF writer client, and tell it to capture 3 frames in a +location of your choosing: + +``` +pandablocks hdf --num=3 /tmp/panda-capture-%d.h5 +``` + +Where `` is the hostname or ip address of your PandA. This will connect +to the data port of the PandA and start listening for up to 3 acquisitions. It will +then write these into files: + +``` +/tmp/panda-capture-1.h5 +/tmp/panda-capture-2.h5 +/tmp/panda-capture-3.h5 +``` + +In a second terminal you can launch the acquisition: + +``` +$ pandablocks control +< *PCAP.ARM= +OK +``` + +This should write 1000 frames at 500Hz, printing in the first terminal window: + +``` +INFO:Opened '/tmp/panda-capture-1.h5' with 60 byte samples stored in 11 datasets +INFO:Closed '/tmp/panda-capture-1.h5' after writing 1000 samples. End reason is 'Ok' +``` + +You can then do `PCAP.ARM=` twice more to make the other files. + +## Examining the data + +You can use your favourite HDF reader to examine the data. It is written in `swmr` +mode so that you can read partial acquisitions before they are complete. 
+
+:::{note}
+Reading SWMR HDF5 files while they are being written to requires the use of a
+Posix compliant filesystem like a local disk or GPFS native client. NFS
+mounts are *not* Posix compliant.
+:::
+
+In the repository `examples/plot_counter_hdf.py` is an example of reading the
+file, listing the datasets, and plotting the counters:
+
+```{literalinclude} ../../examples/plot_counter_hdf.py
+```
+
+Running it on `/tmp/panda-capture-1.h5` will show the three counter values:
+
+```{eval-rst}
+.. plot::
+
+    for i in range(1, 4):
+        plt.plot(np.arange(1, 1001) * i, label=f"Counter {i}")
+    plt.legend()
+    plt.show()
+```
+
+You should see that they are all the same size:
+
+```
+$ ls -s --si /tmp/panda-capture-*.h5
+74k /tmp/panda-capture-1.h5
+74k /tmp/panda-capture-2.h5
+74k /tmp/panda-capture-3.h5
+```
+
+If you have h5diff you can check the contents are the same:
+
+```
+$ h5diff /tmp/panda-capture-1.h5 /tmp/panda-capture-2.h5
+$ h5diff /tmp/panda-capture-1.h5 /tmp/panda-capture-3.h5
+```
+
+## Collecting more data faster
+
+The test data is produced by a SEQ Block, configured to produce a high level
+for 1 prescaled tick, then a low level for 1 prescaled tick. The default
+setting is to produce 1000 repeats of these, with a prescale of 1ms and hence
+a period of 2ms. Each sample is 11 fields, totalling 60 bytes, which means
+that it will produce data at a modest 30kBytes/s for a total of 2s.
+We can increase this to a more taxing 30MBytes/s by reducing the
+prescaler to 1us. If we increase the prescaler to 10 million then we will
+sustain this data rate for 20s and write 600MByte files each time:
+
+```
+$ pandablocks control <hostname>
+< SEQ1.REPEATS?
+OK =1000 # It was doing 1k samples, change to 10M
+< SEQ1.REPEATS=10000000
+OK
+< SEQ1.PRESCALE?
+OK =1000
+< SEQ1.PRESCALE.UNITS?
+OK =us # It was doing 1ms ticks, change to 1us
+< SEQ1.PRESCALE=1
+OK
+```
+
+Let's write a single file this time, telling the command to also arm the PandA:
+
+```
+pandablocks hdf --arm <hostname> /tmp/biggerfile-%d.h5
+```
+
+Twenty seconds later we will get a file:
+
+```
+$ ls -s --si /tmp/biggerfile-*.h5
+602M /tmp/biggerfile-1.h5
+```
+
+Which looks very similar when plotted with the code above, just a bit bigger:
+
+```{eval-rst}
+.. plot::
+
+    for i in range(1, 4):
+        plt.plot(np.arange(1, 10000001) * i, label=f"Counter {i}")
+    plt.legend()
+    plt.show()
+```
+
+## Conclusion
+
+This tutorial has shown how to capture data to an HDF file using the commandline
+client. It is possible to use this commandline interface in production, but it is
+more likely to be integrated in an application that controls the acquisition as well
+as writing the data. This is covered in `library-hdf`. You can explore strategies
+on getting the maximum performance out of a PandA in `performance`.
diff --git a/docs/tutorials/control.md b/docs/tutorials/control.md
new file mode 100644
index 000000000..bf8cbf592
--- /dev/null
+++ b/docs/tutorials/control.md
@@ -0,0 +1,68 @@
+# Interactive Control Tutorial
+
+This tutorial shows how to use the commandline tool to open an interactive terminal
+to control a PandA.
+
+## Connect
+
+Open a terminal, and type:
+
+```
+pandablocks control <hostname>
+```
+
+Where `<hostname>` is the hostname or ip address of your PandA.
+
+## Type Commands
+
+You should be presented with a prompt where you can type PandABlocks-server
+[commands]. If you are on Linux you can tab complete commands with the TAB key:
+
+```
+< PCAP. # Hit TAB key...
+PCAP.ACTIVE PCAP.BITS1 PCAP.BITS3 PCAP.GATE PCAP.SAMPLES PCAP.TRIG PCAP.TS_END PCAP.TS_TRIG
+PCAP.BITS0 PCAP.BITS2 PCAP.ENABLE PCAP.HEALTH PCAP.SHIFT_SUM PCAP.TRIG_EDGE PCAP.TS_START
+```
+
+Pressing return will send the command to the server and display the response.
+ +## Control an acquisition + +You can check if an acquisition is currently in progress by getting the value of the +`PCAP.ACTIVE` field: + +``` +< PCAP.ACTIVE? +OK =0 +``` + +You can start and stop acquisitions with special "star" commands. To start an acquisition: + +``` +< *PCAP.ARM= +OK +``` + +You can now use the up arrow to recall the previous command, then press return: + +``` +< PCAP.ACTIVE? +OK =1 +``` + +This means that acquisition is in progress. You can stop it by disarming: + +``` +< *PCAP.DISARM= +OK +< PCAP.ACTIVE? +OK =0 +``` + +## Conclusion + +This tutorial has shown how to start and stop an acquisition from the commandline +client. It can also be used to send any other control [commands] to query and set +variables on the PandA. + +[commands]: https://pandablocks-server.readthedocs.io/en/latest/commands.html diff --git a/docs/tutorials/installation.md b/docs/tutorials/installation.md new file mode 100644 index 000000000..26bf50524 --- /dev/null +++ b/docs/tutorials/installation.md @@ -0,0 +1,48 @@ +# Installation + +## Check your version of python + +You will need python 3.7 or later. 
You can check your version of python by
+typing into a terminal:
+
+```
+$ python3 --version
+```
+
+## Create a virtual environment
+
+It is recommended that you install into a “virtual environment” so this
+installation will not interfere with any existing Python software:
+
+```
+$ python3 -m venv /path/to/venv
+$ source /path/to/venv/bin/activate
+```
+
+## Installing the library
+
+You can now use `pip` to install the library and its dependencies:
+
+```
+$ python3 -m pip install pandablocks
+```
+
+If you need to write HDF files you should install the hdf5 extra:
+
+```
+$ python3 -m pip install pandablocks[hdf5]
+```
+
+If you require a feature that is not currently released you can also install
+from github:
+
+```
+$ python3 -m pip install git+https://github.com/PandABlocks/PandABlocks-client.git
+```
+
+The library should now be installed and the commandline interface on your path.
+You can check the version that has been installed by typing:
+
+```
+$ pandablocks --version
+```
diff --git a/docs/user/tutorials/load-save.rst b/docs/tutorials/load-save.md
similarity index 51%
rename from docs/user/tutorials/load-save.rst
rename to docs/tutorials/load-save.md
index 5134c40f6..1147cc068 100644
--- a/docs/user/tutorials/load-save.rst
+++ b/docs/tutorials/load-save.md
@@ -1,19 +1,19 @@
-.. _tutorial-load-save:
+(tutorial-load-save)=
 
-Commandline Load/Save Tutorial
-==============================
+# Commandline Load/Save Tutorial
 
 This tutorial shows how to use the commandline tool to save the state of all
 the Blocks and Fields in a PandA, and load a new state from file.  It assumes
 that you know the basic concepts of a PandA as outlined in the PandABlocks-FPGA
-blinking LEDs tutorial_.
+blinking LEDs [tutorial].
-Save ----- +## Save -You can save the current state using the save command as follows:: +You can save the current state using the save command as follows: - $ pandablocks save +``` +$ pandablocks save +``` The save file is a text file containing the sequence of pandablocks control commands that will set up the PandA to match its state at the time of the save. @@ -22,35 +22,40 @@ fields. e.g. the first few lines of the tutorial save file look like this: -.. literalinclude:: ../../../src/pandablocks/saves/tutorial.sav - :lines: 1-12 +```{literalinclude} ../../src/pandablocks/saves/tutorial.sav +:lines: 1-12 +``` -Load ----- +## Load -To restore a PandA to a previously saved state use the load command as follows:: +To restore a PandA to a previously saved state use the load command as follows: - $ pandablocks load +``` +$ pandablocks load +``` -This is equivalent to typing the sequence of commands in into the +This is equivalent to typing the sequence of commands in \ into the pandablocks control command line. -To load the preconfigured tutorial state:: +To load the preconfigured tutorial state: - $ pandablocks load --tutorial +``` +$ pandablocks load --tutorial +``` The tutorial sets up a Seqencer block driving 3 Counter blocks and a Position Capture block. This configuration is the starting point for the next tutorial: -:ref:`commandline-hdf` +{ref}`commandline-hdf` -.. note:: - - The Web UI will not change the Blocks visible on the screen when you use - ``pandablocks load``. If you want all the connected Blocks to appear in the - UI then restart the services on the PandA (Admin > System > Reboot/Restart) +:::{note} +The Web UI will not change the Blocks visible on the screen when you use +`pandablocks load`. If you want all the connected Blocks to appear in the +UI then restart the services on the PandA (Admin > System > Reboot/Restart) +::: The tutorial blocks are wired up as shown in the following Web UI layout. -.. 
image:: tutorial_layout.png +```{image} tutorial_layout.png +``` -.. _tutorial: https://pandablocks-fpga.readthedocs.io/en/latest/tutorials/tutorial1_blinking_leds.html +[tutorial]: https://pandablocks-fpga.readthedocs.io/en/latest/tutorials/tutorial1_blinking_leds.html diff --git a/docs/user/tutorials/tutorial_layout.png b/docs/tutorials/tutorial_layout.png similarity index 100% rename from docs/user/tutorials/tutorial_layout.png rename to docs/tutorials/tutorial_layout.png diff --git a/docs/user/explanations/docs-structure.rst b/docs/user/explanations/docs-structure.rst deleted file mode 100644 index f25a09baa..000000000 --- a/docs/user/explanations/docs-structure.rst +++ /dev/null @@ -1,18 +0,0 @@ -About the documentation ------------------------ - - :material-regular:`format_quote;2em` - - The Grand Unified Theory of Documentation - - -- David Laing - -There is a secret that needs to be understood in order to write good software -documentation: there isn't one thing called *documentation*, there are four. - -They are: *tutorials*, *how-to guides*, *technical reference* and *explanation*. -They represent four different purposes or functions, and require four different -approaches to their creation. Understanding the implications of this will help -improve most documentation - often immensely. - -`More information on this topic. `_ diff --git a/docs/user/how-to/poll-changes.rst b/docs/user/how-to/poll-changes.rst deleted file mode 100644 index 6be376256..000000000 --- a/docs/user/how-to/poll-changes.rst +++ /dev/null @@ -1,4 +0,0 @@ -How to efficiently poll for changes -=================================== - -Write something about using ``*CHANGES`` like Malcolm does. 
diff --git a/docs/user/how-to/run-container.rst b/docs/user/how-to/run-container.rst deleted file mode 100644 index 7285ef9b9..000000000 --- a/docs/user/how-to/run-container.rst +++ /dev/null @@ -1,15 +0,0 @@ -Run in a container -================== - -Pre-built containers with pandablocks and its dependencies already -installed are available on `Github Container Registry -`_. - -Starting the container ----------------------- - -To pull the container from github container registry and run:: - - $ docker run ghcr.io/PandABlocks/PandABlocks-client:main --version - -To get a released version, use a numbered release instead of ``main``. diff --git a/docs/user/index.rst b/docs/user/index.rst deleted file mode 100644 index c315ff2d1..000000000 --- a/docs/user/index.rst +++ /dev/null @@ -1,67 +0,0 @@ -User Guide -========== - -Documentation is split into four categories, also accessible from links in the -side-bar. - -.. grid:: 2 - :gutter: 4 - - .. grid-item-card:: :material-regular:`directions_walk;3em` - - .. toctree:: - :caption: Tutorials - :maxdepth: 1 - - tutorials/installation - tutorials/commandline-hdf - tutorials/control - tutorials/load-save - - +++ - - Tutorials for installation and typical usage. New users start here. - - .. grid-item-card:: :material-regular:`directions;3em` - - .. toctree:: - :caption: How-to Guides - :maxdepth: 1 - - how-to/run-container - how-to/introspect-panda - how-to/library-hdf - how-to/poll-changes - - +++ - - Practical step-by-step guides for the more experienced user. - - .. grid-item-card:: :material-regular:`info;3em` - - .. toctree:: - :caption: Explanations - :maxdepth: 1 - - explanations/docs-structure - explanations/performance - explanations/sans-io - - +++ - - Explanations of how the library works and why it works that way. - - .. grid-item-card:: :material-regular:`menu_book;3em` - - .. 
toctree:: - :caption: Reference - :maxdepth: 1 - - reference/api - reference/changelog - reference/contributing - ../genindex - - +++ - - Technical reference material including APIs and release notes. diff --git a/docs/user/reference/appendix.rst b/docs/user/reference/appendix.rst deleted file mode 100644 index 9c10c37eb..000000000 --- a/docs/user/reference/appendix.rst +++ /dev/null @@ -1,15 +0,0 @@ -:orphan: - -Appendix -======== - -These definitions are needed to quell sphinx warnings. - -.. py:class:: T - :canonical: pandablocks.commands.T - - Parameter for Generic class `Command`, indicating its response type - -.. py:class:: socket.socket - - The docs for this are `here ` \ No newline at end of file diff --git a/docs/user/reference/changelog.rst b/docs/user/reference/changelog.rst deleted file mode 100644 index 09929fe43..000000000 --- a/docs/user/reference/changelog.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../CHANGELOG.rst diff --git a/docs/user/reference/contributing.rst b/docs/user/reference/contributing.rst deleted file mode 100644 index 65b992f08..000000000 --- a/docs/user/reference/contributing.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../../.github/CONTRIBUTING.rst diff --git a/docs/user/tutorials/commandline-hdf.rst b/docs/user/tutorials/commandline-hdf.rst deleted file mode 100644 index 28b5f0cec..000000000 --- a/docs/user/tutorials/commandline-hdf.rst +++ /dev/null @@ -1,126 +0,0 @@ -.. _commandline-hdf: - -Commandline Capture of HDF Files Tutorial -========================================= - -This tutorial shows how to use the commandline tool to save an HDF file from the PandA -for each PCAP acquisition. It assumes that you have followed the `tutorial-load-save` tutorial -to setup the PandA. 
- -Capturing some data -------------------- - -In one terminal launch the HDF writer client, and tell it to capture 3 frames in a -location of your choosing:: - - pandablocks hdf --num=3 /tmp/panda-capture-%d.h5 - -Where ```` is the hostname or ip address of your PandA. This will connect -to the data port of the PandA and start listening for up to 3 acquisitions. It will -then write these into files:: - - /tmp/panda-capture-1.h5 - /tmp/panda-capture-2.h5 - /tmp/panda-capture-3.h5 - -In a second terminal you can launch the acquisition:: - - $ pandablocks control - < *PCAP.ARM= - OK - -This should write 1000 frames at 500Hz, printing in the first terminal window:: - - INFO:Opened '/tmp/panda-capture-1.h5' with 60 byte samples stored in 11 datasets - INFO:Closed '/tmp/panda-capture-1.h5' after writing 1000 samples. End reason is 'Ok' - -You can then do ``PCAP.ARM=`` twice more to make the other files. - -Examining the data ------------------- - -You can use your favourite HDF reader to examine the data. It is written in `swmr` -mode so that you can read partial acquisitions before they are complete. - -.. note:: - - Reading SWMR HDF5 files while they are being written to require the use of a - Posix compliant filesystem like a local disk or GPFS native client. NFS - mounts are *not* Posix compliant. - -In the repository ``examples/plot_counter_hdf.py`` is an example of reading the -file, listing the datasets, and plotting the counters: - -.. literalinclude:: ../../../examples/plot_counter_hdf.py - -Running it on ``/tmp/panda-capture-1.h5`` will show the three counter values: - -.. 
plot:: - - for i in range(1, 4): - plt.plot(np.arange(1, 1001) * i, label=f"Counter {i}") - plt.legend() - plt.show() - -You should see that they are all the same size:: - - $ ls -s --si /tmp/panda-capture-*.h5 - 74k /tmp/panda-capture-1.h5 - 74k /tmp/panda-capture-2.h5 - 74k /tmp/panda-capture-3.h5 - -If you have h5diff you can check the contents are the same:: - - $ h5diff /tmp/panda-capture-1.h5 /tmp/panda-capture-2.h5 - $ h5diff /tmp/panda-capture-1.h5 /tmp/panda-capture-3.h5 - -Collecting more data faster ---------------------------- - -The test data is produced by a SEQ Block, configured to produce a high level -for 1 prescaled tick, then a low level for 1 prescaled tick. The default -setting is to produce 1000 repeats of these, with a prescale of 1ms and hence -a period of 2ms. Each sample is 11 fields, totalling 60 bytes, which means -that it will produce data at a modest 30kBytes/s for a total of 2s. -We can increase this to a more taxing 30MBytes/s by reducing the -prescaler to 1us. If we increase the prescaler to 10 million then we will -sustain this data rate for 20s and write 600MByte files each time:: - - $ pandablocks control - < SEQ1.REPEATS? - OK =1000 # It was doing 1k samples, change to 10M - < SEQ1.REPEATS=10000000 - OK - < SEQ1.PRESCALE? - OK =1000 - < SEQ1.PRESCALE.UNITS? - OK =us # It was doing 1ms ticks, change to 1us - < SEQ1.PRESCALE=1 - OK - -Lets write a single file this time, telling the command to also arm the PandA:: - - pandablocks hdf --arm /tmp/biggerfile-%d.h5 - -Twenty seconds later we will get a file:: - - $ ls -s --si /tmp/biggerfile-*.h5 - 602M /tmp/biggerfile-1.h5 - -Which looks very similar when plotted with the code above, just a bit bigger: - -.. plot:: - - for i in range(1, 4): - plt.plot(np.arange(1, 10000001) * i, label=f"Counter {i}") - plt.legend() - plt.show() - -Conclusion ----------- - -This tutorial has shown how to capture data to an HDF file using the commandline -client. 
It is possible to use this commandline interface in production, but it is -more likely to be integrated in an application that controls the acquisition as well -as writing the data. This is covered in `library-hdf`. You can explore strategies -on getting the maximum performance out of a PandA in `performance`. diff --git a/docs/user/tutorials/control.rst b/docs/user/tutorials/control.rst deleted file mode 100644 index 339d1cc91..000000000 --- a/docs/user/tutorials/control.rst +++ /dev/null @@ -1,61 +0,0 @@ -Interactive Control Tutorial -============================ - -This tutorial shows how to use the commandline tool to open an interactive terminal -to control a PandA. - -Connect -------- - -Open a terminal, and type:: - - pandablocks control - -Where ```` is the hostname or ip address of your PandA. - -Type Commands -------------- - -You should be presented with a prompt where you can type PandABlocks-server -commands_. If you are on Linux you can tab complete commands with the TAB key:: - - < PCAP. # Hit TAB key... - PCAP.ACTIVE PCAP.BITS1 PCAP.BITS3 PCAP.GATE PCAP.SAMPLES PCAP.TRIG PCAP.TS_END PCAP.TS_TRIG - PCAP.BITS0 PCAP.BITS2 PCAP.ENABLE PCAP.HEALTH PCAP.SHIFT_SUM PCAP.TRIG_EDGE PCAP.TS_START - -Pressing return will send the command to the server and display the response. - -Control an acquisition ----------------------- - -You can check if an acquisition is currently in progress by getting the value of the -``PCAP.ACTIVE`` field:: - - < PCAP.ACTIVE? - OK =0 - -You can start and stop acquisitions with special "star" commands. To start an acquisition:: - - < *PCAP.ARM= - OK - -You can now use the up arrow to recall the previous command, then press return:: - - < PCAP.ACTIVE? - OK =1 - -This means that acquisition is in progress. You can stop it by disarming:: - - < *PCAP.DISARM= - OK - < PCAP.ACTIVE? - OK =0 - -Conclusion ----------- - -This tutorial has shown how to start and stop an acquisition from the commandline -client. 
It can also be used to send any other control commands_ to query and set -variables on the PandA. - -.. _commands: https://pandablocks-server.readthedocs.io/en/latest/commands.html diff --git a/docs/user/tutorials/installation.rst b/docs/user/tutorials/installation.rst deleted file mode 100644 index 41131e63b..000000000 --- a/docs/user/tutorials/installation.rst +++ /dev/null @@ -1,42 +0,0 @@ -Installation -============ - -Check your version of python ----------------------------- - -You will need python 3.7 or later. You can check your version of python by -typing into a terminal:: - - $ python3 --version - - -Create a virtual environment ----------------------------- - -It is recommended that you install into a “virtual environment” so this -installation will not interfere with any existing Python software:: - - $ python3 -m venv /path/to/venv - $ source /path/to/venv/bin/activate - - -Installing the library ----------------------- - -You can now use ``pip`` to install the library:: - - python3 -m pip install pandablocks - -If you need to write HDF files you should install the ``hdf5`` extra:: - - python3 -m pip install pandablocks[hdf5] - -If you require a feature that is not currently released you can also install -from github:: - - python3 -m pip install git+git://github.com/PandABlocks/PandABlocks-client.git - -The library should now be installed and the commandline interface on your path. 
-You can check the version that has been installed by typing:: - - pandablocks --version diff --git a/pyproject.toml b/pyproject.toml index 00da045e8..73539a517 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [build-system] -requires = ["setuptools>=64", "setuptools_scm[toml]>=6.2", "wheel"] +requires = ["setuptools>=64", "setuptools_scm[toml]>=6.2"] build-backend = "setuptools.build_meta" [project] @@ -17,7 +17,7 @@ description = "A Python client to control and data ports of the PandABlocks TCP dependencies = ["typing-extensions;python_version<'3.8'", "numpy", "click"] dynamic = ["version"] license.file = "LICENSE" -readme = "README.rst" +readme = "README.md" requires-python = ">=3.7" [project.optional-dependencies] @@ -25,12 +25,13 @@ h5py = ["h5py", "matplotlib"] dev = [ # A dev install will require [h5py] packages too "pandablocks[h5py]", - "black", "mypy", "mock", "types-mock", "atomicwrites", "typed-ast", + "copier", + "myst-parser", "pipdeptree", "pre-commit", "pydata-sphinx-theme>=0.12", @@ -85,7 +86,7 @@ legacy_tox_ini = """ [tox] skipsdist=True -[testenv:{pre-commit,mypy,pytest,docs}] +[testenv:{pre-commit,type-checking,tests,docs}] # Don't create a virtualenv for the command, requires tox-direct plugin direct = True passenv = * @@ -98,18 +99,21 @@ allowlist_externals = commands = pytest: pytest --cov=src/pandablocks --cov-report term --cov-report xml:cov.xml {posargs} mypy: mypy src tests {posargs} - pre-commit: pre-commit run --all-files {posargs} + pre-commit: pre-commit run --all-files --show-diff-on-failure {posargs} + type-checking: mypy src tests {posargs} + tests: pytest --cov=pandablocks --cov-report term --cov-report xml:cov.xml {posargs} docs: sphinx-{posargs:build -EW --keep-going} -T docs build/html """ - [tool.ruff] src = ["src", "tests"] line-length = 88 -select = [ - "C4", # flake8-comprehensions - https://beta.ruff.rs/docs/rules/#flake8-comprehensions-c4 - "E", # pycodestyle errors - 
https://beta.ruff.rs/docs/rules/#error-e - "F", # pyflakes rules - https://beta.ruff.rs/docs/rules/#pyflakes-f - "W", # pycodestyle warnings - https://beta.ruff.rs/docs/rules/#warning-w - "I001", # isort +lint.select = [ + "B", # flake8-bugbear - https://docs.astral.sh/ruff/rules/#flake8-bugbear-b + "C4", # flake8-comprehensions - https://docs.astral.sh/ruff/rules/#flake8-comprehensions-c4 + "E", # pycodestyle errors - https://docs.astral.sh/ruff/rules/#error-e + "F", # pyflakes rules - https://docs.astral.sh/ruff/rules/#pyflakes-f + "W", # pycodestyle warnings - https://docs.astral.sh/ruff/rules/#warning-w + "I", # isort - https://docs.astral.sh/ruff/rules/#isort-i + "UP", # pyupgrade - https://docs.astral.sh/ruff/rules/#pyupgrade-up ] diff --git a/src/pandablocks/__init__.py b/src/pandablocks/__init__.py index 457ddb1f8..26d23badb 100644 --- a/src/pandablocks/__init__.py +++ b/src/pandablocks/__init__.py @@ -1,11 +1,3 @@ -import sys - -if sys.version_info < (3, 8): - from importlib_metadata import version # noqa -else: - from importlib.metadata import version # noqa - -__version__ = version("pandablocks") -del version +from ._version import __version__ __all__ = ["__version__"] diff --git a/src/pandablocks/connections.py b/src/pandablocks/connections.py index 9645e72ab..0e2cc8294 100644 --- a/src/pandablocks/connections.py +++ b/src/pandablocks/connections.py @@ -105,8 +105,8 @@ def __iter__(self): def __next__(self) -> bytes: try: return self.read_line() - except NeedMoreData: - raise StopIteration() + except NeedMoreData as err: + raise StopIteration() from err @dataclass diff --git a/tests/test_asyncio.py b/tests/test_asyncio.py index 9e3902a8d..fd1881eda 100644 --- a/tests/test_asyncio.py +++ b/tests/test_asyncio.py @@ -54,7 +54,7 @@ async def test_asyncio_data_timeout(dummy_server_async, fast_dump): dummy_server_async.data = fast_dump async with AsyncioClient("localhost") as client: with pytest.raises(asyncio.TimeoutError, match="No data received for 0.1s"): 
- async for data in client.data(frame_timeout=0.1): + async for _ in client.data(frame_timeout=0.1): "This goes forever, when it runs out of data we will get our timeout" diff --git a/tests/test_cli.py b/tests/test_cli.py index e8559b464..1e1ed9c20 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -73,8 +73,8 @@ def __call__(self, prompt): assert prompt == cli.PROMPT try: return self._commands.popleft() - except IndexError: - raise EOFError() + except IndexError as err: + raise EOFError() from err def test_interactive_simple(dummy_server_in_thread, capsys):