diff --git a/.bumpversion.cfg b/.bumpversion.cfg deleted file mode 100644 index a8535b949..000000000 --- a/.bumpversion.cfg +++ /dev/null @@ -1,25 +0,0 @@ -[bumpversion] -current_version = 1.3.1 -commit = True -tag = False -parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\.(?P[a-z]+)(?P\d+))? -serialize = - {major}.{minor}.{patch}.{release}{build} - {major}.{minor}.{patch} - -[bumpversion:part:release] -optional_value = prod -first_value = dev -values = - dev - prod - -[bumpversion:part:build] - -[bumpversion:file:setup.py] -search = version="{current_version}" -replace = version="{new_version}" - -[bumpversion:file:src/maestral/__init__.py] - -[bumpversion:file:docs/conf.py] diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 102aca717..e26202536 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -19,11 +19,17 @@ jobs: runs-on: ${{ matrix.platform }} steps: - - name: Checkout project + + - name: Checkout merge commit uses: actions/checkout@v2 + if: github.event_name == 'pull_request_target' with: ref: 'refs/pull/${{ github.event.number }}/merge' + - name: Checkout head commit + uses: actions/checkout@v2 + if: github.event_name != 'pull_request_target' + - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: @@ -34,11 +40,12 @@ jobs: python -m pip install --upgrade pip python -m pip install --upgrade pytest python -m pip install --upgrade pytest-cov + python -m pip install --upgrade pytest-rerunfailures python -m pip install . 
 - name: Test with pytest run: | - pytest --cov=maestral --cov-report=xml tests/offline + pytest --reruns 5 --cov=maestral --cov-report=xml tests/offline - name: Upload coverage to Codecov uses: codecov/codecov-action@v1 @@ -46,7 +53,7 @@ file: ./coverage.xml flags: pytest env_vars: OS,PYTHON,TYPE name: pytests env: OS: ${{ matrix.platform }} PYTHON: ${{ matrix.python-version }} @@ -71,11 +78,16 @@ jobs: runs-on: ${{ matrix.platform }} steps: - - name: Checkout project + - name: Checkout merge commit uses: actions/checkout@v2 + if: github.event_name == 'pull_request_target' with: ref: 'refs/pull/${{ github.event.number }}/merge' + - name: Checkout head commit + uses: actions/checkout@v2 + if: github.event_name != 'pull_request_target' + - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: @@ -86,6 +98,7 @@ jobs: python -m pip install --upgrade pip python -m pip install --upgrade pytest python -m pip install --upgrade pytest-cov + python -m pip install --upgrade pytest-rerunfailures python -m pip install . 
- name: Get short-lived Dropbox token @@ -99,11 +112,11 @@ jobs: -d client_id=2jmbq42w7vof78h) token=$(echo $auth_result | python -c "import sys, json; print(json.load(sys.stdin)['access_token'])") echo "::add-mask::$token" - echo "DROPBOX_TOKEN=$token" >> $GITHUB_ENV + echo "DROPBOX_ACCESS_TOKEN=$token" >> $GITHUB_ENV - name: Test with pytest run: | - pytest --cov=maestral --cov-report=xml tests/linked + pytest -v --reruns 5 --cov=maestral --cov-report=xml tests/linked - name: Upload coverage to Codecov uses: codecov/codecov-action@v1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 7d5dff8e6..aa9949fdf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,124 @@ +## v1.4.0 + +This release brings significant extensions to the command line interface: It introduces +commands to create and manage shared links, to compare older version of a file and print +the diff output to the terminal, and commands for direct access to config values (note +the warning below). It also adds optional one-way syncing, for instance to keep a mirror +of a remote Dropbox folder while ignoring local changes. + +Several bugs have been fixed which could occur when resuming the sync activity after the +connection had been lost while indexing a remote folder. + +Finally, this release removes automatic error reporting via Bugsnag. Please file any bug +reports as issues on GitHub where it is possible to follow up. + +#### Added: + +* Added a command `maestral diff` to compare different versions of a text file. The + resulting diff is printed to the console. Credit goes to @OrangeFran. +* Resurrected the command `maestral revs` to list previous versions (revisions) of a file. +* Added a command group `maestral sharelink` to create and manage shared links. + Subcommands are: + + * `create`: Create a shared link for a file or folder, optionally with password + protection and an expiry date on supported accounts (business and professional). 
+ * `list`: List shared links, either for a specific file or folder or for all items + in your Dropbox. + * `revoke`: Revoke a shared link. + +* Added a command group `maestral config` to provide direct access to config values. + Subcommands are: + + * `get`: Gets the config value for a key. + * `set`: Sets the config value for a key. + + This provides access to previously inaccessible config values such as + `reindex_interval` or `max_cpu_percent`. Please refer to the Wiki for an overview of all + config values. Use the `set` command with caution: setting some config values may + leave the daemon in an inconsistent state (e.g., changing the location of the Dropbox + folder). Always use the equivalent command from the Settings group (e.g., `maestral + move-dir`). +* Added the ability to disable a single sync direction, for instance to enable download + syncs only. This can be useful when you want to mirror a remote folder while ignoring + local changes or when syncing to a file system which does not support inotify. To use + this, set the respective config values for `upload` or `download` to False. Note that + conflict resolution remains unaffected. For instance, when an unsynced local change + would be overwritten by a remote change, the local file will be moved to a + "conflicting copy" first. However, the conflicting copy will not be uploaded. + +#### Changed: + +* Changes to indexing: + + * Avoid scanning of objects matching an `.mignore` pattern (file watches will still be + added however). This results in performance improvements during startup and resume. + A resulting behavioral change is that **maestral will remove files matching an + ignore pattern from Dropbox**. After this change it will be immaterial if an + `.mignore` pattern is added before or after having matching files in Dropbox. 
+ * If Maestral is quit or interrupted during indexing, for instance due to connection + problems, it will later resume from the same position instead of restarting from the + beginning. + * Indexing will no longer skip excluded folders. This is necessary for the above + change. + * Defer periodic reindexing, typically carried out weekly, if the device is not + connected to an AC power supply. This prevents draining the battery when hashing + file contents. + +* Changes to CLI: + + * Moved linking and unlinking to a new command group `maestral auth` with subcommands + `link`, `unlink` and `status`. + * Renamed the command `file-status` to `filestatus`. + * Added a `--yes, -Y` flag to the `unlink` command to skip the confirmation prompt. + * Renamed the `configs` command to list config files to `config-files`. + * Added an option `--clean` to `config-files` to remove all stale config files (those + without a linked Dropbox account). + +* Improved the error message when the user is running out of inotify watches: Recommend + default values of `max_user_watches = 524288` and `max_user_instances = 1024` or + double the current values, whichever is higher. Advise to apply the changes with + `sysctl -p`. + +#### Fixed: + +* Fixes an issue with the CLI on Python 3.6 where commands that print dates to the console + would raise an exception. +* Properly handle a rare OSError "[Errno 41] Protocol wrong type for socket" on macOS, + see https://bugs.python.org/issue33450. +* Allow creating local files even if we cannot set their permissions, for instance on + some mounted NTFS drives. +* Fixes an issue with the selective sync dialog in the Qt / Linux GUI where the "Update" + button could be incorrectly enabled or disabled. +* Fixes an issue where a lost internet connection while starting the sync could lead to + a stuck sync thread or an endless indexing cycle. 
+* Fixes an issue where a lost internet connection during the download of a folder newly + included in selective sync could result in the download never being completed. +* Fixes an issue where pausing the sync during the download of a folder newly included + in selective sync could result in the download never being completed. + +#### Removed: + +* Removed automatic error reporting via bugsnag. +* Removed from CLI: + + * The `maestral restart` command. Use `stop` and `start` instead. + * The `maestral account-info` command. Use `maestral auth status` instead. + +* Removed the public API methods `Maestral.resume_sync` and `Maestral.pause_sync`. Use + `Maestral.start_sync` and `Maestral.stop_sync` instead. + +#### Dependencies: + +* Bumped survey to version >=3.2.2,<4.0. +* Bumped keyring to version >=22. +* Bumped watchdog to version >= 2.0. +* Added `desktop-notifier` dependency. This is a spin-off project from Maestral, built on + the code previously in the `notify` module. +* Removed the bugsnag dependency. + ## v1.3.1 -#### Fixes: +#### Fixed: * Fixes an incorrect entry point for the Qt GUI. @@ -25,7 +143,7 @@ series of bug fixes for GUI and daemon. #### Changed: * Significant improvements to the command line interface: - * Overhauled all CLI dialogs with nicer formatting and more interactive prompts + * Overhauled all CLI dialogs with nicer formatting and more interactive prompts using the `survey` package. * Improved output of many CLI commands, including `ls`, `activity`, and `restore`. * Increased speed of many CLI commands by importing only necessary modules. @@ -42,7 +160,7 @@ series of bug fixes for GUI and daemon. * The `Maestral.excluded_items` property is no longer read-only. * Some refactoring of the `cli` module to prepare for shell completion support. -#### Fixes: +#### Fixed: * Fixes an issue where all newly downloaded files would be created with 755 permissions. They are now created with the user's default permissions for new files instead. 
@@ -55,10 +173,10 @@ series of bug fixes for GUI and daemon. * Fixes possible loss of data when excluding an item from syncing while it is downloaded. This is no longer possible and will raise a `BusyError` instead. * Fixes an issue where `maestral ls` would fail when run with the `-l, --long` flag. -* Fixes an occasional `IndexError` during a download sync when trying to query past +* Fixes an occasional `IndexError` during a download sync when trying to query past versions of a deleted item. * Fixes an issue which could cause a segfault of the selective sync dialog on macOS. -* Fixes an issue where the selective sync dialog on Linux would not load the contents of +* Fixes an issue where the selective sync dialog on Linux would not load the contents of more than 10 folders. * Fixes a regression with the autostart functionality of the Linux GUI. Autostart entries created with v1.2.2 will need be reset by toggling the checkbox "start on @@ -94,7 +212,7 @@ series of bug fixes for GUI and daemon. This release focuses on bug fixes and performance improvements. In particular, memory usage has been improved when syncing a Dropbox folder with a large number of items. -#### Changes: +#### Changed: - `maestral file-status` now accepts relative paths. - Runs the daemon in a Python interpreter with -OO flags. This strips docstrings and saves @@ -112,7 +230,7 @@ usage has been improved when syncing a Dropbox folder with a large number of ite - Switch from PyInstaller to [briefcase](https://github.com/beeware/briefcase) for packaging on macOS. -#### Fixes: +#### Fixed: - Fixes an issue which would prevent the daemon from starting on macOS when running with Python 3.6. @@ -155,7 +273,7 @@ full compatibility from macOS 10.13 High Sierra to macOS 11.0 Big Sur. - Improves log messages when the connection to Dropbox is lost. - Performance improvements to `maestral activity` in case of very large sync queues. 
-#### Fixes: +#### Fixed: - Fixes a database integrity error due to an unfulfilled unique constraint. - Fixes an issue when the daemon is launched with systemd where systemd would unexpectedly diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ed1cdcd74..b84c3fe20 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,5 +1,5 @@ -## Guidelines +### Code To start, install maestral with the `dev` extra to get all dependencies required for development: @@ -8,7 +8,8 @@ development: pip3 install maestral[dev] ``` -### Checking the Format, Coding Style, and Type Hints +This will install packages to check and enforce the code style, use pre-commit hooks and +bump the current version. Code is formatted with [black](https://github.com/psf/black). Coding style is checked with [flake8](http://flake8.pycqa.org). @@ -23,7 +24,8 @@ pre-commit run -a ``` You can also install the provided pre-commit hook to run checks on every commit. This -will however significantly slow down commits. +will however significantly slow down commits. An introduction to pre-commit commit hooks +is given at [https://pre-commit.com](https://pre-commit.com). ### Documentation @@ -40,7 +42,7 @@ pip3 install maestral[docs] The API documentation is mostly based on doc strings. Inline comments should be used whenever code may be difficult to understand for others. -## Tests +### Tests The test suite uses a mixture of [unittest](https://docs.python.org/3.8/library/unittest.html) and [pytest](https://pytest-cov.readthedocs.io/en/latest/), depending on what is most @@ -56,10 +58,30 @@ indexing and cleaning up sync events, and for particularly complex functions tha prone to regressions. The current test suite uses a Dropbox access token provided by the environment variable -`DROPBOX_TOKEN` to connect to a real account. 
The GitHub action which is running the -tests will set this environment variable for you with a temporary access token that +`DROPBOX_ACCESS_TOKEN` or a refresh token provided by `DROPBOX_REFRESH_TOKEN` to connect +to a real account. The GitHub action which is running the tests will set the +`DROPBOX_ACCESS_TOKEN` environment variable for you with a temporary access token that expires after 4 hours. Tests are run on `ubuntu-latest` and `macos-latest` in parallel -on different accounts and you should acquire a "lock" on the account before running -tests. Fixtures to create and clean up a test config and to acquire a lock are provided -in the `tests/linked/conftest.py`. If you run the tests locally, you will need to -provide an access token for your own Dropbox account. +on different accounts. + +When using the GitHub test runner, you should acquire a "lock" on the account before +running tests to prevent them from interfering with each other by creating a folder +`test.lock` in the root of the Dropbox folder. This folder should have a +`client_modified` time set in the future, to the expiry time of the lock. Fixtures to +create and clean up a test config and to acquire a lock are provided in the +`tests/linked/conftest.py`. + +If you run the tests locally, you will need to provide a refresh or access token for +your own Dropbox account. If your account is already linked with Maestral, it will have +saved a long-lived "refresh token" in your system keyring. You can access it manually or +through the Python API: + +```Python +from maestral.main import Maestral + +m = Maestral() +print(m.client.auth.refresh_token) +``` + +You can then store the retrieved refresh token in the environment variable +`DROPBOX_REFRESH_TOKEN` to be automatically picked up by the tests. 
diff --git a/README.md b/README.md index 1ad8be949..e4a105a35 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,8 @@ or downloading a file if it already exists with the same content locally or in t ## Warning -- Never sync a folder with both the official Dropbox client and Maestral at the same time. +- Never sync a local folder with both the official Dropbox client and Maestral at the same + time. - Network drives and some external hard drives are not supported as locations for the Dropbox folder. @@ -167,6 +168,22 @@ month to offset the cost of an Apple Developer account to sign and notarize the # Acknowledgements +Maestral directly uses code from the following projects: + - The config module uses code from the [Spyder IDE](https://github.com/spyder-ide) -- The DropboxClient is inspired by work from [Orphilia](https://github.com/ksiazkowicz/orphilia-dropbox) -- Error reporting is powered by bugsnag. +- The DropboxClient module is inspired by work from [Orphilia](https://github.com/ksiazkowicz/orphilia-dropbox) + +It also would not be possible without the following excellent Python packages: + +- Communication between sync daemon and frontends uses [Pyro5](https://github.com/irmen/Pyro5). +- The command line interface is built with [click](https://github.com/pallets/click) and + uses beautiful interactive prompts by [survey](https://github.com/Exahilosys/survey). +- The Cocoa GUI is built using [toga](https://github.com/beeware/toga) and the macOS app + bundle is built using [briefcase](https://github.com/beeware/briefcase), both part of + the [beeware](https://beeware.org) project for writing cross-platform Python applications. +- Credential storage uses system keychains via [keyring](https://github.com/jaraco/keyring). +- [watchdog](https://github.com/gorakhargosh/watchdog) allows us to receive local file + system events. +- Error reporting is generously provided by [bugsnag](https://www.bugsnag.com). 
+- Many more well known libraries that have become the backbone of Python projects + such as requests, sqlalchemy, etc. diff --git a/docs/config_files.rst b/docs/background/config_files.rst similarity index 91% rename from docs/config_files.rst rename to docs/background/config_files.rst index 5626b4c09..b1a4c5487 100644 --- a/docs/config_files.rst +++ b/docs/background/config_files.rst @@ -23,14 +23,11 @@ made to one of the options through the ``maestral.config`` module. # The current Dropbox directory path = /Users/samschott/Dropbox (Maestral) - # Default directory name for the config - default_dir_name = Dropbox (Maestral) - # List of excluded files and folders excluded_items = ['/test_folder', '/sub/folder'] # Config file version (not the Maestral version!) - version = 12.0.0 + version = 15.0.0 [account] @@ -57,12 +54,19 @@ made to one of the options through the ``maestral.config`` module. # Interval in sec to check for updates update_notification_interval = 604800 - # Enable or disable automatic error reports - analytics = False - [sync] # Interval in sec to perform a full reindexing reindex_interval = 604800 + # Maximum CPU usage per core max_cpu_percent = 20.0 + + # Sync history to keep in seconds + keep_history = 604800 + + # Enable upload syncing + upload = True + + # Enable download syncing + download = True diff --git a/docs/background/contributing.rst b/docs/background/contributing.rst new file mode 100644 index 000000000..60b6c129b --- /dev/null +++ b/docs/background/contributing.rst @@ -0,0 +1,7 @@ + +Contributing +============ + +Thank you for your interest in contributing! + +.. 
mdinclude:: ../../CONTRIBUTING.md diff --git a/docs/log_handlers.csv b/docs/background/log_handlers.csv similarity index 81% rename from docs/log_handlers.csv rename to docs/background/log_handlers.csv index 9912c20a2..4de90f1cd 100644 --- a/docs/log_handlers.csv +++ b/docs/background/log_handlers.csv @@ -5,5 +5,4 @@ Systemd journal,User defined (default: INFO),If started as systemd service Systemd notify status,INFO,If started as systemd notify service :attr:`Maestral.status` API,INFO,Alyways Desktop notifications,WARNING,Alyways -:attr:`Maestral.fatal_errors` API,ERROR,Alyways -Bugsnag,ERROR,Disabled by default \ No newline at end of file +:attr:`Maestral.fatal_errors` API,ERROR,Alyways \ No newline at end of file diff --git a/docs/log_levels.csv b/docs/background/log_levels.csv similarity index 100% rename from docs/log_levels.csv rename to docs/background/log_levels.csv diff --git a/docs/logging.rst b/docs/background/logging.rst similarity index 87% rename from docs/logging.rst rename to docs/background/logging.rst index 11987e0e8..da7bf7e00 100644 --- a/docs/logging.rst +++ b/docs/background/logging.rst @@ -18,9 +18,7 @@ internal usage, others for external communication. For instance, cached logging are used to populate the public APIs :attr:`Maestral.status` and :attr:`Maestral.fatal_errors` and therefore use fixed log levels. Logging to stderr, the systemd journal (if applicable) and to our log files uses the user defined log level -from :attr:`Maestral.log_level` which defaults to INFO. Finally, the Bugsnag -error handler which sends all errors to a server for analytics must be explicitly -enabled by the user and has a fixed log level of ERROR. +from :attr:`Maestral.log_level` which defaults to INFO. .. 
csv-table:: :file: log_handlers.csv diff --git a/docs/state_files.rst b/docs/background/state_files.rst similarity index 100% rename from docs/state_files.rst rename to docs/background/state_files.rst diff --git a/docs/sync_logic.rst b/docs/background/sync_logic.rst similarity index 89% rename from docs/sync_logic.rst rename to docs/background/sync_logic.rst index 39769c5aa..20bcb4d40 100644 --- a/docs/sync_logic.rst +++ b/docs/background/sync_logic.rst @@ -22,7 +22,7 @@ Maestral processes remote events as follows: 1) :meth:`SyncEngine.wait_for_remote_changes` blocks until remote changes are available. -2) :meth:`SyncEngine.get_remote_changes` lists all remote changes since the last sync. +2) :meth:`SyncEngine.list_remote_changes` lists all remote changes since the last sync. Those events are processed at follows: * Events for entries which are excluded by selective sync and hard-coded file names @@ -49,7 +49,9 @@ Local file events come in eight types: For both files and folders we collect cre moved, modified and deleted events. They are processed as follows: 1) :meth:`SyncEngine.wait_for_local_changes`: Blocks until local changes are - registered by :class:`FSEventHandler` and returns those changes. Events are + registered by :class:`FSEventHandler`. + +2) :meth:`SyncEngine.list_local_changes`: Lists all local file events. Those are processed as follows: * Events ignored by a "mignore" pattern as well as hard-coded file names and @@ -62,8 +64,13 @@ moved, modified and deleted events. They are processed as follows: 2) :meth:`SyncEngine.apply_local_changes`: Sorts local changes hierarchically and - applies events in the order of deleted, folders and files. Deletions and creations - will be carried out in parallel with up to 6 threads. + applies events in the order of deleted, folders and files. Deleted, created and + modified events will be applied to the remote Dropbox in parallel with up to 6 + threads. 
Moves will be carried out synchronously. + +Before processing, we convert all Dropbox metadata and local file events to a unified +format of :class:`maestral.database.SyncEvent` instances which are also used to store +the sync history data in our SQLite database. Detection of sync conflicts *************************** diff --git a/docs/conf.py b/docs/conf.py index 23119372e..4ff2aefc4 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -11,7 +11,7 @@ # -- Project information --------------------------------------------------------------- author = "Sam Schott" -version = "1.3.1" +version = "1.4.0.dev0" release = version project = "Maestral" title = "Maestral API Documentation" @@ -55,7 +55,6 @@ autoapi_options = [ "members", "show-inheritance", - "show-module-summary", "undoc-members", ] autoapi_add_toctree_entry = False @@ -72,5 +71,5 @@ "python": ("https://docs.python.org/3/", None), "requests": ("https://requests.readthedocs.io/en/master/", None), "sqlalchemy": ("https://docs.sqlalchemy.org/en/latest/", None), - "watchdog": ("https://python-watchdog.readthedocs.io/en/v0.10.3/", None), + "watchdog": ("https://python-watchdog.readthedocs.io/en/latest/", None), } diff --git a/docs/index.rst b/docs/index.rst index d25343f5b..1c7820bab 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -8,14 +8,35 @@ For a user manual and an overview of Maestral's functionality, please refer to the `wiki `_. .. toctree:: - :caption: Table of Contents - :maxdepth: 1 - - sync_logic - logging - config_files - state_files - API Reference + :hidden: + :caption: Background + :maxdepth: 2 + + background/sync_logic + background/logging + background/config_files + background/state_files + background/contributing + +.. 
toctree:: + :hidden: + :caption: Reference + :maxdepth: 2 + + autoapi/maestral/cli/index + autoapi/maestral/client/index + autoapi/maestral/config/index + autoapi/maestral/constants/index + autoapi/maestral/daemon/index + autoapi/maestral/database/index + autoapi/maestral/errors/index + autoapi/maestral/fsevents/index + autoapi/maestral/logging/index + autoapi/maestral/main/index + autoapi/maestral/notify/index + autoapi/maestral/oauth/index + autoapi/maestral/sync/index + autoapi/maestral/utils/index Getting started *************** diff --git a/docs/requirements.txt b/docs/requirements.txt index 32d09ea2d..ca5bf54a9 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,4 @@ m2r2 -sphinx==3.3.1 -sphinx-autoapi==1.5.1 -sphinx_rtd_theme==0.5.0 \ No newline at end of file +sphinx==3.4.3 +sphinx-autoapi==1.7.0 +sphinx_rtd_theme==0.5.1 \ No newline at end of file diff --git a/setup.cfg b/setup.cfg index 26eb2d881..f4014b992 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,8 +1,30 @@ +[bumpversion] +current_version = 1.4.0.dev0 +commit = True +tag = False +parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\.(?P[a-z]+)(?P\d+))? 
+serialize = + {major}.{minor}.{patch}.{release}{build} + {major}.{minor}.{patch} + +[bumpversion:part:release] +optional_value = prod +first_value = dev +values = + dev + prod + +[bumpversion:part:build] + +[bumpversion:file:setup.py] +search = version="{current_version}" +replace = version="{new_version}" + +[bumpversion:file:src/maestral/__init__.py] + +[bumpversion:file:docs/conf.py] + [flake8] -# E203: whitespace before ':', conflicts with black -# E501: line width, is handled by black -# W503: line break before binary operator, conflict with black -# H306: imports in alphabetical order ignore = E203,E501,W503,H306 statistics = True diff --git a/setup.py b/setup.py index d4928c85c..9be3e13b9 100644 --- a/setup.py +++ b/setup.py @@ -6,31 +6,30 @@ # proceed with actual install install_requires = [ - "alembic>=1.3,<1.5", - "bugsnag>=3.4,<5.0", - "click>=7.1.1,<8.0", + "alembic>=1.3", + "click>=7.1.1", + "desktop-notifier>=3.1.2", "dropbox>=10.9.0,<12.0", - "dbus-next>=0.1.4;sys_platform=='linux'", "fasteners>=0.15", "importlib_metadata;python_version<'3.8'", "importlib_resources;python_version<'3.9'", - "keyring>=19,<22", - "keyrings.alt>=3.1.0,<5.0", + "keyring>=22", + "keyrings.alt>=3.1.0", "packaging", "pathspec>=0.5.8", "Pyro5>=5.10", "requests>=2.16.2", - "rubicon-objc>=0.3.1;sys_platform=='darwin'", + "rubicon-objc>=0.4.0;sys_platform=='darwin'", "sdnotify>=0.3.2", "setuptools", - "sqlalchemy>=1.3,<1.4", - "survey>=2.1.0,<3.0", - "watchdog>=0.10.0,<=0.10.3", + "sqlalchemy>=1.3", + "survey>=3.2.2,<4.0", + "watchdog>=2.0", ] gui_requires = [ - "maestral-qt>=1.3.1;sys_platform=='linux'", - "maestral-cocoa>=1.3.0;sys_platform=='darwin'", + "maestral-qt>=1.3.2.dev0;sys_platform=='linux'", + "maestral-cocoa>=1.3.2.dev0;sys_platform=='darwin'", ] syslog_requires = ["systemd-python"] @@ -56,7 +55,7 @@ name="maestral", author="Sam Schott", author_email="ss2151@cam.ac.uk", - version="1.3.1", + version="1.4.0.dev0", url="https://github.com/SamSchott/maestral", 
description="Open-source Dropbox client for macOS and Linux.", license="MIT", diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml deleted file mode 100644 index bd69d3e43..000000000 --- a/snap/snapcraft.yaml +++ /dev/null @@ -1,67 +0,0 @@ -name: maestral -base: core18 -license: MIT -adopt-info: maestral -icon: maestral/resources/maestral.png -summary: An open-source Dropbox client for macOS and Linux. -description: | - Maestral is light-weight and simple Dropbox client for Linux and macOS. - It allows two-way syncing with your Dropbox, setting up a 'mignore' file - similar to 'gitignore' and syncing multiple Dropbox accounts. - -grade: stable -confinement: strict - -apps: - maestral: - command: maestral - desktop: share/applications/maestral.desktop - autostart: maestral-maestral.desktop - extensions: - - kde-neon - plugs: - - home - - network - - unity7 - - opengl - -environment: - LC_ALL: C.UTF-8 - LANG: C.UTF-8 - QT_QPA_PLATFORMTHEME: gtk3 - XDG_CURRENT_DESKTOP: "Unity:$XDG_CURRENT_DESKTOP" - -parts: - maestral: - source: . 
- plugin: python - python-version: python3 - stage-packages: - # Use the version from the repo to avoid the xcb not found error - - python3-pyqt5 - override-pull: | - snapcraftctl pull - snapcraftctl set-version $(grep version maestral/__init__.py | cut -d'"' -f2) - sed -i "s|Exec = {start_cmd}|Exec = maestral gui -c 'maestral'|g" maestral/resources/maestral.desktop - - maestral-qt: - source: https://github.com/SamSchott/maestral-qt.git - plugin: python - python-version: python3 - override-build: | - sed -i 's|Exec = maestral_qt|Exec = maestral gui|g' maestral_qt/resources/maestral.desktop - sed -i "s|'PyQt5.*||g" setup.py - sed -i "s|'maestral==.*||g" setup.py - snapcraftctl build - override-prime: | - snapcraftctl prime - sed -i 's|Icon = maestral|Icon=${SNAP}/share/icons/hicolor/512x512/apps/maestral.png|g' share/applications/maestral.desktop - - cleanup: - after: [maestral, maestral-qt] - plugin: nil - build-snaps: [ kde-frameworks-5-core18 ] - override-prime: | - set -eux - cd /snap/kde-frameworks-5-core18/current - find . 
-type f,l -exec rm -f $SNAPCRAFT_PRIME/{} \; diff --git a/src/maestral/__init__.py b/src/maestral/__init__.py index 2681b2e26..e895db638 100644 --- a/src/maestral/__init__.py +++ b/src/maestral/__init__.py @@ -1,20 +1,8 @@ # -*- coding: utf-8 -*- -""" - -The following APIs should remain stable for frontends: - -* maestral.main.Maestral -* maestral.constants -* maestral.daemon -* maestral.errors -* maestral.utils.appdirs -* maestral.utils.autostart - -""" import warnings -__version__ = "1.3.1" +__version__ = "1.4.0.dev0" __author__ = "Sam Schott" __url__ = "https://github.com/SamSchott/maestral" diff --git a/src/maestral/autostart.py b/src/maestral/autostart.py index 001758edf..431350ec6 100644 --- a/src/maestral/autostart.py +++ b/src/maestral/autostart.py @@ -114,10 +114,10 @@ def __init__( self.service_config.write(f) def enable(self) -> None: - subprocess.run(["systemctl", "--user", "enable", self.service_name]) + subprocess.check_output(["systemctl", "--user", "enable", self.service_name]) def disable(self) -> None: - subprocess.run(["systemctl", "--user", "disable", self.service_name]) + subprocess.check_output(["systemctl", "--user", "disable", self.service_name]) @property def enabled(self) -> bool: @@ -210,7 +210,7 @@ def __init__( def enable(self) -> None: with open(self.destination, "w") as f: - self.config.write(f) + self.config.write(f, space_around_delimiters=False) st = os.stat(self.destination) os.chmod(self.destination, st.st_mode | stat.S_IEXEC) @@ -307,7 +307,6 @@ def __init__(self, config_name: str) -> None: TryExec=self.maestral_path, Icon="maestral", Terminal="false", - Categories="Network;FileTransfer;", GenericName="File Synchronizer", Comment="Sync your files with Dropbox", ) diff --git a/src/maestral/cli.py b/src/maestral/cli.py index 0d5ad8185..98b7b0757 100755 --- a/src/maestral/cli.py +++ b/src/maestral/cli.py @@ -15,13 +15,13 @@ # external imports import click -import Pyro5.errors # type: ignore # local imports -from . 
import __version__, __author__, __url__ +from . import __version__ from .utils import cli if TYPE_CHECKING: + from datetime import datetime from .main import Maestral from .daemon import MaestralProxy @@ -44,13 +44,13 @@ def stop_daemon_with_cli_feedback(config_name: str) -> None: click.echo("Stopping Maestral...", nl=False) res = stop_maestral_daemon_process(config_name) if res == Stop.Ok: - click.echo(" " * 8 + OK) + click.echo("\rStopping Maestral... " + OK) elif res == Stop.NotRunning: click.echo("\rMaestral daemon is not running.") elif res == Stop.Killed: - click.echo(" " * 8 + KILLED) + click.echo("\rStopping Maestral... " + KILLED) elif res == Stop.Failed: - click.echo(" " * 8 + FAILED) + click.echo("\rStopping Maestral... " + FAILED) def select_dbx_path_dialog( @@ -75,7 +75,7 @@ def select_dbx_path_dialog( res = cli.select_path( "Please choose a local Dropbox folder:", default=f"~/{default_dir_name}", - only_directories=True, + files_allowed=False, ) res = res.rstrip(osp.sep) @@ -149,7 +149,7 @@ def link_dialog(m: Union["MaestralProxy", "Maestral"]) -> None: elif res == 1: cli.warn("Invalid token, please try again") elif res == 2: - cli.warn(" Could not connect to Dropbox, please try again") + cli.warn("Could not connect to Dropbox, please try again") def check_for_updates() -> None: @@ -212,7 +212,7 @@ def check_for_fatal_errors(m: Union["MaestralProxy", "Maestral"]) -> bool: return False -def catch_maestral_errors(func: Callable) -> Callable: +def convert_py_errors(func: Callable) -> Callable: """ Decorator that catches a MaestralApiError and prints it as a useful message to the command line instead of printing the full stacktrace. 
@@ -228,10 +228,25 @@ def wrapper(*args, **kwargs): raise cli.RemoteApiError(exc.title, exc.message) except ConnectionError: raise cli.CliException("Could not connect to Dropbox") + except Exception as exc: + raise cli.CliException(f"{exc.__class__.__name__}: {exc.args[0]}") return wrapper +def _datetime_from_iso_str(time_str: str) -> "datetime": + """ + Converts an ISO 8601 time string such as '2015-05-15T15:50:38Z' to a timezone aware + datetime object in the local time zone. + """ + + from datetime import datetime + + # replace Z with +0000, required for Python 3.6 compatibility + time_str = time_str.replace("Z", "+0000") + return datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%S%z").astimezone() + + # ====================================================================================== # Custom parameter types # ====================================================================================== @@ -312,7 +327,7 @@ def __init__(self, file_okay: bool = True, dir_okay: bool = True) -> None: class ConfigName(click.ParamType): - """ "A command line parameter representing a Dropbox path + """A command line parameter representing a Dropbox path :param existing: If ``True`` require an existing config, otherwise create a new config on demand. 
@@ -458,24 +473,10 @@ def format_commands( formatter.write_dl(rows) -@click.group( - cls=OrderedGroup, - context_settings={"help_option_names": ["-h", "--help"]}, - invoke_without_command=True, - no_args_is_help=True, - help="Dropbox client for Linux and macOS.", -) -@click.option( - "--version", - "-V", - is_flag=True, - default=False, - help="Show version and exit.", -) -def main(version: bool): - - if version: - click.echo(__version__) +@click.group(cls=OrderedGroup, help="Dropbox client for Linux and macOS.") +@click.version_option(version=__version__, message=__version__) +def main(): + pass # ====================================================================================== @@ -509,13 +510,17 @@ def main(version: bool): "-f", is_flag=True, default=False, - help="Starts Maestral in the foreground.", + help="Start Maestral in the foreground.", ) @click.option( - "--verbose", "-v", is_flag=True, default=False, help="Print log messages to stdout." + "--verbose", + "-v", + is_flag=True, + default=False, + help="Print log messages to stdout when started with '-f, --foreground' flag.", ) @config_option -@catch_maestral_errors +@convert_py_errors def start(foreground: bool, verbose: bool, config_name: str) -> None: # ---- run setup if necessary ------------------------------------------------------ @@ -524,8 +529,8 @@ def start(foreground: bool, verbose: bool, config_name: str) -> None: # running with the --foreground flag, prevents leaving a zombie process if the setup # fails with an exception and does not confuse systemd. 
- from .main import Maestral from .daemon import ( + MaestralProxy, start_maestral_daemon, start_maestral_daemon_process, is_running, @@ -538,7 +543,22 @@ def start(foreground: bool, verbose: bool, config_name: str) -> None: click.echo("Daemon is already running.") return - m = Maestral(config_name, log_to_stdout=verbose) + if not foreground: + # start daemon process + cli.echo("Starting Maestral...", nl=False) + + res = start_maestral_daemon_process(config_name) + + if res == Start.Ok: + cli.echo("\rStarting Maestral... " + OK) + elif res == Start.AlreadyRunning: + cli.echo("\rStarting Maestral... " + "Already running.") + else: + cli.echo("\rStarting Maestral... " + FAILED) + cli.echo("Please check logs for more information.") + return + + m = MaestralProxy(config_name, fallback=True) if m.pending_link: # this may raise KeyringAccessError link_dialog(m) @@ -578,28 +598,13 @@ def start(foreground: bool, verbose: bool, config_name: str) -> None: m.excluded_items = excluded_paths - # free resources - del m + cli.ok("Setup completed. 
Starting sync.") if foreground: - # start our current process + del m start_maestral_daemon(config_name, log_to_stdout=verbose, start_sync=True) else: - - # start daemon process - cli.echo("Starting Maestral...", nl=False) - - res = start_maestral_daemon_process( - config_name, log_to_stdout=verbose, start_sync=True - ) - - if res == Start.Ok: - cli.echo(" " * 8 + OK) - elif res == Start.AlreadyRunning: - cli.echo(" " * 8 + "Already running.") - else: - cli.echo(" " * 8 + FAILED) - cli.echo("Please check logs for more information.") + m.start_sync() @main.command(section="Core Commands", help="Stop the sync daemon.") @@ -608,24 +613,6 @@ def stop(config_name: str) -> None: stop_daemon_with_cli_feedback(config_name) -@main.command(section="Core Commands", help="Restart the sync daemon.") -@click.option( - "--foreground", - "-f", - is_flag=True, - default=False, - help="Start the sync daemon in the foreground.", -) -@click.option( - "--verbose", "-v", is_flag=True, default=False, help="Print log messages to stdout." 
-) -@existing_config_option -@click.pass_context -def restart(ctx, foreground: bool, verbose: bool, config_name: str) -> None: - stop_daemon_with_cli_feedback(config_name) - ctx.forward(start) - - @main.command(section="Core Commands", help="Run the GUI if installed.") @config_option def gui(config_name: str) -> None: @@ -679,32 +666,38 @@ def gui(config_name: str) -> None: @existing_config_option def pause(config_name: str) -> None: - from .daemon import MaestralProxy + from .daemon import MaestralProxy, CommunicationError try: with MaestralProxy(config_name) as m: - m.pause_sync() + m.stop_sync() cli.ok("Syncing paused.") - except Pyro5.errors.CommunicationError: + except CommunicationError: cli.echo("Maestral daemon is not running.") @main.command(section="Core Commands", help="Resume syncing.") @existing_config_option def resume(config_name: str) -> None: - from .daemon import MaestralProxy + + from .daemon import MaestralProxy, CommunicationError try: with MaestralProxy(config_name) as m: if not check_for_fatal_errors(m): - m.resume_sync() + m.start_sync() cli.ok("Syncing resumed.") - except Pyro5.errors.CommunicationError: + except CommunicationError: cli.echo("Maestral daemon is not running.") -@main.command(section="Core Commands", help="Link with a Dropbox account.") +@main.group(section="Core Commands", help="Link, unlink and view the Dropbox account.") +def auth(): + pass + + +@auth.command(name="link", help="Link a new Dropbox account.") @click.option( "-r", "relink", @@ -713,8 +706,9 @@ def resume(config_name: str) -> None: help="Relink to the current account. 
Keeps the sync state.", ) @config_option -@catch_maestral_errors -def link(relink: bool, config_name: str) -> None: +@convert_py_errors +def auth_link(relink: bool, config_name: str) -> None: + from .daemon import MaestralProxy with MaestralProxy(config_name, fallback=True) as m: @@ -723,25 +717,28 @@ def link(relink: bool, config_name: str) -> None: link_dialog(m) else: cli.echo( - "Maestral is already linked. Use the option " - "'-r' to relink to the same account." + "Maestral is already linked. Use '-r' to relink to the same " + "account or specify a new config name with '-c'." ) -@main.command( - section="Core Commands", +@auth.command( + name="unlink", help=""" -Unlinks your Dropbox account. +Unlink your Dropbox account. If Maestral is running, it will be stopped before unlinking. """, ) +@click.option("--yes", "-Y", is_flag=True, default=False) @existing_config_option -@catch_maestral_errors -def unlink(config_name: str) -> None: +@convert_py_errors +def auth_unlink(yes: bool, config_name: str) -> None: - if cli.confirm("Are you sure you want unlink your account?", default=False): + if not yes: + yes = cli.confirm("Are you sure you want unlink your account?", default=False) + if yes: from .main import Maestral stop_daemon_with_cli_feedback(config_name) @@ -751,27 +748,161 @@ def unlink(config_name: str) -> None: cli.ok("Unlinked Maestral.") +@auth.command(name="status", help="View authentication status.") +@existing_config_option +def auth_status(config_name: str) -> None: + + from .config import MaestralConfig, MaestralState + + conf = MaestralConfig(config_name) + state = MaestralState(config_name) + + email = state.get("account", "email") + account_type = state.get("account", "type").capitalize() + dbid = conf.get("account", "account_id") + + cli.echo("") + cli.echo(f"Email: {email}") + cli.echo(f"Account-type: {account_type}") + cli.echo(f"Dropbox-ID: {dbid}") + cli.echo("") + + +@main.group(section="Core Commands", help="Create and manage shared links.") 
+def sharelink(): + pass + + +@sharelink.command(name="create", help="Create a shared link for a file or folder.") +@click.argument("dropbox_path", type=DropboxPath()) +@click.option( + "-p", + "--password", + help="Optional password for the link.", +) +@click.option( + "-e", + "--expiry", + metavar="DATE", + type=click.DateTime(formats=["%Y-%m-%d", "%Y-%m-%dT%H:%M", "%Y-%m-%d %H:%M"]), + help="Expiry time for the link (e.g. '2025-07-24 20:50').", +) +@existing_config_option +@convert_py_errors +def sharelink_create( + dropbox_path: str, + password: str, + expiry: Optional["datetime"], + config_name: str, +) -> None: + + from .daemon import MaestralProxy + + if not dropbox_path.startswith("/"): + dropbox_path = "/" + dropbox_path + + expiry_dt: Optional[float] + + if expiry: + expiry_dt = expiry.timestamp() + else: + expiry_dt = None + + if password: + visibility = "password" + else: + visibility = "public" + + with MaestralProxy(config_name, fallback=True) as m: + link_info = m.create_shared_link(dropbox_path, visibility, password, expiry_dt) + + cli.echo(link_info["url"]) + + +@sharelink.command(name="revoke", help="Revoke a shared link.") +@click.argument("url") +@existing_config_option +@convert_py_errors +def sharelink_revoke(url: str, config_name: str) -> None: + + from .daemon import MaestralProxy + + with MaestralProxy(config_name, fallback=True) as m: + m.revoke_shared_link(url) + + cli.echo("Revoked shared link.") + + +@sharelink.command( + name="list", help="List shared links for a path or all shared links." 
+) +@click.argument("dropbox_path", required=False, type=DropboxPath()) +@existing_config_option +@convert_py_errors +def sharelink_list(dropbox_path: Optional[str], config_name: str) -> None: + + from .daemon import MaestralProxy + + if dropbox_path and not dropbox_path.startswith("/"): + dropbox_path = "/" + dropbox_path + + with MaestralProxy(config_name, fallback=True) as m: + links = m.list_shared_links(dropbox_path) + + link_table = cli.Table(["URL", "Item", "Access", "Expires"]) + + for link in links: + url = cast(str, link["url"]) + file_name = cast(str, link["name"]) + visibility = cast(str, link["link_permissions"]["resolved_visibility"][".tag"]) + + dt_field: cli.Field + + if "expires" in link: + expires = cast(str, link["expires"]) + dt_field = cli.DateField(_datetime_from_iso_str(expires)) + else: + dt_field = cli.TextField("-") + + link_table.append([url, file_name, visibility, dt_field]) + + cli.echo("") + link_table.echo() + cli.echo("") + + +# ====================================================================================== +# Information commands +# ====================================================================================== + + @main.command(section="Information", help="Show the status of the daemon.") @existing_config_option -@catch_maestral_errors +@convert_py_errors def status(config_name: str) -> None: - from .daemon import MaestralProxy + from .daemon import MaestralProxy, CommunicationError check_for_updates() try: with MaestralProxy(config_name) as m: + email = m.get_state("account", "email") + account_type = m.get_state("account", "type").capitalize() + + status_info = m.status if m.running else "Paused" + usage = m.get_state("account", "usage") + n_errors = len(m.sync_errors) color = "red" if n_errors > 0 else "green" n_errors_str = click.style(str(n_errors), fg=color) + cli.echo("") - cli.echo("Account: {}".format(m.get_state("account", "email"))) - cli.echo("Usage: {}".format(m.get_state("account", "usage"))) - 
cli.echo("Status: {}".format(m.status)) - cli.echo("Sync threads: {}".format("Running" if m.running else "Stopped")) - cli.echo("Sync errors: {}".format(n_errors_str)) + cli.echo(f"Account: {email} ({account_type})") + cli.echo(f"Usage: {usage}") + cli.echo(f"Status: {status_info}") + cli.echo(f"Sync errors: {n_errors_str}") cli.echo("") check_for_fatal_errors(m) @@ -792,7 +923,7 @@ def status(config_name: str) -> None: table.echo() cli.echo("") - except Pyro5.errors.CommunicationError: + except CommunicationError: cli.echo("Maestral daemon is not running.") @@ -809,27 +940,27 @@ def status(config_name: str) -> None: ) @click.argument("local_path", type=click.Path(exists=True, resolve_path=True)) @existing_config_option -def file_status(local_path: str, config_name: str) -> None: +def filestatus(local_path: str, config_name: str) -> None: - from .daemon import MaestralProxy + from .daemon import MaestralProxy, CommunicationError try: with MaestralProxy(config_name) as m: stat = m.get_file_status(local_path) cli.echo(stat) - except Pyro5.errors.CommunicationError: + except CommunicationError: cli.echo("unwatched") @main.command(section="Information", help="Live view of all items being synced.") @existing_config_option -@catch_maestral_errors +@convert_py_errors def activity(config_name: str) -> None: import curses from .utils import natural_size - from .daemon import MaestralProxy + from .daemon import MaestralProxy, CommunicationError try: with MaestralProxy(config_name) as m: @@ -903,19 +1034,20 @@ def curses_loop(screen) -> None: # no type hints for screen provided yet # enter curses event loop curses.wrapper(curses_loop) - except Pyro5.errors.CommunicationError: + except CommunicationError: cli.echo("Maestral daemon is not running.") @main.command(section="Information", help="Show recently changed or added files.") @existing_config_option +@convert_py_errors def history(config_name: str) -> None: from datetime import datetime from .daemon import MaestralProxy 
with MaestralProxy(config_name, fallback=True) as m: - history = m.get_history() + events = m.get_history() table = cli.Table( [ @@ -925,7 +1057,7 @@ def history(config_name: str) -> None: ] ) - for event in history: + for event in events: dbx_path = cast(str, event["dbx_path"]) change_type = cast(str, event["change_type"]) @@ -940,7 +1072,7 @@ def history(config_name: str) -> None: @main.command(section="Information", help="List contents of a Dropbox directory.") -@click.argument("dropbox_path", type=click.Path(), default="") +@click.argument("dropbox_path", type=DropboxPath(), default="") @click.option( "-l", "--long", @@ -956,10 +1088,9 @@ def history(config_name: str) -> None: help="Include deleted items in listing.", ) @existing_config_option -@catch_maestral_errors +@convert_py_errors def ls(long: bool, dropbox_path: str, include_deleted: bool, config_name: str) -> None: - from datetime import datetime from .utils import natural_size from .daemon import MaestralProxy @@ -1020,8 +1151,7 @@ def ls(long: bool, dropbox_path: str, include_deleted: bool, config_name: str) - if "client_modified" in entry: cm = cast(str, entry["client_modified"]) - dt = datetime.strptime(cm, "%Y-%m-%dT%H:%M:%S%z").astimezone() - dt_field = cli.DateField(dt) + dt_field = cli.DateField(_datetime_from_iso_str(cm)) else: dt_field = cli.TextField("-") @@ -1046,28 +1176,14 @@ def ls(long: bool, dropbox_path: str, include_deleted: bool, config_name: str) - grid.echo() -@main.command(section="Information", help="Show linked Dropbox account information.") -@existing_config_option -def account_info(config_name: str) -> None: - from .daemon import MaestralProxy - - with MaestralProxy(config_name, fallback=True) as m: - - email = m.get_state("account", "email") - account_type = m.get_state("account", "type").capitalize() - usage = m.get_state("account", "usage") - dbid = m.get_conf("account", "account_id") - - cli.echo("") - cli.echo(f"Email: {email}") - cli.echo(f"Account-type: {account_type}") 
- cli.echo(f"Usage: {usage}") - cli.echo(f"Dropbox-ID: {dbid}") - cli.echo("") - - @main.command(section="Information", help="List all configured Dropbox accounts.") -def configs() -> None: +@click.option( + "--clean", + is_flag=True, + default=False, + help="Remove config files without a linked account.", +) +def config_files(clean: bool) -> None: from .daemon import is_running from .config import ( @@ -1077,37 +1193,43 @@ def configs() -> None: remove_configuration, ) - # clean up stale configs - config_names = list_configs() - - for name in config_names: - dbid = MaestralConfig(name).get("account", "account_id") - if dbid == "" and not is_running(name): - remove_configuration(name) - - # display remaining configs - names = list_configs() - emails = [MaestralState(c).get("account", "email") for c in names] + if clean: - table = cli.Table([cli.Column("Config name", names), cli.Column("Account", emails)]) + # Clean up stale config files. - cli.echo("") - table.echo() - cli.echo("") + for name in list_configs(): + conf = MaestralConfig(name) + path = conf.get_config_fpath() + dbid = conf.get("account", "account_id") + if dbid == "" and not is_running(name): + remove_configuration(name) + cli.echo(f"Removed: {path}") -@main.command( - section="Information", help="Return the version number and other information." -) -def about() -> None: + else: + # Display config files. 
+ names = list_configs() + emails = [] + paths = [] + + for name in names: + config = MaestralConfig(name) + state = MaestralState(name) + + emails.append(state.get("account", "email")) + paths.append(config.get_config_fpath()) + + table = cli.Table( + [ + cli.Column("Config name", names), + cli.Column("Account", emails), + cli.Column("Path", paths, elide=cli.Elide.Leading), + ] + ) - year = time.localtime().tm_year - - cli.echo("") - cli.echo(f"Version: {__version__}") - cli.echo(f"Website: {__url__}") - cli.echo(f"Copyright: (c) 2018-{year}, {__author__}.") - cli.echo("") + cli.echo("") + table.echo() + cli.echo("") # ====================================================================================== @@ -1162,6 +1284,7 @@ def excluded(): @excluded.command(name="list", help="List all excluded files and folders.") @existing_config_option def excluded_list(config_name: str) -> None: + from .daemon import MaestralProxy with MaestralProxy(config_name, fallback=True) as m: @@ -1180,10 +1303,11 @@ def excluded_list(config_name: str) -> None: name="add", help="Add a file or folder to the excluded list and re-sync.", ) -@click.argument("dropbox_path", type=click.Path()) +@click.argument("dropbox_path", type=DropboxPath()) @existing_config_option -@catch_maestral_errors +@convert_py_errors def excluded_add(dropbox_path: str, config_name: str) -> None: + from .daemon import MaestralProxy if not dropbox_path.startswith("/"): @@ -1207,11 +1331,12 @@ def excluded_add(dropbox_path: str, config_name: str) -> None: folder will be included as well (but no other items inside it). 
""", ) -@click.argument("dropbox_path", type=click.Path()) +@click.argument("dropbox_path", type=DropboxPath()) @existing_config_option -@catch_maestral_errors +@convert_py_errors def excluded_remove(dropbox_path: str, config_name: str) -> None: - from .daemon import MaestralProxy + + from .daemon import MaestralProxy, CommunicationError if not dropbox_path.startswith("/"): dropbox_path = "/" + dropbox_path @@ -1224,7 +1349,7 @@ def excluded_remove(dropbox_path: str, config_name: str) -> None: m.include_item(dropbox_path) cli.ok(f"Included '{dropbox_path}'. Now downloading...") - except Pyro5.errors.CommunicationError: + except CommunicationError: raise cli.CliException("Daemon must be running to download folders.") @@ -1240,20 +1365,20 @@ def notify(): @click.argument( "level_name", required=False, - type=click.Choice(["ERROR", "SYNCISSUE", "FILECHANGE"]), + type=click.Choice(["ERROR", "SYNCISSUE", "FILECHANGE"], case_sensitive=False), ) @existing_config_option def notify_level(level_name: str, config_name: str) -> None: - from .notify import MaestralDesktopNotifier as Notifier + from . 
import notify as _notify from .daemon import MaestralProxy with MaestralProxy(config_name, fallback=True) as m: if level_name: - m.notification_level = Notifier.level_name_to_number(level_name) + m.notification_level = _notify.level_name_to_number(level_name) cli.ok(f"Notification level set to {level_name}.") else: - level_name = Notifier.level_number_to_name(m.notification_level) + level_name = _notify.level_number_to_name(m.notification_level) cli.echo(f"Notification level: {level_name}.") @@ -1264,54 +1389,23 @@ def notify_level(level_name: str, config_name: str) -> None: @click.argument("minutes", type=click.IntRange(min=0)) @existing_config_option def notify_snooze(minutes: int, config_name: str) -> None: - from .daemon import MaestralProxy + + from .daemon import MaestralProxy, CommunicationError try: with MaestralProxy(config_name) as m: m.notification_snooze = minutes - except Pyro5.errors.CommunicationError: + except CommunicationError: cli.echo("Maestral daemon is not running.") else: if minutes > 0: cli.ok( - f"Notifications snoozed for {minutes} min. " "Set snooze to 0 to reset." + f"Notifications snoozed for {minutes} min. Set snooze to 0 to reset." ) else: cli.ok("Notifications enabled.") -@main.command( - section="Settings", - help=""" -Enable or disables sharing of error reports. - -Sharing is disabled by default. If enabled, error reports are shared with bugsnag and no -personal information will typically be collected. Shared tracebacks may however include -file names, depending on the error. 
-""", -) -@click.option("--yes", "-Y", is_flag=True, default=False) -@click.option("--no", "-N", is_flag=True, default=False) -@existing_config_option -def analytics(yes: bool, no: bool, config_name: str) -> None: - from .daemon import MaestralProxy - - if yes or no: - with MaestralProxy(config_name, fallback=True) as m: - m.analytics = yes - - status_str = "Enabled" if yes else "Disabled" - cli.ok(f"{status_str} automatic error reports.") - else: - with MaestralProxy(config_name, fallback=True) as m: - enabled = m.analytics - - if enabled: - cli.echo("Analytics are enabled. Use -N to disable") - else: - cli.echo("Analytics are disabled. Use -Y to enable") - - # ====================================================================================== # Maintenance # ====================================================================================== @@ -1321,6 +1415,7 @@ def analytics(yes: bool, no: bool, config_name: str) -> None: @click.argument("new_path", required=False, type=click.Path(writable=True)) @existing_config_option def move_dir(new_path: str, config_name: str) -> None: + from .daemon import MaestralProxy new_path = new_path or select_dbx_path_dialog(config_name) @@ -1340,7 +1435,7 @@ def move_dir(new_path: str, config_name: str) -> None: """, ) @existing_config_option -@catch_maestral_errors +@convert_py_errors def rebuild_index(config_name: str) -> None: import textwrap @@ -1366,9 +1461,188 @@ def rebuild_index(config_name: str) -> None: m.rebuild_index() if m._is_fallback: - cli.ok("Rebuilding now. Run 'maestral status' to view progress.") - else: cli.ok("Daemon is not running. Rebuilding scheduled for next startup.") + else: + cli.ok("Rebuilding now. 
Run 'maestral status' to view progress.") + + +@main.command(section="Maintenance", help="List old file revisions.") +@click.argument("dropbox_path", type=DropboxPath()) +@click.option( + "-l", + "--limit", + help="Maximum number of revs to list.", + show_default=True, + type=click.IntRange(min=1, max=100), + default=10, +) +@existing_config_option +@convert_py_errors +def revs(dropbox_path: str, limit: int, config_name: str) -> None: + + from .daemon import MaestralProxy + + with MaestralProxy(config_name, fallback=True) as m: + entries = m.list_revisions(dropbox_path, limit=limit) + + table = cli.Table(["Revision", "Modified Time"]) + + for entry in entries: + + rev = cast(str, entry["rev"]) + dt = _datetime_from_iso_str(cast(str, entry["client_modified"])) + + table.append([cli.TextField(rev), cli.DateField(dt)]) + + cli.echo("") + table.echo() + cli.echo("") + + +@main.command( + section="Maintenance", + help=""" +Compare two revisions of a file. + +If no revs are passed to the command, you can select the revisions interactively. If +only one rev is passed, it is compared to the local version of the file. The diff is +shown via a pager if longer 30 lines. + +Warning: The specified revisions will be downloaded to temp files and loaded into memory +to generate the diff. Depending on the file size, this may use significant disk space +and memory. 
+""", +) +@click.argument("dropbox_path", type=DropboxPath()) +@click.option( + "-v", + "--rev", + help="Revisions to compare (multiple allowed).", + multiple=True, + default=[], +) +@click.option("--no-color", help="Don't use colors for the diff.", is_flag=True) +@click.option("--no-pager", help="Don't use a pager for output.", is_flag=True) +@click.option( + "-l", + "--limit", + help="Maximum number of revs to list.", + show_default=True, + type=click.IntRange(min=1, max=100), + default=10, +) +@convert_py_errors +@existing_config_option +def diff( + dropbox_path: str, + rev: List[str], + no_color: bool, + no_pager: bool, + limit: int, + config_name: str, +) -> None: + + from .daemon import MaestralProxy + + # Reason for rel_dbx_path: os.path.join does not like leading / + if not dropbox_path.startswith("/"): + dropbox_path = "/" + dropbox_path + + def download_and_compare( + m: MaestralProxy, old_rev: str, new_rev: Optional[str] = None + ) -> None: + """ + Download up to two revisions to a local temporary folder + and compare them with a 'diff'. Only text files are supported. + If an unknown file type was found, everything that doesn't match + 'text/*', an error message gets printed. + """ + + diff = m.get_file_diff(old_rev, new_rev) + + if len(diff) == 0: + click.echo("There are no changes between the two revisions.") + return + + def color(ind: int, line: str) -> str: + """ + Color diff lines. + Inspiration for colors was taken from the + well known command 'git diff'. 
+ """ + + if ind < 2: + line = click.style(line, bold=True) + elif line.startswith("+"): + line = click.style(line, fg="green") + elif line.startswith("-"): + line = click.style(line, fg="red") + # Don't highlight these in the intro + elif line.startswith("@@ "): + line = click.style(line, fg="cyan") + return line + + # Color the lines + if not no_color: + diff = [color(i, l) for i, l in enumerate(diff)] + + # Enter pager if diff is too long + if len(diff) > 30 and not no_pager: + click.echo_via_pager("".join(diff)) + else: + click.echo("".join(diff)) + + with MaestralProxy(config_name, fallback=True) as m: + if len(rev) == 0: + entries = m.list_revisions(dropbox_path, limit=limit) + + for entry in entries: + cm = cast(str, entry["client_modified"]) + field = cli.DateField(_datetime_from_iso_str(cm)) + entry["desc"] = field.format(40)[0] + + dbx_path = cast(str, entries[0]["path_display"]) + local_path = m.to_local_path(dbx_path) + + if osp.isfile(local_path): + # prepend local version as an option + entries.insert(0, {"desc": "local version", "rev": None}) + + index_base = cli.select( + message="New revision:", + options=list(e["desc"] for e in entries), + hint="(↓ to see more)" if len(entries) > 6 else "", + ) + + if index_base == len(entries) - 1: + cli.warn( + "Oldest revision selected, unable to find anything to compare." 
+ ) + return + + comparable_versions = entries[index_base + 1 :] + index_new = cli.select( + message="Old revision:", + options=list(e["desc"] for e in comparable_versions), + hint="(↓ to see more)" if len(comparable_versions) > 6 else "", + ) + + old_rev = entries[index_new + index_base]["rev"] + new_rev = entries[index_base]["rev"] + elif len(rev) == 1: + old_rev = rev[0] + new_rev = None + elif len(rev) == 2: + old_rev = rev[0] + new_rev = rev[1] + elif len(rev) > 2: + cli.warn("You can only compare two revisions at a time.") + return + + # '\r' will put the cursor to the beginning of the line + # so the next characters will overwrite it + click.echo("Loading ...\r", nl=False) + download_and_compare(m, old_rev, new_rev) @main.command( @@ -1379,12 +1653,20 @@ def rebuild_index(config_name: str) -> None: If no revision number is given, old revisions will be listed. """, ) -@click.argument("dropbox_path", type=click.Path()) -@click.option("-v", "--rev", help="Revision to restore", default="") +@click.argument("dropbox_path", type=DropboxPath()) +@click.option("-v", "--rev", help="Revision to restore.", default="") +@click.option( + "-l", + "--limit", + help="Maximum number of revs to list.", + show_default=True, + type=click.IntRange(min=1, max=100), + default=10, +) @existing_config_option -@catch_maestral_errors -def restore(dropbox_path: str, rev: str, config_name: str) -> None: - from datetime import datetime +@convert_py_errors +def restore(dropbox_path: str, rev: str, limit: int, config_name: str) -> None: + from .daemon import MaestralProxy if not dropbox_path.startswith("/"): @@ -1394,12 +1676,11 @@ def restore(dropbox_path: str, rev: str, config_name: str) -> None: if not rev: cli.echo("Loading...\r", nl=False) - entries = m.list_revisions(dropbox_path) + entries = m.list_revisions(dropbox_path, limit=limit) dates = [] for entry in entries: cm = cast(str, entry["client_modified"]) - dt = datetime.strptime(cm, "%Y-%m-%dT%H:%M:%S%z").astimezone() - field = 
cli.DateField(dt) + field = cli.DateField(_datetime_from_iso_str(cm)) dates.append(field.format(40)[0]) index = cli.select( @@ -1477,7 +1758,7 @@ def log_clear(config_name: str) -> None: @click.argument( "level_name", required=False, - type=click.Choice(["DEBUG", "INFO", "WARNING", "ERROR"]), + type=click.Choice(["DEBUG", "INFO", "WARNING", "ERROR"], case_sensitive=False), ) @existing_config_option def log_level(level_name: str, config_name: str) -> None: @@ -1492,3 +1773,107 @@ def log_level(level_name: str, config_name: str) -> None: else: level_name = logging.getLevelName(m.log_level) cli.echo(f"Log level: {level_name}") + + +@main.group( + section="Maintenance", + help=""" +Direct access to config values. + +Warning: Changing some config values must be accompanied by maintenance tasks. For +example, changing the config value for the Dropbox location needs to be accompanied by +actually moving the folder. This command only gets / sets the value in the config file. +Most changes will also require a restart of the daemon to become effective. + +Use the commands from the Settings section instead wherever possible. They will take +effect immediately, perform accompanying tasks for you, and never leave the daemon in an +inconsistent state. + +Currently available config keys are: + +\b +- path: the location of the local Dropbox folder +- excluded_items: list of files or folders excluded by selective sync +- account_id: the ID of the linked Dropbox account +- notification_level: the level for desktop notifications +- log_level: the log level. 
+- update_notification_interval: interval in secs to check for updates +- keyring: the keyring backend to use (full path of the class) +- reindex_interval: the interval in seconds for full reindexing +- max_cpu_percent: maximum CPU usage target per core +- keep_history: the sync history to keep in seconds +- upload: if upload sync is enabled +- download: if download sync is enabled +""", +) +def config(): + pass + + +@config.command(name="get", help="Print the value of a given configuration key.") +@click.argument("key") +@config_option +def config_get(key: str, config_name: str) -> None: + + from .config import MaestralConfig + from .config.main import DEFAULTS_CONFIG + from .daemon import MaestralProxy, CommunicationError + + section = next( + iter(s for s, conf_dict in DEFAULTS_CONFIG if key in conf_dict), None + ) + + if not section: + raise cli.CliException(f"'{key}' is not a valid configuration key.") + + try: + with MaestralProxy(config_name) as m: + value = m.get_conf(section, key) + except CommunicationError: + value = MaestralConfig(config_name).get(section, key) + + cli.echo(value) + + +@config.command( + name="set", + help=""" +Update configuration with a value for the given key. + +Values will be cast to the proper type, raising an error where this is not possibly. For +instance, setting a boolean config value to 1 will actually set it to True. 
+""", +) +@click.argument("key") +@click.argument("value") +@config_option +@convert_py_errors +def config_set(key: str, value: str, config_name: str) -> None: + + import ast + from .config import MaestralConfig + from .config.main import DEFAULTS_CONFIG + from .daemon import MaestralProxy, CommunicationError + + section, defaults = next( + iter((s, d) for s, d in DEFAULTS_CONFIG if key in d), (None, None) + ) + + if not section or not defaults: + raise cli.CliException(f"'{key}' is not a valid configuration key.") + + default_value = defaults[key] + + if isinstance(default_value, str): + py_value = value + else: + try: + py_value = ast.literal_eval(value) + except (SyntaxError, ValueError): + py_value = value + + try: + with MaestralProxy(config_name) as m: + m.set_conf(section, key, py_value) + except CommunicationError: + MaestralConfig(config_name).set(section, key, py_value) diff --git a/src/maestral/client.py b/src/maestral/client.py index 715333ac2..ec4aeaaf8 100644 --- a/src/maestral/client.py +++ b/src/maestral/client.py @@ -10,7 +10,6 @@ import os.path as osp import time import logging -import functools import contextlib from datetime import datetime, timezone from typing import ( @@ -32,6 +31,7 @@ Dropbox, create_session, files, + sharing, users, exceptions, async_, @@ -65,13 +65,12 @@ TokenRevokedError, CursorResetError, DropboxServerError, - NoDropboxDirError, - InotifyError, NotLinkedError, InvalidDbidError, + SharedLinkError, ) from .config import MaestralState -from .constants import DROPBOX_APP_KEY, IDLE +from .constants import DROPBOX_APP_KEY from .utils import natural_size, chunks, clamp if TYPE_CHECKING: @@ -83,7 +82,6 @@ "DropboxClient", "dropbox_to_maestral_error", "os_to_maestral_error", - "fswatch_to_maestral_error", "convert_api_errors", ] @@ -121,9 +119,8 @@ FileSizeError, ] ] -_FT = Callable[..., Any] -_T = TypeVar("_T") - +PaginationResultType = Union[sharing.ListSharedLinksResult, files.ListFolderResult] +FT = TypeVar("FT", 
bound=Callable[..., Any]) # create single requests session for all clients SESSION = create_session() @@ -187,34 +184,11 @@ def convert_api_errors( except CONNECTION_ERRORS: raise ConnectionError("Cannot connect to Dropbox") except OSError as exc: - raise os_to_maestral_error(exc, dbx_path, local_path) - - -def convert_api_errors_decorator( - dbx_path_arg: Optional[int] = None, local_path_arg: Optional[int] = None -) -> Callable[[_FT], _FT]: - """ - Returns a decorator that catches and re-raises instances of :class:`OSError` and - :class:`exceptions.DropboxException` as :class:`errors.MaestralApiError` or - :class:`ConnectionError`. - - :param dbx_path_arg: Argument number to take as dbx_path for exception. - :param local_path_arg: Argument number to take as local_path_arg for exception. - """ - - def decorator(func: _FT) -> _FT: - @functools.wraps(func) - def wrapper(*args, **kwargs) -> Any: - - dbx_path = args[dbx_path_arg] if dbx_path_arg else None - local_path = args[local_path_arg] if local_path_arg else None - - with convert_api_errors(dbx_path, local_path): - return func(*args, **kwargs) - - return wrapper - - return decorator + if exc.errno == errno.EPROTOTYPE: + # Can occur on macOS, see https://bugs.python.org/issue33450 + raise ConnectionError("Cannot connect to Dropbox") + else: + raise os_to_maestral_error(exc, dbx_path, local_path) class DropboxClient: @@ -321,7 +295,6 @@ def link(self, token: str) -> int: return res - @convert_api_errors_decorator() def unlink(self) -> None: """ Unlinks the Dropbox account. @@ -329,8 +302,10 @@ def unlink(self) -> None: :raises KeyringAccessError: if keyring access fails. :raises DropboxAuthError: if we cannot authenticate with Dropbox. 
""" - self.dbx.auth_token_revoke() - self.auth.delete_creds() + + with convert_api_errors(): + self.dbx.auth_token_revoke() + self.auth.delete_creds() def _init_sdk_with_token( self, @@ -368,7 +343,6 @@ def account_id(self) -> Optional[str]: # ---- SDK wrappers ---------------------------------------------------------------- - @convert_api_errors_decorator() def get_account_info(self, dbid: Optional[str] = None) -> users.FullAccount: """ Gets current account information. @@ -377,10 +351,12 @@ def get_account_info(self, dbid: Optional[str] = None) -> users.FullAccount: currently linked account. :returns: Account info. """ - if dbid: - res = self.dbx.users_get_account(dbid) - else: - res = self.dbx.users_get_current_account() + + with convert_api_errors(): + if dbid: + res = self.dbx.users_get_account(dbid) + else: + res = self.dbx.users_get_current_account() if not dbid: # save our own account info to config @@ -400,12 +376,12 @@ def get_account_info(self, dbid: Optional[str] = None) -> users.FullAccount: return res - @convert_api_errors_decorator() def get_space_usage(self) -> SpaceUsage: """ :returns: The space usage of the currently linked account. """ - res = self.dbx.users_get_space_usage() + with convert_api_errors(): + res = self.dbx.users_get_space_usage() # convert from users.SpaceUsage to SpaceUsage space_usage = SpaceUsage.from_dbx_space_usage(res) @@ -416,7 +392,6 @@ def get_space_usage(self) -> SpaceUsage: return space_usage - @convert_api_errors_decorator(dbx_path_arg=1) def get_metadata(self, dbx_path: str, **kwargs) -> Optional[files.Metadata]: """ Gets metadata for an item on Dropbox or returns ``False`` if no metadata is @@ -424,21 +399,16 @@ def get_metadata(self, dbx_path: str, **kwargs) -> Optional[files.Metadata]: call. :param dbx_path: Path of folder on Dropbox. - :param kwargs: Keyword arguments for Dropbox SDK files_download_to_file. + :param kwargs: Keyword arguments for Dropbox SDK files_get_metadata. 
:returns: Metadata of item at the given path or ``None`` if item cannot be found. """ try: - return self.dbx.files_get_metadata(dbx_path, **kwargs) - except exceptions.ApiError as exc: - - if isinstance(exc.error, files.GetMetadataError): - # this will be only lookup errors - return None - else: - raise exc + with convert_api_errors(dbx_path=dbx_path): + return self.dbx.files_get_metadata(dbx_path, **kwargs) + except (NotFoundError, PathError): + return None - @convert_api_errors_decorator(dbx_path_arg=1) def list_revisions( self, dbx_path: str, mode: str = "path", limit: int = 10 ) -> files.ListRevisionsResult: @@ -452,10 +422,10 @@ def list_revisions( :returns: File revision history. """ - mode = files.ListRevisionsMode(mode) - return self.dbx.files_list_revisions(dbx_path, mode=mode, limit=limit) + with convert_api_errors(dbx_path=dbx_path): + mode = files.ListRevisionsMode(mode) + return self.dbx.files_list_revisions(dbx_path, mode=mode, limit=limit) - @convert_api_errors_decorator(dbx_path_arg=1) def restore(self, dbx_path: str, rev: str) -> files.FileMetadata: """ Restore an old revision of a file. @@ -466,9 +436,9 @@ def restore(self, dbx_path: str, rev: str) -> files.FileMetadata: :returns: Metadata of restored file. """ - return self.dbx.files_restore(dbx_path, rev) + with convert_api_errors(dbx_path=dbx_path): + return self.dbx.files_restore(dbx_path, rev) - @convert_api_errors_decorator(dbx_path_arg=1) def download( self, dbx_path: str, @@ -483,43 +453,40 @@ def download( :param local_path: Path to local download destination. :param sync_event: If given, the sync event will be updated with the number of downloaded bytes. - :param kwargs: Keyword arguments for Dropbox SDK files_download_to_file. + :param kwargs: Keyword arguments for Dropbox SDK files_download. :returns: Metadata of downloaded item. 
""" - # create local directory if not present - dst_path_directory = osp.dirname(local_path) - try: - os.makedirs(dst_path_directory) - except FileExistsError: - pass - md, http_resp = self.dbx.files_download(dbx_path, **kwargs) + with convert_api_errors(dbx_path=dbx_path): - chunksize = 2 ** 13 + dst_path_directory = osp.dirname(local_path) + try: + os.makedirs(dst_path_directory) + except FileExistsError: + pass - with open(local_path, "wb") as f: - with contextlib.closing(http_resp): - for c in http_resp.iter_content(chunksize): - f.write(c) - if sync_event: - sync_event.completed = f.tell() + md, http_resp = self.dbx.files_download(dbx_path, **kwargs) + + chunksize = 2 ** 13 + + with open(local_path, "wb") as f: + with contextlib.closing(http_resp): + for c in http_resp.iter_content(chunksize): + f.write(c) + if sync_event: + sync_event.completed = f.tell() # dropbox SDK provides naive datetime in UTC - client_mod_timestamp = md.client_modified.replace( - tzinfo=timezone.utc - ).timestamp() - server_mod_timestamp = md.server_modified.replace( - tzinfo=timezone.utc - ).timestamp() + client_mod = md.client_modified.replace(tzinfo=timezone.utc) + server_mod = md.server_modified.replace(tzinfo=timezone.utc) # enforce client_modified < server_modified - timestamp = min(client_mod_timestamp, server_mod_timestamp, time.time()) + timestamp = min(client_mod.timestamp(), server_mod.timestamp(), time.time()) # set mtime of downloaded file os.utime(local_path, (time.time(), timestamp)) return md - @convert_api_errors_decorator(local_path_arg=1, dbx_path_arg=2) def upload( self, local_path: str, @@ -543,83 +510,86 @@ def upload( chunk_size = clamp(chunk_size, 10 ** 5, 150 * 10 ** 6) - size = osp.getsize(local_path) + with convert_api_errors(dbx_path=dbx_path, local_path=local_path): - # dropbox SDK takes naive datetime in UTC - mtime = osp.getmtime(local_path) - mtime_dt = datetime.utcfromtimestamp(mtime) + size = osp.getsize(local_path) - if size <= chunk_size: - with 
open(local_path, "rb") as f: - md = self.dbx.files_upload( - f.read(), dbx_path, client_modified=mtime_dt, **kwargs - ) - if sync_event: - sync_event.completed = f.tell() - return md - else: - # Note: We currently do not support resuming interrupted uploads. Dropbox - # keeps upload sessions open for 48h so this could be done in the future. - with open(local_path, "rb") as f: - session_start = self.dbx.files_upload_session_start(f.read(chunk_size)) - uploaded = f.tell() - - cursor = files.UploadSessionCursor( - session_id=session_start.session_id, offset=uploaded - ) - commit = files.CommitInfo( - path=dbx_path, client_modified=mtime_dt, **kwargs - ) - - if sync_event: - sync_event.completed = uploaded - - while True: - try: - if size - f.tell() <= chunk_size: - md = self.dbx.files_upload_session_finish( - f.read(chunk_size), cursor, commit - ) + # dropbox SDK takes naive datetime in UTC + mtime = osp.getmtime(local_path) + mtime_dt = datetime.utcfromtimestamp(mtime) - else: - self.dbx.files_upload_session_append_v2( - f.read(chunk_size), cursor - ) - md = None - - # housekeeping - uploaded = f.tell() - if sync_event: - sync_event.completed = uploaded - - if md: - return md - else: - cursor.offset = uploaded - - except exceptions.DropboxException as exc: - error = getattr(exc, "error", None) - if ( - isinstance(error, files.UploadSessionFinishError) - and error.is_lookup_failed() - ): - session_lookup_error = error.get_lookup_failed() - elif isinstance(error, files.UploadSessionLookupError): - session_lookup_error = error - else: - raise exc + if size <= chunk_size: + with open(local_path, "rb") as f: + md = self.dbx.files_upload( + f.read(), dbx_path, client_modified=mtime_dt, **kwargs + ) + if sync_event: + sync_event.completed = f.tell() + return md + else: + # Note: We currently do not support resuming interrupted uploads. + # Dropbox keeps upload sessions open for 48h so this could be done in + # the future. 
+ with open(local_path, "rb") as f: + data = f.read(chunk_size) + session_start = self.dbx.files_upload_session_start(data) + uploaded = f.tell() + + cursor = files.UploadSessionCursor( + session_id=session_start.session_id, offset=uploaded + ) + commit = files.CommitInfo( + path=dbx_path, client_modified=mtime_dt, **kwargs + ) - if session_lookup_error.is_incorrect_offset(): - o = ( - session_lookup_error.get_incorrect_offset().correct_offset - ) - # reset position in file - f.seek(o) - cursor.offset = f.tell() - else: - raise exc + if sync_event: + sync_event.completed = uploaded + + while True: + try: + + if size - f.tell() <= chunk_size: + # Wrap up upload session and return metadata. + data = f.read(chunk_size) + md = self.dbx.files_upload_session_finish( + data, cursor, commit + ) + if sync_event: + sync_event.completed = sync_event.size + return md + else: + # Append to upload session. + data = f.read(chunk_size) + self.dbx.files_upload_session_append_v2(data, cursor) + + uploaded = f.tell() + cursor.offset = uploaded + + if sync_event: + sync_event.completed = uploaded + + except exceptions.DropboxException as exc: + error = getattr(exc, "error", None) + if ( + isinstance(error, files.UploadSessionFinishError) + and error.is_lookup_failed() + ): + session_lookup_error = error.get_lookup_failed() + elif isinstance(error, files.UploadSessionLookupError): + session_lookup_error = error + else: + raise exc + + if session_lookup_error.is_incorrect_offset(): + # reset position in file + offset = ( + session_lookup_error.get_incorrect_offset().correct_offset + ) + f.seek(offset) + cursor.offset = f.tell() + else: + raise exc - @convert_api_errors_decorator(dbx_path_arg=1) def remove(self, dbx_path: str, **kwargs) -> files.Metadata: """ Removes a file / folder from Dropbox. @@ -628,13 +598,11 @@ def remove(self, dbx_path: str, **kwargs) -> files.Metadata: :param kwargs: Keyword arguments for Dropbox SDK files_delete_v2. :returns: Metadata of deleted item. 
""" - # try to remove file (response will be metadata, probably) - res = self.dbx.files_delete_v2(dbx_path, **kwargs) - md = res.metadata - return md + with convert_api_errors(dbx_path=dbx_path): + res = self.dbx.files_delete_v2(dbx_path, **kwargs) + return res.metadata - @convert_api_errors_decorator() def remove_batch( self, entries: List[Tuple[str, str]], batch_size: int = 900 ) -> List[Union[files.Metadata, MaestralApiError]]: @@ -660,7 +628,9 @@ def remove_batch( for chunk in chunks(entries, n=batch_size): arg = [files.DeleteArg(e[0], e[1]) for e in chunk] - res = self.dbx.files_delete_batch(arg) + + with convert_api_errors(): + res = self.dbx.files_delete_batch(arg) if res.is_complete(): batch_res = res.get_complete() @@ -670,13 +640,16 @@ def remove_batch( async_job_id = res.get_async_job_id() time.sleep(1.0) - res = self.dbx.files_delete_batch_check(async_job_id) + + with convert_api_errors(): + res = self.dbx.files_delete_batch_check(async_job_id) check_interval = round(len(chunk) / 100, 1) while res.is_in_progress(): time.sleep(check_interval) - res = self.dbx.files_delete_batch_check(async_job_id) + with convert_api_errors(): + res = self.dbx.files_delete_batch_check(async_job_id) if res.is_complete(): batch_res = res.get_complete() @@ -707,7 +680,6 @@ def remove_batch( return result_list - @convert_api_errors_decorator(dbx_path_arg=2) def move(self, dbx_path: str, new_path: str, **kwargs) -> files.Metadata: """ Moves / renames files or folders on Dropbox. @@ -717,18 +689,17 @@ def move(self, dbx_path: str, new_path: str, **kwargs) -> files.Metadata: :param kwargs: Keyword arguments for Dropbox SDK files_move_v2. :returns: Metadata of moved item. 
""" - res = self.dbx.files_move_v2( - dbx_path, - new_path, - allow_shared_folder=True, - allow_ownership_transfer=True, - **kwargs, - ) - md = res.metadata - return md + with convert_api_errors(dbx_path=new_path): + res = self.dbx.files_move_v2( + dbx_path, + new_path, + allow_shared_folder=True, + allow_ownership_transfer=True, + **kwargs, + ) + return res.metadata - @convert_api_errors_decorator(dbx_path_arg=1) def make_dir(self, dbx_path: str, **kwargs) -> files.FolderMetadata: """ Creates a folder on Dropbox. @@ -737,12 +708,11 @@ def make_dir(self, dbx_path: str, **kwargs) -> files.FolderMetadata: :param kwargs: Keyword arguments for Dropbox SDK files_create_folder_v2. :returns: Metadata of created folder. """ - res = self.dbx.files_create_folder_v2(dbx_path, **kwargs) - md = res.metadata - return md + with convert_api_errors(dbx_path=dbx_path): + res = self.dbx.files_create_folder_v2(dbx_path, **kwargs) + return res.metadata - @convert_api_errors_decorator() def make_dir_batch( self, dbx_paths: List[str], batch_size: int = 900, **kwargs ) -> List[Union[files.Metadata, MaestralApiError]]: @@ -761,36 +731,38 @@ def make_dir_batch( entries = [] result_list = [] - # up two ~ 1,000 entries allowed per batch according to - # https://www.dropbox.com/developers/reference/data-ingress-guide - for chunk in chunks(dbx_paths, n=batch_size): - res = self.dbx.files_create_folder_batch(chunk, **kwargs) - if res.is_complete(): - batch_res = res.get_complete() - entries.extend(batch_res.entries) - elif res.is_async_job_id(): - async_job_id = res.get_async_job_id() - - time.sleep(1.0) - res = self.dbx.files_create_folder_batch_check(async_job_id) - - check_interval = round(len(chunk) / 100, 1) - - while res.is_in_progress(): - time.sleep(check_interval) - res = self.dbx.files_create_folder_batch_check(async_job_id) + with convert_api_errors(): + # up two ~ 1,000 entries allowed per batch according to + # https://www.dropbox.com/developers/reference/data-ingress-guide + for 
chunk in chunks(dbx_paths, n=batch_size): + res = self.dbx.files_create_folder_batch(chunk, **kwargs) if res.is_complete(): batch_res = res.get_complete() entries.extend(batch_res.entries) + elif res.is_async_job_id(): + async_job_id = res.get_async_job_id() - elif res.is_failed(): - error = res.get_failed() - if error.is_too_many_files(): - res_list = self.make_dir_batch( - chunk, batch_size=round(batch_size / 2), **kwargs - ) - result_list.extend(res_list) + time.sleep(1.0) + res = self.dbx.files_create_folder_batch_check(async_job_id) + + check_interval = round(len(chunk) / 100, 1) + + while res.is_in_progress(): + time.sleep(check_interval) + res = self.dbx.files_create_folder_batch_check(async_job_id) + + if res.is_complete(): + batch_res = res.get_complete() + entries.extend(batch_res.entries) + + elif res.is_failed(): + error = res.get_failed() + if error.is_too_many_files(): + res_list = self.make_dir_batch( + chunk, batch_size=round(batch_size / 2), **kwargs + ) + result_list.extend(res_list) for i, entry in enumerate(entries): if entry.is_success(): @@ -807,7 +779,6 @@ def make_dir_batch( return result_list - @convert_api_errors_decorator(dbx_path_arg=1) def get_latest_cursor( self, dbx_path: str, include_non_downloadable_files: bool = False, **kwargs ) -> str: @@ -823,16 +794,16 @@ def get_latest_cursor( dbx_path = "" if dbx_path == "/" else dbx_path - res = self.dbx.files_list_folder_get_latest_cursor( - dbx_path, - include_non_downloadable_files=include_non_downloadable_files, - recursive=True, - **kwargs, - ) + with convert_api_errors(dbx_path=dbx_path): + res = self.dbx.files_list_folder_get_latest_cursor( + dbx_path, + include_non_downloadable_files=include_non_downloadable_files, + recursive=True, + **kwargs, + ) return res.cursor - @convert_api_errors_decorator(dbx_path_arg=1) def list_folder( self, dbx_path: str, @@ -841,7 +812,9 @@ def list_folder( **kwargs, ) -> files.ListFolderResult: """ - Lists the contents of a folder on Dropbox. 
+ Lists the contents of a folder on Dropbox. Similar to + :meth:`list_folder_iterator` but returns all entries in a single + :class:`files.ListFolderResult` instance. :param dbx_path: Path of folder on Dropbox. :param max_retries_on_timeout: Number of times to try again if Dropbox servers @@ -853,44 +826,14 @@ def list_folder( :returns: Content of given folder. """ - dbx_path = "" if dbx_path == "/" else dbx_path - - results = [] - - res = self.dbx.files_list_folder( + iterator = self.list_folder_iterator( dbx_path, - include_non_downloadable_files=include_non_downloadable_files, + max_retries_on_timeout, + include_non_downloadable_files, **kwargs, ) - results.append(res) - - idx = 0 - - while results[-1].has_more: - - idx += len(results[-1].entries) - logger.info(f"Indexing {idx}...") - - attempt = 0 - - while True: - try: - more_results = self.dbx.files_list_folder_continue( - results[-1].cursor - ) - results.append(more_results) - break - except requests.exceptions.ReadTimeout: - attempt += 1 - if attempt <= max_retries_on_timeout: - time.sleep(5.0) - else: - raise - if idx > 0: - logger.info(IDLE) - - return self.flatten_results(results) + return self.flatten_results(list(iterator), attribute_name="entries") def list_folder_iterator( self, @@ -900,11 +843,10 @@ def list_folder_iterator( **kwargs, ) -> Iterator[files.ListFolderResult]: """ - Lists the contents of a folder on Dropbox. Does the same as :meth:`list_folder` - but returns an iterator yielding :class:`files.ListFolderResult` instances. The - number of entries returned in each iteration corresponds to the number of - entries returned by a single Dropbox API call and will be typically around 500. - This is useful to save memory when indexing a large number of items. + Lists the contents of a folder on Dropbox. Returns an iterator yielding + :class:`files.ListFolderResult` instances. 
The number of entries returned in + each iteration corresponds to the number of entries returned by a single Dropbox + API call and will be typically around 500. :param dbx_path: Path of folder on Dropbox. :param max_retries_on_timeout: Number of times to try again if Dropbox servers @@ -928,13 +870,8 @@ def list_folder_iterator( yield res - idx = 0 - while res.has_more: - idx += len(res.entries) - logger.info(f"Indexing {idx}...") - attempt = 0 while True: @@ -949,31 +886,6 @@ def list_folder_iterator( else: raise - if idx > 0: - logger.info(IDLE) - - @staticmethod - def flatten_results( - results: List[files.ListFolderResult], - ) -> files.ListFolderResult: - """ - Flattens a list of :class:`files.ListFolderResult` instances to a single - instance with the cursor of the last entry in the list. - - :param results: List of :class:`files.ListFolderResult` instances. - :returns: Flattened list folder result. - """ - entries_all = [] - for result in results: - entries_all += result.entries - - results_flattened = files.ListFolderResult( - entries=entries_all, cursor=results[-1].cursor, has_more=False - ) - - return results_flattened - - @convert_api_errors_decorator() def wait_for_remote_changes(self, last_cursor: str, timeout: int = 40) -> bool: """ Waits for remote changes since ``last_cursor``. 
Call this method after @@ -991,47 +903,42 @@ def wait_for_remote_changes(self, last_cursor: str, timeout: int = 40) -> bool: time_to_backoff = max(self._backoff_until - time.time(), 0) time.sleep(time_to_backoff) - result = self.dbx.files_list_folder_longpoll(last_cursor, timeout=timeout) + with convert_api_errors(): + res = self.dbx.files_list_folder_longpoll(last_cursor, timeout=timeout) # keep track of last longpoll, back off if requested by SDK - if result.backoff: - self._backoff_until = time.time() + result.backoff + 5.0 + if res.backoff: + logger.debug("Backoff requested for %s sec", res.backoff) + self._backoff_until = time.time() + res.backoff + 5.0 else: self._backoff_until = 0 - return result.changes + return res.changes - @convert_api_errors_decorator() def list_remote_changes(self, last_cursor: str) -> files.ListFolderResult: """ - Lists changes to remote Dropbox since ``last_cursor``. Call this after - :meth:`wait_for_remote_changes` returns ``True``. + Lists changes to remote Dropbox since ``last_cursor``. Same as + :meth:`list_remote_changes_iterator` but fetches all changes first and returns + a single :class:`files.ListFolderResult`. This may be useful if you want to + fetch all changes before starting to process them. :param last_cursor: Last to cursor to compare for changes. :returns: Remote changes since given cursor. """ - results = [self.dbx.files_list_folder_continue(last_cursor)] - - while results[-1].has_more: - more_results = self.dbx.files_list_folder_continue(results[-1].cursor) - results.append(more_results) - - # combine all results into one - results = self.flatten_results(results) - - return results + iterator = self.list_remote_changes_iterator(last_cursor) + return self.flatten_results(list(iterator), attribute_name="entries") def list_remote_changes_iterator( self, last_cursor: str ) -> Iterator[files.ListFolderResult]: """ - Lists changes to the remote Dropbox since ``last_cursor``. 
Does the same as - :meth:`list_remote_changes` but returns an iterator yielding - :class:`files.ListFolderResult` instances. The number of entries returned in - each iteration corresponds to the number of entries returned by a single Dropbox - API call and will be typically around 500. This is useful to save memory when - indexing a large number of items. + Lists changes to the remote Dropbox since ``last_cursor``. Returns an iterator + yielding :class:`files.ListFolderResult` instances. The number of entries + returned in each iteration corresponds to the number of entries returned by a + single Dropbox API call and will be typically around 500. + + Call this after :meth:`wait_for_remote_changes` returns ``True``. :param last_cursor: Last to cursor to compare for changes. :returns: Iterator over remote changes since given cursor. @@ -1047,6 +954,123 @@ def list_remote_changes_iterator( result = self.dbx.files_list_folder_continue(result.cursor) yield result + def create_shared_link( + self, + dbx_path: str, + visibility: sharing.RequestedVisibility = sharing.RequestedVisibility.public, + password: Optional[str] = None, + expires: Optional[datetime] = None, + **kwargs, + ) -> sharing.SharedLinkMetadata: + """ + Creates a shared link for the given path. Some options are only available for + Professional and Business accounts. Note that the requested visibility as access + level for the link may not be granted, depending on the Dropbox folder or team + settings. Check the returned link metadata to verify the visibility and access + level. + + :param dbx_path: Dropbox path to file or folder to share. + :param visibility: The visibility of the shared link. Can be public, team-only, + or password protected. In case of the latter, the password argument must be + given. Only available for Professional and Business accounts. + :param password: Password to protect shared link. 
Is required if visibility + is set to password protected and will be ignored otherwise + :param expires: Expiry time for shared link. Only available for Professional and + Business accounts. + :param kwargs: Additional keyword arguments for the + :class:`dropbox.sharing.SharedLinkSettings`. + :returns: Metadata for shared link. + """ + + if visibility.is_password() and not password: + raise MaestralApiError( + "Invalid shared link setting", + "Password is required to share a password-protected link", + ) + + if not visibility.is_password(): + password = None + + # convert timestamp to utc time if not naive + if expires is not None: + has_timezone = expires.tzinfo and expires.tzinfo.utcoffset(expires) + if has_timezone: + expires.astimezone(timezone.utc) + + settings = sharing.SharedLinkSettings( + requested_visibility=visibility, + link_password=password, + expires=expires, + **kwargs, + ) + + with convert_api_errors(dbx_path=dbx_path): + res = self.dbx.sharing_create_shared_link_with_settings(dbx_path, settings) + + return res + + def revoke_shared_link(self, url: str) -> None: + """ + Revokes a shared link. + + :param url: URL to revoke. + """ + with convert_api_errors(): + self.dbx.sharing_revoke_shared_link(url) + + def list_shared_links( + self, dbx_path: Optional[str] = None + ) -> sharing.ListSharedLinksResult: + """ + Lists all shared links for a given Dropbox path (file or folder). If no path is + given, list all shared links for the account, up to a maximum of 1,000 links. + + :param dbx_path: Dropbox path to file or folder. + :returns: Shared links for a path, including any shared links for parents + through which this path is accessible. 
+ """ + + results = [] + + with convert_api_errors(dbx_path=dbx_path): + res = self.dbx.sharing_list_shared_links(dbx_path) + results.append(res) + + while results[-1].has_more: + res = self.dbx.sharing_list_shared_links(dbx_path, results[-1].cursor) + results.append(res) + + return self.flatten_results(results, attribute_name="links") + + @staticmethod + def flatten_results( + results: List[PaginationResultType], attribute_name: str + ) -> PaginationResultType: + """ + Flattens a list of Dropbox API results from a pagination to a single result with + the cursor of the last entry in the list. + + :param results: List of :results to flatten. + :param attribute_name: Name of attribute to flatten. + :returns: Flattened result. + """ + + all_entries = [] + + for result in results: + all_entries += getattr(result, attribute_name) + + kwargs = { + attribute_name: all_entries, + "cursor": results[-1].cursor, + "has_more": False, + } + + result_cls = type(results[0]) + results_flattened = result_cls(**kwargs) + + return results_flattened + # ==== conversion functions to generate error messages and types ======================= @@ -1128,58 +1152,6 @@ def os_to_maestral_error( return maestral_exc -def fswatch_to_maestral_error(exc: OSError) -> LocalError: - """ - Converts a :class:`OSError` when starting a file system watch to a - :class:`MaestralApiError` and tries to add a reasonably informative error title and - message. Error messages and types differ from :func:`os_to_maestral_error`. - - :param exc: Python Exception. - :returns: :class:`MaestralApiError` instance or :class:`OSError` instance. - """ - - error_number = getattr(exc, "errno", -1) - err_cls: Type[MaestralApiError] - - if isinstance(exc, NotADirectoryError): - title = "Dropbox folder has been moved or deleted" - msg = ( - "Please move the Dropbox folder back to its original location " - "or restart Maestral to set up a new folder." 
- ) - - err_cls = NoDropboxDirError - elif isinstance(exc, PermissionError): - title = "Insufficient permissions for Dropbox folder" - msg = ( - "Please ensure that you have read and write permissions " - "for the selected Dropbox folder." - ) - err_cls = InsufficientPermissionsError - - elif error_number in (errno.ENOSPC, errno.EMFILE): - title = "Inotify limit reached" - if error_number == errno.ENOSPC: - new_config = "fs.inotify.max_user_watches=524288" - else: - new_config = "fs.inotify.max_user_instances=512" - msg = ( - "Changes to your Dropbox folder cannot be monitored because it " - "contains too many items. Please increase the inotify limit in " - "your system by adding the following line to /etc/sysctl.conf: " - + new_config - ) - err_cls = InotifyError - - else: - return exc - - maestral_exc = err_cls(title, msg) - maestral_exc.__cause__ = exc - - return maestral_exc - - def dropbox_to_maestral_error( exc: exceptions.DropboxException, dbx_path: Optional[str] = None, @@ -1195,7 +1167,10 @@ def dropbox_to_maestral_error( :returns: :class:`MaestralApiError` instance. """ - err_cls: Type[MaestralApiError] + title = "An unexpected error occurred" + text = "Please contact the developer with the traceback information from the logs." + err_cls = MaestralApiError + # ---- Dropbox API Errors ---------------------------------------------------------- if isinstance(exc, exceptions.ApiError): @@ -1300,7 +1275,6 @@ def dropbox_to_maestral_error( elif error.is_properties_error(): # this is a programming error in maestral text = "Invalid property group provided." - err_cls = MaestralApiError else: text = "Please check the logs or traceback for more information" err_cls = SyncError @@ -1310,11 +1284,9 @@ def dropbox_to_maestral_error( if error.is_concurrent_session_close_not_allowed(): # this is a programming error in maestral text = "Can not start a closed concurrent upload session." 
- err_cls = MaestralApiError elif error.is_concurrent_session_data_not_allowed(): # this is a programming error in maestral text = "Uploading data not allowed when starting concurrent upload session." - err_cls = MaestralApiError else: text = "Please check the logs or traceback for more information" err_cls = SyncError @@ -1330,7 +1302,6 @@ def dropbox_to_maestral_error( elif error.is_properties_error(): # this is a programming error in maestral text = "Invalid property group provided." - err_cls = MaestralApiError elif error.is_too_many_write_operations(): text = ( "There are too many write operations happening in your " @@ -1362,12 +1333,6 @@ def dropbox_to_maestral_error( if error.is_path(): lookup_error = error.get_path() text, err_cls = _get_lookup_error_msg(lookup_error) - else: - text = ( - "Please contact the developer with the traceback " - "information from the logs." - ) - err_cls = MaestralApiError elif isinstance(error, files.ListFolderContinueError): title = "Could not list folder contents" @@ -1380,12 +1345,6 @@ def dropbox_to_maestral_error( "Maestral's index to re-sync your Dropbox." ) err_cls = CursorResetError - else: - text = ( - "Please contact the developer with the traceback " - "information from the logs." - ) - err_cls = MaestralApiError elif isinstance(error, files.ListFolderLongpollError): title = "Could not get Dropbox changes" @@ -1395,12 +1354,6 @@ def dropbox_to_maestral_error( "Maestral's index to re-sync your Dropbox." ) err_cls = CursorResetError - else: - text = ( - "Please contact the developer with the traceback " - "information from the logs." - ) - err_cls = MaestralApiError elif isinstance(error, async_.PollError): @@ -1414,13 +1367,9 @@ def dropbox_to_maestral_error( ) err_cls = DropboxServerError else: - # Other tags include invalid_async_job_id. Neither should occur in our - # SDK usage. - text = ( - "Please contact the developer with the traceback " - "information from the logs." 
- ) - err_cls = MaestralApiError + # Other tags include invalid_async_job_id. + # Neither should occur in our SDK usage. + pass elif isinstance(error, files.ListRevisionsError): @@ -1429,12 +1378,6 @@ def dropbox_to_maestral_error( if error.is_path(): lookup_error = error.get_path() text, err_cls = _get_lookup_error_msg(lookup_error) - else: - text = ( - "Please contact the developer with the traceback " - "information from the logs." - ) - err_cls = MaestralApiError elif isinstance(error, files.RestoreError): @@ -1442,19 +1385,13 @@ def dropbox_to_maestral_error( if error.is_invalid_revision(): text = "Invalid revision." - err_cls = PathError + err_cls = NotFoundError elif error.is_path_lookup(): lookup_error = error.get_path_lookup() text, err_cls = _get_lookup_error_msg(lookup_error) elif error.is_path_write(): write_error = error.get_path_write() text, err_cls = _get_write_error_msg(write_error) - else: - text = ( - "Please contact the developer with the traceback " - "information from the logs." - ) - err_cls = MaestralApiError elif isinstance(error, files.GetMetadataError): title = "Could not get metadata" @@ -1462,12 +1399,6 @@ def dropbox_to_maestral_error( if error.is_path(): lookup_error = error.get_path() text, err_cls = _get_lookup_error_msg(lookup_error) - else: - text = ( - "Please contact the developer with the traceback " - "information from the logs." - ) - err_cls = MaestralApiError elif isinstance(error, users.GetAccountError): title = "Could not get account info" @@ -1478,20 +1409,55 @@ def dropbox_to_maestral_error( "exist or has been deleted" ) err_cls = InvalidDbidError - else: - text = ( - "Please contact the developer with the traceback " - "information from the logs." - ) - err_cls = MaestralApiError - else: - err_cls = MaestralApiError - title = "An unexpected error occurred" - text = ( - "Please contact the developer with the traceback " - "information from the logs." 
- ) + elif isinstance(error, sharing.CreateSharedLinkWithSettingsError): + title = "Could not create shared link" + + if error.is_access_denied(): + text = "You do not have access to create shared links for this path." + err_cls = InsufficientPermissionsError + elif error.is_email_not_verified(): + text = "Please verify you email address before creating shared links" + err_cls = SharedLinkError + elif error.is_path(): + lookup_error = error.get_path() + text, err_cls = _get_lookup_error_msg(lookup_error) + elif error.is_settings_error(): + settings_error = error.get_settings_error() + err_cls = SharedLinkError + if settings_error.is_invalid_settings(): + text = "Please check if the settings are valid." + elif settings_error.is_not_authorized(): + text = "Basic accounts do not support passwords or expiry dates." + elif error.is_shared_link_already_exists(): + text = "The shared link already exists." + err_cls = SharedLinkError + + elif isinstance(error, sharing.RevokeSharedLinkError): + title = "Could not revoke shared link" + + if error.is_shared_link_not_found(): + text = "The given link does not exist." + err_cls = NotFoundError + elif error.is_shared_link_access_denied(): + text = "You do not have access to revoke the shared link." + err_cls = InsufficientPermissionsError + elif error.is_shared_link_malformed(): + text = "The shared link is malformed." + err_cls = SharedLinkError + elif error.is_unsupported_link_type(): + text = "The link type is not supported." + err_cls = SharedLinkError + + elif isinstance(error, sharing.ListSharedLinksError): + title = "Could not list shared links" + + if error.is_path(): + lookup_error = error.get_path() + text, err_cls = _get_lookup_error_msg(lookup_error) + elif error.is_reset(): + text = "Please try again later." 
+ err_cls = SharedLinkError # ---- Authentication errors ------------------------------------------------------- elif isinstance(exc, exceptions.AuthError): @@ -1519,19 +1485,12 @@ def dropbox_to_maestral_error( # Other tags are invalid_select_admin, invalid_select_user, # missing_scope, route_access_denied. Neither should occur in our SDK # usage. - err_cls = MaestralApiError - title = "An unexpected error occurred" - text = ( - "Please contact the developer with the traceback " - "information from the logs." - ) + pass else: err_cls = DropboxAuthError title = "Authentication error" - text = ( - "Please check if you can log into your account on the Dropbox website." - ) + text = "Please check if you can log in on the Dropbox website." # ---- OAuth2 flow errors ---------------------------------------------------------- elif isinstance(exc, requests.HTTPError): @@ -1559,20 +1518,10 @@ def dropbox_to_maestral_error( # ---- Internal Dropbox error ------------------------------------------------------ elif isinstance(exc, exceptions.InternalServerError): err_cls = DropboxServerError - title = "Could not sync file or folder" - text = ( - "Something went wrong with the job on Dropbox’s end. Please " - "verify on the Dropbox website if the job succeeded and try " - "again if it failed." - ) - - # ---- Everything else ------------------------------------------------------------- - else: - err_cls = MaestralApiError - title = "An unexpected error occurred" + title = "Dropbox server error" text = ( - "Please contact the developer with the traceback " - "information from the logs." + "Something went wrong on Dropbox’s end. Please check on status.dropbox.com " + "if their services are up and running and try again later." 
) maestral_exc = err_cls(title, text, dbx_path=dbx_path, local_path=local_path) diff --git a/src/maestral/config/main.py b/src/maestral/config/main.py index efb2578ea..03b9d7304 100644 --- a/src/maestral/config/main.py +++ b/src/maestral/config/main.py @@ -4,7 +4,6 @@ existing config or state instances for a specified config_name. """ -import copy import logging import threading from typing import Dict @@ -21,12 +20,11 @@ # Defaults # ============================================================================= -DEFAULTS_CONFIG = [ +DEFAULTS_CONFIG: DefaultsType = [ ( "main", { "path": "", # dropbox folder location - "default_dir_name": "Dropbox (Maestral)", # default dropbox folder name "excluded_items": [], # files and folders excluded from sync }, ), @@ -39,24 +37,25 @@ ( "app", { - "notification_level": 15, # desktop notification level, default to FILECHANGE - "log_level": 20, # log level for journal and file, default to INFO - "update_notification_interval": 60 * 60 * 24 * 7, # default to weekly - "analytics": False, # automatic errors reports with bugsnag, default to disabled + "notification_level": 15, # desktop notification level, default: FILECHANGE + "log_level": 20, # log level for journal and file, default: INFO + "update_notification_interval": 60 * 60 * 24 * 7, # default: weekly "keyring": "automatic", # keychain backend to use for credential storage }, ), ( "sync", { - "reindex_interval": 60 * 60 * 24 * 7, # default to weekly - "max_cpu_percent": 20.0, # max usage target per cpu core, default to 20% - "keep_history": 60 * 60 * 24 * 7, # default one week + "reindex_interval": 60 * 60 * 24 * 14, # default: every fortnight + "max_cpu_percent": 20.0, # max usage target per cpu core, default: 20% + "keep_history": 60 * 60 * 24 * 7, # default: one week + "upload": True, # if download sync is enabled + "download": True, # if upload sync is enabled }, ), ] -DEFAULTS_STATE = [ +DEFAULTS_STATE: DefaultsType = [ ( "account", # account state, periodically 
updated from dropbox servers { @@ -83,6 +82,8 @@ "cursor": "", # remote cursor: represents last state synced from dropbox "lastsync": 0.0, # local cursor: time-stamp of last upload "last_reindex": 0.0, # time-stamp of full last reindexing + "indexing_counter": 0, # counter for indexing progress between restarts + "did_finish_indexing": False, # indicates completed indexing "upload_errors": [], # failed uploads to retry on next sync "download_errors": [], # failed downloads to retry on next sync "pending_uploads": [], # incomplete uploads to retry on next sync @@ -98,18 +99,50 @@ # or if you want to *rename* options, then you need to do a MAJOR update in # version, e.g. from 3.0.0 to 4.0.0 # 3. You don't need to touch this value if you're just adding a new option -CONF_VERSION = "13.0.0" +CONF_VERSION = "15.0.0" # ============================================================================= # Factories # ============================================================================= -_config_instances: Dict[str, UserConfig] = dict() -_state_instances: Dict[str, UserConfig] = dict() +def _get_conf( + config_name: str, + config_path: str, + defaults: DefaultsType, + registry: Dict[str, UserConfig], +): + + try: + conf = registry[config_name] + except KeyError: + + try: + conf = UserConfig( + config_path, + defaults=defaults, + version=CONF_VERSION, + backup=True, + remove_obsolete=True, + ) + except OSError: + conf = UserConfig( + config_path, + defaults=defaults, + version=CONF_VERSION, + backup=True, + remove_obsolete=True, + load=False, + ) + + registry[config_name] = conf + + return conf + + +_config_instances: Dict[str, UserConfig] = dict() _config_lock = threading.Lock() -_state_lock = threading.Lock() def MaestralConfig(config_name: str) -> UserConfig: @@ -124,46 +157,12 @@ def MaestralConfig(config_name: str) -> UserConfig: global _config_instances with _config_lock: + config_path = get_conf_path(CONFIG_DIR_NAME, f"{config_name}.ini") + return 
_get_conf(config_name, config_path, DEFAULTS_CONFIG, _config_instances) - try: - return _config_instances[config_name] - except KeyError: - - defaults: DefaultsType = copy.deepcopy(DEFAULTS_CONFIG) # type: ignore - - # set default dir name according to config - for sec, options in defaults: - if sec == "main": - options["default_dir_name"] = f"Dropbox ({config_name.title()})" - - config_path = get_conf_path(CONFIG_DIR_NAME) - - try: - conf = UserConfig( - config_path, - config_name, - defaults=defaults, - version=CONF_VERSION, - backup=True, - remove_obsolete=True, - ) - except OSError: - conf = UserConfig( - config_path, - config_name, - defaults=defaults, - version=CONF_VERSION, - backup=True, - remove_obsolete=True, - load=False, - ) - - # adapt folder name to config - dirname = f"Dropbox ({config_name.title()})" - conf.set_default("main", "default_dir_name", dirname) - - _config_instances[config_name] = conf - return conf + +_state_instances: Dict[str, UserConfig] = dict() +_state_lock = threading.Lock() def MaestralState(config_name: str) -> UserConfig: @@ -178,35 +177,5 @@ def MaestralState(config_name: str) -> UserConfig: global _state_instances with _state_lock: - - try: - return _state_instances[config_name] - except KeyError: - state_path = get_data_path(CONFIG_DIR_NAME) - - defaults: DefaultsType = copy.deepcopy(DEFAULTS_STATE) # type: ignore - - try: - state = UserConfig( - state_path, - config_name, - defaults=defaults, - version=CONF_VERSION, - backup=True, - remove_obsolete=True, - suffix=".state", - ) - except OSError: - state = UserConfig( - state_path, - config_name, - defaults=defaults, - version=CONF_VERSION, - backup=True, - remove_obsolete=True, - suffix=".state", - load=False, - ) - - _state_instances[config_name] = state - return state + state_path = get_data_path(CONFIG_DIR_NAME, f"{config_name}.state") + return _get_conf(config_name, state_path, DEFAULTS_STATE, _state_instances) diff --git a/src/maestral/config/user.py 
b/src/maestral/config/user.py index cd3d0808d..48280fc2d 100644 --- a/src/maestral/config/user.py +++ b/src/maestral/config/user.py @@ -48,12 +48,15 @@ class DefaultsConfig(cp.ConfigParser): _lock = RLock() - def __init__(self, path: str, name: str, suffix: str) -> None: + def __init__(self, path: str) -> None: super(DefaultsConfig, self).__init__(interpolation=None) - self._path = path - self._name = name - self._suffix = suffix + dirname, basename = osp.split(path) + filename, ext = osp.splitext(basename) + + self._path = dirname + self._name = filename + self._suffix = ext if not osp.isdir(osp.dirname(self._path)): os.makedirs(osp.dirname(self._path)) @@ -147,16 +150,14 @@ class UserConfig(DefaultsConfig): def __init__( self, path: str, - name: str, defaults: InputDefaultsType = None, load: bool = True, version: str = "0.0.0", backup: bool = False, remove_obsolete: bool = False, - suffix: str = ".ini", ) -> None: """UserConfig class, based on ConfigParser.""" - super(UserConfig, self).__init__(path=path, name=name, suffix=suffix) + super(UserConfig, self).__init__(path=path) self._load = load self._version = self._check_version(version) @@ -315,8 +316,8 @@ def _load_old_defaults(self, old_version: str) -> cp.ConfigParser: def _save_new_defaults(self, defaults: DefaultsType) -> None: """Save new defaults.""" - path, name = self.get_defaults_path_name_from_version() - new_defaults = DefaultsConfig(path=path, name=name, suffix=self._suffix) + path = self.get_defaults_fpath_from_version() + new_defaults = DefaultsConfig(path=path) if not osp.isfile(new_defaults.get_config_fpath()): new_defaults.set_defaults(defaults) new_defaults.save() @@ -492,15 +493,8 @@ def get(self, section, option, default: Any = NoDefault) -> Any: # type: ignore if isinstance(default_value, str): value = raw_value - elif isinstance(default_value, bool): - value = ast.literal_eval(raw_value) - elif isinstance(default_value, float): - value = float(raw_value) - elif isinstance(default_value, 
int): - value = int(raw_value) else: try: - # Lists, tuples, None, ... value = ast.literal_eval(raw_value) except (SyntaxError, ValueError): value = raw_value @@ -540,18 +534,15 @@ def set(self, section: str, option: str, value: Any, save: bool = True) -> None: default_value = value self.set_default(section, option, default_value) - if isinstance(default_value, bool): - value = bool(value) - elif isinstance(default_value, float): + if isinstance(default_value, float) and isinstance(value, int): value = float(value) - elif isinstance(default_value, int): - value = int(value) - # elif isinstance(default_value, list): - # value = list(value) - # elif isinstance(default_value, tuple): - # value = tuple(value) - elif not isinstance(default_value, str): - value = repr(value) + + if type(default_value) is not type(value): + raise ValueError( + f"Inconsistent config type for [{section}][{option}]. " + f"Expected {default_value.__class__.__name__} but " + f"got {value.__class__.__name__}." + ) self._set(section, option, value) if save: diff --git a/src/maestral/constants.py b/src/maestral/constants.py index a58847c5e..26900053c 100644 --- a/src/maestral/constants.py +++ b/src/maestral/constants.py @@ -60,9 +60,10 @@ # state messages IDLE = "Up to date" SYNCING = "Syncing..." -PAUSED = "Syncing paused" STOPPED = "Syncing stopped" -DISCONNECTED = "Connecting..." +CONNECTED = "Connected" +DISCONNECTED = "Connection lost" +CONNECTING = "Connecting..." 
SYNC_ERROR = "Sync error" ERROR = "Fatal error" @@ -87,7 +88,6 @@ class FileStatus(Enum): FROZEN = BRIEFCASE or getattr(sys, "frozen", False) # keys -BUGSNAG_API_KEY = "081c05e2bf9730d5f55bc35dea15c833" DROPBOX_APP_KEY = "2jmbq42w7vof78h" # urls diff --git a/src/maestral/daemon.py b/src/maestral/daemon.py index 4b3299663..cb6a88128 100644 --- a/src/maestral/daemon.py +++ b/src/maestral/daemon.py @@ -9,18 +9,17 @@ import os import time import signal -import traceback import enum import subprocess -from shlex import quote import threading import fcntl import struct import tempfile import logging import warnings +from shlex import quote from typing import Optional, Any, Union, Tuple, Dict, Iterable, Type, TYPE_CHECKING -from types import TracebackType, FrameType +from types import TracebackType # external imports import Pyro5 # type: ignore @@ -48,6 +47,7 @@ "sockpath_for_config", "lockpath_for_config", "is_running", + "set_executable", "start_maestral_daemon", "start_maestral_daemon_process", "stop_maestral_daemon_process", @@ -72,6 +72,24 @@ URI = "PYRO:maestral.{0}@{1}" Pyro5.config.THREADPOOL_SIZE_MIN = 2 +if FROZEN and IS_MACOS: + EXECUTABLE = [sys.executable, "--run-python", "-OO"] +else: + EXECUTABLE = [sys.executable, "-OO"] + + +def set_executable(executable: str, *argv: str) -> None: + """ + Sets the path of the Python executable to use when starting the daemon. By default + :obj:`sys.executable` is used. Can be used when embedding the daemon. + + :param executable: Path to custom Python executable. + :param argv: Any command line arguments to be injected before the daemon startup + command. By default, "-OO" will be used. 
+ """ + global EXECUTABLE + EXECUTABLE = [executable, *argv] + class Stop(enum.Enum): """Enumeration of daemon exit results""" @@ -282,10 +300,6 @@ def locking_pid(self) -> Optional[int]: # ==== helpers for daemon management =================================================== -def _sigterm_handler(signal_number: int, frame: FrameType) -> None: - sys.exit() - - def _send_term(pid: int) -> None: try: os.kill(pid, signal.SIGTERM) @@ -342,27 +356,33 @@ def is_running(config_name: str) -> bool: return maestral_lock(config_name).locked() -def _wait_for_startup(config_name: str, timeout: float = 8) -> Start: +def _wait_for_startup(config_name: str, timeout: float) -> None: """ - Checks if we can communicate with the maestral daemon. Returns :attr:`Start.Ok` if - communication succeeds within timeout, :attr:`Start.Failed` otherwise. + Waits until we can communicate with the maestral daemon for ``config_name``. + + :param config_name: Configuration to connect to. + :param timeout: Timeout it seconds until we raise an error. + :raises CommunicationError: if we cannot communicate with the daemon within the + given timeout. """ sock_name = sockpath_for_config(config_name) maestral_daemon = Proxy(URI.format(config_name, "./u:" + sock_name)) - while timeout > 0: + t0 = time.time() + + while True: try: maestral_daemon._pyroBind() - return Start.Ok - except Exception: - time.sleep(0.2) - timeout -= 0.2 + return + except Exception as exc: + if time.time() - t0 > timeout: + raise exc + else: + time.sleep(0.2) finally: maestral_daemon._pyroRelease() - return Start.Failed - # ==== main functions to manage daemon ================================================= @@ -383,29 +403,33 @@ def start_maestral_daemon( :param config_name: The name of the Maestral configuration to use. :param log_to_stdout: If ``True``, write logs to stdout. - :param start_sync: If ``True``, start syncing once the daemon has started. + :param start_sync: If ``True``, start syncing once the daemon has started. 
If the + ``start_sync`` call fails, an error will be logged but not raised. :raises RuntimeError: if a daemon for the given ``config_name`` is already running. """ import asyncio + from . import notify from .main import Maestral + if log_to_stdout: + logger.setLevel(logging.DEBUG) + if threading.current_thread() is not threading.main_thread(): raise RuntimeError("Must run daemon in main thread") # acquire PID lock file lock = maestral_lock(config_name) - if not lock.acquire(): + if lock.acquire(): + logger.debug("Acquired daemon lock") + else: raise RuntimeError("Maestral daemon is already running") # Nice ourselves to give other processes priority. We will likely only # have significant CPU usage in case of many concurrent downloads. os.nice(10) - # catch sigterm and shut down gracefully - signal.signal(signal.SIGTERM, _sigterm_handler) - # integrate with CFRunLoop in macOS, only works in main thread if sys.platform == "darwin": @@ -466,7 +490,7 @@ async def periodic_watchdog() -> None: # get socket for config name sockpath = sockpath_for_config(config_name) - logger.debug(f"Socket path for '{config_name}' daemon: '{sockpath}'") + logger.debug(f"Socket path: '{sockpath}'") # clean up old socket try: @@ -483,15 +507,19 @@ async def periodic_watchdog() -> None: ExposedMaestral.start_sync = oneway(ExposedMaestral.start_sync) ExposedMaestral.stop_sync = oneway(ExposedMaestral.stop_sync) - ExposedMaestral.pause_sync = oneway(ExposedMaestral.pause_sync) - ExposedMaestral.resume_sync = oneway(ExposedMaestral.resume_sync) ExposedMaestral.shutdown_daemon = oneway(ExposedMaestral.shutdown_daemon) maestral_daemon = ExposedMaestral(config_name, log_to_stdout=log_to_stdout) if start_sync: - logger.debug("Starting sync") - maestral_daemon.start_sync() + + try: + maestral_daemon.start_sync() + except Exception as exc: + title = getattr(exc, "title", "Failed to start sync") + message = getattr(exc, "message", "Please inspect the logs") + logger.error(title, exc_info=True) + 
maestral_daemon.sync.notify(title, message, level=notify.ERROR) try: @@ -508,6 +536,11 @@ async def periodic_watchdog() -> None: for socket in daemon.sockets: loop.add_reader(socket.fileno(), daemon.events, daemon.sockets) + # handle sigterm gracefully + signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT) + for s in signals: + loop.add_signal_handler(s, maestral_daemon.shutdown_daemon) + loop.run_until_complete(maestral_daemon.shutdown_complete) for socket in daemon.sockets: @@ -516,8 +549,8 @@ async def periodic_watchdog() -> None: # prevent housekeeping from blocking shutdown daemon.transportServer.housekeeper = None - except Exception: - traceback.print_exc() + except Exception as exc: + logger.error(exc.args[0], exc_info=True) finally: if NOTIFY_SOCKET: @@ -527,21 +560,18 @@ async def periodic_watchdog() -> None: def start_maestral_daemon_process( config_name: str = "maestral", - log_to_stdout: bool = False, start_sync: bool = False, - detach: bool = True, + timeout: int = 5, ) -> Start: """ Starts the Maestral daemon in a new process by calling :func:`start_maestral_daemon`. Startup is race free: there will never be two daemons running for the same config. - This function requires that :obj:`sys.executable` points to a Python executable and - therefore may not work for "frozen" apps. + This function will use :obj:`sys.executable` as a Python executable to start the + daemon. Use :func:`set_executable` to use a custom executable instead. :param config_name: The name of the Maestral configuration to use. - :param log_to_stdout: If ``True``, write logs to stdout. :param start_sync: If ``True``, start syncing once the daemon has started. - :param detach: If ``True``, the daemon process will be detached. If ``False``, - the daemon processes will run in the same session as the current process. + :param timeout: Time in sec to wait for daemon to start. 
:returns: :attr:`Start.Ok` if successful, :attr:`Start.AlreadyRunning` if the daemon was already running or :attr:`Start.Failed` if startup failed. It is possible that :attr:`Start.Ok` may be returned instead of :attr:`Start.AlreadyRunning` @@ -551,38 +581,43 @@ def start_maestral_daemon_process( if is_running(config_name): return Start.AlreadyRunning - if detach: + # protect against injection + cc = quote(config_name).strip("'") + start_sync = bool(start_sync) - # protect against injection - cc = quote(config_name).strip("'") - std_log = bool(log_to_stdout) - start_sync = bool(start_sync) + script = ( + f"import maestral.daemon; " + f'maestral.daemon.start_maestral_daemon("{cc}", start_sync={start_sync})' + ) - script = ( - f"import maestral.daemon; " - f'maestral.daemon.start_maestral_daemon("{cc}", {std_log}, {start_sync})' - ) + cmd = [*EXECUTABLE, "-c", script] - if FROZEN and IS_MACOS: - cmd = [sys.executable, "--run-python", "-OO", "-c", script] - else: - cmd = [sys.executable, "-OO", "-c", script] + process = subprocess.Popen( + cmd, + start_new_session=True, + stdin=subprocess.DEVNULL, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) - subprocess.Popen(cmd, start_new_session=True) + try: + _wait_for_startup(config_name, timeout=timeout) + except Exception as exc: + logger.debug( + "Could not communicate with daemon", + exc_info=(type(exc), exc, exc.__traceback__), + ) + # let's check what the daemon has been doing + returncode = process.poll() + if returncode is None: + logger.debug("Daemon is running but not responsive, killing now") + process.terminate() # make sure we don't leave a stray process + else: + logger.debug("Daemon stopped with return code %s", returncode) + return Start.Failed else: - import multiprocessing as mp - - ctx = mp.get_context("spawn" if IS_MACOS else "fork") - - ctx.Process( - target=start_maestral_daemon, - args=(config_name, log_to_stdout, start_sync), - name="maestral-daemon", - daemon=True, - ).start() - - return 
_wait_for_startup(config_name) + return Start.Ok def stop_maestral_daemon_process( diff --git a/src/maestral/database.py b/src/maestral/database.py index cb51ae61f..b0218b6d3 100644 --- a/src/maestral/database.py +++ b/src/maestral/database.py @@ -280,7 +280,8 @@ def is_download(self) -> bool: def __repr__(self): return ( f"<{self.__class__.__name__}(direction={self.direction.name}, " - f"change_type={self.change_type.name}, dbx_path='{self.dbx_path}')>" + f"change_type={self.change_type.name}, item_type={self.item_type}, " + f"dbx_path='{self.dbx_path}')>" ) @classmethod diff --git a/src/maestral/errors.py b/src/maestral/errors.py index 2bd7622d5..2e1bf84c5 100644 --- a/src/maestral/errors.py +++ b/src/maestral/errors.py @@ -35,7 +35,7 @@ class MaestralApiError(Exception): def __init__( self, title: str, - message: str, + message: str = "", dbx_path: Optional[str] = None, dbx_path_dst: Optional[str] = None, local_path: Optional[str] = None, @@ -58,167 +58,119 @@ def __str__(self) -> str: class SyncError(MaestralApiError): """Base class for recoverable sync issues.""" - pass - class InsufficientPermissionsError(SyncError): """Raised when accessing a file or folder fails due to insufficient permissions, both locally and on Dropbox servers.""" - pass - class InsufficientSpaceError(SyncError): """Raised when the Dropbox account or local drive has insufficient storage space.""" - pass - class PathError(SyncError): """Raised when there is an issue with the provided file or folder path such as invalid characters, a too long file name, etc.""" - pass - class NotFoundError(SyncError): """Raised when a file or folder is requested but does not exist.""" - pass - class ConflictError(SyncError): """Raised when trying to create a file or folder which already exists.""" - pass - class FileConflictError(ConflictError): """Raised when trying to create a file which already exists.""" - pass - class FolderConflictError(SyncError): """Raised when trying to create or folder which 
already exists.""" - pass - class IsAFolderError(SyncError): """Raised when a file is required but a folder is provided.""" - pass - class NotAFolderError(SyncError): """Raised when a folder is required but a file is provided.""" - pass - class DropboxServerError(SyncError): """Raised in case of internal Dropbox errors.""" - pass - class RestrictedContentError(SyncError): """Raised when trying to sync restricted content, for instance when adding a file with a DMCA takedown notice to a public folder.""" - pass - class UnsupportedFileError(SyncError): """Raised when this file type cannot be downloaded but only exported. This is the case for G-suite files.""" - pass - class FileSizeError(SyncError): """Raised when attempting to upload a file larger than 350 GB in an upload session or larger than 150 MB in a single upload. Also raised when attempting to download a file with a size that exceeds file system's limit.""" - pass - class FileReadError(SyncError): """Raised when reading a local file failed.""" - pass - # ==== errors which are not related to a specific sync event =========================== +class CancelledError(MaestralApiError): + """Raised when syncing is cancelled by the user.""" + + class NotLinkedError(MaestralApiError): """Raised when no Dropbox account is linked.""" - pass - class InvalidDbidError(MaestralApiError): """Raised when the given Dropbox ID does not correspond to an existing account.""" - pass - class KeyringAccessError(MaestralApiError): """Raised when retrieving a saved auth token from the user keyring fails.""" - pass - class NoDropboxDirError(MaestralApiError): """Raised when the local Dropbox folder cannot be found.""" - pass - class CacheDirError(MaestralApiError): """Raised when creating the cache directory fails.""" - pass - class InotifyError(MaestralApiError): """Raised when the local Dropbox folder is too large to monitor with inotify.""" - pass - class OutOfMemoryError(MaestralApiError): """Raised when there is insufficient 
memory to complete an operation.""" - pass - class DatabaseError(MaestralApiError): """Raised when reading or writing to the database fails.""" - pass - class DropboxAuthError(MaestralApiError): """Raised when authentication fails.""" - pass - class TokenExpiredError(DropboxAuthError): """Raised when authentication fails because the user's token has expired.""" - pass - class TokenRevokedError(DropboxAuthError): """Raised when authentication fails because the user's token has been revoked.""" - pass - class CursorResetError(MaestralApiError): """Raised when the cursor used for a longpoll or list-folder request has been @@ -226,21 +178,23 @@ class CursorResetError(MaestralApiError): cursor for the respective folder has to be obtained through files_list_folder. This may require re-syncing the entire Dropbox.""" - pass - class BadInputError(MaestralApiError): """Raised when an API request is made with bad input. This should not happen during syncing but only in case of manual API calls.""" - pass - class BusyError(MaestralApiError): """Raised when trying to perform an action which is only possible in the idle state and we cannot block or queue the job.""" - pass + +class UnsupportedFileTypeForDiff(MaestralApiError): + """Raised when a diff for an unsupported file type was issued.""" + + +class SharedLinkError(MaestralApiError): + """Raised when creating a shared link fails.""" # connection errors are handled as warnings @@ -263,10 +217,13 @@ class BusyError(MaestralApiError): BadInputError, OutOfMemoryError, BusyError, + UnsupportedFileTypeForDiff, + SharedLinkError, ) SYNC_ERRORS = ( SyncError, + CancelledError, InsufficientPermissionsError, InsufficientSpaceError, PathError, diff --git a/src/maestral/fsevents/__init__.py b/src/maestral/fsevents/__init__.py index f6336b014..24102c880 100644 --- a/src/maestral/fsevents/__init__.py +++ b/src/maestral/fsevents/__init__.py @@ -1,68 +1,18 @@ # -*- coding: utf-8 -*- """ -This module provides custom event emitters for the 
:obj:`watchdog` package that sort -file system events in an order which can be applied to reproduce the new state from the -old state. This is only required for event emitters which internally use +This module provides a custom polling file system event emitter for the +:obj:`watchdog` package that sorts file system events in an order which can be applied +to reproduce the new state from the old state. This is only required for the polling +emitter which uses period directory snapshots and compares them with a :class:`watchdog.utils.dirsnapshot.DirectorySnapshotDiff` to generate file system -events. This includes the macOS FSEvents emitter and the Polling emitter but not inotify -emitters. - -Looking at the source code for :class:`watchdog.utils.dirsnapshot.DirectorySnapshotDiff`, -the event types are categorised as follows: - -* Created event: The inode is unique to the new snapshot. The path may be unique to the - new snapshot or exist in both. In the second case, there will be a preceding Deleted - event or a Moved event with the path as starting point (the old item was deleted or - moved away). - -* Deleted event: The inode is unique to the old snapshot. The path may be unique to the - old snapshot or exist in both. In the second case, there will be a subsequent Created - event or a Moved event with the path as end point (something else was created at or - moved to the location). - -* Moved event: The inode exists in both snapshots but with different paths. - -* Modified event: The inode exists in both snapshots and the mtime or file size are - different. DirectorySnapshotDiff will always use the inode’s path from the old - snapshot. - -From the above classification, there can be at most two created/deleted/moved events -that share the same path in one snapshot diff: - - * Deleted(path1) + Created(path1) - * Moved(path1, path2) + Created(path1) - * Deleted(path1) + Moved(path0, path1) - -Any Modified event will come before a Moved event or stand alone. 
Modified events will -never be combined by themselves with created or deleted events because they require the -inode to be present in both snapshots. - -From the above, we can achieve correct ordering for unique path by always adding Deleted -events to the queue first, Modified events second, Moved events third and Created events -last: - - Deleted -> Modified -> Moved -> Created - -The ordering won’t be correct between unrelated paths and between files and folder. The -first does not matter for syncing. We solve the second by assuming that when a directory -is deleted, so are its children. And before a child is created, its parent dircetory -must exist. - -MovedEvents which are not unique (their paths appear in other events) will be split -into Deleted and Created events by Maestral. +events. """ -import os -from typing import Union - from watchdog.utils import platform # type: ignore from watchdog.utils import UnsupportedLibc -from watchdog.utils import unicode_paths -if platform.is_darwin(): - from .fsevents import OrderedFSEventsObserver as Observer -elif platform.is_linux(): +if platform.is_linux(): try: from watchdog.observers.inotify import InotifyObserver as Observer # type: ignore except UnsupportedLibc: @@ -71,22 +21,3 @@ from watchdog.observers import Observer # type: ignore __all__ = ["Observer"] - - -# patch encoding / decoding of paths in watchdog - - -def _patched_decode(path: Union[str, bytes]) -> str: - if isinstance(path, bytes): - return os.fsdecode(path) - return path - - -def _patched_encode(path: Union[str, bytes]) -> bytes: - if isinstance(path, str): - return os.fsencode(path) - return path - - -unicode_paths.decode = _patched_decode -unicode_paths.encode = _patched_encode diff --git a/src/maestral/fsevents/fsevents.py b/src/maestral/fsevents/fsevents.py deleted file mode 100644 index f9d98924b..000000000 --- a/src/maestral/fsevents/fsevents.py +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright 
2011 Yesudeep Mangalapilly -# Copyright 2012 Google, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watchdog.observers.fsevents import ( # type: ignore - FSEventsEmitter, - FSEventsObserver, - FileDeletedEvent, - FileModifiedEvent, - FileMovedEvent, - FileCreatedEvent, - DirDeletedEvent, - DirModifiedEvent, - DirMovedEvent, - DirCreatedEvent, - DEFAULT_OBSERVER_TIMEOUT, - BaseObserver, -) -from watchdog.utils.dirsnapshot import DirectorySnapshot - - -class OrderedFSEventsEmitter(FSEventsEmitter): - """Ordered file system event emitter for macOS - - This subclasses FSEventsEmitter to guarantee an order of events which can be applied - to reproduce the new state from the old state. 
- """ - - def queue_events(self, timeout): - with self._lock: - if not self.watch.is_recursive and self.watch.path not in self.pathnames: - return - new_snapshot = DirectorySnapshot(self.watch.path, self.watch.is_recursive) - diff = new_snapshot - self.snapshot - - # add metadata modified events which will be missed by regular diff - try: - ctime_files_modified = set() - - for path in self.snapshot.paths & new_snapshot.paths: - if not self.snapshot.isdir(path): - if self.snapshot.inode(path) == new_snapshot.inode(path): - if ( - self.snapshot.stat_info(path).st_ctime - != new_snapshot.stat_info(path).st_ctime - ): - ctime_files_modified.add(path) - - files_modified = set(ctime_files_modified) | set(diff.files_modified) - except Exception as exc: - print(exc) - - # replace cached snapshot - self.snapshot = new_snapshot - - # Files. - for src_path in diff.files_deleted: - self.queue_event(FileDeletedEvent(src_path)) - for src_path in files_modified: - self.queue_event(FileModifiedEvent(src_path)) - for src_path, dest_path in diff.files_moved: - self.queue_event(FileMovedEvent(src_path, dest_path)) - for src_path in diff.files_created: - self.queue_event(FileCreatedEvent(src_path)) - - # Directories. 
- for src_path in diff.dirs_deleted: - self.queue_event(DirDeletedEvent(src_path)) - for src_path in diff.dirs_modified: - self.queue_event(DirModifiedEvent(src_path)) - for src_path, dest_path in diff.dirs_moved: - self.queue_event(DirMovedEvent(src_path, dest_path)) - for src_path in diff.dirs_created: - self.queue_event(DirCreatedEvent(src_path)) - - # free some memory - del diff - del files_modified - - -class OrderedFSEventsObserver(FSEventsObserver): - def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT): - BaseObserver.__init__( - self, emitter_class=OrderedFSEventsEmitter, timeout=timeout - ) diff --git a/src/maestral/fsevents/polling.py b/src/maestral/fsevents/polling.py index 3577714e0..3768674bf 100644 --- a/src/maestral/fsevents/polling.py +++ b/src/maestral/fsevents/polling.py @@ -1,6 +1,50 @@ -#!/usr/bin/env python # -*- coding: utf-8 -*- -# +""" +Looking at the source code for :class:`watchdog.utils.dirsnapshot.DirectorySnapshotDiff`, +the event types are categorised as follows: + +* Created event: The inode is unique to the new snapshot. The path may be unique to the + new snapshot or exist in both. In the second case, there will be a preceding Deleted + event or a Moved event with the path as starting point (the old item was deleted or + moved away). + +* Deleted event: The inode is unique to the old snapshot. The path may be unique to the + old snapshot or exist in both. In the second case, there will be a subsequent Created + event or a Moved event with the path as end point (something else was created at or + moved to the location). + +* Moved event: The inode exists in both snapshots but with different paths. + +* Modified event: The inode exists in both snapshots and the mtime or file size are + different. DirectorySnapshotDiff will always use the inode’s path from the old + snapshot. 
+ +From the above classification, there can be at most two created/deleted/moved events +that share the same path in one snapshot diff: + + * Deleted(path1) + Created(path1) + * Moved(path1, path2) + Created(path1) + * Deleted(path1) + Moved(path0, path1) + +Any Modified event will come before a Moved event or stand alone. Modified events will +never be combined by themselves with created or deleted events because they require the +inode to be present in both snapshots. + +From the above, we can achieve correct ordering for unique path by always adding Deleted +events to the queue first, Modified events second, Moved events third and Created events +last: + + Deleted -> Modified -> Moved -> Created + +The ordering won’t be correct between unrelated paths and between files and folder. The +first does not matter for syncing. We solve the second by assuming that when a directory +is deleted, so are its children. And before a child is created, its parent directory +must exist. + +MovedEvents which are not unique (their paths appear in other events) will be split +into Deleted and Created events by Maestral. +""" + # Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc. # diff --git a/src/maestral/logging.py b/src/maestral/logging.py index 09996a1aa..1fb908fc6 100644 --- a/src/maestral/logging.py +++ b/src/maestral/logging.py @@ -3,7 +3,8 @@ import logging from collections import deque -from concurrent.futures._base import Future, wait +import concurrent.futures +from concurrent.futures import Future from typing import Deque, Optional, List try: @@ -111,9 +112,13 @@ def wait_for_emit(self, timeout: Optional[float]) -> bool: :param timeout: Maximum time to block before returning. :returns: ``True`` if there was a status change, ``False`` in case of a timeout. 
""" - done, not_done = wait([self._emit_future], timeout=timeout) + try: + self._emit_future.result(timeout=timeout) + except concurrent.futures.TimeoutError: + return False + self._emit_future = Future() # reset future - return len(done) == 1 + return True def getLastMessage(self) -> str: """ diff --git a/src/maestral/main.py b/src/maestral/main.py index d5bf9c729..b651b0b2a 100644 --- a/src/maestral/main.py +++ b/src/maestral/main.py @@ -5,7 +5,6 @@ import sys import os import os.path as osp -import platform import shutil import time import warnings @@ -13,14 +12,28 @@ import asyncio import random from concurrent.futures import ThreadPoolExecutor -from typing import Union, List, Iterator, Dict, Set, Awaitable, Optional, Any +from typing import ( + Union, + List, + Iterator, + Dict, + Set, + Tuple, + Awaitable, + Optional, + Any, +) +import tempfile +import mimetypes +import difflib # external imports import requests from watchdog.events import DirDeletedEvent, FileDeletedEvent # type: ignore -import bugsnag # type: ignore -from bugsnag.handlers import BugsnagHandler # type: ignore from packaging.version import Version +from datetime import datetime, timezone +from dropbox.files import FileMetadata +from dropbox.sharing import RequestedVisibility try: from systemd import journal # type: ignore @@ -38,9 +51,9 @@ NotFoundError, BusyError, KeyringAccessError, + UnsupportedFileTypeForDiff, ) from .config import MaestralConfig, MaestralState, validate_config_name -from .notify import MaestralDesktopNotificationHandler from .logging import CachedHandler, SdNotificationHandler, safe_journal_sender from .utils import get_newer_version from .utils.path import ( @@ -57,39 +70,16 @@ ErrorType, ) from .utils.appdirs import get_log_path, get_cache_path, get_data_path -from .constants import BUGSNAG_API_KEY, IDLE, FileStatus, GITHUB_RELEASES_API +from .utils.integration import get_ac_state, ACState +from .constants import IDLE, FileStatus, GITHUB_RELEASES_API -__all__ = [ - 
"Maestral", -] +__all__ = ["Maestral"] logger = logging.getLogger(__name__) -# set up error reporting but do not activate - -bugsnag.configure( - api_key=BUGSNAG_API_KEY, - app_version=__version__, - auto_notify=False, - auto_capture_sessions=False, -) - - -def bugsnag_global_callback(notification): - notification.add_tab( - "system", {"platform": platform.platform(), "python": platform.python_version()} - ) - cause = notification.exception.__cause__ - if cause: - notification.add_tab("original exception", error_to_dict(cause)) - - -bugsnag.before_notify(bugsnag_global_callback) - - # ====================================================================================== # Main API # ====================================================================================== @@ -141,19 +131,14 @@ def __init__( self._conf = MaestralConfig(self._config_name) self._state = MaestralState(self._config_name) - # enable / disable automatic reporting of errors - bugsnag.configure(auto_notify=self.analytics) - # set up logging self._log_to_stdout = log_to_stdout self._setup_logging() # set up sync infrastructure - self.client = DropboxClient( - config_name=self.config_name - ) # interface to Dbx SDK - self.monitor = SyncMonitor(self.client) # coordinates sync threads - self.sync = self.monitor.sync # provides core sync functionality + self.client = DropboxClient(config_name=self.config_name) + self.monitor = SyncMonitor(self.client) + self.sync = self.monitor.sync self._check_and_run_post_update_scripts() @@ -241,8 +226,7 @@ def unlink(self) -> None: def _setup_logging(self) -> None: """ Sets up logging to log files, status and error properties, desktop notifications, - the systemd journal if available, bugsnag if error reports are enabled, and to - stdout if requested. + the systemd journal if available, and to stdout if requested. 
""" self._logger = logging.getLogger("maestral") @@ -308,16 +292,6 @@ def _setup_logging(self) -> None: self._log_handler_error_cache.setLevel(logging.ERROR) self._logger.addHandler(self._log_handler_error_cache) - # log errors to desktop notifications - self._log_handler_desktop_notifier = MaestralDesktopNotificationHandler() - self._log_handler_desktop_notifier.setLevel(logging.WARNING) - self._logger.addHandler(self._log_handler_desktop_notifier) - - # log to bugsnag (disabled by default) - self._log_handler_bugsnag = BugsnagHandler() - self._log_handler_bugsnag.setLevel(logging.ERROR if self.analytics else 100) - self._logger.addHandler(self._log_handler_bugsnag) - # ==== methods to access config and saved state ==================================== @property @@ -471,20 +445,6 @@ def log_to_stdout(self, enabled: bool) -> None: level = self.log_level if enabled else 100 self.log_handler_stream.setLevel(level) - @property - def analytics(self) -> bool: - """Enables or disables logging of errors to bugsnag.""" - return self._conf.get("app", "analytics") - - @analytics.setter - def analytics(self, enabled: bool) -> None: - """Setter: analytics.""" - - bugsnag.configure(auto_notify=self.analytics) - self._log_handler_bugsnag.setLevel(logging.ERROR if enabled else 100) - - self._conf.set("app", "analytics", enabled) - @property def notification_snooze(self) -> float: """Snooze time for desktop notifications in minutes. Defaults to 0.0 if @@ -538,21 +498,11 @@ def pending_first_download(self) -> bool: """Indicates if the initial download has already occurred (read only).""" return self.sync.local_cursor == 0 or self.sync.remote_cursor == "" - @property - def syncing(self) -> bool: - """Indicates if Maestral is syncing (read only). 
It will be ``True`` if syncing
-        is not paused by the user *and* Maestral is connected to the internet."""
-        return (
-            self.monitor.syncing.is_set()
-            or self.monitor.startup.is_set()
-            or self.sync.busy()
-        )
-
     @property
     def paused(self) -> bool:
         """Indicates if syncing is paused by the user (read only). This is set by
         calling :meth:`pause`."""
-        return self.monitor.paused_by_user.is_set() and not self.sync.busy()
+        return not self.monitor.autostart.is_set() and not self.sync.busy()
 
     @property
     def running(self) -> bool:
@@ -568,7 +518,7 @@ def connected(self) -> bool:
         if self.pending_link:
             return False
         else:
-            return self.monitor.connected.is_set()
+            return self.monitor.connected
 
     @property
     def status(self) -> str:
@@ -641,7 +591,7 @@ def get_file_status(self, local_path: str) -> str:
         'up to date', 'error', or 'unwatched' (for files outside of the Dropbox
         directory). This will always be 'unwatched' if syncing is paused.
         """
-        if not self.syncing:
+        if not self.running:
             return FileStatus.Unwatched.value
 
         local_path = osp.realpath(local_path)
@@ -865,6 +815,101 @@ def list_revisions(self, dbx_path: str, limit: int = 10) -> List[StoneType]:
 
         return entries
 
+    def get_file_diff(self, old_rev: str, new_rev: Optional[str] = None) -> List[str]:
+        """
+        Compare two revisions of a text file using Python's difflib. The versions will be
+        downloaded to temporary files. If new_rev is None, the old revision will be
+        compared to the corresponding local file, if any.
+
+        :param old_rev: Identifier of old revision.
+        :param new_rev: Identifier of new revision.
+        :returns: Diff as a list of strings (lines).
+        :raises UnsupportedFileTypeForDiff: if file type is not supported.
+        :raises UnsupportedFileTypeForDiff: if file content could not be decoded.
+        :raises MaestralApiError: if file could not be read for any other reason.
+        """
+
+        def str_from_date(d: datetime) -> str:
+            """Convert 'client_modified' metadata to string in local timezone"""
+            tz_date = d.replace(tzinfo=timezone.utc).astimezone()
+            return tz_date.strftime("%d %b %Y at %H:%M")
+
+        def download_rev(rev: str) -> Tuple[List[str], FileMetadata]:
+            """
+            Download a rev to a tmp file, read it and return the content + metadata
+            """
+
+            with tempfile.NamedTemporaryFile(mode="w+") as f:
+                md = self.client.download(dbx_path, f.name, rev=rev)
+
+                # Read from the file
+                try:
+                    with convert_api_errors(dbx_path=dbx_path, local_path=f.name):
+                        content = f.readlines()
+                except UnicodeDecodeError:
+                    raise UnsupportedFileTypeForDiff(
+                        "Failed to decode the file",
+                        "Only UTF-8 plain text files are currently supported.",
+                    )
+
+            return content, md
+
+        md_new = self.client.get_metadata(f"rev:{new_rev}", include_deleted=True)
+        md_old = self.client.get_metadata(f"rev:{old_rev}", include_deleted=True)
+
+        if md_new is None or md_old is None:
+            missing_rev = new_rev if md_new is None else old_rev
+            raise NotFoundError(
+                f"Could not find a file with revision {missing_rev}",
+                "Use 'list_revisions' to list past revisions of a file.",
+            )
+
+        dbx_path = self.sync.correct_case(md_old.path_display)
+        local_path = self.sync.to_local_path(md_old.path_display)
+
+        # Check if a diff is possible
+        # If mime is None, proceed because most files without
+        # an extension are just text files
+        mime, _ = mimetypes.guess_type(dbx_path)
+        if mime is not None and not mime.startswith("text/"):
+            raise UnsupportedFileTypeForDiff(
+                f"Bad file type: '{mime}'", "Only files of type 'text/*' are supported."
+            )
+
+        # If new_rev is None, the local file is used, even if it isn't synced
+        if new_rev is None:
+            new_rev = "local version"
+            try:
+                with convert_api_errors(dbx_path=dbx_path, local_path=local_path):
+                    mtime = time.localtime(osp.getmtime(local_path))
+                    date_str_new = time.strftime("%d %b %Y at %H:%M", mtime)
+
+                    with open(local_path) as f:
+                        content_new = f.readlines()
+
+            except UnicodeDecodeError:
+                raise UnsupportedFileTypeForDiff(
+                    "Failed to decode the file",
+                    "Only UTF-8 plain text files are currently supported.",
+                )
+        else:
+            content_new, md_new = download_rev(new_rev)
+            date_str_new = str_from_date(md_new.client_modified)
+
+        content_old, md_old = download_rev(old_rev)
+        date_str_old = str_from_date(md_old.client_modified)
+
+        return list(
+            difflib.unified_diff(
+                content_old,
+                content_new,
+                fromfile=f"{dbx_path} ({old_rev})",
+                tofile=f"{dbx_path} ({new_rev})",
+                fromfiledate=date_str_old,
+                tofiledate=date_str_new,
+            )
+        )
+
     def restore(self, dbx_path: str, rev: str) -> StoneType:
         """
         Restore an old revision of a file.
@@ -881,6 +926,8 @@ def restore(self, dbx_path: str, rev: str) -> StoneType:
 
         self._check_linked()
 
+        logger.info(f"Restoring '{dbx_path}' to {rev}")
+
         res = self.client.restore(dbx_path, rev)
         return dropbox_stone_to_dict(res)
 
@@ -925,26 +972,6 @@ def start_sync(self) -> None:
         if not self.running:
             self.monitor.start()
 
-    def resume_sync(self) -> None:
-        """
-        Resumes syncing if paused.
-
-        :raises NotLinkedError: if no Dropbox account is linked.
-        :raises NoDropboxDirError: if local Dropbox folder is not set up.
-        """
-
-        self._check_linked()
-        self._check_dropbox_dir()
-
-        self.monitor.resume()
-
-    def pause_sync(self) -> None:
-        """
-        Pauses the syncing if running.
-        """
-        if not self.paused:
-            self.monitor.pause()
-
     def stop_sync(self) -> None:
         """
         Stops all syncing threads if running.
Call :meth:`start_sync` to restart @@ -1044,7 +1071,7 @@ def _remove_after_excluded(self, dbx_path: str) -> None: pass else: event_cls = DirDeletedEvent if osp.isdir(local_path) else FileDeletedEvent - with self.monitor.fs_event_handler.ignore(event_cls(local_path)): + with self.monitor.sync.fs_events.ignore(event_cls(local_path)): delete(local_path) def include_item(self, dbx_path: str) -> None: @@ -1225,8 +1252,8 @@ def create_dropbox_directory(self, path: str) -> None: # pause syncing resume = False - if self.syncing: - self.pause_sync() + if self.running: + self.stop_sync() resume = True # housekeeping @@ -1241,7 +1268,88 @@ def create_dropbox_directory(self, path: str) -> None: # resume syncing if resume: - self.resume_sync() + self.start_sync() + + def create_shared_link( + self, + dbx_path: str, + visibility: str = "public", + password: Optional[str] = None, + expires: Optional[float] = None, + ) -> StoneType: + """ + Creates a shared link for the given ``dbx_path``. Returns a dictionary with + information regarding the link, including the URL, access permissions, expiry + time, etc. The shared link will grant read / download access only. Note that + basic accounts do not support password protection or expiry times. + + :param dbx_path: Path to item on Dropbox. + :param visibility: Requested visibility of the shared link. Must be "public", + "team_only" or "password". The actual visibility may be different, depending + on the team and folder settings. Inspect the "link_permissions" entry of the + returned dictionary. + :param password: An optional password required to access the link. Will be + ignored if the visibility is not "password". + :param expires: An optional expiry time for the link as POSIX timestamp. + :returns: Shared link information as dict. See + :class:`dropbox.sharing.SharedLinkMetadata` for keys and values. + :raises ValueError: if visibility is 'password' but no password is provided. 
+ :raises DropboxAuthError: in case of an invalid access token. + :raises DropboxServerError: for internal Dropbox errors. + :raises ConnectionError: if the connection to Dropbox fails. + :raises NotLinkedError: if no Dropbox account is linked. + """ + + self._check_linked() + + if visibility not in ("public", "team_only", "password"): + raise ValueError("Visibility must be 'public', 'team_only', or 'password'") + + if visibility == "password" and not password: + raise ValueError("Please specify a password") + + link_info = self.client.create_shared_link( + dbx_path=dbx_path, + visibility=RequestedVisibility(visibility), + password=password, + expires=datetime.utcfromtimestamp(expires) if expires else None, + ) + + return dropbox_stone_to_dict(link_info) + + def revoke_shared_link(self, url: str) -> None: + """ + Revokes the given shared link. Note that any other links to the same file or + folder will remain valid. + + :param url: URL of shared link to revoke. + :raises DropboxAuthError: in case of an invalid access token. + :raises DropboxServerError: for internal Dropbox errors. + :raises ConnectionError: if the connection to Dropbox fails. + :raises NotLinkedError: if no Dropbox account is linked. + """ + + self._check_linked() + self.client.revoke_shared_link(url) + + def list_shared_links(self, dbx_path: Optional[str] = None) -> List[StoneType]: + """ + Returns a list of all shared links for the given Dropbox path. If no path is + given, return all shared links for the account, up to a maximum of 1,000 links. + + :param dbx_path: Path to item on Dropbox. + :returns: List of shared link information as dictionaries. See + :class:`dropbox.sharing.SharedLinkMetadata` for keys and values. + :raises DropboxAuthError: in case of an invalid access token. + :raises DropboxServerError: for internal Dropbox errors. + :raises ConnectionError: if the connection to Dropbox fails. + :raises NotLinkedError: if no Dropbox account is linked. 
+ """ + + self._check_linked() + res = self.client.list_shared_links(dbx_path) + + return [dropbox_stone_to_dict(link) for link in res.links] # ==== utility methods for front ends ============================================== @@ -1362,6 +1470,8 @@ def _check_and_run_post_update_scripts(self) -> None: self._update_from_pre_v1_2_0() elif Version(updated_from) < Version("1.2.1"): self._update_from_pre_v1_2_1() + elif Version(updated_from) < Version("1.3.2"): + self._update_from_pre_v1_3_2() self.set_state("app", "updated_scripts_completed", __version__) @@ -1416,29 +1526,42 @@ def _update_from_pre_v1_2_1(self) -> None: batch_op.drop_constraint(constraint_name=name, type_="unique") + def _update_from_pre_v1_3_2(self) -> None: + + if self._conf.get("app", "keyring") == "keyring.backends.OS_X.Keyring": + logger.info("Migrating keyring after update from pre v1.3.2") + self._conf.set("app", "keyring", "keyring.backends.macOS.Keyring") + # ==== period async jobs =========================================================== def _schedule_task(self, coro: Awaitable) -> None: + """Schedules a task in our asyncio loop.""" task = self._loop.create_task(coro) self._tasks.add(task) async def _periodic_refresh_info(self) -> None: + """Periodically refresh the account information from Dropbox servers.""" await asyncio.sleep(60 * 5) while True: # update account info if self.client.auth.loaded: - # only run if we have loaded the keyring, we don't - # want to trigger any keyring access from here - if self.client.linked: + + # Only run if we have loaded the keyring, we don't + # want to trigger any keyring access from here. 
+ + try: await self._loop.run_in_executor(self._pool, self.get_account_info) await self._loop.run_in_executor(self._pool, self.get_profile_pic) + except (ConnectionError, MaestralApiError): + pass await sleep_rand(60 * 45) async def _period_update_check(self) -> None: + """Periodically check for software updates.""" await asyncio.sleep(60 * 3) @@ -1451,15 +1574,22 @@ async def _period_update_check(self) -> None: await sleep_rand(60 * 60) async def _period_reindexing(self) -> None: + """ + Trigger periodic reindexing, determined by the 'reindex_interval' setting. Don't + reindex if we are running on battery power. + """ while True: if self.monitor.running.is_set(): elapsed = time.time() - self.sync.last_reindex + ac_state = get_ac_state() + reindexing_due = elapsed > self.monitor.reindex_interval is_idle = self.monitor.idle_time > 20 * 60 + has_ac_power = ac_state in (ACState.Connected, ACState.Undetermined) - if reindexing_due and is_idle: + if reindexing_due and is_idle and has_ac_power: self.monitor.rebuild_index() await sleep_rand(60 * 5) diff --git a/src/maestral/notify.py b/src/maestral/notify.py new file mode 100644 index 000000000..63593d6a8 --- /dev/null +++ b/src/maestral/notify.py @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- +""" +This module handles desktop notifications and supports multiple backends, depending on +the platform. 
+""" + +# system imports +import time +from typing import Optional, Dict, Callable + +# external imports +from desktop_notifier import DesktopNotifier, Urgency, Button + +# local imports +from .config import MaestralConfig +from .constants import APP_NAME, APP_ICON_PATH + + +__all__ = [ + "NONE", + "ERROR", + "SYNCISSUE", + "FILECHANGE", + "level_name_to_number", + "level_number_to_name", + "MaestralDesktopNotifier", +] + +_desktop_notifier = DesktopNotifier( + app_name=APP_NAME, + app_icon=f"file://{APP_ICON_PATH}", + notification_limit=10, +) + + +NONE = 100 +ERROR = 40 +SYNCISSUE = 30 +FILECHANGE = 15 + + +_level_to_name = { + NONE: "NONE", + ERROR: "ERROR", + SYNCISSUE: "SYNCISSUE", + FILECHANGE: "FILECHANGE", +} + +_name_to_level = { + "NONE": 100, + "ERROR": 40, + "SYNCISSUE": 30, + "FILECHANGE": 15, +} + + +def level_number_to_name(number: int) -> str: + """Converts a Maestral notification level number to name.""" + + try: + return _level_to_name[number] + except KeyError: + return f"Level {number}" + + +def level_name_to_number(name: str) -> int: + """Converts a Maestral notification level name to number.""" + + try: + return _name_to_level[name] + except KeyError: + raise ValueError("Invalid level name") + + +class MaestralDesktopNotifier: + """Desktop notification emitter for Maestral + + Desktop notifier with snooze functionality and variable notification levels. + """ + + def __init__(self, config_name: str) -> None: + self._conf = MaestralConfig(config_name) + self._snooze = 0.0 + + @property + def notify_level(self) -> int: + """Custom notification level. Notifications with a lower level will be + discarded.""" + return self._conf.get("app", "notification_level") + + @notify_level.setter + def notify_level(self, level: int) -> None: + """Setter: notify_level.""" + self._conf.set("app", "notification_level", level) + + @property + def snoozed(self) -> float: + """Time in minutes to snooze notifications. 
Applied to FILECHANGE level only.""" + return max(0.0, (self._snooze - time.time()) / 60.0) + + @snoozed.setter + def snoozed(self, minutes: float) -> None: + """Setter: snoozed.""" + self._snooze = time.time() + minutes * 60.0 + + def notify( + self, + title: str, + message: str, + level: int = FILECHANGE, + on_click: Optional[Callable] = None, + actions: Optional[Dict[str, Callable]] = None, + ) -> None: + """ + Sends a desktop notification. + + :param title: Notification title. + :param message: Notification message. + :param level: Notification level of the message. + :param on_click: A callback to execute when the notification is clicked. The + provided callable must not take any arguments. + :param actions: A dictionary with button names and callbacks for the + notification. + """ + + snoozed = self.snoozed and level <= FILECHANGE + + if level >= self.notify_level and not snoozed: + + urgency = Urgency.Critical if level >= ERROR else Urgency.Normal + + if actions: + buttons = [Button(name, handler) for name, handler in actions.items()] + else: + buttons = [] + + _desktop_notifier.send_sync( + title=title, + message=message, + urgency=urgency, + on_clicked=on_click, + buttons=buttons, + ) diff --git a/src/maestral/notify/__init__.py b/src/maestral/notify/__init__.py deleted file mode 100644 index 88cf197cc..000000000 --- a/src/maestral/notify/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# -*- coding: utf-8 -*- -from .notify import MaestralDesktopNotifier, MaestralDesktopNotificationHandler - -__all__ = ["MaestralDesktopNotifier", "MaestralDesktopNotificationHandler"] diff --git a/src/maestral/notify/notify.py b/src/maestral/notify/notify.py deleted file mode 100644 index 219aeb903..000000000 --- a/src/maestral/notify/notify.py +++ /dev/null @@ -1,230 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module handles desktop notifications and supports multiple backends, depending on -the platform. 
-""" - -# system imports -import time -import platform -import logging -from threading import Lock -from typing import Optional, Dict, ClassVar, Callable - -# local imports -from maestral.config import MaestralConfig -from maestral.constants import APP_NAME, BUNDLE_ID, APP_ICON_PATH -from .notify_base import DesktopNotifierBase, NotificationLevel, Notification - - -__all__ = [ - "Notification", - "NotificationLevel", - "DesktopNotifier", - "MaestralDesktopNotifier", - "MaestralDesktopNotificationHandler", -] - - -class DesktopNotifier: - """Cross-platform desktop notification emitter - - Uses different backends depending on the platform version and available services. - The Dbus backend requires a running asyncio loop. The Cocoa implementations will - dispatch notifications without an event loop but require a running CFRunLoop *in the - main thread* to react to user interactions with the notification. Packages such as - :mod:`rubicon.objc` can be used to integrate asyncio with a CFRunLoop. - - :param app_name: Name of app which sends notifications. - :param app_id: Bundle identifier of the app. This is typically a reverse domain name - such as 'com.google.app'. - """ - - _impl: Optional[DesktopNotifierBase] - - def __init__(self, app_name: str, app_id: str) -> None: - self._lock = Lock() - - if platform.system() == "Darwin": - from .notify_macos import Impl - elif platform.system() == "Linux": - from .notify_linux import Impl # type: ignore - else: - Impl = None # type: ignore - - if Impl: - self._impl = Impl(app_name, app_id) - else: - self._impl = None - - def send( - self, - title: str, - message: str, - urgency: NotificationLevel = NotificationLevel.Normal, - icon: Optional[str] = None, - action: Optional[Callable] = None, - buttons: Optional[Dict[str, Optional[Callable]]] = None, - ) -> None: - """ - Sends a desktop notification. Some arguments may be ignored, depending on the - backend. - - :param title: Notification title. 
- :param message: Notification message. - :param urgency: Notification level: low, normal or critical. This is ignored by - some implementations. - :param icon: Path to an icon to use for the notification, typically the app - icon. This is ignored by some implementations, e.g., on macOS where the icon - of the app bundle is always used. - :param action: Handler to call when the notification is clicked. This is ignored - by some implementations. - :param buttons: A dictionary with button names and callbacks to show in the - notification. This is ignored by some implementations. - """ - notification = Notification(title, message, urgency, icon, action, buttons) - - if self._impl: - with self._lock: - self._impl.send(notification) - - -_desktop_notifier_maestral = DesktopNotifier(APP_NAME, BUNDLE_ID) - - -class MaestralDesktopNotifier: - """Desktop notification emitter for Maestral - - Desktop notifier with snooze functionality and variable notification levels. - - :cvar int NONE: Notification level for no desktop notifications. - :cvar int ERROR: Notification level for errors. - :cvar int SYNCISSUE: Notification level for sync issues. - :cvar int FILECHANGE: Notification level for file changes. 
- """ - - _instances: ClassVar[Dict[str, "MaestralDesktopNotifier"]] = dict() - _lock = Lock() - - NONE = 100 - ERROR = 40 - SYNCISSUE = 30 - FILECHANGE = 15 - - _levelToName = { - NONE: "NONE", - ERROR: "ERROR", - SYNCISSUE: "SYNCISSUE", - FILECHANGE: "FILECHANGE", - } - - _nameToLevel = { - "NONE": 100, - "ERROR": 40, - "SYNCISSUE": 30, - "FILECHANGE": 15, - } - - @classmethod - def level_number_to_name(cls, number: int) -> str: - """Converts a Maestral notification level number to name.""" - return cls._levelToName[number] - - @classmethod - def level_name_to_number(cls, name: str) -> int: - """Converts a Maestral notification level name to number.""" - return cls._nameToLevel[name] - - def __init__(self, config_name: str) -> None: - self._conf = MaestralConfig(config_name) - self._snooze = 0.0 - - @property - def notify_level(self) -> int: - """Custom notification level. Notifications with a lower level will be - discarded.""" - return self._conf.get("app", "notification_level") - - @notify_level.setter - def notify_level(self, level: int) -> None: - """Setter: notify_level.""" - self._conf.set("app", "notification_level", level) - - @property - def snoozed(self) -> float: - """Time in minutes to snooze notifications. Applied to FILECHANGE level only.""" - return max(0.0, (self._snooze - time.time()) / 60.0) - - @snoozed.setter - def snoozed(self, minutes: float) -> None: - """Setter: snoozed.""" - self._snooze = time.time() + minutes * 60.0 - - def notify( - self, - title: str, - message: str, - level: int = FILECHANGE, - on_click: Optional[Callable] = None, - buttons: Optional[Dict[str, Optional[Callable]]] = None, - ) -> None: - """ - Sends a desktop notification. - - :param title: Notification title. - :param message: Notification message. - :param level: Notification level of the message. - :param on_click: A callback to execute when the notification is clicked. The - provided callable must not take any arguments. 
- :param buttons: A dictionary with button names and callbacks for the - notification. - """ - - ignore = self.snoozed and level == MaestralDesktopNotifier.FILECHANGE - if level == MaestralDesktopNotifier.ERROR: - urgency = NotificationLevel.Critical - else: - urgency = NotificationLevel.Normal - - if level >= self.notify_level and not ignore: - _desktop_notifier_maestral.send( - title=title, - message=message, - icon=APP_ICON_PATH, - urgency=urgency, - action=on_click, - buttons=buttons, - ) - - -class MaestralDesktopNotificationHandler(logging.Handler): - """A logging handler to send desktop notifications.""" - - def __init__(self) -> None: - super().__init__() - self.setFormatter(logging.Formatter(fmt="%(message)s")) - - def emit(self, record: logging.LogRecord) -> None: - """ - Emits a logging message as a desktop notification. - - :param record: Log record. - """ - - # avoid recursive notifications from our own logger - if record.name.startswith(__name__): - return - - self.format(record) - - if record.levelno == logging.ERROR: - urgency = NotificationLevel.Critical - else: - urgency = NotificationLevel.Normal - - _desktop_notifier_maestral.send( - title=record.levelname, - message=record.message, - icon=APP_ICON_PATH, - urgency=urgency, - ) diff --git a/src/maestral/notify/notify_base.py b/src/maestral/notify/notify_base.py deleted file mode 100644 index 9e5131c75..000000000 --- a/src/maestral/notify/notify_base.py +++ /dev/null @@ -1,108 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module defines base classes for desktop notifications. All platform implementations -must inherit from :class:`DesktopNotifierBase`. -""" - -# system imports -from enum import Enum -from typing import Optional, Dict, Callable, Union - - -class NotificationLevel(Enum): - """Enumeration of notification levels - - The interpretation and visuals will depend on the platform. - - :cvar Critical: For critical errors. - :cvar Normal: Default platform notification level. 
- :cvar Low: Low priority notification. - """ - - Critical = "critical" - Normal = "normal" - Low = "low" - - -class Notification: - """A desktop notification - - :param title: Notification title. - :param message: Notification message. - :param urgency: Notification level: low, normal or critical. This is ignored by some - implementations. - :param icon: Path to an icon to use for the notification, typically the app icon. - This is ignored by some implementations, e.g., on macOS where the icon of the - app bundle is always used. - :param action: Handler to call when the notification is clicked. This is ignored by - some implementations. - :param buttons: A dictionary with button names to show in the notification and - handler to call when the respective button is clicked. This is ignored by some - implementations. - - :ivar identifier: An identifier which gets assigned to the notification after it is - sent. This may be a str or int, depending on the type of identifier used by the - platform. - """ - - identifier: Union[str, int, None] - - def __init__( - self, - title: str, - message: str, - urgency: NotificationLevel = NotificationLevel.Normal, - icon: Optional[str] = None, - action: Optional[Callable] = None, - buttons: Optional[Dict[str, Optional[Callable]]] = None, - ) -> None: - - self.title = title - self.message = message - self.urgency = urgency - self.icon = icon - self.action = action - self.buttons = buttons or dict() - self.identifier = None - - -class DesktopNotifierBase: - """Base class for desktop notifier implementations - - Notification levels CRITICAL, NORMAL and LOW may be used by some implementations to - determine how a notification is displayed. - - :param app_name: Name to identify the application in the notification center. On - Linux, this should correspond to the application name in a desktop entry. On - macOS, this field is discarded and the app is identified by the bundle id of the - sending program (e.g., Python). 
- :param notification_limit: Maximum number of notifications to keep in the system's - notification center. This may be ignored by some implementations. - """ - - app_name: str - notification_limit: int - current_notifications: Dict[int, Notification] - - def __init__( - self, app_name: str = "", app_id: str = "", notification_limit: int = 5 - ) -> None: - self.app_name = app_name - self.app_id = app_id - self.notification_limit = notification_limit - self.current_notifications = dict() - self._current_nid = 0 - - def send(self, notification: Notification) -> None: - """ - Sends a desktop notification. Some arguments may be ignored, depending on the - implementation. - - :param notification: Notification to send. - """ - raise NotImplementedError() - - def _next_nid(self) -> int: - self._current_nid += 1 - self._current_nid %= self.notification_limit - return self._current_nid diff --git a/src/maestral/notify/notify_linux.py b/src/maestral/notify/notify_linux.py deleted file mode 100644 index d922b8cd1..000000000 --- a/src/maestral/notify/notify_linux.py +++ /dev/null @@ -1,150 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Notification backend for Linux. Includes an implementation to send desktop notifications -over Dbus. Responding to user interaction with a notification requires a running asyncio -event loop. -""" - -# system imports -import asyncio -import logging -from typing import Optional, Type, Coroutine - -# external imports -from dbus_next import Variant # type: ignore -from dbus_next.aio import MessageBus, ProxyInterface # type: ignore - -# local imports -from .notify_base import Notification, DesktopNotifierBase, NotificationLevel - - -__all__ = ["Impl", "DBusDesktopNotifier"] - -logger = logging.getLogger(__name__) - -Impl: Optional[Type[DesktopNotifierBase]] - - -class DBusDesktopNotifier(DesktopNotifierBase): - """DBus notification backend for Linux - - This implements the org.freedesktop.Notifications standard. 
The DBUS connection is - created in a thread with a running asyncio loop to handle clicked notifications. - """ - - _to_native_urgency = { - NotificationLevel.Low: Variant("y", 0), - NotificationLevel.Normal: Variant("y", 1), - NotificationLevel.Critical: Variant("y", 2), - } - - def __init__(self, app_name: str, app_id: str) -> None: - super().__init__(app_name, app_id) - self._loop = asyncio.get_event_loop() - self.interface: Optional[ProxyInterface] = None - self._force_run_in_loop(self._init_dbus()) - - def _force_run_in_loop(self, coro: Coroutine) -> None: - - if self._loop.is_running(): - asyncio.run_coroutine_threadsafe(coro, self._loop) - else: - self._loop.run_until_complete(coro) - - async def _init_dbus(self) -> None: - - try: - self.bus = await MessageBus().connect() - introspection = await self.bus.introspect( - "org.freedesktop.Notifications", "/org/freedesktop/Notifications" - ) - self.proxy_object = self.bus.get_proxy_object( - "org.freedesktop.Notifications", - "/org/freedesktop/Notifications", - introspection, - ) - self.interface = self.proxy_object.get_interface( - "org.freedesktop.Notifications" - ) - self.interface.on_action_invoked(self._on_action) - except Exception: - self.interface = None - logger.warning("Could not connect to DBUS interface", exc_info=True) - - def send(self, notification: Notification) -> None: - """ - Sends a notification. - - :param notification: Notification to send. - """ - self._force_run_in_loop(self._send(notification)) - - async def _send(self, notification: Notification) -> None: - - # Do nothing if we couldn't connect. - if not self.interface: - return - - # Get an internal ID for the notifications. This will recycle an old ID if we - # are above the max number of notifications. - internal_nid = self._next_nid() - - # Get the old notification to replace, if any. 
- notification_to_replace = self.current_notifications.get(internal_nid) - - if notification_to_replace: - replaces_nid = notification_to_replace.identifier - else: - replaces_nid = 0 - - # Create list of actions with default and user-supplied. - actions = ["default", "default"] - - for button_name in notification.buttons.keys(): - actions += [button_name, button_name] - - try: - # Post the new notification and record the platform ID assigned to it. - platform_nid = await self.interface.call_notify( - self.app_name, # app_name - replaces_nid, # replaces_id - notification.icon or "", # app_icon - notification.title, # summary - notification.message, # body - actions, # actions - {"urgency": self._to_native_urgency[notification.urgency]}, # hints - -1, # expire_timeout (-1 = default) - ) - except Exception: - # This may fail for several reasons: there may not be a systemd service - # file for 'org.freedesktop.Notifications' or the system configuration - # may have changed after DesktopNotifierFreedesktopDBus was initialized. - logger.warning("Notification failed", exc_info=True) - else: - # Store the notification for future replacement and to keep track of - # user-supplied callbacks. - notification.identifier = platform_nid - self.current_notifications[internal_nid] = notification - - def _on_action(self, nid, action_key) -> None: - - # Get the notification instance from the platform ID. - nid = int(nid) - action_key = str(action_key) - notification = next( - iter(n for n in self.current_notifications.values() if n.identifier == nid), - None, - ) - - # Execute any callbacks for button clicks. 
- if notification: - if action_key == "default" and notification.action: - notification.action() - else: - callback = notification.buttons.get(action_key) - - if callback: - callback() - - -Impl = DBusDesktopNotifier diff --git a/src/maestral/notify/notify_macos.py b/src/maestral/notify/notify_macos.py deleted file mode 100644 index f12760635..000000000 --- a/src/maestral/notify/notify_macos.py +++ /dev/null @@ -1,359 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Notification backend for macOS. Includes three implementations, in order of preference: - -1) UNUserNotificationCenter: Introduced in macOS 10.14 and cross-platform with iOS and - iPadOS. Only available from signed app bundles if called from the main executable. - Not available from interactive Python interpreter. -2) NSUserNotificationCenter: Deprecated but still available in macOS 11.0. Can be used - from Python framework. -3) Apple Script: Always available but notifications are sent from Apple Script and not - Python or Maestral app. No callbacks when the user clicks on notification. - -The first two implementations require a running CFRunLoop to invoke callbacks. 
-""" - -# system imports -import uuid -import platform -import subprocess -import shutil -import logging -from typing import Type, Optional, Dict, Tuple - -# external imports -from packaging.version import Version -from rubicon.objc import ObjCClass, objc_method, py_from_ns # type: ignore -from rubicon.objc.runtime import load_library, objc_id, objc_block # type: ignore - -# local imports -from .notify_base import Notification, DesktopNotifierBase -from maestral.constants import FROZEN - - -__all__ = ["Impl", "CocoaNotificationCenter", "CocoaNotificationCenterLegacy"] - -logger = logging.getLogger(__name__) -macos_version, *_ = platform.mac_ver() - -foundation = load_library("Foundation") - -NSObject = ObjCClass("NSObject") - - -Impl: Optional[Type[DesktopNotifierBase]] = None - - -if FROZEN and Version(macos_version) >= Version("10.14.0"): - - uns = load_library("UserNotifications") - - UNUserNotificationCenter = ObjCClass("UNUserNotificationCenter") - UNMutableNotificationContent = ObjCClass("UNMutableNotificationContent") - UNNotificationRequest = ObjCClass("UNNotificationRequest") - UNNotificationAction = ObjCClass("UNNotificationAction") - UNNotificationCategory = ObjCClass("UNNotificationCategory") - - NSSet = ObjCClass("NSSet") - - UNNotificationDefaultActionIdentifier = ( - "com.apple.UNNotificationDefaultActionIdentifier" - ) - UNNotificationDismissActionIdentifier = ( - "com.apple.UNNotificationDismissActionIdentifier" - ) - - UNAuthorizationOptionBadge = 1 << 0 - UNAuthorizationOptionSound = 1 << 1 - UNAuthorizationOptionAlert = 1 << 2 - - UNNotificationActionOptionForeground = 1 << 2 - - UNNotificationCategoryOptionNone = 0 - - class NotificationCenterDelegate(NSObject): # type: ignore - """Delegate to handle user interactions with notifications""" - - @objc_method - def userNotificationCenter_didReceiveNotificationResponse_withCompletionHandler_( - self, center, response, completion_handler: objc_block - ) -> None: - - # Get the notification which was 
clicked from the platform ID. - internal_nid = py_from_ns( - response.notification.request.content.userInfo["internal_nid"] - ) - notification = self.interface.current_notifications[internal_nid] - - # Get and call the callback which corresponds to the user interaction. - if response.actionIdentifier == UNNotificationDefaultActionIdentifier: - - callback = notification.action - - if callback: - callback() - - elif response.actionIdentifier != UNNotificationDismissActionIdentifier: - - action_id_str = py_from_ns(response.actionIdentifier) - - callback = notification.buttons.get(action_id_str) - - if callback: - callback() - - completion_handler() - - class CocoaNotificationCenter(DesktopNotifierBase): - """UNUserNotificationCenter backend for macOS - - Can be used with macOS Catalina and newer. Both app name and bundle identifier - will be ignored. The notification center automatically uses the values provided - by the app bundle. This implementation only works from within signed app bundles - and if called from the main executable. - - :param app_name: The name of the app. - :param app_id: The bundle identifier of the app. 
- """ - - _notification_categories: Dict[Tuple[str, ...], str] - - def __init__(self, app_name: str, app_id: str) -> None: - super().__init__(app_name, app_id) - self.nc = UNUserNotificationCenter.currentNotificationCenter() - self.nc_delegate = NotificationCenterDelegate.alloc().init() - self.nc_delegate.interface = self - self.nc.delegate = self.nc_delegate - - self._notification_categories = {} - - def _on_auth_completed(granted: bool, error: objc_id) -> None: - if granted: - logger.debug("UNUserNotificationCenter: authorisation granted") - else: - logger.debug("UNUserNotificationCenter: authorisation denied") - - if error: - error = py_from_ns(error) - logger.warning("UNUserNotificationCenter: %s", str(error)) - - self.nc.requestAuthorizationWithOptions( - UNAuthorizationOptionAlert - | UNAuthorizationOptionSound - | UNAuthorizationOptionBadge, - completionHandler=_on_auth_completed, - ) - - def send(self, notification: Notification) -> None: - """ - Sends a notification. - - :param notification: Notification to send. - """ - - # Get an internal ID for the notifications. This will recycle an old ID if - # we are above the max number of notifications. - internal_nid = self._next_nid() - - # Get the old notification to replace, if any. - notification_to_replace = self.current_notifications.get(internal_nid) - - if notification_to_replace: - platform_nid = notification_to_replace.identifier - else: - platform_nid = str(uuid.uuid4()) - - # Set up buttons for notification. On macOS, we need need to register a new - # notification category for every unique set of buttons. - button_names = tuple(notification.buttons.keys()) - category_id = self._category_id_for_button_names(button_names) - - # Create the native notification + notification request. 
- content = UNMutableNotificationContent.alloc().init() - content.title = notification.title - content.body = notification.message - content.categoryIdentifier = category_id - content.userInfo = {"internal_nid": internal_nid} - - notification_request = UNNotificationRequest.requestWithIdentifier( - platform_nid, content=content, trigger=None - ) - - # Post the notification. - self.nc.addNotificationRequest( - notification_request, withCompletionHandler=None - ) - - # Store the notification for future replacement and to keep track of - # user-supplied callbacks. - notification.identifier = platform_nid - self.current_notifications[internal_nid] = notification - - def _category_id_for_button_names( - self, button_names: Tuple[str, ...] - ) -> Optional[str]: - """ - Creates a and registers a new notification category with the given buttons - or retrieves an existing one. - """ - - if not button_names: - return None - - try: - return self._notification_categories[button_names] - except KeyError: - actions = [] - - for name in button_names: - action = UNNotificationAction.actionWithIdentifier( - name, title=name, options=UNNotificationActionOptionForeground - ) - actions.append(action) - - categories = self.nc.notificationCategories - category_id = str(uuid.uuid4()) - new_categories = categories.setByAddingObject( - UNNotificationCategory.categoryWithIdentifier( - category_id, - actions=actions, - intentIdentifiers=[], - options=UNNotificationCategoryOptionNone, - ) - ) - self.nc.notificationCategories = new_categories - self._notification_categories[button_names] = category_id - - return category_id - - if UNUserNotificationCenter.currentNotificationCenter(): - Impl = CocoaNotificationCenter - - -elif Version(macos_version) < Version("11.1.0"): - - NSUserNotification = ObjCClass("NSUserNotification") - NSUserNotificationCenter = ObjCClass("NSUserNotificationCenter") - NSDate = ObjCClass("NSDate") - - NSUserNotificationActivationTypeContentsClicked = 1 - 
NSUserNotificationActivationTypeActionButtonClicked = 2 - NSUserNotificationActivationTypeAdditionalActionClicked = 4 - - class NotificationCenterDelegate(NSObject): # type: ignore - """Delegate to handle user interactions with notifications""" - - # subclass UNUserNotificationCenter and define delegate method - # to handle clicked notifications - - @objc_method - def userNotificationCenter_didActivateNotification_( - self, center, notification - ) -> None: - - internal_nid = py_from_ns(notification.userInfo["internal_nid"]) - notification_info = self.interface.current_notifications[internal_nid] - - if ( - notification.activationType - == NSUserNotificationActivationTypeContentsClicked - ): - - if notification_info.action: - notification_info.action() - - elif ( - notification.activationType - == NSUserNotificationActivationTypeActionButtonClicked - ): - - button_title = py_from_ns(notification.actionButtonTitle) - callback = notification_info.buttons.get(button_title) - - if callback: - callback() - - class CocoaNotificationCenterLegacy(DesktopNotifierBase): - """NSUserNotificationCenter backend for macOS - - Should be used for macOS High Sierra and earlier or outside of app bundles. - Supports only a single button per notification. Both app name and bundle - identifier will be ignored. The notification center automatically uses the - values provided by the app bundle or the Python framework. - - :param app_name: The name of the app. - :param app_id: The bundle identifier of the app. - """ - - def __init__(self, app_name: str, app_id: str) -> None: - super().__init__(app_name, app_id) - - self.nc = NSUserNotificationCenter.defaultUserNotificationCenter - self.nc.delegate = NotificationCenterDelegate.alloc().init() - self.nc.delegate.interface = self - - def send(self, notification: Notification) -> None: - """ - Sends a notification. - - :param notification: Notification to send. 
- """ - - internal_nid = self._next_nid() - notification_to_replace = self.current_notifications.get(internal_nid) - - if notification_to_replace: - platform_nid = notification_to_replace.identifier - else: - platform_nid = str(uuid.uuid4()) - - n = NSUserNotification.alloc().init() - n.title = notification.title - n.informativeText = notification.message - n.identifier = platform_nid - n.userInfo = {"internal_nid": internal_nid} - n.deliveryDate = NSDate.dateWithTimeInterval(0, sinceDate=NSDate.date()) - - if notification.buttons: - if len(notification.buttons) > 1: - logger.debug( - "NSUserNotificationCenter: only a single button is supported" - ) - n.hasActionButton = True - n.actionButtonTitle = list(notification.buttons.keys())[0] - - self.nc.scheduleNotification(n) - - notification.identifier = platform_nid - self.current_notifications[internal_nid] = notification - - if NSUserNotificationCenter.defaultUserNotificationCenter: - Impl = CocoaNotificationCenterLegacy - - -if Impl is None and shutil.which("osascript"): - - # fall back to apple script - - class DesktopNotifierOsaScript(DesktopNotifierBase): - """Apple script backend for macOS - - Sends desktop notifications via apple script. Does not support buttons or - callbacks. Apple script will always appear as the sending application. - """ - - def send(self, notification: Notification) -> None: - """ - Sends a notification. - - :param notification: Notification to send. 
- """ - - script = ( - f'display notification "{notification.message}" ' - f'with title "{notification.title}"' - ) - - subprocess.call(["osascript", "-e", script]) - - Impl = DesktopNotifierOsaScript diff --git a/src/maestral/oauth.py b/src/maestral/oauth.py index ea2b1242a..ac44f24d6 100644 --- a/src/maestral/oauth.py +++ b/src/maestral/oauth.py @@ -11,7 +11,7 @@ # external imports import keyring.backends # type: ignore -import keyring.backends.OS_X # type: ignore +import keyring.backends.macOS # type: ignore import keyring.backends.SecretService # type: ignore import keyring.backends.kwallet # type: ignore from keyring.backend import KeyringBackend # type: ignore @@ -33,7 +33,7 @@ logger = logging.getLogger(__name__) supported_keyring_backends = ( - keyring.backends.OS_X.Keyring, + keyring.backends.macOS.Keyring, keyring.backends.SecretService.Keyring, keyring.backends.kwallet.DBusKeyring, keyring.backends.kwallet.DBusKeyringKWallet4, @@ -114,14 +114,25 @@ def __init__(self, config_name: str, app_key: str = DROPBOX_APP_KEY) -> None: self._state.get("account", "token_access_type") or None ) - self.keyring = self._get_keyring_backend() - # defer keyring access until token requested by user self.loaded = False + self._keyring: Optional[KeyringBackend] = None self._access_token: Optional[str] = None self._refresh_token: Optional[str] = None self._expires_at: Optional[datetime] = None + @property + def keyring(self) -> KeyringBackend: + + if not self._keyring: + self._keyring = self._get_keyring_backend() + + return self._keyring + + @keyring.setter + def keyring(self, ring: KeyringBackend) -> None: + self._keyring = ring + def _get_keyring_backend(self) -> KeyringBackend: """ Returns the keyring backend currently used. 
If none is used because we are not diff --git a/src/maestral/sync.py b/src/maestral/sync.py index 5386650ff..3f9d54ddc 100644 --- a/src/maestral/sync.py +++ b/src/maestral/sync.py @@ -5,24 +5,23 @@ import sys import os import os.path as osp +import errno from stat import S_ISDIR -import socket -import resource import logging import time -import tempfile import random import uuid import urllib.parse -from threading import Thread, Event, RLock, current_thread +import enum +import pprint +import gc +from threading import Thread, Event, Condition, RLock, current_thread from concurrent.futures import ThreadPoolExecutor from queue import Queue, Empty from collections import abc from contextlib import contextmanager -import enum -import pprint -import gc from functools import wraps +from tempfile import NamedTemporaryFile from typing import ( Optional, Any, @@ -50,7 +49,12 @@ import dropbox # type: ignore from dropbox.files import Metadata, DeletedMetadata, FileMetadata, FolderMetadata # type: ignore from watchdog.events import FileSystemEventHandler # type: ignore -from watchdog.events import EVENT_TYPE_CREATED, EVENT_TYPE_DELETED, EVENT_TYPE_MOVED +from watchdog.events import ( + EVENT_TYPE_CREATED, + EVENT_TYPE_DELETED, + EVENT_TYPE_MOVED, + EVENT_TYPE_MODIFIED, +) from watchdog.events import ( DirModifiedEvent, FileModifiedEvent, @@ -65,37 +69,38 @@ from watchdog.utils.dirsnapshot import DirectorySnapshot # type: ignore # local imports +from . 
import notify from .config import MaestralConfig, MaestralState -from .notify import MaestralDesktopNotifier -from .fsevents import Observer from .constants import ( IDLE, SYNCING, - PAUSED, STOPPED, + CONNECTED, DISCONNECTED, + CONNECTING, EXCLUDED_FILE_NAMES, EXCLUDED_DIR_NAMES, MIGNORE_FILE, FILE_CACHE, ) from .errors import ( + MaestralApiError, SyncError, + CancelledError, NoDropboxDirError, CacheDirError, PathError, NotFoundError, - DropboxServerError, FileConflictError, FolderConflictError, InvalidDbidError, DatabaseError, + InotifyError, ) from .client import ( DropboxClient, os_to_maestral_error, convert_api_errors, - fswatch_to_maestral_error, ) from .database import ( Base, @@ -108,8 +113,15 @@ ItemType, ChangeType, ) +from .fsevents import Observer from .utils import removeprefix, sanitize_string from .utils.caches import LRUCache +from .utils.integration import ( + get_inotify_limits, + cpu_usage_percent, + check_connection, + CPU_COUNT, +) from .utils.path import ( generate_cc_name, cased_path_candidates, @@ -144,7 +156,9 @@ logger = logging.getLogger(__name__) -_cpu_count = os.cpu_count() or 1 # os.cpu_count can return None + +umask = os.umask(0o22) +os.umask(umask) # type definitions ExecInfoType = Tuple[Type[BaseException], BaseException, Optional[TracebackType]] @@ -178,6 +192,12 @@ def __init__( self.ttl = ttl self.recursive = recursive + def __repr__(self) -> str: + return ( + f"<{self.__class__.__name__}(event={self.event}, " + f"recursive={self.recursive}, ttl={self.ttl})>" + ) + class FSEventHandler(FileSystemEventHandler): """A local file event handler @@ -186,8 +206,14 @@ class FSEventHandler(FileSystemEventHandler): to be uploaded by :meth:`upload_worker`. This acts as a translation layer between :class:`watchdog.Observer` and :class:`SyncEngine`. - :param syncing: Set when syncing is running. - :param startup: Set when startup is running. 
+ White lists of event types to handle are supplied as ``file_event_types`` and + ``dir_event_types``. This is for forward compatibility as additional event types + may be added to watchdog in the future. + + :param file_event_types: Types of file events to handle. This acts as a whitelist. + By default, only FileClosedEvents are ignored. + :param dir_event_types: Types of directory events to handle. This acts as a + whitelist. By default, only DirModifiedEvents are ignored. :cvar float ignore_timeout: Timeout in seconds after which ignored paths will be discarded. @@ -196,15 +222,53 @@ class FSEventHandler(FileSystemEventHandler): _ignored_events: List[_Ignore] local_file_event_queue: "Queue[FileSystemEvent]" - def __init__(self, syncing: Event, startup: Event) -> None: + def __init__( + self, + file_event_types: Tuple[str, ...] = ( + EVENT_TYPE_CREATED, + EVENT_TYPE_DELETED, + EVENT_TYPE_MODIFIED, + EVENT_TYPE_MOVED, + ), + dir_event_types: Tuple[str, ...] = ( + EVENT_TYPE_CREATED, + EVENT_TYPE_DELETED, + EVENT_TYPE_MOVED, + ), + ) -> None: + + self._enabled = False + self.has_events = Condition() - self.syncing = syncing - self.startup = startup + self.file_event_types = file_event_types + self.dir_event_types = dir_event_types + + self.file_event_types = file_event_types + self.dir_event_types = dir_event_types self._ignored_events = [] self.ignore_timeout = 2.0 self.local_file_event_queue = Queue() + @property + def enabled(self) -> bool: + """Whether queuing of events is enabled.""" + return self._enabled + + def enable(self) -> None: + """Turn on queueing of events.""" + self._enabled = True + + def disable(self) -> None: + """Turn off queueing of new events and remove all events from queue.""" + self._enabled = False + + while True: + try: + self.local_file_event_queue.get_nowait() + except Empty: + break + @contextmanager def ignore( self, *events: FileSystemEvent, recursive: bool = True @@ -244,6 +308,8 @@ def ignore( for ignore in new_ignores: ignore.ttl 
= time.time() + self.ignore_timeout + self.expire_ignored_events() + def expire_ignored_events(self) -> None: """Removes all expired ignore entries.""" @@ -266,8 +332,6 @@ def _is_ignored(self, event: FileSystemEvent) -> bool: :returns: Whether the event should be ignored. """ - self.expire_ignored_events() - for ignore in self._ignored_events.copy(): ignore_event = ignore.event recursive = ignore.recursive @@ -304,19 +368,57 @@ def on_any_event(self, event: FileSystemEvent) -> None: :param event: Watchdog file event. """ - # ignore events if we are not during startup or sync - if not (self.syncing.is_set() or self.startup.is_set()): + # ignore events if asked to do so + if not self._enabled: + return + + # handle only whitelisted dir event types + if event.is_directory and event.event_type not in self.dir_event_types: return - # ignore all DirMovedEvents - if isinstance(event, DirModifiedEvent): + # handle only whitelisted file event types + if not event.is_directory and event.event_type not in self.file_event_types: return # check if event should be ignored if self._is_ignored(event): return - self.local_file_event_queue.put(event) + self.queue_event(event) + + def queue_event(self, event: FileSystemEvent) -> None: + """ + Queues an individual file system event. Notifies / wakes up all threads that are + waiting with :meth:`wait_for_event`. + + :param event: File system event to queue. + """ + with self.has_events: + self.local_file_event_queue.put(event) + self.has_events.notify_all() + + def wait_for_event(self, timeout: float = 40) -> bool: + """ + Blocks until an event is available in the queue or a timeout occurs, whichever + comes first. You can use with method to wait for file system events in another + thread. + + .. note:: If there are multiple threads waiting for events, all of them will be + notified. If one of those threads starts getting events from + :attr:`local_file_event_queue`, other threads may find that queue empty. 
You + should therefore always be prepared to handle an empty queue, if if this + method returns ``True``. + + :param timeout: Maximum time to block in seconds. + :returns: ``True`` if an event is available, ``False`` if the call returns due + to a timeout. + """ + + with self.has_events: + if self.local_file_event_queue.qsize() > 0: + return True + self.has_events.wait(timeout) + return self.local_file_event_queue.qsize() > 0 class PersistentStateMutableSet(abc.MutableSet): @@ -394,7 +496,6 @@ class SyncEngine: conflict resolution and updates to our index. :param client: Dropbox API client instance. - :param fs_events_handler: File system event handler to inform us of local events. """ sync_errors: Set[SyncError] @@ -402,14 +503,13 @@ class SyncEngine: _case_conversion_cache: LRUCache _max_history = 1000 - _num_threads = min(32, _cpu_count * 3) + _num_threads = min(32, CPU_COUNT * 3) - def __init__(self, client: DropboxClient, fs_events_handler: FSEventHandler): + def __init__(self, client: DropboxClient): self.client = client self.config_name = self.client.config_name - self.cancel_pending = Event() - self.fs_events = fs_events_handler + self.fs_events = FSEventHandler() self.sync_lock = RLock() self._db_lock = RLock() @@ -417,7 +517,7 @@ def __init__(self, client: DropboxClient, fs_events_handler: FSEventHandler): self._conf = MaestralConfig(self.config_name) self._state = MaestralState(self.config_name) - self.notifier = MaestralDesktopNotifier(self.config_name) + self.notifier = notify.MaestralDesktopNotifier(self.config_name) # upload_errors / download_errors: contains failed uploads / downloads # (from sync errors) to retry later @@ -440,6 +540,7 @@ def __init__(self, client: DropboxClient, fs_events_handler: FSEventHandler): # data structures for internal communication self.sync_errors = set() + self._cancel_requested = Event() # data structures for user information self.syncing = [] @@ -470,7 +571,7 @@ def __init__(self, client: DropboxClient, 
fs_events_handler: FSEventHandler): self._is_case_sensitive = is_fs_case_sensitive(get_home_dir()) self._mignore_rules = self._load_mignore_rules_form_file() self._excluded_items = self._conf.get("main", "excluded_items") - self._max_cpu_percent = self._conf.get("sync", "max_cpu_percent") * _cpu_count + self._max_cpu_percent = self._conf.get("sync", "max_cpu_percent") * CPU_COUNT # caches self._case_conversion_cache = LRUCache(capacity=5000) @@ -566,7 +667,7 @@ def max_cpu_percent(self) -> float: def max_cpu_percent(self, percent: float) -> None: """Setter: max_cpu_percent.""" self._max_cpu_percent = percent - self._conf.set("app", "max_cpu_percent", percent // _cpu_count) + self._conf.set("app", "max_cpu_percent", percent // CPU_COUNT) # ==== sync state ================================================================== @@ -654,6 +755,16 @@ def iter_index(self) -> Iterator[IndexEntry]: for entry in self._db_session.query(IndexEntry).yield_per(1000): yield entry + def index_count(self) -> int: + """ + Returns the number of items in our index without loading any items. + + :returns: Number of index entries. + """ + + with self._database_access(): + return self._db_session.query(IndexEntry).count() + def get_local_rev(self, dbx_path: str) -> Optional[str]: """ Gets revision number of local file. @@ -741,11 +852,11 @@ def get_local_hash(self, local_path: str) -> Optional[str]: with convert_api_errors(local_path=local_path): hash_str, mtime = content_hash(local_path) - self.save_local_hash(local_path, hash_str, mtime) + self._save_local_hash(local_path, hash_str, mtime) return hash_str - def save_local_hash( + def _save_local_hash( self, local_path: str, hash_str: Optional[str], mtime: Optional[float] ) -> None: """ @@ -959,7 +1070,7 @@ def ensure_dropbox_folder_present(self) -> None: """ Checks if the Dropbox folder still exists where we expect it to be. - :raises DropboxDeletedError: When localal Dropbox directory does not exist.
+ :raises DropboxDeletedError: When local Dropbox directory does not exist. """ if not osp.isdir(self.dropbox_path): @@ -978,7 +1089,7 @@ def _ensure_cache_dir_present(self) -> None: """ retries = 0 - max_retries = 3 + max_retries = 100 while not osp.isdir(self.file_cache_path): try: @@ -990,7 +1101,7 @@ def _ensure_cache_dir_present(self) -> None: self.clean_cache_dir() except OSError as err: raise CacheDirError( - f"Cannot create cache directory (errno {err.errno})", + f"Cannot create cache directory: {os.strerror(err.errno)}", "Please check if you have write permissions for " f"{self._file_cache_path}.", ) @@ -1001,6 +1112,8 @@ def _ensure_cache_dir_present(self) -> None: "Exceeded maximum number of retries", ) + retries += 1 + def clean_cache_dir(self) -> None: """Removes all items in the cache directory.""" @@ -1011,7 +1124,7 @@ def clean_cache_dir(self) -> None: pass except OSError as err: raise CacheDirError( - f"Cannot clean cache directory (errno {err.errno})", + f"Cannot create cache directory: {os.strerror(err.errno)}", "Please check if you have write permissions for " f"{self._file_cache_path}.", ) @@ -1020,16 +1133,17 @@ def _new_tmp_file(self) -> str: """Returns a new temporary file name in our cache directory.""" self._ensure_cache_dir_present() try: - with tempfile.NamedTemporaryFile( - dir=self.file_cache_path, delete=False - ) as f: - umask = os.umask(0o22) - os.umask(umask) - os.fchmod(f.fileno(), 0o666 & ~umask) + with NamedTemporaryFile(dir=self.file_cache_path, delete=False) as f: + try: + os.chmod(f.fileno(), 0o666 & ~umask) + except OSError as exc: + # Can occur on file system's that don't support POSIX permissions + # such as NTFS mounted without the permissions option. 
+ logger.debug("Cannot set permissions: errno %s", exc.errno) return f.name except OSError as err: raise CacheDirError( - f"Cannot create temporary file (errno {err.errno})", + f"Cannot create cache directory: {os.strerror(err.errno)}", "Please check if you have write permissions for " f"{self._file_cache_path}.", ) @@ -1138,7 +1252,10 @@ def to_local_path_from_cased(self, dbx_path_cased: str) -> str: def to_local_path(self, dbx_path: str) -> str: """ Converts a Dropbox path to the corresponding local path. Only the basename must - be correctly cased. This is slower than :meth:`to_local_path_from_cased`. + be correctly cased, as guaranteed by the Dropbox API for the ``display_path`` + attribute of file or folder metadata. + + This method slower than :meth:`to_local_path_from_cased`. :param dbx_path: Path relative to Dropbox folder, must be correctly cased in its basename. @@ -1280,6 +1397,21 @@ def _slow_down(self) -> None: while cpu_usage > self._max_cpu_percent: cpu_usage = cpu_usage_percent(0.5 + 2 * random.random()) + def cancel_sync(self) -> None: + """ + Raises a CancelledError in all sync threads and waits for them to shut down. + """ + + self._cancel_requested.set() + + # Wait until we can acquire the sync lock => we are idle. + self.sync_lock.acquire() + self.sync_lock.release() + + self._cancel_requested.clear() + + logger.info("Sync aborted") + def busy(self) -> bool: """ Checks if we are currently syncing. @@ -1331,8 +1463,8 @@ def callback(): self.notifier.notify( "Sync error", f"Could not sync {file_name}", - level=self.notifier.SYNCISSUE, - buttons={"Show": callback}, + level=notify.SYNCISSUE, + actions={"Show": callback}, ) self.sync_errors.add(err) @@ -1353,7 +1485,8 @@ def _database_access(self, log_errors: bool = False) -> Iterator[None]: only logged. 
""" - title = None + title = "" + msg = "" new_exc = None try: @@ -1383,10 +1516,11 @@ def _database_access(self, log_errors: bool = False) -> Iterator[None]: if new_exc: if log_errors: logger.error(title, exc_info=exc_info_tuple(new_exc)) + self.notifier.notify(title, msg, level=notify.ERROR) else: raise new_exc - def free_memory(self) -> None: + def _free_memory(self) -> None: """ Frees memory by resetting our database session and the requests session, clearing out case-conversion cache and clearing all expired event ignores and. @@ -1421,17 +1555,18 @@ def upload_local_changes_while_inactive(self) -> None: sync_events = [ SyncEvent.from_file_system_event(e, self) for e in events ] - except FileNotFoundError: + except (FileNotFoundError, NotADirectoryError): self.ensure_dropbox_folder_present() return if len(events) > 0: - self.apply_local_changes(sync_events, local_cursor) + self.apply_local_changes(sync_events) logger.debug("Uploaded local changes while inactive") else: - self.local_cursor = local_cursor logger.debug("No local changes while inactive") + self.local_cursor = local_cursor + def _get_local_changes_while_inactive(self) -> Tuple[List[FileSystemEvent], float]: """ Retrieves all local changes since the last sync by performing a full scan of the @@ -1448,7 +1583,9 @@ def _get_local_changes_while_inactive(self) -> Tuple[List[FileSystemEvent], floa changes = [] snapshot_time = time.time() - snapshot = DirectorySnapshot(self.dropbox_path) + snapshot = DirectorySnapshot( + self.dropbox_path, listdir=self._scandir_with_mignore + ) lowercase_snapshot_paths: Set[str] = set() # don't use iterator here but pre-fetch all entries @@ -1468,14 +1605,13 @@ def _get_local_changes_while_inactive(self) -> Tuple[List[FileSystemEvent], floa # check if item was created or modified since last sync # but before we started the FileEventHandler (~snapshot_time) stats = snapshot.stat_info(path) - ctime_check = ( - snapshot_time > stats.st_ctime > 
self.get_last_sync(dbx_path_lower) - ) + last_sync = self.get_last_sync(dbx_path_lower) + ctime_check = snapshot_time > stats.st_ctime > last_sync # always upload untracked items, check ctime of tracked items - local_entry = self.get_index_entry(dbx_path_lower) - is_new = local_entry is None - is_modified = ctime_check and local_entry is not None + index_entry = self.get_index_entry(dbx_path_lower) + is_new = index_entry is None + is_modified = ctime_check and not is_new if is_new: if snapshot.isdir(path): @@ -1485,10 +1621,10 @@ def _get_local_changes_while_inactive(self) -> Tuple[List[FileSystemEvent], floa changes.append(event) elif is_modified: - if snapshot.isdir(path) and local_entry.is_directory: # type: ignore - event = DirModifiedEvent(path) - changes.append(event) - elif not snapshot.isdir(path) and not local_entry.is_directory: # type: ignore + if snapshot.isdir(path) and index_entry.is_directory: # type: ignore + # We don't emit `DirModifiedEvent`s. + pass + elif not snapshot.isdir(path) and not index_entry.is_directory: # type: ignore event = FileModifiedEvent(path) changes.append(event) elif snapshot.isdir(path): @@ -1501,8 +1637,9 @@ def _get_local_changes_while_inactive(self) -> Tuple[List[FileSystemEvent], floa changes += [event0, event1] # get deleted items + dbx_root_lower = self.dropbox_path.lower() for entry in entries: - local_path_uncased = f"{self.dropbox_path}{entry.dbx_path_lower}".lower() + local_path_uncased = f"{dbx_root_lower}{entry.dbx_path_lower}" if local_path_uncased not in lowercase_snapshot_paths: local_path = self.to_local_path_from_cased(entry.dbx_path_cased) if entry.is_directory: @@ -1519,26 +1656,53 @@ def _get_local_changes_while_inactive(self) -> Tuple[List[FileSystemEvent], floa return changes, snapshot_time - def wait_for_local_changes( - self, timeout: float = 40, delay: float = 1 - ) -> Tuple[List[SyncEvent], float]: + def wait_for_local_changes(self, timeout: float = 40) -> bool: + """ + Blocks until local changes are 
available. + + :param timeout: Maximum time in seconds to wait. + :returns: ``True`` if changes are available, ``False`` otherwise. + """ + + logger.debug("Waiting for local changes since cursor: %s", self.local_cursor) + + return self.fs_events.wait_for_event(timeout) + + def upload_sync_cycle(self): + """ + Performs a full upload sync cycle by calling in order: + + 1) :meth:`list_local_changes` + 2) :meth:`apply_local_changes` + + Handles updating the local cursor for you. If monitoring for local file events + was interrupted, call :meth:`upload_local_changes_while_inactive` instead. + """ + + with self.sync_lock: + + changes, cursor = self.list_local_changes() + self.apply_local_changes(changes) + + self.local_cursor = cursor + + del changes + self._free_memory() + + if self._cancel_requested.is_set(): + raise CancelledError("Sync cancelled") + + def list_local_changes(self, delay: float = 1) -> Tuple[List[SyncEvent], float]: """ Waits for local file changes. Returns a list of local changes with at most one entry per path. - :param timeout: If no changes are detected within timeout (sec), an empty list - is returned. :param delay: Delay in sec to wait for subsequent changes before returning. :returns: (list of sync times events, time_stamp) """ - self.ensure_dropbox_folder_present() - - try: - events = [self.fs_events.local_file_event_queue.get(timeout=timeout)] - local_cursor = time.time() - except Empty: - return [], time.time() + events = [] + local_cursor = time.time() # keep collecting events until idle for `delay` while True: @@ -1556,84 +1720,74 @@ def wait_for_local_changes( return sync_events, local_cursor - def apply_local_changes( - self, sync_events: List[SyncEvent], local_cursor: float - ) -> List[SyncEvent]: + def apply_local_changes(self, sync_events: List[SyncEvent]) -> List[SyncEvent]: """ Applies locally detected changes to the remote Dropbox. Changes which should be ignored (mignore or always ignored files) are skipped. 
:param sync_events: List of local file system events. - :param local_cursor: Time stamp of last event in ``events``. """ - with self.sync_lock: - - results = [] + results = [] - if len(sync_events) > 0: + if len(sync_events) > 0: - sync_events, _ = self._filter_excluded_changes_local(sync_events) + sync_events, _ = self._filter_excluded_changes_local(sync_events) - deleted: List[SyncEvent] = [] - dir_moved: List[SyncEvent] = [] - other: List[SyncEvent] = [] # file created + moved, dir created + deleted: List[SyncEvent] = [] + dir_moved: List[SyncEvent] = [] + other: List[SyncEvent] = [] # file created + moved, dir created - for event in sync_events: - if event.is_deleted: - deleted.append(event) - elif event.is_directory and event.is_moved: - dir_moved.append(event) - else: - other.append(event) - - # housekeeping - self.syncing.append(event) + for event in sync_events: + if event.is_deleted: + deleted.append(event) + elif event.is_directory and event.is_moved: + dir_moved.append(event) + else: + other.append(event) - # apply deleted events first, folder moved events second - # neither event type requires an actual upload - if deleted: - logger.info("Uploading deletions...") + # housekeeping + self.syncing.append(event) - with ThreadPoolExecutor( - max_workers=self._num_threads, - thread_name_prefix="maestral-upload-pool", - ) as executor: - res = executor.map(self._create_remote_entry, deleted) + # apply deleted events first, folder moved events second + # neither event type requires an actual upload + if deleted: + logger.info("Uploading deletions...") - n_items = len(deleted) - for n, r in enumerate(res): - throttled_log(logger, f"Deleting {n + 1}/{n_items}...") - results.append(r) + with ThreadPoolExecutor( + max_workers=self._num_threads, + thread_name_prefix="maestral-upload-pool", + ) as executor: + res = executor.map(self._create_remote_entry, deleted) - if dir_moved: - logger.info("Moving folders...") + n_items = len(deleted) + for n, r in enumerate(res): 
+ throttled_log(logger, f"Deleting {n + 1}/{n_items}...") + results.append(r) - for event in dir_moved: - logger.info(f"Moving {event.dbx_path_from}...") - res = self._create_remote_entry(event) - results.append(res) + if dir_moved: + logger.info("Moving folders...") - # apply other events in parallel since order does not matter - with ThreadPoolExecutor( - max_workers=self._num_threads, - thread_name_prefix="maestral-upload-pool", - ) as executor: - res = executor.map(self._create_remote_entry, other) + for event in dir_moved: + logger.info(f"Moving {event.dbx_path_from}...") + res = self._create_remote_entry(event) + results.append(res) - n_items = len(other) - for n, r in enumerate(res): - throttled_log(logger, f"Syncing ↑ {n + 1}/{n_items}") - results.append(r) + # apply other events in parallel since order does not matter + with ThreadPoolExecutor( + max_workers=self._num_threads, + thread_name_prefix="maestral-upload-pool", + ) as executor: + res = executor.map(self._create_remote_entry, other) - self._clean_history() + n_items = len(other) + for n, r in enumerate(res): + throttled_log(logger, f"Syncing ↑ {n + 1}/{n_items}") + results.append(r) - if not self.cancel_pending.is_set(): - # always save local cursor if not aborted by user, - # failed uploads will be tracked and retried individually - self.local_cursor = local_cursor + self._clean_history() - return results + return results def _filter_excluded_changes_local( self, sync_events: List[SyncEvent] @@ -1966,10 +2120,8 @@ def _create_remote_entry(self, event: SyncEvent) -> SyncEvent: :returns: SyncEvent with updated status. """ - if self.cancel_pending.is_set(): - event.status = SyncStatus.Aborted - self.syncing.remove(event) - return event + if self._cancel_requested.is_set(): + raise CancelledError("Sync cancelled") self._slow_down() @@ -2041,6 +2193,9 @@ def _on_local_moved(self, event: SyncEvent) -> Optional[Metadata]: :raises MaestralApiError: For any issues when syncing the item. 
""" + if event.local_path_from == self.dropbox_path: + self.ensure_dropbox_folder_present() + # fail fast on badly decoded paths validate_encoding(event.local_path) @@ -2297,6 +2452,9 @@ def _on_local_deleted(self, event: SyncEvent) -> Optional[Metadata]: :raises MaestralApiError: For any issues when syncing the item. """ + if event.local_path == self.dropbox_path: + self.ensure_dropbox_folder_present() + if self.is_excluded_by_user(event.dbx_path): logger.debug( 'Not deleting "%s": is excluded by selective sync', event.dbx_path @@ -2363,89 +2521,11 @@ def _on_local_deleted(self, event: SyncEvent) -> Optional[Metadata]: # ==== Download sync =============================================================== - def get_remote_folder(self, dbx_path: str = "/") -> bool: - """ - Gets all files/folders from Dropbox and writes them to the local folder - :attr:`dropbox_path`. Call this method on first run of the Maestral. Indexing - and downloading may take several minutes, depending on the size of the user's - Dropbox folder. - - :param dbx_path: Path relative to Dropbox folder. Defaults to root ('/'). - :returns: Whether download was successful. 
- """ - - with self.sync_lock: - - dbx_path = dbx_path or "/" - is_dbx_root = dbx_path == "/" - success = True - - if is_dbx_root: - logger.info("Fetching remote Dropbox") - else: - logger.info(f"Syncing ↓ {dbx_path}") - - if any(is_child(folder, dbx_path) for folder in self.excluded_items): - # if there are excluded subfolders, index and download only included - skip_excluded = True - else: - skip_excluded = False - - try: - # get a cursor and list the folder content - cursor = self.client.get_latest_cursor(dbx_path) - - # iterate over index and download results - list_iter = self.client.list_folder_iterator( - dbx_path, recursive=not skip_excluded, include_deleted=False - ) - - for res in list_iter: - res.entries.sort(key=lambda x: x.path_lower.count("/")) - - # convert metadata to sync_events - sync_events = [ - SyncEvent.from_dbx_metadata(md, self) for md in res.entries - ] - download_res = self.apply_remote_changes(sync_events, cursor=None) - - s = all( - e.status in (SyncStatus.Done, SyncStatus.Skipped) - for e in download_res - ) - success = s and success - - if skip_excluded: - # list and download sub-folder contents if not excluded - included_subfolders = [ - md - for md in res.entries - if isinstance(md, FolderMetadata) - and not self.is_excluded_by_user(md.path_display) - ] - for md in included_subfolders: - s = self.get_remote_folder(md.path_display) - success = s and success - - del included_subfolders - - except SyncError as e: - self._handle_sync_error(e, direction=SyncDirection.Down) - return False - - if is_dbx_root: - # always save remote cursor if this is the root folder, - # failed downloads will be tracked and retried individually - self.remote_cursor = cursor - self._state.set("sync", "last_reindex", time.time()) - - return success - def get_remote_item(self, dbx_path: str) -> bool: """ Downloads a remote file or folder and updates its local rev. If the remote item does not exist, any corresponding local items will be deleted. 
If ``dbx_path`` - refers to a folder, the download will be handled by :meth:`get_remote_folder`. + refers to a folder, the download will be handled by :meth:`_get_remote_folder`. If it refers to a single file, the download will be performed by :meth:`_create_local_entry`. @@ -2456,6 +2536,8 @@ def get_remote_item(self, dbx_path: str) -> bool: :returns: Whether download was successful. """ + logger.info(f"Syncing ↓ {dbx_path}") + with self.sync_lock: md = self.client.get_metadata(dbx_path, include_deleted=True) @@ -2474,71 +2556,166 @@ def get_remote_item(self, dbx_path: str) -> bool: event = SyncEvent.from_dbx_metadata(md, self) if event.is_directory: - success = self.get_remote_folder(dbx_path) + success = self._get_remote_folder(dbx_path) else: self.syncing.append(event) e = self._create_local_entry(event) success = e.status in (SyncStatus.Done, SyncStatus.Skipped) + self._free_memory() + + return success + + def _get_remote_folder(self, dbx_path: str) -> bool: + """ + Gets all files/folders from a Dropbox folder and writes them to the local folder + :attr:`dropbox_path`. + + :param dbx_path: Path relative to Dropbox folder. + :returns: Whether download was successful. 
+ """ + + with self.sync_lock: + + try: + + idx = 0 + + # iterate over index and download results + list_iter = self.client.list_folder_iterator(dbx_path, recursive=True) + + for res in list_iter: + + idx += len(res.entries) + + if idx > 0: + logger.info(f"Indexing {idx}...") + + res.entries.sort(key=lambda x: x.path_lower.count("/")) + + # convert metadata to sync_events + sync_events = [ + SyncEvent.from_dbx_metadata(md, self) for md in res.entries + ] + download_res = self.apply_remote_changes(sync_events) + + success = all( + e.status in (SyncStatus.Done, SyncStatus.Skipped) + for e in download_res + ) + + if self._cancel_requested.is_set(): + raise CancelledError("Sync cancelled") + + except SyncError as e: + self._handle_sync_error(e, direction=SyncDirection.Down) + return False + return success - def wait_for_remote_changes( - self, last_cursor: str, timeout: int = 40, delay: float = 2 - ) -> bool: + def wait_for_remote_changes(self, last_cursor: str, timeout: int = 40) -> bool: """ Blocks until changes to the remote Dropbox are available. :param last_cursor: Cursor form last sync. :param timeout: Timeout in seconds before returning even if there are no changes. Dropbox adds random jitter of up to 90 sec to this value. - :param delay: Delay in sec to wait for subsequent changes that may be - duplicates. This delay is typically only necessary folders are shared / - un-shared with other Dropbox accounts. + :returns: ``True`` if changes are available, ``False`` otherwise. """ logger.debug("Waiting for remote changes since cursor:\n%s", last_cursor) has_changes = self.client.wait_for_remote_changes(last_cursor, timeout=timeout) - time.sleep(delay) + + # For for 2 sec. This delay is typically only necessary folders are shared / + # un-shared with other Dropbox accounts. 
+ time.sleep(2) + logger.debug("Detected remote changes: %s", has_changes) return has_changes - def list_remote_changes(self, last_cursor: str) -> Tuple[List[SyncEvent], str]: + def download_sync_cycle(self) -> None: """ - Lists remote changes since the last download sync. + Performs a full download sync cycle by calling in order: - :param last_cursor: Cursor from last download sync. - :returns: Tuple with remote changes and corresponding cursor + 1) :meth:`list_remote_changes_iterator` + 2) :meth:`apply_remote_changes` + + Handles updating the remote cursor and resuming interrupted syncs for you. + Calling this method will perform a full indexing if this is the first download. """ - logger.info("Fetching remote changes...") - changes = self.client.list_remote_changes(last_cursor) - logger.debug("Listed remote changes:\n%s", entries_repr(changes.entries)) + with self.sync_lock: - clean_changes = self._clean_remote_changes(changes) - logger.debug("Cleaned remote changes:\n%s", entries_repr(clean_changes.entries)) + if self.remote_cursor == "": - clean_changes.entries.sort(key=lambda x: x.path_lower.count("/")) - sync_events = [ - SyncEvent.from_dbx_metadata(md, self) for md in clean_changes.entries - ] - logger.debug("Converted remote changes to SyncEvents") + self._state.set("sync", "last_reindex", time.time()) + self._state.set("sync", "did_finish_indexing", False) + self._state.set("sync", "indexing_counter", 0) + + idx = self._state.get("sync", "indexing_counter") + is_indexing = not self._state.get("sync", "did_finish_indexing") + + if is_indexing and idx == 0: + logger.info("Indexing remote Dropbox") + elif is_indexing: + logger.info("Resuming indexing") + else: + logger.info("Fetching remote changes") + + changes_iter = self.list_remote_changes_iterator(self.remote_cursor) + + # Download changes in chunks to reduce memory usage. 
+ for changes, cursor in changes_iter: + + idx += len(changes) + + if idx > 0: + logger.info(f"Indexing {idx}...") + + downloaded = self.apply_remote_changes(changes) + + # Save (incremental) remote cursor. + self.remote_cursor = cursor + self._state.set("sync", "indexing_counter", idx) + + # Send desktop notifications when not indexing. + if not is_indexing: + self.notify_user(downloaded) + + if self._cancel_requested.is_set(): + raise CancelledError("Sync cancelled") + + del changes + del downloaded + + self._state.set("sync", "did_finish_indexing", True) + self._state.set("sync", "indexing_counter", 0) - return sync_events, changes.cursor + if idx > 0: + logger.info(IDLE) + + self._free_memory() def list_remote_changes_iterator( self, last_cursor: str - ) -> Iterator[Tuple[List[SyncEvent], Optional[str]]]: + ) -> Iterator[Tuple[List[SyncEvent], str]]: """ - Lists remote changes since the last download sync. Works the same as - :meth:`list_remote_changes` but returns an iterator over remote changes. Only - the last result will have a valid cursor which is not None. + Get remote changes since the last download sync, as specified by + ``last_cursor``. If the ``last_cursor`` is from paginating through a previous + set of changes, continue where we left off. If ``last_cursor`` is an empty + string, start a full indexing of the Dropbox folder. :param last_cursor: Cursor from last download sync. :returns: Iterator yielding tuples with remote changes and corresponding cursor. """ - logger.info("Fetching remote changes...") - - changes_iter = self.client.list_remote_changes_iterator(last_cursor) + if last_cursor == "": + # We are starting from the beginning, do a full indexing. + changes_iter = self.client.list_folder_iterator("/", recursive=True) + else: + # Pick up where we left off. This may be an interrupted indexing / + # pagination through changes or a completely new set of changes. 
+ logger.debug("Fetching remote changes since cursor: %s", last_cursor) + changes_iter = self.client.list_remote_changes_iterator(last_cursor) for changes in changes_iter: @@ -2556,12 +2733,9 @@ def list_remote_changes_iterator( logger.debug("Converted remote changes to SyncEvents") - cursor = changes.cursor if not changes.has_more else None - yield sync_events, cursor + yield sync_events, changes.cursor - def apply_remote_changes( - self, sync_events: List[SyncEvent], cursor: Optional[str] - ) -> List[SyncEvent]: + def apply_remote_changes(self, sync_events: List[SyncEvent]) -> List[SyncEvent]: """ Applies remote changes to local folder. Call this on the result of :meth:`list_remote_changes`. The saved cursor is updated after a set of changes @@ -2569,9 +2743,6 @@ def apply_remote_changes( successful completion. :param sync_events: List of remote changes. - :param cursor: Remote cursor corresponding to changes. Take care to only pass - cursors which represent the state of the entire Dropbox. Pass None instead - if you are only downloading a subset of changes. :returns: List of changes that were made to local files and bool indicating if all download syncs were successful. """ @@ -2659,11 +2830,6 @@ def apply_remote_changes( throttled_log(logger, f"Syncing ↓ {n + 1}/{n_items}") results.append(r) - if cursor and not self.cancel_pending.is_set(): - # always save remote cursor if not aborted by user, - # failed downloads will be tracked and retried individually - self.remote_cursor = cursor - self._clean_history() return results @@ -2675,7 +2841,7 @@ def notify_user(self, sync_events: List[SyncEvent]) -> None: :param sync_events: List of SyncEvents from download sync. 
""" - callback: Optional[Callable] + buttons: Dict[str, Callable] changes = [e for e in sync_events if e.status != SyncStatus.Skipped] @@ -2712,6 +2878,8 @@ def notify_user(self, sync_events: List[SyncEvent]) -> None: def callback(): click.launch(event.local_path, locate=True) + buttons = {"Show": callback} + else: if all(e.change_type == sync_events[0].change_type for e in sync_events): @@ -2726,7 +2894,7 @@ def callback(): else: file_name = f"{n_changed} items" - callback = None + buttons = {} if change_type == ChangeType.Removed.value: @@ -2734,12 +2902,14 @@ def callback(): # show dropbox website with deleted files click.launch("https://www.dropbox.com/deleted_files") + buttons = {"Show": callback} + if user_name: msg = f"{user_name} {change_type} {file_name}" else: msg = f"{file_name} {change_type}" - self.notifier.notify("Items synced", msg, buttons={"Show": callback}) + self.notifier.notify("Items synced", msg, actions=buttons) def _filter_excluded_changes_remote( self, changes: List[SyncEvent] @@ -2849,7 +3019,7 @@ def _ctime_newer_than_last_sync(self, local_path: str) -> bool: try: stat = os.stat(local_path) - except FileNotFoundError: + except (FileNotFoundError, NotADirectoryError): # don't check ctime for deleted items (os won't give stat info) # but confirm absence from index return index_entry is not None @@ -2904,7 +3074,7 @@ def _get_ctime(self, local_path: str) -> float: return ctime else: return os.stat(local_path).st_ctime - except FileNotFoundError: + except (FileNotFoundError, NotADirectoryError): return -1.0 def _clean_remote_changes( @@ -2978,10 +3148,8 @@ def _create_local_entry(self, event: SyncEvent) -> SyncEvent: :class:`errors.SyncError` and ``None`` if cancelled. 
""" - if self.cancel_pending.is_set(): - event.status = SyncStatus.Aborted - self.syncing.remove(event) - return event + if self._cancel_requested.is_set(): + raise CancelledError("Sync cancelled") self._slow_down() @@ -3104,7 +3272,7 @@ def _on_remote_file(self, event: SyncEvent) -> Optional[SyncEvent]: ) self.update_index_from_sync_event(event) - self.save_local_hash(event.local_path, event.content_hash, mtime) + self._save_local_hash(event.local_path, event.content_hash, mtime) logger.debug('Created local file "%s"', event.dbx_path) @@ -3204,7 +3372,7 @@ def _on_remote_deleted(self, event: SyncEvent) -> Optional[SyncEvent]: self.update_index_from_sync_event(event) logger.debug('Deleted local item "%s"', event.dbx_path) return event - elif isinstance(exc, FileNotFoundError): + elif isinstance(exc, (FileNotFoundError, NotADirectoryError)): self.update_index_from_sync_event(event) logger.debug('Deletion failed: "%s" not found', event.dbx_path) return None @@ -3250,20 +3418,20 @@ def rescan(self, local_path: str) -> None: logger.debug('Rescanning "%s"', local_path) if osp.isfile(local_path): - self.fs_events.local_file_event_queue.put(FileModifiedEvent(local_path)) + self.fs_events.queue_event(FileModifiedEvent(local_path)) elif osp.isdir(local_path): # add created and deleted events of children as appropriate - snapshot = DirectorySnapshot(local_path) + snapshot = DirectorySnapshot(local_path, listdir=self._scandir_with_mignore) lowercase_snapshot_paths = {x.lower() for x in snapshot.paths} local_path_lower = local_path.lower() for path in snapshot.paths: if snapshot.isdir(path): - self.fs_events.local_file_event_queue.put(DirCreatedEvent(path)) + self.fs_events.queue_event(DirCreatedEvent(path)) else: - self.fs_events.local_file_event_queue.put(FileModifiedEvent(path)) + self.fs_events.queue_event(FileModifiedEvent(path)) # add deleted events @@ -3274,22 +3442,16 @@ def rescan(self, local_path: str) -> None: .all() ) + dbx_root_lower = self.dropbox_path.lower() + 
for entry in entries: - child_path_uncased = ( - f"{self.dropbox_path}{entry.dbx_path_lower}".lower() - ) + child_path_uncased = f"{dbx_root_lower}{entry.dbx_path_lower}" if child_path_uncased not in lowercase_snapshot_paths: - local_child_path = self.to_local_path_from_cased( - entry.dbx_path_cased - ) + local_child = self.to_local_path_from_cased(entry.dbx_path_cased) if entry.is_directory: - self.fs_events.local_file_event_queue.put( - DirDeletedEvent(local_child_path) - ) + self.fs_events.queue_event(DirDeletedEvent(local_child)) else: - self.fs_events.local_file_event_queue.put( - FileDeletedEvent(local_child_path) - ) + self.fs_events.queue_event(FileDeletedEvent(local_child)) elif not osp.exists(local_path): dbx_path = self.to_dbx_path(local_path) @@ -3298,13 +3460,9 @@ def rescan(self, local_path: str) -> None: if local_entry: if local_entry.is_directory: - self.fs_events.local_file_event_queue.put( - DirDeletedEvent(local_path) - ) + self.fs_events.queue_event(DirDeletedEvent(local_path)) else: - self.fs_events.local_file_event_queue.put( - FileDeletedEvent(local_path) - ) + self.fs_events.queue_event(FileDeletedEvent(local_path)) def _clean_history(self): """Commits new events and removes all events older than ``_keep_history`` from @@ -3327,266 +3485,208 @@ def _clean_history(self): # commit to drive self._db_session.commit() + def _scandir_with_mignore(self, path: str) -> List: + return [ + f + for f in os.scandir(path) + if not self._is_mignore_path(self.to_dbx_path(f.path), f.is_dir()) + ] + # ====================================================================================== # Workers for upload, download and connection monitoring threads # ====================================================================================== +@contextmanager +def handle_sync_thread_errors( + running: Event, + autostart: Event, + notifier: notify.MaestralDesktopNotifier, +) -> Iterator[None]: + + try: + yield + except CancelledError: + running.clear() + except 
ConnectionError: + logger.info(DISCONNECTED) + logger.debug("Connection error", exc_info=True) + running.clear() + autostart.set() + logger.info(CONNECTING) + except Exception as err: + running.clear() + autostart.clear() + title = getattr(err, "title", "Unexpected error") + message = getattr(err, "message", "Please restart to continue syncing") + logger.error(title, exc_info=True) + notifier.notify(title, message, level=notify.ERROR) + + def download_worker( - sync: SyncEngine, syncing: Event, running: Event, connected: Event + sync: SyncEngine, + running: Event, + startup_completed: Event, + autostart: Event, ) -> None: """ Worker to sync changes of remote Dropbox with local folder. :param sync: Instance of :class:`SyncEngine`. - :param syncing: Event that indicates if workers are running or paused. :param running: Event to shutdown local file event handler and worker threads. - :param connected: Event that indicates if we can connect to Dropbox. + :param startup_completed: Set when startup sync is completed. + :param autostart: Set when syncing should automatically resume on connection. 
""" + startup_completed.wait() + while running.is_set(): - syncing.wait() + with handle_sync_thread_errors(running, autostart, sync.notifier): - try: has_changes = sync.wait_for_remote_changes(sync.remote_cursor) - with sync.sync_lock: - - if not (running.is_set() and syncing.is_set()): - continue - - if has_changes: - logger.info(SYNCING) - - changes_iter = sync.list_remote_changes_iterator(sync.remote_cursor) + if not running.is_set(): + return - # download changes in chunks to reduce memory usage - for changes, cursor in changes_iter: - downloaded = sync.apply_remote_changes(changes, cursor) - sync.notify_user(downloaded) + sync.ensure_dropbox_folder_present() - sync.client.get_space_usage() # update space usage - logger.info(IDLE) + if has_changes: + logger.info(SYNCING) + sync.download_sync_cycle() + logger.info(IDLE) - # free memory - del changes - del downloaded - sync.free_memory() - - except DropboxServerError: - logger.info("Dropbox server error", exc_info=True) - except ConnectionError: - syncing.clear() - connected.clear() - logger.debug("Lost connection", exc_info=True) - logger.info(DISCONNECTED) - except Exception as err: - running.clear() - syncing.clear() - title = getattr(err, "title", "Unexpected error") - logger.error(title, exc_info=True) + sync.client.get_space_usage() # update space usage def download_worker_added_item( sync: SyncEngine, - syncing: Event, running: Event, - connected: Event, + startup_completed: Event, + autostart: Event, added_item_queue: "Queue[str]", ) -> None: """ Worker to download items which have been newly included in sync. :param sync: Instance of :class:`SyncEngine`. - :param syncing: Event that indicates if workers are running or paused. :param running: Event to shutdown local file event handler and worker threads. - :param connected: Event that indicates if we can connect to Dropbox. + :param startup_completed: Set when startup sync is completed. 
+ :param autostart: Set when syncing should automatically resume on connection. :param added_item_queue: Queue with newly added items to download. Entries are Dropbox paths. """ - while running.is_set(): - - syncing.wait() + startup_completed.wait() - try: - dbx_path = added_item_queue.get() - sync.pending_downloads.add(dbx_path.lower()) # protect against crashes + while running.is_set(): - with sync.sync_lock: + with handle_sync_thread_errors(running, autostart, sync.notifier): - if not (running.is_set() and syncing.is_set()): - # try again later - continue + try: + dbx_path = added_item_queue.get(timeout=40) + except Empty: + pass + else: + # protect against crashes + sync.pending_downloads.add(dbx_path.lower()) - sync.get_remote_item(dbx_path) - sync.pending_downloads.discard(dbx_path) + if not running.is_set(): + return - logger.info(IDLE) + with sync.sync_lock: - # free some memory - sync.free_memory() + sync.get_remote_item(dbx_path) + sync.pending_downloads.discard(dbx_path) - except DropboxServerError: - logger.info("Dropbox server error", exc_info=True) - except ConnectionError: - syncing.clear() - connected.clear() - logger.debug("Lost connection", exc_info=True) - logger.info(DISCONNECTED) - except Exception as err: - running.clear() - syncing.clear() - title = getattr(err, "title", "Unexpected error") - logger.error(title, exc_info=True) + logger.info(IDLE) def upload_worker( - sync: SyncEngine, syncing: Event, running: Event, connected: Event + sync: SyncEngine, + running: Event, + startup_completed: Event, + autostart: Event, ) -> None: """ Worker to sync local changes to remote Dropbox. :param sync: Instance of :class:`SyncEngine`. - :param syncing: Event that indicates if workers are running or paused. :param running: Event to shutdown local file event handler and worker threads. - :param connected: Event that indicates if we can connect to Dropbox. + :param startup_completed: Set when startup sync is completed. 
+ :param autostart: Set when syncing should automatically resume on connection. """ - while running.is_set(): - - syncing.wait() + startup_completed.wait() - try: - changes, local_cursor = sync.wait_for_local_changes() + while running.is_set(): - with sync.sync_lock: - if not (running.is_set() and syncing.is_set()): - continue + with handle_sync_thread_errors(running, autostart, sync.notifier): - if len(changes) > 0: - logger.info(SYNCING) + has_changes = sync.wait_for_local_changes() - sync.apply_local_changes(changes, local_cursor) + if not running.is_set(): + return - if len(changes) > 0: - logger.info(IDLE) + sync.ensure_dropbox_folder_present() - # free some memory - del changes - sync.free_memory() - - except DropboxServerError: - logger.info("Dropbox server error", exc_info=True) - except ConnectionError: - syncing.clear() - connected.clear() - logger.debug("Lost connection", exc_info=True) - logger.info(DISCONNECTED) - except Exception as err: - running.clear() - syncing.clear() - title = getattr(err, "title", "Unexpected error") - logger.error(title, exc_info=True) + if has_changes: + logger.info(SYNCING) + sync.upload_sync_cycle() + logger.info(IDLE) def startup_worker( sync: SyncEngine, - syncing: Event, running: Event, - connected: Event, - startup: Event, - paused_by_user: Event, + startup_completed: Event, + autostart: Event, ) -> None: """ Worker to sync local changes to remote Dropbox. :param sync: Instance of :class:`SyncEngine`. - :param syncing: Event that indicates if workers are running or paused. :param running: Event to shutdown local file event handler and worker threads. - :param connected: Event that indicates if we can connect to Dropbox. - :param startup: Set when we should run startup routines. - :param paused_by_user: Set when syncing has been paused by the user. + :param startup_completed: Set when startup sync is completed. + :param autostart: Set when syncing should automatically resume on connection. 
""" - while running.is_set(): - - startup.wait() + conf = MaestralConfig(sync.config_name) - try: - with sync.sync_lock: - # run / resume initial download - # local changes during this download will be registered by the local - # FileSystemObserver but only uploaded after `syncing` has been set - if sync.remote_cursor == "": - sync.clear_sync_errors() - sync.get_remote_folder() - sync.local_cursor = time.time() + with handle_sync_thread_errors(running, autostart, sync.notifier): - if not running.is_set(): - continue + # Retry failed downloads. + if len(sync.download_errors) > 0: + logger.info("Retrying failed syncs...") - # retry failed downloads - if len(sync.download_errors) > 0: - logger.info("Retrying failed syncs...") + for dbx_path in list(sync.download_errors): + sync.get_remote_item(dbx_path) - for dbx_path in list(sync.download_errors): - logger.info(f"Syncing ↓ {dbx_path}") - sync.get_remote_item(dbx_path) + # Resume interrupted downloads. + if len(sync.pending_downloads) > 0: + logger.info("Resuming interrupted syncs...") - # resume interrupted downloads - if len(sync.pending_downloads) > 0: - logger.info("Resuming interrupted syncs...") + for dbx_path in list(sync.pending_downloads): + sync.get_remote_item(dbx_path) + sync.pending_downloads.discard(dbx_path) - for dbx_path in list(sync.pending_downloads): - logger.info(f"Syncing ↓ {dbx_path}") - sync.get_remote_item(dbx_path) - sync.pending_downloads.discard(dbx_path) - - # retry failed / interrupted uploads by scheduling additional events - # if len(sync.upload_errors) > 0: - # logger.debug('Retrying failed uploads...') - # - # for dbx_path in list(sync.upload_errors): - # sync.rescan(sync.to_local_path(dbx_path)) - - # upload changes while inactive - sync.upload_local_changes_while_inactive() + if not running.is_set(): + startup_completed.set() + return - # enforce immediate check for remote changes - changes, remote_cursor = sync.list_remote_changes(sync.remote_cursor) - downloaded = 
sync.apply_remote_changes(changes, remote_cursor) - sync.notify_user(downloaded) + sync.download_sync_cycle() - if not running.is_set(): - continue + if not running.is_set(): + startup_completed.set() + return - if not paused_by_user.is_set(): - syncing.set() + if conf.get("sync", "upload"): + sync.upload_local_changes_while_inactive() - startup.clear() - logger.info(IDLE) + logger.info(IDLE) - # free some memory - del changes - del downloaded - sync.free_memory() - - except DropboxServerError: - logger.info("Dropbox server error", exc_info=True) - except ConnectionError: - syncing.clear() - connected.clear() - startup.clear() - logger.debug("Lost connection", exc_info=True) - logger.info(DISCONNECTED) - except Exception as err: - running.clear() - syncing.clear() - title = getattr(err, "title", "Unexpected error") - logger.error(title, exc_info=True) + startup_completed.set() # ====================================================================================== @@ -3607,26 +3707,23 @@ def __init__(self, client: DropboxClient): self.client = client self.config_name = self.client.config_name - self._conf = MaestralConfig(self.config_name) - self.startup = Event() - self.connected = Event() - self.syncing = Event() - self.running = Event() - self.paused_by_user = Event() - self.paused_by_user.set() + self._lock = RLock() - self.added_item_queue = Queue() # entries are dbx_paths + self.running = Event() + self.startup_completed = Event() + self.autostart = Event() - self._lock = RLock() + self.added_item_queue = Queue() - self.fs_event_handler = FSEventHandler(self.syncing, self.startup) - self.sync = SyncEngine(self.client, self.fs_event_handler) + self.sync = SyncEngine(self.client) self._startup_time = -1.0 self.connection_check_interval = 10 + self.connected = False + self._connection_helper_running = True self.connection_helper = Thread( target=self.connection_monitor, name="maestral-connection-helper", @@ -3683,136 +3780,147 @@ def idle_time(self) -> float: 
def start(self) -> None: """Creates observer threads and starts syncing.""" - if self.running.is_set() or self.startup.is_set(): - # do nothing if already started + if self.running.is_set(): return - self.running = Event() # create new event to let old threads shut down - - self.local_observer_thread = Observer(timeout=40) - self.local_observer_thread.setName("maestral-fsobserver") - self._watch = self.local_observer_thread.schedule( - self.fs_event_handler, self.sync.dropbox_path, recursive=True - ) - for i, emitter in enumerate(self.local_observer_thread.emitters): - emitter.setName(f"maestral-fsemitter-{i}") + # create a new set of events to let old threads die down + self.running = Event() + self.startup_completed = Event() self.startup_thread = Thread( target=startup_worker, daemon=True, args=( self.sync, - self.syncing, self.running, - self.connected, - self.startup, - self.paused_by_user, + self.startup_completed, + self.autostart, ), name="maestral-sync-startup", ) - self.download_thread = Thread( - target=download_worker, - daemon=True, - args=( - self.sync, - self.syncing, - self.running, - self.connected, - ), - name="maestral-download", - ) + if self._conf.get("sync", "download"): + + self.download_thread = Thread( + target=download_worker, + daemon=True, + args=( + self.sync, + self.running, + self.startup_completed, + self.autostart, + ), + name="maestral-download", + ) - self.download_thread_added_folder = Thread( - target=download_worker_added_item, - daemon=True, - args=( - self.sync, - self.syncing, - self.running, - self.connected, - self.added_item_queue, - ), - name="maestral-folder-download", - ) + self.download_thread_added_folder = Thread( + target=download_worker_added_item, + daemon=True, + args=( + self.sync, + self.running, + self.startup_completed, + self.autostart, + self.added_item_queue, + ), + name="maestral-folder-download", + ) - self.upload_thread = Thread( - target=upload_worker, - daemon=True, - args=( - self.sync, - 
self.syncing, - self.running, - self.connected, - ), - name="maestral-upload", - ) + if self._conf.get("sync", "upload"): + + self.upload_thread = Thread( + target=upload_worker, + daemon=True, + args=( + self.sync, + self.running, + self.startup_completed, + self.autostart, + ), + name="maestral-upload", + ) - try: - self.local_observer_thread.start() - except OSError as err: - new_err = fswatch_to_maestral_error(err) - title = getattr(new_err, "title", "Unexpected error") - logger.error(title, exc_info=exc_info_tuple(new_err)) + self.local_observer_thread = Observer(timeout=40) + self.local_observer_thread.setName("maestral-fsobserver") + self._watch = self.local_observer_thread.schedule( + self.sync.fs_events, self.sync.dropbox_path, recursive=True + ) + for i, emitter in enumerate(self.local_observer_thread.emitters): + emitter.setName(f"maestral-fsemitter-{i}") - self.running.set() - self.syncing.clear() - self.connected.set() - self.startup.set() + try: + self.local_observer_thread.start() + except OSError as exc: - self.startup_thread.start() - self.upload_thread.start() - self.download_thread.start() - self.download_thread_added_folder.start() + err_cls: Type[MaestralApiError] - self.paused_by_user.clear() + if exc.errno in (errno.ENOSPC, errno.EMFILE): + title = "Inotify limit reached" - self._startup_time = time.time() + try: + max_user_watches, max_user_instances, _ = get_inotify_limits() + except OSError: + max_user_watches, max_user_instances = 2 ** 18, 2 ** 9 - @_with_lock - def pause(self) -> None: - """Pauses syncing.""" + if exc.errno == errno.ENOSPC: + n_new = max(2 ** 19, 2 * max_user_watches) + new_config = f"fs.inotify.max_user_watches={n_new}" + else: + n_new = max(2 ** 10, 2 * max_user_instances) + new_config = f"fs.inotify.max_user_instances={n_new}" + + msg = ( + "Changes to your Dropbox folder cannot be monitored because it " + "contains too many items. 
Please increase the inotify limit by " + "adding the following line to /etc/sysctl.conf, then apply the " + 'settings with "sysctl -p":\n\n' + new_config + ) + err_cls = InotifyError - self.paused_by_user.set() - self.syncing.clear() + elif PermissionError: + title = "Insufficient permissions to monitor local changes" + msg = "Please check the permissions for your local Dropbox folder" + err_cls = InotifyError - self.sync.cancel_pending.set() - self._wait_for_idle() - self.sync.cancel_pending.clear() + else: + title = "Could not start watch of local directory" + msg = exc.strerror + err_cls = MaestralApiError - logger.info(PAUSED) + new_error = err_cls(title, msg) + logger.error(title, exc_info=exc_info_tuple(new_error)) + self.sync.notifier.notify(title, msg, level=notify.ERROR) - @_with_lock - def resume(self) -> None: - """Checks for changes while idle and starts syncing.""" + self.running.set() + self.autostart.set() - if not self.paused_by_user.is_set(): - return + if self._conf.get("sync", "upload"): + self.sync.fs_events.enable() + self.upload_thread.start() + + if self._conf.get("sync", "download"): + self.download_thread.start() + self.download_thread_added_folder.start() + + self.startup_thread.start() - self.startup.set() - self.paused_by_user.clear() + self._startup_time = time.time() @_with_lock def stop(self) -> None: """Stops syncing and destroys worker threads.""" - if not self.running.is_set(): - return - - logger.info("Shutting down threads...") + if self.running.is_set(): + logger.info("Shutting down threads...") + self.sync.fs_events.disable() self.running.clear() - self.syncing.clear() - self.paused_by_user.clear() - self.startup.clear() + self.startup_completed.clear() + self.autostart.clear() - self.sync.cancel_pending.set() - self._wait_for_idle() - self.sync.cancel_pending.clear() + self.sync.cancel_sync() self.local_observer_thread.stop() - # self.local_observer_thread.join() - # self.upload_thread.join() logger.info(STOPPED) @@ -3823,38 
+3931,28 @@ def connection_monitor(self) -> None: the user. """ - while True: - if check_connection("www.dropbox.com"): - self.on_connect() - else: - self.on_disconnect() + while self._connection_helper_running: - time.sleep(self.connection_check_interval) + self.connected = check_connection("www.dropbox.com") - @_with_lock - def on_connect(self) -> None: - """Callback to run when we have lost the connection to Dropbox""" + if self.connected and not self.running.is_set() and self.autostart.is_set(): + logger.info(CONNECTED) + self.start() - if self.running.is_set(): - if not self.connected.is_set() and not self.paused_by_user.is_set(): - self.startup.set() - self.connected.set() + elif not self.connected and self.running.is_set(): - @_with_lock - def on_disconnect(self) -> None: - """Callback to run when we have reestablished the connection to Dropbox""" + # Don't stop sync threads, let them deal with the connection issues with + # their own timeout. This prevents us from aborting any uploads or + # downloads which could still be saved on reconnection. - if self.running.is_set(): - if self.connected.is_set(): - logger.info(DISCONNECTED) - self.syncing.clear() - self.connected.clear() - self.startup.clear() + logger.info(CONNECTING) + + time.sleep(self.connection_check_interval) def reset_sync_state(self) -> None: """Resets all saved sync state. 
Settings are not affected.""" - if self.syncing.is_set() or self.startup.is_set() or self.sync.busy(): + if self.running.is_set() or self.sync.busy(): raise RuntimeError("Cannot reset sync state while syncing.") self.sync.remote_cursor = "" @@ -3876,20 +3974,22 @@ def rebuild_index(self) -> None: logger.info("Rebuilding index...") - self.pause() + was_running = self.running.is_set() + + self.stop() self.sync.remote_cursor = "" self.sync.clear_index() - if not self.running.is_set(): + if was_running: self.start() - else: - self.resume() - - def _wait_for_idle(self) -> None: - self.sync.sync_lock.acquire() - self.sync.sync_lock.release() + def __del__(self): + try: + self.stop() + self._connection_helper_running = False + except Exception: + pass # ====================================================================================== @@ -3987,65 +4087,6 @@ def throttled_log( _last_emit = time.time() -def cpu_usage_percent(interval: float = 0.1) -> float: - """ - Returns a float representing the CPU utilization of the current process as a - percentage. This duplicates the similar method from psutil to avoid the psutil - dependency. - - Compares process times to system CPU times elapsed before and after the interval - (blocking). It is recommended for accuracy that this function be called with an - interval of at least 0.1 sec. - - A value > 100.0 can be returned in case of processes running multiple threads on - different CPU cores. The returned value is explicitly NOT split evenly between all - available logical CPUs. This means that a busy loop process running on a system with - 2 logical CPUs will be reported as having 100% CPU utilization instead of 50%. - - :param interval: Interval in sec between comparisons of CPU times. - :returns: CPU usage during interval in percent. 
- """ - - if not interval > 0: - raise ValueError(f"interval is not positive (got {interval!r})") - - def timer(): - return time.monotonic() * _cpu_count - - st1 = timer() - rt1 = resource.getrusage(resource.RUSAGE_SELF) - time.sleep(interval) - st2 = timer() - rt2 = resource.getrusage(resource.RUSAGE_SELF) - - delta_proc = (rt2.ru_utime - rt1.ru_utime) + (rt2.ru_stime - rt1.ru_stime) - delta_time = st2 - st1 - - try: - overall_cpus_percent = (delta_proc / delta_time) * 100 - except ZeroDivisionError: - return 0.0 - else: - single_cpu_percent = overall_cpus_percent * _cpu_count - return round(single_cpu_percent, 1) - - -def check_connection(hostname: str) -> bool: - """ - A low latency check for an internet connection. - - :param hostname: Hostname to use for connection check. - :returns: Connection availability. - """ - try: - host = socket.gethostbyname(hostname) - s = socket.create_connection((host, 80), 2) - s.close() - return True - except Exception: - return False - - def validate_encoding(local_path: str) -> None: """ Validate that the path contains only characters in the reported file system diff --git a/src/maestral/utils/cli.py b/src/maestral/utils/cli.py index 95e627bdc..b37760adc 100644 --- a/src/maestral/utils/cli.py +++ b/src/maestral/utils/cli.py @@ -9,7 +9,6 @@ Iterator, Sequence, Any, - Tuple, Callable, TYPE_CHECKING, ) @@ -543,84 +542,28 @@ def ok(message: str, nl: bool = True) -> None: def _style_message(message: str) -> str: - pre = click.style("?", fg="green") - return f"{pre} {message} " + return f"{message} " def _syle_hint(hint: str) -> str: - return click.style(hint, fg="white") + " " if hint else "" + return f"{hint} " if hint else "" -orange = "\x1b[38;5;214m" -cyan = "\x1b[38;5;6m" -grey = "\x1b[90m" -bold = "\x1b[1m" - -response_color = cyan -focus_color = f"{response_color}" - - -class loading: - - _animation = ("...", " ", ". ", ".. 
") - - def __init__(self, iterable, prefix="Loading", animation=None, clear=True): - - import itertools - - self.iterable = iterable - self.prefix = prefix - self.clear = clear - self.indicator = itertools.cycle(animation or loading._animation) - - def _render(self) -> None: - click.echo(self.prefix + next(self.indicator) + "\r", nl=False) - - def __enter__(self) -> "loading": - self._render() - return self - - def __iter__(self) -> "loading": - return self - - def __next__(self) -> Any: - res = next(self.iterable) - self._render() - return res - - def __exit__(self, exc_type, exc_val, exc_tb) -> None: - if self.clear: - click.echo(" " * (len(self.prefix) + 3) + "\r", nl=False) - else: - click.echo("") - - -def prompt(message: str, default: str = "", validate: Optional[Callable] = None) -> str: +def prompt( + message: str, default: Optional[str] = None, validate: Optional[Callable] = None +) -> str: import survey - styled_default = _syle_hint(default) styled_message = _style_message(message) - def view(value: str) -> Tuple[str]: - response = value or default - return (response,) - def check(value: str) -> bool: - if validate is None: - return True - elif value == "" and default: - return True - else: + if validate is not None: return validate(value) + else: + return True - res = survey.input( - styled_message, - hint=styled_default, - view=view, - check=check, - color=response_color, - ) + res = survey.input(styled_message, default=default, check=check) return res @@ -631,7 +574,7 @@ def confirm(message: str, default: Optional[bool] = True) -> bool: styled_message = _style_message(message) - return survey.confirm(styled_message, default=default, color=response_color) + return survey.confirm(styled_message, default=default) def select(message: str, options: Sequence[str], hint="") -> int: @@ -642,17 +585,11 @@ def select(message: str, options: Sequence[str], hint="") -> int: styled_hint = _syle_hint(hint) styled_message = _style_message(message) - index = 
survey.select( - options, - styled_message, - focus=focus_color, - color=response_color, - hint=styled_hint, - ) + index = survey.select(options, styled_message, hint=styled_hint) return index except (KeyboardInterrupt, SystemExit): - survey.api.respond() + survey.respond() raise @@ -666,53 +603,42 @@ def select_multiple(message: str, options: Sequence[str], hint="") -> List[int]: kwargs = {"hint": styled_hint} if hint else {} - def view(value: Sequence[int]) -> Tuple[str]: - - chosen = [options[index] for index in value] - response = ", ".join(chosen) + indices = survey.select( + options, styled_message, multi=True, pin="[✓] ", unpin="[ ] ", **kwargs + ) - if len(value) == 0 or len(response) > 50: - response = f"[{len(value)} chosen]" + chosen = [options[index] for index in indices] + response = ", ".join(chosen) - return (response,) + if len(indices) == 0 or len(response) > 50: + response = f"[{len(indices)} chosen]" - indices = survey.select( - options, - styled_message, - multi=True, - focus=focus_color, - color=response_color, - pin="[✓] ", - unpin="[ ] ", - view=view, - **kwargs, - ) + survey.respond(response) return indices except (KeyboardInterrupt, SystemExit): - survey.api.respond() + survey.respond() raise def select_path( message: str, - default: str = "", + default: Optional[str] = None, validate: Callable = lambda x: True, exists: bool = False, - only_directories: bool = False, + files_allowed: bool = True, + dirs_allowed: bool = True, ) -> str: import os import survey + import wrapio - styled_default = _syle_hint(f"[{default}]") - styled_message = _style_message(message) + track = wrapio.Track() - def view(value: str) -> Tuple[str]: - response = value or default - return (response,) + styled_message = _style_message(message) failed = False @@ -724,36 +650,43 @@ def check(value: str) -> bool: return True full_path = os.path.expanduser(value) - dir_condition = os.path.isdir(full_path) or not only_directories + forbidden_dir = os.path.isdir(full_path) 
and not dirs_allowed + forbidden_file = os.path.isfile(full_path) and not files_allowed exist_condition = os.path.exists(full_path) or not exists - if not dir_condition: - survey.update(click.style("(not a directory) ", fg="red")) - elif not exist_condition: - survey.update(click.style("(does not exist) ", fg="red")) - - passed = dir_condition and exist_condition and validate(value) - failed = not passed + if not exist_condition: + survey.update(click.style("(not found) ", fg="red")) + elif forbidden_dir: + survey.update(click.style("(not a file) ", fg="red")) + elif forbidden_file: + survey.update(click.style("(not a folder) ", fg="red")) + + failed = ( + not exist_condition + or forbidden_dir + or forbidden_file + or not validate(value) + ) - return passed + return not failed - def callback(event: str, result: str, *args) -> None: + @track.call("insert") + @track.call("delete") + def handle(result: str, *args) -> None: nonlocal failed - if event == "delete" and failed: - survey.update(styled_default) + if failed: + survey.update("") failed = False res = survey.input( styled_message, - hint=styled_default, - view=view, + default=default, + callback=track.invoke, check=check, - callback=callback, - color=response_color, ) - return res or default + return res class RemoteApiError(click.ClickException): diff --git a/src/maestral/utils/integration.py b/src/maestral/utils/integration.py new file mode 100644 index 000000000..cd98ba825 --- /dev/null +++ b/src/maestral/utils/integration.py @@ -0,0 +1,210 @@ +# -*- coding: utf-8 -*- +""" +This module provides functions for platform integration. Most of the functionality here +could also be achieved with psutils but we want to avoid the large dependency. 
+""" + +import os +import platform +import enum +import resource +import socket +import time +import logging +from pathlib import Path +from typing import Union, Tuple + +__all__ = [ + "get_ac_state", + "ACState", + "get_inotify_limits", + "CPU_COUNT", + "cpu_usage_percent", + "check_connection", +] + + +logger = logging.getLogger(__name__) + +CPU_COUNT = os.cpu_count() or 1 # os.cpu_count can return None +LINUX_POWER_SUPPLY_PATH = "/sys/class/power_supply" + + +def multi_cat(*paths: Path) -> Union[int, bytes, None]: + """ + Attempts to read the content of multiple files which may not exist. Returns the + content of the first file which can be read. If none of them can be read return + None. Returns an integer if the content is a digit. + """ + + for path in paths: + try: + ret = path.read_bytes().strip() + except OSError: + pass + else: + return int(ret) if ret.isdigit() else ret + + return None + + +class ACState(enum.Enum): + """Enumeration of AC power states""" + + Connected = "Connected" + Disconnected = "Disconnected" + Undetermined = "Undetermined" + + +def get_ac_state() -> ACState: + """ + Checks if the current device has AC power or is running on battery. + + :returns: ``True`` if the device has AC power, ``False`` otherwise. 
+ """ + + if platform.system() == "Darwin": + + from ctypes import c_double + from rubicon.objc.runtime import load_library + + iokit = load_library("IOKit") + kIOPSTimeRemainingUnlimited = -2.0 + + iokit.IOPSGetTimeRemainingEstimate.restype = c_double + + remaining_time = iokit.IOPSGetTimeRemainingEstimate() + + if remaining_time == kIOPSTimeRemainingUnlimited: + return ACState.Connected + else: + return ACState.Disconnected + + elif platform.system() == "Linux": + + # taken from https://github.com/giampaolo/psutil + + supplies = list(os.scandir(LINUX_POWER_SUPPLY_PATH)) + + ac_paths = [ + Path(s.path) + for s in supplies + if s.name.startswith("A") or "ac" in s.name.lower() + ] + + bat_paths = [ + Path(s.path) + for s in supplies + if s.name.startswith("B") or "battery" in s.name.lower() + ] + + online = multi_cat(*iter(path / "online" for path in ac_paths)) + + if online is not None: + if online == 1: + return ACState.Connected + else: + return ACState.Disconnected + + elif len(bat_paths) > 0: + + # Get the first available battery. Usually this is "BAT0", except + # some rare exceptions: + # https://github.com/giampaolo/psutil/issues/1238 + bat0 = sorted(bat_paths)[0] + + try: + status = (bat0 / "status").read_text().strip().lower() + except OSError: + status = "" + + if status == "discharging": + return ACState.Disconnected + elif status in ("charging", "full"): + return ACState.Connected + + return ACState.Undetermined + + +def get_inotify_limits() -> Tuple[int, int, int]: + """ + Returns the current inotify limit settings as tuple. + + :returns: ``(max_user_watches, max_user_instances, max_queued_events)`` + :raises OSError: if the settings cannot be read from /proc/sys/fs/inotify. This may + happen if /proc/sys is left out of the kernel image or simply not mounted. 
+ """ + + from pathlib import Path + + root = Path("/proc/sys/fs/inotify") + + max_user_watches_path = root / "max_user_watches" + max_user_instances_path = root / "max_user_instances" + max_queued_events_path = root / "max_queued_events" + + max_user_watches = int(max_user_watches_path.read_bytes().strip()) + max_user_instances = int(max_user_instances_path.read_bytes().strip()) + max_queued_events = int(max_queued_events_path.read_bytes().strip()) + + return max_user_watches, max_user_instances, max_queued_events + + +def cpu_usage_percent(interval: float = 0.1) -> float: + """ + Returns a float representing the CPU utilization of the current process as a + percentage. This duplicates the similar method from psutil to avoid the psutil + dependency. + + Compares process times to system CPU times elapsed before and after the interval + (blocking). It is recommended for accuracy that this function be called with an + interval of at least 0.1 sec. + + A value > 100.0 can be returned in case of processes running multiple threads on + different CPU cores. The returned value is explicitly NOT split evenly between all + available logical CPUs. This means that a busy loop process running on a system with + 2 logical CPUs will be reported as having 100% CPU utilization instead of 50%. + + :param interval: Interval in sec between comparisons of CPU times. + :returns: CPU usage during interval in percent. 
+ """ + + if not interval > 0: + raise ValueError(f"interval is not positive (got {interval!r})") + + def timer(): + return time.monotonic() * CPU_COUNT + + st1 = timer() + rt1 = resource.getrusage(resource.RUSAGE_SELF) + time.sleep(interval) + st2 = timer() + rt2 = resource.getrusage(resource.RUSAGE_SELF) + + delta_proc = (rt2.ru_utime - rt1.ru_utime) + (rt2.ru_stime - rt1.ru_stime) + delta_time = st2 - st1 + + try: + overall_cpus_percent = (delta_proc / delta_time) * 100 + except ZeroDivisionError: + return 0.0 + else: + single_cpu_percent = overall_cpus_percent * CPU_COUNT + return round(single_cpu_percent, 1) + + +def check_connection(hostname: str) -> bool: + """ + A low latency check for an internet connection. + + :param hostname: Hostname to use for connection check. + :returns: Connection availability. + """ + try: + host = socket.gethostbyname(hostname) + s = socket.create_connection((host, 80), 2) + s.close() + return True + except Exception: + logger.debug("Connection error", exc_info=True) + return False diff --git a/tests/linked/conftest.py b/tests/linked/conftest.py index 2a8e378ee..281621828 100644 --- a/tests/linked/conftest.py +++ b/tests/linked/conftest.py @@ -7,9 +7,10 @@ import uuid import pytest - +from watchdog.utils.dirsnapshot import DirectorySnapshot from dropbox.files import WriteMode, FileMetadata -from maestral.main import Maestral + +from maestral.main import Maestral, logger from maestral.errors import NotFoundError, FileConflictError from maestral.client import convert_api_errors from maestral.config import remove_configuration @@ -19,12 +20,17 @@ to_existing_cased_path, is_child, ) -from maestral.sync import DirectorySnapshot from maestral.utils.appdirs import get_home_dir +from maestral.daemon import MaestralProxy +from maestral.daemon import start_maestral_daemon_process, stop_maestral_daemon_process resources = os.path.dirname(__file__) + "/resources" +fsevents_logger = logging.getLogger("fsevents") 
+fsevents_logger.setLevel(logging.DEBUG) +logger.setLevel(logging.DEBUG) + @pytest.fixture def m(): @@ -34,14 +40,27 @@ def m(): m.log_level = logging.DEBUG # link with given token - access_token = os.environ.get("DROPBOX_TOKEN", "") - m.client._init_sdk_with_token(access_token=access_token) + access_token = os.environ.get("DROPBOX_ACCESS_TOKEN") + refresh_token = os.environ.get("DROPBOX_REFRESH_TOKEN") + + if access_token: + m.client._init_sdk_with_token(access_token=access_token) + m.client.auth._access_token = access_token + m.client.auth._token_access_type = "legacy" + elif refresh_token: + m.client._init_sdk_with_token(refresh_token=refresh_token) + m.client.auth._refresh_token = refresh_token + m.client.auth._token_access_type = "offline" + else: + raise RuntimeError( + "Either access token or refresh token must be given as environment " + "variable DROPBOX_ACCESS_TOKEN or DROPBOX_REFRESH_TOKEN." + ) # get corresponding Dropbox ID and store in keyring for other processes res = m.client.get_account_info() m.client.auth._account_id = res.account_id - m.client.auth._access_token = access_token - m.client.auth._token_access_type = "legacy" + m.client.auth.loaded = True m.client.auth.save_creds() # set local Dropbox directory @@ -84,6 +103,12 @@ def m(): except NotFoundError: pass + # remove all shared links + res = m.client.list_shared_links() + + for link in res.links: + m.revoke_shared_link(link.url) + # remove creds from system keyring m.client.auth.delete_creds() @@ -95,16 +120,27 @@ def m(): lock.release() +@pytest.fixture +def proxy(m): + m.stop_sync() + start_maestral_daemon_process(m.config_name, timeout=20) + yield MaestralProxy(m.config_name) + + stop_maestral_daemon_process(m.config_name) + + # helper functions -def wait_for_idle(m: Maestral, minimum: int = 4): +def wait_for_idle(m: Maestral, minimum: int = 5): """Blocks until Maestral instance is idle for at least `minimum` sec.""" t0 = time.time() while time.time() - t0 < minimum: if m.sync.busy(): - 
m.monitor._wait_for_idle() + # Wait until we can acquire the sync lock => we are idle. + m.sync.sync_lock.acquire() + m.sync.sync_lock.release() t0 = time.time() else: time.sleep(0.1) diff --git a/tests/linked/resources/bin.txt b/tests/linked/resources/bin.txt new file mode 100755 index 000000000..0e3fa8d7b Binary files /dev/null and b/tests/linked/resources/bin.txt differ diff --git a/tests/linked/resources/large-file.pdf b/tests/linked/resources/large-file.pdf new file mode 100644 index 000000000..7793960c0 Binary files /dev/null and b/tests/linked/resources/large-file.pdf differ diff --git a/tests/linked/test_cli.py b/tests/linked/test_cli.py new file mode 100644 index 000000000..0a01edcf9 --- /dev/null +++ b/tests/linked/test_cli.py @@ -0,0 +1,122 @@ +# -*- coding: utf-8 -*- + +import os + +import pytest +from click.testing import CliRunner + +from maestral.cli import main +from maestral.constants import IDLE, STOPPED, ERROR +from maestral.daemon import MaestralProxy + + +if not ("DROPBOX_ACCESS_TOKEN" in os.environ or "DROPBOX_REFRESH_TOKEN" in os.environ): + pytest.skip("Requires auth token", allow_module_level=True) + + +def wait_for_idle(m: MaestralProxy, minimum: int = 2): + + while True: + current_status = m.status + if current_status in (IDLE, STOPPED, ERROR, ""): + m.status_change_longpoll(timeout=minimum) + if m.status == current_status: + # status did not change, we are done + return + else: + m.status_change_longpoll(timeout=minimum) + + +def test_pause_resume(proxy): + + runner = CliRunner() + result = runner.invoke(main, ["pause", "-c", proxy.config_name]) + + wait_for_idle(proxy) + + assert result.exit_code == 0 + assert proxy.paused + + result = runner.invoke(main, ["resume", "-c", proxy.config_name]) + + wait_for_idle(proxy) + + assert result.exit_code == 0 + assert not proxy.paused + + +def test_status(proxy): + runner = CliRunner() + result = runner.invoke(main, ["status", "-c", proxy.config_name]) + + assert result.exit_code == 0 + assert 
"Paused" in result.output + + +def test_filestatus(proxy): + runner = CliRunner() + proxy.start_sync() + wait_for_idle(proxy) + + local_path = proxy.to_local_path("/sync_tests") + + result = runner.invoke(main, ["filestatus", local_path, "-c", proxy.config_name]) + + assert result.exit_code == 0 + assert result.output == "up to date\n" + + +def test_history(proxy): + + proxy.start_sync() + wait_for_idle(proxy) + + # lets make history + dbx_path = "/sync_tests/new_file.txt" + local_path = proxy.to_local_path(dbx_path) + + with open(local_path, "a") as f: + f.write("content") + + wait_for_idle(proxy) + + # check that history has been be written + runner = CliRunner() + result = runner.invoke(main, ["history", "-c", proxy.config_name]) + + lines = result.output.strip().split("\n") + + assert result.exit_code == 0 + # last entry will be test.lock with change time in the future + assert "/test.lock" in lines[-1] + assert "added" in lines[-1] + # then comes our own file + assert dbx_path in lines[-2] + assert "added" in lines[-2] + + +def test_ls(proxy): + runner = CliRunner() + result = runner.invoke(main, ["ls", "/", "-c", proxy.config_name]) + + entries = proxy.list_folder("/") + + assert result.exit_code == 0 + + for entry in entries: + assert entry["name"] in result.output + + +def test_ls_long(proxy): + runner = CliRunner() + result = runner.invoke(main, ["ls", "-l", "/", "-c", proxy.config_name]) + + lines = result.output.strip().split("\n") + entries = proxy.list_folder("/") + + assert result.exit_code == 0 + assert lines[0].startswith("Loading...") # loading indicator + assert lines[1].startswith("Name") # column titles + + for line, entry in zip(lines[2:], entries): + assert entry["name"] in line diff --git a/tests/linked/test_client.py b/tests/linked/test_client.py new file mode 100644 index 000000000..11efc95b7 --- /dev/null +++ b/tests/linked/test_client.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- + +import os + +import pytest +from dropbox.files import 
FolderMetadata + +from maestral.errors import NotFoundError, PathError + +from .conftest import resources + + +if not ("DROPBOX_ACCESS_TOKEN" in os.environ or "DROPBOX_REFRESH_TOKEN" in os.environ): + pytest.skip("Requires auth token", allow_module_level=True) + + +# Client API unit tests: we explicitly test those method calls which are not covered +# by current integration tests, either because they are not used by the sync module or +# because niche cases require additional testing. + + +def test_upload_large_file(m): + # not tested during during integration tests + + large_file = resources + "/large-file.pdf" + md = m.client.upload( + large_file, "/sync_tests/large-file.pdf", chunk_size=5 * 10 ** 5 + ) + + assert md.content_hash == m.sync.get_local_hash(large_file) + + +@pytest.mark.parametrize("batch_size", [10, 30]) +@pytest.mark.parametrize("force_async", [True, False]) +def test_batch_methods(m, batch_size, force_async): + # batch methods are not currently used by sync module + + folders = [f"/sync_tests/folder {i}" for i in range(20)] + + # create some test directories + res = m.client.make_dir_batch(folders + ["/invalid\\"], force_async=force_async) + + for i in range(20): + assert isinstance(res[i], FolderMetadata) + assert res[i].path_lower == folders[i] + + assert isinstance(res[20], PathError) + + # remove them again + res = m.client.remove_batch( + [(folder, None) for folder in folders] + [("/not_a_folder", None)], + batch_size=batch_size, + ) + + for i in range(20): + assert isinstance(res[i], FolderMetadata) + assert res[i].path_lower == folders[i] + + assert isinstance(res[20], NotFoundError) diff --git a/tests/linked/test_main.py b/tests/linked/test_main.py index 6a582d636..3e4f693dc 100644 --- a/tests/linked/test_main.py +++ b/tests/linked/test_main.py @@ -1,19 +1,24 @@ # -*- coding: utf-8 -*- +import sys import os import os.path as osp +import shutil +import requests +import subprocess import pytest -from maestral.errors import NotFoundError 
+from maestral.errors import NotFoundError, UnsupportedFileTypeForDiff, SharedLinkError from maestral.main import FileStatus, IDLE from maestral.main import logger as maestral_logger from maestral.utils.path import delete +from maestral.utils.integration import get_inotify_limits -from .conftest import wait_for_idle +from .conftest import wait_for_idle, resources -if not os.environ.get("DROPBOX_TOKEN"): +if not ("DROPBOX_ACCESS_TOKEN" in os.environ or "DROPBOX_REFRESH_TOKEN" in os.environ): pytest.skip("Requires auth token", allow_module_level=True) @@ -28,7 +33,6 @@ def test_status_properties(m): assert m.status == IDLE assert m.running assert m.connected - assert m.syncing assert not m.paused assert not m.sync_errors assert not m.fatal_errors @@ -49,16 +53,16 @@ def test_file_status(m): # test unwatched non-existent file_status = m.get_file_status("/this is not a folder") - assert file_status == FileStatus.Unwatched.value, file_status + assert file_status == FileStatus.Unwatched.value # test unwatched when paused - m.pause_sync() + m.stop_sync() wait_for_idle(m) file_status = m.get_file_status(m.test_folder_local) assert file_status == FileStatus.Unwatched.value - m.resume_sync() + m.start_sync() wait_for_idle(m) # test error status @@ -81,7 +85,7 @@ def test_move_dropbox_folder(m): wait_for_idle(m) # assert that sync was resumed after moving folder - assert m.syncing + assert m.running def test_move_dropbox_folder_to_itself(m): @@ -89,7 +93,7 @@ def test_move_dropbox_folder_to_itself(m): m.move_dropbox_directory(m.dropbox_path) # assert that sync is still running - assert m.syncing + assert m.running def test_move_dropbox_folder_to_existing(m): @@ -104,7 +108,7 @@ def test_move_dropbox_folder_to_existing(m): m.move_dropbox_directory(new_dir) # assert that sync is still running - assert m.syncing + assert m.running finally: # cleanup @@ -205,3 +209,177 @@ def test_selective_sync_api_nested(m): # check for fatal errors assert not m.fatal_errors + + +def 
test_create_file_diff(m): + """Tests file diffs for supported and unsupported files.""" + + def write_and_get_rev(dbx_path, content, o="w"): + """ + Open the dbx_path locally and write the content to the string. + If it should append something, you can set 'o = "a"'. + """ + + local_path = m.to_local_path(dbx_path) + with open(local_path, o) as f: + f.write(content) + wait_for_idle(m) + return m.client.get_metadata(dbx_path).rev + + dbx_path_success = "/sync_tests/file.txt" + dbx_path_fail_pdf = "/sync_tests/diff.pdf" + dbx_path_fail_ext = "/sync_tests/bin.txt" + + with pytest.raises(UnsupportedFileTypeForDiff): + # Write some dummy stuff to create two revs + old_rev = write_and_get_rev(dbx_path_fail_pdf, "old") + new_rev = write_and_get_rev(dbx_path_fail_pdf, "new") + m.get_file_diff(old_rev, new_rev) + + with pytest.raises(UnsupportedFileTypeForDiff): + # Add a compiled helloworld c file with .txt extension + shutil.copy(resources + "/bin.txt", m.test_folder_local) + wait_for_idle(m) + old_rev = m.client.get_metadata(dbx_path_fail_ext).rev + # Just some bytes + new_rev = write_and_get_rev(dbx_path_fail_ext, "hi".encode(), o="ab") + m.get_file_diff(old_rev, new_rev) + + old_rev = write_and_get_rev(dbx_path_success, "old") + new_rev = write_and_get_rev(dbx_path_success, "new") + # If this does not raise an error, + # the function should have been successful + _ = m.get_file_diff(old_rev, new_rev) + + +def test_restore(m): + """Tests restoring an old revision""" + + dbx_path = "/sync_tests/file.txt" + local_path = m.to_local_path(dbx_path) + + # create a local file and sync it, remember its rev + with open(local_path, "w") as f: + f.write("old content") + + wait_for_idle(m) + + old_md = m.client.get_metadata(dbx_path) + + # modify the file and sync it + with open(local_path, "w") as f: + f.write("new content") + + wait_for_idle(m) + + new_md = m.client.get_metadata(dbx_path) + + assert new_md.content_hash == m.sync.get_local_hash(local_path) + + # restore the old 
rev + + m.restore(dbx_path, old_md.rev) + wait_for_idle(m) + + with open(local_path) as f: + restored_content = f.read() + + assert restored_content == "old content" + + +def test_restore_failed(m): + """Tests restoring a non-existing file""" + + with pytest.raises(NotFoundError): + m.restore("/sync_tests/restored-file", "015982ea314dac40000000154e40990") + + +def test_sharedlink_lifecycle(m): + + # create a folder to share + dbx_path = "/sync_tests/shared_folder" + m.client.make_dir(dbx_path) + + # test creating a shared link + link_data = m.create_shared_link(dbx_path) + + resp = requests.get(link_data["url"]) + assert resp.status_code == 200 + + links = m.list_shared_links(dbx_path) + assert link_data in links + + # test revoking a shared link + m.revoke_shared_link(link_data["url"]) + links = m.list_shared_links(dbx_path) + assert link_data not in links + + +def test_sharedlink_errors(m): + + dbx_path = "/sync_tests/shared_folder" + m.client.make_dir(dbx_path) + + # test creating a shared link with password, no password provided + with pytest.raises(ValueError): + m.create_shared_link(dbx_path, visibility="password") + + # test creating a shared link with password fails on basic account + account_info = m.get_account_info() + + if account_info["account_type"][".tag"] == "basic": + with pytest.raises(SharedLinkError): + m.create_shared_link(dbx_path, visibility="password", password="secret") + + # test creating a shared link with the same settings as an existing link + m.create_shared_link(dbx_path) + + with pytest.raises(SharedLinkError): + m.create_shared_link(dbx_path) + + # test creating a shared link with an invalid path + with pytest.raises(NotFoundError): + m.create_shared_link("/this_is_not_a_file.txt") + + # test listing shared links for an invalid path + with pytest.raises(NotFoundError): + m.list_shared_links("/this_is_not_a_file.txt") + + # test revoking a non existent link + with pytest.raises(NotFoundError): + 
m.revoke_shared_link("https://www.dropbox.com/sh/48r2qxq748jfk5x/AAAS-niuW") + + # test revoking a malformed link + with pytest.raises(SharedLinkError): + m.revoke_shared_link("https://www.testlink.de") + + +@pytest.mark.skipif(sys.platform != "linux", reason="inotify specific test") +@pytest.mark.skipif(os.getenv("CI", False) is False, reason="Only running on CI") +def test_inotify_error(m): + + max_user_watches, max_user_instances, _ = get_inotify_limits() + + try: + subprocess.check_call(["sudo", "sysctl", "-w", "fs.inotify.max_user_watches=1"]) + except subprocess.CalledProcessError: + return + + try: + m.stop_sync() + wait_for_idle(m) + m.start_sync() + + assert len(m.fatal_errors) > 0 + + last_error = m.fatal_errors[-1] + + assert last_error["type"] == "InotifyError" + assert not m.monitor.local_observer_thread.is_alive() + assert m.monitor.upload_thread.is_alive() + assert m.monitor.download_thread.is_alive() + + finally: + subprocess.check_call( + ["sudo", "sysctl", "-w", f"fs.inotify.max_user_watches={max_user_watches}"] + ) diff --git a/tests/linked/test_sync.py b/tests/linked/test_sync.py index 001fdbd4e..eaa013972 100644 --- a/tests/linked/test_sync.py +++ b/tests/linked/test_sync.py @@ -20,7 +20,7 @@ from .conftest import assert_synced, wait_for_idle, resources -if not os.environ.get("DROPBOX_TOKEN"): +if not ("DROPBOX_ACCESS_TOKEN" in os.environ or "DROPBOX_REFRESH_TOKEN" in os.environ): pytest.skip("Requires auth token", allow_module_level=True) @@ -85,7 +85,7 @@ def test_file_conflict(m): shutil.copy(resources + "/file.txt", m.test_folder_local) wait_for_idle(m) - m.pause_sync() + m.stop_sync() wait_for_idle(m) # modify file.txt locally @@ -100,7 +100,7 @@ def test_file_conflict(m): ) # resume syncing and check for conflicting copy - m.resume_sync() + m.start_sync() wait_for_idle(m) @@ -122,7 +122,7 @@ def test_parallel_deletion_when_paused(m): wait_for_idle(m) assert_synced(m) - m.pause_sync() + m.stop_sync() wait_for_idle(m) # delete local file @@ 
-131,7 +131,7 @@ def test_parallel_deletion_when_paused(m): # delete remote file m.client.remove("/sync_tests/file.txt") - m.resume_sync() + m.start_sync() wait_for_idle(m) assert_synced(m) @@ -144,7 +144,7 @@ def test_parallel_deletion_when_paused(m): def test_local_and_remote_creation_with_equal_content(m): """Tests parallel and equal remote and local changes of an item.""" - m.pause_sync() + m.stop_sync() wait_for_idle(m) # create local file @@ -152,7 +152,7 @@ def test_local_and_remote_creation_with_equal_content(m): # create remote file with equal content m.client.upload(resources + "/file.txt", "/sync_tests/file.txt") - m.resume_sync() + m.start_sync() wait_for_idle(m) assert_synced(m) @@ -166,7 +166,7 @@ def test_local_and_remote_creation_with_equal_content(m): def test_local_and_remote_creation_with_different_content(m): """Tests parallel and different remote and local changes of an item.""" - m.pause_sync() + m.stop_sync() wait_for_idle(m) # create local file @@ -174,7 +174,7 @@ def test_local_and_remote_creation_with_different_content(m): # create remote file with different content m.client.upload(resources + "/file1.txt", "/sync_tests/file.txt") - m.resume_sync() + m.start_sync() wait_for_idle(m) assert_synced(m) @@ -191,7 +191,7 @@ def test_local_deletion_during_upload(m): # we mimic a deletion during upload by queueing a fake FileCreatedEvent fake_created_event = FileCreatedEvent(m.test_folder_local + "/file.txt") - m.monitor.fs_event_handler.local_file_event_queue.put(fake_created_event) + m.monitor.sync.fs_events.queue_event(fake_created_event) wait_for_idle(m) @@ -238,8 +238,9 @@ def test_rapid_remote_changes(m): mode=WriteMode.update(md.rev), ) + # reset file content with open(resources + "/file.txt", "w") as f: - f.write("content") # reset file content + f.write("content") wait_for_idle(m) @@ -295,7 +296,7 @@ def test_folder_tree_created_remote(m): # test deleting remote tree m.client.remove("/sync_tests/nested_folder") - wait_for_idle(m, 10) + 
wait_for_idle(m, 15) assert_synced(m) assert_child_count(m, "/sync_tests", 0) @@ -310,14 +311,12 @@ def test_remote_file_replaced_by_folder(m): shutil.copy(resources + "/file.txt", m.test_folder_local + "/file.txt") wait_for_idle(m) - m.pause_sync() - wait_for_idle(m) + with m.sync.sync_lock: - # replace remote file with folder - m.client.remove("/sync_tests/file.txt") - m.client.make_dir("/sync_tests/file.txt") + # replace remote file with folder + m.client.remove("/sync_tests/file.txt") + m.client.make_dir("/sync_tests/file.txt") - m.resume_sync() wait_for_idle(m) assert_synced(m) @@ -337,18 +336,16 @@ def test_remote_file_replaced_by_folder_and_unsynced_local_changes(m): shutil.copy(resources + "/file.txt", m.test_folder_local + "/file.txt") wait_for_idle(m) - m.pause_sync() - wait_for_idle(m) + with m.sync.sync_lock: - # replace remote file with folder - m.client.remove("/sync_tests/file.txt") - m.client.make_dir("/sync_tests/file.txt") + # replace remote file with folder + m.client.remove("/sync_tests/file.txt") + m.client.make_dir("/sync_tests/file.txt") - # create local changes - with open(m.test_folder_local + "/file.txt", "a") as f: - f.write(" modified") + # create local changes + with open(m.test_folder_local + "/file.txt", "a") as f: + f.write(" modified") - m.resume_sync() wait_for_idle(m) assert_synced(m) @@ -363,17 +360,15 @@ def test_remote_file_replaced_by_folder_and_unsynced_local_changes(m): def test_remote_folder_replaced_by_file(m): """Tests the download sync when a folder is replaced by a file.""" - os.mkdir(m.test_folder_local + "/folder") - wait_for_idle(m) - - m.pause_sync() + m.client.make_dir("/sync_tests/folder") wait_for_idle(m) # replace remote folder with file - m.client.remove("/sync_tests/folder") - m.client.upload(resources + "/file.txt", "/sync_tests/folder") - m.resume_sync() + with m.sync.sync_lock: + m.client.remove("/sync_tests/folder") + m.client.upload(resources + "/file.txt", "/sync_tests/folder") + wait_for_idle(m) 
assert_synced(m) @@ -393,17 +388,15 @@ def test_remote_folder_replaced_by_file_and_unsynced_local_changes(m): os.mkdir(m.test_folder_local + "/folder") wait_for_idle(m) - m.pause_sync() - wait_for_idle(m) + with m.sync.sync_lock: - # replace remote folder with file - m.client.remove("/sync_tests/folder") - m.client.upload(resources + "/file.txt", "/sync_tests/folder") + # replace remote folder with file + m.client.remove("/sync_tests/folder") + m.client.upload(resources + "/file.txt", "/sync_tests/folder") - # create local changes - os.mkdir(m.test_folder_local + "/folder/subfolder") + # create local changes + os.mkdir(m.test_folder_local + "/folder/subfolder") - m.resume_sync() wait_for_idle(m) assert_synced(m) @@ -421,13 +414,12 @@ def test_local_folder_replaced_by_file(m): os.mkdir(m.test_folder_local + "/folder") wait_for_idle(m) - m.pause_sync() + with m.sync.sync_lock: - # replace local folder with file - delete(m.test_folder_local + "/folder") - shutil.copy(resources + "/file.txt", m.test_folder_local + "/folder") + # replace local folder with file + delete(m.test_folder_local + "/folder") + shutil.copy(resources + "/file.txt", m.test_folder_local + "/folder") - m.resume_sync() wait_for_idle(m) assert_synced(m) @@ -449,17 +441,15 @@ def test_local_folder_replaced_by_file_and_unsynced_remote_changes(m): os.mkdir(m.test_folder_local + "/folder") wait_for_idle(m) - m.pause_sync() - wait_for_idle(m) + with m.sync.sync_lock: - # replace local folder with file - delete(m.test_folder_local + "/folder") - shutil.copy(resources + "/file.txt", m.test_folder_local + "/folder") + # replace local folder with file + delete(m.test_folder_local + "/folder") + shutil.copy(resources + "/file.txt", m.test_folder_local + "/folder") - # create remote changes - m.client.upload(resources + "/file1.txt", "/sync_tests/folder/file.txt") + # create remote changes + m.client.upload(resources + "/file1.txt", "/sync_tests/folder/file.txt") - m.resume_sync() wait_for_idle(m) 
assert_synced(m) @@ -476,14 +466,12 @@ def test_local_file_replaced_by_folder(m): shutil.copy(resources + "/file.txt", m.test_folder_local + "/file.txt") wait_for_idle(m) - m.pause_sync() - wait_for_idle(m) + with m.sync.sync_lock: - # replace local file with folder - os.unlink(m.test_folder_local + "/file.txt") - os.mkdir(m.test_folder_local + "/file.txt") + # replace local file with folder + os.unlink(m.test_folder_local + "/file.txt") + os.mkdir(m.test_folder_local + "/file.txt") - m.resume_sync() wait_for_idle(m) assert_synced(m) @@ -506,21 +494,19 @@ def test_local_file_replaced_by_folder_and_unsynced_remote_changes(m): shutil.copy(resources + "/file.txt", m.test_folder_local + "/file.txt") wait_for_idle(m) - m.pause_sync() - wait_for_idle(m) + with m.sync.sync_lock: - # replace local file with folder - os.unlink(m.test_folder_local + "/file.txt") - os.mkdir(m.test_folder_local + "/file.txt") + # replace local file with folder + os.unlink(m.test_folder_local + "/file.txt") + os.mkdir(m.test_folder_local + "/file.txt") - # create remote changes - m.client.upload( - resources + "/file1.txt", - "/sync_tests/file.txt", - mode=WriteMode.overwrite, - ) + # create remote changes + m.client.upload( + resources + "/file1.txt", + "/sync_tests/file.txt", + mode=WriteMode.overwrite, + ) - m.resume_sync() wait_for_idle(m) assert_synced(m) @@ -545,7 +531,7 @@ def test_selective_sync_conflict(m): m.exclude_item("/sync_tests/folder") wait_for_idle(m) - assert not (osp.exists(m.test_folder_local + "/folder")) + assert not osp.exists(m.test_folder_local + "/folder") # recreate 'folder' locally os.mkdir(m.test_folder_local + "/folder") @@ -672,14 +658,14 @@ def test_mignore(m): os.mkdir(m.test_folder_local + "/foo") wait_for_idle(m) - assert not (m.client.get_metadata("/sync_tests/foo")) + assert not m.client.get_metadata("/sync_tests/foo") # 3) test that renaming an item excludes it move(m.test_folder_local + "/bar", m.test_folder_local + "/build") wait_for_idle(m) - assert not 
(m.client.get_metadata("/sync_tests/build")) + assert not m.client.get_metadata("/sync_tests/build") # 4) test that renaming an item includes it @@ -752,8 +738,8 @@ def test_download_sync_issues(m): # 2) Check that the sync is retried after pause / resume - m.pause_sync() - m.resume_sync() + m.stop_sync() + m.start_sync() wait_for_idle(m) @@ -878,8 +864,8 @@ def test_unknown_path_encoding(m, capsys): # This requires that our logic to save failed paths in our state file and retry the # sync on startup can handle strings with surrogate escapes. - m.pause_sync() - m.resume_sync() + m.stop_sync() + m.start_sync() wait_for_idle(m) @@ -911,7 +897,7 @@ def test_indexing_performance(m): # generate tree with 5 entries shutil.copytree(resources + "/test_folder", m.test_folder_local + "/test_folder") wait_for_idle(m) - m.pause_sync() + m.stop_sync() res = m.client.list_folder("/sync_tests", recursive=True) @@ -930,7 +916,7 @@ def generate_sync_events(): duration = timeit.timeit(stmt=generate_sync_events, setup=setup, number=n_loops) - assert duration < 3 # expected ~ 1.8 sec + assert duration < 4 # expected ~ 1.8 sec def test_invalid_pending_download(m): @@ -945,8 +931,8 @@ def test_invalid_pending_download(m): m.sync.pending_downloads.add(bogus_path) # trigger a resync - m.pause_sync() - m.resume_sync() + m.stop_sync() + m.start_sync() wait_for_idle(m) # assert that there are no sync errors / fatal errors and that the invalid path diff --git a/tests/offline/conftest.py b/tests/offline/conftest.py new file mode 100644 index 000000000..dc1c8ca8e --- /dev/null +++ b/tests/offline/conftest.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- + +import os +import os.path as osp +import logging + +import pytest + +from maestral.main import Maestral, logger +from maestral.sync import SyncEngine, Observer +from maestral.client import DropboxClient +from maestral.config import list_configs, remove_configuration +from maestral.daemon import stop_maestral_daemon_process, Stop +from 
maestral.utils.appdirs import get_home_dir +from maestral.utils.path import delete + + +logger.setLevel(logging.DEBUG) + + +@pytest.fixture +def m(): + m = Maestral("test-config") + m._conf.save() + yield m + remove_configuration(m.config_name) + + +@pytest.fixture +def sync(): + + local_dir = osp.join(get_home_dir(), "dummy_dir") + os.mkdir(local_dir) + + sync = SyncEngine(DropboxClient("test-config")) + sync.fs_events.enable() + sync.dropbox_path = local_dir + + observer = Observer() + observer.schedule(sync.fs_events, sync.dropbox_path, recursive=True) + observer.start() + + yield sync + + observer.stop() + observer.join() + + remove_configuration("test-config") + delete(sync.dropbox_path) + + +@pytest.fixture +def client(): + yield DropboxClient("test-config") + remove_configuration("test-config") + + +@pytest.fixture +def config_name(prefix: str = "test-config"): + + i = 0 + config_name = f"{prefix}-{i}" + + while config_name in list_configs(): + i += 1 + config_name = f"{prefix}-{i}" + + yield config_name + + res = stop_maestral_daemon_process(config_name) + + if res is Stop.Failed: + raise RuntimeError("Could not stop test daemon") + + remove_configuration(config_name) diff --git a/tests/offline/test_cleaning_events.py b/tests/offline/test_cleaning_events.py index b19a8a72b..5b608115a 100644 --- a/tests/offline/test_cleaning_events.py +++ b/tests/offline/test_cleaning_events.py @@ -3,8 +3,7 @@ import timeit import pytest - -from maestral.sync import ( +from watchdog.events import ( FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, @@ -13,18 +12,15 @@ DirDeletedEvent, DirMovedEvent, ) -from maestral.sync import SyncEngine, DropboxClient -from maestral.config import remove_configuration - -def ipath(i): - """Returns path names '/test 1', '/test 2', ... 
""" - return f"/test {i}" +from maestral.sync import SyncEngine +from maestral.client import DropboxClient +from maestral.config import remove_configuration @pytest.fixture def sync(): - sync = SyncEngine(DropboxClient("test-config"), None) + sync = SyncEngine(DropboxClient("test-config")) sync.dropbox_path = "/" yield sync @@ -32,6 +28,11 @@ def sync(): remove_configuration("test-config") +def ipath(i): + """Returns path names '/test 1', '/test 2', ... """ + return f"/test {i}" + + def test_single_file_events(sync): # only a single event for every path -> no consolidation diff --git a/tests/offline/test_cli.py b/tests/offline/test_cli.py new file mode 100644 index 000000000..02609c6cc --- /dev/null +++ b/tests/offline/test_cli.py @@ -0,0 +1,233 @@ +# -*- coding: utf-8 -*- + +import logging + +from click.testing import CliRunner + +from maestral.cli import main +from maestral.main import logger +from maestral.autostart import AutoStart +from maestral.notify import level_number_to_name, level_name_to_number +from maestral.daemon import MaestralProxy, start_maestral_daemon_process, Start + + +def test_help(): + runner = CliRunner() + result = runner.invoke(main) + + assert result.exit_code == 0 + assert result.output.startswith("Usage: main [OPTIONS] COMMAND [ARGS]") + + +def test_invalid_config(m): + runner = CliRunner() + result = runner.invoke(main, ["resume", "-c", "non-existent-config"]) + + assert result.exit_code == 1 + assert ( + result.output == "! Configuration 'non-existent-config' does not exist. 
" + "Use 'maestral configs' to list all configurations.\n" + ) + + +def test_start(config_name): + + res = start_maestral_daemon_process(config_name, timeout=20) + assert res is Start.Ok + + runner = CliRunner() + result = runner.invoke(main, ["start", "-c", config_name]) + + assert result.exit_code == 0 + assert "already running" in result.output + + +def test_stop(config_name): + + res = start_maestral_daemon_process(config_name, timeout=20) + assert res is Start.Ok + + runner = CliRunner() + result = runner.invoke(main, ["stop", "-c", config_name]) + + assert result.exit_code == 0 + + +def test_filestatus(m): + runner = CliRunner() + result = runner.invoke(main, ["filestatus", "/usr", "-c", m.config_name]) + + assert result.exit_code == 0 + assert result.output == "unwatched\n" + + result = runner.invoke(main, ["filestatus", "/invalid-dir", "-c", m.config_name]) + + # the exception will be already raised by click's argument check + assert result.exit_code == 2 + assert isinstance(result.exception, SystemExit) + assert "'/invalid-dir' does not exist" in result.output + + +def test_history(m): + runner = CliRunner() + result = runner.invoke(main, ["history", "-c", m.config_name]) + + assert result.exit_code == 1 + assert isinstance(result.exception, SystemExit) + assert "No Dropbox account linked." in result.output + + +def test_ls(m): + runner = CliRunner() + result = runner.invoke(main, ["ls", "/", "-c", m.config_name]) + + assert result.exit_code == 1 + assert isinstance(result.exception, SystemExit) + assert "No Dropbox account linked." 
in result.output + + +def test_autostart(m): + autostart = AutoStart(m.config_name) + autostart.disable() + + runner = CliRunner() + result = runner.invoke(main, ["autostart", "-c", m.config_name]) + + assert result.exit_code == 0 + assert "disabled" in result.output + + result = runner.invoke(main, ["autostart", "-Y", "-c", m.config_name]) + + if autostart.implementation: + if result.exit_code == 0: + assert "Enabled" in result.output + assert autostart.enabled + else: + # TODO: be more specific here + assert result.exception is not None + else: + assert "not supported" in result.output + assert not autostart.enabled + + result = runner.invoke(main, ["autostart", "-N", "-c", m.config_name]) + + assert result.exit_code == 0 + assert "Disabled" in result.output + assert not autostart.enabled + + +def test_excluded_list(m): + runner = CliRunner() + result = runner.invoke(main, ["excluded", "list", "-c", m.config_name]) + + assert result.exit_code == 0 + assert result.output == "No excluded files or folders.\n" + + +def test_excluded_add(m): + runner = CliRunner() + result = runner.invoke(main, ["excluded", "add", "/test", "-c", m.config_name]) + + assert result.exit_code == 1 + assert isinstance(result.exception, SystemExit) + assert "No Dropbox account linked." in result.output + + +def test_excluded_remove(m): + runner = CliRunner() + result = runner.invoke(main, ["excluded", "remove", "/test", "-c", m.config_name]) + + assert result.exit_code == 1 + assert isinstance(result.exception, SystemExit) + assert "Daemon must be running to download folders." 
in result.output + + +def test_notify_level(config_name): + + start_maestral_daemon_process(config_name, timeout=20) + m = MaestralProxy(config_name) + + runner = CliRunner() + result = runner.invoke(main, ["notify", "level", "-c", m.config_name]) + + level_name = level_number_to_name(m.notification_level) + + assert result.exit_code == 0 + assert level_name in result.output + + level_name = "SYNCISSUE" + level_number = level_name_to_number(level_name) + result = runner.invoke(main, ["notify", "level", level_name, "-c", m.config_name]) + + assert result.exit_code == 0 + assert level_name in result.output + assert m.notification_level == level_number + + result = runner.invoke(main, ["notify", "level", "INVALID", "-c", m.config_name]) + + assert result.exit_code == 2 + assert isinstance(result.exception, SystemExit) + + +def test_notify_snooze(config_name): + + start_maestral_daemon_process(config_name, timeout=20) + m = MaestralProxy(config_name) + + runner = CliRunner() + result = runner.invoke(main, ["notify", "snooze", "20", "-c", m.config_name]) + + assert result.exit_code == 0 + assert 0 < m.notification_snooze <= 20 + + result = runner.invoke(main, ["notify", "snooze", "0", "-c", m.config_name]) + + assert result.exit_code == 0 + assert m.notification_snooze == 0 + + +def test_log_level(m): + runner = CliRunner() + result = runner.invoke(main, ["log", "level", "-c", m.config_name]) + + level_name = logging.getLevelName(m.log_level) + + assert result.exit_code == 0 + assert level_name in result.output + + result = runner.invoke(main, ["log", "level", "DEBUG", "-c", m.config_name]) + assert result.exit_code == 0 + assert "DEBUG" in result.output + + result = runner.invoke(main, ["notify", "level", "INVALID", "-c", m.config_name]) + assert result.exit_code == 2 + assert isinstance(result.exception, SystemExit) + + +def test_log_show(m): + # log a message + logger.info("Hello from pytest!") + runner = CliRunner() + result = runner.invoke(main, ["log", "show", 
"-c", m.config_name]) + + assert result.exit_code == 0 + assert "Hello from pytest!" in result.output + + +def test_log_clear(m): + # log a message + logger.info("Hello from pytest!") + runner = CliRunner() + result = runner.invoke(main, ["log", "show", "-c", m.config_name]) + + assert result.exit_code == 0 + assert "Hello from pytest!" in result.output + + # clear the logs + result = runner.invoke(main, ["log", "clear", "-c", m.config_name]) + assert result.exit_code == 0 + + with open(m.log_handler_file.stream.name) as f: + log_content = f.read() + + assert log_content == "" diff --git a/tests/offline/test_client.py b/tests/offline/test_client.py new file mode 100644 index 000000000..37deca038 --- /dev/null +++ b/tests/offline/test_client.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- + +import pytest + +from maestral.errors import NotLinkedError + + +def test_client_not_linked(client): + assert not client.linked + + with pytest.raises(NotLinkedError): + client.get_account_info() + + +def test_auth_url(client): + url = client.get_auth_url() + assert url.startswith("https://www.dropbox.com/oauth2/authorize?") + + +def test_auth_error(client): + assert client.link("invalid-token") == 1 diff --git a/tests/offline/test_daemon.py b/tests/offline/test_daemon.py index 351647ef1..920d11d3b 100644 --- a/tests/offline/test_daemon.py +++ b/tests/offline/test_daemon.py @@ -5,14 +5,13 @@ import time import subprocess import threading -import multiprocessing as mp import uuid import pytest +from Pyro5.api import Proxy from maestral.daemon import ( CommunicationError, - Proxy, MaestralProxy, start_maestral_daemon, start_maestral_daemon_process, @@ -20,31 +19,9 @@ Start, Stop, Lock, - IS_MACOS, ) from maestral.main import Maestral from maestral.errors import NotLinkedError -from maestral.config import list_configs, remove_configuration - - -@pytest.fixture -def config_name(prefix: str = "test-config"): - - i = 0 - config_name = f"{prefix}-{i}" - - while config_name in 
list_configs(): - i += 1 - config_name = f"{prefix}-{i}" - - yield config_name - - res = stop_maestral_daemon_process(config_name) - - if res is Stop.Failed: - raise RuntimeError("Could not stop test daemon") - - remove_configuration(config_name) # locking tests @@ -161,15 +138,14 @@ def test_locking_multiprocess(): # daemon lifecycle tests -@pytest.mark.skipif(os.environ.get("CI") == "true", reason="Test is flaky on Github") -def test_lifecycle_detached(config_name): +def test_lifecycle(config_name): # start daemon process - res = start_maestral_daemon_process(config_name) + res = start_maestral_daemon_process(config_name, timeout=20) assert res is Start.Ok # retry start daemon process - res = start_maestral_daemon_process(config_name) + res = start_maestral_daemon_process(config_name, timeout=20) assert res is Start.AlreadyRunning # retry start daemon in-process @@ -185,35 +161,13 @@ def test_lifecycle_detached(config_name): assert res is Stop.NotRunning -@pytest.mark.skipif(os.environ.get("CI") == "true", reason="Test is flaky on Github") -def test_lifecycle_attached(config_name): - - # start daemon process - res = start_maestral_daemon_process(config_name, detach=False) - assert res is Start.Ok - - # check that we have attached process - ctx = mp.get_context("spawn" if IS_MACOS else "fork") - daemon = ctx.active_children()[0] - assert daemon.name == "maestral-daemon" - - # stop daemon - res = stop_maestral_daemon_process(config_name) - assert res is Stop.Ok - - # retry stop daemon - res = stop_maestral_daemon_process(config_name) - assert res is Stop.NotRunning - - # proxy tests -@pytest.mark.skipif(os.environ.get("CI") == "true", reason="Test is flaky on Github") def test_connection(config_name): # start daemon process - res = start_maestral_daemon_process(config_name) + res = start_maestral_daemon_process(config_name, timeout=20) assert res is Start.Ok # create proxy @@ -227,7 +181,6 @@ def test_connection(config_name): assert res is Stop.Ok 
-@pytest.mark.skipif(os.environ.get("CI") == "true", reason="Test is flaky on Github") def test_fallback(config_name): # create proxy w/o fallback @@ -241,11 +194,10 @@ def test_fallback(config_name): assert isinstance(m._m, Maestral) -@pytest.mark.skipif(os.environ.get("CI") == "true", reason="Test is flaky on Github") def test_remote_exceptions(config_name): # start daemon process - start_maestral_daemon_process(config_name) + start_maestral_daemon_process(config_name, timeout=20) # create proxy and call a remote method which raises an error with MaestralProxy(config_name) as m: diff --git a/tests/offline/test_ignoring_events.py b/tests/offline/test_ignoring_events.py index aa94f9ee9..ee0bcdd99 100644 --- a/tests/offline/test_ignoring_events.py +++ b/tests/offline/test_ignoring_events.py @@ -1,18 +1,12 @@ # -*- coding: utf-8 -*- import os -import os.path as osp from pathlib import Path -from threading import Event -import pytest +from watchdog.events import DirCreatedEvent, DirMovedEvent -from maestral.sync import DirCreatedEvent, DirMovedEvent -from maestral.sync import delete, move -from maestral.sync import SyncEngine, DropboxClient, Observer, FSEventHandler from maestral.sync import SyncDirection, ItemType, ChangeType -from maestral.utils.appdirs import get_home_dir -from maestral.config import remove_configuration +from maestral.utils.path import move def ipath(i): @@ -20,38 +14,13 @@ def ipath(i): return f"/test {i}" -@pytest.fixture -def sync(): - syncing = Event() - startup = Event() - syncing.set() - - local_dir = osp.join(get_home_dir(), "dummy_dir") - os.mkdir(local_dir) - - sync = SyncEngine(DropboxClient("test-config"), FSEventHandler(syncing, startup)) - - sync.dropbox_path = local_dir - - observer = Observer() - observer.schedule(sync.fs_events, sync.dropbox_path, recursive=True) - observer.start() - - yield sync - - observer.stop() - observer.join() - - remove_configuration("test-config") - delete(sync.dropbox_path) - - def 
test_receiving_events(sync): new_dir = Path(sync.dropbox_path) / "parent" new_dir.mkdir() - sync_events, local_cursor = sync.wait_for_local_changes() + sync.wait_for_local_changes() + sync_events, _ = sync.list_local_changes() assert len(sync_events) == 1 @@ -78,7 +47,8 @@ def test_ignore_tree_creation(sync): file = new_dir / f"test_{i}" file.touch() - sync_events, local_cursor = sync.wait_for_local_changes() + sync.wait_for_local_changes() + sync_events, _ = sync.list_local_changes() assert len(sync_events) == 0 @@ -92,13 +62,15 @@ def test_ignore_tree_move(sync): file.touch() sync.wait_for_local_changes() + sync.list_local_changes() new_dir_1 = Path(sync.dropbox_path) / "parent2" with sync.fs_events.ignore(DirMovedEvent(str(new_dir), str(new_dir_1))): move(new_dir, new_dir_1) - sync_events, local_cursor = sync.wait_for_local_changes() + sync.wait_for_local_changes() + sync_events, _ = sync.list_local_changes() assert len(sync_events) == 0 @@ -113,5 +85,6 @@ def test_catching_non_ignored_events(sync): file = new_dir / f"test_{i}" file.touch() - sync_events, local_cursor = sync.wait_for_local_changes() + sync.wait_for_local_changes() + sync_events, _ = sync.list_local_changes() assert all(not si.is_directory for si in sync_events) diff --git a/tests/offline/utils/test_path.py b/tests/offline/utils/test_path.py index cf3dfb604..90bfe81f2 100644 --- a/tests/offline/utils/test_path.py +++ b/tests/offline/utils/test_path.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- import os.path as osp -import tempfile import pytest @@ -11,7 +10,6 @@ to_cased_path, is_fs_case_sensitive, is_child, - delete, ) from maestral.utils.appdirs import get_home_dir @@ -99,17 +97,3 @@ def test_is_child(): assert is_child("/parent/path/child/", "/parent/path") assert not is_child("/parent/path", "/parent/path") assert not is_child("/path1", "/path2") - - -def test_delete(): - # test deleting file - test_file = tempfile.NamedTemporaryFile() - assert osp.isfile(test_file.name) - 
delete(test_file.name) - assert not osp.exists(test_file.name) - - # test deleting directory - test_dir = tempfile.TemporaryDirectory() - assert osp.isdir(test_dir.name) - delete(test_dir.name) - assert not osp.exists(test_dir.name)