diff --git a/.asf.yaml b/.asf.yaml new file mode 100644 index 0000000..2fca9c0 --- /dev/null +++ b/.asf.yaml @@ -0,0 +1,34 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +github: + description: "Apache Ignite Python Thin Client" + homepage: https://ignite.apache.org/ + labels: + - ignite + - python + features: + wiki: false + issues: false + projects: false + enabled_merge_buttons: + squash: true + merge: false + rebase: false +notifications: + commits: commits@ignite.apache.org + issues: notifications@ignite.apache.org + pullrequests: notifications@ignite.apache.org + jira_options: link label worklog diff --git a/.github/workflows/pr_check.yml b/.github/workflows/pr_check.yml new file mode 100644 index 0000000..5aaf49f --- /dev/null +++ b/.github/workflows/pr_check.yml @@ -0,0 +1,58 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +name: Check code style and run tests +on: [ push, pull_request ] + +env: + IGNITE_VERSION: 2.14.0 + IGNITE_HOME: /opt/ignite + +jobs: + build: + runs-on: ubuntu-latest + continue-on-error: true + strategy: + fail-fast: false + matrix: + cfg: + - { python: "3.7", toxenv: "py37" } + - { python: "3.8", toxenv: "py38" } + - { python: "3.9", toxenv: "py39" } + - { python: "3.10", toxenv: "py310" } + - { python: "3.11", toxenv: "py311" } + - { python: "3.11", toxenv: "codestyle" } + + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.cfg.python }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.cfg.python }} + - name: Install Apache Ignite + run: | + curl -L https://apache-mirror.rbc.ru/pub/apache/ignite/${IGNITE_VERSION}/apache-ignite-slim-${IGNITE_VERSION}-bin.zip > ignite.zip + unzip ignite.zip -d /opt + mv /opt/apache-ignite-slim-${IGNITE_VERSION}-bin /opt/ignite + mv /opt/ignite/libs/optional/ignite-log4j2 /opt/ignite/libs/ + + - name: Install tox + run: | + pip install tox + + - name: Run tests + run: | + tox -e ${{ matrix.cfg.toxenv }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..7576fcd --- /dev/null +++ b/.gitignore @@ -0,0 +1,16 @@ +.idea +.benchmarks +.vscode +.eggs +.pytest_cache +.tox +*.so +build +distr +docs/generated +tests/config/*.xml +junit*.xml +pyignite.egg-info +ignite-log-* +__pycache__ +venv diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..3f62aea --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,13 @@ +graft requirements +graft examples +graft docs +recursive-exclude docs/generated * +graft tests +recursive-exclude tests/config *.xml +recursive-exclude tests/logs * +global-exclude *.py[cod] +global-exclude *__pycache__* +include tox.ini +include README.md +include LICENSE +include NOTICE diff --git a/NOTICE b/NOTICE new file mode 100644 index 0000000..feeebfd --- /dev/null +++ b/NOTICE @@ -0,0 +1,5 @@ +Apache Ignite binary client Python API +Copyright 2021 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/README.md b/README.md index 22732ce..be5fa7b 100644 --- a/README.md +++ b/README.md @@ -3,24 +3,23 @@ Apache Ignite thin (binary protocol) client, written in Python 3. ## Prerequisites -- Python 3.4 or above (3.6 is tested), +- Python 3.7 or above (3.7, 3.8, 3.9 and 3.10 are tested), - Access to Apache Ignite node, local or remote. The current thin client - version was tested on Apache Ignite 2.7.0 (binary client protocol 1.2.0). + version was tested on Apache Ignite 2.10 (binary client protocol 1.7.0). ## Installation -#### *for end user* +### *for end user* If you only want to use the `pyignite` module in your project, do: -``` +```bash $ pip install pyignite ``` -#### *for developer* +### *for developer* If you want to run tests, examples or build documentation, clone the whole repository: -``` -$ git clone git@github.com:apache/ignite.git -$ cd ignite/modules/platforms/python +```bash +$ git clone git@github.com:apache/ignite-python-thin-client.git $ pip install -e . ``` @@ -31,32 +30,64 @@ in the `pip` manual. Then run through the contents of `requirements` folder to install the additional requirements into your working Python environment using -``` +```bash $ pip install -r requirements/.txt ``` You may also want to consult the `setuptools` manual about using `setup.py`.
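Once installed in either mode, the client can be smoke-tested against a local node. A minimal sketch (not part of the repository; the cache name is made up, and an Apache Ignite node is assumed to be listening on localhost:10800, as elsewhere in this README):

```python
# Hypothetical smoke test: connect, write one entry, read it back.
from pyignite import Client

client = Client()
with client.connect('127.0.0.1', 10800):  # the connection is usable as a context manager
    cache = client.get_or_create_cache('readme_demo')
    cache.put('hello', 'world')
    print(cache.get('hello'))  # -> 'world'
```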
+### *optional C extension* +There is an optional C extension to speed up some computationally intensive tasks. If its compilation fails +(missing compiler or CPython headers), `pyignite` will be installed without this module. + +- On Linux or macOS only a C compiler is required (`gcc` or `clang`). The extension compiles during the standard setup process. +- To build universal `wheels` (binary packages) for Linux, just invoke the script `./scripts/create_distr.sh`. + + ***NB!* Docker is required.** + +- On Windows, MSVC 14.x is required and should be in the path; Python versions 3.7, 3.8, 3.9 and 3.10, both for x86 and + x86-64, should be installed. You can disable some of these versions, but you'd need to edit the script for that. +- To build `wheels` for Windows, invoke the script `.\scripts\BuildWheels.ps1` using PowerShell. Just make sure that + your execution policy allows execution of scripts in your environment. + + Ready wheels for `x86` and `x86-64` for different Python versions (3.7, 3.8, 3.9 and 3.10) will be + located in the `distr` directory. + +### Updating from an older version + +To upgrade an existing package, use the following command: +```bash +pip install --upgrade pyignite +``` + +To install the latest version of the package: +```bash +pip install pyignite +``` + +To install a specific version: +```bash +pip install pyignite==0.5.1 +``` + ## Documentation [The package documentation](https://apache-ignite-binary-protocol-client.readthedocs.io) is available at *RTD* for your convenience. If you want to build the documentation from source, do the developer -installation as described above, then run the following commands: -``` -$ cd ignite/modules/platforms/python +installation as described above, then run the following commands from the +client's root directory: +```bash $ pip install -r requirements/docs.txt $ cd docs $ make html ``` -Then open `ignite/modules/platforms/python/docs/generated/html/index.html` -in your browser. +Then open `docs/generated/html/index.html` in your browser. ## Examples -Some examples of using pyignite are provided in -`ignite/modules/platforms/python/examples` folder. They are extensively -commented in the +Some examples of using pyignite are provided in the `examples` folder. They are +extensively commented in the “[Examples of usage](https://apache-ignite-binary-protocol-client.readthedocs.io/en/latest/examples.html)” section of the documentation. @@ -64,12 +95,29 @@ This code implies that it is run in the environment with `pyignite` package installed, and Apache Ignite node is running on localhost:10800. ## Testing -Run +*NB!* It is recommended to install `pyignite` in development mode. +Refer to [this section](#for-developer) for instructions. + +Do not forget to install the test requirements: +```bash +$ pip install -r requirements/install.txt -r requirements/tests.txt +``` + +Also, you'll need to have a binary release of Ignite with `log4j2` enabled and to set +the `IGNITE_HOME` environment variable: +```bash +$ cd +$ export IGNITE_HOME=$(pwd) +$ cp -r $IGNITE_HOME/libs/optional/ignite-log4j2 $IGNITE_HOME/libs/ +``` +### Run basic tests +```bash +$ pytest ``` -$ cd ignite/modules/platforms/python -$ python setup.py pytest +### Run with examples +```bash +$ pytest --examples ``` -*NB!* All tests require Apache Ignite node running on localhost:10800. If you need to change the connection parameters, see the documentation on [testing](https://apache-ignite-binary-protocol-client.readthedocs.io/en/latest/readme.html#testing).
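The optional C extension described in the README above accelerates hash code and schema id calculation; its source appears in the `cext/cutils.c` diff below. As a rough orientation for readers who skip the C code, here is a pure-Python sketch of the same FNV-1-style schema id algorithm (illustrative only, not the shipped fallback; the `& 0xffffffff` masking stands in for the C code's 32-bit integer wraparound):

```python
# Sketch of the schema id calculation from cext/cutils.c: FNV-1 over the
# bytes of a Java-style hash code of each lower-cased field name.
FNV1_OFFSET_BASIS = 0x811c9dc5
FNV1_PRIME = 0x01000193


def field_id(name: str) -> int:
    """Java String.hashCode()-style hash of the lower-cased field name."""
    h = 0
    for ch in name.lower():
        h = (31 * h + ord(ch)) & 0xffffffff
    return h


def schema_id(field_names) -> int:
    s_id = FNV1_OFFSET_BASIS
    for name in field_names:
        f_id = field_id(name)
        for shift in (0, 8, 16, 24):              # mix in one byte at a time
            s_id ^= (f_id >> shift) & 0xff
            s_id = (s_id * FNV1_PRIME) & 0xffffffff
    # report the result as a signed 32-bit integer, like the C module does
    return s_id - 0x100000000 if s_id >= 0x80000000 else s_id


print(schema_id(['id', 'name', 'population']))
```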
diff --git a/RELEASE_NOTES.txt b/RELEASE_NOTES.txt new file mode 100644 index 0000000..a67624a --- /dev/null +++ b/RELEASE_NOTES.txt @@ -0,0 +1,48 @@ +Apache Ignite python thin client +================================ + +0.5.2 +-------------------------------- +* Fixed incorrect partial read from socket in sync client +* Fixed nested object arrays deserialization + +0.5.1 +-------------------------------- +* Added logging of connection and queries events +* Added event listeners to connection events and query events +* Added client's side handshake timeout +* Fixed excessive deprecation warnings on python 3.7 +* Fixed request to failed node when querying replicated cache +* Fixed excessive partition mapping requests + +0.5.0 +-------------------------------- +* Added transaction API support (sync and async versions, async version supports only python 3.7+) +* Added ExpiryPolicy (TTL) support +* Improved performance of asyncio version by reimplementing network code using asyncio transports and protocols +* Enabled partition awareness by default +* Fixed handling collections of binary objects + +0.4.0 +-------------------------------- +* Added partition awareness support +* Added asyncio support +* Added C module to speedup hashcode calculation +* Implement context management for connection method +* Implement cursors and context management for ScanQuery, SqlQuery and SqlFieldsQuery +* Add the ability to activate/deactivate the cluster +* Implement support for big-endianness +* Implement support of password for certificates +* Fix performance issues while working with big bytearrays and binary objects +* Fix serialization/deserialization of cache configuration +* Fix handling of null fields +* Fix SQL API +* Fix UUID serialization/deserialization +* Fix nested complex objects +* Fix incorrect hash code calculation for classes as composite keys +* Fix hashing of complex object +* Fix insert and select VARBINARY data type through SQL +* Fix wrong order of the SQL query result +* Fix handling of bytes and bytearrays +* Fix bool arrays handling +* Fix cache.get_size with non-default PeekModes diff --git a/cext/cutils.c b/cext/cutils.c new file mode 100644 index 0000000..0106edc --- /dev/null +++ b/cext/cutils.c @@ -0,0 +1,193 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one or more +* contributor license agreements. See the NOTICE file distributed with +* this work for additional information regarding copyright ownership. +* The ASF licenses this file to You under the Apache License, Version 2.0 +* (the "License"); you may not use this file except in compliance with +* the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +#include <Python.h> + +#ifdef _MSC_VER + +typedef __int32 int32_t; +typedef unsigned __int32 uint32_t; +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; + +#else +#include <stdint.h> +#endif + +static int32_t FNV1_OFFSET_BASIS = 0x811c9dc5; +static int32_t FNV1_PRIME = 0x01000193; + + +PyObject* hashcode(PyObject* self, PyObject *args); +PyObject* schema_id(PyObject* self, PyObject *args); + +PyObject* str_hashcode(PyObject* data); +int32_t str_hashcode_(PyObject* data, int lower); +PyObject* b_hashcode(PyObject* data); + +static PyMethodDef methods[] = { + {"hashcode", (PyCFunction) hashcode, METH_VARARGS, ""}, + {"schema_id", (PyCFunction) schema_id, METH_VARARGS, ""}, + {NULL, NULL, 0, NULL} /* Sentinel */ +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "_cutils", + 0, /* m_doc */ + -1, /* m_size */ + methods, /* m_methods */ + NULL, /* m_slots */ + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL, /* m_free */ +}; + +static char* hashcode_input_err = "supported only strings, bytearrays, bytes and memoryview"; +static char* schema_id_input_err = "input argument must be dict or int"; +static char* schema_field_type_err = "schema keys must be strings"; + +PyMODINIT_FUNC PyInit__cutils(void) { + return PyModule_Create(&moduledef); +} + +PyObject* hashcode(PyObject* self, PyObject *args) { + PyObject* data; + + if (!PyArg_ParseTuple(args, "O", &data)) { + return NULL; + } + + if (data == Py_None) { + return PyLong_FromLong(0); + } + else if (PyUnicode_CheckExact(data)) { + return str_hashcode(data); + } + else { + return b_hashcode(data); + } +} + +PyObject* str_hashcode(PyObject* data) { + return PyLong_FromLong(str_hashcode_(data, 0)); +} + +int32_t str_hashcode_(PyObject *str, int lower) { + int32_t res = 0; + + Py_ssize_t sz = PyUnicode_GET_LENGTH(str); + if (!sz) { + return res; + } + + int kind = PyUnicode_KIND(str); + void* buf = PyUnicode_DATA(str); + + Py_ssize_t i; + for (i = 0; i < sz; i++) { + Py_UCS4 ch = PyUnicode_READ(kind, buf, i); + + if (lower) { + ch = Py_UNICODE_TOLOWER(ch); + } + + res = 31 * res + ch; + } + + return res; +} + +PyObject* b_hashcode(PyObject* data) { + int32_t res = 1; + Py_ssize_t sz; char* buf; + + if (PyBytes_CheckExact(data)) { + sz = PyBytes_GET_SIZE(data); + buf = PyBytes_AS_STRING(data); + } + else if (PyByteArray_CheckExact(data)) { + sz = PyByteArray_GET_SIZE(data); + buf = PyByteArray_AS_STRING(data); + } + else if (PyMemoryView_Check(data)) { + Py_buffer* pyBuf = PyMemoryView_GET_BUFFER(data); + sz = pyBuf->len; + buf = (char*)pyBuf->buf; + } + else { + PyErr_SetString(PyExc_ValueError, hashcode_input_err); + return NULL; + } + + Py_ssize_t i; + for (i = 0; i < sz; i++) { + res = 31 * res + (signed char)buf[i]; + } + + return PyLong_FromLong(res); +} + +PyObject* schema_id(PyObject* self, PyObject *args) { + PyObject* data; + + if (!PyArg_ParseTuple(args, "O", &data)) { + return NULL; + } + + if (PyLong_CheckExact(data)) { + return PyNumber_Long(data); + } + else if (data == Py_None) { + return PyLong_FromLong(0); + } + else if (PyDict_Check(data)) { + Py_ssize_t sz = PyDict_Size(data); + + if (sz == 0) { + return PyLong_FromLong(0); + } + + int32_t s_id = FNV1_OFFSET_BASIS; + + PyObject *key, *value; + Py_ssize_t pos = 0; + + while (PyDict_Next(data, &pos, &key, &value)) { + if (!PyUnicode_CheckExact(key)) { + PyErr_SetString(PyExc_ValueError, schema_field_type_err); + return NULL; + } + + int32_t field_id = str_hashcode_(key, 1); + s_id ^= field_id & 0xff; + s_id *= FNV1_PRIME; + s_id ^= (field_id >> 8) & 0xff; + 
s_id *= FNV1_PRIME; + s_id ^= (field_id >> 16) & 0xff; + s_id *= FNV1_PRIME; + s_id ^= (field_id >> 24) & 0xff; + s_id *= FNV1_PRIME; + } + + return PyLong_FromLong(s_id); + } + else { + PyErr_SetString(PyExc_ValueError, schema_id_input_err); + return NULL; + } +} diff --git a/docs/async_examples.rst b/docs/async_examples.rst new file mode 100644 index 0000000..644fcfe --- /dev/null +++ b/docs/async_examples.rst @@ -0,0 +1,226 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +.. _async_examples_of_usage: + +============================ +Asynchronous client examples +============================ +File: `async_key_value.py`_. + +Basic usage +----------- +The asynchronous client and cache (:py:class:`~pyignite.aio_client.AioClient` and :py:class:`~pyignite.aio_cache.AioCache`) +have mostly the same API as the synchronous ones (:py:class:`~pyignite.client.Client` and :py:class:`~pyignite.cache.Cache`). +But there are some peculiarities. + +Basic key-value +=============== +Firstly, import the dependencies. + +.. literalinclude:: ../examples/async_key_value.py + :language: python + :lines: 19 + +Let's connect to the cluster and perform key-value queries. + +.. literalinclude:: ../examples/async_key_value.py + :language: python + :dedent: 4 + :lines: 23-47 + +Scan +==== +The :py:meth:`~pyignite.aio_cache.AioCache.scan` method returns :py:class:`~pyignite.cursors.AioScanCursor`, +which yields the resulting rows. + +.. literalinclude:: ../examples/async_key_value.py + :language: python + :dedent: 8 + :lines: 49-60 + +ExpiryPolicy +============ +File: `expiry_policy.py`_. + +You can enable expiry policy (TTL) in two ways. + +Firstly, an expiry policy can be set for the entire cache by setting :py:attr:`~pyignite.datatypes.prop_codes.PROP_EXPIRY_POLICY` +in the cache settings dictionary on creation. + +.. literalinclude:: ../examples/expiry_policy.py + :language: python + :dedent: 12 + :lines: 74-77 + +.. literalinclude:: ../examples/expiry_policy.py + :language: python + :dedent: 12 + :lines: 83-91 + +Secondly, an expiry policy can be set for all cache operations performed through a decorator-like cache wrapper. To create it, use +:py:meth:`~pyignite.cache.BaseCache.with_expire_policy`. + +.. literalinclude:: ../examples/expiry_policy.py + :language: python + :dedent: 12 + :lines: 98-107 + +Transactions +------------ +File: `transactions.py`_. + +Client transactions are supported for caches with +:py:attr:`~pyignite.datatypes.cache_config.CacheAtomicityMode.TRANSACTIONAL` mode. +**Supported only on Python 3.7+** + +Let's create a transactional cache: + +.. literalinclude:: ../examples/transactions.py + :language: python + :dedent: 8 + :lines: 30-33 + +Let's start a transaction and commit it: + +.. 
literalinclude:: ../examples/transactions.py + :language: python + :dedent: 8 + :lines: 36-42 + +Let's check that the transaction was committed successfully: + +.. literalinclude:: ../examples/transactions.py + :language: python + :dedent: 8 + :lines: 45-46 + +Let's check that raising an exception inside the `async with` block leads to the transaction's rollback: + +.. literalinclude:: ../examples/transactions.py + :language: python + :dedent: 8 + :lines: 49-61 + +Let's check that a timed-out transaction is successfully rolled back: + +.. literalinclude:: ../examples/transactions.py + :language: python + :dedent: 8 + :lines: 64-75 + +See more info about transaction parameters in the documentation of :py:meth:`~pyignite.aio_client.AioClient.tx_start`. + +SQL +--- +File: `async_sql.py`_. + +First, let us establish a connection. + +.. literalinclude:: ../examples/async_sql.py + :language: python + :dedent: 4 + :lines: 24-25 + +Then create tables. Begin with the `Country` table, then proceed with the related +tables `City` and `CountryLanguage`. + +.. literalinclude:: ../examples/helpers/sql_helper.py + :language: python + :dedent: 4 + :lines: 27-43, 53-60, 68-74 + +.. literalinclude:: ../examples/async_sql.py + :language: python + :dedent: 4 + :lines: 27-32 + +Create indexes. + +.. literalinclude:: ../examples/helpers/sql_helper.py + :language: python + :dedent: 4 + :lines: 62, 76 + +.. literalinclude:: ../examples/async_sql.py + :language: python + :dedent: 8 + :lines: 35-36 + +Fill the tables with data. + +.. literalinclude:: ../examples/helpers/sql_helper.py + :language: python + :dedent: 4 + :lines: 45-51, 64-66, 78-80 + +.. literalinclude:: ../examples/async_sql.py + :language: python + :dedent: 8 + :lines: 39-49 + +Now let us answer some questions. + +What are the 10 largest cities in our data sample (population-wise)? +==================================================================== + +.. literalinclude:: ../examples/async_sql.py + :language: python + :dedent: 8 + :lines: 52-66 + +The :py:meth:`~pyignite.aio_client.AioClient.sql` method returns :py:class:`~pyignite.cursors.AioSqlFieldsCursor`, +which yields the resulting rows. + +What are the 10 most populated cities throughout the 3 chosen countries? +======================================================================== + +If you set the `include_field_names` argument to `True`, the +:py:meth:`~pyignite.client.Client.sql` method will generate a list of +column names as its first yield. Unfortunately, there is no async equivalent of the built-in `next`, but +you can await :py:meth:`__anext__()` +of :py:class:`~pyignite.cursors.AioSqlFieldsCursor`. + +.. literalinclude:: ../examples/async_sql.py + :language: python + :dedent: 8 + :lines: 69-95 + +Display all the information about a given city +============================================== + +.. literalinclude:: ../examples/async_sql.py + :language: python + :dedent: 8 + :lines: 98-110 + +Finally, delete the tables used in this example with the following queries: + +.. literalinclude:: ../examples/helpers/sql_helper.py + :language: python + :dedent: 4 + :lines: 82 + +.. literalinclude:: ../examples/async_sql.py + :language: python + :dedent: 8 + :lines: 113-115 + + + +.. _expiry_policy.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/expiry_policy.py +.. _async_key_value.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/async_key_value.py +.. _async_sql.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/async_sql.py +.. 
_transactions.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/transactions.py \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py index 8c498aa..31e4fa1 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,4 +1,19 @@ -# -*- coding: utf-8 -*- +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + # # Configuration file for the Sphinx documentation builder. # @@ -14,19 +29,16 @@ # import os import sys + + sys.path.insert(0, os.path.abspath('../')) # -- Project information ----------------------------------------------------- project = 'Apache Ignite binary client Python API' -copyright = '2018, Apache Software Foundation (ASF)' -author = 'Dmitry Melnichuk' - -# The short X.Y version -version = '' -# The full version, including alpha/beta/rc tags -release = '0.1.0' +copyright = '2021, Apache Software Foundation (ASF)' +author = '' # -- General configuration --------------------------------------------------- diff --git a/docs/datatypes/cache_props.rst b/docs/datatypes/cache_props.rst index 03443b9..77d50f7 100644 --- a/docs/datatypes/cache_props.rst +++ b/docs/datatypes/cache_props.rst @@ -22,84 +22,84 @@ Cache Properties The :mod:`~pyignite.datatypes.prop_codes` module contains a list of ordinal values, that represent various cache settings. -Please refer to the `Apache Ignite Data Grid`_ documentation on cache +Please refer to the `Configuring Caches`_ documentation on cache synchronization, rebalance, affinity and other cache configuration-related matters. -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| Property | Ordinal | Property | Description | -| name | value | type | | -+=======================================+==========+==========+=======================================================+ -| Read/write cache properties, used to configure cache via :py:meth:`~pyignite.client.Client.create_cache` or | -| :py:meth:`~pyignite.client.Client.get_or_create_cache` | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_NAME | 0 | str | Cache name. This is the only *required* property. 
| -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_CACHE_MODE | 1 | int | Cache mode: LOCAL=0, REPLICATED=1, PARTITIONED=2 | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_CACHE_ATOMICITY_MODE | 2 | int | Cache atomicity mode: TRANSACTIONAL=0, ATOMIC=1 | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_BACKUPS_NUMBER | 3 | int | Number of backups | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_WRITE_SYNCHRONIZATION_MODE | 4 | int | Write synchronization mode: FULL_SYNC=0, | -| | | | FULL_ASYNC=1, PRIMARY_SYNC=2 | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_COPY_ON_READ | 5 | bool | Copy-on-read | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_READ_FROM_BACKUP | 6 | bool | Read from backup | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_DATA_REGION_NAME | 100 | str | Data region name | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_IS_ONHEAP_CACHE_ENABLED | 101 | bool | Is OnHeap cache enabled? | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_QUERY_ENTITIES | 200 | list | A list of query entities (see `Query entity`_) | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_QUERY_PARALLELISM | 201 | int | Query parallelism | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_QUERY_DETAIL_METRIC_SIZE | 202 | int | Query detail metric size | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_SQL_SCHEMA | 203 | str | SQL schema | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_SQL_INDEX_INLINE_MAX_SIZE | 204 | int | SQL index inline maximum size | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_SQL_ESCAPE_ALL | 205 | bool | Turns on SQL escapes | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_MAX_QUERY_ITERATORS | 206 | int | Maximum number of query iterators | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_MODE | 300 | int | Rebalance mode: SYNC=0, ASYNC=1, NONE=2 | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_DELAY | 301 | int | Rebalance delay (ms) | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_TIMEOUT | 302 | int | Rebalance timeout (ms) | 
-+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_BATCH_SIZE | 303 | int | Rebalance batch size | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_BATCHES_PREFETCH_COUNT | 304 | int | Rebalance batches prefetch count | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_ORDER | 305 | int | Rebalance order | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_THROTTLE | 306 | int | Rebalance throttle (ms) | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_GROUP_NAME | 400 | str | Group name | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_CACHE_KEY_CONFIGURATION | 401 | list | Cache key configuration (see `Cache key`_) | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_DEFAULT_LOCK_TIMEOUT | 402 | int | Default lock timeout (ms) | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_MAX_CONCURRENT_ASYNC_OPERATIONS | 403 | int | Maximum number of concurrent asynchronous operations | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_PARTITION_LOSS_POLICY | 404 | int | Partition loss policy: READ_ONLY_SAFE=0, | -| | | | READ_ONLY_ALL=1, READ_WRITE_SAFE=2, READ_WRITE_ALL=3, | -| | | | IGNORE=4 | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_EAGER_TTL | 405 | bool | Eager TTL | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_STATISTICS_ENABLED | 406 | bool | Statistics enabled | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| Read-only cache properties. Can not be set, but only retrieved via :py:meth:`~pyignite.cache.Cache.settings` | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_INVALIDATE | -1 | bool | Invalidate | -+---------------------------------------+----------+----------+-------------------------------------------------------+ ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| Property | Ordinal | Property | Description | +| name | value | type | | ++=======================================+==========+============================================================+=======================================================+ +| Read/write cache properties, used to configure cache via :py:meth:`~pyignite.client.Client.create_cache` or | +| :py:meth:`~pyignite.client.Client.get_or_create_cache` of :py:class:`~pyignite.client.Client` | +| (:py:meth:`~pyignite.aio_client.AioClient.create_cache` or | +| :py:meth:`~pyignite.aio_client.AioClient.get_or_create_cache` of :py:class:`~pyignite.aio_client.AioClient`). 
| ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_NAME | 0 | str | Cache name. This is the only *required* property. | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_CACHE_MODE | 1 | int | Cache mode: LOCAL=0, REPLICATED=1, PARTITIONED=2 | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_CACHE_ATOMICITY_MODE | 2 | int | Cache atomicity mode: TRANSACTIONAL=0, ATOMIC=1 | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_BACKUPS_NUMBER | 3 | int | Number of backups | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_WRITE_SYNCHRONIZATION_MODE | 4 | int | Write synchronization mode: FULL_SYNC=0, | +| | | | FULL_ASYNC=1, PRIMARY_SYNC=2 | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_COPY_ON_READ | 5 | bool | Copy-on-read | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_READ_FROM_BACKUP | 6 | bool | Read from backup | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_DATA_REGION_NAME | 100 | str | Data region name | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_IS_ONHEAP_CACHE_ENABLED | 101 | bool | Is OnHeap cache enabled? 
| ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_QUERY_ENTITIES | 200 | list | A list of query entities (see `Query entity`_) | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_QUERY_PARALLELISM | 201 | int | Query parallelism | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_QUERY_DETAIL_METRIC_SIZE | 202 | int | Query detail metric size | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_SQL_SCHEMA | 203 | str | SQL schema | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_SQL_INDEX_INLINE_MAX_SIZE | 204 | int | SQL index inline maximum size | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_SQL_ESCAPE_ALL | 205 | bool | Turns on SQL escapes | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_MAX_QUERY_ITERATORS | 206 | int | Maximum number of query iterators | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_MODE | 300 | int | Rebalance mode: SYNC=0, ASYNC=1, NONE=2 | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_DELAY | 301 | int | Rebalance delay (ms) | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_TIMEOUT | 302 | int | Rebalance timeout (ms) | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_BATCH_SIZE | 303 | int | Rebalance batch size | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_BATCHES_PREFETCH_COUNT | 304 | int | Rebalance batches prefetch count | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_ORDER | 305 | int | Rebalance order | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_THROTTLE | 306 | int | Rebalance throttle (ms) | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| 
PROP_GROUP_NAME | 400 | str | Group name | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_CACHE_KEY_CONFIGURATION | 401 | list | Cache key configuration (see `Cache key`_) | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_DEFAULT_LOCK_TIMEOUT | 402 | int | Default lock timeout (ms) | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_MAX_CONCURRENT_ASYNC_OPERATIONS | 403 | int | Maximum number of concurrent asynchronous operations | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_PARTITION_LOSS_POLICY | 404 | int | Partition loss policy: READ_ONLY_SAFE=0, | +| | | | READ_ONLY_ALL=1, READ_WRITE_SAFE=2, READ_WRITE_ALL=3, | +| | | | IGNORE=4 | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_EAGER_TTL | 405 | bool | Eager TTL | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_STATISTICS_ENABLED | 406 | bool | Statistics enabled | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_EXPIRY_POLICY | 407 | :py:class:`~pyignite.datatypes.expiry_policy.ExpiryPolicy` | Set expiry policy (see `Expiry policy`_) | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ Query entity ------------ @@ -160,4 +160,10 @@ A dict of the following format: - `type_name`: name of the complex object, - `affinity_key_field_name`: name of the affinity key field. -.. _Apache Ignite Data Grid: https://apacheignite.readme.io/docs/data-grid +.. _Configuring Caches: https://ignite.apache.org/docs/latest/configuring-caches/configuration-overview.html + +Expiry policy +------------- + +Set expiry policy to cache (see :py:class:`~pyignite.datatypes.expiry_policy.ExpiryPolicy`). If set to `None`, +expiry policy will not be set. diff --git a/docs/datatypes/parsers.rst b/docs/datatypes/parsers.rst index a717f4c..06ce659 100644 --- a/docs/datatypes/parsers.rst +++ b/docs/datatypes/parsers.rst @@ -47,129 +47,129 @@ However, in some rare cases of type ambiguity, as well as for the needs of interoperability, you may have to sneak one or the other class, along with your data, in to some API function as a *type conversion hint*. 
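To make the hint mechanism concrete, below is a hedged sketch of passing one of
these parser classes as a value hint. The cache name and values are invented for
illustration; ``value_hint`` and ``key_hint`` are the hint parameters accepted by
key-value operations such as ``put()``.

.. code-block:: python

    # Store a Python int as Ignite's 2-byte short (type_code 0x02) instead of
    # the default long, by passing the parser class as a conversion hint.
    from pyignite import Client
    from pyignite.datatypes import ShortObject

    client = Client()
    with client.connect('127.0.0.1', 10800):
        cache = client.get_or_create_cache('hint_demo')
        cache.put('answer', 42, value_hint=ShortObject)
        print(cache.get('answer'))  # -> 42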
-+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|`type_code`|Apache Ignite |Python type |Parser/constructor | -| |docs reference |or class |class | -+===========+====================+===============================+=================================================================+ -|*Primitive data types* | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x01 |Byte_ |int |:class:`~pyignite.datatypes.primitive_objects.ByteObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x02 |Short_ |int |:class:`~pyignite.datatypes.primitive_objects.ShortObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x03 |Int_ |int |:class:`~pyignite.datatypes.primitive_objects.IntObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x04 |Long_ |int |:class:`~pyignite.datatypes.primitive_objects.LongObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x05 |Float_ |float |:class:`~pyignite.datatypes.primitive_objects.FloatObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x06 |Double_ |float |:class:`~pyignite.datatypes.primitive_objects.DoubleObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x07 |Char_ |str |:class:`~pyignite.datatypes.primitive_objects.CharObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x08 |Bool_ |bool |:class:`~pyignite.datatypes.primitive_objects.BoolObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x65 |Null_ |NoneType |:class:`~pyignite.datatypes.null_object.Null` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|*Standard objects* | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x09 |String_ |Str |:class:`~pyignite.datatypes.standard.String` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x0a |UUID_ |uuid.UUID |:class:`~pyignite.datatypes.standard.UUIDObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x21 |Timestamp_ |tuple |:class:`~pyignite.datatypes.standard.TimestampObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x0b |Date_ |datetime.datetime |:class:`~pyignite.datatypes.standard.DateObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x24 |Time_ |datetime.timedelta |:class:`~pyignite.datatypes.standard.TimeObject` | 
-+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x1e |Decimal_ |decimal.Decimal |:class:`~pyignite.datatypes.standard.DecimalObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x1c |Enum_ |tuple |:class:`~pyignite.datatypes.standard.EnumObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x67 |`Binary enum`_ |tuple |:class:`~pyignite.datatypes.standard.BinaryEnumObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|*Arrays of primitives* | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x0c |`Byte array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.ByteArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x0d |`Short array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.ShortArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x0e |`Int array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.IntArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x0f |`Long array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.LongArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x10 |`Float array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.FloatArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x11 |`Double array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.DoubleArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x12 |`Char array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.CharArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x13 |`Bool array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.BoolArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|*Arrays of standard objects* | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x14 |`String array`_ |iterable/list |:class:`~pyignite.datatypes.standard.StringArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x15 |`UUID array`_ |iterable/list |:class:`~pyignite.datatypes.standard.UUIDArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x22 |`Timestamp array`_ |iterable/list 
|:class:`~pyignite.datatypes.standard.TimestampArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x16 |`Date array`_ |iterable/list |:class:`~pyignite.datatypes.standard.DateArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x23 |`Time array`_ |iterable/list |:class:`~pyignite.datatypes.standard.TimeArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x1f |`Decimal array`_ |iterable/list |:class:`~pyignite.datatypes.standard.DecimalArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|*Object collections, special types, and complex object* | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x17 |`Object array`_ |iterable/list |:class:`~pyignite.datatypes.complex.ObjectArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x18 |`Collection`_ |tuple |:class:`~pyignite.datatypes.complex.CollectionObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x19 |`Map`_ |dict, collections.OrderedDict |:class:`~pyignite.datatypes.complex.MapObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x1d |`Enum array`_ |iterable/list |:class:`~pyignite.datatypes.standard.EnumArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x67 |`Complex object`_ |object |:class:`~pyignite.datatypes.complex.BinaryObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x1b |`Wrapped data`_ |tuple |:class:`~pyignite.datatypes.complex.WrappedDataObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +| `type_code` |Apache Ignite |Python type |Parser/constructor | +| |docs reference |or class |class | ++=============+====================+===============================+==================================================================+ +|*Primitive data types* | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x01 |Byte_ |int |:class:`~pyignite.datatypes.primitive_objects.ByteObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x02 |Short_ |int |:class:`~pyignite.datatypes.primitive_objects.ShortObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x03 |Int_ |int |:class:`~pyignite.datatypes.primitive_objects.IntObject` | 
++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x04 |Long_ |int |:class:`~pyignite.datatypes.primitive_objects.LongObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x05 |Float_ |float |:class:`~pyignite.datatypes.primitive_objects.FloatObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x06 |Double_ |float |:class:`~pyignite.datatypes.primitive_objects.DoubleObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x07 |Char_ |str |:class:`~pyignite.datatypes.primitive_objects.CharObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x08 |Bool_ |bool |:class:`~pyignite.datatypes.primitive_objects.BoolObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x65 |Null_ |NoneType |:class:`~pyignite.datatypes.null_object.Null` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|*Standard objects* | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x09 |String_ |Str |:class:`~pyignite.datatypes.standard.String` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x0a |UUID_ |uuid.UUID |:class:`~pyignite.datatypes.standard.UUIDObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x21 |Timestamp_ |tuple |:class:`~pyignite.datatypes.standard.TimestampObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x0b |Date_ |datetime.datetime |:class:`~pyignite.datatypes.standard.DateObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x24 |Time_ |datetime.timedelta |:class:`~pyignite.datatypes.standard.TimeObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x1e |Decimal_ |decimal.Decimal |:class:`~pyignite.datatypes.standard.DecimalObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x1c |Enum_ |tuple |:class:`~pyignite.datatypes.standard.EnumObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x67 |`Binary enum`_ |tuple |:class:`~pyignite.datatypes.standard.BinaryEnumObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|*Arrays of primitives* | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x0c |`Byte array`_ |iterable/bytearray 
|:class:`~pyignite.datatypes.primitive_arrays.ByteArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x0d |`Short array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.ShortArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x0e |`Int array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.IntArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x0f |`Long array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.LongArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x10 |`Float array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.FloatArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x11 |`Double array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.DoubleArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x12 |`Char array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.CharArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x13 |`Bool array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.BoolArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|*Arrays of standard objects* | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x14 |`String array`_ |iterable/list |:class:`~pyignite.datatypes.standard.StringArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x15 |`UUID array`_ |iterable/list |:class:`~pyignite.datatypes.standard.UUIDArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x22 |`Timestamp array`_ |iterable/list |:class:`~pyignite.datatypes.standard.TimestampArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x16 |`Date array`_ |iterable/list |:class:`~pyignite.datatypes.standard.DateArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x23 |`Time array`_ |iterable/list |:class:`~pyignite.datatypes.standard.TimeArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x1f |`Decimal array`_ |iterable/list |:class:`~pyignite.datatypes.standard.DecimalArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|*Object collections, special types, and complex object* | 
++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x17 |`Object array`_ |tuple[int, iterable/list] |:class:`~pyignite.datatypes.complex.ObjectArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x18 |`Collection`_ |tuple[int, iterable/list] |:class:`~pyignite.datatypes.complex.CollectionObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x19 |`Map`_ |tuple[int, dict/OrderedDict] |:class:`~pyignite.datatypes.complex.MapObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x1d |`Enum array`_ |iterable/list |:class:`~pyignite.datatypes.standard.EnumArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x67 |`Complex object`_ |object |:class:`~pyignite.datatypes.complex.BinaryObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x1b |`Wrapped data`_ |tuple[int, bytes] |:class:`~pyignite.datatypes.complex.WrappedDataObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ -.. _Byte: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-byte -.. _Short: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-short -.. _Int: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-int -.. _Long: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-long -.. _Float: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-float -.. _Double: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-double -.. _Char: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-char -.. _Bool: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-bool -.. _Null: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-null -.. _String: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-string -.. _UUID: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-uuid-guid- -.. _Timestamp: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-timestamp -.. _Date: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-date -.. _Time: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-time -.. _Decimal: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-decimal -.. _Enum: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-enum -.. _Byte array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-byte-array -.. _Short array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-short-array -.. _Int array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-int-array -.. _Long array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-long-array -.. 
_Float array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-float-array -.. _Double array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-double-array -.. _Char array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-char-array -.. _Bool array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-bool-array -.. _String array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-string-array -.. _UUID array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-uuid-guid-array -.. _Timestamp array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-timestamp-array -.. _Date array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-date-array -.. _Time array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-time-array -.. _Decimal array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-decimal-array -.. _Object array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-object-collections -.. _Collection: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-collection -.. _Map: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-map -.. _Enum array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-enum-array -.. _Binary enum: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-binary-enum -.. _Wrapped data: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-wrapped-data -.. _Complex object: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-complex-object +.. _Byte: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#byte +.. _Short: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#short +.. _Int: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#int +.. _Long: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#long +.. _Float: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#float +.. _Double: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#double +.. _Char: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#char +.. _Bool: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#bool +.. _Null: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#null +.. _String: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#string +.. _UUID: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#uuid-guid +.. _Timestamp: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#timestamp +.. _Date: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#date +.. _Time: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#time +.. _Decimal: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#decimal +.. _Enum: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#enum +.. _Byte array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#byte-array +.. _Short array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#short-array +.. 
_Int array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#int-array +.. _Long array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#long-array +.. _Float array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#float-array +.. _Double array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#double-array +.. _Char array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#char-array +.. _Bool array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#bool-array +.. _String array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#string-array +.. _UUID array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#uuid-guid-array +.. _Timestamp array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#timestamp-array +.. _Date array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#date-array +.. _Time array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#time-array +.. _Decimal array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#decimal-array +.. _Object array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#object-collections +.. _Collection: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#collection +.. _Map: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#map +.. _Enum array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#enum-array +.. _Binary enum: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#binary-enum +.. _Wrapped data: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#wrapped-data +.. _Complex object: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#complex-object diff --git a/docs/examples.rst b/docs/examples.rst index 3d8d2d9..8f40b91 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -37,28 +37,32 @@ Create cache .. literalinclude:: ../examples/get_and_put.py :language: python - :lines: 21 + :dedent: 4 + :lines: 20 Put value in cache ================== .. literalinclude:: ../examples/get_and_put.py :language: python - :lines: 23 + :dedent: 4 + :lines: 22 Get value from cache ==================== .. literalinclude:: ../examples/get_and_put.py :language: python - :lines: 25-29 + :dedent: 4 + :lines: 24-28 Get multiple values from cache ============================== .. literalinclude:: ../examples/get_and_put.py :language: python - :lines: 31-36 + :dedent: 4 + :lines: 30-35 Type hints usage ================ @@ -66,7 +70,8 @@ File: `type_hints.py`_ .. literalinclude:: ../examples/type_hints.py :language: python - :lines: 24-48 + :dedent: 4 + :lines: 23-47 As a rule of thumb: @@ -80,6 +85,33 @@ As a rule of thumb: Refer the :ref:`data_types` section for the full list of parser/constructor classes you can use as type hints. +ExpiryPolicy +============ +File: `expiry_policy.py`_. + +You can enable expiry policy (TTL) by two approaches. + +Firstly, expiry policy can be set for entire cache by setting :py:attr:`~pyignite.datatypes.prop_codes.PROP_EXPIRY_POLICY` +in cache settings dictionary on creation. + +.. literalinclude:: ../examples/expiry_policy.py + :language: python + :dedent: 12 + :lines: 33-36 + +.. 
literalinclude:: ../examples/expiry_policy.py + :language: python + :dedent: 12 + :lines: 42-48 + +Secondly, expiry policy can be set for all cache operations, which are done under decorator. To create it use +:py:meth:`~pyignite.cache.BaseCache.with_expire_policy` + +.. literalinclude:: ../examples/expiry_policy.py + :language: python + :dedent: 12 + :lines: 55-62 + Scan ==== File: `scans.py`_. @@ -91,35 +123,133 @@ Let us put some data in cache. .. literalinclude:: ../examples/scans.py :language: python - :lines: 23-33 + :dedent: 4 + :lines: 22-31 -:py:meth:`~pyignite.cache.Cache.scan` returns a generator, that yields +:py:meth:`~pyignite.cache.Cache.scan` returns a cursor, that yields two-tuples of key and value. You can iterate through the generated pairs in a safe manner: .. literalinclude:: ../examples/scans.py :language: python - :lines: 34-41 + :dedent: 4 + :lines: 33-41 -Or, alternatively, you can convert the generator to dictionary in one go: +Or, alternatively, you can convert the cursor to dictionary in one go: .. literalinclude:: ../examples/scans.py :language: python - :lines: 44-52 + :dedent: 4 + :lines: 43-52 But be cautious: if the cache contains a large set of data, the dictionary -may eat too much memory! +may consume too much memory! + +.. _sql_examples: + +Object collections +------------------ + +File: `get_and_put_complex.py`_. + +Ignite collection types are represented in `pyignite` as two-tuples. +First comes collection type ID or deserialization hint, which is specific for +each of the collection type. Second comes the data value. + +.. literalinclude:: ../examples/get_and_put_complex.py + :language: python + :lines: 17 + +Map +=== + +For Python prior to 3.6, it might be important to distinguish between ordered +(`collections.OrderedDict`) and unordered (`dict`) dictionary types, so you +could use :py:attr:`~pyignite.datatypes.complex.Map.LINKED_HASH_MAP` +for the former and :py:attr:`~pyignite.datatypes.complex.Map.HASH_MAP` +for the latter. + +Since CPython 3.6 all dictionaries became de facto ordered. You can always use +`LINKED_HASH_MAP` as a safe default. -Do cleanup +.. literalinclude:: ../examples/get_and_put_complex.py + :language: python + :dedent: 4 + :lines: 22-36 + +Collection ========== -Destroy created cache and close connection. +See :class:`~pyignite.datatypes.complex.CollectionObject` and Ignite +documentation on `Collection`_ type for the description of various Java +collection types. Note that not all of them have a direct Python +representative. For example, Python do not have ordered sets (it is indeed +recommended to use `OrderedDict`'s keys and disregard its values). -.. literalinclude:: ../examples/scans.py +As for the `pyignite`, the rules are simple: pass any iterable as a data, +and you always get `list` back. + +.. literalinclude:: ../examples/get_and_put_complex.py :language: python - :lines: 54-55 + :dedent: 4 + :lines: 38-52 + +Object array +============ + +:class:`~pyignite.datatypes.complex.ObjectArrayObject` has a very limited +functionality in `pyignite`, since no type checks can be enforced on its +contents. But it still can be used for interoperability with Java. + +.. literalinclude:: ../examples/get_and_put_complex.py + :language: python + :dedent: 4 + :lines: 54-63 -.. _sql_examples: + +Transactions +------------ +File: `transactions.py`_. + +Client transactions are supported for caches with +:py:attr:`~pyignite.datatypes.cache_config.CacheAtomicityMode.TRANSACTIONAL` mode. + +Let's create transactional cache: + +.. 
literalinclude:: ../examples/transactions.py + :language: python + :dedent: 8 + :lines: 84-87 + +Let's start a transaction and commit it: + +.. literalinclude:: ../examples/transactions.py + :language: python + :dedent: 8 + :lines: 90-96 + +Let's check that the transaction was committed successfully: + +.. literalinclude:: ../examples/transactions.py + :language: python + :dedent: 8 + :lines: 98-99 + +Let's check that raising exception inside `with` block leads to transaction's rollback + +.. literalinclude:: ../examples/transactions.py + :language: python + :dedent: 8 + :lines: 102-113 + +Let's check that timed out transaction is successfully rolled back + +.. literalinclude:: ../examples/transactions.py + :language: python + :dedent: 8 + :lines: 116-126 + +See more info about transaction's parameters in a documentation of :py:meth:`~pyignite.client.Client.tx_start` SQL --- @@ -135,28 +265,46 @@ First let us establish a connection. .. literalinclude:: ../examples/sql.py :language: python - :lines: 195-196 + :lines: 20-21 Then create tables. Begin with `Country` table, than proceed with related tables `City` and `CountryLanguage`. +.. literalinclude:: ../examples/helpers/sql_helper.py + :language: python + :dedent: 4 + :lines: 27-43, 53-60, 68-74 + .. literalinclude:: ../examples/sql.py :language: python - :lines: 25-42, 51-59, 67-74, 199-204 + :dedent: 4 + :lines: 23-28 Create indexes. +.. literalinclude:: ../examples/helpers/sql_helper.py + :language: python + :dedent: 4 + :lines: 62, 76 + .. literalinclude:: ../examples/sql.py :language: python - :lines: 60-62, 75-77, 207-208 + :dedent: 4 + :lines: 31-32 Fill tables with data. +.. literalinclude:: ../examples/helpers/sql_helper.py + :language: python + :dedent: 4 + :lines: 45-51, 64-66, 78-80 + .. literalinclude:: ../examples/sql.py :language: python - :lines: 43-50, 63-66, 78-81, 211-218 + :dedent: 4 + :lines: 35-42 -Data samples are taken from `Ignite GitHub repository`_. +Data samples are taken from `PyIgnite GitHub repository`_. That concludes the preparation of data. Now let us answer some questions. @@ -165,7 +313,8 @@ What are the 10 largest cities in our data sample (population-wise)? .. literalinclude:: ../examples/sql.py :language: python - :lines: 24, 221-238 + :dedent: 4 + :lines: 45-59 The :py:meth:`~pyignite.client.Client.sql` method returns a generator, that yields the resulting rows. @@ -180,20 +329,27 @@ column names as a first yield. You can access field names with Python built-in .. literalinclude:: ../examples/sql.py :language: python - :lines: 241-269 + :dedent: 4 + :lines: 62-88 Display all the information about a given city ============================================== .. literalinclude:: ../examples/sql.py :language: python - :lines: 272-290 + :dedent: 4 + :lines: 92-103 Finally, delete the tables used in this example with the following queries: +.. literalinclude:: ../examples/helpers/sql_helper.py + :language: python + :lines: 82 + .. literalinclude:: ../examples/sql.py :language: python - :lines: 82-83, 293-298 + :dedent: 4 + :lines: 106-107 .. _complex_object_usage: @@ -235,24 +391,42 @@ automatically when reading Complex objects. .. literalinclude:: ../examples/binary_basics.py :language: python - :lines: 18-20, 30-34, 39-42, 48-49 + :dedent: 4 + :lines: 36-38, 40-43, 45-46 Here you can see how :class:`~pyignite.binary.GenericObjectMeta` uses `attrs`_ package internally for creating nice `__init__()` and `__repr__()` methods. 
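+
+For reference, below is a minimal, self-contained sketch of the same pattern.
+It is not taken from `binary_basics.py`_: the cache name and the field set are
+illustrative, and a local node on the default port is assumed.
+
+.. code-block:: python3
+
+    from collections import OrderedDict
+
+    from pyignite import Client, GenericObjectMeta
+    from pyignite.datatypes import IntObject, String
+
+    class Person(metaclass=GenericObjectMeta, schema=OrderedDict([
+        ('first_name', String),
+        ('age', IntObject),
+    ])):
+        pass
+
+    client = Client()
+    with client.connect('127.0.0.1', 10800):
+        cache = client.get_or_create_cache('person_cache')
+        # the metaclass supplies __init__() and __repr__() for the dataclass
+        cache.put(1, Person(first_name='Ivan', age=30))
+        print(cache.get(1))
+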
-You can reuse the autogenerated class for subsequent writes: +In this case the autogenerated dataclass's name `Person` is exactly matches +the type name of the Complex object it represents (the content of the +:py:attr:`~pyignite.datatypes.base.IgniteDataTypeProps.type_name` property). +But when Complex object's class name contains characters, that can not be used +in a Python identifier, for example: + +- `.`, when fully qualified Java class names are used, +- `$`, a common case for Scala classes, +- `+`, internal class name separator in C#, + +then `pyignite` can not maintain this match. In such cases `pyignite` tries +to sanitize a type name to derive a “good” dataclass name from it. + +If your code needs consistent naming between the server and the client, make +sure that your Ignite cluster is configured to use `simple class names`_. + +Anyway, you can reuse the autogenerated dataclass for subsequent writes: .. literalinclude:: ../examples/binary_basics.py :language: python - :lines: 53, 34-37 + :dedent: 4 + :lines: 50, 32-34 :class:`~pyignite.binary.GenericObjectMeta` can also be used directly for creating custom classes: .. literalinclude:: ../examples/binary_basics.py :language: python - :lines: 22-27 + :lines: 20-25 Note how the `Person` class is defined. `schema` is a :class:`~pyignite.binary.GenericObjectMeta` metaclass parameter. @@ -271,7 +445,8 @@ register said class explicitly with your client: .. literalinclude:: ../examples/binary_basics.py :language: python - :lines: 51 + :dedent: 4 + :lines: 48 Now, when we dealt with the basics of `pyignite` implementation of Complex Objects, let us move on to more elaborate examples. @@ -292,7 +467,8 @@ Let us do it again and examine the Ignite storage afterwards. .. literalinclude:: ../examples/read_binary.py :language: python - :lines: 222-229 + :dedent: 4 + :lines: 49-51 We can see that Ignite created a cache for each of our tables. The caches are conveniently named using ‘`SQL__`’ pattern. @@ -302,7 +478,8 @@ using a :py:attr:`~pyignite.cache.Cache.settings` property. .. literalinclude:: ../examples/read_binary.py :language: python - :lines: 231-251 + :dedent: 4 + :lines: 53-103 The values of `value_type_name` and `key_type_name` are names of the binary types. The `City` table's key fields are stored using `key_type_name` type, @@ -314,7 +491,8 @@ functions and verify the correctness of the result. .. literalinclude:: ../examples/read_binary.py :language: python - :lines: 253-267 + :dedent: 4 + :lines: 106-115 What we see is a tuple of key and value, extracted from the cache. Both key and value are represent Complex objects. The dataclass names are the same @@ -349,37 +527,36 @@ These are the necessary steps to perform the task. .. literalinclude:: ../examples/create_binary.py :language: python - :lines: 22-63 + :dedent: 4 + :lines: 31-69 2. Define Complex object data class. .. literalinclude:: ../examples/create_binary.py :language: python - :lines: 66-76 + :lines: 21-26 3. Insert row. .. literalinclude:: ../examples/create_binary.py :language: python - :lines: 79-83 + :dedent: 4 + :lines: 71-75 Now let us make sure that our cache really can be used with SQL functions. .. literalinclude:: ../examples/create_binary.py :language: python - :lines: 85-93 + :dedent: 4 + :lines: 77-82 Note, however, that the cache we create can not be dropped with DDL command. - -.. literalinclude:: ../examples/create_binary.py - :language: python - :lines: 95-100 - It should be deleted as any other key-value cache. .. 
literalinclude:: ../examples/create_binary.py :language: python - :lines: 102 + :dedent: 4 + :lines: 84-91 Migrate ======= @@ -398,7 +575,8 @@ First get the vouchers' cache. .. literalinclude:: ../examples/migrate_binary.py :language: python - :lines: 108-111 + :dedent: 4 + :lines: 109 If you do not store the schema of the Complex object in code, you can obtain it as a dataclass property with @@ -406,20 +584,21 @@ it as a dataclass property with .. literalinclude:: ../examples/migrate_binary.py :language: python - :lines: 116-123 + :dedent: 4 + :lines: 115-119 Let us modify the schema and create a new Complex object class with an updated schema. .. literalinclude:: ../examples/migrate_binary.py :language: python - :lines: 125-138 + :lines: 121-137 Now migrate the data from the old schema to the new one. .. literalinclude:: ../examples/migrate_binary.py :language: python - :lines: 141-190 + :lines: 140-190 At this moment all the fields, defined in both of our schemas, can be available in the resulting binary object, depending on which schema was used @@ -445,59 +624,28 @@ When connection to the server is broken or timed out, (`OSError` or `SocketError`), but keeps its constructor's parameters intact and tries to reconnect transparently. -When there's no way for :class:`~pyignite.client.Client` to reconnect, it -raises a special :class:`~pyignite.exceptions.ReconnectError` exception. +When :class:`~pyignite.client.Client` detects that all nodes in the list are +failed without the possibility of restoring connection, it raises a special +:class:`~pyignite.exceptions.ReconnectError` exception. -The following example features a simple node list traversal failover mechanism. Gather 3 Ignite nodes on `localhost` into one cluster and run: .. literalinclude:: ../examples/failover.py :language: python - :lines: 16-49 + :lines: 16-52 Then try shutting down and restarting nodes, and see what happens. .. literalinclude:: ../examples/failover.py :language: python - :lines: 51-61 + :lines: 54-66 Client reconnection do not require an explicit user action, like calling -a special method or resetting a parameter. Note, however, that reconnection -is lazy: it happens only if (and when) it is needed. In this example, -the automatic reconnection happens, when the script checks upon the last -saved value: - -.. literalinclude:: ../examples/failover.py - :language: python - :lines: 48 - +a special method or resetting a parameter. It means that instead of checking the connection status it is better for `pyignite` user to just try the supposed data operations and catch the resulting exception. -:py:meth:`~pyignite.connection.Connection.connect` method accepts any -iterable, not just list. It means that you can implement any reconnection -policy (round-robin, nodes prioritization, pause on reconnect or graceful -backoff) with a generator. - -`pyignite` comes with a sample -:class:`~pyignite.connection.generators.RoundRobin` generator. In the above -example try to replace - -.. literalinclude:: ../examples/failover.py - :language: python - :lines: 29 - -with - -.. code-block:: python3 - - client.connect(RoundRobin(nodes, max_reconnects=20)) - -The client will try to reconnect to node 1 after node 3 is crashed, then to -node 2, et c. At least one node should be active for the -:class:`~pyignite.connection.generators.RoundRobin` to work properly. - SSL/TLS ------- @@ -604,21 +752,28 @@ with the following message: # pyignite.exceptions.HandshakeError: Handshake error: Unauthenticated sessions are prohibited. -.. 
_get_and_put.py: https://github.com/apache/ignite/tree/master/modules/platforms/python/examples/get_and_put.py -.. _type_hints.py: https://github.com/apache/ignite/tree/master/modules/platforms/python/examples/type_hints.py -.. _failover.py: https://github.com/apache/ignite/tree/master/modules/platforms/python/examples/failover.py -.. _scans.py: https://github.com/apache/ignite/tree/master/modules/platforms/python/examples/scans.py -.. _sql.py: https://github.com/apache/ignite/tree/master/modules/platforms/python/examples/sql.py -.. _binary_basics.py: https://github.com/apache/ignite/tree/master/modules/platforms/python/examples/binary_basics.py -.. _read_binary.py: https://github.com/apache/ignite/tree/master/modules/platforms/python/examples/read_binary.py -.. _create_binary.py: https://github.com/apache/ignite/tree/master/modules/platforms/python/examples/create_binary.py -.. _migrate_binary.py: https://github.com/apache/ignite/tree/master/modules/platforms/python/examples/migrate_binary.py -.. _Getting Started: https://apacheignite-sql.readme.io/docs/getting-started -.. _Ignite GitHub repository: https://github.com/apache/ignite/blob/master/examples/sql/world.sql -.. _Complex object: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-complex-object +.. _get_and_put.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/get_and_put.py +.. _async_key_value.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/async_key_value.py +.. _type_hints.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/type_hints.py +.. _failover.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/failover.py +.. _scans.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/scans.py +.. _expiry_policy.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/expiry_policy.py +.. _sql.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/sql.py +.. _async_sql.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/async_sql.py +.. _binary_basics.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/binary_basics.py +.. _read_binary.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/read_binary.py +.. _create_binary.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/create_binary.py +.. _migrate_binary.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/migrate_binary.py +.. _transactions.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/transactions.py +.. _Getting Started: https://ignite.apache.org/docs/latest/thin-clients/python-thin-client +.. _PyIgnite GitHub repository: https://github.com/apache/ignite-python-thin-client/blob/master +.. _Complex object: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#complex-object .. _Java keytool: https://docs.oracle.com/javase/8/docs/technotes/tools/unix/keytool.html -.. _Securing Connection Between Nodes: https://apacheignite.readme.io/docs/ssltls#section-securing-connection-between-nodes +.. _Securing Connection Between Nodes: https://ignite.apache.org/docs/latest/security/ssl-tls#ssltls-for-nodes .. _ClientConnectorConfiguration: https://ignite.apache.org/releases/latest/javadoc/org/apache/ignite/configuration/ClientConnectorConfiguration.html .. _openssl: https://www.openssl.org/docs/manmaster/man1/openssl.html -.. 
_Authentication: https://apacheignite.readme.io/docs/advanced-security#section-authentication +.. _Authentication: https://ignite.apache.org/docs/latest/security/authentication .. _attrs: https://pypi.org/project/attrs/ +.. _get_and_put_complex.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/get_and_put.py +.. _Collection: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#collection +.. _simple class names: https://ignite.apache.org/docs/latest/data-modeling/binary-marshaller#binary-name-mapper-and-binary-id-mapper diff --git a/docs/images/partitionawareness01.png b/docs/images/partitionawareness01.png new file mode 100644 index 0000000..51c11a7 Binary files /dev/null and b/docs/images/partitionawareness01.png differ diff --git a/docs/images/partitionawareness02.png b/docs/images/partitionawareness02.png new file mode 100644 index 0000000..d6698be Binary files /dev/null and b/docs/images/partitionawareness02.png differ diff --git a/docs/index.rst b/docs/index.rst index 35bd18c..5a6f8d3 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -22,7 +22,9 @@ Welcome to Apache Ignite binary client Python API documentation! readme modules + partition_awareness examples + async_examples Indices and tables diff --git a/docs/modules.rst b/docs/modules.rst index cabc915..bdeec8e 100644 --- a/docs/modules.rst +++ b/docs/modules.rst @@ -21,11 +21,14 @@ The modules and subpackages listed here are the basis of a stable API of `pyignite`, intended for end users. .. toctree:: - :maxdepth: 1 - :caption: Modules: + :maxdepth: 1 + :caption: Modules: - Client - Cache - datatypes/parsers - datatypes/cache_props - Exceptions + Client + AioClient + Cache + AioCache + datatypes/parsers + datatypes/cache_props + Exceptions + Monitoring and handling events diff --git a/docs/partition_awareness.rst b/docs/partition_awareness.rst new file mode 100644 index 0000000..5382dfc --- /dev/null +++ b/docs/partition_awareness.rst @@ -0,0 +1,63 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +=================== +Partition Awareness +=================== + +Partition awareness allows the thin client to send query requests directly to the node that owns the queried data. + +Without partition awareness, an application that is connected to the cluster via a thin client executes all queries and operations via a single server node that acts as a proxy for the incoming requests. These operations are then re-routed to the node that stores the data that is being requested. This results in a bottleneck that could prevent the application from scaling linearly. + +.. image:: images/partitionawareness01.png + :alt: Without partition awareness + +Notice how queries must pass through the proxy server node, where they are routed to the correct node. 
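+
+For orientation, here is a minimal sketch of the kind of key-value round trip
+that benefits from direct routing. The cache name and node addresses are
+placeholders; the flag that controls this behaviour is described just below
+and is enabled by default.
+
+.. code-block:: python3
+
+    from pyignite import Client
+
+    # list every server node, so that the client can route each request
+    # to the primary node that owns the requested key
+    nodes = [('10.128.0.1', 10800), ('10.128.0.2', 10800), ('10.128.0.3', 10800)]
+
+    client = Client()  # partition awareness is enabled by default
+    with client.connect(nodes):
+        cache = client.get_or_create_cache('my_cache')
+        cache.put(42, 'value')  # goes straight to the primary node for key 42
+        print(cache.get(42))
+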
+ +With partition awareness in place, the thin client can directly route queries and operations to the primary nodes that own the data required for the queries. This eliminates the bottleneck, allowing the application to scale more easily. + +.. image:: images/partitionawareness02.png + :alt: With partition awareness + +Partition awareness can be enabled or disabled by setting `partition_aware` parameter in +:meth:`pyignite.client.Client.__init__` or :meth:`pyignite.aio_client.AioClient.__init__` to `True` (by default) +or `False`. + +Also, it is recommended to pass list of address and port pairs of all server nodes +to :meth:`pyignite.client.Client.connect` or to :meth:`pyignite.aio_client.AioClient.connect`. + +For example: + +.. code-block:: python3 + + from pyignite import Client + + client = Client( + partition_awareness=True + ) + nodes = [('10.128.0.1', 10800), ('10.128.0.2', 10800),...] + with client.connect(nodes): + .... + +.. code-block:: python3 + + from pyignite import AioClient + + client = AioClient( + partition_awareness=True + ) + nodes = [('10.128.0.1', 10800), ('10.128.0.2', 10800),...] + async with client.connect(nodes): + .... \ No newline at end of file diff --git a/docs/readme.rst b/docs/readme.rst index f91274e..17eb4b5 100644 --- a/docs/readme.rst +++ b/docs/readme.rst @@ -35,9 +35,9 @@ through a raw TCP socket. Prerequisites ------------- -- *Python 3.4* or above (3.6 is tested), +- *Python 3.7* or above (3.7, 3.8, 3.9 and 3.10 are tested), - Access to *Apache Ignite* node, local or remote. The current thin client - version was tested on *Apache Ignite 2.7.0* (binary client protocol 1.2.0). + version was tested on *Apache Ignite 2.13.0* (binary client protocol 1.7.0). Installation ------------ @@ -59,8 +59,7 @@ the whole repository: :: -$ git clone git@github.com:apache/ignite.git -$ cd ignite/modules/platforms/python +$ git clone git@github.com:apache/ignite-python-thin-client.git $ pip install -e . This will install the repository version of `pyignite` into your environment @@ -74,13 +73,26 @@ the the additional requirements into your working Python environment using $ pip install -r requirements/.txt + +For development, it is recommended to install `tests` requirements + +:: + +$ pip install -r requirements/tests.txt + +For checking codestyle run: + +:: + +$ flake8 + You may also want to consult the `setuptools`_ manual about using `setup.py`. Examples -------- Some examples of using pyignite are provided in -`ignite/modules/platforms/python/examples` folder. They are extensively +`examples` folder. They are extensively commented in the :ref:`examples_of_usage` section of the documentation. This code implies that it is run in the environment with `pyignite` package @@ -93,58 +105,26 @@ the explanation of testing, look up the `Testing`_ section. Testing ------- -Create and activate virtualenv_ environment. Run - -:: +Create and activate virtualenv_ environment. -$ cd ignite/modules/platforms/python -$ python ./setup.py pytest - -This does not require `pytest` and other test dependencies to be installed -in your environment. - -Some or all tests require Apache Ignite node running on localhost:10800. -To override the default parameters, use command line options -``--ignite-host`` and ``--ignite-port``: +Install a binary release of Ignite with `log4j2` enabled and set `IGNITE_HOME` environment variable. :: -$ python ./setup.py pytest --addopts "--ignite-host=example.com --ignite-port=19840" - -You can use each of these two options multiple times. 
All combinations -of given host and port will be tested. +$ cd +$ export IGNITE_HOME=$(pwd) +$ cp -r $IGNITE_HOME/libs/optional/ignite-log4j2 $IGNITE_HOME/libs/ -You can also test client against a server with SSL-encrypted connection. -SSL-related `pytest` parameters are: -``--use-ssl`` − use SSL encryption, +Run -``--ssl-certfile`` − a path to ssl certificate file to identify local party, - -``--ssl-ca-certfile`` − a path to a trusted certificate or a certificate chain, - -``--ssl-cert-reqs`` − determines how the remote side certificate is treated: - -- ``NONE`` (ignore, default), -- ``OPTIONAL`` (validate, if provided), -- ``REQUIRED`` (valid remote certificate is required), - -``--ssl-ciphers`` − ciphers to use, - -``--ssl-version`` − SSL version: +:: -- ``TLSV1_1`` (default), -- ``TLSV1_2``. +$ pip install -e . +$ pytest Other `pytest` parameters: -``--timeout`` − timeout (in seconds) for each socket operation, including -`connect`. Accepts integer or float value. Default is None (blocking mode), - -``--username`` and ``--password`` − credentials to authenticate to Ignite -cluster. Used in conjunction with `authenticationEnabled` property in cluster -configuration. - ``--examples`` − run the examples as one test. If you wish to run *only* the examples, supply also the name of the test function to `pytest` launcher: @@ -163,25 +143,33 @@ Since failover, SSL and authentication examples are meant to be controlled by user or depend on special configuration of the Ignite cluster, they can not be automated. +Using tox +""""""""" +For automate running tests against different python version, it is recommended to use tox_ + +:: + +$ pip install tox +$ tox + + Documentation ------------- To recompile this documentation, do this from your virtualenv_ environment: :: -$ cd ignite/modules/platforms/python $ pip install -r requirements/docs.txt $ cd docs $ make html -Then open `ignite/modules/platforms/python/docs/generated/html/index.html`_ +Then open `docs/generated/html/index.html`_ in your browser. If you feel that old version is stuck, do :: -$ cd ignite/modules/platforms/python/docs $ make clean $ sphinx-apidoc -feM -o source/ ../ ../setup.py $ make html @@ -193,10 +181,11 @@ Licensing This is a free software, brought to you on terms of the `Apache License v2`_. -.. _Apache Ignite: https://apacheignite.readme.io/docs/what-is-ignite -.. _binary client protocol: https://apacheignite.readme.io/docs/binary-client-protocol +.. _Apache Ignite: https://ignite.apache.org +.. _binary client protocol: https://ignite.apache.org/docs/latest/binary-client-protocol/binary-client-protocol .. _Apache License v2: http://www.apache.org/licenses/LICENSE-2.0 .. _virtualenv: https://virtualenv.pypa.io/ +.. _tox: https://tox.readthedocs.io/en/latest/ .. _setuptools: https://setuptools.readthedocs.io/ -.. _ignite/modules/platforms/python/docs/generated/html/index.html: . +.. _docs/generated/html/index.html: . .. _editable installs: https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs diff --git a/docs/source/modules.rst b/docs/source/modules.rst index c125dd3..189a011 100644 --- a/docs/source/modules.rst +++ b/docs/source/modules.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. 
+ The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + python ====== diff --git a/docs/source/pyignite.aio_cache.rst b/docs/source/pyignite.aio_cache.rst new file mode 100644 index 0000000..b62a33a --- /dev/null +++ b/docs/source/pyignite.aio_cache.rst @@ -0,0 +1,22 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.aio_cache module +========================= + +.. automodule:: pyignite.aio_cache + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/pyignite.aio_client.rst b/docs/source/pyignite.aio_client.rst new file mode 100644 index 0000000..922c559 --- /dev/null +++ b/docs/source/pyignite.aio_client.rst @@ -0,0 +1,22 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.aio_client module +========================== + +.. automodule:: pyignite.aio_client + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/pyignite.aio_cluster.rst b/docs/source/pyignite.aio_cluster.rst new file mode 100644 index 0000000..ee2fa1b --- /dev/null +++ b/docs/source/pyignite.aio_cluster.rst @@ -0,0 +1,22 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. 
Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.aio_cluster module +=========================== + +.. automodule:: pyignite.aio_cluster + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/pyignite.api.binary.rst b/docs/source/pyignite.api.binary.rst deleted file mode 100644 index 49f1c86..0000000 --- a/docs/source/pyignite.api.binary.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.api.binary module -========================== - -.. automodule:: pyignite.api.binary - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.api.cache_config.rst b/docs/source/pyignite.api.cache_config.rst deleted file mode 100644 index 599c857..0000000 --- a/docs/source/pyignite.api.cache_config.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.api.cache\_config module -================================= - -.. automodule:: pyignite.api.cache_config - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.api.key_value.rst b/docs/source/pyignite.api.key_value.rst deleted file mode 100644 index 52d6c3f..0000000 --- a/docs/source/pyignite.api.key_value.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.api.key\_value module -============================== - -.. automodule:: pyignite.api.key_value - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.api.result.rst b/docs/source/pyignite.api.result.rst deleted file mode 100644 index 21398e3..0000000 --- a/docs/source/pyignite.api.result.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.api.result module -========================== - -.. automodule:: pyignite.api.result - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.api.rst b/docs/source/pyignite.api.rst deleted file mode 100644 index e18d4a3..0000000 --- a/docs/source/pyignite.api.rst +++ /dev/null @@ -1,19 +0,0 @@ -pyignite.api package -==================== - -.. automodule:: pyignite.api - :members: - :undoc-members: - :show-inheritance: - -Submodules ----------- - -.. toctree:: - - pyignite.api.binary - pyignite.api.cache_config - pyignite.api.key_value - pyignite.api.result - pyignite.api.sql - diff --git a/docs/source/pyignite.api.sql.rst b/docs/source/pyignite.api.sql.rst deleted file mode 100644 index 84479ad..0000000 --- a/docs/source/pyignite.api.sql.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.api.sql module -======================= - -.. automodule:: pyignite.api.sql - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.binary.rst b/docs/source/pyignite.binary.rst index 6b21582..eeab940 100644 --- a/docs/source/pyignite.binary.rst +++ b/docs/source/pyignite.binary.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. 
Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.binary module ====================== diff --git a/docs/source/pyignite.cache.rst b/docs/source/pyignite.cache.rst index e6e83c5..f4099de 100644 --- a/docs/source/pyignite.cache.rst +++ b/docs/source/pyignite.cache.rst @@ -1,7 +1,22 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.cache module ===================== .. automodule:: pyignite.cache :members: :undoc-members: - :show-inheritance: + :inherited-members: diff --git a/docs/source/pyignite.client.rst b/docs/source/pyignite.client.rst index fef316b..e978dc1 100644 --- a/docs/source/pyignite.client.rst +++ b/docs/source/pyignite.client.rst @@ -1,7 +1,22 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.client module ====================== .. automodule:: pyignite.client :members: :undoc-members: - :show-inheritance: + :inherited-members: diff --git a/docs/source/pyignite.cluster.rst b/docs/source/pyignite.cluster.rst new file mode 100644 index 0000000..cacdfb7 --- /dev/null +++ b/docs/source/pyignite.cluster.rst @@ -0,0 +1,22 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + +pyignite.cluster module +======================= + +.. automodule:: pyignite.cluster + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/pyignite.connection.generators.rst b/docs/source/pyignite.connection.generators.rst deleted file mode 100644 index daecda3..0000000 --- a/docs/source/pyignite.connection.generators.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.connection.generators module -===================================== - -.. automodule:: pyignite.connection.generators - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.connection.handshake.rst b/docs/source/pyignite.connection.handshake.rst deleted file mode 100644 index 28e83df..0000000 --- a/docs/source/pyignite.connection.handshake.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.connection.handshake module -==================================== - -.. automodule:: pyignite.connection.handshake - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.connection.protocol_context.rst b/docs/source/pyignite.connection.protocol_context.rst new file mode 100644 index 0000000..1ec3c81 --- /dev/null +++ b/docs/source/pyignite.connection.protocol_context.rst @@ -0,0 +1,20 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.connection.protocol_context package +============================================ + +.. automodule:: pyignite.connection.protocol_context + :members: \ No newline at end of file diff --git a/docs/source/pyignite.connection.rst b/docs/source/pyignite.connection.rst index 92b07a7..29c2e57 100644 --- a/docs/source/pyignite.connection.rst +++ b/docs/source/pyignite.connection.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.connection package =========================== @@ -10,8 +25,4 @@ Submodules ---------- .. 
toctree:: - - pyignite.connection.generators - pyignite.connection.handshake - pyignite.connection.ssl - + pyignite.connection.protocol_context \ No newline at end of file diff --git a/docs/source/pyignite.connection.ssl.rst b/docs/source/pyignite.connection.ssl.rst deleted file mode 100644 index 8eebf43..0000000 --- a/docs/source/pyignite.connection.ssl.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.connection.ssl module -============================== - -.. automodule:: pyignite.connection.ssl - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.constants.rst b/docs/source/pyignite.constants.rst deleted file mode 100644 index f71e4f1..0000000 --- a/docs/source/pyignite.constants.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.constants module -========================= - -.. automodule:: pyignite.constants - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.cursors.rst b/docs/source/pyignite.cursors.rst new file mode 100644 index 0000000..6415a16 --- /dev/null +++ b/docs/source/pyignite.cursors.rst @@ -0,0 +1,22 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.cursors module +======================= + +.. automodule:: pyignite.cursors + :members: + :undoc-members: + :inherited-members: diff --git a/docs/source/pyignite.datatypes.base.rst b/docs/source/pyignite.datatypes.base.rst index 849a028..c482904 100644 --- a/docs/source/pyignite.datatypes.base.rst +++ b/docs/source/pyignite.datatypes.base.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.base module ============================== diff --git a/docs/source/pyignite.datatypes.binary.rst b/docs/source/pyignite.datatypes.binary.rst index 0d175de..37de8b8 100644 --- a/docs/source/pyignite.datatypes.binary.rst +++ b/docs/source/pyignite.datatypes.binary.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. 
+ The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.binary module ================================ diff --git a/docs/source/pyignite.datatypes.cache_config.rst b/docs/source/pyignite.datatypes.cache_config.rst index 3d5eaeb..4b63637 100644 --- a/docs/source/pyignite.datatypes.cache_config.rst +++ b/docs/source/pyignite.datatypes.cache_config.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.cache\_config module ======================================= diff --git a/docs/source/pyignite.datatypes.cache_properties.rst b/docs/source/pyignite.datatypes.cache_properties.rst index 57f0e9f..d626366 100644 --- a/docs/source/pyignite.datatypes.cache_properties.rst +++ b/docs/source/pyignite.datatypes.cache_properties.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.cache\_properties module =========================================== diff --git a/docs/source/pyignite.datatypes.cluster_state.rst b/docs/source/pyignite.datatypes.cluster_state.rst new file mode 100644 index 0000000..a1d7663 --- /dev/null +++ b/docs/source/pyignite.datatypes.cluster_state.rst @@ -0,0 +1,21 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. 
You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.datatypes.cluster_state module +======================================= + +.. automodule:: pyignite.datatypes.cluster_state + :members: + :show-inheritance: diff --git a/docs/source/pyignite.datatypes.complex.rst b/docs/source/pyignite.datatypes.complex.rst index 1e3f21e..83ecacc 100644 --- a/docs/source/pyignite.datatypes.complex.rst +++ b/docs/source/pyignite.datatypes.complex.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.complex module ================================= diff --git a/docs/source/pyignite.datatypes.expiry_policy.rst b/docs/source/pyignite.datatypes.expiry_policy.rst new file mode 100644 index 0000000..87d651e --- /dev/null +++ b/docs/source/pyignite.datatypes.expiry_policy.rst @@ -0,0 +1,21 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.datatypes.expiry_policy module +======================================= + +.. automodule:: pyignite.datatypes.expiry_policy + :members: + :show-inheritance: diff --git a/docs/source/pyignite.datatypes.internal.rst b/docs/source/pyignite.datatypes.internal.rst index 5dc5535..a3e5dcc 100644 --- a/docs/source/pyignite.datatypes.internal.rst +++ b/docs/source/pyignite.datatypes.internal.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. 
http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.internal module ================================== diff --git a/docs/source/pyignite.datatypes.key_value.rst b/docs/source/pyignite.datatypes.key_value.rst index 0b3aa88..46d83dd 100644 --- a/docs/source/pyignite.datatypes.key_value.rst +++ b/docs/source/pyignite.datatypes.key_value.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.key\_value module ==================================== diff --git a/docs/source/pyignite.datatypes.null_object.rst b/docs/source/pyignite.datatypes.null_object.rst index 05f22b1..5d6381f 100644 --- a/docs/source/pyignite.datatypes.null_object.rst +++ b/docs/source/pyignite.datatypes.null_object.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.null\_object module ====================================== diff --git a/docs/source/pyignite.datatypes.primitive.rst b/docs/source/pyignite.datatypes.primitive.rst index 8a53604..3fa2797 100644 --- a/docs/source/pyignite.datatypes.primitive.rst +++ b/docs/source/pyignite.datatypes.primitive.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. 
Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.primitive module =================================== diff --git a/docs/source/pyignite.datatypes.primitive_arrays.rst b/docs/source/pyignite.datatypes.primitive_arrays.rst index b4b94bf..d261235 100644 --- a/docs/source/pyignite.datatypes.primitive_arrays.rst +++ b/docs/source/pyignite.datatypes.primitive_arrays.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.primitive\_arrays module =========================================== diff --git a/docs/source/pyignite.datatypes.primitive_objects.rst b/docs/source/pyignite.datatypes.primitive_objects.rst index a74db38..e737f3c 100644 --- a/docs/source/pyignite.datatypes.primitive_objects.rst +++ b/docs/source/pyignite.datatypes.primitive_objects.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.primitive\_objects module ============================================ diff --git a/docs/source/pyignite.datatypes.prop_codes.rst b/docs/source/pyignite.datatypes.prop_codes.rst deleted file mode 100644 index d23596b..0000000 --- a/docs/source/pyignite.datatypes.prop_codes.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.datatypes.prop\_codes module -===================================== - -.. automodule:: pyignite.datatypes.prop_codes - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.datatypes.rst b/docs/source/pyignite.datatypes.rst index d72f844..70f7714 100644 --- a/docs/source/pyignite.datatypes.rst +++ b/docs/source/pyignite.datatypes.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. 
+ The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes package ========================== @@ -16,14 +31,13 @@ Submodules pyignite.datatypes.cache_config pyignite.datatypes.cache_properties pyignite.datatypes.complex + pyignite.datatypes.cluster_state + pyignite.datatypes.expiry_policy pyignite.datatypes.internal pyignite.datatypes.key_value pyignite.datatypes.null_object pyignite.datatypes.primitive pyignite.datatypes.primitive_arrays pyignite.datatypes.primitive_objects - pyignite.datatypes.prop_codes pyignite.datatypes.sql pyignite.datatypes.standard - pyignite.datatypes.type_codes - diff --git a/docs/source/pyignite.datatypes.sql.rst b/docs/source/pyignite.datatypes.sql.rst index e20f084..8e564b8 100644 --- a/docs/source/pyignite.datatypes.sql.rst +++ b/docs/source/pyignite.datatypes.sql.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.sql module ============================= diff --git a/docs/source/pyignite.datatypes.standard.rst b/docs/source/pyignite.datatypes.standard.rst index e46d339..f181450 100644 --- a/docs/source/pyignite.datatypes.standard.rst +++ b/docs/source/pyignite.datatypes.standard.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.standard module ================================== diff --git a/docs/source/pyignite.datatypes.transactions.rst b/docs/source/pyignite.datatypes.transactions.rst new file mode 100644 index 0000000..9b38468 --- /dev/null +++ b/docs/source/pyignite.datatypes.transactions.rst @@ -0,0 +1,21 @@ +.. 
Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.datatypes.transactions module +======================================= + +.. automodule:: pyignite.datatypes.transactions + :members: + :show-inheritance: \ No newline at end of file diff --git a/docs/source/pyignite.datatypes.type_codes.rst b/docs/source/pyignite.datatypes.type_codes.rst deleted file mode 100644 index 47baa4b..0000000 --- a/docs/source/pyignite.datatypes.type_codes.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.datatypes.type\_codes module -===================================== - -.. automodule:: pyignite.datatypes.type_codes - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.exceptions.rst b/docs/source/pyignite.exceptions.rst index dd24687..563ea90 100644 --- a/docs/source/pyignite.exceptions.rst +++ b/docs/source/pyignite.exceptions.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.exceptions module ========================== diff --git a/docs/source/pyignite.monitoring.rst b/docs/source/pyignite.monitoring.rst new file mode 100644 index 0000000..98b137d --- /dev/null +++ b/docs/source/pyignite.monitoring.rst @@ -0,0 +1,21 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.monitoring module +=========================== + +.. 
automodule:: pyignite.monitoring + :members: + :member-order: bysource diff --git a/docs/source/pyignite.queries.op_codes.rst b/docs/source/pyignite.queries.op_codes.rst deleted file mode 100644 index bc556ec..0000000 --- a/docs/source/pyignite.queries.op_codes.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.queries.op\_codes module -================================= - -.. automodule:: pyignite.queries.op_codes - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.queries.rst b/docs/source/pyignite.queries.rst deleted file mode 100644 index 6dd81a2..0000000 --- a/docs/source/pyignite.queries.rst +++ /dev/null @@ -1,15 +0,0 @@ -pyignite.queries package -======================== - -.. automodule:: pyignite.queries - :members: - :undoc-members: - :show-inheritance: - -Submodules ----------- - -.. toctree:: - - pyignite.queries.op_codes - diff --git a/docs/source/pyignite.rst b/docs/source/pyignite.rst index 947cab2..7a0744c 100644 --- a/docs/source/pyignite.rst +++ b/docs/source/pyignite.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite package ================ @@ -11,20 +26,23 @@ Subpackages .. toctree:: - pyignite.api - pyignite.connection pyignite.datatypes - pyignite.queries + pyignite.connection Submodules ---------- .. toctree:: - pyignite.binary - pyignite.cache - pyignite.client - pyignite.constants - pyignite.exceptions - pyignite.utils + pyignite.binary + pyignite.cache + pyignite.aio_cache + pyignite.client + pyignite.aio_client + pyignite.cluster + pyignite.aio_cluster + pyignite.transaction + pyignite.cursors + pyignite.exceptions + pyignite.monitoring diff --git a/docs/source/pyignite.transaction.rst b/docs/source/pyignite.transaction.rst new file mode 100644 index 0000000..b0301f4 --- /dev/null +++ b/docs/source/pyignite.transaction.rst @@ -0,0 +1,22 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.transaction module +=========================== + +.. 
automodule:: pyignite.transaction + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/pyignite.utils.rst b/docs/source/pyignite.utils.rst deleted file mode 100644 index 5ee42ab..0000000 --- a/docs/source/pyignite.utils.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.utils module -===================== - -.. automodule:: pyignite.utils - :members: - :undoc-members: - :show-inheritance: diff --git a/examples/async_key_value.py b/examples/async_key_value.py new file mode 100644 index 0000000..7379874 --- /dev/null +++ b/examples/async_key_value.py @@ -0,0 +1,67 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +from pprint import pprint + +from pyignite import AioClient + + +async def main(): + # Create client and connect. + client = AioClient() + async with client.connect('127.0.0.1', 10800): + # Create cache + cache = await client.get_or_create_cache('test_async_cache') + + # Load data concurrently. + await asyncio.gather( + *[cache.put(f'key_{i}', f'value_{i}') for i in range(0, 20)] + ) + + # Key-value queries. + print(await cache.get('key_10')) + # value_10 + pprint(await cache.get_all([f'key_{i}' for i in range(0, 10)])) + # {'key_0': 'value_0', + # 'key_1': 'value_1', + # 'key_2': 'value_2', + # 'key_3': 'value_3', + # 'key_4': 'value_4', + # 'key_5': 'value_5', + # 'key_6': 'value_6', + # 'key_7': 'value_7', + # 'key_8': 'value_8', + # 'key_9': 'value_9'} + + # Scan query. + async with cache.scan() as cursor: + async for k, v in cursor: + print(f'key = {k}, value = {v}') + # key = key_42, value = value_42 + # key = key_43, value = value_43 + # key = key_40, value = value_40 + # key = key_41, value = value_41 + # key = key_37, value = value_37 + # key = key_51, value = value_51 + # key = key_20, value = value_20 + # ...... + + # Clean up. + await cache.destroy() + + +if __name__ == '__main__': + asyncio.run(main()) diff --git a/examples/async_sql.py b/examples/async_sql.py new file mode 100644 index 0000000..d8de9f6 --- /dev/null +++ b/examples/async_sql.py @@ -0,0 +1,119 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio + +from helpers.sql_helper import TableNames, Query, TestData +from pyignite import AioClient + + +async def main(): + # establish connection + client = AioClient() + async with client.connect('127.0.0.1', 10800): + # create tables + for query in [ + Query.COUNTRY_CREATE_TABLE, + Query.CITY_CREATE_TABLE, + Query.LANGUAGE_CREATE_TABLE, + ]: + await client.sql(query) + + # create indices + for query in [Query.CITY_CREATE_INDEX, Query.LANGUAGE_CREATE_INDEX]: + await client.sql(query) + + # load data concurrently. + await asyncio.gather(*[ + client.sql(Query.COUNTRY_INSERT, query_args=row) for row in TestData.COUNTRY + ]) + + await asyncio.gather(*[ + client.sql(Query.CITY_INSERT, query_args=row) for row in TestData.CITY + ]) + + await asyncio.gather(*[ + client.sql(Query.LANGUAGE_INSERT, query_args=row) for row in TestData.LANGUAGE + ]) + + # 10 most populated cities (with pagination) + async with client.sql('SELECT name, population FROM City ORDER BY population DESC LIMIT 10') as cursor: + print('Most 10 populated cities:') + async for row in cursor: + print(row) + # Most 10 populated cities: + # ['Mumbai (Bombay)', 10500000] + # ['Shanghai', 9696300] + # ['New York', 8008278] + # ['Peking', 7472000] + # ['Delhi', 7206704] + # ['Chongqing', 6351600] + # ['Tianjin', 5286800] + # ['Calcutta [Kolkata]', 4399819] + # ['Wuhan', 4344600] + # ['Harbin', 4289800] + print('-' * 20) + # 10 most populated cities in 3 countries (with pagination and header row) + most_populated_in_3_countries = ''' + SELECT country.name as country_name, city.name as city_name, MAX(city.population) AS max_pop FROM country + JOIN city ON city.countrycode = country.code + WHERE country.code IN ('USA','IND','CHN') + GROUP BY country.name, city.name ORDER BY max_pop DESC LIMIT 10 + ''' + + async with client.sql(most_populated_in_3_countries, include_field_names=True) as cursor: + print('Most 10 populated cities in USA, India and China:') + table_str_pattern = '{:15}\t| {:20}\t| {}' + print(table_str_pattern.format(*await cursor.__anext__())) + print('*' * 50) + async for row in cursor: + print(table_str_pattern.format(*row)) + # Most 10 populated cities in USA, India and China: + # COUNTRY_NAME | CITY_NAME | MAX_POP + # ************************************************** + # India | Mumbai (Bombay) | 10500000 + # China | Shanghai | 9696300 + # United States | New York | 8008278 + # China | Peking | 7472000 + # India | Delhi | 7206704 + # China | Chongqing | 6351600 + # China | Tianjin | 5286800 + # India | Calcutta [Kolkata] | 4399819 + # China | Wuhan | 4344600 + # China | Harbin | 4289800 + print('-' * 20) + # show city info + async with client.sql('SELECT * FROM City WHERE id = ?', query_args=[3802], include_field_names=True) as cursor: + field_names = await cursor.__anext__() + field_data = await cursor.__anext__() + + print('City info:') + for field_name, field_value in zip(field_names * len(field_data), field_data): + print('{}: {}'.format(field_name, field_value)) + # City info: + # ID: 3802 + # NAME: Detroit + # COUNTRYCODE: USA + # DISTRICT: Michigan + # POPULATION: 951270 + + # clean up concurrently. 
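+        # Each table name in the TableNames enum is formatted into the
+        # 'DROP TABLE {} IF EXISTS' template, and the drops run concurrently.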
+ await asyncio.gather(*[ + client.sql(Query.DROP_TABLE.format(table_name.value)) for table_name in TableNames + ]) + + +if __name__ == '__main__': + asyncio.run(main()) diff --git a/examples/binary_basics.py b/examples/binary_basics.py index 96a9058..835cdc4 100644 --- a/examples/binary_basics.py +++ b/examples/binary_basics.py @@ -13,41 +13,40 @@ # See the License for the specific language governing permissions and # limitations under the License. -from collections import OrderedDict - from pyignite import Client, GenericObjectMeta -from pyignite.datatypes import * +from pyignite.datatypes import String, IntObject -class Person(metaclass=GenericObjectMeta, schema=OrderedDict([ - ('first_name', String), - ('last_name', String), - ('age', IntObject), -])): +class Person(metaclass=GenericObjectMeta, schema={ + 'first_name': String, + 'last_name': String, + 'age': IntObject +}): pass client = Client() -client.connect('localhost', 10800) - -person_cache = client.get_or_create_cache('person') +with client.connect('localhost', 10800): + person_cache = client.get_or_create_cache('person') -person_cache.put( - 1, Person(first_name='Ivan', last_name='Ivanov', age=33) -) + person_cache.put( + 1, Person(first_name='Ivan', last_name='Ivanov', age=33) + ) -person = person_cache.get(1) -print(person.__class__.__name__) -# Person + person = person_cache.get(1) + print(person.__class__.__name__) + # Person -print(person.__class__ is Person) -# True if `Person` was registered automatically (on writing) -# or manually (using `client.register_binary_type()` method). -# False otherwise + print(person.__class__ is Person) + # True if `Person` was registered automatically (on writing) + # or manually (using `client.register_binary_type()` method). + # False otherwise -print(person) -# Person(first_name='Ivan', last_name='Ivanov', age=33, version=1) + print(person) + # Person(first_name='Ivan', last_name='Ivanov', age=33, version=1) -client.register_binary_type(Person) + client.register_binary_type(Person) -Person = person.__class__ + Person = person.__class__ + # cleanup + person_cache.destroy() diff --git a/examples/create_binary.py b/examples/create_binary.py index c963796..d0047f5 100644 --- a/examples/create_binary.py +++ b/examples/create_binary.py @@ -13,16 +13,22 @@ # See the License for the specific language governing permissions and # limitations under the License. 
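+# This example creates an SQL-visible cache (SQL_PUBLIC_STUDENT) through the
+# cache API, stores a Student row as a binary object and reads it back via SQL.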
-from collections import OrderedDict - from pyignite import Client, GenericObjectMeta from pyignite.datatypes import DoubleObject, IntObject, String -from pyignite.datatypes.prop_codes import * +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_SQL_SCHEMA, PROP_QUERY_ENTITIES -client = Client() -client.connect('127.0.0.1', 10800) -student_cache = client.create_cache({ +class Student( + metaclass=GenericObjectMeta, + type_name='SQL_PUBLIC_STUDENT_TYPE', + schema={'NAME': String, 'LOGIN': String, 'AGE': IntObject, 'GPA': DoubleObject} +): + pass + + +client = Client() +with client.connect('127.0.0.1', 10800): + student_cache = client.create_cache({ PROP_NAME: 'SQL_PUBLIC_STUDENT', PROP_SQL_SCHEMA: 'PUBLIC', PROP_QUERY_ENTITIES: [ @@ -62,42 +68,24 @@ ], }) + student_cache.put( + 1, + Student(LOGIN='jdoe', NAME='John Doe', AGE=17, GPA=4.25), + key_hint=IntObject + ) -class Student( - metaclass=GenericObjectMeta, - type_name='SQL_PUBLIC_STUDENT_TYPE', - schema=OrderedDict([ - ('NAME', String), - ('LOGIN', String), - ('AGE', IntObject), - ('GPA', DoubleObject), - ]) -): - pass + with client.sql(r'SELECT * FROM Student', include_field_names=True) as cursor: + print(next(cursor)) + # ['SID', 'NAME', 'LOGIN', 'AGE', 'GPA'] + print(*cursor) + # [1, 'John Doe', 'jdoe', 17, 4.25] -student_cache.put( - 1, - Student(LOGIN='jdoe', NAME='John Doe', AGE=17, GPA=4.25), - key_hint=IntObject -) - -result = client.sql( - r'SELECT * FROM Student', - include_field_names=True -) -print(next(result)) -# ['SID', 'NAME', 'LOGIN', 'AGE', 'GPA'] - -print(*result) -# [1, 'John Doe', 'jdoe', 17, 4.25] - -# DROP_QUERY = 'DROP TABLE Student' -# client.sql(DROP_QUERY) -# -# pyignite.exceptions.SQLError: class org.apache.ignite.IgniteCheckedException: -# Only cache created with CREATE TABLE may be removed with DROP TABLE -# [cacheName=SQL_PUBLIC_STUDENT] + # DROP_QUERY = 'DROP TABLE Student' + # client.sql(DROP_QUERY) + # + # pyignite.exceptions.SQLError: class org.apache.ignite.IgniteCheckedException: + # Only cache created with CREATE TABLE may be removed with DROP TABLE + # [cacheName=SQL_PUBLIC_STUDENT] -student_cache.destroy() -client.close() + student_cache.destroy() diff --git a/examples/docker-compose.yml b/examples/docker-compose.yml new file mode 100644 index 0000000..e2dd178 --- /dev/null +++ b/examples/docker-compose.yml @@ -0,0 +1,34 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
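+# Three single-node Ignite containers for the examples: each exposes the thin
+# client port 10800, mapped to host ports 10800, 10801 and 10802.
+#
+# A minimal connection sketch (an illustration only, assuming this compose file
+# is up), using the same node list as examples/failover.py:
+#
+#   from pyignite import Client
+#
+#   nodes = [('127.0.0.1', 10800), ('127.0.0.1', 10801), ('127.0.0.1', 10802)]
+#   client = Client()
+#   with client.connect(nodes):
+#       cache = client.get_or_create_cache('compose_test')
+#       cache.put(1, 1)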
+ +version: "3" +services: + ignite_0: + image: apacheignite/ignite:latest + ports: + - 10800:10800 + restart: always + + ignite_1: + image: apacheignite/ignite:latest + ports: + - 10801:10800 + restart: always + + ignite_2: + image: apacheignite/ignite:latest + ports: + - 10802:10800 + restart: always diff --git a/examples/expiry_policy.py b/examples/expiry_policy.py new file mode 100644 index 0000000..8482e51 --- /dev/null +++ b/examples/expiry_policy.py @@ -0,0 +1,131 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +import time +from datetime import timedelta + +from pyignite import Client, AioClient +from pyignite.datatypes import ExpiryPolicy +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_EXPIRY_POLICY +from pyignite.exceptions import NotSupportedByClusterError + + +def sync_actions(): + print("Running sync ExpiryPolicy example.") + + client = Client() + with client.connect('127.0.0.1', 10800): + print("Create cache with expiry policy.") + try: + ttl_cache = client.create_cache({ + PROP_NAME: 'test', + PROP_EXPIRY_POLICY: ExpiryPolicy(create=timedelta(seconds=1.0)) + }) + except NotSupportedByClusterError: + print("'ExpiryPolicy' API is not supported by cluster. Finishing...") + return + + try: + ttl_cache.put(1, 1) + time.sleep(0.5) + print(f"key = {1}, value = {ttl_cache.get(1)}") + # key = 1, value = 1 + time.sleep(1.2) + print(f"key = {1}, value = {ttl_cache.get(1)}") + # key = 1, value = None + finally: + ttl_cache.destroy() + + print("Create simple Cache and set TTL through `with_expire_policy`") + simple_cache = client.create_cache('test') + try: + ttl_cache = simple_cache.with_expire_policy(access=timedelta(seconds=1.0)) + ttl_cache.put(1, 1) + time.sleep(0.5) + print(f"key = {1}, value = {ttl_cache.get(1)}") + # key = 1, value = 1 + time.sleep(1.7) + print(f"key = {1}, value = {ttl_cache.get(1)}") + # key = 1, value = None + finally: + simple_cache.destroy() + + +async def async_actions(): + print("Running async ExpiryPolicy example.") + + client = AioClient() + async with client.connect('127.0.0.1', 10800): + print("Create cache with expiry policy.") + try: + ttl_cache = await client.create_cache({ + PROP_NAME: 'test', + PROP_EXPIRY_POLICY: ExpiryPolicy(create=timedelta(seconds=1.0)) + }) + except NotSupportedByClusterError: + print("'ExpiryPolicy' API is not supported by cluster. 
Finishing...") + return + + try: + await ttl_cache.put(1, 1) + await asyncio.sleep(0.5) + value = await ttl_cache.get(1) + print(f"key = {1}, value = {value}") + # key = 1, value = 1 + await asyncio.sleep(1.2) + value = await ttl_cache.get(1) + print(f"key = {1}, value = {value}") + # key = 1, value = None + finally: + await ttl_cache.destroy() + + print("Create simple Cache and set TTL through `with_expire_policy`") + simple_cache = await client.create_cache('test') + try: + ttl_cache = simple_cache.with_expire_policy(access=timedelta(seconds=1.0)) + await ttl_cache.put(1, 1) + await asyncio.sleep(0.5) + value = await ttl_cache.get(1) + print(f"key = {1}, value = {value}") + # key = 1, value = 1 + await asyncio.sleep(1.7) + value = await ttl_cache.get(1) + print(f"key = {1}, value = {value}") + # key = 1, value = None + finally: + await simple_cache.destroy() + + +if __name__ == '__main__': + sync_actions() + print('-' * 20) + asyncio.run(async_actions()) + +# Running sync ExpiryPolicy example. +# Create cache with expiry policy. +# key = 1, value = 1 +# key = 1, value = None +# Create simple Cache and set TTL through `with_expire_policy` +# key = 1, value = 1 +# key = 1, value = None +# -------------------- +# Running async ExpiryPolicy example. +# Create cache with expiry policy. +# key = 1, value = 1 +# key = 1, value = None +# Create simple Cache and set TTL through `with_expire_policy` +# key = 1, value = 1 +# key = 1, value = None diff --git a/examples/failover.py b/examples/failover.py index 3a5fcce..3a5ee42 100644 --- a/examples/failover.py +++ b/examples/failover.py @@ -15,7 +15,7 @@ from pyignite import Client from pyignite.datatypes.cache_config import CacheMode -from pyignite.datatypes.prop_codes import * +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_CACHE_MODE, PROP_BACKUPS_NUMBER from pyignite.exceptions import SocketError @@ -25,37 +25,42 @@ ('127.0.0.1', 10802), ] + client = Client(timeout=4.0) -client.connect(nodes) -print('Connected to {}'.format(client)) - -my_cache = client.get_or_create_cache({ - PROP_NAME: 'my_cache', - PROP_CACHE_MODE: CacheMode.REPLICATED, -}) -my_cache.put('test_key', 0) - -# abstract main loop -while True: - try: - # do the work - test_value = my_cache.get('test_key') - my_cache.put('test_key', test_value + 1) - except (OSError, SocketError) as e: - # recover from error (repeat last command, check data - # consistency or just continue − depends on the task) - print('Error: {}'.format(e)) - print('Last value: {}'.format(my_cache.get('test_key'))) - print('Reconnected to {}'.format(client)) - -# Connected to 127.0.0.1:10800 -# Error: [Errno 104] Connection reset by peer -# Last value: 6999 -# Reconnected to 127.0.0.1:10801 -# Error: Socket connection broken. -# Last value: 12302 -# Reconnected to 127.0.0.1:10802 -# Error: [Errno 111] Client refused +with client.connect(nodes): + print('Connected') + + my_cache = client.get_or_create_cache({ + PROP_NAME: 'my_cache', + PROP_CACHE_MODE: CacheMode.PARTITIONED, + PROP_BACKUPS_NUMBER: 2, + }) + my_cache.put('test_key', 0) + test_value = 0 + + # abstract main loop + while True: + try: + # do the work + test_value = my_cache.get('test_key') or 0 + my_cache.put('test_key', test_value + 1) + except (OSError, SocketError) as e: + # recover from error (repeat last command, check data + # consistency or just continue − depends on the task) + print(f'Error: {e}') + print(f'Last value: {test_value}') + print('Reconnecting') + +# Connected +# Error: Connection broken. 
+# Last value: 2650 +# Reconnecting +# Error: Connection broken. +# Last value: 10204 +# Reconnecting +# Error: Connection broken. +# Last value: 18932 +# Reconnecting # Traceback (most recent call last): -# ... -# pyignite.exceptions.ReconnectError: Can not reconnect: out of nodes +# ... +# pyignite.exceptions.ReconnectError: Can not reconnect: out of nodes. diff --git a/examples/get_and_put.py b/examples/get_and_put.py index 49c5108..053e4b7 100644 --- a/examples/get_and_put.py +++ b/examples/get_and_put.py @@ -16,26 +16,24 @@ from pyignite import Client client = Client() -client.connect('127.0.0.1', 10800) +with client.connect('127.0.0.1', 10800): + my_cache = client.create_cache('my cache') -my_cache = client.create_cache('my cache') + my_cache.put('my key', 42) -my_cache.put('my key', 42) + result = my_cache.get('my key') + print(result) # 42 -result = my_cache.get('my key') -print(result) # 42 + result = my_cache.get('non-existent key') + print(result) # None -result = my_cache.get('non-existent key') -print(result) # None + result = my_cache.get_all([ + 'my key', + 'non-existent key', + 'other-key', + ]) + print(result) # {'my key': 42} -result = my_cache.get_all([ - 'my key', - 'non-existent key', - 'other-key', -]) -print(result) # {'my key': 42} + my_cache.clear_key('my key') -my_cache.clear_key('my key') - -my_cache.destroy() -client.close() + my_cache.destroy() diff --git a/examples/get_and_put_complex.py b/examples/get_and_put_complex.py new file mode 100644 index 0000000..0938379 --- /dev/null +++ b/examples/get_and_put_complex.py @@ -0,0 +1,65 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
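+# Demonstrates storing composite values: dicts as MapObject (HASH_MAP or
+# LINKED_HASH_MAP), lists and sets as CollectionObject, and object arrays as
+# ObjectArrayObject. Each value is passed as a (type_id, value) tuple, and for
+# object arrays the value_hint=ObjectArrayObject argument is mandatory.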
+ +from pyignite import Client +from pyignite.datatypes import CollectionObject, MapObject, ObjectArrayObject + + +client = Client() +with client.connect('127.0.0.1', 10800): + my_cache = client.get_or_create_cache('my cache') + + value = {1: 'test', 'key': 2.0} + + # saving ordered dictionary + type_id = MapObject.LINKED_HASH_MAP + my_cache.put('my dict', (type_id, value)) + result = my_cache.get('my dict') + print(result) # (2, {1: 'test', 'key': 2.0}) + + # saving unordered dictionary + type_id = MapObject.HASH_MAP + my_cache.put('my dict', (type_id, value)) + result = my_cache.get('my dict') + print(result) # (1, {1: 'test', 'key': 2.0}) + + type_id = CollectionObject.LINKED_LIST + value = [1, '2', 3.0] + + my_cache.put('my list', (type_id, value)) + + result = my_cache.get('my list') + print(result) # (2, [1, '2', 3.0]) + + type_id = CollectionObject.HASH_SET + value = [4, 4, 'test', 5.6] + + my_cache.put('my set', (type_id, value)) + + result = my_cache.get('my set') + print(result) # (3, [5.6, 4, 'test']) + + type_id = ObjectArrayObject.OBJECT + value = [7, '8', 9.0] + + my_cache.put( + 'my array of objects', + (type_id, value), + value_hint=ObjectArrayObject # this hint is mandatory! + ) + result = my_cache.get('my array of objects') + print(result) # (-1, [7, '8', 9.0]) + + my_cache.destroy() diff --git a/examples/helpers/converters.py b/examples/helpers/converters.py new file mode 100644 index 0000000..4122c49 --- /dev/null +++ b/examples/helpers/converters.py @@ -0,0 +1,5 @@ +def obj_to_dict(obj): + result = {'type_name': obj.type_name} + for data in obj.schema: + result.update({data: getattr(obj, data)}) + return result diff --git a/examples/helpers/sql_helper.py b/examples/helpers/sql_helper.py new file mode 100644 index 0000000..f13d2ed --- /dev/null +++ b/examples/helpers/sql_helper.py @@ -0,0 +1,193 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
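+# Shared fixtures for the SQL examples (async_sql.py and read_binary.py):
+# table names, CREATE/INSERT/DROP query templates and the sample Country,
+# City and CountryLanguage data set.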
+ +from decimal import Decimal +from enum import Enum + + +class TableNames(Enum): + COUNTRY_TABLE_NAME = 'Country' + CITY_TABLE_NAME = 'City' + LANGUAGE_TABLE_NAME = 'CountryLanguage' + + +class Query: + COUNTRY_CREATE_TABLE = '''CREATE TABLE Country ( + Code CHAR(3) PRIMARY KEY, + Name CHAR(52), + Continent CHAR(50), + Region CHAR(26), + SurfaceArea DECIMAL(10,2), + IndepYear SMALLINT(6), + Population INT(11), + LifeExpectancy DECIMAL(3,1), + GNP DECIMAL(10,2), + GNPOld DECIMAL(10,2), + LocalName CHAR(45), + GovernmentForm CHAR(45), + HeadOfState CHAR(60), + Capital INT(11), + Code2 CHAR(2) + )''' + + COUNTRY_INSERT = '''INSERT INTO Country( + Code, Name, Continent, Region, + SurfaceArea, IndepYear, Population, + LifeExpectancy, GNP, GNPOld, + LocalName, GovernmentForm, HeadOfState, + Capital, Code2 + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''' + + CITY_CREATE_TABLE = '''CREATE TABLE City ( + ID INT(11), + Name CHAR(35), + CountryCode CHAR(3), + District CHAR(20), + Population INT(11), + PRIMARY KEY (ID, CountryCode) + ) WITH "affinityKey=CountryCode"''' + + CITY_CREATE_INDEX = 'CREATE INDEX idx_country_code ON city (CountryCode)' + + CITY_INSERT = '''INSERT INTO City( + ID, Name, CountryCode, District, Population + ) VALUES (?, ?, ?, ?, ?)''' + + LANGUAGE_CREATE_TABLE = '''CREATE TABLE CountryLanguage ( + CountryCode CHAR(3), + Language CHAR(30), + IsOfficial BOOLEAN, + Percentage DECIMAL(4,1), + PRIMARY KEY (CountryCode, Language) + ) WITH "affinityKey=CountryCode"''' + + LANGUAGE_CREATE_INDEX = 'CREATE INDEX idx_lang_country_code ON CountryLanguage (CountryCode)' + + LANGUAGE_INSERT = '''INSERT INTO CountryLanguage( + CountryCode, Language, IsOfficial, Percentage + ) VALUES (?, ?, ?, ?)''' + + DROP_TABLE = 'DROP TABLE {} IF EXISTS' + + +class TestData: + COUNTRY = [ + [ + 'USA', 'United States', 'North America', 'North America', + Decimal('9363520.00'), 1776, 278357000, + Decimal('77.1'), Decimal('8510700.00'), Decimal('8110900.00'), + 'United States', 'Federal Republic', 'George W. 
Bush', + 3813, 'US', + ], + [ + 'IND', 'India', 'Asia', 'Southern and Central Asia', + Decimal('3287263.00'), 1947, 1013662000, + Decimal('62.5'), Decimal('447114.00'), Decimal('430572.00'), + 'Bharat/India', 'Federal Republic', 'Kocheril Raman Narayanan', + 1109, 'IN', + ], + [ + 'CHN', 'China', 'Asia', 'Eastern Asia', + Decimal('9572900.00'), -1523, 1277558000, + Decimal('71.4'), Decimal('982268.00'), Decimal('917719.00'), + 'Zhongquo', 'PeoplesRepublic', 'Jiang Zemin', + 1891, 'CN', + ], + ] + + CITY = [ + [3793, 'New York', 'USA', 'New York', 8008278], + [3794, 'Los Angeles', 'USA', 'California', 3694820], + [3795, 'Chicago', 'USA', 'Illinois', 2896016], + [3796, 'Houston', 'USA', 'Texas', 1953631], + [3797, 'Philadelphia', 'USA', 'Pennsylvania', 1517550], + [3798, 'Phoenix', 'USA', 'Arizona', 1321045], + [3799, 'San Diego', 'USA', 'California', 1223400], + [3800, 'Dallas', 'USA', 'Texas', 1188580], + [3801, 'San Antonio', 'USA', 'Texas', 1144646], + [3802, 'Detroit', 'USA', 'Michigan', 951270], + [3803, 'San Jose', 'USA', 'California', 894943], + [3804, 'Indianapolis', 'USA', 'Indiana', 791926], + [3805, 'San Francisco', 'USA', 'California', 776733], + [1024, 'Mumbai (Bombay)', 'IND', 'Maharashtra', 10500000], + [1025, 'Delhi', 'IND', 'Delhi', 7206704], + [1026, 'Calcutta [Kolkata]', 'IND', 'West Bengali', 4399819], + [1027, 'Chennai (Madras)', 'IND', 'Tamil Nadu', 3841396], + [1028, 'Hyderabad', 'IND', 'Andhra Pradesh', 2964638], + [1029, 'Ahmedabad', 'IND', 'Gujarat', 2876710], + [1030, 'Bangalore', 'IND', 'Karnataka', 2660088], + [1031, 'Kanpur', 'IND', 'Uttar Pradesh', 1874409], + [1032, 'Nagpur', 'IND', 'Maharashtra', 1624752], + [1033, 'Lucknow', 'IND', 'Uttar Pradesh', 1619115], + [1034, 'Pune', 'IND', 'Maharashtra', 1566651], + [1035, 'Surat', 'IND', 'Gujarat', 1498817], + [1036, 'Jaipur', 'IND', 'Rajasthan', 1458483], + [1890, 'Shanghai', 'CHN', 'Shanghai', 9696300], + [1891, 'Peking', 'CHN', 'Peking', 7472000], + [1892, 'Chongqing', 'CHN', 'Chongqing', 6351600], + [1893, 'Tianjin', 'CHN', 'Tianjin', 5286800], + [1894, 'Wuhan', 'CHN', 'Hubei', 4344600], + [1895, 'Harbin', 'CHN', 'Heilongjiang', 4289800], + [1896, 'Shenyang', 'CHN', 'Liaoning', 4265200], + [1897, 'Kanton [Guangzhou]', 'CHN', 'Guangdong', 4256300], + [1898, 'Chengdu', 'CHN', 'Sichuan', 3361500], + [1899, 'Nanking [Nanjing]', 'CHN', 'Jiangsu', 2870300], + [1900, 'Changchun', 'CHN', 'Jilin', 2812000], + [1901, 'Xi´an', 'CHN', 'Shaanxi', 2761400], + [1902, 'Dalian', 'CHN', 'Liaoning', 2697000], + [1903, 'Qingdao', 'CHN', 'Shandong', 2596000], + [1904, 'Jinan', 'CHN', 'Shandong', 2278100], + [1905, 'Hangzhou', 'CHN', 'Zhejiang', 2190500], + [1906, 'Zhengzhou', 'CHN', 'Henan', 2107200], + ] + + LANGUAGE = [ + ['USA', 'Chinese', False, Decimal('0.6')], + ['USA', 'English', True, Decimal('86.2')], + ['USA', 'French', False, Decimal('0.7')], + ['USA', 'German', False, Decimal('0.7')], + ['USA', 'Italian', False, Decimal('0.6')], + ['USA', 'Japanese', False, Decimal('0.2')], + ['USA', 'Korean', False, Decimal('0.3')], + ['USA', 'Polish', False, Decimal('0.3')], + ['USA', 'Portuguese', False, Decimal('0.2')], + ['USA', 'Spanish', False, Decimal('7.5')], + ['USA', 'Tagalog', False, Decimal('0.4')], + ['USA', 'Vietnamese', False, Decimal('0.2')], + ['IND', 'Asami', False, Decimal('1.5')], + ['IND', 'Bengali', False, Decimal('8.2')], + ['IND', 'Gujarati', False, Decimal('4.8')], + ['IND', 'Hindi', True, Decimal('39.9')], + ['IND', 'Kannada', False, Decimal('3.9')], + ['IND', 'Malajalam', False, Decimal('3.6')], + ['IND', 
'Marathi', False, Decimal('7.4')], + ['IND', 'Orija', False, Decimal('3.3')], + ['IND', 'Punjabi', False, Decimal('2.8')], + ['IND', 'Tamil', False, Decimal('6.3')], + ['IND', 'Telugu', False, Decimal('7.8')], + ['IND', 'Urdu', False, Decimal('5.1')], + ['CHN', 'Chinese', True, Decimal('92.0')], + ['CHN', 'Dong', False, Decimal('0.2')], + ['CHN', 'Hui', False, Decimal('0.8')], + ['CHN', 'Mantšu', False, Decimal('0.9')], + ['CHN', 'Miao', False, Decimal('0.7')], + ['CHN', 'Mongolian', False, Decimal('0.4')], + ['CHN', 'Puyi', False, Decimal('0.2')], + ['CHN', 'Tibetan', False, Decimal('0.4')], + ['CHN', 'Tujia', False, Decimal('0.5')], + ['CHN', 'Uighur', False, Decimal('0.6')], + ['CHN', 'Yi', False, Decimal('0.6')], + ['CHN', 'Zhuang', False, Decimal('1.4')], + ] diff --git a/examples/migrate_binary.py b/examples/migrate_binary.py index f0b0f74..0c7f518 100644 --- a/examples/migrate_binary.py +++ b/examples/migrate_binary.py @@ -13,76 +13,74 @@ # See the License for the specific language governing permissions and # limitations under the License. -from collections import OrderedDict from datetime import date from decimal import Decimal +from pprint import pprint +from helpers.converters import obj_to_dict from pyignite import Client, GenericObjectMeta -from pyignite.datatypes import ( - BoolObject, DateObject, DecimalObject, LongObject, String, -) - +from pyignite.datatypes import BoolObject, DateObject, DecimalObject, LongObject, String # prepare old data -old_schema = OrderedDict([ - ('date', DateObject), - ('reported', BoolObject), - ('purpose', String), - ('sum', DecimalObject), - ('recipient', String), - ('cashier_id', LongObject), -]) - -old_data = [ - (1, { +old_schema = {'date': DateObject, + 'reported': BoolObject, + 'purpose': String, + 'sum': DecimalObject, + 'recipient': String, + 'cashier_id': LongObject + } + +old_data = { + 1: { 'date': date(2017, 9, 21), 'reported': True, 'purpose': 'Praesent eget fermentum massa', 'sum': Decimal('666.67'), 'recipient': 'John Doe', 'cashier_id': 8, - }), - (2, { + }, + 2: { 'date': date(2017, 10, 11), 'reported': True, 'purpose': 'Proin in bibendum nulla', 'sum': Decimal('333.33'), 'recipient': 'Jane Roe', 'cashier_id': 9, - }), - (3, { + }, + 3: { 'date': date(2017, 10, 11), 'reported': True, 'purpose': 'Suspendisse nec dolor auctor, scelerisque ex eu, iaculis odio', 'sum': Decimal('400.0'), 'recipient': 'Jane Roe', 'cashier_id': 8, - }), - (4, { + }, + 4: { 'date': date(2017, 10, 24), 'reported': False, 'purpose': 'Quisque ut leo ligula', 'sum': Decimal('1234.5'), 'recipient': 'Joe Bloggs', 'cashier_id': 10, - }), - (5, { + }, + 5: { 'date': date(2017, 12, 1), 'reported': True, 'purpose': 'Quisque ut leo ligula', 'sum': Decimal('800.0'), 'recipient': 'Richard Public', 'cashier_id': 12, - }), - (6, { + }, + 6: { 'date': date(2017, 12, 1), 'reported': True, 'purpose': 'Aenean eget bibendum lorem, a luctus libero', 'sum': Decimal('135.79'), 'recipient': 'Joe Bloggs', 'cashier_id': 10, - }), -] + } +} + # - add `report_date` # - set `report_date` to the current date if `reported` is True, None if False @@ -106,18 +104,19 @@ class ExpenseVoucher( client = Client() -client.connect('127.0.0.1', 10800) -accounting = client.get_or_create_cache('accounting') +with client.connect('127.0.0.1', 10800): + accounting = client.get_or_create_cache('accounting') -for key, value in old_data: - accounting.put(key, ExpenseVoucher(**value)) + for item, value in old_data.items(): + print(item) + accounting.put(item, ExpenseVoucher(**value)) -data_classes = 
client.query_binary_type('ExpenseVoucher') -print(data_classes) -# { -# -231598180: -# } + data_classes = client.query_binary_type('ExpenseVoucher') + print(data_classes) + # { + # {547629991: , -231598180: } + # } s_id, data_class = data_classes.popitem() schema = data_class.schema @@ -142,16 +141,16 @@ def migrate(cache, data, new_class): """ Migrate given data pages. """ for key, old_value in data: # read data - print(old_value) - # ExpenseVoucher( - # date=datetime(2017, 9, 21, 0, 0), - # reported=True, - # purpose='Praesent eget fermentum massa', - # sum=Decimal('666.67'), - # recipient='John Doe', - # cashier_id=8, - # version=1 - # ) + print('Old value:') + pprint(obj_to_dict(old_value)) + # Old value: + # {'cashier_id': 10, + # 'date': datetime.datetime(2017, 12, 1, 0, 0), + # 'purpose': 'Aenean eget bibendum lorem, a luctus libero', + # 'recipient': 'Joe Bloggs', + # 'reported': True, + # 'sum': Decimal('135.79'), + # 'type_name': 'ExpenseVoucher'} # create new binary object new_value = new_class() @@ -169,22 +168,26 @@ def migrate(cache, data, new_class): # verify data verify = cache.get(key) - print(verify) - # ExpenseVoucherV2( - # purpose='Praesent eget fermentum massa', - # sum=Decimal('666.67'), - # recipient='John Doe', - # cashier_id=8, - # expense_date=datetime(2017, 9, 21, 0, 0), - # report_date=datetime(2018, 8, 29, 0, 0), - # version=1, - # ) + print('New value:') + pprint(obj_to_dict(verify)) + # New value: + # {'cashier_id': 10, + # 'expense_date': datetime.datetime(2017, 12, 1, 0, 0), + # 'purpose': 'Aenean eget bibendum lorem, a luctus libero', + # 'recipient': 'Joe Bloggs', + # 'report_date': datetime.datetime(2022, 5, 6, 0, 0), + # 'sum': Decimal('135.79'), + # 'type_name': 'ExpenseVoucher'} + + print('-' * 20) # migrate data -result = accounting.scan() -migrate(accounting, result, ExpenseVoucherV2) +with client.connect('127.0.0.1', 10800): + accounting = client.get_or_create_cache('accounting') + + with accounting.scan() as cursor: + migrate(accounting, cursor, ExpenseVoucherV2) -# cleanup -accounting.destroy() -client.close() + # cleanup + accounting.destroy() diff --git a/examples/read_binary.py b/examples/read_binary.py index 3a8e9e2..92404ca 100644 --- a/examples/read_binary.py +++ b/examples/read_binary.py @@ -13,263 +13,119 @@ # See the License for the specific language governing permissions and # limitations under the License. 
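+# Table names, query templates and sample data are imported from
+# helpers/sql_helper.py (shared with async_sql.py).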
-from decimal import Decimal +from pprint import pprint +from helpers.converters import obj_to_dict +from helpers.sql_helper import TableNames, Query, TestData from pyignite import Client -from pyignite.datatypes.prop_codes import * - - -COUNTRY_TABLE_NAME = 'Country' -CITY_TABLE_NAME = 'City' -LANGUAGE_TABLE_NAME = 'CountryLanguage' - -COUNTRY_CREATE_TABLE_QUERY = '''CREATE TABLE Country ( - Code CHAR(3) PRIMARY KEY, - Name CHAR(52), - Continent CHAR(50), - Region CHAR(26), - SurfaceArea DECIMAL(10,2), - IndepYear SMALLINT(6), - Population INT(11), - LifeExpectancy DECIMAL(3,1), - GNP DECIMAL(10,2), - GNPOld DECIMAL(10,2), - LocalName CHAR(45), - GovernmentForm CHAR(45), - HeadOfState CHAR(60), - Capital INT(11), - Code2 CHAR(2) -)''' - -COUNTRY_INSERT_QUERY = '''INSERT INTO Country( - Code, Name, Continent, Region, - SurfaceArea, IndepYear, Population, - LifeExpectancy, GNP, GNPOld, - LocalName, GovernmentForm, HeadOfState, - Capital, Code2 -) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''' - -CITY_CREATE_TABLE_QUERY = '''CREATE TABLE City ( - ID INT(11), - Name CHAR(35), - CountryCode CHAR(3), - District CHAR(20), - Population INT(11), - PRIMARY KEY (ID, CountryCode) -) WITH "affinityKey=CountryCode"''' - -CITY_CREATE_INDEX = ''' -CREATE INDEX idx_country_code ON city (CountryCode)''' - -CITY_INSERT_QUERY = '''INSERT INTO City( - ID, Name, CountryCode, District, Population -) VALUES (?, ?, ?, ?, ?)''' - -LANGUAGE_CREATE_TABLE_QUERY = '''CREATE TABLE CountryLanguage ( - CountryCode CHAR(3), - Language CHAR(30), - IsOfficial BOOLEAN, - Percentage DECIMAL(4,1), - PRIMARY KEY (CountryCode, Language) -) WITH "affinityKey=CountryCode"''' - -LANGUAGE_CREATE_INDEX = ''' -CREATE INDEX idx_lang_country_code ON CountryLanguage (CountryCode)''' - -LANGUAGE_INSERT_QUERY = '''INSERT INTO CountryLanguage( - CountryCode, Language, IsOfficial, Percentage -) VALUES (?, ?, ?, ?)''' - -DROP_TABLE_QUERY = '''DROP TABLE {} IF EXISTS''' - -COUNTRY_DATA = [ - [ - 'USA', 'United States', 'North America', 'North America', - Decimal('9363520.00'), 1776, 278357000, - Decimal('77.1'), Decimal('8510700.00'), Decimal('8110900.00'), - 'United States', 'Federal Republic', 'George W. 
Bush', - 3813, 'US', - ], - [ - 'IND', 'India', 'Asia', 'Southern and Central Asia', - Decimal('3287263.00'), 1947, 1013662000, - Decimal('62.5'), Decimal('447114.00'), Decimal('430572.00'), - 'Bharat/India', 'Federal Republic', 'Kocheril Raman Narayanan', - 1109, 'IN', - ], - [ - 'CHN', 'China', 'Asia', 'Eastern Asia', - Decimal('9572900.00'), -1523, 1277558000, - Decimal('71.4'), Decimal('982268.00'), Decimal('917719.00'), - 'Zhongquo', 'PeoplesRepublic', 'Jiang Zemin', - 1891, 'CN', - ], -] - -CITY_DATA = [ - [3793, 'New York', 'USA', 'New York', 8008278], - [3794, 'Los Angeles', 'USA', 'California', 3694820], - [3795, 'Chicago', 'USA', 'Illinois', 2896016], - [3796, 'Houston', 'USA', 'Texas', 1953631], - [3797, 'Philadelphia', 'USA', 'Pennsylvania', 1517550], - [3798, 'Phoenix', 'USA', 'Arizona', 1321045], - [3799, 'San Diego', 'USA', 'California', 1223400], - [3800, 'Dallas', 'USA', 'Texas', 1188580], - [3801, 'San Antonio', 'USA', 'Texas', 1144646], - [3802, 'Detroit', 'USA', 'Michigan', 951270], - [3803, 'San Jose', 'USA', 'California', 894943], - [3804, 'Indianapolis', 'USA', 'Indiana', 791926], - [3805, 'San Francisco', 'USA', 'California', 776733], - [1024, 'Mumbai (Bombay)', 'IND', 'Maharashtra', 10500000], - [1025, 'Delhi', 'IND', 'Delhi', 7206704], - [1026, 'Calcutta [Kolkata]', 'IND', 'West Bengali', 4399819], - [1027, 'Chennai (Madras)', 'IND', 'Tamil Nadu', 3841396], - [1028, 'Hyderabad', 'IND', 'Andhra Pradesh', 2964638], - [1029, 'Ahmedabad', 'IND', 'Gujarat', 2876710], - [1030, 'Bangalore', 'IND', 'Karnataka', 2660088], - [1031, 'Kanpur', 'IND', 'Uttar Pradesh', 1874409], - [1032, 'Nagpur', 'IND', 'Maharashtra', 1624752], - [1033, 'Lucknow', 'IND', 'Uttar Pradesh', 1619115], - [1034, 'Pune', 'IND', 'Maharashtra', 1566651], - [1035, 'Surat', 'IND', 'Gujarat', 1498817], - [1036, 'Jaipur', 'IND', 'Rajasthan', 1458483], - [1890, 'Shanghai', 'CHN', 'Shanghai', 9696300], - [1891, 'Peking', 'CHN', 'Peking', 7472000], - [1892, 'Chongqing', 'CHN', 'Chongqing', 6351600], - [1893, 'Tianjin', 'CHN', 'Tianjin', 5286800], - [1894, 'Wuhan', 'CHN', 'Hubei', 4344600], - [1895, 'Harbin', 'CHN', 'Heilongjiang', 4289800], - [1896, 'Shenyang', 'CHN', 'Liaoning', 4265200], - [1897, 'Kanton [Guangzhou]', 'CHN', 'Guangdong', 4256300], - [1898, 'Chengdu', 'CHN', 'Sichuan', 3361500], - [1899, 'Nanking [Nanjing]', 'CHN', 'Jiangsu', 2870300], - [1900, 'Changchun', 'CHN', 'Jilin', 2812000], - [1901, 'Xi´an', 'CHN', 'Shaanxi', 2761400], - [1902, 'Dalian', 'CHN', 'Liaoning', 2697000], - [1903, 'Qingdao', 'CHN', 'Shandong', 2596000], - [1904, 'Jinan', 'CHN', 'Shandong', 2278100], - [1905, 'Hangzhou', 'CHN', 'Zhejiang', 2190500], - [1906, 'Zhengzhou', 'CHN', 'Henan', 2107200], -] - -LANGUAGE_DATA = [ - ['USA', 'Chinese', False, Decimal('0.6')], - ['USA', 'English', True, Decimal('86.2')], - ['USA', 'French', False, Decimal('0.7')], - ['USA', 'German', False, Decimal('0.7')], - ['USA', 'Italian', False, Decimal('0.6')], - ['USA', 'Japanese', False, Decimal('0.2')], - ['USA', 'Korean', False, Decimal('0.3')], - ['USA', 'Polish', False, Decimal('0.3')], - ['USA', 'Portuguese', False, Decimal('0.2')], - ['USA', 'Spanish', False, Decimal('7.5')], - ['USA', 'Tagalog', False, Decimal('0.4')], - ['USA', 'Vietnamese', False, Decimal('0.2')], - ['IND', 'Asami', False, Decimal('1.5')], - ['IND', 'Bengali', False, Decimal('8.2')], - ['IND', 'Gujarati', False, Decimal('4.8')], - ['IND', 'Hindi', True, Decimal('39.9')], - ['IND', 'Kannada', False, Decimal('3.9')], - ['IND', 'Malajalam', False, Decimal('3.6')], - 
['IND', 'Marathi', False, Decimal('7.4')], - ['IND', 'Orija', False, Decimal('3.3')], - ['IND', 'Punjabi', False, Decimal('2.8')], - ['IND', 'Tamil', False, Decimal('6.3')], - ['IND', 'Telugu', False, Decimal('7.8')], - ['IND', 'Urdu', False, Decimal('5.1')], - ['CHN', 'Chinese', True, Decimal('92.0')], - ['CHN', 'Dong', False, Decimal('0.2')], - ['CHN', 'Hui', False, Decimal('0.8')], - ['CHN', 'Mantšu', False, Decimal('0.9')], - ['CHN', 'Miao', False, Decimal('0.7')], - ['CHN', 'Mongolian', False, Decimal('0.4')], - ['CHN', 'Puyi', False, Decimal('0.2')], - ['CHN', 'Tibetan', False, Decimal('0.4')], - ['CHN', 'Tujia', False, Decimal('0.5')], - ['CHN', 'Uighur', False, Decimal('0.6')], - ['CHN', 'Yi', False, Decimal('0.6')], - ['CHN', 'Zhuang', False, Decimal('1.4')], -] - +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_QUERY_ENTITIES # establish connection client = Client() -client.connect('127.0.0.1', 10800) - -# create tables -for query in [ - COUNTRY_CREATE_TABLE_QUERY, - CITY_CREATE_TABLE_QUERY, - LANGUAGE_CREATE_TABLE_QUERY, -]: - client.sql(query) - -# create indices -for query in [CITY_CREATE_INDEX, LANGUAGE_CREATE_INDEX]: - client.sql(query) - -# load data -for row in COUNTRY_DATA: - client.sql(COUNTRY_INSERT_QUERY, query_args=row) - -for row in CITY_DATA: - client.sql(CITY_INSERT_QUERY, query_args=row) - -for row in LANGUAGE_DATA: - client.sql(LANGUAGE_INSERT_QUERY, query_args=row) - -# examine the storage -result = client.get_cache_names() -print(result) -# [ -# 'SQL_PUBLIC_CITY', -# 'SQL_PUBLIC_COUNTRY', -# 'PUBLIC', -# 'SQL_PUBLIC_COUNTRYLANGUAGE' -# ] - -city_cache = client.get_or_create_cache('SQL_PUBLIC_CITY') -print(city_cache.settings[PROP_NAME]) -# 'SQL_PUBLIC_CITY' - -print(city_cache.settings[PROP_QUERY_ENTITIES]) -# { -# 'key_type_name': ( -# 'SQL_PUBLIC_CITY_9ac8e17a_2f99_45b7_958e_06da32882e9d_KEY' -# ), -# 'value_type_name': ( -# 'SQL_PUBLIC_CITY_9ac8e17a_2f99_45b7_958e_06da32882e9d' -# ), -# 'table_name': 'CITY', -# 'query_fields': [ -# ... -# ], -# 'field_name_aliases': [ -# ... 
-# ], -# 'query_indexes': [] -# } - -result = city_cache.scan() -print(next(result)) -# ( -# SQL_PUBLIC_CITY_6fe650e1_700f_4e74_867d_58f52f433c43_KEY( -# ID=1890, -# COUNTRYCODE='CHN', -# version=1 -# ), -# SQL_PUBLIC_CITY_6fe650e1_700f_4e74_867d_58f52f433c43( -# NAME='Shanghai', -# DISTRICT='Shanghai', -# POPULATION=9696300, -# version=1 -# ) -# ) - -# clean up -for table_name in [ - CITY_TABLE_NAME, - LANGUAGE_TABLE_NAME, - COUNTRY_TABLE_NAME, -]: - result = client.sql(DROP_TABLE_QUERY.format(table_name)) +with client.connect('127.0.0.1', 10800): + # create tables + for query in [ + Query.COUNTRY_CREATE_TABLE, + Query.CITY_CREATE_TABLE, + Query.LANGUAGE_CREATE_TABLE, + ]: + client.sql(query) + + # create indices + for query in [Query.CITY_CREATE_INDEX, Query.LANGUAGE_CREATE_INDEX]: + client.sql(query) + + # load data + for row in TestData.COUNTRY: + client.sql(Query.COUNTRY_INSERT, query_args=row) + + for row in TestData.CITY: + client.sql(Query.CITY_INSERT, query_args=row) + + for row in TestData.LANGUAGE: + client.sql(Query.LANGUAGE_INSERT, query_args=row) + + # examine the storage + result = client.get_cache_names() + pprint(result) + # ['SQL_PUBLIC_CITY', 'SQL_PUBLIC_COUNTRY', 'SQL_PUBLIC_COUNTRYLANGUAGE'] + + city_cache = client.get_or_create_cache('SQL_PUBLIC_CITY') + pprint(city_cache.settings[PROP_NAME]) + # 'SQL_PUBLIC_CITY' + + pprint(city_cache.settings[PROP_QUERY_ENTITIES]) + # [{'field_name_aliases': [{'alias': 'DISTRICT', 'field_name': 'DISTRICT'}, + # {'alias': 'POPULATION', 'field_name': 'POPULATION'}, + # {'alias': 'COUNTRYCODE', 'field_name': 'COUNTRYCODE'}, + # {'alias': 'ID', 'field_name': 'ID'}, + # {'alias': 'NAME', 'field_name': 'NAME'}], + # 'key_field_name': None, + # 'key_type_name': 'SQL_PUBLIC_CITY_081f37cc8ac72b10f08ab1273b744497_KEY', + # 'query_fields': [{'default_value': None, + # 'is_key_field': True, + # 'is_notnull_constraint_field': False, + # 'name': 'ID', + # 'precision': -1, + # 'scale': -1, + # 'type_name': 'java.lang.Integer'}, + # {'default_value': None, + # 'is_key_field': False, + # 'is_notnull_constraint_field': False, + # 'name': 'NAME', + # 'precision': 35, + # 'scale': -1, + # 'type_name': 'java.lang.String'}, + # {'default_value': None, + # 'is_key_field': True, + # 'is_notnull_constraint_field': False, + # 'name': 'COUNTRYCODE', + # 'precision': 3, + # 'scale': -1, + # 'type_name': 'java.lang.String'}, + # {'default_value': None, + # 'is_key_field': False, + # 'is_notnull_constraint_field': False, + # 'name': 'DISTRICT', + # 'precision': 20, + # 'scale': -1, + # 'type_name': 'java.lang.String'}, + # {'default_value': None, + # 'is_key_field': False, + # 'is_notnull_constraint_field': False, + # 'name': 'POPULATION', + # 'precision': -1, + # 'scale': -1, + # 'type_name': 'java.lang.Integer'}], + # 'query_indexes': [], + # 'table_name': 'CITY', + # 'value_field_name': None, + # 'value_type_name': 'SQL_PUBLIC_CITY_081f37cc8ac72b10f08ab1273b744497'}] + + print('-' * 20) + with city_cache.scan() as cursor: + for line in next(cursor): + pprint(obj_to_dict(line)) + # {'COUNTRYCODE': 'USA', + # 'ID': 3793, + # 'type_name': 'SQL_PUBLIC_CITY_081f37cc8ac72b10f08ab1273b744497_KEY'} + # {'DISTRICT': 'New York', + # 'NAME': 'New York', + # 'POPULATION': 8008278, + # 'type_name': 'SQL_PUBLIC_CITY_081f37cc8ac72b10f08ab1273b744497'} + + print('-' * 20) + with client.sql('SELECT _KEY, _VAL FROM CITY WHERE ID = ?', query_args=[1890]) as cursor: + for line in next(cursor): + pprint(obj_to_dict(line)) + # {'COUNTRYCODE': 'CHN', + # 'ID': 1890, + # 
'type_name': 'SQL_PUBLIC_CITY_081f37cc8ac72b10f08ab1273b744497_KEY'} + # {'DISTRICT': 'Shanghai', + # 'NAME': 'Shanghai', + # 'POPULATION': 9696300, + # 'type_name': 'SQL_PUBLIC_CITY_081f37cc8ac72b10f08ab1273b744497'} + + # clean up + for table_name in TableNames: + result = client.sql(Query.DROP_TABLE.format(table_name.value)) diff --git a/examples/readme.md b/examples/readme.md index 3628c82..ebc6b7b 100644 --- a/examples/readme.md +++ b/examples/readme.md @@ -2,16 +2,26 @@ This directory contains the following example files: +- `async_key_value` - asynchronous key-value operations, +- `async_sql` - asynchronous SQL operations, - `binary_basics.py` − basic operations with Complex objects, -- `binary_types.py` - read SQL table as a key-value cache, -- `create_binary.py` − create SQL row with key-value operation, +- `create_binary.py` − create SQL row with key-value operation, +- `expiry_policy.py` - the expiration policy for caches for synchronous and asynchronous operations is demonstrated, - `failover.py` − fail-over connection to Ignite cluster, -- `get_and_put.py` − basic key-value operations, +- `get_and_put.py` − basic key-value operations, +- `get_and_put_complex.py` − key-value operations with different value and key types, - `migrate_binary.py` − work with Complex object schemas, -- `scans.py` − cache scan operation, +- `read_binary.py` − creates caches and fills them with data through SQL queries, demonstrates working with binary objects, +- `scans.py` − cache scan operation, - `sql.py` − use Ignite SQL, - `type_hints.py` − type hints. For the explanation of the examples please refer to the [Examples of usage](https://apache-ignite-binary-protocol-client.readthedocs.io/en/latest/examples.html) section of the `pyignite` documentation. + +You can start Apache Ignite locally for running examples using `docker` and `docker-compose` +```bash +cd ./examples +docker-compose up +``` diff --git a/examples/scans.py b/examples/scans.py index d5f2b48..9346372 100644 --- a/examples/scans.py +++ b/examples/scans.py @@ -13,43 +13,42 @@ # See the License for the specific language governing permissions and # limitations under the License. +from pprint import pprint + from pyignite import Client client = Client() -client.connect('127.0.0.1', 10800) - -my_cache = client.create_cache('my cache') - -my_cache.put_all({'key_{}'.format(v): v for v in range(20)}) -# { -# 'key_0': 0, -# 'key_1': 1, -# 'key_2': 2, -# ... 20 elements in total... -# 'key_18': 18, -# 'key_19': 19 -# } +with client.connect('127.0.0.1', 10800): + my_cache = client.create_cache('my cache') + my_cache.put_all({'key_{}'.format(v): v for v in range(20)}) + # { + # 'key_0': 0, + # 'key_1': 1, + # 'key_2': 2, + # ... 20 elements in total... + # 'key_18': 18, + # 'key_19': 19 + # } -result = my_cache.scan() -for k, v in result: - print(k, v) -# 'key_17' 17 -# 'key_10' 10 -# 'key_6' 6, -# ... 20 elements in total... -# 'key_16' 16 -# 'key_12' 12 + with my_cache.scan() as cursor: + for k, v in cursor: + print(k, v) + # 'key_17' 17 + # 'key_10' 10 + # 'key_6' 6, + # ... 20 elements in total... + # 'key_16' 16 + # 'key_12' 12 -result = my_cache.scan() -print(dict(result)) -# { -# 'key_17': 17, -# 'key_10': 10, -# 'key_6': 6, -# ... 20 elements in total... -# 'key_16': 16, -# 'key_12': 12 -# } + with my_cache.scan() as cursor: + pprint(dict(cursor)) + # { + # 'key_17': 17, + # 'key_10': 10, + # 'key_6': 6, + # ... 20 elements in total... 
+ # 'key_16': 16, + # 'key_12': 12 + # } -my_cache.destroy() -client.close() + my_cache.destroy() diff --git a/examples/sql.py b/examples/sql.py index 8f0ee7c..269b20b 100644 --- a/examples/sql.py +++ b/examples/sql.py @@ -13,286 +13,95 @@ # See the License for the specific language governing permissions and # limitations under the License. -from decimal import Decimal - +from helpers.sql_helper import TableNames, Query, TestData from pyignite import Client - -COUNTRY_TABLE_NAME = 'Country' -CITY_TABLE_NAME = 'City' -LANGUAGE_TABLE_NAME = 'CountryLanguage' - -COUNTRY_CREATE_TABLE_QUERY = '''CREATE TABLE Country ( - Code CHAR(3) PRIMARY KEY, - Name CHAR(52), - Continent CHAR(50), - Region CHAR(26), - SurfaceArea DECIMAL(10,2), - IndepYear SMALLINT(6), - Population INT(11), - LifeExpectancy DECIMAL(3,1), - GNP DECIMAL(10,2), - GNPOld DECIMAL(10,2), - LocalName CHAR(45), - GovernmentForm CHAR(45), - HeadOfState CHAR(60), - Capital INT(11), - Code2 CHAR(2) -)''' - -COUNTRY_INSERT_QUERY = '''INSERT INTO Country( - Code, Name, Continent, Region, - SurfaceArea, IndepYear, Population, - LifeExpectancy, GNP, GNPOld, - LocalName, GovernmentForm, HeadOfState, - Capital, Code2 -) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''' - -CITY_CREATE_TABLE_QUERY = '''CREATE TABLE City ( - ID INT(11), - Name CHAR(35), - CountryCode CHAR(3), - District CHAR(20), - Population INT(11), - PRIMARY KEY (ID, CountryCode) -) WITH "affinityKey=CountryCode"''' - -CITY_CREATE_INDEX = ''' -CREATE INDEX idx_country_code ON city (CountryCode)''' - -CITY_INSERT_QUERY = '''INSERT INTO City( - ID, Name, CountryCode, District, Population -) VALUES (?, ?, ?, ?, ?)''' - -LANGUAGE_CREATE_TABLE_QUERY = '''CREATE TABLE CountryLanguage ( - CountryCode CHAR(3), - Language CHAR(30), - IsOfficial BOOLEAN, - Percentage DECIMAL(4,1), - PRIMARY KEY (CountryCode, Language) -) WITH "affinityKey=CountryCode"''' - -LANGUAGE_CREATE_INDEX = ''' -CREATE INDEX idx_lang_country_code ON CountryLanguage (CountryCode)''' - -LANGUAGE_INSERT_QUERY = '''INSERT INTO CountryLanguage( - CountryCode, Language, IsOfficial, Percentage -) VALUES (?, ?, ?, ?)''' - -DROP_TABLE_QUERY = '''DROP TABLE {} IF EXISTS''' - -COUNTRY_DATA = [ - [ - 'USA', 'United States', 'North America', 'North America', - Decimal('9363520.00'), 1776, 278357000, - Decimal('77.1'), Decimal('8510700.00'), Decimal('8110900.00'), - 'United States', 'Federal Republic', 'George W. 
Bush', - 3813, 'US', - ], - [ - 'IND', 'India', 'Asia', 'Southern and Central Asia', - Decimal('3287263.00'), 1947, 1013662000, - Decimal('62.5'), Decimal('447114.00'), Decimal('430572.00'), - 'Bharat/India', 'Federal Republic', 'Kocheril Raman Narayanan', - 1109, 'IN', - ], - [ - 'CHN', 'China', 'Asia', 'Eastern Asia', - Decimal('9572900.00'), -1523, 1277558000, - Decimal('71.4'), Decimal('982268.00'), Decimal('917719.00'), - 'Zhongquo', 'PeoplesRepublic', 'Jiang Zemin', - 1891, 'CN', - ], -] - -CITY_DATA = [ - [3793, 'New York', 'USA', 'New York', 8008278], - [3794, 'Los Angeles', 'USA', 'California', 3694820], - [3795, 'Chicago', 'USA', 'Illinois', 2896016], - [3796, 'Houston', 'USA', 'Texas', 1953631], - [3797, 'Philadelphia', 'USA', 'Pennsylvania', 1517550], - [3798, 'Phoenix', 'USA', 'Arizona', 1321045], - [3799, 'San Diego', 'USA', 'California', 1223400], - [3800, 'Dallas', 'USA', 'Texas', 1188580], - [3801, 'San Antonio', 'USA', 'Texas', 1144646], - [3802, 'Detroit', 'USA', 'Michigan', 951270], - [3803, 'San Jose', 'USA', 'California', 894943], - [3804, 'Indianapolis', 'USA', 'Indiana', 791926], - [3805, 'San Francisco', 'USA', 'California', 776733], - [1024, 'Mumbai (Bombay)', 'IND', 'Maharashtra', 10500000], - [1025, 'Delhi', 'IND', 'Delhi', 7206704], - [1026, 'Calcutta [Kolkata]', 'IND', 'West Bengali', 4399819], - [1027, 'Chennai (Madras)', 'IND', 'Tamil Nadu', 3841396], - [1028, 'Hyderabad', 'IND', 'Andhra Pradesh', 2964638], - [1029, 'Ahmedabad', 'IND', 'Gujarat', 2876710], - [1030, 'Bangalore', 'IND', 'Karnataka', 2660088], - [1031, 'Kanpur', 'IND', 'Uttar Pradesh', 1874409], - [1032, 'Nagpur', 'IND', 'Maharashtra', 1624752], - [1033, 'Lucknow', 'IND', 'Uttar Pradesh', 1619115], - [1034, 'Pune', 'IND', 'Maharashtra', 1566651], - [1035, 'Surat', 'IND', 'Gujarat', 1498817], - [1036, 'Jaipur', 'IND', 'Rajasthan', 1458483], - [1890, 'Shanghai', 'CHN', 'Shanghai', 9696300], - [1891, 'Peking', 'CHN', 'Peking', 7472000], - [1892, 'Chongqing', 'CHN', 'Chongqing', 6351600], - [1893, 'Tianjin', 'CHN', 'Tianjin', 5286800], - [1894, 'Wuhan', 'CHN', 'Hubei', 4344600], - [1895, 'Harbin', 'CHN', 'Heilongjiang', 4289800], - [1896, 'Shenyang', 'CHN', 'Liaoning', 4265200], - [1897, 'Kanton [Guangzhou]', 'CHN', 'Guangdong', 4256300], - [1898, 'Chengdu', 'CHN', 'Sichuan', 3361500], - [1899, 'Nanking [Nanjing]', 'CHN', 'Jiangsu', 2870300], - [1900, 'Changchun', 'CHN', 'Jilin', 2812000], - [1901, 'Xi´an', 'CHN', 'Shaanxi', 2761400], - [1902, 'Dalian', 'CHN', 'Liaoning', 2697000], - [1903, 'Qingdao', 'CHN', 'Shandong', 2596000], - [1904, 'Jinan', 'CHN', 'Shandong', 2278100], - [1905, 'Hangzhou', 'CHN', 'Zhejiang', 2190500], - [1906, 'Zhengzhou', 'CHN', 'Henan', 2107200], -] - -LANGUAGE_DATA = [ - ['USA', 'Chinese', False, Decimal('0.6')], - ['USA', 'English', True, Decimal('86.2')], - ['USA', 'French', False, Decimal('0.7')], - ['USA', 'German', False, Decimal('0.7')], - ['USA', 'Italian', False, Decimal('0.6')], - ['USA', 'Japanese', False, Decimal('0.2')], - ['USA', 'Korean', False, Decimal('0.3')], - ['USA', 'Polish', False, Decimal('0.3')], - ['USA', 'Portuguese', False, Decimal('0.2')], - ['USA', 'Spanish', False, Decimal('7.5')], - ['USA', 'Tagalog', False, Decimal('0.4')], - ['USA', 'Vietnamese', False, Decimal('0.2')], - ['IND', 'Asami', False, Decimal('1.5')], - ['IND', 'Bengali', False, Decimal('8.2')], - ['IND', 'Gujarati', False, Decimal('4.8')], - ['IND', 'Hindi', True, Decimal('39.9')], - ['IND', 'Kannada', False, Decimal('3.9')], - ['IND', 'Malajalam', False, Decimal('3.6')], - 
['IND', 'Marathi', False, Decimal('7.4')], - ['IND', 'Orija', False, Decimal('3.3')], - ['IND', 'Punjabi', False, Decimal('2.8')], - ['IND', 'Tamil', False, Decimal('6.3')], - ['IND', 'Telugu', False, Decimal('7.8')], - ['IND', 'Urdu', False, Decimal('5.1')], - ['CHN', 'Chinese', True, Decimal('92.0')], - ['CHN', 'Dong', False, Decimal('0.2')], - ['CHN', 'Hui', False, Decimal('0.8')], - ['CHN', 'Mantšu', False, Decimal('0.9')], - ['CHN', 'Miao', False, Decimal('0.7')], - ['CHN', 'Mongolian', False, Decimal('0.4')], - ['CHN', 'Puyi', False, Decimal('0.2')], - ['CHN', 'Tibetan', False, Decimal('0.4')], - ['CHN', 'Tujia', False, Decimal('0.5')], - ['CHN', 'Uighur', False, Decimal('0.6')], - ['CHN', 'Yi', False, Decimal('0.6')], - ['CHN', 'Zhuang', False, Decimal('1.4')], -] - - # establish connection client = Client() -client.connect('127.0.0.1', 10800) - -# create tables -for query in [ - COUNTRY_CREATE_TABLE_QUERY, - CITY_CREATE_TABLE_QUERY, - LANGUAGE_CREATE_TABLE_QUERY, -]: - client.sql(query) - -# create indices -for query in [CITY_CREATE_INDEX, LANGUAGE_CREATE_INDEX]: - client.sql(query) - -# load data -for row in COUNTRY_DATA: - client.sql(COUNTRY_INSERT_QUERY, query_args=row) - -for row in CITY_DATA: - client.sql(CITY_INSERT_QUERY, query_args=row) - -for row in LANGUAGE_DATA: - client.sql(LANGUAGE_INSERT_QUERY, query_args=row) - -# 10 most populated cities (with pagination) -MOST_POPULATED_QUERY = ''' -SELECT name, population FROM City ORDER BY population DESC LIMIT 10''' - -result = client.sql(MOST_POPULATED_QUERY) -print('Most 10 populated cities:') -for row in result: - print(row) -# Most 10 populated cities: -# ['Mumbai (Bombay)', 10500000] -# ['Shanghai', 9696300] -# ['New York', 8008278] -# ['Peking', 7472000] -# ['Delhi', 7206704] -# ['Chongqing', 6351600] -# ['Tianjin', 5286800] -# ['Calcutta [Kolkata]', 4399819] -# ['Wuhan', 4344600] -# ['Harbin', 4289800] - -# 10 most populated cities in 3 countries (with pagination and header row) -MOST_POPULATED_IN_3_COUNTRIES_QUERY = ''' -SELECT country.name as country_name, city.name as city_name, MAX(city.population) AS max_pop FROM country - JOIN city ON city.countrycode = country.code - WHERE country.code IN ('USA','IND','CHN') - GROUP BY country.name, city.name ORDER BY max_pop DESC LIMIT 10 -''' - -result = client.sql( - MOST_POPULATED_IN_3_COUNTRIES_QUERY, - include_field_names=True, -) -print('Most 10 populated cities in USA, India and China:') -print(next(result)) -print('----------------------------------------') -for row in result: - print(row) -# Most 10 populated cities in USA, India and China: -# ['COUNTRY_NAME', 'CITY_NAME', 'MAX_POP'] -# ---------------------------------------- -# ['India', 'Mumbai (Bombay)', 10500000] -# ['China', 'Shanghai', 9696300] -# ['United States', 'New York', 8008278] -# ['China', 'Peking', 7472000] -# ['India', 'Delhi', 7206704] -# ['China', 'Chongqing', 6351600] -# ['China', 'Tianjin', 5286800] -# ['India', 'Calcutta [Kolkata]', 4399819] -# ['China', 'Wuhan', 4344600] -# ['China', 'Harbin', 4289800] - -# show city info -CITY_INFO_QUERY = '''SELECT * FROM City WHERE id = ?''' - -result = client.sql( - CITY_INFO_QUERY, - query_args=[3802], - include_field_names=True, -) -field_names = next(result) -field_data = list(*result) - -print('City info:') -for field_name, field_value in zip(field_names*len(field_data), field_data): - print('{}: {}'.format(field_name, field_value)) -# City info: -# ID: 3802 -# NAME: Detroit -# COUNTRYCODE: USA -# DISTRICT: Michigan -# POPULATION: 951270 - -# clean up -for 
table_name in [ - CITY_TABLE_NAME, - LANGUAGE_TABLE_NAME, - COUNTRY_TABLE_NAME, -]: - result = client.sql(DROP_TABLE_QUERY.format(table_name)) +with client.connect('127.0.0.1', 10800): + # create tables + for query in [ + Query.COUNTRY_CREATE_TABLE, + Query.CITY_CREATE_TABLE, + Query.LANGUAGE_CREATE_TABLE, + ]: + client.sql(query) + + # create indices + for query in [Query.CITY_CREATE_INDEX, Query.LANGUAGE_CREATE_INDEX]: + client.sql(query) + + # load data + for row in TestData.COUNTRY: + client.sql(Query.COUNTRY_INSERT, query_args=row) + + for row in TestData.CITY: + client.sql(Query.CITY_INSERT, query_args=row) + + for row in TestData.LANGUAGE: + client.sql(Query.LANGUAGE_INSERT, query_args=row) + + # 10 most populated cities (with pagination) + with client.sql('SELECT name, population FROM City ORDER BY population DESC LIMIT 10') as cursor: + print('Most 10 populated cities:') + for row in cursor: + print(row) + # Most 10 populated cities: + # ['Mumbai (Bombay)', 10500000] + # ['Shanghai', 9696300] + # ['New York', 8008278] + # ['Peking', 7472000] + # ['Delhi', 7206704] + # ['Chongqing', 6351600] + # ['Tianjin', 5286800] + # ['Calcutta [Kolkata]', 4399819] + # ['Wuhan', 4344600] + # ['Harbin', 4289800] + print('-' * 20) + # 10 most populated cities in 3 countries (with pagination and header row) + MOST_POPULATED_IN_3_COUNTRIES = ''' + SELECT country.name as country_name, city.name as city_name, MAX(city.population) AS max_pop FROM country + JOIN city ON city.countrycode = country.code + WHERE country.code IN ('USA','IND','CHN') + GROUP BY country.name, city.name ORDER BY max_pop DESC LIMIT 10 + ''' + + with client.sql(MOST_POPULATED_IN_3_COUNTRIES, include_field_names=True) as cursor: + print('Most 10 populated cities in USA, India and China:') + table_str_pattern = '{:15}\t| {:20}\t| {}' + print(table_str_pattern.format(*next(cursor))) + print('*' * 50) + for row in cursor: + print(table_str_pattern.format(*row)) + # Most 10 populated cities in USA, India and China: + # COUNTRY_NAME | CITY_NAME | MAX_POP + # ************************************************** + # India | Mumbai (Bombay) | 10500000 + # China | Shanghai | 9696300 + # United States | New York | 8008278 + # China | Peking | 7472000 + # India | Delhi | 7206704 + # China | Chongqing | 6351600 + # China | Tianjin | 5286800 + # India | Calcutta [Kolkata] | 4399819 + # China | Wuhan | 4344600 + # China | Harbin | 4289800 + print('-' * 20) + + # Show city info + with client.sql('SELECT * FROM City WHERE id = ?', query_args=[3802], include_field_names=True) as cursor: + field_names = next(cursor) + field = list(*cursor) + print('City info:') + for field_name, field_value in zip(field_names * len(field), field): + print(f'{field_name}: {field_value}') + # City info: + # ID: 3802 + # NAME: Detroit + # COUNTRYCODE: USA + # DISTRICT: Michigan + # POPULATION: 951270 + + # Clean up + for table_name in TableNames: + result = client.sql(Query.DROP_TABLE.format(table_name.value)) diff --git a/examples/transactions.py b/examples/transactions.py new file mode 100644 index 0000000..b4231fd --- /dev/null +++ b/examples/transactions.py @@ -0,0 +1,148 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
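Note (illustrative, not part of this patch): the rewritten `sql.py` and `read_binary.py` import `TableNames`, `Query` and `TestData` from `examples/helpers/sql_helper.py`, which is not included in this hunk. A hypothetical, abridged sketch of its shape, reusing statements and rows that were removed from the examples above:

```python
# Hypothetical sketch of examples/helpers/sql_helper.py; only the structure is shown.
from enum import Enum


class TableNames(Enum):
    CITY = 'City'
    LANGUAGE = 'CountryLanguage'
    COUNTRY = 'Country'


class Query:
    DROP_TABLE = 'DROP TABLE {} IF EXISTS'
    CITY_CREATE_INDEX = 'CREATE INDEX idx_country_code ON city (CountryCode)'
    CITY_INSERT = ('INSERT INTO City(ID, Name, CountryCode, District, Population) '
                   'VALUES (?, ?, ?, ?, ?)')
    # ...plus COUNTRY_CREATE_TABLE, CITY_CREATE_TABLE, LANGUAGE_CREATE_TABLE,
    # LANGUAGE_CREATE_INDEX, COUNTRY_INSERT and LANGUAGE_INSERT, as removed above.


class TestData:
    CITY = [
        [3793, 'New York', 'USA', 'New York', 8008278],
        # ...the remaining CITY rows removed above
    ]
    # COUNTRY and LANGUAGE hold the country and language rows removed above.
```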
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +import sys +import time + +from pyignite import AioClient, Client +from pyignite.datatypes import TransactionIsolation, TransactionConcurrency +from pyignite.datatypes.cache_config import CacheAtomicityMode +from pyignite.datatypes.prop_codes import PROP_CACHE_ATOMICITY_MODE, PROP_NAME +from pyignite.exceptions import CacheError + + +async def async_example(): + client = AioClient() + async with client.connect('127.0.0.1', 10800): + cache = await client.get_or_create_cache({ + PROP_NAME: 'tx_cache', + PROP_CACHE_ATOMICITY_MODE: CacheAtomicityMode.TRANSACTIONAL + }) + + # starting transaction + key = 1 + async with client.tx_start( + isolation=TransactionIsolation.REPEATABLE_READ, + concurrency=TransactionConcurrency.PESSIMISTIC + ) as tx: + await cache.put(key, 'success') + await tx.commit() + + # key=1 value=success + val = await cache.get(key) + print(f"key={key} value={val}") + + # rollback transaction. + try: + async with client.tx_start( + isolation=TransactionIsolation.REPEATABLE_READ, + concurrency=TransactionConcurrency.PESSIMISTIC + ): + await cache.put(key, 'fail') + raise RuntimeError('test') + except RuntimeError: + pass + + # key=1 value=success + val = await cache.get(key) + print(f"key={key} value={val}") + + # rollback transaction on timeout. + try: + async with client.tx_start(timeout=1000, label='long-tx') as tx: + await cache.put(key, 'fail') + await asyncio.sleep(2.0) + await tx.commit() + except CacheError as e: + # Cache transaction timed out: GridNearTxLocal[...timeout=1000, ... label=long-tx] + print(e) + + # key=1 value=success + val = await cache.get(key) + print(f"key={key} value={val}") + + # destroy cache + await cache.destroy() + + +def sync_example(): + client = Client() + with client.connect('127.0.0.1', 10800): + cache = client.get_or_create_cache({ + PROP_NAME: 'tx_cache', + PROP_CACHE_ATOMICITY_MODE: CacheAtomicityMode.TRANSACTIONAL + }) + + # starting transaction + key = 1 + with client.tx_start( + isolation=TransactionIsolation.REPEATABLE_READ, + concurrency=TransactionConcurrency.PESSIMISTIC + ) as tx: + cache.put(key, 'success') + tx.commit() + + # key=1 value=success + print(f"key={key} value={cache.get(key)}") + + # rollback transaction. + try: + with client.tx_start( + isolation=TransactionIsolation.REPEATABLE_READ, + concurrency=TransactionConcurrency.PESSIMISTIC + ): + cache.put(key, 'fail') + raise RuntimeError('test') + except RuntimeError: + pass + + # key=1 value=success + print(f"key={key} value={cache.get(key)}") + + # rollback transaction on timeout. + try: + with client.tx_start(timeout=1000, label='long-tx') as tx: + cache.put(key, 'fail') + time.sleep(2.0) + tx.commit() + except CacheError as e: + # Cache transaction timed out: GridNearTxLocal[...timeout=1000, ... 
label=long-tx] + print(e) + + # key=1 value=success + print(f"key={key} value={cache.get(key)}") + + # destroy cache + cache.destroy() + + +def check_is_transactions_supported(): + client = Client() + with client.connect('127.0.0.1', 10800): + if not client.protocol_context.is_transactions_supported(): + print("'Transactions' API is not supported by cluster. Finishing...") + exit(0) + + +if __name__ == '__main__': + check_is_transactions_supported() + + print("Starting sync example") + sync_example() + + if sys.version_info >= (3, 7): + print("Starting async example") + asyncio.run(async_example()) diff --git a/examples/type_hints.py b/examples/type_hints.py index 4cc44c0..f8adf70 100644 --- a/examples/type_hints.py +++ b/examples/type_hints.py @@ -17,35 +17,33 @@ from pyignite.datatypes import CharObject, ShortObject client = Client() -client.connect('127.0.0.1', 10800) +with client.connect('127.0.0.1', 10800): + my_cache = client.get_or_create_cache('my cache') -my_cache = client.get_or_create_cache('my cache') + my_cache.put('my key', 42) + # value ‘42’ takes 9 bytes of memory as a LongObject -my_cache.put('my key', 42) -# value ‘42’ takes 9 bytes of memory as a LongObject + my_cache.put('my key', 42, value_hint=ShortObject) + # value ‘42’ takes only 3 bytes as a ShortObject -my_cache.put('my key', 42, value_hint=ShortObject) -# value ‘42’ takes only 3 bytes as a ShortObject + my_cache.put('a', 1) + # ‘a’ is a key of type String -my_cache.put('a', 1) -# ‘a’ is a key of type String + my_cache.put('a', 2, key_hint=CharObject) + # another key ‘a’ of type CharObject was created -my_cache.put('a', 2, key_hint=CharObject) -# another key ‘a’ of type CharObject was created + value = my_cache.get('a') + print(value) + # 1 -value = my_cache.get('a') -print(value) -# 1 + value = my_cache.get('a', key_hint=CharObject) + print(value) + # 2 -value = my_cache.get('a', key_hint=CharObject) -print(value) -# 2 + # now let us delete both keys at once + my_cache.remove_keys([ + 'a', # a default type key + ('a', CharObject), # a key of type CharObject + ]) -# now let us delete both keys at once -my_cache.remove_keys([ - 'a', # a default type key - ('a', CharObject), # a key of type CharObject -]) - -my_cache.destroy() -client.close() + my_cache.destroy() diff --git a/pyignite/__init__.py b/pyignite/__init__.py index 0ac346f..1b0a9c2 100644 --- a/pyignite/__init__.py +++ b/pyignite/__init__.py @@ -14,4 +14,7 @@ # limitations under the License. from pyignite.client import Client +from pyignite.aio_client import AioClient from pyignite.binary import GenericObjectMeta + +__version__ = '0.6.0-dev' diff --git a/pyignite/aio_cache.py b/pyignite/aio_cache.py new file mode 100644 index 0000000..7a92a9a --- /dev/null +++ b/pyignite/aio_cache.py @@ -0,0 +1,492 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import asyncio +from typing import Any, Iterable, Optional, Union + +from .api.tx_api import get_tx_connection +from .datatypes import ExpiryPolicy +from .datatypes.internal import AnyDataObject +from .exceptions import CacheCreationError, CacheError, ParameterError +from .utils import status_to_exception +from .api.cache_config import ( + cache_create_async, cache_get_or_create_async, cache_destroy_async, cache_get_configuration_async, + cache_create_with_config_async, cache_get_or_create_with_config_async +) +from .api.key_value import ( + cache_get_async, cache_contains_key_async, cache_clear_key_async, cache_clear_keys_async, cache_clear_async, + cache_replace_async, cache_put_all_async, cache_get_all_async, cache_put_async, cache_contains_keys_async, + cache_get_and_put_async, cache_get_and_put_if_absent_async, cache_put_if_absent_async, cache_get_and_remove_async, + cache_get_and_replace_async, cache_remove_key_async, cache_remove_keys_async, cache_remove_all_async, + cache_remove_if_equals_async, cache_replace_if_equals_async, cache_get_size_async, +) +from .cursors import AioScanCursor +from .cache import __parse_settings, BaseCache + + +async def get_cache(client: 'AioClient', settings: Union[str, dict]) -> 'AioCache': + name, settings = __parse_settings(settings) + if settings: + raise ParameterError('Only cache name allowed as a parameter') + + return AioCache(client, name) + + +async def create_cache(client: 'AioClient', settings: Union[str, dict]) -> 'AioCache': + name, settings = __parse_settings(settings) + + conn = await client.random_node() + if settings: + result = await cache_create_with_config_async(conn, settings) + else: + result = await cache_create_async(conn, name) + + if result.status != 0: + raise CacheCreationError(result.message) + + return AioCache(client, name) + + +async def get_or_create_cache(client: 'AioClient', settings: Union[str, dict]) -> 'AioCache': + name, settings = __parse_settings(settings) + + conn = await client.random_node() + if settings: + result = await cache_get_or_create_with_config_async(conn, settings) + else: + result = await cache_get_or_create_async(conn, name) + + if result.status != 0: + raise CacheCreationError(result.message) + + return AioCache(client, name) + + +class AioCache(BaseCache): + """ + Ignite cache abstraction. Users should never use this class directly, + but construct its instances with + :py:meth:`~pyignite.aio_client.AioClient.create_cache`, + :py:meth:`~pyignite.aio_client.AioClient.get_or_create_cache` or + :py:meth:`~pyignite.aio_client.AioClient.get_cache` methods instead. See + :ref:`this example ` on how to do it. + """ + def __init__(self, client: 'AioClient', name: str, expiry_policy: ExpiryPolicy = None): + """ + Initialize async cache object. For internal use. + + :param client: Async Ignite client, + :param name: Cache name. + """ + super().__init__(client, name, expiry_policy) + + async def _get_best_node(self, key=None, key_hint=None): + tx_conn = get_tx_connection() + if tx_conn: + return tx_conn + return await self.client.get_best_node(self, key, key_hint) + + async def settings(self) -> Optional[dict]: + """ + Lazy Cache settings. See the :ref:`example ` + of reading this property. + + All cache properties are documented here: :ref:`cache_props`. + + :return: dict of cache properties and their values. 
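Note (illustrative, not part of this patch): reading the lazily fetched cache configuration with the new async API might look like the fragment below; the cache name and node address are assumptions.

```python
import asyncio

from pyignite import AioClient
from pyignite.datatypes.prop_codes import PROP_NAME


async def show_settings():
    client = AioClient()
    async with client.connect('127.0.0.1', 10800):
        cache = await client.get_or_create_cache('my cache')
        settings = await cache.settings()  # fetched once, then served from the local copy
        print(settings[PROP_NAME])         # 'my cache'
        await cache.destroy()


asyncio.run(show_settings())
```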
+ """ + if self._settings is None: + conn = await self._get_best_node() + config_result = await cache_get_configuration_async(conn, self.cache_info) + + if config_result.status == 0: + self._settings = config_result.value + else: + raise CacheError(config_result.message) + + return self._settings + + @status_to_exception(CacheError) + async def destroy(self): + """ + Destroys cache with a given name. + """ + conn = await self._get_best_node() + return await cache_destroy_async(conn, self.cache_id) + + @status_to_exception(CacheError) + async def get(self, key, key_hint: object = None) -> Any: + """ + Retrieves a value from cache by key. + + :param key: key for the cache entry. Can be of any supported type, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :return: value retrieved. + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self._get_best_node(key, key_hint) + result = await cache_get_async(conn, self.cache_info, key, key_hint=key_hint) + result.value = await self.client.unwrap_binary(result.value) + return result + + @status_to_exception(CacheError) + async def put(self, key, value, key_hint: object = None, value_hint: object = None): + """ + Puts a value with a given key to cache (overwriting existing value + if any). + + :param key: key for the cache entry. Can be of any supported type, + :param value: value for the key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :param value_hint: (optional) Ignite data type, for which the given + value should be converted. + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self._get_best_node(key, key_hint) + return await cache_put_async(conn, self.cache_info, key, value, key_hint=key_hint, value_hint=value_hint) + + @status_to_exception(CacheError) + async def get_all(self, keys: list) -> dict: + """ + Retrieves multiple key-value pairs from cache. + + :param keys: list of keys or tuples of (key, key_hint), + :return: a dict of key-value pairs. + """ + conn = await self._get_best_node() + result = await cache_get_all_async(conn, self.cache_info, keys) + if result.value: + keys = list(result.value.keys()) + values = await asyncio.gather(*[self.client.unwrap_binary(value) for value in result.value.values()]) + + for i, key in enumerate(keys): + result.value[key] = values[i] + return result + + @status_to_exception(CacheError) + async def put_all(self, pairs: dict): + """ + Puts multiple key-value pairs to cache (overwriting existing + associations if any). + + :param pairs: dictionary type parameters, contains key-value pairs + to save. Each key or value can be an item of representable + Python type or a tuple of (item, hint), + """ + conn = await self._get_best_node() + return await cache_put_all_async(conn, self.cache_info, pairs) + + @status_to_exception(CacheError) + async def replace(self, key, value, key_hint: object = None, value_hint: object = None): + """ + Puts a value with a given key to cache only if the key already exist. + + :param key: key for the cache entry. Can be of any supported type, + :param value: value for the key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :param value_hint: (optional) Ignite data type, for which the given + value should be converted. 
+ """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self._get_best_node(key, key_hint) + result = await cache_replace_async(conn, self.cache_info, key, value, key_hint=key_hint, value_hint=value_hint) + result.value = await self.client.unwrap_binary(result.value) + return result + + @status_to_exception(CacheError) + async def clear(self, keys: Optional[list] = None): + """ + Clears the cache without notifying listeners or cache writers. + + :param keys: (optional) list of cache keys or (key, key type + hint) tuples to clear (default: clear all). + """ + conn = await self._get_best_node() + if keys: + return await cache_clear_keys_async(conn, self.cache_info, keys) + else: + return await cache_clear_async(conn, self.cache_info) + + @status_to_exception(CacheError) + async def clear_key(self, key, key_hint: object = None): + """ + Clears the cache key without notifying listeners or cache writers. + + :param key: key for the cache entry, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self._get_best_node(key, key_hint) + return await cache_clear_key_async(conn, self.cache_info, key, key_hint=key_hint) + + @status_to_exception(CacheError) + async def clear_keys(self, keys: Iterable): + """ + Clears the cache key without notifying listeners or cache writers. + + :param keys: a list of keys or (key, type hint) tuples + """ + conn = await self._get_best_node() + return await cache_clear_keys_async(conn, self.cache_info, keys) + + @status_to_exception(CacheError) + async def contains_key(self, key, key_hint=None) -> bool: + """ + Returns a value indicating whether given key is present in cache. + + :param key: key for the cache entry. Can be of any supported type, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :return: boolean `True` when key is present, `False` otherwise. + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self._get_best_node(key, key_hint) + return await cache_contains_key_async(conn, self.cache_info, key, key_hint=key_hint) + + @status_to_exception(CacheError) + async def contains_keys(self, keys: Iterable) -> bool: + """ + Returns a value indicating whether all given keys are present in cache. + + :param keys: a list of keys or (key, type hint) tuples, + :return: boolean `True` when all keys are present, `False` otherwise. + """ + conn = await self._get_best_node() + return await cache_contains_keys_async(conn, self.cache_info, keys) + + @status_to_exception(CacheError) + async def get_and_put(self, key, value, key_hint=None, value_hint=None) -> Any: + """ + Puts a value with a given key to cache, and returns the previous value + for that key, or null value if there was not such key. + + :param key: key for the cache entry. Can be of any supported type, + :param value: value for the key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :param value_hint: (optional) Ignite data type, for which the given + value should be converted. + :return: old value or None. 
+ """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self._get_best_node(key, key_hint) + result = await cache_get_and_put_async(conn, self.cache_info, key, value, key_hint, value_hint) + + result.value = await self.client.unwrap_binary(result.value) + return result + + @status_to_exception(CacheError) + async def get_and_put_if_absent(self, key, value, key_hint=None, value_hint=None): + """ + Puts a value with a given key to cache only if the key does not + already exist. + + :param key: key for the cache entry. Can be of any supported type, + :param value: value for the key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :param value_hint: (optional) Ignite data type, for which the given + value should be converted, + :return: old value or None. + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self._get_best_node(key, key_hint) + result = await cache_get_and_put_if_absent_async(conn, self.cache_info, key, value, key_hint, value_hint) + result.value = await self.client.unwrap_binary(result.value) + return result + + @status_to_exception(CacheError) + async def put_if_absent(self, key, value, key_hint=None, value_hint=None): + """ + Puts a value with a given key to cache only if the key does not + already exist. + + :param key: key for the cache entry. Can be of any supported type, + :param value: value for the key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :param value_hint: (optional) Ignite data type, for which the given + value should be converted. + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self._get_best_node(key, key_hint) + return await cache_put_if_absent_async(conn, self.cache_info, key, value, key_hint, value_hint) + + @status_to_exception(CacheError) + async def get_and_remove(self, key, key_hint=None) -> Any: + """ + Removes the cache entry with specified key, returning the value. + + :param key: key for the cache entry. Can be of any supported type, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :return: old value or None. + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self._get_best_node(key, key_hint) + result = await cache_get_and_remove_async(conn, self.cache_info, key, key_hint) + result.value = await self.client.unwrap_binary(result.value) + return result + + @status_to_exception(CacheError) + async def get_and_replace(self, key, value, key_hint=None, value_hint=None) -> Any: + """ + Puts a value with a given key to cache, returning previous value + for that key, if and only if there is a value currently mapped + for that key. + + :param key: key for the cache entry. Can be of any supported type, + :param value: value for the key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :param value_hint: (optional) Ignite data type, for which the given + value should be converted. + :return: old value or None. 
+ """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self._get_best_node(key, key_hint) + result = await cache_get_and_replace_async(conn, self.cache_info, key, value, key_hint, value_hint) + result.value = await self.client.unwrap_binary(result.value) + return result + + @status_to_exception(CacheError) + async def remove_key(self, key, key_hint=None): + """ + Clears the cache key without notifying listeners or cache writers. + + :param key: key for the cache entry, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self._get_best_node(key, key_hint) + return await cache_remove_key_async(conn, self.cache_info, key, key_hint) + + @status_to_exception(CacheError) + async def remove_keys(self, keys: list): + """ + Removes cache entries by given list of keys, notifying listeners + and cache writers. + + :param keys: list of keys or tuples of (key, key_hint) to remove. + """ + conn = await self._get_best_node() + return await cache_remove_keys_async(conn, self.cache_info, keys) + + @status_to_exception(CacheError) + async def remove_all(self): + """ + Removes all cache entries, notifying listeners and cache writers. + """ + conn = await self._get_best_node() + return await cache_remove_all_async(conn, self.cache_info) + + @status_to_exception(CacheError) + async def remove_if_equals(self, key, sample, key_hint=None, sample_hint=None): + """ + Removes an entry with a given key if provided value is equal to + actual value, notifying listeners and cache writers. + + :param key: key for the cache entry, + :param sample: a sample to compare the stored value with, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :param sample_hint: (optional) Ignite data type, for whic + the given sample should be converted. + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self._get_best_node(key, key_hint) + return await cache_remove_if_equals_async(conn, self.cache_info, key, sample, key_hint, sample_hint) + + @status_to_exception(CacheError) + async def replace_if_equals(self, key, sample, value, key_hint=None, sample_hint=None, value_hint=None) -> Any: + """ + Puts a value with a given key to cache only if the key already exists + and value equals provided sample. + + :param key: key for the cache entry, + :param sample: a sample to compare the stored value with, + :param value: new value for the given key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :param sample_hint: (optional) Ignite data type, for whic + the given sample should be converted + :param value_hint: (optional) Ignite data type, for which the given + value should be converted, + :return: boolean `True` when key is present, `False` otherwise. + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self._get_best_node(key, key_hint) + result = await cache_replace_if_equals_async(conn, self.cache_info, key, sample, value, key_hint, sample_hint, + value_hint) + result.value = await self.client.unwrap_binary(result.value) + return result + + @status_to_exception(CacheError) + async def get_size(self, peek_modes=None): + """ + Gets the number of entries in cache. 
+ + :param peek_modes: (optional) limit count to near cache partition + (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache + (PeekModes.BACKUP). Defaults to primary cache partitions (PeekModes.PRIMARY), + :return: integer number of cache entries. + """ + conn = await self._get_best_node() + return await cache_get_size_async(conn, self.cache_info, peek_modes) + + def scan(self, page_size: int = 1, partitions: int = -1, local: bool = False) -> AioScanCursor: + """ + Returns all key-value pairs from the cache, similar to `get_all`, but + with internal pagination, which is slower, but safer. + + :param page_size: (optional) page size. Default size is 1 (slowest + and safest), + :param partitions: (optional) number of partitions to query + (negative to query entire cache), + :param local: (optional) pass True if this query should be executed + on local node only. Defaults to False, + :return: async scan query cursor + """ + return AioScanCursor(self.client, self.cache_info, page_size, partitions, local) diff --git a/pyignite/aio_client.py b/pyignite/aio_client.py new file mode 100644 index 0000000..b6ded74 --- /dev/null +++ b/pyignite/aio_client.py @@ -0,0 +1,540 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import asyncio +import random +import sys +from itertools import chain +from typing import Iterable, Type, Union, Any, Dict, Optional, Sequence + +from .aio_cluster import AioCluster +from .api import cache_get_node_partitions_async +from .api.binary import get_binary_type_async, put_binary_type_async +from .api.cache_config import cache_get_names_async +from .cache import BaseCache +from .client import BaseClient +from .cursors import AioSqlFieldsCursor +from .aio_cache import AioCache, get_cache, create_cache, get_or_create_cache +from .connection import AioConnection +from .constants import AFFINITY_RETRIES, AFFINITY_DELAY +from .datatypes import BinaryObject, TransactionConcurrency, TransactionIsolation +from .exceptions import BinaryTypeError, CacheError, ReconnectError, connection_errors, NotSupportedError +from .queries.cache_info import CacheInfo +from .stream import AioBinaryStream, READ_BACKWARD +from .transaction import AioTransaction +from .utils import cache_id, entity_id, status_to_exception + + +__all__ = ['AioClient'] + + +class _ConnectionContextManager: + def __init__(self, client, nodes): + self.client = client + self.nodes = nodes + + def __await__(self): + return (yield from self.__aenter__().__await__()) + + async def __aenter__(self): + await self.client._connect(self.nodes) + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.client.close() + + +class AioClient(BaseClient): + """ + Asynchronous Client implementation. 
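Note (illustrative, not part of this patch): the async scan cursor returned above is meant to be used as an async context manager and iterator, mirroring the synchronous `scans.py` example; the helper name and data are assumptions.

```python
async def scan_demo(cache):
    await cache.put_all({f'key_{i}': i for i in range(20)})
    print(await cache.get_size())        # 20
    async with cache.scan(page_size=10) as cursor:
        async for key, value in cursor:  # pages are fetched lazily behind the scenes
            print(key, value)
```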
+ """ + + def __init__(self, compact_footer: bool = None, partition_aware: bool = True, + event_listeners: Optional[Sequence] = None, **kwargs): + """ + Initialize client. + + For the use of the SSL-related parameters see + https://docs.python.org/3/library/ssl.html#ssl-certificates. + + :param compact_footer: (optional) use compact (True, recommended) or + full (False) schema approach when serializing Complex objects. + Default is to use the same approach the server is using (None). + Apache Ignite binary protocol documentation on this topic: + https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#schema + :param partition_aware: (optional) try to calculate the exact data + placement from the key before to issue the key operation to the + server node, `True` by default, + :param event_listeners: (optional) event listeners, + :param handshake_timeout: (optional) sets timeout (in seconds) for performing handshake (connection) + with node. Default is 10.0 seconds, + :param use_ssl: (optional) set to True if Ignite server uses SSL + on its binary connector. Defaults to use SSL when username + and password has been supplied, not to use SSL otherwise, + :param ssl_version: (optional) SSL version constant from standard + `ssl` module. Defaults to TLS v1.2, + :param ssl_ciphers: (optional) ciphers to use. If not provided, + `ssl` default ciphers are used, + :param ssl_cert_reqs: (optional) determines how the remote side + certificate is treated: + + * `ssl.CERT_NONE` − remote certificate is ignored (default), + * `ssl.CERT_OPTIONAL` − remote certificate will be validated, + if provided, + * `ssl.CERT_REQUIRED` − valid remote certificate is required, + + :param ssl_keyfile: (optional) a path to SSL key file to identify + local (client) party, + :param ssl_keyfile_password: (optional) password for SSL key file, + can be provided when key file is encrypted to prevent OpenSSL + password prompt, + :param ssl_certfile: (optional) a path to ssl certificate file + to identify local (client) party, + :param ssl_ca_certfile: (optional) a path to a trusted certificate + or a certificate chain. Required to check the validity of the remote + (server-side) certificate, + :param username: (optional) user name to authenticate to Ignite + cluster, + :param password: (optional) password to authenticate to Ignite cluster. + """ + super().__init__(compact_footer, partition_aware, event_listeners, **kwargs) + self._registry_mux = asyncio.Lock() + self._affinity_query_mux = asyncio.Lock() + + def connect(self, *args): + """ + Connect to Ignite cluster node(s). + + :param args: (optional) host(s) and port(s) to connect to. 
+ """ + nodes = self._process_connect_args(*args) + return _ConnectionContextManager(self, nodes) + + async def _connect(self, nodes): + for i, node in enumerate(nodes): + host, port = node + conn = AioConnection(self, host, port, **self._connection_args) + + if not self.partition_aware: + try: + if self.protocol_context is None: + # open connection before adding to the pool + await conn.connect() + + # do not try to open more nodes + self._current_node = i + except connection_errors: + pass + + self._nodes.append(conn) + + if self.partition_aware: + connect_results = await asyncio.gather( + *[conn.connect() for conn in self._nodes], + return_exceptions=True + ) + + reconnect_coro = [] + for i, res in enumerate(connect_results): + if isinstance(res, Exception): + if isinstance(res, connection_errors): + reconnect_coro.append(self._nodes[i].reconnect()) + else: + raise res + + await asyncio.gather(*reconnect_coro, return_exceptions=True) + + if self.protocol_context is None: + raise ReconnectError('Can not connect.') + + async def close(self): + await asyncio.gather(*[conn.close() for conn in self._nodes], return_exceptions=True) + self._nodes.clear() + + async def random_node(self) -> AioConnection: + """ + Returns random usable node. + + This method is not a part of the public API. Unless you wish to + extend the `pyignite` capabilities (with additional testing, logging, + examining connections, et c.) you probably should not use it. + """ + if self.partition_aware: + # if partition awareness is used just pick a random connected node + return await self._get_random_node() + else: + # if partition awareness is not used then just return the current + # node if it's alive or the next usable node if connection with the + # current is broken + node = self._nodes[self._current_node] + if node.alive: + return node + + # close current (supposedly failed) node + await self._nodes[self._current_node].close() + + # advance the node index + self._current_node += 1 + if self._current_node >= len(self._nodes): + self._current_node = 0 + + # prepare the list of node indexes to try to connect to + for i in chain(range(self._current_node, len(self._nodes)), range(self._current_node)): + node = self._nodes[i] + try: + await node.connect() + except connection_errors: + pass + else: + return node + + # no nodes left + raise ReconnectError('Can not reconnect: out of nodes.') + + async def _get_random_node(self, reconnect=True): + alive_nodes = [n for n in self._nodes if n.alive] + if alive_nodes: + return random.choice(alive_nodes) + elif reconnect: + await asyncio.gather(*[n.reconnect() for n in self._nodes], return_exceptions=True) + return await self._get_random_node(reconnect=False) + else: + # cannot choose from an empty sequence + raise ReconnectError('Can not reconnect: out of nodes.') from None + + @status_to_exception(BinaryTypeError) + async def get_binary_type(self, binary_type: Union[str, int]) -> dict: + """ + Gets the binary type information from the Ignite server. This is quite + a low-level implementation of Ignite thin client protocol's + `OP_GET_BINARY_TYPE` operation. You would probably want to use + :py:meth:`~pyignite.client.Client.query_binary_type` instead. + + :param binary_type: binary type name or ID, + :return: binary type description − a dict with the following fields: + + - `type_exists`: True if the type is registered, False otherwise. 
In + the latter case all the following fields are omitted, + - `type_id`: Complex object type ID, + - `type_name`: Complex object type name, + - `affinity_key_field`: string value or None, + - `is_enum`: False in case of Complex object registration, + - `schemas`: a list, containing the Complex object schemas in format: + OrderedDict[field name: field type hint]. A schema can be empty. + """ + conn = await self.random_node() + result = await get_binary_type_async(conn, binary_type) + return self._process_get_binary_type_result(result) + + @status_to_exception(BinaryTypeError) + async def put_binary_type(self, type_name: str, affinity_key_field: str = None, is_enum=False, schema: dict = None): + """ + Registers binary type information in cluster. Do not update binary + registry. This is a literal implementation of Ignite thin client + protocol's `OP_PUT_BINARY_TYPE` operation. You would probably want + to use :py:meth:`~pyignite.client.Client.register_binary_type` instead. + + :param type_name: name of the data type being registered, + :param affinity_key_field: (optional) name of the affinity key field, + :param is_enum: (optional) register enum if True, binary object + otherwise. Defaults to False, + :param schema: (optional) when register enum, pass a dict + of enumerated parameter names as keys and an integers as values. + When register binary type, pass a dict of field names: field types. + Binary type with no fields is OK. + """ + conn = await self.random_node() + return await put_binary_type_async(conn, type_name, affinity_key_field, is_enum, schema) + + async def register_binary_type(self, data_class: Type, affinity_key_field: str = None): + """ + Register the given class as a representation of a certain Complex + object type. Discards autogenerated or previously registered class. + + :param data_class: Complex object class, + :param affinity_key_field: (optional) affinity parameter. + """ + if not await self.query_binary_type(data_class.type_id, data_class.schema_id): + await self.put_binary_type(data_class.type_name, affinity_key_field, schema=data_class.schema) + + self._registry[data_class.type_id][data_class.schema_id] = data_class + + async def query_binary_type(self, binary_type: Union[int, str], schema: Union[int, dict] = None): + """ + Queries the registry of Complex object classes. + + :param binary_type: Complex object type name or ID, + :param schema: (optional) Complex object schema or schema ID, + :return: found dataclass or None, if `schema` parameter is provided, + a dict of {schema ID: dataclass} format otherwise. + """ + type_id = entity_id(binary_type) + + result = self._get_from_registry(type_id, schema) + + if not result: + async with self._registry_mux: + result = self._get_from_registry(type_id, schema) + + if not result: + type_info = await self.get_binary_type(type_id) + self._sync_binary_registry(type_id, type_info) + return self._get_from_registry(type_id, schema) + + return result + + async def unwrap_binary(self, value: Any) -> Any: + """ + Detects and recursively unwraps Binary Object. + + :param value: anything that could be a Binary Object, + :return: the result of the Binary Object unwrapping with all other data + left intact. 
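+
+        A wrapped Binary Object arrives here as a ``(blob: bytes, offset: int)``
+        tuple, while wrapped collections arrive as ``(type_code: int, items)``
+        with a list or dict payload and are unwrapped element-wise (see the
+        implementation below).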
+ """ + if isinstance(value, tuple) and len(value) == 2: + if type(value[0]) is bytes and type(value[1]) is int: + blob, offset = value + with AioBinaryStream(self, blob) as stream: + data_class = await BinaryObject.parse_async(stream) + return await BinaryObject.to_python_async(stream.read_ctype(data_class, direction=READ_BACKWARD), + client=self) + + if isinstance(value[0], int): + col_type, collection = value + if isinstance(collection, list): + coros = [self.unwrap_binary(v) for v in collection] + return col_type, await asyncio.gather(*coros) + + if isinstance(collection, dict): + coros = [asyncio.gather(self.unwrap_binary(k), self.unwrap_binary(v)) + for k, v in collection.items()] + return col_type, dict(await asyncio.gather(*coros)) + return value + + @status_to_exception(CacheError) + async def _get_affinity(self, conn: 'AioConnection', caches: Iterable[int]) -> Dict: + """ + Queries server for affinity mappings. Retries in case + of an intermittent error (most probably “Getting affinity for topology + version earlier than affinity is calculated”). + + :param conn: connection to Ignite server, + :param caches: Ids of caches, + :return: OP_CACHE_PARTITIONS operation result value. + """ + for _ in range(AFFINITY_RETRIES or 1): + result = await cache_get_node_partitions_async(conn, caches) + if result.status == 0: + break + await asyncio.sleep(AFFINITY_DELAY) + + return result + + async def get_best_node( + self, cache: Union[int, str, 'BaseCache'], key: Any = None, key_hint: 'IgniteDataType' = None + ) -> 'AioConnection': + """ + Returns the node from the list of the nodes, opened by client, that + most probably contains the needed key-value pair. See IEP-23. + + This method is not a part of the public API. Unless you wish to + extend the `pyignite` capabilities (with additional testing, logging, + examining connections, et c.) you probably should not use it. + + :param cache: Ignite cache, cache name or cache id, + :param key: (optional) pythonic key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :return: Ignite connection object. + """ + conn = await self.random_node() + + if self.partition_aware and key is not None: + caches = self._caches_to_update_affinity() + if caches: + async with self._affinity_query_mux: + while True: + caches = self._caches_to_update_affinity() + if not caches: + break + + try: + full_affinity = await self._get_affinity(conn, caches) + self._update_affinity(full_affinity) + + asyncio.ensure_future( + asyncio.gather( + *[node.reconnect() for node in self._nodes if not node.alive], + return_exceptions=True + ) + ) + + break + except connection_errors: + # retry if connection failed + conn = await self.random_node() + pass + except CacheError: + # server did not create mapping in time + return conn + + c_id = cache.cache_id if isinstance(cache, BaseCache) else cache_id(cache) + parts = self._cache_partition_mapping(c_id).get('number_of_partitions') + + if not parts: + return conn + + key, key_hint = self._get_affinity_key(c_id, key, key_hint) + + hashcode = await key_hint.hashcode_async(key, client=self) + + best_node = self._get_node_by_hashcode(c_id, hashcode, parts) + if best_node: + return best_node + + return conn + + async def create_cache(self, settings: Union[str, dict]) -> 'AioCache': + """ + Creates Ignite cache by name. Raises `CacheError` if such a cache is + already exists. + + :param settings: cache name or dict of cache properties' codes + and values. 
All cache properties are documented here: + :ref:`cache_props`. See also the + :ref:`cache creation example `, + :return: :class:`~pyignite.cache.Cache` object. + """ + return await create_cache(self, settings) + + async def get_or_create_cache(self, settings: Union[str, dict]) -> 'AioCache': + """ + Creates Ignite cache, if not exist. + + :param settings: cache name or dict of cache properties' codes + and values. All cache properties are documented here: + :ref:`cache_props`. See also the + :ref:`cache creation example `, + :return: :class:`~pyignite.cache.Cache` object. + """ + return await get_or_create_cache(self, settings) + + async def get_cache(self, settings: Union[str, dict]) -> 'AioCache': + """ + Creates Cache object with a given cache name without checking it up + on server. If such a cache does not exist, some kind of exception + (most probably `CacheError`) may be raised later. + + :param settings: cache name or cache properties (but only `PROP_NAME` + property is allowed), + :return: :class:`~pyignite.cache.Cache` object. + """ + return await get_cache(self, settings) + + @status_to_exception(CacheError) + async def get_cache_names(self) -> list: + """ + Gets existing cache names. + + :return: list of cache names. + """ + conn = await self.random_node() + return await cache_get_names_async(conn) + + def sql( + self, query_str: str, page_size: int = 1024, + query_args: Iterable = None, schema: str = 'PUBLIC', + statement_type: int = 0, distributed_joins: bool = False, + local: bool = False, replicated_only: bool = False, + enforce_join_order: bool = False, collocated: bool = False, + lazy: bool = False, include_field_names: bool = False, + max_rows: int = -1, timeout: int = 0, + cache: Union[int, str, 'AioCache'] = None + ) -> AioSqlFieldsCursor: + """ + Runs an SQL query and returns its result. + + :param query_str: SQL query string, + :param page_size: (optional) cursor page size. Default is 1024, which + means that client makes one server call per 1024 rows, + :param query_args: (optional) query arguments. List of values or + (value, type hint) tuples, + :param schema: (optional) schema for the query. Defaults to `PUBLIC`, + :param statement_type: (optional) statement type. Can be: + + * StatementType.ALL − any type (default), + * StatementType.SELECT − select, + * StatementType.UPDATE − update. + + :param distributed_joins: (optional) distributed joins. Defaults + to False, + :param local: (optional) pass True if this query should be executed + on local node only. Defaults to False, + :param replicated_only: (optional) whether query contains only + replicated tables or not. Defaults to False, + :param enforce_join_order: (optional) enforce join order. Defaults + to False, + :param collocated: (optional) whether your data is co-located or not. + Defaults to False, + :param lazy: (optional) lazy query execution. Defaults to False, + :param include_field_names: (optional) include field names in result. + Defaults to False, + :param max_rows: (optional) query-wide maximum of rows. Defaults to -1 + (all rows), + :param timeout: (optional) non-negative timeout value in ms. + Zero disables timeout (default), + :param cache: (optional) Name or ID of the cache to use to infer schema. + If set, 'schema' argument is ignored, + :return: async sql fields cursor with result rows as a lists. If + `include_field_names` was set, the first row will hold field names. 
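+
+        A usage sketch (the table and column names are illustrative)::
+
+            async def run_query(client):
+                query = 'SELECT name FROM Person'
+                async with client.sql(query, include_field_names=True) as cursor:
+                    async for row in cursor:
+                        print(row)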
+ """ + if isinstance(cache, (int, str)): + c_info = CacheInfo(cache_id=cache_id(cache), protocol_context=self.protocol_context) + elif isinstance(cache, AioCache): + c_info = cache.cache_info + else: + c_info = CacheInfo(protocol_context=self.protocol_context) + + if c_info.cache_id: + schema = None + + return AioSqlFieldsCursor(self, c_info, query_str, page_size, query_args, schema, statement_type, + distributed_joins, local, replicated_only, enforce_join_order, collocated, + lazy, include_field_names, max_rows, timeout) + + def get_cluster(self) -> 'AioCluster': + """ + Get client cluster facade. + + :return: :py:class:`~pyignite.aio_cluster.AioCluster` instance. + """ + return AioCluster(self) + + def tx_start(self, concurrency: TransactionConcurrency = TransactionConcurrency.PESSIMISTIC, + isolation: TransactionIsolation = TransactionIsolation.REPEATABLE_READ, + timeout: int = 0, label: Optional[str] = None) -> 'AioTransaction': + """ + Start async thin client transaction. **Supported only python 3.7+** + + :param concurrency: (optional) transaction concurrency, see + :py:class:`~pyignite.datatypes.transactions.TransactionConcurrency`, + :param isolation: (optional) transaction isolation level, see + :py:class:`~pyignite.datatypes.transactions.TransactionIsolation`, + :param timeout: (optional) transaction timeout in milliseconds, + :param label: (optional) transaction label. + :return: :py:class:`~pyignite.transaction.AioTransaction` instance. + """ + if sys.version_info < (3, 7): + raise NotSupportedError(f"Transactions are not supported in async client on current python {sys.version}") + return AioTransaction(self, concurrency, isolation, timeout, label) diff --git a/pyignite/aio_cluster.py b/pyignite/aio_cluster.py new file mode 100644 index 0000000..afbc41b --- /dev/null +++ b/pyignite/aio_cluster.py @@ -0,0 +1,64 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module contains `AioCluster` that lets you get info and change state of the +whole cluster asynchronously. +""" +from pyignite.api.cluster import cluster_get_state_async, cluster_set_state_async +from pyignite.datatypes import ClusterState +from pyignite.exceptions import ClusterError +from pyignite.utils import status_to_exception + + +class AioCluster: + """ + Ignite cluster abstraction. Users should never use this class directly, + but construct its instances with + :py:meth:`~pyignite.aio_client.AioClient.get_cluster` method instead. + """ + + def __init__(self, client: 'AioClient'): + """ + :param client: :py:class:`~pyignite.aio_client.AioClient` instance. + """ + self._client = client + + @status_to_exception(ClusterError) + async def get_state(self) -> 'ClusterState': + """ + Gets current cluster state. + + :return: Current cluster state. 
This is one of + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.INACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE_READ_ONLY`. + """ + return await cluster_get_state_async(await self._client.random_node()) + + @status_to_exception(ClusterError) + async def set_state(self, state: 'ClusterState'): + """ + Changes current cluster state to the given. + + Note: Deactivation clears in-memory caches (without persistence) + including the system caches. + + :param state: New cluster state. This is one of + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.INACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE_READ_ONLY`. + """ + return await cluster_set_state_async(await self._client.random_node(), state) diff --git a/pyignite/api/__init__.py b/pyignite/api/__init__.py index 01437f0..19a7036 100644 --- a/pyignite/api/__init__.py +++ b/pyignite/api/__init__.py @@ -17,55 +17,61 @@ This module contains functions, that are (more or less) directly mapped to Apache Ignite binary protocol operations. Read more: -https://apacheignite.readme.io/docs/binary-client-protocol#section-client-operations +https://ignite.apache.org/docs/latest/binary-client-protocol/binary-client-protocol#client-operations When the binary client protocol changes, these functions also change. For stable end user API see :mod:`pyignite.client` module. """ +# flake8: noqa + +from .affinity import ( + cache_get_node_partitions, cache_get_node_partitions_async, +) from .cache_config import ( - cache_create, - cache_get_names, - cache_get_or_create, - cache_destroy, - cache_get_configuration, - cache_create_with_config, - cache_get_or_create_with_config, + cache_create, cache_create_async, + cache_get_names, cache_get_names_async, + cache_get_or_create, cache_get_or_create_async, + cache_destroy, cache_destroy_async, + cache_get_configuration, cache_get_configuration_async, + cache_create_with_config, cache_create_with_config_async, + cache_get_or_create_with_config, cache_get_or_create_with_config_async, ) from .key_value import ( - cache_get, - cache_put, - cache_get_all, - cache_put_all, - cache_contains_key, - cache_contains_keys, - cache_get_and_put, - cache_get_and_replace, - cache_get_and_remove, - cache_put_if_absent, - cache_get_and_put_if_absent, - cache_replace, - cache_replace_if_equals, - cache_clear, - cache_clear_key, - cache_clear_keys, - cache_remove_key, - cache_remove_if_equals, - cache_remove_keys, - cache_remove_all, - cache_get_size, + cache_get, cache_get_async, + cache_put, cache_put_async, + cache_get_all, cache_get_all_async, + cache_put_all, cache_put_all_async, + cache_contains_key, cache_contains_key_async, + cache_contains_keys, cache_contains_keys_async, + cache_get_and_put, cache_get_and_put_async, + cache_get_and_replace, cache_get_and_replace_async, + cache_get_and_remove, cache_get_and_remove_async, + cache_put_if_absent, cache_put_if_absent_async, + cache_get_and_put_if_absent, cache_get_and_put_if_absent_async, + cache_replace, cache_replace_async, + cache_replace_if_equals, cache_replace_if_equals_async, + cache_clear, cache_clear_async, + cache_clear_key, cache_clear_key_async, + cache_clear_keys, cache_clear_keys_async, + cache_remove_key, cache_remove_key_async, + cache_remove_if_equals, cache_remove_if_equals_async, + cache_remove_keys, cache_remove_keys_async, + cache_remove_all, 
cache_remove_all_async, + cache_get_size, cache_get_size_async, + cache_local_peek, cache_local_peek_async, ) from .sql import ( - scan, - scan_cursor_get_page, + scan, scan_async, + scan_cursor_get_page, scan_cursor_get_page_async, sql, sql_cursor_get_page, - sql_fields, - sql_fields_cursor_get_page, - resource_close, + sql_fields, sql_fields_async, + sql_fields_cursor_get_page, sql_fields_cursor_get_page_async, + resource_close, resource_close_async ) from .binary import ( - get_binary_type, - put_binary_type, + get_binary_type, get_binary_type_async, + put_binary_type, put_binary_type_async ) from .result import APIResult diff --git a/pyignite/api/affinity.py b/pyignite/api/affinity.py new file mode 100644 index 0000000..30e93ff --- /dev/null +++ b/pyignite/api/affinity.py @@ -0,0 +1,156 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Iterable, Union + +from pyignite.connection import AioConnection, Connection +from pyignite.datatypes import Bool, Int, Long, UUIDObject +from pyignite.datatypes.internal import StructArray, Conditional, Struct +from pyignite.queries import Query, query_perform +from pyignite.queries.op_codes import OP_CACHE_PARTITIONS +from pyignite.utils import is_iterable +from .result import APIResult + + +cache_ids = StructArray([ + ('cache_id', Int), +]) + +cache_config = StructArray([ + ('key_type_id', Int), + ('affinity_key_field_id', Int), +]) + +node_partitions = StructArray([ + ('partition_id', Int), +]) + +node_mapping = StructArray([ + ('node_uuid', UUIDObject), + ('node_partitions', node_partitions) +]) + +cache_mapping = StructArray([ + ('cache_id', Int), + ('cache_config', cache_config), +]) + +empty_cache_mapping = StructArray([ + ('cache_id', Int) +]) + +empty_node_mapping = Struct([]) + +partition_mapping = StructArray([ + ('is_applicable', Bool), + + ('cache_mapping', Conditional(['is_applicable'], + lambda ctx: ctx['is_applicable'] and ctx['is_applicable'].value == 1, + lambda ctx: ctx['is_applicable'], + cache_mapping, empty_cache_mapping)), + + ('node_mapping', Conditional(['is_applicable'], + lambda ctx: ctx['is_applicable'] and ctx['is_applicable'].value == 1, + lambda ctx: ctx['is_applicable'], + node_mapping, empty_node_mapping)), +]) + + +def cache_get_node_partitions(conn: 'Connection', caches: Union[int, Iterable[int]]) -> APIResult: + """ + Gets partition mapping for an Ignite cache or a number of caches. See + “IEP-23: Best Effort Affinity for thin clients”. + + :param conn: connection to Ignite server, + :param caches: cache ID(s) the mapping is provided for + :return: API result data object. 
+ """ + return __cache_get_node_partitions(conn, caches) + + +async def cache_get_node_partitions_async(conn: 'AioConnection', caches: Union[int, Iterable[int]]) -> APIResult: + """ + Async version of cache_get_node_partitions. + """ + return await __cache_get_node_partitions(conn, caches) + + +def __post_process_partitions(result): + if result.status == 0: + # tidying up the result + value = { + 'version': ( + result.value['version_major'], + result.value['version_minor'] + ), + 'partition_mapping': {}, + } + for partition_map in result.value['partition_mapping']: + is_applicable = partition_map['is_applicable'] + + node_mapping = None + if is_applicable: + node_mapping = { + p['node_uuid']: set(x['partition_id'] for x in p['node_partitions']) + for p in partition_map['node_mapping'] + } + + for cache_info in partition_map['cache_mapping']: + cache_id = cache_info['cache_id'] + + cache_partition_mapping = { + 'is_applicable': is_applicable, + } + + parts = 0 + if is_applicable: + cache_partition_mapping['cache_config'] = { + a['key_type_id']: a['affinity_key_field_id'] + for a in cache_info['cache_config'] + } + cache_partition_mapping['node_mapping'] = node_mapping + + parts = sum(len(p) for p in cache_partition_mapping['node_mapping'].values()) + + cache_partition_mapping['number_of_partitions'] = parts + + value['partition_mapping'][cache_id] = cache_partition_mapping + result.value = value + return result + + +def __cache_get_node_partitions(conn, caches): + query_struct = Query( + OP_CACHE_PARTITIONS, + [ + ('cache_ids', cache_ids), + ] + ) + if not is_iterable(caches): + caches = [caches] + + return query_perform( + query_struct, + conn, + query_params={ + 'cache_ids': [{'cache_id': cache} for cache in caches], + }, + response_config=[ + ('version_major', Long), + ('version_minor', Int), + ('partition_mapping', partition_mapping), + ], + post_process_fun=__post_process_partitions + ) diff --git a/pyignite/api/binary.py b/pyignite/api/binary.py index f0a5831..b49ab8b 100644 --- a/pyignite/api/binary.py +++ b/pyignite/api/binary.py @@ -15,96 +15,51 @@ from typing import Union -from pyignite.constants import * -from pyignite.datatypes.binary import ( - body_struct, enum_struct, schema_struct, binary_fields_struct, -) +from pyignite.connection import Connection, AioConnection +from pyignite.constants import PROTOCOL_BYTE_ORDER +from pyignite.datatypes.binary import enum_struct, schema_struct, binary_fields_struct from pyignite.datatypes import String, Int, Bool -from pyignite.queries import Query, Response -from pyignite.queries.op_codes import * -from pyignite.utils import int_overflow, entity_id +from pyignite.queries import Query, query_perform +from pyignite.queries.op_codes import OP_GET_BINARY_TYPE, OP_PUT_BINARY_TYPE +from pyignite.utils import entity_id, schema_id from .result import APIResult +from ..queries.response import BinaryTypeResponse -def get_binary_type( - connection: 'Connection', binary_type: Union[str, int], query_id=None, -) -> APIResult: +def get_binary_type(conn: 'Connection', binary_type: Union[str, int]) -> APIResult: """ Gets the binary type information by type ID. - :param connection: connection to Ignite server, + :param conn: connection to Ignite server, :param binary_type: binary type name or ID, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. 
""" + return __get_binary_type(conn, binary_type) + + +async def get_binary_type_async(conn: 'AioConnection', binary_type: Union[str, int]) -> APIResult: + """ + Async version of get_binary_type. + """ + return await __get_binary_type(conn, binary_type) + +def __get_binary_type(conn, binary_type): query_struct = Query( OP_GET_BINARY_TYPE, [ ('type_id', Int), ], - query_id=query_id, + response_type=BinaryTypeResponse ) - _, send_buffer = query_struct.from_python({ + return query_perform(query_struct, conn, query_params={ 'type_id': entity_id(binary_type), }) - connection.send(send_buffer) - - response_head_struct = Response([ - ('type_exists', Bool), - ]) - response_head_type, recv_buffer = response_head_struct.parse(connection) - response_head = response_head_type.from_buffer_copy(recv_buffer) - response_parts = [] - if response_head.type_exists: - resp_body_type, resp_body_buffer = body_struct.parse(connection) - response_parts.append(('body', resp_body_type)) - resp_body = resp_body_type.from_buffer_copy(resp_body_buffer) - recv_buffer += resp_body_buffer - if resp_body.is_enum: - resp_enum, resp_enum_buffer = enum_struct.parse(connection) - response_parts.append(('enums', resp_enum)) - recv_buffer += resp_enum_buffer - resp_schema_type, resp_schema_buffer = schema_struct.parse(connection) - response_parts.append(('schema', resp_schema_type)) - recv_buffer += resp_schema_buffer - - response_class = type( - 'GetBinaryTypeResponse', - (response_head_type,), - { - '_pack_': 1, - '_fields_': response_parts, - } - ) - response = response_class.from_buffer_copy(recv_buffer) - result = APIResult(response) - if result.status != 0: - return result - result.value = { - 'type_exists': response.type_exists - } - if hasattr(response, 'body'): - result.value.update(body_struct.to_python(response.body)) - if hasattr(response, 'enums'): - result.value['enums'] = enum_struct.to_python(response.enums) - if hasattr(response, 'schema'): - result.value['schema'] = { - x['schema_id']: [ - z['schema_field_id'] for z in x['schema_fields'] - ] - for x in schema_struct.to_python(response.schema) - } - return result -def put_binary_type( - connection: 'Connection', type_name: str, affinity_key_field: str=None, - is_enum=False, schema: dict=None, query_id=None, -) -> APIResult: +def put_binary_type(connection: 'Connection', type_name: str, affinity_key_field: str = None, + is_enum=False, schema: dict = None) -> APIResult: """ Registers binary type information in cluster. @@ -117,11 +72,31 @@ def put_binary_type( parameter names as keys and an integers as values. When register binary type, pass a dict of field names: field types. Binary type with no fields is OK, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. """ + return __put_binary_type(connection, type_name, affinity_key_field, is_enum, schema) + + +async def put_binary_type_async(connection: 'AioConnection', type_name: str, affinity_key_field: str = None, + is_enum=False, schema: dict = None, query_id=None) -> APIResult: + """ + Async version of put_binary_type. 
+ """ + return await __put_binary_type(connection, type_name, affinity_key_field, is_enum, schema) + + +def __post_process_put_binary(type_id): + def internal(result): + if result.status == 0: + result.value = { + 'type_id': type_id, + 'schema_id': schema_id, + } + return result + return internal + + +def __put_binary_type(connection, type_name, affinity_key_field, is_enum, schema): # prepare data if schema is None: schema = {} @@ -134,7 +109,7 @@ def put_binary_type( 'is_enum': is_enum, 'schema': [], } - schema_id = None + s_id = None if is_enum: data['enums'] = [] for literal, ordinal in schema.items(): @@ -144,7 +119,7 @@ def put_binary_type( }) else: # assemble schema and calculate schema ID in one go - schema_id = FNV1_OFFSET_BASIS if schema else 0 + s_id = schema_id(schema) for field_name, data_type in schema.items(): # TODO: check for allowed data types field_id = entity_id(field_name) @@ -156,17 +131,9 @@ def put_binary_type( ), 'field_id': field_id, }) - schema_id ^= (field_id & 0xff) - schema_id = int_overflow(schema_id * FNV1_PRIME) - schema_id ^= ((field_id >> 8) & 0xff) - schema_id = int_overflow(schema_id * FNV1_PRIME) - schema_id ^= ((field_id >> 16) & 0xff) - schema_id = int_overflow(schema_id * FNV1_PRIME) - schema_id ^= ((field_id >> 24) & 0xff) - schema_id = int_overflow(schema_id * FNV1_PRIME) data['schema'].append({ - 'schema_id': schema_id, + 'schema_id': s_id, 'schema_fields': [ {'schema_field_id': entity_id(x)} for x in schema ], @@ -184,8 +151,7 @@ def put_binary_type( ('is_enum', Bool), ('enums', enum_struct), ('schema', schema_struct), - ], - query_id=query_id, + ] ) else: query_struct = Query( @@ -197,13 +163,7 @@ def put_binary_type( ('binary_fields', binary_fields_struct), ('is_enum', Bool), ('schema', schema_struct), - ], - query_id=query_id, + ] ) - result = query_struct.perform(connection, query_params=data) - if result.status == 0: - result.value = { - 'type_id': type_id, - 'schema_id': schema_id, - } - return result + return query_perform(query_struct, connection, query_params=data, + post_process_fun=__post_process_put_binary(type_id)) diff --git a/pyignite/api/cache_config.py b/pyignite/api/cache_config.py index cfea416..d4a5f81 100644 --- a/pyignite/api/cache_config.py +++ b/pyignite/api/cache_config.py @@ -25,15 +25,22 @@ from typing import Union -from pyignite.datatypes.cache_config import cache_config_struct +from pyignite.connection import Connection, AioConnection +from pyignite.datatypes.cache_config import get_cache_config_struct from pyignite.datatypes.cache_properties import prop_map -from pyignite.datatypes import ( - Int, Byte, prop_codes, Short, String, StringArray, +from pyignite.datatypes import Int, prop_codes, Short, String, StringArray +from pyignite.queries import Query, ConfigQuery, query_perform +from pyignite.queries.op_codes import ( + OP_CACHE_GET_CONFIGURATION, OP_CACHE_CREATE_WITH_NAME, OP_CACHE_GET_OR_CREATE_WITH_NAME, OP_CACHE_DESTROY, + OP_CACHE_GET_NAMES, OP_CACHE_CREATE_WITH_CONFIGURATION, OP_CACHE_GET_OR_CREATE_WITH_CONFIGURATION ) -from pyignite.queries import Query, ConfigQuery -from pyignite.queries.op_codes import * from pyignite.utils import cache_id +from .result import APIResult +from ..datatypes.prop_codes import PROP_EXPIRY_POLICY +from ..exceptions import NotSupportedByClusterError +from ..queries.cache_info import CacheInfo + def compact_cache_config(cache_config: dict) -> dict: """ @@ -48,166 +55,158 @@ def compact_cache_config(cache_config: dict) -> dict: for k, v in cache_config.items(): if k == 'length': 
continue - prop_code = getattr(prop_codes, 'PROP_{}'.format(k.upper())) + prop_code = getattr(prop_codes, f'PROP_{k.upper()}') result[prop_code] = v return result -def cache_get_configuration( - connection: 'Connection', cache: Union[str, int], flags: int=0, query_id=None, -) -> 'APIResult': +def cache_get_configuration(connection: 'Connection', cache_info: CacheInfo) -> 'APIResult': """ Gets configuration for the given cache. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, - :param flags: Ignite documentation is unclear on this subject, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, + :param cache_info: cache meta info, :return: API result data object. Result value is OrderedDict with the cache configuration parameters. """ + return __cache_get_configuration(connection, cache_info) - query_struct = Query( - OP_CACHE_GET_CONFIGURATION, - [ - ('hash_code', Int), - ('flags', Byte), - ], - query_id=query_id, - ) - result = query_struct.perform( - connection, - query_params={ - 'hash_code': cache_id(cache), - 'flags': flags, - }, - response_config=[ - ('cache_config', cache_config_struct), - ], - ) + +async def cache_get_configuration_async(connection: 'AioConnection', cache_info: CacheInfo) -> 'APIResult': + """ + Async version of cache_get_configuration. + """ + return await __cache_get_configuration(connection, cache_info) + + +def __post_process_cache_config(result): if result.status == 0: result.value = compact_cache_config(result.value['cache_config']) return result -def cache_create( - connection: 'Connection', name: str, query_id=None, -) -> 'APIResult': +def __cache_get_configuration(connection, cache_info): + query_struct = Query( + OP_CACHE_GET_CONFIGURATION, + [ + ('cache_info', CacheInfo) + ] + ) + return query_perform(query_struct, connection, + query_params={ + 'cache_info': cache_info + }, + response_config=[ + ('cache_config', get_cache_config_struct(connection.protocol_context)) + ], + post_process_fun=__post_process_cache_config + ) + + +def cache_create(connection: 'Connection', name: str) -> 'APIResult': """ Creates a cache with a given name. Returns error if a cache with specified name already exists. :param connection: connection to Ignite server, :param name: cache name, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status if a cache is created successfully, non-zero status and an error description otherwise. """ - query_struct = Query( - OP_CACHE_CREATE_WITH_NAME, - [ - ('cache_name', String), - ], - query_id=query_id, - ) - return query_struct.perform( - connection, - query_params={ - 'cache_name': name, - }, - ) + return __cache_create_with_name(OP_CACHE_CREATE_WITH_NAME, connection, name) -def cache_get_or_create( - connection: 'Connection', name: str, query_id=None, -) -> 'APIResult': +async def cache_create_async(connection: 'AioConnection', name: str) -> 'APIResult': + """ + Async version of cache_create. + """ + + return await __cache_create_with_name(OP_CACHE_CREATE_WITH_NAME, connection, name) + + +def cache_get_or_create(connection: 'Connection', name: str) -> 'APIResult': """ Creates a cache with a given name. Does nothing if the cache exists. 
:param connection: connection to Ignite server, :param name: cache name, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status if a cache is created successfully, non-zero status and an error description otherwise. """ - query_struct = Query( - OP_CACHE_GET_OR_CREATE_WITH_NAME, - [ - ('cache_name', String), - ], - query_id=query_id, - ) - return query_struct.perform( - connection, - query_params={ - 'cache_name': name, - }, - ) + return __cache_create_with_name(OP_CACHE_GET_OR_CREATE_WITH_NAME, connection, name) -def cache_destroy( - connection: 'Connection', cache: Union[str, int], query_id=None, -) -> 'APIResult': +async def cache_get_or_create_async(connection: 'AioConnection', name: str) -> 'APIResult': + """ + Async version of cache_get_or_create. + """ + return await __cache_create_with_name(OP_CACHE_GET_OR_CREATE_WITH_NAME, connection, name) + + +def __cache_create_with_name(op_code, conn, name): + query_struct = Query(op_code, [('cache_name', String)]) + return query_perform(query_struct, conn, query_params={'cache_name': name}) + + +def cache_destroy(connection: 'Connection', cache: Union[str, int]) -> 'APIResult': """ Destroys cache with a given name. :param connection: connection to Ignite server, :param cache: name or ID of the cache, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. """ + return __cache_destroy(connection, cache) - query_struct = Query( - OP_CACHE_DESTROY,[ - ('hash_code', Int), - ], - query_id=query_id, - ) - return query_struct.perform( - connection, - query_params={ - 'hash_code': cache_id(cache), - }, - ) + +async def cache_destroy_async(connection: 'AioConnection', cache: Union[str, int]) -> 'APIResult': + """ + Async version of cache_destroy. + """ + return await __cache_destroy(connection, cache) -def cache_get_names(connection: 'Connection', query_id=None) -> 'APIResult': +def __cache_destroy(connection, cache): + query_struct = Query(OP_CACHE_DESTROY, [('cache_id', Int)]) + + return query_perform(query_struct, connection, query_params={'cache_id': cache_id(cache)}) + + +def cache_get_names(connection: 'Connection') -> 'APIResult': """ Gets existing cache names. :param connection: connection to Ignite server, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and a list of cache names, non-zero status and an error description otherwise. """ - query_struct = Query(OP_CACHE_GET_NAMES, query_id=query_id) - result = query_struct.perform( - connection, - response_config=[ - ('cache_names', StringArray), - ], - ) + return __cache_get_names(connection) + + +async def cache_get_names_async(connection: 'AioConnection') -> 'APIResult': + """ + Async version of cache_get_names. 
+ """ + return await __cache_get_names(connection) + + +def __post_process_cache_names(result): if result.status == 0: result.value = result.value['cache_names'] return result -def cache_create_with_config( - connection: 'Connection', cache_props: dict, query_id=None, -) -> 'APIResult': +def __cache_get_names(connection): + query_struct = Query(OP_CACHE_GET_NAMES) + return query_perform(query_struct, connection, + response_config=[('cache_names', StringArray)], + post_process_fun=__post_process_cache_names) + + +def cache_create_with_config(connection: 'Connection', cache_props: dict) -> 'APIResult': """ Creates cache with provided configuration. An error is returned if the name is already in use. @@ -216,35 +215,20 @@ def cache_create_with_config( :param cache_props: cache configuration properties to create cache with in form of dictionary {property code: python value}. You must supply at least name (PROP_NAME), - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status if cache was created, non-zero status and an error description otherwise. """ + return __cache_create_with_config(OP_CACHE_CREATE_WITH_CONFIGURATION, connection, cache_props) - prop_types = {} - prop_values = {} - for i, prop_item in enumerate(cache_props.items()): - prop_code, prop_value = prop_item - prop_name = 'property_{}'.format(i) - prop_types[prop_name] = prop_map(prop_code) - prop_values[prop_name] = prop_value - prop_values['param_count'] = len(cache_props) - query_struct = ConfigQuery( - OP_CACHE_CREATE_WITH_CONFIGURATION, - [ - ('param_count', Short), - ] + list(prop_types.items()), - query_id=query_id, - ) - return query_struct.perform(connection, query_params=prop_values) +async def cache_create_with_config_async(connection: 'AioConnection', cache_props: dict) -> 'APIResult': + """ + Async version of cache_create_with_config. + """ + return await __cache_create_with_config(OP_CACHE_CREATE_WITH_CONFIGURATION, connection, cache_props) -def cache_get_or_create_with_config( - connection: 'Connection', cache_props: dict, query_id=None, -) -> 'APIResult': +def cache_get_or_create_with_config(connection: 'Connection', cache_props: dict) -> 'APIResult': """ Creates cache with provided configuration. Does nothing if the name is already in use. @@ -253,27 +237,32 @@ def cache_get_or_create_with_config( :param cache_props: cache configuration properties to create cache with in form of dictionary {property code: python value}. You must supply at least name (PROP_NAME), - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status if cache was created, non-zero status and an error description otherwise. """ + return __cache_create_with_config(OP_CACHE_GET_OR_CREATE_WITH_CONFIGURATION, connection, cache_props) - prop_types = {} - prop_values = {} + +async def cache_get_or_create_with_config_async(connection: 'AioConnection', cache_props: dict) -> 'APIResult': + """ + Async version of cache_get_or_create_with_config. 
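+
+    A call sketch (assuming an open connection ``conn``; the cache name is
+    illustrative)::
+
+        from pyignite.datatypes.prop_codes import PROP_NAME
+
+        result = await cache_get_or_create_with_config_async(
+            conn, {PROP_NAME: 'my-cache'})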
+ """ + return await __cache_create_with_config(OP_CACHE_GET_OR_CREATE_WITH_CONFIGURATION, connection, cache_props) + + +def __cache_create_with_config(op_code, connection, cache_props): + prop_types, prop_values = {}, {} + is_expiry_policy_supported = connection.protocol_context.is_expiry_policy_supported() for i, prop_item in enumerate(cache_props.items()): prop_code, prop_value = prop_item + if prop_code == PROP_EXPIRY_POLICY and not is_expiry_policy_supported: + raise NotSupportedByClusterError("'ExpiryPolicy' API is not supported by the cluster") + prop_name = 'property_{}'.format(i) prop_types[prop_name] = prop_map(prop_code) prop_values[prop_name] = prop_value prop_values['param_count'] = len(cache_props) - query_struct = ConfigQuery( - OP_CACHE_GET_OR_CREATE_WITH_CONFIGURATION, - [ - ('param_count', Short), - ] + list(prop_types.items()), - query_id=query_id, - ) - return query_struct.perform(connection, query_params=prop_values) + following = [('param_count', Short)] + list(prop_types.items()) + query_struct = ConfigQuery(op_code, following) + return query_perform(query_struct, connection, query_params=prop_values) diff --git a/pyignite/api/cluster.py b/pyignite/api/cluster.py new file mode 100644 index 0000000..50c71bd --- /dev/null +++ b/pyignite/api/cluster.py @@ -0,0 +1,99 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from pyignite.api import APIResult +from pyignite.connection import AioConnection, Connection +from pyignite.datatypes import Byte +from pyignite.exceptions import NotSupportedByClusterError +from pyignite.queries import Query, query_perform +from pyignite.queries.op_codes import OP_CLUSTER_GET_STATE, OP_CLUSTER_CHANGE_STATE + + +def cluster_get_state(connection: 'Connection') -> 'APIResult': + """ + Get cluster state. + + :param connection: Connection to use, + :return: API result data object. Contains zero status and a state + retrieved on success, non-zero status and an error description on failure. + """ + return __cluster_get_state(connection) + + +async def cluster_get_state_async(connection: 'AioConnection') -> 'APIResult': + """ + Async version of cluster_get_state + """ + return await __cluster_get_state(connection) + + +def __post_process_get_state(result): + if result.status == 0: + result.value = result.value['state'] + return result + + +def __cluster_get_state(connection): + if not connection.protocol_context.is_cluster_api_supported(): + raise NotSupportedByClusterError('Cluster API is not supported by the cluster') + + query_struct = Query(OP_CLUSTER_GET_STATE) + return query_perform( + query_struct, connection, + response_config=[('state', Byte)], + post_process_fun=__post_process_get_state + ) + + +def cluster_set_state(connection: 'Connection', state: int) -> 'APIResult': + """ + Set cluster state. 
+ + :param connection: Connection to use, + :param state: State to set, + :return: API result data object. Contains zero status if a value + is written, non-zero status and an error description otherwise. + """ + return __cluster_set_state(connection, state) + + +async def cluster_set_state_async(connection: 'AioConnection', state: int) -> 'APIResult': + """ + Async version of cluster_get_state + """ + return await __cluster_set_state(connection, state) + + +def __post_process_set_state(result): + if result.status == 0: + result.value = result.value['state'] + return result + + +def __cluster_set_state(connection, state): + if not connection.protocol_context.is_cluster_api_supported(): + raise NotSupportedByClusterError('Cluster API is not supported by the cluster') + + query_struct = Query( + OP_CLUSTER_CHANGE_STATE, + [ + ('state', Byte) + ] + ) + return query_perform( + query_struct, connection, + query_params={ + 'state': state, + } + ) diff --git a/pyignite/api/key_value.py b/pyignite/api/key_value.py index 56f5378..5b3f72c 100644 --- a/pyignite/api/key_value.py +++ b/pyignite/api/key_value.py @@ -13,601 +13,604 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Iterable, Union - -from pyignite.queries.op_codes import * -from pyignite.datatypes import ( - Map, Bool, Byte, Int, Long, AnyDataArray, AnyDataObject, +from typing import Any, Iterable, Union + +from pyignite.connection import AioConnection, Connection +from pyignite.queries.op_codes import ( + OP_CACHE_PUT, OP_CACHE_GET, OP_CACHE_GET_ALL, OP_CACHE_PUT_ALL, OP_CACHE_CONTAINS_KEY, OP_CACHE_CONTAINS_KEYS, + OP_CACHE_GET_AND_PUT, OP_CACHE_GET_AND_REPLACE, OP_CACHE_GET_AND_REMOVE, OP_CACHE_PUT_IF_ABSENT, + OP_CACHE_GET_AND_PUT_IF_ABSENT, OP_CACHE_REPLACE, OP_CACHE_REPLACE_IF_EQUALS, OP_CACHE_CLEAR, OP_CACHE_CLEAR_KEY, + OP_CACHE_CLEAR_KEYS, OP_CACHE_REMOVE_KEY, OP_CACHE_REMOVE_IF_EQUALS, OP_CACHE_REMOVE_KEYS, OP_CACHE_REMOVE_ALL, + OP_CACHE_GET_SIZE, OP_CACHE_LOCAL_PEEK ) -from pyignite.datatypes.key_value import PeekModes -from pyignite.queries import Query, Response -from pyignite.utils import cache_id +from pyignite.datatypes import Map, Bool, Long, AnyDataArray, AnyDataObject, ByteArray +from pyignite.datatypes.base import IgniteDataType +from pyignite.queries import Query, query_perform +from .result import APIResult +from ..queries.cache_info import CacheInfo -def cache_put( - connection: 'Connection', cache: Union[str, int], key, value, - key_hint=None, value_hint=None, binary=False, query_id=None, -) -> 'APIResult': + +def cache_put(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None) -> 'APIResult': """ Puts a value with a given key to cache (overwriting existing value if any). :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param value: value for the key, :param key_hint: (optional) Ignite data type, for which the given key should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param binary: (optional) pass True to keep the value in binary form. - False by default, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. 
When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status if a value is written, non-zero status and an error description otherwise. """ + return __cache_put(connection, cache_info, key, value, key_hint, value_hint) + + +async def cache_put_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None) -> 'APIResult': + """ + Async version of cache_put + """ + return await __cache_put(connection, cache_info, key, value, key_hint, value_hint) + +def __cache_put(connection, cache_info, key, value, key_hint, value_hint): query_struct = Query( OP_CACHE_PUT, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), - ], - query_id=query_id, + ] + ) + return query_perform( + query_struct, connection, + query_params={ + 'cache_info': cache_info, + 'key': key, + 'value': value + } ) - return query_struct.perform(connection, { - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, - 'key': key, - 'value': value, - }) -def cache_get( - connection: 'Connection', cache: Union[str, int], key, - key_hint=None, binary=False, query_id=None, -) -> 'APIResult': +def cache_get(connection: 'Connection', cache_info: CacheInfo, key: Any, + key_hint: 'IgniteDataType' = None) -> 'APIResult': """ Retrieves a value from cache by key. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param key_hint: (optional) Ignite data type, for which the given key - should be converted, - :param binary: (optional) pass True to keep the value in binary form. - False by default, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, + should be converted :return: API result data object. Contains zero status and a value retrieved on success, non-zero status and an error description on failure. """ + return __cache_get(connection, cache_info, key, key_hint) + + +async def cache_get_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, + key_hint: 'IgniteDataType' = None) -> 'APIResult': + """ + Async version of cache_get + """ + return await __cache_get(connection, cache_info, key, key_hint) + +def __cache_get(connection, cache_info, key, key_hint): query_struct = Query( OP_CACHE_GET, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), - ], - query_id=query_id, + ] ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, }, response_config=[ - ('value', AnyDataObject), + ('value', AnyDataObject), ], + post_process_fun=__post_process_value_by_key('value') ) - if result.status != 0: - return result - result.value = result.value['value'] - return result -def cache_get_all( - connection: 'Connection', cache: Union[str, int], keys: Iterable, - binary=False, query_id=None, -) -> 'APIResult': +def cache_get_all(connection: 'Connection', cache_info: CacheInfo, keys: Iterable) -> 'APIResult': """ Retrieves multiple key-value pairs from cache. 
:param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param keys: list of keys or tuples of (key, key_hint), - :param binary: (optional) pass True to keep the value in binary form. - False by default, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and a dict, made of retrieved key-value pairs, non-zero status and an error description on failure. """ + return __cache_get_all(connection, cache_info, keys) + + +async def cache_get_all_async(connection: 'AioConnection', cache_info: CacheInfo, keys: Iterable) -> 'APIResult': + """ + Async version of cache_get_all. + """ + return await __cache_get_all(connection, cache_info, keys) + +def __cache_get_all(connection, cache_info, keys): query_struct = Query( OP_CACHE_GET_ALL, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('keys', AnyDataArray()), - ], - query_id=query_id, + ] ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'keys': keys, }, response_config=[ ('data', Map), ], + post_process_fun=__post_process_value_by_key('data') ) - if result.status == 0: - result.value = dict(result.value)['data'] - return result -def cache_put_all( - connection: 'Connection', cache: Union[str, int], pairs: dict, - binary=False, query_id=None, -) -> 'APIResult': +def cache_put_all(connection: 'Connection', cache_info: CacheInfo, pairs: dict) -> 'APIResult': """ Puts multiple key-value pairs to cache (overwriting existing associations if any). :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param pairs: dictionary type parameters, contains key-value pairs to save. Each key or value can be an item of representable Python type or a tuple of (item, hint), - :param binary: (optional) pass True to keep the value in binary form. - False by default, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status if key-value pairs are written, non-zero status and an error description otherwise. """ + return __cache_put_all(connection, cache_info, pairs) + +async def cache_put_all_async(connection: 'AioConnection', cache_info: CacheInfo, pairs: dict) -> 'APIResult': + """ + Async version of cache_put_all. 
+ """ + return await __cache_put_all(connection, cache_info, pairs) + + +def __cache_put_all(connection, cache_info, pairs): query_struct = Query( OP_CACHE_PUT_ALL, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('data', Map), - ], - query_id=query_id, + ] ) - return query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'data': pairs, }, ) -def cache_contains_key( - connection: 'Connection', cache: Union[str, int], key, - key_hint=None, binary=False, query_id=None, -) -> 'APIResult': +def cache_contains_key(connection: 'Connection', cache_info: CacheInfo, key: Any, + key_hint: 'IgniteDataType' = None) -> 'APIResult': """ Returns a value indicating whether given key is present in cache. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param key_hint: (optional) Ignite data type, for which the given key - should be converted, - :param binary: pass True to keep the value in binary form. False - by default, - :param query_id: a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, + should be converted :return: API result data object. Contains zero status and a bool value retrieved on success: `True` when key is present, `False` otherwise, non-zero status and an error description on failure. """ + return __cache_contains_key(connection, cache_info, key, key_hint) + +async def cache_contains_key_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, + key_hint: 'IgniteDataType' = None) -> 'APIResult': + """ + Async version of cache_contains_key. + """ + return await __cache_contains_key(connection, cache_info, key, key_hint) + + +def __cache_contains_key(connection, cache_info, key, key_hint): query_struct = Query( OP_CACHE_CONTAINS_KEY, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), - ], - query_id=query_id, + ] ) - result = query_struct.perform( - connection, - query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + return query_perform( + query_struct, connection, + query_params={ + 'cache_info': cache_info, 'key': key, }, response_config=[ ('value', Bool), ], + post_process_fun=__post_process_value_by_key('value') ) - if result.status == 0: - result.value = result.value['value'] - return result -def cache_contains_keys( - connection: 'Connection', cache: Union[str, int], keys: Iterable, - binary=False, query_id=None, -) -> 'APIResult': +def cache_contains_keys(connection: 'Connection', cache_info: CacheInfo, keys: Iterable, + ) -> 'APIResult': """ Returns a value indicating whether all given keys are present in cache. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param keys: a list of keys or (key, type hint) tuples, - :param binary: pass True to keep the value in binary form. False - by default, - :param query_id: a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. 
Contains zero status and a bool value retrieved on success: `True` when all keys are present, `False` otherwise, non-zero status and an error description on failure. """ + return __cache_contains_keys(connection, cache_info, keys) + + +async def cache_contains_keys_async(connection: 'AioConnection', cache_info: CacheInfo, keys: Iterable) -> 'APIResult': + """ + Async version of cache_contains_keys. + """ + return await __cache_contains_keys(connection, cache_info, keys) + +def __cache_contains_keys(connection, cache_info, keys): query_struct = Query( OP_CACHE_CONTAINS_KEYS, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('keys', AnyDataArray()), - ], - query_id=query_id, + ] ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'keys': keys, }, response_config=[ ('value', Bool), ], + post_process_fun=__post_process_value_by_key('value') ) - if result.status == 0: - result.value = result.value['value'] - return result -def cache_get_and_put( - connection: 'Connection', cache: Union[str, int], key, value, - key_hint=None, value_hint=None, binary=False, query_id=None, -) -> 'APIResult': +def cache_get_and_put(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None) -> 'APIResult': """ - Puts a value with a given key to cache, and returns the previous value + Puts a value with a given key to cache_info, and returns the previous value for that key, or null value if there was not such key. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param value: value for the key, :param key_hint: (optional) Ignite data type, for which the given key should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param binary: pass True to keep the value in binary form. False - by default, - :param query_id: a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and an old value or None if a value is written, non-zero status and an error description in case of error. """ + return __cache_get_and_put(connection, cache_info, key, value, key_hint, value_hint) + +async def cache_get_and_put_async( + connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None +) -> 'APIResult': + """ + Async version of cache_get_and_put. 
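+
+    A rough usage sketch with explicit type hints, assuming ``conn`` and
+    ``cache_info`` are set up elsewhere and using ``IntObject`` and
+    ``String`` from ``pyignite.datatypes``::
+
+        result = await cache_get_and_put_async(
+            conn, cache_info, 1, 'new value',
+            key_hint=IntObject, value_hint=String
+        )
+        previous = result.value  # old value, or None if the key was absent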
+ """ + return await __cache_get_and_put(connection, cache_info, key, value, key_hint, value_hint) + + +def __cache_get_and_put(connection, cache_info, key, value, key_hint, value_hint): query_struct = Query( OP_CACHE_GET_AND_PUT, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), ], - query_id=query_id, ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, 'value': value, }, response_config=[ ('value', AnyDataObject), ], + post_process_fun=__post_process_value_by_key('value') ) - if result.status == 0: - result.value = result.value['value'] - return result -def cache_get_and_replace( - connection: 'Connection', cache: Union[str, int], key, value, - key_hint=None, value_hint=None, binary=False, query_id=None, -) -> 'APIResult': +def cache_get_and_replace(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None) -> 'APIResult': """ Puts a value with a given key to cache, returning previous value for that key, if and only if there is a value currently mapped for that key. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param value: value for the key, :param key_hint: (optional) Ignite data type, for which the given key should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param binary: pass True to keep the value in binary form. False - by default, - :param query_id: a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and an old value or None on success, non-zero status and an error description otherwise. """ + return __cache_get_and_replace(connection, cache_info, key, key_hint, value, value_hint) + +async def cache_get_and_replace_async( + connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None +) -> 'APIResult': + """ + Async version of cache_get_and_replace. 
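+
+    Illustrative call (``conn`` and ``cache_info`` are assumed to exist)::
+
+        result = await cache_get_and_replace_async(conn, cache_info, 'k', 'v')
+        # result.value holds the previous value, or None if 'k' was not mapped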
+ """ + return await __cache_get_and_replace(connection, cache_info, key, key_hint, value, value_hint) + + +def __cache_get_and_replace(connection, cache_info, key, key_hint, value, value_hint): query_struct = Query( OP_CACHE_GET_AND_REPLACE, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), - ], - query_id=query_id, + ] ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, 'value': value, }, response_config=[ ('value', AnyDataObject), ], + post_process_fun=__post_process_value_by_key('value') ) - if result.status == 0: - result.value = result.value['value'] - return result def cache_get_and_remove( - connection: 'Connection', cache: Union[str, int], key, - key_hint=None, binary=False, query_id=None, + connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None ) -> 'APIResult': """ Removes the cache entry with specified key, returning the value. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param key_hint: (optional) Ignite data type, for which the given key should be converted, - :param binary: pass True to keep the value in binary form. False - by default, - :param query_id: a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and an old value or None, non-zero status and an error description otherwise. """ + return __cache_get_and_remove(connection, cache_info, key, key_hint) + + +async def cache_get_and_remove_async( + connection: 'AioConnection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None +) -> 'APIResult': + return await __cache_get_and_remove(connection, cache_info, key, key_hint) + +def __cache_get_and_remove(connection, cache_info, key, key_hint): query_struct = Query( OP_CACHE_GET_AND_REMOVE, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), - ], - query_id=query_id, + ] ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, }, response_config=[ ('value', AnyDataObject), ], + post_process_fun=__post_process_value_by_key('value') ) - if result.status == 0: - result.value = result.value['value'] - return result -def cache_put_if_absent( - connection: 'Connection', cache: Union[str, int], key, value, - key_hint=None, value_hint=None, binary=False, query_id=None, -) -> 'APIResult': +def cache_put_if_absent(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key does not already exist. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. 
Can be of any supported type, :param value: value for the key, :param key_hint: (optional) Ignite data type, for which the given key should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param binary: (optional) pass True to keep the value in binary form. False - by default, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ + return __cache_put_if_absent(connection, cache_info, key, value, key_hint, value_hint) + +async def cache_put_if_absent_async( + connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None +) -> 'APIResult': + """ + Async version of cache_put_if_absent. + """ + return await __cache_put_if_absent(connection, cache_info, key, value, key_hint, value_hint) + + +def __cache_put_if_absent(connection, cache_info, key, value, key_hint, value_hint): query_struct = Query( OP_CACHE_PUT_IF_ABSENT, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), - ], - query_id=query_id, + ] ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, 'value': value, }, response_config=[ ('success', Bool), ], + post_process_fun=__post_process_value_by_key('success') ) - if result.status == 0: - result.value = result.value['success'] - return result -def cache_get_and_put_if_absent( - connection: 'Connection', cache: Union[str, int], key, value, - key_hint=None, value_hint=None, binary=False, query_id=None, -) -> 'APIResult': +def cache_get_and_put_if_absent(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key does not already exist. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param value: value for the key, :param key_hint: (optional) Ignite data type, for which the given key should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param binary: (optional) pass True to keep the value in binary form. False - by default, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and an old value or None on success, non-zero status and an error description otherwise. """ + return __cache_get_and_put_if_absent(connection, cache_info, key, value, key_hint, value_hint) + + +async def cache_get_and_put_if_absent_async( + connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None +) -> 'APIResult': + """ + Async version of cache_get_and_put_if_absent. 
+ """ + return await __cache_get_and_put_if_absent(connection, cache_info, key, value, key_hint, value_hint) + +def __cache_get_and_put_if_absent(connection, cache_info, key, value, key_hint, value_hint): query_struct = Query( OP_CACHE_GET_AND_PUT_IF_ABSENT, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), - ], - query_id=query_id, + ] ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, 'value': value, }, response_config=[ ('value', AnyDataObject), ], + post_process_fun=__post_process_value_by_key('value') ) - if result.status == 0: - result.value = result.value['value'] - return result -def cache_replace( - connection: 'Connection', cache: Union[str, int], key, value, - key_hint=None, value_hint=None, binary=False, query_id=None, -) -> 'APIResult': +def cache_replace(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key already exist. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param value: value for the key, :param key_hint: (optional) Ignite data type, for which the given key should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param binary: pass True to keep the value in binary form. False - by default, - :param query_id: a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and a boolean success code, or non-zero status and an error description if something has gone wrong. """ + return __cache_replace(connection, cache_info, key, value, key_hint, value_hint) + + +async def cache_replace_async( + connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None +) -> 'APIResult': + """ + Async version of cache_replace. 
+ """ + return await __cache_replace(connection, cache_info, key, value, key_hint, value_hint) + +def __cache_replace(connection, cache_info, key, value, key_hint, value_hint): query_struct = Query( OP_CACHE_REPLACE, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), - ], - query_id=query_id, + ] ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, 'value': value, }, response_config=[ ('success', Bool), ], + post_process_fun=__post_process_value_by_key('success') ) - if result.status == 0: - result.value = result.value['success'] - return result -def cache_replace_if_equals( - connection: 'Connection', cache: Union[str, int], key, sample, value, - key_hint=None, sample_hint=None, value_hint=None, - binary=False, query_id=None, -) -> 'APIResult': +def cache_replace_if_equals(connection: 'Connection', cache_info: CacheInfo, key: Any, sample: Any, value: Any, + key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, + value_hint: 'IgniteDataType' = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key already exists and value equals provided sample. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry, :param sample: a sample to compare the stored value with, :param value: new value for the given key, @@ -617,32 +620,39 @@ def cache_replace_if_equals( the given sample should be converted :param value_hint: (optional) Ignite data type, for which the given value should be converted, - :param binary: (optional) pass True to keep the value in binary form. - False by default, - :param query_id: (optional) a value generated by client and returned - as-is in response.query_id. When the parameter is omitted, a random - value is generated, :return: API result data object. Contains zero status and a boolean success code, or non-zero status and an error description if something has gone wrong. """ + return __cache_replace_if_equals(connection, cache_info, key, sample, value, key_hint, + sample_hint, value_hint) + +async def cache_replace_if_equals_async( + connection: 'AioConnection', cache_info: CacheInfo, key: Any, sample: Any, value: Any, + key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None +) -> 'APIResult': + """ + Async version of cache_replace_if_equals. 
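+
+    A compare-and-set sketch (``conn`` and ``cache_info`` are assumed to
+    exist)::
+
+        result = await cache_replace_if_equals_async(conn, cache_info, 'k', 'old', 'new')
+        replaced = result.value  # True only if the stored value equalled 'old'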
+ """ + return await __cache_replace_if_equals(connection, cache_info, key, sample, value, key_hint, + sample_hint, value_hint) + + +def __cache_replace_if_equals(connection, cache_info, key, sample, value, key_hint, sample_hint, value_hint): query_struct = Query( OP_CACHE_REPLACE_IF_EQUALS, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('sample', sample_hint or AnyDataObject), ('value', value_hint or AnyDataObject), - ], - query_id=query_id, + ] ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, 'sample': sample, 'value': value, @@ -650,346 +660,413 @@ def cache_replace_if_equals( response_config=[ ('success', Bool), ], + post_process_fun=__post_process_value_by_key('success') ) - if result.status == 0: - result.value = result.value['success'] - return result -def cache_clear( - connection: 'Connection', cache: Union[str, int], binary=False, - query_id=None, -) -> 'APIResult': +def cache_clear(connection: 'Connection', cache_info: CacheInfo) -> 'APIResult': """ Clears the cache without notifying listeners or cache writers. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, - :param binary: (optional) pass True to keep the value in binary form. - False by default, - :param query_id: (optional) a value generated by client and returned - as-is in response.query_id. When the parameter is omitted, a random - value is generated, + :param cache_info: cache meta info, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ + return __cache_clear(connection, cache_info) + +async def cache_clear_async(connection: 'AioConnection', cache_info: CacheInfo) -> 'APIResult': + """ + Async version of cache_clear. + """ + return await __cache_clear(connection, cache_info) + + +def __cache_clear(connection, cache_info): query_struct = Query( OP_CACHE_CLEAR, [ - ('hash_code', Int), - ('flag', Byte), - ], - query_id=query_id, + ('cache_info', CacheInfo), + ] ) - return query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, }, ) def cache_clear_key( - connection: 'Connection', cache: Union[str, int], key, - key_hint: object=None, binary=False, query_id=None, + connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None ) -> 'APIResult': """ Clears the cache key without notifying listeners or cache writers. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry, :param key_hint: (optional) Ignite data type, for which the given key should be converted, - :param binary: (optional) pass True to keep the value in binary form. - False by default, - :param query_id: (optional) a value generated by client and returned - as-is in response.query_id. When the parameter is omitted, a random - value is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. 
""" + return __cache_clear_key(connection, cache_info, key, key_hint) + +async def cache_clear_key_async( + connection: 'AioConnection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None +) -> 'APIResult': + """ + Async version of cache_clear_key. + """ + return await __cache_clear_key(connection, cache_info, key, key_hint) + + +def __cache_clear_key(connection, cache_info, key, key_hint): query_struct = Query( OP_CACHE_CLEAR_KEY, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), - ], - query_id=query_id, + ] ) - return query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, }, ) -def cache_clear_keys( - connection: 'Connection', cache: Union[str, int], keys: list, - binary=False, query_id=None, -) -> 'APIResult': +def cache_clear_keys(connection: 'Connection', cache_info: CacheInfo, keys: Iterable) -> 'APIResult': """ Clears the cache keys without notifying listeners or cache writers. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param keys: list of keys or tuples of (key, key_hint), - :param binary: (optional) pass True to keep the value in binary form. - False by default, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ + return __cache_clear_keys(connection, cache_info, keys) + + +async def cache_clear_keys_async(connection: 'AioConnection', cache_info: CacheInfo, keys: Iterable) -> 'APIResult': + """ + Async version of cache_clear_keys. + """ + return await __cache_clear_keys(connection, cache_info, keys) + +def __cache_clear_keys(connection, cache_info, keys): query_struct = Query( OP_CACHE_CLEAR_KEYS, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('keys', AnyDataArray()), - ], - query_id=query_id, + ] ) - return query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'keys': keys, }, ) def cache_remove_key( - connection: 'Connection', cache: Union[str, int], key, - key_hint: object=None, binary=False, query_id=None, + connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None ) -> 'APIResult': """ Clears the cache key without notifying listeners or cache writers. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry, :param key_hint: (optional) Ignite data type, for which the given key - should be converted, - :param binary: (optional) pass True to keep the value in binary form. - False by default, - :param query_id: (optional) a value generated by client and returned - as-is in response.query_id. When the parameter is omitted, a random - value is generated, + should be converted :return: API result data object. Contains zero status and a boolean success code, or non-zero status and an error description if something has gone wrong. 
""" + return __cache_remove_key(connection, cache_info, key, key_hint) + + +async def cache_remove_key_async( + connection: 'AioConnection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None +) -> 'APIResult': + """ + Async version of cache_remove_key. + """ + return await __cache_remove_key(connection, cache_info, key, key_hint) + +def __cache_remove_key(connection, cache_info, key, key_hint): query_struct = Query( OP_CACHE_REMOVE_KEY, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), - ], - query_id=query_id, + ] ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, }, response_config=[ ('success', Bool), ], + post_process_fun=__post_process_value_by_key('success') ) - if result.status == 0: - result.value = result.value['success'] - return result -def cache_remove_if_equals( - connection: 'Connection', cache: Union[str, int], key, sample, - key_hint=None, sample_hint=None, - binary=False, query_id=None, -) -> 'APIResult': +def cache_remove_if_equals(connection: 'Connection', cache_info: CacheInfo, key: Any, sample: Any, + key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None) -> 'APIResult': """ Removes an entry with a given key if provided value is equal to actual value, notifying listeners and cache writers. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry, :param sample: a sample to compare the stored value with, :param key_hint: (optional) Ignite data type, for which the given key should be converted, :param sample_hint: (optional) Ignite data type, for whic - the given sample should be converted - :param binary: (optional) pass True to keep the value in binary form. - False by default, - :param query_id: (optional) a value generated by client and returned - as-is in response.query_id. When the parameter is omitted, a random - value is generated, + the given sample should be converted, :return: API result data object. Contains zero status and a boolean success code, or non-zero status and an error description if something has gone wrong. """ + return __cache_remove_if_equals(connection, cache_info, key, sample, key_hint, sample_hint) + + +async def cache_remove_if_equals_async( + connection: 'AioConnection', cache_info: CacheInfo, key: Any, sample: Any, + key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None +) -> 'APIResult': + """ + Async version of cache_remove_if_equals. 
+ """ + return await __cache_remove_if_equals(connection, cache_info, key, sample, key_hint, sample_hint) + +def __cache_remove_if_equals(connection, cache_info, key, sample, key_hint, sample_hint): query_struct = Query( OP_CACHE_REMOVE_IF_EQUALS, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('sample', sample_hint or AnyDataObject), - ], - query_id=query_id, + ] ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, 'sample': sample, }, response_config=[ ('success', Bool), ], + post_process_fun=__post_process_value_by_key('success') ) - if result.status == 0: - result.value = result.value['success'] - return result -def cache_remove_keys( - connection: 'Connection', cache: Union[str, int], keys: Iterable, - binary=False, query_id=None, -) -> 'APIResult': +def cache_remove_keys(connection: 'Connection', cache_info: CacheInfo, keys: Iterable) -> 'APIResult': """ Removes entries with given keys, notifying listeners and cache writers. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param keys: list of keys or tuples of (key, key_hint), - :param binary: (optional) pass True to keep the value in binary form. - False by default, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ + return __cache_remove_keys(connection, cache_info, keys) + + +async def cache_remove_keys_async(connection: 'AioConnection', cache_info: CacheInfo, keys: Iterable) -> 'APIResult': + """ + Async version of cache_remove_keys. + """ + return await __cache_remove_keys(connection, cache_info, keys) + +def __cache_remove_keys(connection, cache_info, keys): query_struct = Query( OP_CACHE_REMOVE_KEYS, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('keys', AnyDataArray()), - ], - query_id=query_id, + ] ) - return query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'keys': keys, }, ) -def cache_remove_all( - connection: 'Connection', cache: Union[str, int], binary=False, - query_id=None, -) -> 'APIResult': +def cache_remove_all(connection: 'Connection', cache_info: CacheInfo) -> 'APIResult': """ - Removes all entries from cache, notifying listeners and cache writers. + Removes all entries from cache_info, notifying listeners and cache writers. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, - :param binary: (optional) pass True to keep the value in binary form. - False by default, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, + :param cache_info: cache meta info, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ + return __cache_remove_all(connection, cache_info) + + +async def cache_remove_all_async(connection: 'AioConnection', cache_info: CacheInfo) -> 'APIResult': + """ + Async version of cache_remove_all. 
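+
+    One-line usage sketch (``conn`` and ``cache_info`` are assumed to
+    exist)::
+
+        await cache_remove_all_async(conn, cache_info)  # drops every entry in the cache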
+ """ + return await __cache_remove_all(connection, cache_info) + +def __cache_remove_all(connection, cache_info): query_struct = Query( OP_CACHE_REMOVE_ALL, [ - ('hash_code', Int), - ('flag', Byte), - ], - query_id=query_id, + ('cache_info', CacheInfo), + ] ) - return query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, }, ) def cache_get_size( - connection: 'Connection', cache: Union[str, int], peek_modes=0, - binary=False, query_id=None, + connection: 'Connection', cache_info: CacheInfo, peek_modes: Union[int, list, tuple] = None ) -> 'APIResult': """ Gets the number of entries in cache. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param peek_modes: (optional) limit count to near cache partition (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache - (PeekModes.BACKUP). Defaults to all cache partitions (PeekModes.ALL), - :param binary: (optional) pass True to keep the value in binary form. - False by default, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, + (PeekModes.BACKUP). Defaults to pimary cache partitions (PeekModes.PRIMARY), :return: API result data object. Contains zero status and a number of cache entries on success, non-zero status and an error description otherwise. """ - if not isinstance(peek_modes, (list, tuple)): - if peek_modes == 0: - peek_modes = [] - else: - peek_modes = [peek_modes] + return __cache_get_size(connection, cache_info, peek_modes) + + +async def cache_get_size_async( + connection: 'AioConnection', cache_info: CacheInfo, peek_modes: Union[int, list, tuple] = None +) -> 'APIResult': + return await __cache_get_size(connection, cache_info, peek_modes) + + +def __cache_get_size(connection, cache_info, peek_modes): + if peek_modes is None: + peek_modes = [] + elif not isinstance(peek_modes, (list, tuple)): + peek_modes = [peek_modes] query_struct = Query( OP_CACHE_GET_SIZE, [ - ('hash_code', Int), - ('flag', Byte), - ('peek_modes', PeekModes), - ], - query_id=query_id, + ('cache_info', CacheInfo), + ('peek_modes', ByteArray), + ] ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'peek_modes': peek_modes, }, response_config=[ ('count', Long), ], + post_process_fun=__post_process_value_by_key('count') ) - if result.status == 0: - result.value = result.value['count'] - return result + + +def cache_local_peek( + conn: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + peek_modes: Union[int, list, tuple] = None +) -> 'APIResult': + """ + Peeks at in-memory cached value using default optional peek mode. + + This method will not load value from any cache store or from a remote + node. + + :param conn: connection: connection to Ignite server, + :param cache_info: cache meta info, + :param key: entry key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :param peek_modes: (optional) limit count to near cache partition + (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache + (PeekModes.BACKUP). 
Defaults to primary cache partitions (PeekModes.PRIMARY), + :return: API result data object. Contains zero status and a peeked value + (null if not found). + """ + return __cache_local_peek(conn, cache_info, key, key_hint, peek_modes) + + +async def cache_local_peek_async( + conn: 'AioConnection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + peek_modes: Union[int, list, tuple] = None, +) -> 'APIResult': + """ + Async version of cache_local_peek. + """ + return await __cache_local_peek(conn, cache_info, key, key_hint, peek_modes) + + +def __cache_local_peek(conn, cache_info, key, key_hint, peek_modes): + if peek_modes is None: + peek_modes = [] + elif not isinstance(peek_modes, (list, tuple)): + peek_modes = [peek_modes] + + query_struct = Query( + OP_CACHE_LOCAL_PEEK, + [ + ('cache_info', CacheInfo), + ('key', key_hint or AnyDataObject), + ('peek_modes', ByteArray), + ] + ) + return query_perform( + query_struct, conn, + query_params={ + 'cache_info': cache_info, + 'key': key, + 'peek_modes': peek_modes, + }, + response_config=[ + ('value', AnyDataObject), + ], + post_process_fun=__post_process_value_by_key('value') + ) + + +def __post_process_value_by_key(key): + def internal(result): + if result.status == 0: + result.value = result.value[key] + + return result + return internal diff --git a/pyignite/api/result.py b/pyignite/api/result.py index 864ef61..f134be9 100644 --- a/pyignite/api/result.py +++ b/pyignite/api/result.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from pyignite.queries.op_codes import OP_SUCCESS from pyignite.datatypes import String @@ -31,8 +32,8 @@ class APIResult: message = 'Success' value = None - def __init__(self, response: 'Response'): - self.status = response.status_code + def __init__(self, response): + self.status = getattr(response, 'status_code', OP_SUCCESS) self.query_id = response.query_id if hasattr(response, 'error_message'): self.message = String.to_python(response.error_message) diff --git a/pyignite/api/sql.py b/pyignite/api/sql.py index 1a18496..0f41194 100644 --- a/pyignite/api/sql.py +++ b/pyignite/api/sql.py @@ -13,43 +13,32 @@ # See the License for the specific language governing permissions and # limitations under the License. -""" -Only key-value queries (scan queries) are implemented. SQL part is still -in progress. 
-""" - -from typing import Union - -from pyignite.datatypes import ( - AnyDataArray, AnyDataObject, Bool, Byte, Int, Long, Map, Null, String, - StructArray, -) +from pyignite.connection import AioConnection, Connection +from pyignite.datatypes import AnyDataArray, AnyDataObject, Bool, Int, Long, Map, Null, String, StructArray from pyignite.datatypes.sql import StatementType -from pyignite.queries import Query, Response, SQLResponse -from pyignite.queries.op_codes import * -from pyignite.utils import cache_id +from pyignite.queries import Query, query_perform +from pyignite.queries.op_codes import ( + OP_QUERY_SCAN, OP_QUERY_SCAN_CURSOR_GET_PAGE, OP_QUERY_SQL, OP_QUERY_SQL_CURSOR_GET_PAGE, OP_QUERY_SQL_FIELDS, + OP_QUERY_SQL_FIELDS_CURSOR_GET_PAGE, OP_RESOURCE_CLOSE +) +from pyignite.utils import deprecated from .result import APIResult +from ..queries.cache_info import CacheInfo +from ..queries.response import SQLResponse -def scan( - connection: 'Connection', cache: Union[str, int], page_size: int, - partitions: int=-1, local: bool=False, binary: bool=False, query_id=None, -) -> APIResult: +def scan(conn: 'Connection', cache_info: CacheInfo, page_size: int, partitions: int = -1, + local: bool = False) -> APIResult: """ Performs scan query. - :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param conn: connection to Ignite server, + :param cache_info: cache meta info. :param page_size: cursor page size, :param partitions: (optional) number of partitions to query (negative to query entire cache), :param local: (optional) pass True if this query should be executed on local node only. Defaults to False, - :param binary: (optional) pass True to keep the value in binary form. - False by default, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and a value of type dict with results on success, non-zero status and an error description otherwise. @@ -61,24 +50,38 @@ def scan( * `more`: bool, True if more data is available for subsequent ‘scan_cursor_get_page’ calls. """ + return __scan(conn, cache_info, page_size, partitions, local) + + +async def scan_async(conn: 'AioConnection', cache_info: CacheInfo, page_size: int, partitions: int = -1, + local: bool = False) -> APIResult: + """ + Async version of scan. 
+ """ + return await __scan(conn, cache_info, page_size, partitions, local) + +def __query_result_post_process(result): + if result.status == 0: + result.value = dict(result.value) + return result + + +def __scan(conn, cache_info, page_size, partitions, local): query_struct = Query( OP_QUERY_SCAN, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('filter', Null), ('page_size', Int), ('partitions', Int), ('local', Bool), - ], - query_id=query_id, + ] ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, conn, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'filter': None, 'page_size': page_size, 'partitions': partitions, @@ -89,24 +92,17 @@ def scan( ('data', Map), ('more', Bool), ], + post_process_fun=__query_result_post_process ) - if result.status == 0: - result.value = dict(result.value) - return result -def scan_cursor_get_page( - connection: 'Connection', cursor: int, query_id=None, -) -> APIResult: +def scan_cursor_get_page(conn: 'Connection', cursor: int) -> APIResult: """ Fetches the next scan query cursor page by cursor ID that is obtained from `scan` function. - :param connection: connection to Ignite server, + :param conn: connection to Ignite server, :param cursor: cursor ID, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and a value of type dict with results on success, non-zero status and an error description otherwise. @@ -117,16 +113,22 @@ def scan_cursor_get_page( * `more`: bool, True if more data is available for subsequent ‘scan_cursor_get_page’ calls. """ + return __scan_cursor_get_page(conn, cursor) + + +async def scan_cursor_get_page_async(conn: 'AioConnection', cursor: int) -> APIResult: + return await __scan_cursor_get_page(conn, cursor) + +def __scan_cursor_get_page(conn, cursor): query_struct = Query( OP_QUERY_SCAN_CURSOR_GET_PAGE, [ ('cursor', Long), - ], - query_id=query_id, + ] ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, conn, query_params={ 'cursor': cursor, }, @@ -134,24 +136,24 @@ def scan_cursor_get_page( ('data', Map), ('more', Bool), ], + post_process_fun=__query_result_post_process ) - if result.status == 0: - result.value = dict(result.value) - return result +@deprecated(version='1.2.0', reason="This API is deprecated and will be removed in the following major release. " + "Use sql_fields instead") def sql( - connection: 'Connection', cache: Union[str, int], + conn: 'Connection', cache_info: CacheInfo, table_name: str, query_str: str, page_size: int, query_args=None, - distributed_joins: bool=False, replicated_only: bool=False, - local: bool=False, timeout: int=0, binary: bool=False, query_id=None + distributed_joins: bool = False, replicated_only: bool = False, + local: bool = False, timeout: int = 0 ) -> APIResult: """ Executes an SQL query over data stored in the cluster. The query returns the whole record (key and value). - :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param conn: connection to Ignite server, + :param cache_info: Cache meta info, :param table_name: name of a type or SQL table, :param query_str: SQL query string, :param page_size: cursor page size, @@ -163,11 +165,6 @@ def sql( on local node only. 
Defaults to False, :param timeout: (optional) non-negative timeout value in ms. Zero disables timeout (default), - :param binary: (optional) pass True to keep the value in binary form. - False by default, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and a value of type dict with results on success, non-zero status and an error description otherwise. @@ -186,8 +183,7 @@ def sql( query_struct = Query( OP_QUERY_SQL, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('table_name', String), ('query_str', String), ('query_args', AnyDataArray()), @@ -196,14 +192,12 @@ def sql( ('replicated_only', Bool), ('page_size', Int), ('timeout', Long), - ], - query_id=query_id, + ] ) result = query_struct.perform( - connection, + conn, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'table_name': table_name, 'query_str': query_str, 'query_args': query_args, @@ -224,17 +218,14 @@ def sql( return result -def sql_cursor_get_page( - connection: 'Connection', cursor: int, query_id=None, -) -> APIResult: +@deprecated(version='1.2.0', reason="This API is deprecated and will be removed in the following major release. " + "Use sql_fields instead") +def sql_cursor_get_page(conn: 'Connection', cursor: int) -> APIResult: """ Retrieves the next SQL query cursor page by cursor ID from `sql`. - :param connection: connection to Ignite server, + :param conn: connection to Ignite server, :param cursor: cursor ID, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and a value of type dict with results on success, non-zero status and an error description otherwise. @@ -250,11 +241,10 @@ def sql_cursor_get_page( OP_QUERY_SQL_CURSOR_GET_PAGE, [ ('cursor', Long), - ], - query_id=query_id, + ] ) result = query_struct.perform( - connection, + conn, query_params={ 'cursor': cursor, }, @@ -269,51 +259,42 @@ def sql_cursor_get_page( def sql_fields( - connection: 'Connection', cache: Union[str, int], - query_str: str, page_size: int, query_args=None, schema: str=None, - statement_type: int=StatementType.ANY, distributed_joins: bool=False, - local: bool=False, replicated_only: bool=False, - enforce_join_order: bool=False, collocated: bool=False, lazy: bool=False, - include_field_names: bool=False, max_rows: int=-1, timeout: int=0, - binary: bool=False, query_id=None + conn: 'Connection', cache_info: CacheInfo, + query_str: str, page_size: int, query_args=None, schema: str = None, + statement_type: int = StatementType.ANY, distributed_joins: bool = False, + local: bool = False, replicated_only: bool = False, + enforce_join_order: bool = False, collocated: bool = False, + lazy: bool = False, include_field_names: bool = False, max_rows: int = -1, + timeout: int = 0 ) -> APIResult: """ Performs SQL fields query. - :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param conn: connection to Ignite server, + :param cache_info: cache meta info. :param query_str: SQL query string, :param page_size: cursor page size, :param query_args: (optional) query arguments. List of values or (value, type hint) tuples, - :param schema: (optional) schema for the query. 
Defaults to `PUBLIC`, + :param schema: schema for the query. :param statement_type: (optional) statement type. Can be: - * StatementType.ALL − any type (default), + * StatementType.ALL − any type (default), * StatementType.SELECT − select, - * StatementType.UPDATE − update. + * StatementType.UPDATE − update. - :param distributed_joins: (optional) distributed joins. Defaults to False, + :param distributed_joins: (optional) distributed joins. :param local: (optional) pass True if this query should be executed - on local node only. Defaults to False, + on local node only. :param replicated_only: (optional) whether query contains only - replicated tables or not. Defaults to False, - :param enforce_join_order: (optional) enforce join order. Defaults - to False, + replicated tables or not. + :param enforce_join_order: (optional) enforce join order. :param collocated: (optional) whether your data is co-located or not. - Defaults to False, - :param lazy: (optional) lazy query execution. Defaults to False, + :param lazy: (optional) lazy query execution. :param include_field_names: (optional) include field names in result. - Defaults to False, - :param max_rows: (optional) query-wide maximum of rows. Defaults to -1 - (all rows), + :param max_rows: (optional) query-wide maximum of rows. :param timeout: (optional) non-negative timeout value in ms. Zero disables - timeout (default), - :param binary: (optional) pass True to keep the value in binary form. - False by default, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, + timeout. :return: API result data object. Contains zero status and a value of type dict with results on success, non-zero status and an error description otherwise. @@ -325,14 +306,39 @@ def sql_fields( * `more`: bool, True if more data is available for subsequent ‘sql_fields_cursor_get_page’ calls. """ + return __sql_fields(conn, cache_info, query_str, page_size, query_args, schema, statement_type, distributed_joins, + local, replicated_only, enforce_join_order, collocated, lazy, include_field_names, max_rows, + timeout) + + +async def sql_fields_async( + conn: 'AioConnection', cache_info: CacheInfo, + query_str: str, page_size: int, query_args=None, schema: str = None, + statement_type: int = StatementType.ANY, distributed_joins: bool = False, + local: bool = False, replicated_only: bool = False, + enforce_join_order: bool = False, collocated: bool = False, + lazy: bool = False, include_field_names: bool = False, max_rows: int = -1, + timeout: int = 0 +) -> APIResult: + """ + Async version of sql_fields. 
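+
+    A rough sketch of a fields query, assuming ``conn`` and ``cache_info``
+    exist; the table, columns and schema name are illustrative only::
+
+        result = await sql_fields_async(
+            conn, cache_info,
+            'SELECT name, age FROM Person WHERE age > ?', page_size=5,
+            query_args=[18], schema='PUBLIC', include_field_names=True
+        )
+        page = result.value  # one result page, laid out as described in ``sql_fields``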
+ """ + return await __sql_fields(conn, cache_info, query_str, page_size, query_args, schema, statement_type, + distributed_joins, local, replicated_only, enforce_join_order, collocated, lazy, + include_field_names, max_rows, timeout) + + +def __sql_fields( + conn, cache_info, query_str, page_size, query_args, schema, statement_type, distributed_joins, local, + replicated_only, enforce_join_order, collocated, lazy, include_field_names, max_rows, timeout +): if query_args is None: query_args = [] query_struct = Query( OP_QUERY_SQL_FIELDS, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('schema', String), ('page_size', Int), ('max_rows', Int), @@ -348,56 +354,40 @@ def sql_fields( ('timeout', Long), ('include_field_names', Bool), ], - query_id=query_id, + response_type=SQLResponse ) - _, send_buffer = query_struct.from_python({ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, - 'schema': schema, - 'page_size': page_size, - 'max_rows': max_rows, - 'query_str': query_str, - 'query_args': query_args, - 'statement_type': statement_type, - 'distributed_joins': distributed_joins, - 'local': local, - 'replicated_only': replicated_only, - 'enforce_join_order': enforce_join_order, - 'collocated': collocated, - 'lazy': lazy, - 'timeout': timeout, - 'include_field_names': include_field_names, - }) - - connection.send(send_buffer) - - response_struct = SQLResponse( + return query_perform( + query_struct, conn, + query_params={ + 'cache_info': cache_info, + 'schema': schema, + 'page_size': page_size, + 'max_rows': max_rows, + 'query_str': query_str, + 'query_args': query_args, + 'statement_type': statement_type, + 'distributed_joins': distributed_joins, + 'local': local, + 'replicated_only': replicated_only, + 'enforce_join_order': enforce_join_order, + 'collocated': collocated, + 'lazy': lazy, + 'timeout': timeout, + 'include_field_names': include_field_names, + }, include_field_names=include_field_names, has_cursor=True, ) - response_class, recv_buffer = response_struct.parse(connection) - response = response_class.from_buffer_copy(recv_buffer) - - result = APIResult(response) - if result.status != 0: - return result - result.value = response_struct.to_python(response) - return result -def sql_fields_cursor_get_page( - connection: 'Connection', cursor: int, field_count: int, query_id=None, -) -> APIResult: +def sql_fields_cursor_get_page(conn: 'Connection', cursor: int, field_count: int) -> APIResult: """ Retrieves the next query result page by cursor ID from `sql_fields`. - :param connection: connection to Ignite server, + :param conn: connection to Ignite server, :param cursor: cursor ID, :param field_count: a number of fields in a row, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and a value of type dict with results on success, non-zero status and an error description otherwise. @@ -408,34 +398,41 @@ def sql_fields_cursor_get_page( * `more`: bool, True if more data is available for subsequent ‘sql_fields_cursor_get_page’ calls. """ + return __sql_fields_cursor_get_page(conn, cursor, field_count) + +async def sql_fields_cursor_get_page_async(conn: 'AioConnection', cursor: int, field_count: int) -> APIResult: + """ + Async version sql_fields_cursor_get_page. 
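+
+    Follow-up page fetch sketch; ``conn`` is assumed, and ``cursor`` and the
+    field count come from a previous ``sql_fields`` call::
+
+        result = await sql_fields_cursor_get_page_async(conn, cursor, field_count=2)
+        rows, more = result.value['data'], result.value['more']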
+ """ + return await __sql_fields_cursor_get_page(conn, cursor, field_count) + + +def __sql_fields_cursor_get_page(conn, cursor, field_count): query_struct = Query( OP_QUERY_SQL_FIELDS_CURSOR_GET_PAGE, [ ('cursor', Long), + ] + ) + return query_perform( + query_struct, conn, + query_params={ + 'cursor': cursor, + }, + response_config=[ + ('data', StructArray([(f'field_{i}', AnyDataObject) for i in range(field_count)])), + ('more', Bool), ], - query_id=query_id, + post_process_fun=__post_process_sql_fields_cursor ) - _, send_buffer = query_struct.from_python({ - 'cursor': cursor, - }) - - connection.send(send_buffer) - response_struct = Response([ - ('data', StructArray([ - ('field_{}'.format(i), AnyDataObject) for i in range(field_count) - ])), - ('more', Bool), - ]) - response_class, recv_buffer = response_struct.parse(connection) - response = response_class.from_buffer_copy(recv_buffer) - - result = APIResult(response) +def __post_process_sql_fields_cursor(result): if result.status != 0: return result - value = response_struct.to_python(response) + + value = result.value result.value = { 'data': [], 'more': value['more'] @@ -445,31 +442,32 @@ def sql_fields_cursor_get_page( return result -def resource_close( - connection: 'Connection', cursor: int, query_id=None -) -> APIResult: +def resource_close(conn: 'Connection', cursor: int) -> APIResult: """ Closes a resource, such as query cursor. - :param connection: connection to Ignite server, + :param conn: connection to Ignite server, :param cursor: cursor ID, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ + return __resource_close(conn, cursor) + +async def resource_close_async(conn: 'AioConnection', cursor: int) -> APIResult: + return await __resource_close(conn, cursor) + + +def __resource_close(conn, cursor): query_struct = Query( OP_RESOURCE_CLOSE, [ ('cursor', Long), - ], - query_id=query_id, + ] ) - return query_struct.perform( - connection, + return query_perform( + query_struct, conn, query_params={ 'cursor': cursor, - }, + } ) diff --git a/pyignite/api/tx_api.py b/pyignite/api/tx_api.py new file mode 100644 index 0000000..ee8de07 --- /dev/null +++ b/pyignite/api/tx_api.py @@ -0,0 +1,124 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
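+
+# A rough usage sketch for this module, assuming ``conn`` is an established
+# Connection and ``concurrency``/``isolation`` hold the byte codes expected
+# by the server for the desired transaction mode:
+#
+#     res = tx_start(conn, concurrency, isolation, timeout=3000, label='example')
+#     tx_id = res.value
+#     try:
+#         ...                           # cache operations inside the transaction
+#         tx_end(tx_id, committed=True)
+#     except Exception:
+#         tx_end(tx_id, committed=False)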
+ +import contextvars + +import attr + +from pyignite.datatypes import Byte, String, Long, Int, Bool +from pyignite.exceptions import CacheError +from pyignite.queries import Query, query_perform +from pyignite.queries.op_codes import OP_TX_START, OP_TX_END + +__CURRENT_TX = contextvars.ContextVar('current_tx', default=None) + + +def get_tx_id(): + ctx = __CURRENT_TX.get() if __CURRENT_TX else None + return ctx.tx_id if ctx else None + + +def get_tx_connection(): + ctx = __CURRENT_TX.get() if __CURRENT_TX else None + return ctx.conn if ctx else None + + +@attr.s +class TransactionContext: + tx_id = attr.ib(type=int, default=None) + conn = attr.ib(default=None) + + +def tx_start(conn, concurrency, isolation, timeout: int = 0, label: str = None): + result = __tx_start(conn, concurrency, isolation, timeout, label) + return __tx_start_post_process(result, conn) + + +async def tx_start_async(conn, concurrency, isolation, timeout: int = 0, label: str = None): + result = await __tx_start(conn, concurrency, isolation, timeout, label) + return __tx_start_post_process(result, conn) + + +def __tx_start(conn, concurrency, isolation, timeout, label): + query_struct = Query( + OP_TX_START, + [ + ('concurrency', Byte), + ('isolation', Byte), + ('timeout', Long), + ('label', String) + ] + ) + return query_perform( + query_struct, conn, + query_params={ + 'concurrency': concurrency, + 'isolation': isolation, + 'timeout': timeout, + 'label': label + }, + response_config=[ + ('tx_id', Int) + ] + ) + + +def tx_end(tx_id, committed): + ctx = __CURRENT_TX.get() + + if not ctx or ctx.tx_id != tx_id: + raise CacheError("Cannot commit transaction from different thread or coroutine") + + try: + return __tx_end(ctx.conn, tx_id, committed) + finally: + __CURRENT_TX.set(None) + + +async def tx_end_async(tx_id, committed): + ctx = __CURRENT_TX.get() + + if not ctx or ctx.tx_id != tx_id: + raise CacheError("Cannot commit transaction from different thread or coroutine") + + try: + return await __tx_end(ctx.conn, tx_id, committed) + finally: + __CURRENT_TX.set(None) + + +def __tx_end(conn, tx_id, committed): + query_struct = Query( + OP_TX_END, + [ + ('tx_id', Int), + ('committed', Bool) + ], + ) + return query_perform( + query_struct, conn, + query_params={ + 'tx_id': tx_id, + 'committed': committed + } + ) + + +def __tx_start_post_process(result, conn): + if result.status == 0: + tx_id = result.value['tx_id'] + __CURRENT_TX.set(TransactionContext(tx_id, conn)) + result.value = tx_id + return result diff --git a/pyignite/binary.py b/pyignite/binary.py index e726730..551f1d0 100644 --- a/pyignite/binary.py +++ b/pyignite/binary.py @@ -26,11 +26,21 @@ """ from collections import OrderedDict +import ctypes +from io import SEEK_CUR from typing import Any import attr -from .datatypes import * +from .constants import PROTOCOL_BYTE_ORDER +from .datatypes import ( + Null, ByteObject, ShortObject, IntObject, LongObject, FloatObject, DoubleObject, CharObject, BoolObject, UUIDObject, + DateObject, TimestampObject, TimeObject, EnumObject, BinaryEnumObject, ByteArrayObject, ShortArrayObject, + IntArrayObject, LongArrayObject, FloatArrayObject, DoubleArrayObject, CharArrayObject, BoolArrayObject, + UUIDArrayObject, DateArrayObject, TimestampArrayObject, TimeArrayObject, EnumArrayObject, String, StringArrayObject, + DecimalObject, DecimalArrayObject, ObjectArrayObject, CollectionObject, MapObject, BinaryObject, WrappedDataObject +) +from .datatypes.base import IgniteDataTypeProps from .exceptions import ParseError from .utils import 
entity_id, schema_id @@ -48,21 +58,11 @@ ] -class GenericObjectPropsMixin: +class GenericObjectProps(IgniteDataTypeProps): """ This class is mixed both to metaclass and to resulting class to make class properties universally available. You should not subclass it directly. """ - @property - def type_name(self) -> str: - """ Binary object type name. """ - return self._type_name - - @property - def type_id(self) -> int: - """ Binary object type ID. """ - return entity_id(self._type_name) - @property def schema(self) -> OrderedDict: """ Binary object schema. """ @@ -76,20 +76,23 @@ def schema_id(self) -> int: def __new__(cls, *args, **kwargs) -> Any: # allow all items in Binary Object schema to be populated as optional # arguments to `__init__()` with sensible defaults. - if cls is not GenericObjectMeta: + if not attr.has(cls): attributes = { - k: attr.ib( - type=getattr(v, 'pythonic', type(None)), - default=getattr(v, 'default', None), - ) for k, v in cls.schema.items() + k: attr.ib(type=getattr(v, 'pythonic', type(None)), default=getattr(v, 'default', None)) + for k, v in cls.schema.items() } + attributes.update({'version': attr.ib(type=int, default=1)}) cls = attr.s(cls, these=attributes) # skip parameters return super().__new__(cls) -class GenericObjectMeta(type, GenericObjectPropsMixin): +class GenericObjectPropsMeta(type, GenericObjectProps): + pass + + +class GenericObjectMeta(GenericObjectPropsMeta): """ Complex (or Binary) Object metaclass. It is aimed to help user create classes, which objects could serve as a pythonic representation of the @@ -103,10 +106,120 @@ def __new__( mcs: Any, name: str, base_classes: tuple, namespace: dict, **kwargs ) -> Any: """ Sort out class creation arguments. """ - return super().__new__( - mcs, name, (GenericObjectPropsMixin, )+base_classes, namespace + + result = super().__new__( + mcs, name, (GenericObjectProps, ) + base_classes, namespace ) + def _from_python(self, stream, save_to_buf=False): + """ + Method for building binary representation of the Generic object + and calculating a hashcode from it. + + :param self: Generic object instance, + :param stream: BinaryStream + :param save_to_buf: Optional. If True, save serialized data to buffer. 
+ """ + initial_pos = stream.tell() + header, header_class = write_header(self, stream) + + offsets = [ctypes.sizeof(header_class)] + schema_items = list(self.schema.items()) + for field_name, field_type in schema_items: + val = getattr(self, field_name, getattr(field_type, 'default', None)) + field_start_pos = stream.tell() + field_type.from_python(stream, val) + offsets.append(max(offsets) + stream.tell() - field_start_pos) + + write_footer(self, stream, header, header_class, schema_items, offsets, initial_pos, save_to_buf) + + async def _from_python_async(self, stream, save_to_buf=False): + """ + Async version of _from_python + """ + initial_pos = stream.tell() + header, header_class = write_header(self, stream) + + offsets = [ctypes.sizeof(header_class)] + schema_items = list(self.schema.items()) + for field_name, field_type in schema_items: + val = getattr(self, field_name, getattr(field_type, 'default', None)) + field_start_pos = stream.tell() + await field_type.from_python_async(stream, val) + offsets.append(max(offsets) + stream.tell() - field_start_pos) + + write_footer(self, stream, header, header_class, schema_items, offsets, initial_pos, save_to_buf) + + def write_header(obj, stream): + header_class = BinaryObject.get_header_class() + header = header_class() + header.type_code = int.from_bytes( + BinaryObject.type_code, + byteorder=PROTOCOL_BYTE_ORDER + ) + header.flags = BinaryObject.USER_TYPE | BinaryObject.HAS_SCHEMA + if stream.compact_footer: + header.flags |= BinaryObject.COMPACT_FOOTER + header.version = obj.version + header.type_id = obj.type_id + header.schema_id = obj.schema_id + + stream.seek(ctypes.sizeof(header_class), SEEK_CUR) + + return header, header_class + + def write_footer(obj, stream, header, header_class, schema_items, offsets, initial_pos, save_to_buf): + offsets = offsets[:-1] + header_len = ctypes.sizeof(header_class) + + # create footer + if max(offsets, default=0) < 255: + header.flags |= BinaryObject.OFFSET_ONE_BYTE + elif max(offsets) < 65535: + header.flags |= BinaryObject.OFFSET_TWO_BYTES + + schema_class = BinaryObject.schema_type(header.flags) * len(offsets) + schema = schema_class() + + if stream.compact_footer: + for i, offset in enumerate(offsets): + schema[i] = offset + else: + for i, offset in enumerate(offsets): + schema[i].field_id = entity_id(schema_items[i][0]) + schema[i].offset = offset + + # calculate size and hash code + fields_data_len = stream.tell() - initial_pos - header_len + header.schema_offset = fields_data_len + header_len + header.length = header.schema_offset + ctypes.sizeof(schema_class) + header.hash_code = stream.hashcode(initial_pos + header_len, fields_data_len) + + stream.seek(initial_pos) + stream.write(header) + stream.seek(initial_pos + header.schema_offset) + stream.write(schema) + + if save_to_buf: + obj._buffer = stream.slice(initial_pos, stream.tell() - initial_pos) + obj._hashcode = header.hash_code + + def _setattr(self, attr_name: str, attr_value: Any): + # reset binary representation, if any field is changed + if attr_name in self._schema.keys(): + self._buffer = None + self._hashcode = None + + # `super()` is really need these parameters + super(result, self).__setattr__(attr_name, attr_value) + + setattr(result, _from_python.__name__, _from_python) + setattr(result, _from_python_async.__name__, _from_python_async) + setattr(result, '__setattr__', _setattr) + setattr(result, '_buffer', None) + setattr(result, '_hashcode', None) + return result + @staticmethod def _validate_schema(schema: dict): for 
field_type in schema.values(): @@ -117,7 +230,7 @@ def _validate_schema(schema: dict): def __init__( cls, name: str, base_classes: tuple, namespace: dict, - type_name: str=None, schema: OrderedDict=None, **kwargs + type_name: str = None, schema: OrderedDict = None, **kwargs ): """ Initializes binary object class. diff --git a/pyignite/cache.py b/pyignite/cache.py index 6cd7377..51f07c9 100644 --- a/pyignite/cache.py +++ b/pyignite/cache.py @@ -12,153 +12,119 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -from typing import Any, Iterable, Optional, Union - -from .datatypes import prop_codes -from .exceptions import ( - CacheCreationError, CacheError, ParameterError, SQLError, -) -from .utils import cache_id, is_wrapped, status_to_exception, unwrap_binary +import datetime +from typing import Any, Iterable, Optional, Tuple, Union + +from .api.tx_api import get_tx_connection +from .datatypes import prop_codes, ExpiryPolicy +from .datatypes.internal import AnyDataObject +from .exceptions import CacheCreationError, CacheError, ParameterError, SQLError, NotSupportedByClusterError +from .queries.cache_info import CacheInfo +from .utils import cache_id, status_to_exception from .api.cache_config import ( - cache_create, cache_create_with_config, - cache_get_or_create, cache_get_or_create_with_config, - cache_destroy, cache_get_configuration, + cache_create, cache_create_with_config, cache_get_or_create, cache_get_or_create_with_config, cache_destroy, + cache_get_configuration ) from .api.key_value import ( - cache_get, cache_put, cache_get_all, cache_put_all, cache_replace, - cache_clear, cache_clear_key, cache_clear_keys, - cache_contains_key, cache_contains_keys, - cache_get_and_put, cache_get_and_put_if_absent, cache_put_if_absent, - cache_get_and_remove, cache_get_and_replace, - cache_remove_key, cache_remove_keys, cache_remove_all, - cache_remove_if_equals, cache_replace_if_equals, cache_get_size, + cache_get, cache_put, cache_get_all, cache_put_all, cache_replace, cache_clear, cache_clear_key, cache_clear_keys, + cache_contains_key, cache_contains_keys, cache_get_and_put, cache_get_and_put_if_absent, cache_put_if_absent, + cache_get_and_remove, cache_get_and_replace, cache_remove_key, cache_remove_keys, cache_remove_all, + cache_remove_if_equals, cache_replace_if_equals, cache_get_size ) -from .api.sql import scan, scan_cursor_get_page, sql, sql_cursor_get_page - +from .cursors import ScanCursor, SqlCursor PROP_CODES = set([ getattr(prop_codes, x) for x in dir(prop_codes) if x.startswith('PROP_') ]) -CACHE_CREATE_FUNCS = { - True: { - True: cache_get_or_create_with_config, - False: cache_create_with_config, - }, - False: { - True: cache_get_or_create, - False: cache_create, - }, -} - - -class Cache: - """ - Ignite cache abstraction. Users should never use this class directly, - but construct its instances with - :py:meth:`~pyignite.client.Client.create_cache`, - :py:meth:`~pyignite.client.Client.get_or_create_cache` or - :py:meth:`~pyignite.client.Client.get_cache` methods instead. See - :ref:`this example ` on how to do it. 
- """ - _cache_id = None - _name = None - _client = None - _settings = None - - @staticmethod - def _validate_settings( - settings: Union[str, dict]=None, get_only: bool=False, - ): - if any([ - not settings, - type(settings) not in (str, dict), - type(settings) is dict and prop_codes.PROP_NAME not in settings, - ]): - raise ParameterError('You should supply at least cache name') - - if all([ - type(settings) is dict, - not set(settings).issubset(PROP_CODES), - ]): - raise ParameterError('One or more settings was not recognized') - if get_only and type(settings) is dict and len(settings) != 1: - raise ParameterError('Only cache name allowed as a parameter') - def __init__( - self, client: 'Client', settings: Union[str, dict]=None, - with_get: bool=False, get_only: bool=False, - ): - """ - Initialize cache object. +def get_cache(client: 'Client', settings: Union[str, dict]) -> 'Cache': + name, settings = __parse_settings(settings) + if settings: + raise ParameterError('Only cache name allowed as a parameter') - :param client: Ignite client, - :param settings: cache settings. Can be a string (cache name) or a dict - of cache properties and their values. In this case PROP_NAME is - mandatory, - :param with_get: (optional) do not raise exception, if the cache - is already exists. Defaults to False, - :param get_only: (optional) do not communicate with Ignite server - at all, only create Cache instance. Defaults to False. - """ - self._client = client - self._validate_settings(settings) - if type(settings) == str: - self._name = settings - else: - self._name = settings[prop_codes.PROP_NAME] + return Cache(client, name) - if not get_only: - func = CACHE_CREATE_FUNCS[type(settings) is dict][with_get] - result = func(client, settings) - if result.status != 0: - raise CacheCreationError(result.message) - self._cache_id = cache_id(self._name) +def create_cache(client: 'Client', settings: Union[str, dict]) -> 'Cache': + name, settings = __parse_settings(settings) - @property - def settings(self) -> Optional[dict]: - """ - Lazy Cache settings. See the :ref:`example ` - of reading this property. + conn = client.random_node + if settings: + result = cache_create_with_config(conn, settings) + else: + result = cache_create(conn, name) - All cache properties are documented here: :ref:`cache_props`. + if result.status != 0: + raise CacheCreationError(result.message) - :return: dict of cache properties and their values. 
- """ - if self._settings is None: - config_result = cache_get_configuration(self._client, self._cache_id) - if config_result.status == 0: - self._settings = config_result.value - else: - raise CacheError(config_result.message) + return Cache(client, name) - return self._settings + +def get_or_create_cache(client: 'Client', settings: Union[str, dict]) -> 'Cache': + name, settings = __parse_settings(settings) + + conn = client.random_node + if settings: + result = cache_get_or_create_with_config(conn, settings) + else: + result = cache_get_or_create(conn, name) + + if result.status != 0: + raise CacheCreationError(result.message) + + return Cache(client, name) + + +def __parse_settings(settings: Union[str, dict]) -> Tuple[Optional[str], Optional[dict]]: + if isinstance(settings, str): + return settings, None + elif isinstance(settings, dict) and prop_codes.PROP_NAME in settings: + name = settings[prop_codes.PROP_NAME] + if len(settings) == 1: + return name, None + + if not set(settings).issubset(PROP_CODES): + raise ParameterError('One or more settings was not recognized') + + return name, settings + else: + raise ParameterError('You should supply at least cache name') + + +class BaseCache: + def __init__(self, client: 'BaseClient', name: str, expiry_policy: ExpiryPolicy = None): + self._client = client + self._name = name + self._settings = None + self._cache_info = CacheInfo(cache_id=cache_id(self._name), + protocol_context=client.protocol_context, + expiry_policy=expiry_policy) + self._client.register_cache(self.cache_info.cache_id) @property def name(self) -> str: """ - Lazy cache name. - :return: cache name string. """ - if self._name is None: - self._name = self.settings[prop_codes.PROP_NAME] - return self._name @property - def client(self) -> 'Client': + def client(self) -> 'BaseClient': """ - Ignite :class:`~pyignite.client.Client` object. - :return: Client object, through which the cache is accessed. """ return self._client + @property + def cache_info(self) -> CacheInfo: + """ + Cache meta info. + """ + return self._cache_info + @property def cache_id(self) -> int: """ @@ -166,29 +132,86 @@ def cache_id(self) -> int: :return: integer value of the cache ID. """ - return self._cache_id + return self._cache_info.cache_id + + def with_expire_policy( + self, expiry_policy: Optional[ExpiryPolicy] = None, + create: Union[int, datetime.timedelta] = ExpiryPolicy.UNCHANGED, + update: Union[int, datetime.timedelta] = ExpiryPolicy.UNCHANGED, + access: Union[int, datetime.timedelta] = ExpiryPolicy.UNCHANGED + ): + """ + :param expiry_policy: optional :class:`~pyignite.datatypes.expiry_policy.ExpiryPolicy` + object. If it is set, other params will be ignored, + :param create: TTL for create in milliseconds or :py:class:`~time.timedelta`, + :param update: TTL for update in milliseconds or :py:class:`~time.timedelta`, + :param access: TTL for access in milliseconds or :py:class:`~time.timedelta`, + :return: cache decorator with expiry policy set. + """ + if not self.client.protocol_context.is_expiry_policy_supported(): + raise NotSupportedByClusterError("'ExpiryPolicy' API is not supported by the cluster") + + cache_cls = type(self) + if not expiry_policy: + expiry_policy = ExpiryPolicy(create=create, update=update, access=access) + + return cache_cls(self.client, self.name, expiry_policy) + + +class Cache(BaseCache): + """ + Ignite cache abstraction. 
Users should never use this class directly, + but construct its instances with + :py:meth:`~pyignite.client.Client.create_cache`, + :py:meth:`~pyignite.client.Client.get_or_create_cache` or + :py:meth:`~pyignite.client.Client.get_cache` methods instead. See + :ref:`this example ` on how to do it. + """ + + def __init__(self, client: 'Client', name: str, expiry_policy: ExpiryPolicy = None): + """ + Initialize cache object. For internal use. + + :param client: Ignite client, + :param name: Cache name. + """ + super().__init__(client, name, expiry_policy) + + def _get_best_node(self, key=None, key_hint=None): + tx_conn = get_tx_connection() + return tx_conn if tx_conn else self.client.get_best_node(self, key, key_hint) - def _process_binary(self, value: Any) -> Any: + @property + def settings(self) -> Optional[dict]: """ - Detects and recursively unwraps Binary Object. + Lazy Cache settings. See the :ref:`example ` + of reading this property. - :param value: anything that could be a Binary Object, - :return: the result of the Binary Object unwrapping with all other data - left intact. + All cache properties are documented here: :ref:`cache_props`. + + :return: dict of cache properties and their values. """ - if is_wrapped(value): - return unwrap_binary(self._client, value) - return value + if self._settings is None: + config_result = cache_get_configuration( + self._get_best_node(), + self.cache_info + ) + if config_result.status == 0: + self._settings = config_result.value + else: + raise CacheError(config_result.message) + + return self._settings @status_to_exception(CacheError) def destroy(self): """ Destroys cache with a given name. """ - return cache_destroy(self._client, self._cache_id) + return cache_destroy(self._get_best_node(), self.cache_id) @status_to_exception(CacheError) - def get(self, key, key_hint: object=None) -> Any: + def get(self, key, key_hint: object = None) -> Any: """ Retrieves a value from cache by key. @@ -197,12 +220,20 @@ def get(self, key, key_hint: object=None) -> Any: should be converted, :return: value retrieved. """ - result = cache_get(self._client, self._cache_id, key, key_hint=key_hint) - result.value = self._process_binary(result.value) + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + result = cache_get( + self._get_best_node(key, key_hint), + self.cache_info, + key, + key_hint=key_hint + ) + result.value = self.client.unwrap_binary(result.value) return result @status_to_exception(CacheError) - def put(self, key, value, key_hint: object=None, value_hint: object=None): + def put(self, key, value, key_hint: object = None, value_hint: object = None): """ Puts a value with a given key to cache (overwriting existing value if any). @@ -214,23 +245,27 @@ def put(self, key, value, key_hint: object=None, value_hint: object=None): :param value_hint: (optional) Ignite data type, for which the given value should be converted. """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + return cache_put( - self._client, self._cache_id, key, value, + self._get_best_node(key, key_hint), + self.cache_info, key, value, key_hint=key_hint, value_hint=value_hint ) @status_to_exception(CacheError) - def get_all(self, keys: list) -> list: + def get_all(self, keys: list) -> dict: """ Retrieves multiple key-value pairs from cache. :param keys: list of keys or tuples of (key, key_hint), :return: a dict of key-value pairs. 
""" - result = cache_get_all(self._client, self._cache_id, keys) + result = cache_get_all(self._get_best_node(), self.cache_info, keys) if result.value: for key, value in result.value.items(): - result.value[key] = self._process_binary(value) + result.value[key] = self.client.unwrap_binary(value) return result @status_to_exception(CacheError) @@ -243,11 +278,11 @@ def put_all(self, pairs: dict): to save. Each key or value can be an item of representable Python type or a tuple of (item, hint), """ - return cache_put_all(self._client, self._cache_id, pairs) + return cache_put_all(self._get_best_node(), self.cache_info, pairs) @status_to_exception(CacheError) def replace( - self, key, value, key_hint: object=None, value_hint: object=None + self, key, value, key_hint: object = None, value_hint: object = None ): """ Puts a value with a given key to cache only if the key already exist. @@ -259,28 +294,33 @@ def replace( :param value_hint: (optional) Ignite data type, for which the given value should be converted. """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + result = cache_replace( - self._client, self._cache_id, key, value, + self._get_best_node(key, key_hint), + self.cache_info, key, value, key_hint=key_hint, value_hint=value_hint ) - result.value = self._process_binary(result.value) + result.value = self.client.unwrap_binary(result.value) return result @status_to_exception(CacheError) - def clear(self, keys: Optional[list]=None): + def clear(self, keys: Optional[list] = None): """ Clears the cache without notifying listeners or cache writers. :param keys: (optional) list of cache keys or (key, key type hint) tuples to clear (default: clear all). """ + conn = self._get_best_node() if keys: - return cache_clear_keys(self._client, self._cache_id, keys) + return cache_clear_keys(conn, self.cache_info, keys) else: - return cache_clear(self._client, self._cache_id) + return cache_clear(conn, self.cache_info) @status_to_exception(CacheError) - def clear_key(self, key, key_hint: object=None): + def clear_key(self, key, key_hint: object = None): """ Clears the cache key without notifying listeners or cache writers. @@ -288,10 +328,26 @@ def clear_key(self, key, key_hint: object=None): :param key_hint: (optional) Ignite data type, for which the given key should be converted, """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + return cache_clear_key( - self._client, self._cache_id, key, key_hint=key_hint + self._get_best_node(key, key_hint), + self.cache_info, + key, + key_hint=key_hint ) + @status_to_exception(CacheError) + def clear_keys(self, keys: Iterable): + """ + Clears the cache key without notifying listeners or cache writers. + + :param keys: a list of keys or (key, type hint) tuples + """ + + return cache_clear_keys(self._get_best_node(), self.cache_info, keys) + @status_to_exception(CacheError) def contains_key(self, key, key_hint=None) -> bool: """ @@ -302,8 +358,14 @@ def contains_key(self, key, key_hint=None) -> bool: should be converted, :return: boolean `True` when key is present, `False` otherwise. 
""" + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + return cache_contains_key( - self._client, self._cache_id, key, key_hint=key_hint + self._get_best_node(key, key_hint), + self.cache_info, + key, + key_hint=key_hint ) @status_to_exception(CacheError) @@ -314,7 +376,7 @@ def contains_keys(self, keys: Iterable) -> bool: :param keys: a list of keys or (key, type hint) tuples, :return: boolean `True` when all keys are present, `False` otherwise. """ - return cache_contains_keys(self._client, self._cache_id, keys) + return cache_contains_keys(self._get_best_node(), self.cache_info, keys) @status_to_exception(CacheError) def get_and_put(self, key, value, key_hint=None, value_hint=None) -> Any: @@ -330,15 +392,21 @@ def get_and_put(self, key, value, key_hint=None, value_hint=None) -> Any: value should be converted. :return: old value or None. """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + result = cache_get_and_put( - self._client, self._cache_id, key, value, key_hint, value_hint + self._get_best_node(key, key_hint), + self.cache_info, + key, value, + key_hint, value_hint ) - result.value = self._process_binary(result.value) + result.value = self.client.unwrap_binary(result.value) return result @status_to_exception(CacheError) def get_and_put_if_absent( - self, key, value, key_hint=None, value_hint=None + self, key, value, key_hint=None, value_hint=None ): """ Puts a value with a given key to cache only if the key does not @@ -352,10 +420,16 @@ def get_and_put_if_absent( value should be converted, :return: old value or None. """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + result = cache_get_and_put_if_absent( - self._client, self._cache_id, key, value, key_hint, value_hint + self._get_best_node(key, key_hint), + self.cache_info, + key, value, + key_hint, value_hint ) - result.value = self._process_binary(result.value) + result.value = self.client.unwrap_binary(result.value) return result @status_to_exception(CacheError) @@ -371,8 +445,14 @@ def put_if_absent(self, key, value, key_hint=None, value_hint=None): :param value_hint: (optional) Ignite data type, for which the given value should be converted. """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + return cache_put_if_absent( - self._client, self._cache_id, key, value, key_hint, value_hint + self._get_best_node(key, key_hint), + self.cache_info, + key, value, + key_hint, value_hint ) @status_to_exception(CacheError) @@ -385,15 +465,21 @@ def get_and_remove(self, key, key_hint=None) -> Any: should be converted, :return: old value or None. """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + result = cache_get_and_remove( - self._client, self._cache_id, key, key_hint + self._get_best_node(key, key_hint), + self.cache_info, + key, + key_hint ) - result.value = self._process_binary(result.value) + result.value = self.client.unwrap_binary(result.value) return result @status_to_exception(CacheError) def get_and_replace( - self, key, value, key_hint=None, value_hint=None + self, key, value, key_hint=None, value_hint=None ) -> Any: """ Puts a value with a given key to cache, returning previous value @@ -408,10 +494,16 @@ def get_and_replace( value should be converted. :return: old value or None. 
""" + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + result = cache_get_and_replace( - self._client, self._cache_id, key, value, key_hint, value_hint + self._get_best_node(key, key_hint), + self.cache_info, + key, value, + key_hint, value_hint ) - result.value = self._process_binary(result.value) + result.value = self.client.unwrap_binary(result.value) return result @status_to_exception(CacheError) @@ -423,7 +515,12 @@ def remove_key(self, key, key_hint=None): :param key_hint: (optional) Ignite data type, for which the given key should be converted, """ - return cache_remove_key(self._client, self._cache_id, key, key_hint) + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + return cache_remove_key( + self._get_best_node(key, key_hint), self.cache_info, key, key_hint + ) @status_to_exception(CacheError) def remove_keys(self, keys: list): @@ -433,14 +530,16 @@ def remove_keys(self, keys: list): :param keys: list of keys or tuples of (key, key_hint) to remove. """ - return cache_remove_keys(self._client, self._cache_id, keys) + return cache_remove_keys( + self._get_best_node(), self.cache_info, keys + ) @status_to_exception(CacheError) def remove_all(self): """ Removes all cache entries, notifying listeners and cache writers. """ - return cache_remove_all(self._client, self._cache_id) + return cache_remove_all(self._get_best_node(), self.cache_info) @status_to_exception(CacheError) def remove_if_equals(self, key, sample, key_hint=None, sample_hint=None): @@ -455,14 +554,20 @@ def remove_if_equals(self, key, sample, key_hint=None, sample_hint=None): :param sample_hint: (optional) Ignite data type, for whic the given sample should be converted. """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + return cache_remove_if_equals( - self._client, self._cache_id, key, sample, key_hint, sample_hint + self._get_best_node(key, key_hint), + self.cache_info, + key, sample, + key_hint, sample_hint ) @status_to_exception(CacheError) def replace_if_equals( - self, key, sample, value, - key_hint=None, sample_hint=None, value_hint=None + self, key, sample, value, + key_hint=None, sample_hint=None, value_hint=None ) -> Any: """ Puts a value with a given key to cache only if the key already exists @@ -479,26 +584,33 @@ def replace_if_equals( value should be converted, :return: boolean `True` when key is present, `False` otherwise. """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + result = cache_replace_if_equals( - self._client, self._cache_id, key, sample, value, + self._get_best_node(key, key_hint), + self.cache_info, + key, sample, value, key_hint, sample_hint, value_hint ) - result.value = self._process_binary(result.value) + result.value = self.client.unwrap_binary(result.value) return result @status_to_exception(CacheError) - def get_size(self, peek_modes=0): + def get_size(self, peek_modes=None): """ Gets the number of entries in cache. :param peek_modes: (optional) limit count to near cache partition (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache - (PeekModes.BACKUP). Defaults to all cache partitions (PeekModes.ALL), + (PeekModes.BACKUP). Defaults to primary cache partitions (PeekModes.PRIMARY), :return: integer number of cache entries. 
""" - return cache_get_size(self._client, self._cache_id, peek_modes) + return cache_get_size( + self._get_best_node(), self.cache_info, peek_modes + ) - def scan(self, page_size: int=1, partitions: int=-1, local: bool=False): + def scan(self, page_size: int = 1, partitions: int = -1, local: bool = False) -> ScanCursor: """ Returns all key-value pairs from the cache, similar to `get_all`, but with internal pagination, which is slower, but safer. @@ -509,33 +621,15 @@ def scan(self, page_size: int=1, partitions: int=-1, local: bool=False): (negative to query entire cache), :param local: (optional) pass True if this query should be executed on local node only. Defaults to False, - :return: generator with key-value pairs. + :return: Scan query cursor. """ - result = scan(self._client, self._cache_id, page_size, partitions, local) - if result.status != 0: - raise CacheError(result.message) - - cursor = result.value['cursor'] - for k, v in result.value['data'].items(): - k = self._process_binary(k) - v = self._process_binary(v) - yield k, v - - while result.value['more']: - result = scan_cursor_get_page(self._client, cursor) - if result.status != 0: - raise CacheError(result.message) - - for k, v in result.value['data'].items(): - k = self._process_binary(k) - v = self._process_binary(v) - yield k, v + return ScanCursor(self.client, self.cache_info, page_size, partitions, local) def select_row( - self, query_str: str, page_size: int=1, - query_args: Optional[list]=None, distributed_joins: bool=False, - replicated_only: bool=False, local: bool=False, timeout: int=0 - ): + self, query_str: str, page_size: int = 1, + query_args: Optional[list] = None, distributed_joins: bool = False, + replicated_only: bool = False, local: bool = False, timeout: int = 0 + ) -> SqlCursor: """ Executes a simplified SQL SELECT query over data stored in the cache. The query returns the whole record (key and value). @@ -552,44 +646,13 @@ def select_row( on local node only. Defaults to False, :param timeout: (optional) non-negative timeout value in ms. Zero disables timeout (default), - :return: generator with key-value pairs. - """ - def generate_result(value): - cursor = value['cursor'] - more = value['more'] - for k, v in value['data'].items(): - k = self._process_binary(k) - v = self._process_binary(v) - yield k, v - - while more: - inner_result = sql_cursor_get_page(self._client, cursor) - if result.status != 0: - raise SQLError(result.message) - more = inner_result.value['more'] - for k, v in inner_result.value['data'].items(): - k = self._process_binary(k) - v = self._process_binary(v) - yield k, v - + :return: Sql cursor. + """ type_name = self.settings[ prop_codes.PROP_QUERY_ENTITIES ][0]['value_type_name'] if not type_name: raise SQLError('Value type is unknown') - result = sql( - self._client, - self._cache_id, - type_name, - query_str, - page_size, - query_args, - distributed_joins, - replicated_only, - local, - timeout - ) - if result.status != 0: - raise SQLError(result.message) - return generate_result(result.value) + return SqlCursor(self.client, self.cache_info, type_name, query_str, page_size, query_args, + distributed_joins, replicated_only, local, timeout) diff --git a/pyignite/client.py b/pyignite/client.py index d5a9464..397c52e 100644 --- a/pyignite/client.py +++ b/pyignite/client.py @@ -39,114 +39,82 @@ :py:meth:`~pyignite.client.Client.query_binary_type` methods operates the local (class-wise) registry for Ignite Complex objects. 
""" - +import time from collections import defaultdict, OrderedDict -from typing import Iterable, Type, Union +import random +import re +from itertools import chain +from typing import Iterable, Type, Union, Any, Dict, Optional, Sequence +from .api import cache_get_node_partitions from .api.binary import get_binary_type, put_binary_type from .api.cache_config import cache_get_names -from .api.sql import sql_fields, sql_fields_cursor_get_page -from .cache import Cache +from .cluster import Cluster +from .cursors import SqlFieldsCursor +from .cache import Cache, create_cache, get_cache, get_or_create_cache, BaseCache from .connection import Connection -from .constants import * -from .datatypes import BinaryObject +from .constants import IGNITE_DEFAULT_HOST, IGNITE_DEFAULT_PORT, PROTOCOL_BYTE_ORDER, AFFINITY_RETRIES, AFFINITY_DELAY +from .datatypes import BinaryObject, AnyDataObject, TransactionConcurrency, TransactionIsolation +from .datatypes.base import IgniteDataType from .datatypes.internal import tc_map -from .exceptions import BinaryTypeError, CacheError, SQLError -from .utils import entity_id, schema_id, status_to_exception +from .exceptions import BinaryTypeError, CacheError, ReconnectError, connection_errors +from .queries.cache_info import CacheInfo +from .stream import BinaryStream, READ_BACKWARD +from .transaction import Transaction +from .utils import ( + cache_id, capitalize, entity_id, schema_id, process_delimiter, status_to_exception, is_iterable, + get_field_by_id, unsigned +) from .binary import GenericObjectMeta +from .monitoring import _EventListeners __all__ = ['Client'] -class Client(Connection): - """ - This is a main `pyignite` class, that is build upon the - :class:`~pyignite.connection.Connection`. In addition to the attributes, - properties and methods of its parent class, `Client` implements - the following features: - - * cache factory. Cache objects are used for key-value operations, - * Ignite SQL endpoint, - * binary types registration endpoint. - """ - - _registry = defaultdict(dict) - _compact_footer = None +class BaseClient: + # used for Complex object data class names sanitizing + _identifier = re.compile(r'[^0-9a-zA-Z_.+$]', re.UNICODE) + _ident_start = re.compile(r'^[^a-zA-Z_]+', re.UNICODE) - def _transfer_params(self, to: 'Client'): - super()._transfer_params(to) - to._registry = self._registry - to._compact_footer = self._compact_footer - - def __init__(self, compact_footer: bool=None, *args, **kwargs): - """ - Initialize client. + def __init__(self, compact_footer: bool = None, partition_aware: bool = False, + event_listeners: Optional[Sequence] = None, **kwargs): + self._compact_footer = compact_footer + self._partition_aware = partition_aware + self._connection_args = kwargs + self._registry = defaultdict(dict) + self._nodes = [] + self._current_node = 0 + self._partition_aware = partition_aware + self.affinity_version = (0, 0) + self._affinity = {'version': self.affinity_version, 'partition_mapping': defaultdict(dict)} + self._protocol_context = None + self._event_listeners = _EventListeners(event_listeners) - :param compact_footer: (optional) use compact (True, recommended) or - full (False) schema approach when serializing Complex objects. - Default is to use the same approach the server is using (None). 
- Apache Ignite binary protocol documentation on this topic: - https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-schema + @property + def protocol_context(self): """ - self._compact_footer = compact_footer - super().__init__(*args, **kwargs) + Returns protocol context, or None, if no connection to the Ignite + cluster was not yet established. - @status_to_exception(BinaryTypeError) - def get_binary_type(self, binary_type: Union[str, int]) -> dict: + This method is not a part of the public API. Unless you wish to + extend the `pyignite` capabilities (with additional testing, logging, + examining connections, et c.) you probably should not use it. """ - Gets the binary type information from the Ignite server. This is quite - a low-level implementation of Ignite thin client protocol's - `OP_GET_BINARY_TYPE` operation. You would probably want to use - :py:meth:`~pyignite.client.Client.query_binary_type` instead. + return self._protocol_context - :param binary_type: binary type name or ID, - :return: binary type description − a dict with the following fields: + @protocol_context.setter + def protocol_context(self, value): + self._protocol_context = value - - `type_exists`: True if the type is registered, False otherwise. In - the latter case all the following fields are omitted, - - `type_id`: Complex object type ID, - - `type_name`: Complex object type name, - - `affinity_key_field`: string value or None, - - `is_enum`: False in case of Complex object registration, - - `schemas`: a list, containing the Complex object schemas in format: - OrderedDict[field name: field type hint]. A schema can be empty. - """ - def convert_type(tc_type: int): - try: - return tc_map(tc_type.to_bytes(1, PROTOCOL_BYTE_ORDER)) - except (KeyError, OverflowError): - # if conversion to char or type lookup failed, - # we probably have a binary object type ID - return BinaryObject - - def convert_schema( - field_ids: list, binary_fields: list - ) -> OrderedDict: - converted_schema = OrderedDict() - for field_id in field_ids: - binary_field = [ - x - for x in binary_fields - if x['field_id'] == field_id - ][0] - converted_schema[binary_field['field_name']] = convert_type( - binary_field['type_id'] - ) - return converted_schema - - result = get_binary_type(self, binary_type) - if result.status != 0 or not result.value['type_exists']: - return result + @property + def partition_aware(self): + return self._partition_aware and self.partition_awareness_supported_by_protocol - binary_fields = result.value.pop('binary_fields') - old_format_schemas = result.value.pop('schema') - result.value['schemas'] = [] - for s_id, field_ids in old_format_schemas.items(): - result.value['schemas'].append( - convert_schema(field_ids, binary_fields) - ) - return result + @property + def partition_awareness_supported_by_protocol(self): + return self.protocol_context is not None \ + and self.protocol_context.is_partition_awareness_supported() @property def compact_footer(self) -> bool: @@ -163,45 +131,59 @@ def compact_footer(self) -> bool: # use compact schema by default, but leave initial (falsy) backing # value unchanged - return ( - self.__class__._compact_footer - or self.__class__._compact_footer is None - ) + return self._compact_footer or self._compact_footer is None @compact_footer.setter def compact_footer(self, value: bool): # normally schema approach should not change - if self.__class__._compact_footer not in (value, None): + if self._compact_footer not in (value, None): raise Warning('Can not change 
client schema approach.') else: - self.__class__._compact_footer = value + self._compact_footer = value - @status_to_exception(BinaryTypeError) - def put_binary_type( - self, type_name: str, affinity_key_field: str=None, - is_enum=False, schema: dict=None - ): - """ - Registers binary type information in cluster. Do not update binary - registry. This is a literal implementation of Ignite thin client - protocol's `OP_PUT_BINARY_TYPE` operation. You would probably want - to use :py:meth:`~pyignite.client.Client.register_binary_type` instead. + @staticmethod + def _process_connect_args(*args): + if len(args) == 0: + # no parameters − use default Ignite host and port + return [(IGNITE_DEFAULT_HOST, IGNITE_DEFAULT_PORT)] + if len(args) == 1 and is_iterable(args[0]): + # iterable of host-port pairs is given + return args[0] + if len(args) == 2 and isinstance(args[0], str) and isinstance(args[1], int): + # host and port are given + return [args] + + raise ConnectionError('Connection parameters are not valid.') + + def _process_get_binary_type_result(self, result): + if result.status != 0 or not result.value['type_exists']: + return result - :param type_name: name of the data type being registered, - :param affinity_key_field: (optional) name of the affinity key field, - :param is_enum: (optional) register enum if True, binary object - otherwise. Defaults to False, - :param schema: (optional) when register enum, pass a dict - of enumerated parameter names as keys and an integers as values. - When register binary type, pass a dict of field names: field types. - Binary type with no fields is OK. - """ - return put_binary_type( - self, type_name, affinity_key_field, is_enum, schema - ) + binary_fields = result.value.pop('binary_fields') + old_format_schemas = result.value.pop('schema') + result.value['schemas'] = [] + for s_id, field_ids in old_format_schemas.items(): + result.value['schemas'].append(self._convert_schema(field_ids, binary_fields)) + return result @staticmethod - def _create_dataclass(type_name: str, schema: OrderedDict=None) -> Type: + def _convert_type(tc_type: int): + try: + return tc_map(tc_type.to_bytes(1, PROTOCOL_BYTE_ORDER)) + except (KeyError, OverflowError): + # if conversion to char or type lookup failed, + # we probably have a binary object type ID + return BinaryObject + + def _convert_schema(self, field_ids: list, binary_fields: list) -> OrderedDict: + converted_schema = OrderedDict() + for field_id in field_ids: + binary_field = next(x for x in binary_fields if x['field_id'] == field_id) + converted_schema[binary_field['field_name']] = self._convert_type(binary_field['type_id']) + return converted_schema + + @staticmethod + def _create_dataclass(type_name: str, schema: OrderedDict = None) -> Type: """ Creates default (generic) class for Ignite Complex object. @@ -212,26 +194,351 @@ def _create_dataclass(type_name: str, schema: OrderedDict=None) -> Type: schema = schema or {} return GenericObjectMeta(type_name, (), {}, schema=schema) - def _sync_binary_registry(self, type_id: int): + @classmethod + def _create_type_name(cls, type_name: str) -> str: + """ + Creates Python data class name from Ignite binary type name. + + Handles all the special cases found in + `java.org.apache.ignite.binary.BinaryBasicNameMapper.simpleName()`. + Tries to adhere to PEP8 along the way. """ - Reads Complex object description from Ignite server. Creates default - Complex object classes and puts in registry, if not already there. 
+ # general sanitizing + type_name = cls._identifier.sub('', type_name) + + # - name ending with '$' (Scala) + # - name + '$' + some digits (anonymous class) + # - '$$Lambda$' in the middle + type_name = process_delimiter(type_name, '$') + + # .NET outer/inner class delimiter + type_name = process_delimiter(type_name, '+') + + # Java fully qualified class name + type_name = process_delimiter(type_name, '.') + + # start chars sanitizing + type_name = capitalize(cls._ident_start.sub('', type_name)) + + return type_name + + def _sync_binary_registry(self, type_id: int, type_info: dict): + """ + Sync binary registry :param type_id: Complex object type ID. + :param type_info: Complex object type info. """ - type_info = self.get_binary_type(type_id) if type_info['type_exists']: for schema in type_info['schemas']: if not self._registry[type_id].get(schema_id(schema), None): data_class = self._create_dataclass( - type_info['type_name'], + self._create_type_name(type_info['type_name']), schema, ) self._registry[type_id][schema_id(schema)] = data_class - def register_binary_type( - self, data_class: Type, affinity_key_field: str=None, + def _get_from_registry(self, type_id, schema): + """ + Get binary type info from registry. + + :param type_id: Complex object type ID. + :param schema: Complex object schema. + """ + if schema: + try: + return self._registry[type_id][schema_id(schema)] + except KeyError: + return None + return self._registry[type_id] + + def register_cache(self, cache_id: int): + if self.partition_aware and cache_id not in self._affinity: + self._affinity['partition_mapping'][cache_id] = {} + + def _get_affinity_key(self, cache_id, key, key_hint=None): + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + cache_partition_mapping = self._cache_partition_mapping(cache_id) + if cache_partition_mapping and cache_partition_mapping.get('is_applicable'): + config = cache_partition_mapping.get('cache_config') + if config: + affinity_key_id = config.get(key_hint.type_id) + + if affinity_key_id and isinstance(key, GenericObjectMeta): + return get_field_by_id(key, affinity_key_id) + + return key, key_hint + + def _update_affinity(self, full_affinity): + self._affinity['version'] = full_affinity['version'] + + full_mapping = full_affinity.get('partition_mapping') + if full_mapping: + self._affinity['partition_mapping'].update(full_mapping) + + def _caches_to_update_affinity(self): + if self._affinity['version'] < self.affinity_version: + return list(self._affinity['partition_mapping'].keys()) + else: + return list(c_id for c_id, c_mapping in self._affinity['partition_mapping'].items() if not c_mapping) + + def _cache_partition_mapping(self, cache_id): + return self._affinity['partition_mapping'][cache_id] + + def _get_node_by_hashcode(self, cache_id, hashcode, parts): + """ + Get node by key hashcode. Calculate partition and return node on that it is primary. 
+ (algorithm is taken from `RendezvousAffinityFunction.java`) + """ + + # calculate partition for key or affinity key + # (algorithm is taken from `RendezvousAffinityFunction.java`) + mask = parts - 1 + + if parts & mask == 0: + part = (hashcode ^ (unsigned(hashcode) >> 16)) & mask + else: + part = abs(hashcode // parts) + + assert 0 <= part < parts, 'Partition calculation has failed' + + node_mapping = self._cache_partition_mapping(cache_id).get('node_mapping') + if not node_mapping: + return None + + node_uuid, best_conn = None, None + for u, p in node_mapping.items(): + if part in p: + node_uuid = u + break + + if node_uuid: + for n in self._nodes: + if n.uuid == node_uuid: + best_conn = n + break + if best_conn and best_conn.alive: + return best_conn + + +class _ConnectionContextManager: + def __init__(self, client, nodes): + self.client = client + self.nodes = nodes + self.client._connect(self.nodes) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.client.close() + + +class Client(BaseClient): + """ + Synchronous Client implementation. + """ + + def __init__(self, compact_footer: bool = None, partition_aware: bool = True, + event_listeners: Optional[Sequence] = None, **kwargs): + """ + Initialize client. + + For the use of the SSL-related parameters see + https://docs.python.org/3/library/ssl.html#ssl-certificates. + + :param compact_footer: (optional) use compact (True, recommended) or + full (False) schema approach when serializing Complex objects. + Default is to use the same approach the server is using (None). + Apache Ignite binary protocol documentation on this topic: + https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#schema + :param partition_aware: (optional) try to calculate the exact data + placement from the key before to issue the key operation to the + server node, `True` by default, + :param event_listeners: (optional) event listeners, + :param timeout: (optional) sets timeout (in seconds) for each socket + operation including `connect`. 0 means non-blocking mode, which is + virtually guaranteed to fail. Can accept integer or float value. + Default is None (blocking mode), + :param handshake_timeout: (optional) sets timeout (in seconds) for performing handshake (connection) + with node. Default is 10.0 seconds, + :param use_ssl: (optional) set to True if Ignite server uses SSL + on its binary connector. Defaults to use SSL when username + and password has been supplied, not to use SSL otherwise, + :param ssl_version: (optional) SSL version constant from standard + `ssl` module. Defaults to TLS v1.2, + :param ssl_ciphers: (optional) ciphers to use. If not provided, + `ssl` default ciphers are used, + :param ssl_cert_reqs: (optional) determines how the remote side + certificate is treated: + + * `ssl.CERT_NONE` − remote certificate is ignored (default), + * `ssl.CERT_OPTIONAL` − remote certificate will be validated, + if provided, + * `ssl.CERT_REQUIRED` − valid remote certificate is required, + + :param ssl_keyfile: (optional) a path to SSL key file to identify + local (client) party, + :param ssl_keyfile_password: (optional) password for SSL key file, + can be provided when key file is encrypted to prevent OpenSSL + password prompt, + :param ssl_certfile: (optional) a path to ssl certificate file + to identify local (client) party, + :param ssl_ca_certfile: (optional) a path to a trusted certificate + or a certificate chain. 
Required to check the validity of the remote + (server-side) certificate, + :param username: (optional) user name to authenticate to Ignite + cluster, + :param password: (optional) password to authenticate to Ignite cluster. + """ + super().__init__(compact_footer, partition_aware, event_listeners, **kwargs) + + def connect(self, *args): + """ + Connect to Ignite cluster node(s). + + :param args: (optional) host(s) and port(s) to connect to. + """ + nodes = self._process_connect_args(*args) + return _ConnectionContextManager(self, nodes) + + def _connect(self, nodes): + # the following code is quite twisted, because the protocol version + # is initially unknown + + # TODO: open first node in foreground, others − in background + for i, node in enumerate(nodes): + host, port = node + conn = Connection(self, host, port, **self._connection_args) + + try: + if self.protocol_context is None or self.partition_aware: + # open connection before adding to the pool + conn.connect() + + # now we have the protocol version + if not self.partition_aware: + # do not try to open more nodes + self._current_node = i + + except connection_errors: + if self.partition_aware: + # schedule the reconnection + conn.reconnect() + + self._nodes.append(conn) + + if self.protocol_context is None: + raise ReconnectError('Can not connect.') + + def close(self): + for conn in self._nodes: + conn.close() + self._nodes.clear() + + @property + def random_node(self) -> Connection: + """ + Returns random usable node. + + This method is not a part of the public API. Unless you wish to + extend the `pyignite` capabilities (with additional testing, logging, + examining connections, et c.) you probably should not use it. + """ + if self.partition_aware: + # if partition awareness is used just pick a random connected node + return self._get_random_node() + else: + # if partition awareness is not used then just return the current + # node if it's alive or the next usable node if connection with the + # current is broken + node = self._nodes[self._current_node] + if node.alive: + return node + + # close current (supposedly failed) node + self._nodes[self._current_node].close() + + # advance the node index + self._current_node += 1 + if self._current_node >= len(self._nodes): + self._current_node = 0 + + # prepare the list of node indexes to try to connect to + num_nodes = len(self._nodes) + for i in chain(range(self._current_node, num_nodes), range(self._current_node)): + node = self._nodes[i] + try: + node.connect() + except connection_errors: + pass + else: + return node + + # no nodes left + raise ReconnectError('Can not reconnect: out of nodes.') + + def _get_random_node(self, reconnect=True): + alive_nodes = [n for n in self._nodes if n.alive] + if alive_nodes: + return random.choice(alive_nodes) + elif reconnect: + for n in self._nodes: + n.reconnect() + + return self._get_random_node(reconnect=False) + else: + # cannot choose from an empty sequence + raise ReconnectError('Can not reconnect: out of nodes.') from None + + @status_to_exception(BinaryTypeError) + def get_binary_type(self, binary_type: Union[str, int]) -> dict: + """ + Gets the binary type information from the Ignite server. This is quite + a low-level implementation of Ignite thin client protocol's + `OP_GET_BINARY_TYPE` operation. You would probably want to use + :py:meth:`~pyignite.client.Client.query_binary_type` instead. 
+ + :param binary_type: binary type name or ID, + :return: binary type description − a dict with the following fields: + + - `type_exists`: True if the type is registered, False otherwise. In + the latter case all the following fields are omitted, + - `type_id`: Complex object type ID, + - `type_name`: Complex object type name, + - `affinity_key_field`: string value or None, + - `is_enum`: False in case of Complex object registration, + - `schemas`: a list, containing the Complex object schemas in format: + OrderedDict[field name: field type hint]. A schema can be empty. + """ + result = get_binary_type(self.random_node, binary_type) + return self._process_get_binary_type_result(result) + + @status_to_exception(BinaryTypeError) + def put_binary_type( + self, type_name: str, affinity_key_field: str = None, + is_enum=False, schema: dict = None ): + """ + Registers binary type information in cluster. Do not update binary + registry. This is a literal implementation of Ignite thin client + protocol's `OP_PUT_BINARY_TYPE` operation. You would probably want + to use :py:meth:`~pyignite.client.Client.register_binary_type` instead. + + :param type_name: name of the data type being registered, + :param affinity_key_field: (optional) name of the affinity key field, + :param is_enum: (optional) register enum if True, binary object + otherwise. Defaults to False, + :param schema: (optional) when registering an enum, pass a dict + of enumerated parameter names as keys and integers as values. + When registering a binary type, pass a dict of field names: field types. + Binary type with no fields is OK. + """ + return put_binary_type(self.random_node, type_name, affinity_key_field, is_enum, schema) + + def register_binary_type(self, data_class: Type, affinity_key_field: str = None): """ Register the given class as a representation of a certain Complex object type. Discards autogenerated or previously registered class. @@ -239,47 +546,128 @@ def register_binary_type( :param data_class: Complex object class, :param affinity_key_field: (optional) affinity parameter. """ - if not self.query_binary_type( - data_class.type_id, data_class.schema_id - ): - self.put_binary_type( - data_class.type_name, - affinity_key_field, - schema=data_class.schema, - ) + if not self.query_binary_type(data_class.type_id, data_class.schema_id): + self.put_binary_type(data_class.type_name, affinity_key_field, schema=data_class.schema) self._registry[data_class.type_id][data_class.schema_id] = data_class - def query_binary_type( - self, binary_type: Union[int, str], schema: Union[int, dict]=None, - sync: bool=True - ): + def query_binary_type(self, binary_type: Union[int, str], schema: Union[int, dict] = None): """ Queries the registry of Complex object classes. :param binary_type: Complex object type name or ID, - :param schema: (optional) Complex object schema or schema ID, - :param sync: (optional) look up the Ignite server for registered - Complex objects and create data classes for them if needed, + :param schema: (optional) Complex object schema or schema ID :return: found dataclass or None, if `schema` parameter is provided, a dict of {schema ID: dataclass} format otherwise.
""" type_id = entity_id(binary_type) - s_id = schema_id(schema) - if schema: - try: - result = self._registry[type_id][s_id] - except KeyError: - result = None - else: - result = self._registry[type_id] + result = self._get_from_registry(type_id, schema) + if not result: + type_info = self.get_binary_type(type_id) + self._sync_binary_registry(type_id, type_info) + return self._get_from_registry(type_id, schema) + + return result + + def unwrap_binary(self, value: Any) -> Any: + """ + Detects and recursively unwraps Binary Object or collections of BinaryObject. + + :param value: anything that could be a Binary Object or collection of BinaryObject, + :return: the result of the Binary Object unwrapping with all other data + left intact. + """ + if isinstance(value, tuple) and len(value) == 2: + if type(value[0]) is bytes and type(value[1]) is int: + blob, offset = value + with BinaryStream(self, blob) as stream: + data_class = BinaryObject.parse(stream) + return BinaryObject.to_python(stream.read_ctype(data_class, direction=READ_BACKWARD), client=self) + + if isinstance(value[0], int): + col_type, collection = value + if isinstance(collection, list): + return col_type, [self.unwrap_binary(v) for v in collection] + + if isinstance(collection, dict): + return col_type, {self.unwrap_binary(k): self.unwrap_binary(v) for k, v in collection.items()} + return value - if sync and not result: - self._sync_binary_registry(type_id) - return self.query_binary_type(type_id, s_id, sync=False) + @status_to_exception(CacheError) + def _get_affinity(self, conn: 'Connection', caches: Iterable[int]) -> Dict: + """ + Queries server for affinity mappings. Retries in case + of an intermittent error (most probably “Getting affinity for topology + version earlier than affinity is calculated”). + + :param conn: connection to Ignite server, + :param caches: Ids of caches, + :return: OP_CACHE_PARTITIONS operation result value. + """ + for _ in range(AFFINITY_RETRIES or 1): + result = cache_get_node_partitions(conn, caches) + if result.status == 0: + break + time.sleep(AFFINITY_DELAY) return result + def get_best_node( + self, cache: Union[int, str, 'BaseCache'], key: Any = None, key_hint: 'IgniteDataType' = None + ) -> 'Connection': + """ + Returns the node from the list of the nodes, opened by client, that + most probably contains the needed key-value pair. See IEP-23. + + This method is not a part of the public API. Unless you wish to + extend the `pyignite` capabilities (with additional testing, logging, + examining connections, et c.) you probably should not use it. + + :param cache: Ignite cache, cache name or cache id, + :param key: (optional) pythonic key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :return: Ignite connection object. 
+ """ + conn = self.random_node + + if self.partition_aware and key is not None: + caches = self._caches_to_update_affinity() + if caches: + # update partition mapping + while True: + try: + full_affinity = self._get_affinity(conn, caches) + break + except connection_errors: + # retry if connection failed + conn = self.random_node + pass + except CacheError: + # server did not create mapping in time + return conn + + self._update_affinity(full_affinity) + + for node in self._nodes: + if not node.alive: + node.reconnect() + + c_id = cache.cache_id if isinstance(cache, BaseCache) else cache_id(cache) + parts = self._cache_partition_mapping(c_id).get('number_of_partitions') + + if not parts: + return conn + + key, key_hint = self._get_affinity_key(c_id, key, key_hint) + hashcode = key_hint.hashcode(key, client=self) + + best_node = self._get_node_by_hashcode(c_id, hashcode, parts) + if best_node: + return best_node + + return conn + def create_cache(self, settings: Union[str, dict]) -> 'Cache': """ Creates Ignite cache by name. Raises `CacheError` if such a cache is @@ -291,7 +679,7 @@ def create_cache(self, settings: Union[str, dict]) -> 'Cache': :ref:`cache creation example `, :return: :class:`~pyignite.cache.Cache` object. """ - return Cache(self, settings) + return create_cache(self, settings) def get_or_create_cache(self, settings: Union[str, dict]) -> 'Cache': """ @@ -303,7 +691,7 @@ def get_or_create_cache(self, settings: Union[str, dict]) -> 'Cache': :ref:`cache creation example `, :return: :class:`~pyignite.cache.Cache` object. """ - return Cache(self, settings, with_get=True) + return get_or_create_cache(self, settings) def get_cache(self, settings: Union[str, dict]) -> 'Cache': """ @@ -315,7 +703,7 @@ def get_cache(self, settings: Union[str, dict]) -> 'Cache': property is allowed), :return: :class:`~pyignite.cache.Cache` object. """ - return Cache(self, settings, get_only=True) + return get_cache(self, settings) @status_to_exception(CacheError) def get_cache_names(self) -> list: @@ -324,31 +712,32 @@ def get_cache_names(self) -> list: :return: list of cache names. """ - return cache_get_names(self) + return cache_get_names(self.random_node) def sql( - self, query_str: str, page_size: int=1, query_args: Iterable=None, - schema: Union[int, str]='PUBLIC', - statement_type: int=0, distributed_joins: bool=False, - local: bool=False, replicated_only: bool=False, - enforce_join_order: bool=False, collocated: bool=False, - lazy: bool=False, include_field_names: bool=False, - max_rows: int=-1, timeout: int=0, - ): + self, query_str: str, page_size: int = 1024, + query_args: Iterable = None, schema: str = 'PUBLIC', + statement_type: int = 0, distributed_joins: bool = False, + local: bool = False, replicated_only: bool = False, + enforce_join_order: bool = False, collocated: bool = False, + lazy: bool = False, include_field_names: bool = False, + max_rows: int = -1, timeout: int = 0, + cache: Union[int, str, Cache] = None + ) -> SqlFieldsCursor: """ Runs an SQL query and returns its result. :param query_str: SQL query string, - :param page_size: (optional) cursor page size. Default is 1, which - means that client makes one server call per row, + :param page_size: (optional) cursor page size. Default is 1024, which + means that client makes one server call per 1024 rows, :param query_args: (optional) query arguments. List of values or (value, type hint) tuples, :param schema: (optional) schema for the query. Defaults to `PUBLIC`, :param statement_type: (optional) statement type. 
Can be: - * StatementType.ALL − any type (default), + * StatementType.ALL − any type (default), * StatementType.SELECT − select, - * StatementType.UPDATE − update. + * StatementType.UPDATE − update. :param distributed_joins: (optional) distributed joins. Defaults to False, @@ -367,40 +756,45 @@ def sql( (all rows), :param timeout: (optional) non-negative timeout value in ms. Zero disables timeout (default), - :return: generator with result rows as a lists. If + :param cache: (optional) Name or ID of the cache to use to infer schema. + If set, 'schema' argument is ignored, + :return: sql fields cursor with result rows as a lists. If `include_field_names` was set, the first row will hold field names. """ - def generate_result(value): - cursor = value['cursor'] - more = value['more'] - - if include_field_names: - yield value['fields'] - field_count = len(value['fields']) - else: - field_count = value['field_count'] - for line in value['data']: - yield line - - while more: - inner_result = sql_fields_cursor_get_page( - self, cursor, field_count - ) - if inner_result.status != 0: - raise SQLError(result.message) - more = inner_result.value['more'] - for line in inner_result.value['data']: - yield line - - schema = self.get_or_create_cache(schema) - result = sql_fields( - self, schema.cache_id, query_str, - page_size, query_args, schema.name, - statement_type, distributed_joins, local, replicated_only, - enforce_join_order, collocated, lazy, include_field_names, - max_rows, timeout, - ) - if result.status != 0: - raise SQLError(result.message) - - return generate_result(result.value) + if isinstance(cache, (int, str)): + c_info = CacheInfo(cache_id=cache_id(cache), protocol_context=self.protocol_context) + elif isinstance(cache, Cache): + c_info = cache.cache_info + else: + c_info = CacheInfo(protocol_context=self.protocol_context) + + if c_info.cache_id: + schema = None + + return SqlFieldsCursor(self, c_info, query_str, page_size, query_args, schema, statement_type, + distributed_joins, local, replicated_only, enforce_join_order, collocated, lazy, + include_field_names, max_rows, timeout) + + def get_cluster(self) -> 'Cluster': + """ + Get client cluster facade. + + :return: :py:class:`~pyignite.cluster.Cluster` instance. + """ + return Cluster(self) + + def tx_start(self, concurrency: TransactionConcurrency = TransactionConcurrency.PESSIMISTIC, + isolation: TransactionIsolation = TransactionIsolation.REPEATABLE_READ, + timeout: int = 0, label: Optional[str] = None) -> 'Transaction': + """ + Start thin client transaction. + + :param concurrency: (optional) transaction concurrency, see + :py:class:`~pyignite.datatypes.transactions.TransactionConcurrency`, + :param isolation: (optional) transaction isolation level, see + :py:class:`~pyignite.datatypes.transactions.TransactionIsolation`, + :param timeout: (optional) transaction timeout in milliseconds, + :param label: (optional) transaction label. + :return: :py:class:`~pyignite.transaction.Transaction` instance. + """ + return Transaction(self, concurrency, isolation, timeout, label) diff --git a/pyignite/cluster.py b/pyignite/cluster.py new file mode 100644 index 0000000..d953b5c --- /dev/null +++ b/pyignite/cluster.py @@ -0,0 +1,64 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module contains `Cluster` that lets you get info and change state of the +whole cluster. +""" +from pyignite.api.cluster import cluster_get_state, cluster_set_state +from pyignite.exceptions import ClusterError +from pyignite.utils import status_to_exception +from pyignite.datatypes import ClusterState + + +class Cluster: + """ + Ignite cluster abstraction. Users should never use this class directly, + but construct its instances with + :py:meth:`~pyignite.client.Client.get_cluster` method instead. + """ + + def __init__(self, client: 'Client'): + """ + :param client: :py:class:`~pyignite.client.Client` instance. + """ + self._client = client + + @status_to_exception(ClusterError) + def get_state(self) -> 'ClusterState': + """ + Gets current cluster state. + + :return: Current cluster state. This is one of + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.INACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE_READ_ONLY`. + """ + return cluster_get_state(self._client.random_node) + + @status_to_exception(ClusterError) + def set_state(self, state: 'ClusterState'): + """ + Changes current cluster state to the given. + + Note: Deactivation clears in-memory caches (without persistence) + including the system caches. + + :param state: New cluster state. This is one of + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.INACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE_READ_ONLY`. + """ + return cluster_set_state(self._client.random_node, state) diff --git a/pyignite/connection/__init__.py b/pyignite/connection/__init__.py index 1f6f0c0..14e820a 100644 --- a/pyignite/connection/__init__.py +++ b/pyignite/connection/__init__.py @@ -33,301 +33,7 @@ as well as Ignite protocol handshaking. """ -import socket +from .connection import Connection +from .aio_connection import AioConnection -from pyignite.constants import * -from pyignite.exceptions import ( - HandshakeError, ParameterError, ReconnectError, SocketError, -) - -from pyignite.utils import is_iterable -from .handshake import HandshakeRequest, read_response -from .ssl import wrap - - -__all__ = ['Connection'] - - -class Connection: - """ - This is a `pyignite` class, that represents a connection to Ignite - node. It serves multiple purposes: - - * socket wrapper. Detects fragmentation and network errors. See also - https://docs.python.org/3/howto/sockets.html, - * binary protocol connector. Incapsulates handshake, data read-ahead and - failover reconnection. 
- """ - - _socket = None - nodes = None - host = None - port = None - timeout = None - prefetch = None - username = None - password = None - - @staticmethod - def _check_kwargs(kwargs): - expected_args = [ - 'timeout', - 'use_ssl', - 'ssl_version', - 'ssl_ciphers', - 'ssl_cert_reqs', - 'ssl_keyfile', - 'ssl_keyfile_password', - 'ssl_certfile', - 'ssl_ca_certfile', - 'username', - 'password', - ] - for kw in kwargs: - if kw not in expected_args: - raise ParameterError(( - 'Unexpected parameter for connection initialization: `{}`' - ).format(kw)) - - def __init__(self, prefetch: bytes=b'', **kwargs): - """ - Initialize connection. - - For the use of the SSL-related parameters see - https://docs.python.org/3/library/ssl.html#ssl-certificates. - - :param prefetch: (optional) initialize the read-ahead data buffer. - Empty by default, - :param timeout: (optional) sets timeout (in seconds) for each socket - operation including `connect`. 0 means non-blocking mode, which is - virtually guaranteed to fail. Can accept integer or float value. - Default is None (blocking mode), - :param use_ssl: (optional) set to True if Ignite server uses SSL - on its binary connector. Defaults to use SSL when username - and password has been supplied, not to use SSL otherwise, - :param ssl_version: (optional) SSL version constant from standard - `ssl` module. Defaults to TLS v1.1, as in Ignite 2.5, - :param ssl_ciphers: (optional) ciphers to use. If not provided, - `ssl` default ciphers are used, - :param ssl_cert_reqs: (optional) determines how the remote side - certificate is treated: - - * `ssl.CERT_NONE` − remote certificate is ignored (default), - * `ssl.CERT_OPTIONAL` − remote certificate will be validated, - if provided, - * `ssl.CERT_REQUIRED` − valid remote certificate is required, - - :param ssl_keyfile: (optional) a path to SSL key file to identify - local (client) party, - :param ssl_keyfile_password: (optional) password for SSL key file, - can be provided when key file is encrypted to prevent OpenSSL - password prompt, - :param ssl_certfile: (optional) a path to ssl certificate file - to identify local (client) party, - :param ssl_ca_certfile: (optional) a path to a trusted certificate - or a certificate chain. Required to check the validity of the remote - (server-side) certificate, - :param username: (optional) user name to authenticate to Ignite - cluster, - :param password: (optional) password to authenticate to Ignite cluster. - """ - self.prefetch = prefetch - self._check_kwargs(kwargs) - self.timeout = kwargs.pop('timeout', None) - self.username = kwargs.pop('username', None) - self.password = kwargs.pop('password', None) - if all([self.username, self.password, 'use_ssl' not in kwargs]): - kwargs['use_ssl'] = True - self.init_kwargs = kwargs - - read_response = read_response - _wrap = wrap - - @property - def socket(self) -> socket.socket: - """ - Network socket. - """ - if self._socket is None: - self._reconnect() - return self._socket - - def __repr__(self) -> str: - if self.host and self.port: - return '{}:{}'.format(self.host, self.port) - else: - return '' - - def _connect(self, host: str, port: int): - """ - Actually connect socket. 
- """ - self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self._socket.settimeout(self.timeout) - self._socket = self._wrap(self.socket) - self._socket.connect((host, port)) - - hs_request = HandshakeRequest(self.username, self.password) - self.send(hs_request) - hs_response = self.read_response() - if hs_response['op_code'] == 0: - self.close() - error_text = 'Handshake error: {}'.format(hs_response['message']) - # if handshake fails for any reason other than protocol mismatch - # (i.e. authentication error), server version is 0.0.0 - if any([ - hs_response['version_major'], - hs_response['version_minor'], - hs_response['version_patch'], - ]): - error_text += ( - ' Server expects binary protocol version ' - '{version_major}.{version_minor}.{version_patch}. Client ' - 'provides {client_major}.{client_minor}.{client_patch}.' - ).format( - client_major=PROTOCOL_VERSION_MAJOR, - client_minor=PROTOCOL_VERSION_MINOR, - client_patch=PROTOCOL_VERSION_PATCH, - **hs_response - ) - raise HandshakeError(error_text) - self.host, self.port = host, port - - def connect(self, *args): - """ - Connect to the server. Connection parameters may be either one node - (host and port), or list (or other iterable) of nodes. - - :param host: Ignite server host, - :param port: Ignite server port, - :param nodes: iterable of (host, port) tuples. - """ - self.nodes = iter([]) - if len(args) == 0: - host, port = IGNITE_DEFAULT_HOST, IGNITE_DEFAULT_PORT - elif len(args) == 1 and is_iterable(args[0]): - self.nodes = iter(args[0]) - host, port = next(self.nodes) - elif ( - len(args) == 2 - and isinstance(args[0], str) - and isinstance(args[1], int) - ): - host, port = args - else: - raise ConnectionError('Connection parameters are not valid.') - - self._connect(host, port) - - def _reconnect(self): - """ - Restore the connection using the next node in `nodes` iterable. - """ - for host, port in self.nodes: - try: - self._connect(host, port) - return - except OSError: - pass - self.host = self.port = self.nodes = None - # exception chaining gives a misleading traceback here - raise ReconnectError('Can not reconnect: out of nodes') from None - - def _transfer_params(self, to: 'Connection'): - """ - Transfer non-SSL parameters to target connection object. - - :param target: connection object to transfer parameters to. - """ - to.username = self.username - to.password = self.password - to.nodes = self.nodes - - def clone(self, prefetch: bytes=b'') -> 'Connection': - """ - Clones this connection in its current state. - - :return: `Connection` object. - """ - clone = self.__class__(**self.init_kwargs) - self._transfer_params(to=clone) - if self.port and self.host: - clone._connect(self.host, self.port) - clone.prefetch = prefetch - return clone - - def send(self, data: bytes, flags=None): - """ - Send data down the socket. - - :param data: bytes to send, - :param flags: (optional) OS-specific flags. - """ - kwargs = {} - if flags is not None: - kwargs['flags'] = flags - data = bytes(data) - total_bytes_sent = 0 - - while total_bytes_sent < len(data): - try: - bytes_sent = self.socket.send(data[total_bytes_sent:], **kwargs) - except OSError: - self._socket = self.host = self.port = None - raise - if bytes_sent == 0: - self.socket.close() - raise SocketError('Socket connection broken.') - total_bytes_sent += bytes_sent - - def recv(self, buffersize, flags=None) -> bytes: - """ - Receive data from socket or read-ahead buffer. 
- - :param buffersize: bytes to receive, - :param flags: (optional) OS-specific flags, - :return: data received. - """ - pref_size = len(self.prefetch) - if buffersize > pref_size: - result = self.prefetch - self.prefetch = b'' - try: - result += self._recv(buffersize-pref_size, flags) - except (SocketError, OSError): - self._socket = self.host = self.port = None - raise - return result - else: - result = self.prefetch[:buffersize] - self.prefetch = self.prefetch[buffersize:] - return result - - def _recv(self, buffersize, flags=None) -> bytes: - """ - Handle socket data reading. - """ - kwargs = {} - if flags is not None: - kwargs['flags'] = flags - chunks = [] - bytes_rcvd = 0 - - while bytes_rcvd < buffersize: - chunk = self.socket.recv(buffersize-bytes_rcvd, **kwargs) - if chunk == b'': - self.socket.close() - raise SocketError('Socket connection broken.') - chunks.append(chunk) - bytes_rcvd += len(chunk) - - return b''.join(chunks) - - def close(self): - """ - Mark socket closed. This is recommended but not required, since - sockets are automatically closed when they are garbage-collected. - """ - self._socket.shutdown(socket.SHUT_RDWR) - self._socket.close() - self._socket = self.host = self.port = None +__all__ = ['Connection', 'AioConnection'] diff --git a/pyignite/connection/aio_connection.py b/pyignite/connection/aio_connection.py new file mode 100644 index 0000000..13ab681 --- /dev/null +++ b/pyignite/connection/aio_connection.py @@ -0,0 +1,299 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import asyncio +from collections import OrderedDict +from typing import Union + +from pyignite.constants import PROTOCOLS, PROTOCOL_BYTE_ORDER +from pyignite.exceptions import HandshakeError, SocketError, connection_errors, AuthenticationError +from .bitmask_feature import BitmaskFeature +from .connection import BaseConnection + +from .handshake import HandshakeRequest, HandshakeResponse +from .protocol_context import ProtocolContext +from .ssl import create_ssl_context +from ..stream.binary_stream import BinaryStreamBase + + +class BaseProtocol(asyncio.Protocol): + def __init__(self, conn, handshake_fut): + super().__init__() + self._buffer = bytearray() + self._conn = conn + self._handshake_fut = handshake_fut + + def connection_lost(self, exc): + self.__process_connection_error(exc if exc else SocketError("Connection closed")) + + def connection_made(self, transport: asyncio.WriteTransport) -> None: + try: + self.__send_handshake(transport, self._conn) + except Exception as e: + if not self._handshake_fut.done(): + self._handshake_fut.set_exception(e) + + def data_received(self, data: bytes) -> None: + self._buffer += data + while self.__has_full_response(): + packet_sz = self.__packet_size(self._buffer) + packet = self._buffer[0:packet_sz] + if not self._handshake_fut.done(): + hs_response = self.__parse_handshake(packet, self._conn.client) + self._handshake_fut.set_result(hs_response) + elif not self._handshake_fut.cancelled() or not self._handshake_fut.exception(): + self._conn.process_message(packet) + self._buffer = self._buffer[packet_sz:len(self._buffer)] + + def __has_full_response(self): + if len(self._buffer) > 4: + response_len = int.from_bytes(self._buffer[0:4], byteorder=PROTOCOL_BYTE_ORDER, signed=True) + return response_len + 4 <= len(self._buffer) + + @staticmethod + def __packet_size(buffer): + return int.from_bytes(buffer[0:4], byteorder=PROTOCOL_BYTE_ORDER, signed=True) + 4 + + def __process_connection_error(self, exc): + connected = self._handshake_fut.done() + if not connected: + self._handshake_fut.set_exception(exc) + self._conn.process_connection_lost(exc, connected) + + @staticmethod + def __send_handshake(transport, conn): + hs_request = HandshakeRequest(conn.protocol_context, conn.username, conn.password) + with BinaryStreamBase(client=conn.client) as stream: + hs_request.from_python(stream) + transport.write(stream.getvalue()) + + @staticmethod + def __parse_handshake(data, client): + with BinaryStreamBase(client, data) as stream: + return HandshakeResponse.parse(stream, client.protocol_context) + + +class AioConnection(BaseConnection): + """ + Asyncio connection to Ignite node. It serves multiple purposes: + + * wrapper of asyncio streams. See also https://docs.python.org/3/library/asyncio-stream.html + * encapsulates handshake and reconnection. + """ + + def __init__(self, client: 'AioClient', host: str, port: int, username: str = None, password: str = None, + **ssl_params): + """ + Initialize connection. + + For the use of the SSL-related parameters see + https://docs.python.org/3/library/ssl.html#ssl-certificates. + + :param client: Ignite client object, + :param host: Ignite server node's host name or IP, + :param port: Ignite server node's port number, + :param handshake_timeout: (optional) sets timeout (in seconds) for performing handshake (connection) + with node. Default is 10.0 seconds, + :param use_ssl: (optional) set to True if Ignite server uses SSL + on its binary connector. 
Defaults to use SSL when username + and password has been supplied, not to use SSL otherwise, + :param ssl_version: (optional) SSL version constant from standard + `ssl` module. Defaults to TLS v1.2, + :param ssl_ciphers: (optional) ciphers to use. If not provided, + `ssl` default ciphers are used, + :param ssl_cert_reqs: (optional) determines how the remote side + certificate is treated: + + * `ssl.CERT_NONE` − remote certificate is ignored (default), + * `ssl.CERT_OPTIONAL` − remote certificate will be validated, + if provided, + * `ssl.CERT_REQUIRED` − valid remote certificate is required, + + :param ssl_keyfile: (optional) a path to SSL key file to identify + local (client) party, + :param ssl_keyfile_password: (optional) password for SSL key file, + can be provided when key file is encrypted to prevent OpenSSL + password prompt, + :param ssl_certfile: (optional) a path to ssl certificate file + to identify local (client) party, + :param ssl_ca_certfile: (optional) a path to a trusted certificate + or a certificate chain. Required to check the validity of the remote + (server-side) certificate, + :param username: (optional) user name to authenticate to Ignite + cluster, + :param password: (optional) password to authenticate to Ignite cluster. + """ + super().__init__(client, host, port, username, password, **ssl_params) + self._pending_reqs = {} + self._transport = None + self._loop = asyncio.get_event_loop() + self._closed = False + self._transport_closed_fut = None + + @property + def closed(self) -> bool: + """ Tells if socket is closed. """ + return self._closed or not self._transport or self._transport.is_closing() + + async def connect(self): + """ + Connect to the given server node with protocol version fallback. + """ + if self.alive: + return + await self._connect() + + async def _connect(self): + detecting_protocol = False + + # choose highest version first + if self.client.protocol_context is None: + detecting_protocol = True + self.client.protocol_context = ProtocolContext(max(PROTOCOLS), BitmaskFeature.all_supported()) + + while True: + try: + self._on_handshake_start() + result = await self._connect_version() + self._on_handshake_success(result) + return + except HandshakeError as e: + if e.expected_version in PROTOCOLS: + self.client.protocol_context.version = e.expected_version + continue + else: + self._on_handshake_fail(e) + raise e + except AuthenticationError as e: + self._on_handshake_fail(e) + raise e + except Exception as e: + self._on_handshake_fail(e) + # restore undefined protocol version + if detecting_protocol: + self.client.protocol_context = None + raise e + + def process_connection_lost(self, err, reconnect=False): + self.failed = True + for _, fut in self._pending_reqs.items(): + if not fut.done(): + fut.set_exception(err) + self._pending_reqs.clear() + + if self._transport_closed_fut and not self._transport_closed_fut.done(): + self._transport_closed_fut.set_result(None) + + if reconnect and not self._closed: + self._on_connection_lost(err) + self._loop.create_task(self._reconnect()) + + def process_message(self, data): + req_id = int.from_bytes(data[4:12], byteorder=PROTOCOL_BYTE_ORDER, signed=True) + + req_fut = self._pending_reqs.get(req_id) + if req_fut: + if not req_fut.done(): + req_fut.set_result(data) + del self._pending_reqs[req_id] + + async def _connect_version(self) -> Union[dict, OrderedDict]: + """ + Connect to the given server node using protocol version + defined on client. 
+ """ + + ssl_context = create_ssl_context(self.ssl_params) + handshake_fut = self._loop.create_future() + self._closed = False + self._transport, _ = await self._loop.create_connection(lambda: BaseProtocol(self, handshake_fut), + host=self.host, port=self.port, ssl=ssl_context) + try: + hs_response = await asyncio.wait_for(handshake_fut, self.handshake_timeout) + except asyncio.TimeoutError: + raise ConnectionError('timed out') + + if hs_response.op_code == 0: + await self.close() + self._process_handshake_error(hs_response) + + return hs_response + + async def reconnect(self): + await self._reconnect() + + async def _reconnect(self): + if self.alive: + return + + await self._close_transport() + # connect and silence the connection errors + try: + await self._connect() + except connection_errors: + pass + + async def request(self, query_id, data: Union[bytes, bytearray]) -> bytearray: + """ + Perform request. + :param query_id: id of query. + :param data: bytes to send. + """ + if not self.alive: + raise SocketError('Attempt to use closed connection.') + + return await self._send(query_id, data) + + async def _send(self, query_id, data): + fut = self._loop.create_future() + self._pending_reqs[query_id] = fut + self._transport.write(data) + return await fut + + async def close(self): + self._closed = True + await self._close_transport() + + async def _close_transport(self): + """ + Close connection. + """ + if self._transport and not self._transport.is_closing(): + self._transport_closed_fut = self._loop.create_future() + + self._transport.close() + self._transport = None + try: + await asyncio.wait_for(self._transport_closed_fut, 1.0) + except asyncio.TimeoutError: + pass + finally: + self._on_connection_lost(expected=True) + self._transport_closed_fut = None diff --git a/pyignite/connection/bitmask_feature.py b/pyignite/connection/bitmask_feature.py new file mode 100644 index 0000000..80d51ad --- /dev/null +++ b/pyignite/connection/bitmask_feature.py @@ -0,0 +1,57 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from enum import IntFlag +from typing import Optional + +from pyignite.constants import PROTOCOL_BYTE_ORDER + + +class BitmaskFeature(IntFlag): + CLUSTER_API = 1 << 2 + + def __bytes__(self) -> bytes: + """ + Convert feature flags array to bytearray bitmask. + + :return: Bitmask as bytearray. + """ + full_bytes = self.bit_length() // 8 + 1 + return self.to_bytes(full_bytes, byteorder=PROTOCOL_BYTE_ORDER) + + @staticmethod + def all_supported() -> 'BitmaskFeature': + """ + Get all supported features. + + :return: All supported features. 
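# Hedged sketch of the flag arithmetic used by this enum: a mask is OR-ed
# together member by member and then serialized with the protocol byte order,
# mirroring __bytes__() above. DemoFeature is a made-up stand-in.
from enum import IntFlag


class DemoFeature(IntFlag):
    CLUSTER_API = 1 << 2
    SOME_LATER_FEATURE = 1 << 5  # invented flag, for illustration only


mask = DemoFeature(0)
for feature in DemoFeature:
    mask |= feature

raw = mask.to_bytes(mask.bit_length() // 8 + 1, byteorder='little')
print(list(raw))  # [36] -> bits 2 and 5 set in a single byte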
+ """ + supported = BitmaskFeature(0) + for feature in BitmaskFeature: + supported |= feature + return supported + + @staticmethod + def from_array(features_array: bytes) -> Optional['BitmaskFeature']: + """ + Get features from bytearray. + + :param features_array: Feature bitmask as array, + :return: Return features. + """ + if features_array is None: + return None + return BitmaskFeature.from_bytes(features_array, byteorder=PROTOCOL_BYTE_ORDER) diff --git a/pyignite/connection/connection.py b/pyignite/connection/connection.py new file mode 100644 index 0000000..4596e23 --- /dev/null +++ b/pyignite/connection/connection.py @@ -0,0 +1,397 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from collections import OrderedDict +import socket +from typing import Union + +from pyignite.constants import PROTOCOLS, IGNITE_DEFAULT_HOST, IGNITE_DEFAULT_PORT, PROTOCOL_BYTE_ORDER +from pyignite.exceptions import HandshakeError, SocketError, connection_errors, AuthenticationError, ParameterError +from .bitmask_feature import BitmaskFeature + +from .handshake import HandshakeRequest, HandshakeResponse +from .protocol_context import ProtocolContext +from .ssl import wrap, check_ssl_params +from ..stream import BinaryStream + +CLIENT_STATUS_AUTH_FAILURE = 2000 + +logger = logging.getLogger('.'.join(__name__.split('.')[:-1])) + + +class BaseConnection: + def __init__(self, client, host: str = None, port: int = None, username: str = None, password: str = None, + handshake_timeout: float = 10.0, **ssl_params): + self.client = client + self.handshake_timeout = handshake_timeout + self.host = host if host else IGNITE_DEFAULT_HOST + self.port = port if port else IGNITE_DEFAULT_PORT + self.username = username + self.password = password + self.uuid = None + + if handshake_timeout <= 0.0: + raise ParameterError("handshake_timeout should be positive") + + check_ssl_params(ssl_params) + + self.ssl_params = ssl_params + self._failed = False + + @property + def closed(self) -> bool: + """ Tells if socket is closed. """ + raise NotImplementedError + + @property + def failed(self) -> bool: + """ Tells if connection is failed. """ + return self._failed + + @failed.setter + def failed(self, value): + self._failed = value + + @property + def alive(self) -> bool: + """ Tells if connection is up and no failure detected. """ + return not self.failed and not self.closed + + def __repr__(self) -> str: + return '{}:{}'.format(self.host or '?', self.port or '?') + + @property + def protocol_context(self): + """ + Returns protocol context, or None, if no connection to the Ignite + cluster was yet established. + """ + return self.client.protocol_context + + def _process_handshake_error(self, response): + # if handshake fails for any reason other than protocol mismatch + # (i.e. 
authentication error), server version is 0.0.0 + if response.client_status == CLIENT_STATUS_AUTH_FAILURE: + raise AuthenticationError(response.message) + + protocol_version = self.client.protocol_context.version + server_version = (response.version_major, response.version_minor, response.version_patch) + error_text = f'Handshake error: {response.message}' + if any(server_version): + error_text += f' Server expects binary protocol version ' \ + f'{server_version[0]}.{server_version[1]}.{server_version[2]}. ' \ + f'Client provides ' \ + f'{protocol_version[0]}.{protocol_version[1]}.{protocol_version[2]}.' + raise HandshakeError(server_version, error_text) + + def _on_handshake_start(self): + if logger.isEnabledFor(logging.DEBUG): + logger.debug("Connecting to node(address=%s, port=%d) with protocol context %s", + self.host, self.port, self.protocol_context) + if self._enabled_connection_listener: + self._connection_listener.publish_handshake_start(self.host, self.port, self.protocol_context) + + def _on_handshake_success(self, result): + features = BitmaskFeature.from_array(result.get('features', None)) + self.client.protocol_context.features = features + self.uuid = result.get('node_uuid', None) # version-specific (1.4+) + self.failed = False + + if logger.isEnabledFor(logging.DEBUG): + logger.debug("Connected to node(address=%s, port=%d, node_uuid=%s) with protocol context %s", + self.host, self.port, self.uuid, self.protocol_context) + if self._enabled_connection_listener: + self._connection_listener.publish_handshake_success(self.host, self.port, self.protocol_context, self.uuid) + + def _on_handshake_fail(self, err): + self.failed = True + + if isinstance(err, AuthenticationError): + logger.error("Authentication failed while connecting to node(address=%s, port=%d): %s", + self.host, self.port, err) + if self._enabled_connection_listener: + self._connection_listener.publish_authentication_fail(self.host, self.port, self.protocol_context, err) + else: + logger.error("Failed to perform handshake, connection to node(address=%s, port=%d) " + "with protocol context %s failed: %s", + self.host, self.port, self.protocol_context, err, exc_info=True) + if self._enabled_connection_listener: + self._connection_listener.publish_handshake_fail(self.host, self.port, self.protocol_context, err) + + def _on_connection_lost(self, err=None, expected=False): + if expected: + if logger.isEnabledFor(logging.DEBUG): + logger.debug("Connection closed to node(address=%s, port=%d, node_uuid=%s)", + self.host, self.port, self.uuid) + if self._enabled_connection_listener: + self._connection_listener.publish_connection_closed(self.host, self.port, self.uuid) + else: + logger.info("Connection lost to node(address=%s, port=%d, node_uuid=%s): %s", + self.host, self.port, self.uuid, err) + if self._enabled_connection_listener: + self._connection_listener.publish_connection_lost(self.host, self.port, self.uuid, err) + + @property + def _enabled_connection_listener(self): + return self.client._event_listeners and self.client._event_listeners.enabled_connection_listener + + @property + def _connection_listener(self): + return self.client._event_listeners + + +DEFAULT_INITIAL_BUF_SIZE = 1024 + + +class Connection(BaseConnection): + """ + This is a `pyignite` class, that represents a connection to Ignite + node. It serves multiple purposes: + + * socket wrapper. Detects fragmentation and network errors. See also + https://docs.python.org/3/howto/sockets.html, + * binary protocol connector. 
Encapsulates handshake and failover reconnection. + """ + + def __init__(self, client: 'Client', host: str, port: int, username: str = None, password: str = None, + timeout: float = None, handshake_timeout: float = 10.0, + **ssl_params): + """ + Initialize connection. + + For the use of the SSL-related parameters see + https://docs.python.org/3/library/ssl.html#ssl-certificates. + + :param client: Ignite client object, + :param host: Ignite server node's host name or IP, + :param port: Ignite server node's port number, + :param timeout: (optional) sets timeout (in seconds) for each socket + operation including `connect`. 0 means non-blocking mode, which is + virtually guaranteed to fail. Can accept integer or float value. + Default is None (blocking mode), + :param handshake_timeout: (optional) sets timeout (in seconds) for performing handshake (connection) + with node. Default is 10.0. + :param use_ssl: (optional) set to True if Ignite server uses SSL + on its binary connector. Defaults to use SSL when username + and password has been supplied, not to use SSL otherwise, + :param ssl_version: (optional) SSL version constant from standard + `ssl` module. Defaults to TLS v1.2, + :param ssl_ciphers: (optional) ciphers to use. If not provided, + `ssl` default ciphers are used, + :param ssl_cert_reqs: (optional) determines how the remote side + certificate is treated: + + * `ssl.CERT_NONE` − remote certificate is ignored (default), + * `ssl.CERT_OPTIONAL` − remote certificate will be validated, + if provided, + * `ssl.CERT_REQUIRED` − valid remote certificate is required, + + :param ssl_keyfile: (optional) a path to SSL key file to identify + local (client) party, + :param ssl_keyfile_password: (optional) password for SSL key file, + can be provided when key file is encrypted to prevent OpenSSL + password prompt, + :param ssl_certfile: (optional) a path to ssl certificate file + to identify local (client) party, + :param ssl_ca_certfile: (optional) a path to a trusted certificate + or a certificate chain. Required to check the validity of the remote + (server-side) certificate, + :param username: (optional) user name to authenticate to Ignite + cluster, + :param password: (optional) password to authenticate to Ignite cluster. + """ + super().__init__(client, host, port, username, password, handshake_timeout, **ssl_params) + self.timeout = timeout + self._socket = None + + @property + def closed(self) -> bool: + return self._socket is None + + def connect(self): + """ + Connect to the given server node with protocol version fallback. 
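# Hedged sketch (addresses are illustrative): the fallback and reconnect logic
# of this class is normally exercised through the public Client API, which
# keeps a connection per configured node.
from pyignite import Client

client = Client(timeout=20.0)
nodes = [('10.0.0.1', 10800), ('10.0.0.2', 10800), ('10.0.0.3', 10800)]
with client.connect(nodes):
    # if one node goes down, subsequent requests fail over to the remaining ones
    print(client.get_cache_names())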
+ """ + detecting_protocol = False + + # choose highest version first + if self.client.protocol_context is None: + detecting_protocol = True + self.client.protocol_context = ProtocolContext(max(PROTOCOLS), BitmaskFeature.all_supported()) + + while True: + try: + self._on_handshake_start() + result = self._connect_version() + self._socket.settimeout(self.timeout) + self._on_handshake_success(result) + return + except HandshakeError as e: + if e.expected_version in PROTOCOLS: + self.client.protocol_context.version = e.expected_version + continue + else: + self._on_handshake_fail(e) + raise e + except AuthenticationError as e: + self._on_handshake_fail(e) + raise e + except Exception as e: + self._on_handshake_fail(e) + # restore undefined protocol version + if detecting_protocol: + self.client.protocol_context = None + raise e + + def _connect_version(self) -> Union[dict, OrderedDict]: + """ + Connect to the given server node using protocol version + defined on client. + """ + + self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self._socket.settimeout(self.handshake_timeout) + self._socket = wrap(self._socket, self.ssl_params) + self._socket.connect((self.host, self.port)) + + protocol_context = self.client.protocol_context + + hs_request = HandshakeRequest( + protocol_context, + self.username, + self.password + ) + + with BinaryStream(self.client) as stream: + hs_request.from_python(stream) + self.send(stream.getvalue(), reconnect=False) + + with BinaryStream(self.client, self.recv(reconnect=False)) as stream: + hs_response = HandshakeResponse.parse(stream, self.protocol_context) + + if hs_response.op_code == 0: + self.close() + self._process_handshake_error(hs_response) + + return hs_response + + def reconnect(self): + if self.alive: + return + + self.close(on_reconnect=True) + + # connect and silence the connection errors + try: + self.connect() + except connection_errors: + pass + + def request(self, data: Union[bytes, bytearray], flags=None) -> bytearray: + """ + Perform request. + + :param data: bytes to send, + :param flags: (optional) OS-specific flags. + """ + self.send(data, flags=flags) + return self.recv() + + def send(self, data: Union[bytes, bytearray], flags=None, reconnect=True): + """ + Send data down the socket. + + :param data: bytes to send, + :param flags: (optional) OS-specific flags. + :param reconnect: (optional) reconnect on failure, default True. + """ + if self.closed: + raise SocketError('Attempt to use closed connection.') + + kwargs = {} + if flags is not None: + kwargs['flags'] = flags + + try: + self._socket.sendall(data, **kwargs) + except connection_errors as e: + self.failed = True + if reconnect: + self._on_connection_lost(e) + self.reconnect() + raise e + + def recv(self, flags=None, reconnect=True) -> bytearray: + """ + Receive data from the socket. + + :param flags: (optional) OS-specific flags. + :param reconnect: (optional) reconnect on failure, default True. 
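# Hedged illustration of the framing recv() relies on: the first 4 bytes of a
# response hold the payload length (signed, little-endian), so a complete
# packet occupies length + 4 bytes.
payload = b'\x01\x02\x03\x04\x05'
packet = len(payload).to_bytes(4, byteorder='little', signed=True) + payload

packet_len = int.from_bytes(packet[0:4], byteorder='little', signed=True) + 4
assert packet_len == len(packet) == 9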
+ """ + if self.closed: + raise SocketError('Attempt to use closed connection.') + + kwargs = {} + if flags is not None: + kwargs['flags'] = flags + + data = bytearray(DEFAULT_INITIAL_BUF_SIZE) + buffer = memoryview(data) + total_rcvd, packet_len = 0, 0 + while True: + try: + bytes_rcvd = self._socket.recv_into(buffer, len(buffer), **kwargs) + if bytes_rcvd == 0: + raise SocketError('Connection broken.') + total_rcvd += bytes_rcvd + except connection_errors as e: + self.failed = True + if reconnect: + self._on_connection_lost(e) + self.reconnect() + raise e + + if packet_len == 0 and total_rcvd > 4: + packet_len = int.from_bytes(data[0:4], PROTOCOL_BYTE_ORDER, signed=True) + 4 + if packet_len > len(data): + buffer.release() + data.extend(bytearray(packet_len - len(data))) + buffer = memoryview(data)[total_rcvd:] + continue + + if 0 < packet_len <= total_rcvd: + buffer.release() + break + + buffer = buffer[bytes_rcvd:] + + return data + + def close(self, on_reconnect=False): + """ + Try to mark socket closed, then unlink it. This is recommended but + not required, since sockets are automatically closed when + garbage-collected. + """ + if self._socket: + try: + self._socket.shutdown(socket.SHUT_RDWR) + self._socket.close() + except connection_errors: + pass + if not on_reconnect and not self.failed: + self._on_connection_lost(expected=True) + self._socket = None diff --git a/pyignite/connection/generators.py b/pyignite/connection/generators.py deleted file mode 100644 index d76db0e..0000000 --- a/pyignite/connection/generators.py +++ /dev/null @@ -1,48 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class RoundRobin: - """ - Round-robin generator for use with `Client.connect()`. Cycles a node - list until a maximum number of reconnects is reached (if set). - """ - - def __init__(self, nodes: list, max_reconnects: int=None): - """ - :param nodes: list of two-tuples of (host, port) format, - :param max_reconnects: (optional) maximum number of reconnect attempts. - defaults to None (cycle nodes infinitely). 
- """ - self.nodes = nodes - self.max_reconnects = max_reconnects - self.node_index = 0 - self.reconnects = 0 - - def __iter__(self) -> 'RoundRobin': - return self - - def __next__(self) -> tuple: - if self.max_reconnects is not None: - if self.reconnects >= self.max_reconnects: - raise StopIteration - else: - self.reconnects += 1 - - if self.node_index >= len(self.nodes): - self.node_index = 0 - node = self.nodes[self.node_index] - self.node_index += 1 - return node diff --git a/pyignite/connection/handshake.py b/pyignite/connection/handshake.py index 13d57fe..af7bdb3 100644 --- a/pyignite/connection/handshake.py +++ b/pyignite/connection/handshake.py @@ -15,9 +15,10 @@ from typing import Optional -from pyignite.constants import * -from pyignite.datatypes import Byte, Int, Short, String +from pyignite.connection.protocol_context import ProtocolContext +from pyignite.datatypes import Byte, Int, Short, String, UUIDObject, ByteArrayObject from pyignite.datatypes.internal import Struct +from pyignite.stream import READ_BACKWARD OP_HANDSHAKE = 1 @@ -27,9 +28,11 @@ class HandshakeRequest: handshake_struct = None username = None password = None + protocol_context = None def __init__( - self, username: Optional[str]=None, password: Optional[str]=None + self, protocol_context: 'ProtocolContext', + username: Optional[str] = None, password: Optional[str] = None ): fields = [ ('length', Int), @@ -39,6 +42,9 @@ def __init__( ('version_patch', Short), ('client_code', Byte), ] + self.protocol_context = protocol_context + if self.protocol_context.is_feature_flags_supported(): + fields.append(('features', ByteArrayObject)) if username and password: self.username = username self.password = password @@ -48,15 +54,26 @@ def __init__( ]) self.handshake_struct = Struct(fields) - def __bytes__(self) -> bytes: + def from_python(self, stream): + self.handshake_struct.from_python(stream, self.__create_handshake_data()) + + async def from_python_async(self, stream): + await self.handshake_struct.from_python_async(stream, self.__create_handshake_data()) + + def __create_handshake_data(self): + version = self.protocol_context.version handshake_data = { 'length': 8, 'op_code': OP_HANDSHAKE, - 'version_major': PROTOCOL_VERSION_MAJOR, - 'version_minor': PROTOCOL_VERSION_MINOR, - 'version_patch': PROTOCOL_VERSION_PATCH, + 'version_major': version[0], + 'version_minor': version[1], + 'version_patch': version[2], 'client_code': 2, # fixed value defined by protocol } + if self.protocol_context.is_feature_flags_supported(): + features = bytes(self.protocol_context.features) + handshake_data['features'] = features + handshake_data['length'] += 5 + len(features) if self.username and self.password: handshake_data.update({ 'username': self.username, @@ -67,25 +84,71 @@ def __bytes__(self) -> bytes: len(self.username), len(self.password), ]) - return self.handshake_struct.from_python(handshake_data) + return handshake_data -def read_response(client): - response_start = Struct([ +class HandshakeResponse(dict): + """ + Handshake response. 
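# Hedged sketch of the dict-with-attribute-access behaviour implemented below;
# the field values are made up.
from pyignite.connection.handshake import HandshakeResponse

resp = HandshakeResponse({'op_code': 1, 'version_major': 1})
assert resp['op_code'] == resp.op_code == 1
assert resp.node_uuid is None  # absent keys resolve to None via __getattr__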
+ """ + __response_start = Struct([ ('length', Int), ('op_code', Byte), ]) - start_class, start_buffer = response_start.parse(client) - start = start_class.from_buffer_copy(start_buffer) - data = response_start.to_python(start) - if data['op_code'] == 0: - response_end = Struct([ - ('version_major', Short), - ('version_minor', Short), - ('version_patch', Short), - ('message', String), - ]) - end_class, end_buffer = response_end.parse(client) - end = end_class.from_buffer_copy(end_buffer) - data.update(response_end.to_python(end)) - return data + + def __init__(self, data): + super().__init__() + self.update(data) + + def __getattr__(self, item): + return self.get(item) + + @classmethod + def parse(cls, stream, protocol_context): + start_class = cls.__response_start.parse(stream) + start = stream.read_ctype(start_class, direction=READ_BACKWARD) + data = cls.__response_start.to_python(start) + + response_end = cls.__create_response_end(data, protocol_context) + if response_end: + end_class = response_end.parse(stream) + end = stream.read_ctype(end_class, direction=READ_BACKWARD) + data.update(response_end.to_python(end)) + + return cls(data) + + @classmethod + async def parse_async(cls, stream, protocol_context): + start_class = cls.__response_start.parse(stream) + start = stream.read_ctype(start_class, direction=READ_BACKWARD) + data = await cls.__response_start.to_python_async(start) + + response_end = cls.__create_response_end(data, protocol_context) + if response_end: + end_class = await response_end.parse_async(stream) + end = stream.read_ctype(end_class, direction=READ_BACKWARD) + data.update(await response_end.to_python_async(end)) + + return cls(data) + + @classmethod + def __create_response_end(cls, start_data, protocol_context): + response_end = None + if start_data['op_code'] == 0: + response_end = Struct([ + ('version_major', Short), + ('version_minor', Short), + ('version_patch', Short), + ('message', String), + ('client_status', Int) + ]) + elif protocol_context.is_feature_flags_supported(): + response_end = Struct([ + ('features', ByteArrayObject), + ('node_uuid', UUIDObject), + ]) + elif protocol_context.is_partition_awareness_supported(): + response_end = Struct([ + ('node_uuid', UUIDObject), + ]) + return response_end diff --git a/pyignite/connection/protocol_context.py b/pyignite/connection/protocol_context.py new file mode 100644 index 0000000..f60d45b --- /dev/null +++ b/pyignite/connection/protocol_context.py @@ -0,0 +1,118 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Tuple + +from pyignite.connection.bitmask_feature import BitmaskFeature + + +class ProtocolContext: + """ + Protocol context. Provides ability to easily check supported supported + protocol features. 
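# Hedged sketch of the version and feature checks defined below; the version
# tuples are the ones listed in pyignite.constants.PROTOCOLS.
from pyignite.connection.bitmask_feature import BitmaskFeature
from pyignite.connection.protocol_context import ProtocolContext

ctx = ProtocolContext((1, 7, 0), BitmaskFeature.all_supported())
assert ctx.is_partition_awareness_supported()  # 1.4.0 and later
assert ctx.is_transactions_supported()         # 1.6.0 and later
assert ctx.is_feature_flags_supported()        # 1.7.0 and later

ctx.version = (1, 3, 0)  # downgrading drops the feature mask entirely
assert ctx.features is None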
+ """ + + def __init__(self, version: Tuple[int, int, int], features: BitmaskFeature = None): + self._version = version + self._features = features + self._ensure_consistency() + + def __hash__(self): + return hash((self._version, self._features)) + + def __eq__(self, other): + return isinstance(other, ProtocolContext) and \ + self.version == other.version and \ + self.features == other.features + + def __str__(self): + return f'ProtocolContext(version={self._version}, features={self._features})' + + def __repr__(self): + return self.__str__() + + def _ensure_consistency(self): + if not self.is_feature_flags_supported(): + self._features = None + + def copy(self): + return ProtocolContext(self.version, self.features) + + @property + def version(self): + return getattr(self, '_version', None) + + @version.setter + def version(self, version: Tuple[int, int, int]): + """ + Set version. + + This call may result in features being reset to None if the protocol + version does not support feature masks. + + :param version: Version to set. + """ + setattr(self, '_version', version) + self._ensure_consistency() + + @property + def features(self): + return getattr(self, '_features', None) + + @features.setter + def features(self, features: BitmaskFeature): + """ + Try and set new feature set. + + If features are not supported by the protocol, None is set as features + instead. + + :param features: Features to set. + """ + setattr(self, '_features', features) + self._ensure_consistency() + + def is_partition_awareness_supported(self) -> bool: + """ + Check whether partition awareness supported by the current protocol. + """ + return self.version >= (1, 4, 0) + + def is_status_flags_supported(self) -> bool: + """ + Check whether status flags supported by the current protocol. + """ + return self.version >= (1, 4, 0) + + def is_transactions_supported(self) -> bool: + """ + Check whether transactions supported by the current protocol. + """ + return self.version >= (1, 6, 0) + + def is_feature_flags_supported(self) -> bool: + """ + Check whether feature flags supported by the current protocol. + """ + return self.version >= (1, 7, 0) + + def is_cluster_api_supported(self) -> bool: + """ + Check whether cluster API supported by the current protocol. + """ + return self.features and BitmaskFeature.CLUSTER_API in self.features + + def is_expiry_policy_supported(self) -> bool: + return self.version >= (1, 6, 0) diff --git a/pyignite/connection/ssl.py b/pyignite/connection/ssl.py index 044b103..385b414 100644 --- a/pyignite/connection/ssl.py +++ b/pyignite/connection/ssl.py @@ -16,34 +16,62 @@ import ssl from ssl import SSLContext -from pyignite.constants import * +from pyignite.constants import SSL_DEFAULT_CIPHERS, SSL_DEFAULT_VERSION +from pyignite.exceptions import ParameterError -def wrap(client, _socket): +def wrap(socket, ssl_params): """ Wrap socket in SSL wrapper. 
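# Hedged example (paths and credentials are placeholders): the ssl_* options
# handled here are normally supplied to the public Client constructor and end
# up in the ssl_params consumed by wrap()/create_ssl_context().
from pyignite import Client

client = Client(
    use_ssl=True,
    ssl_keyfile='/path/to/client.key',
    ssl_certfile='/path/to/client.crt',
    ssl_ca_certfile='/path/to/ca.crt',
    username='ignite',
    password='ignite',
)
with client.connect('127.0.0.1', 10800):
    print(client.get_cache_names())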
""" - if client.init_kwargs.get('use_ssl', None): - keyfile = client.init_kwargs.get('ssl_keyfile', None) - certfile = client.init_kwargs.get('ssl_certfile', None) - - if keyfile and not certfile: - raise ValueError("certfile must be specified") - - password = client.init_kwargs.get('ssl_keyfile_password', None) - ssl_version = client.init_kwargs.get('ssl_version', SSL_DEFAULT_VERSION) - ciphers = client.init_kwargs.get('ssl_ciphers', SSL_DEFAULT_CIPHERS) - cert_reqs = client.init_kwargs.get('ssl_cert_reqs', ssl.CERT_NONE) - ca_certs = client.init_kwargs.get('ssl_ca_certfile', None) - - context = SSLContext(ssl_version) - context.verify_mode = cert_reqs - - if ca_certs: - context.load_verify_locations(ca_certs) - if certfile: - context.load_cert_chain(certfile, keyfile, password) - if ciphers: - context.set_ciphers(ciphers) - - _socket = context.wrap_socket(sock=_socket) - - return _socket + if not ssl_params.get('use_ssl'): + return socket + + context = create_ssl_context(ssl_params) + + return context.wrap_socket(sock=socket) + + +def check_ssl_params(params): + expected_args = [ + 'use_ssl', + 'ssl_version', + 'ssl_ciphers', + 'ssl_cert_reqs', + 'ssl_keyfile', + 'ssl_keyfile_password', + 'ssl_certfile', + 'ssl_ca_certfile', + ] + for param in params: + if param not in expected_args: + raise ParameterError(( + 'Unexpected parameter for connection initialization: `{}`' + ).format(param)) + + +def create_ssl_context(ssl_params): + if not ssl_params.get('use_ssl'): + return None + + keyfile = ssl_params.get('ssl_keyfile', None) + certfile = ssl_params.get('ssl_certfile', None) + + if keyfile and not certfile: + raise ValueError("certfile must be specified") + + password = ssl_params.get('ssl_keyfile_password', None) + ssl_version = ssl_params.get('ssl_version', SSL_DEFAULT_VERSION) + ciphers = ssl_params.get('ssl_ciphers', SSL_DEFAULT_CIPHERS) + cert_reqs = ssl_params.get('ssl_cert_reqs', ssl.CERT_NONE) + ca_certs = ssl_params.get('ssl_ca_certfile', None) + + context = SSLContext(ssl_version) + context.verify_mode = cert_reqs + + if ca_certs: + context.load_verify_locations(ca_certs) + if certfile: + context.load_cert_chain(certfile, keyfile, password) + if ciphers: + context.set_ciphers(ciphers) + + return context diff --git a/pyignite/constants.py b/pyignite/constants.py index 78c9379..c08a3ce 100644 --- a/pyignite/constants.py +++ b/pyignite/constants.py @@ -21,17 +21,27 @@ __all__ = [ - 'PROTOCOL_VERSION_MAJOR', 'PROTOCOL_VERSION_MINOR', - 'PROTOCOL_VERSION_PATCH', 'MAX_LONG', 'MIN_LONG', 'MAX_INT', 'MIN_INT', + 'PROTOCOLS', 'MAX_LONG', 'MIN_LONG', 'MAX_INT', 'MIN_INT', 'PROTOCOL_BYTE_ORDER', 'PROTOCOL_STRING_ENCODING', 'PROTOCOL_CHAR_ENCODING', 'SSL_DEFAULT_VERSION', 'SSL_DEFAULT_CIPHERS', 'FNV1_OFFSET_BASIS', 'FNV1_PRIME', 'IGNITE_DEFAULT_HOST', 'IGNITE_DEFAULT_PORT', + 'RHF_ERROR', 'RHF_TOPOLOGY_CHANGED', 'AFFINITY_DELAY', 'AFFINITY_RETRIES', + 'RECONNECT_BACKOFF_SEQUENCE', ] +PROTOCOLS = { + (1, 7, 0), + (1, 6, 0), + (1, 5, 0), + (1, 4, 0), + (1, 3, 0), + (1, 2, 0), +} + PROTOCOL_VERSION_MAJOR = 1 -PROTOCOL_VERSION_MINOR = 2 -PROTOCOL_VERSION_PATCH = 0 +PROTOCOL_VERSION_MINOR = 7 +PROTOCOL_VERSION_PATCH = 1 MAX_LONG = 9223372036854775807 MIN_LONG = -9223372036854775808 @@ -42,7 +52,7 @@ PROTOCOL_STRING_ENCODING = 'utf-8' PROTOCOL_CHAR_ENCODING = 'utf-16le' -SSL_DEFAULT_VERSION = ssl.PROTOCOL_TLSv1_1 +SSL_DEFAULT_VERSION = ssl.PROTOCOL_TLSv1_2 SSL_DEFAULT_CIPHERS = ssl._DEFAULT_CIPHERS FNV1_OFFSET_BASIS = 0x811c9dc5 @@ -50,3 +60,12 @@ IGNITE_DEFAULT_HOST = 'localhost' 
IGNITE_DEFAULT_PORT = 10800 + +# response header flags +RHF_ERROR = 1 +RHF_TOPOLOGY_CHANGED = 2 + +AFFINITY_DELAY = 0.01 +AFFINITY_RETRIES = 32 + +RECONNECT_BACKOFF_SEQUENCE = [0, 1, 1, 2, 3, 5, 8, 13] diff --git a/pyignite/cursors.py b/pyignite/cursors.py new file mode 100644 index 0000000..a690d94 --- /dev/null +++ b/pyignite/cursors.py @@ -0,0 +1,384 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module contains sync and async cursors for different types of queries. +""" + +import asyncio + +from pyignite.api import ( + scan, scan_cursor_get_page, resource_close, scan_async, scan_cursor_get_page_async, resource_close_async, sql, + sql_cursor_get_page, sql_fields, sql_fields_cursor_get_page, sql_fields_cursor_get_page_async, sql_fields_async +) +from pyignite.exceptions import CacheError, SQLError + + +__all__ = ['ScanCursor', 'SqlCursor', 'SqlFieldsCursor', 'AioScanCursor', 'AioSqlFieldsCursor'] + + +class BaseCursorMixin: + @property + def connection(self): + """ + Ignite cluster connection. + """ + return getattr(self, '_conn', None) + + @connection.setter + def connection(self, value): + setattr(self, '_conn', value) + + @property + def cursor_id(self): + """ + Cursor id. + """ + return getattr(self, '_cursor_id', None) + + @cursor_id.setter + def cursor_id(self, value): + setattr(self, '_cursor_id', value) + + @property + def more(self): + """ + Whether cursor has more values. + """ + return getattr(self, '_more', None) + + @more.setter + def more(self, value): + setattr(self, '_more', value) + + @property + def cache_info(self): + """ + Cache id. + """ + return getattr(self, '_cache_info', None) + + @cache_info.setter + def cache_info(self, value): + setattr(self, '_cache_info', value) + + @property + def client(self): + """ + Apache Ignite client. + """ + return getattr(self, '_client', None) + + @client.setter + def client(self, value): + setattr(self, '_client', value) + + @property + def data(self): + """ + Current fetched data. + """ + return getattr(self, '_data', None) + + @data.setter + def data(self, value): + setattr(self, '_data', value) + + +class CursorMixin(BaseCursorMixin): + def __enter__(self): + return self + + def __iter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + + def close(self): + """ + Close cursor. + """ + if self.connection and self.cursor_id and self.more: + resource_close(self.connection, self.cursor_id) + + +class AioCursorMixin(BaseCursorMixin): + def __await__(self): + return (yield from self.__aenter__().__await__()) + + def __aiter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.close() + + async def close(self): + """ + Close cursor. 
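+
+        This is a no-op unless the cursor is still open on the server side:
+        the ``resource_close_async`` call is only issued when a connection,
+        a cursor id and unfetched data (``more``) are all present.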
+ """ + if self.connection and self.cursor_id and self.more: + await resource_close_async(self.connection, self.cursor_id) + + +class AbstractScanCursor: + def __init__(self, client, cache_info, page_size, partitions, local): + self.client = client + self.cache_info = cache_info + self._page_size = page_size + self._partitions = partitions + self._local = local + + def _finalize_init(self, result): + if result.status != 0: + raise CacheError(result.message) + + self.cursor_id, self.more = result.value['cursor'], result.value['more'] + self.data = iter(result.value['data'].items()) + + def _process_page_response(self, result): + if result.status != 0: + raise CacheError(result.message) + + self.data, self.more = iter(result.value['data'].items()), result.value['more'] + + +class ScanCursor(AbstractScanCursor, CursorMixin): + """ + Synchronous scan cursor. + """ + def __init__(self, client, cache_info, page_size, partitions, local): + """ + :param client: Synchronous Apache Ignite client. + :param cache_info: Cache meta info. + :param page_size: page size. + :param partitions: number of partitions to query (negative to query entire cache). + :param local: pass True if this query should be executed on local node only. + """ + super().__init__(client, cache_info, page_size, partitions, local) + + self.connection = self.client.random_node + result = scan(self.connection, self.cache_info, self._page_size, self._partitions, self._local) + self._finalize_init(result) + + def __next__(self): + if not self.data: + raise StopIteration + + try: + k, v = next(self.data) + except StopIteration: + if self.more: + self._process_page_response(scan_cursor_get_page(self.connection, self.cursor_id)) + k, v = next(self.data) + else: + raise StopIteration + + return self.client.unwrap_binary(k), self.client.unwrap_binary(v) + + +class AioScanCursor(AbstractScanCursor, AioCursorMixin): + """ + Asynchronous scan query cursor. + """ + def __init__(self, client, cache_info, page_size, partitions, local): + """ + :param client: Asynchronous Apache Ignite client. + :param cache_info: Cache meta info. + :param page_size: page size. + :param partitions: number of partitions to query (negative to query entire cache). + :param local: pass True if this query should be executed on local node only. + """ + super().__init__(client, cache_info, page_size, partitions, local) + + async def __aenter__(self): + if not self.connection: + self.connection = await self.client.random_node() + result = await scan_async(self.connection, self.cache_info, self._page_size, self._partitions, self._local) + self._finalize_init(result) + return self + + async def __anext__(self): + if not self.connection: + raise CacheError("Using uninitialized cursor, initialize it using async with expression.") + + if not self.data: + raise StopAsyncIteration + + try: + k, v = next(self.data) + except StopIteration: + if self.more: + self._process_page_response(await scan_cursor_get_page_async(self.connection, self.cursor_id)) + try: + k, v = next(self.data) + except StopIteration: + raise StopAsyncIteration + else: + raise StopAsyncIteration + + return await asyncio.gather( + *[self.client.unwrap_binary(k), self.client.unwrap_binary(v)] + ) + + +class SqlCursor(CursorMixin): + """ + Synchronous SQL query cursor. + """ + def __init__(self, client, cache_info, *args, **kwargs): + """ + :param client: Synchronous Apache Ignite client. + :param cache_info: Cache meta info. 
+ """ + self.client = client + self.cache_info = cache_info + self.connection = self.client.random_node + result = sql(self.connection, self.cache_info, *args, **kwargs) + if result.status != 0: + raise SQLError(result.message) + + self.cursor_id, self.more = result.value['cursor'], result.value['more'] + self.data = iter(result.value['data'].items()) + + def __next__(self): + if not self.data: + raise StopIteration + + try: + k, v = next(self.data) + except StopIteration: + if self.more: + result = sql_cursor_get_page(self.connection, self.cursor_id) + if result.status != 0: + raise SQLError(result.message) + self.data, self.more = iter(result.value['data'].items()), result.value['more'] + + k, v = next(self.data) + else: + raise StopIteration + + return self.client.unwrap_binary(k), self.client.unwrap_binary(v) + + +class AbstractSqlFieldsCursor: + def __init__(self, client, cache_info): + self.client = client + self.cache_info = cache_info + + def _finalize_init(self, result): + if result.status != 0: + raise SQLError(result.message) + + self.cursor_id, self.more = result.value['cursor'], result.value['more'] + self.data = iter(result.value['data']) + self._field_names = result.value.get('fields', None) + if self._field_names: + self._field_count = len(self._field_names) + else: + self._field_count = result.value['field_count'] + + +class SqlFieldsCursor(AbstractSqlFieldsCursor, CursorMixin): + """ + Synchronous SQL fields query cursor. + """ + def __init__(self, client, cache_info, *args, **kwargs): + """ + :param client: Synchronous Apache Ignite client. + :param cache_info: Cache meta info. + """ + super().__init__(client, cache_info) + self.connection = self.client.random_node + self._finalize_init(sql_fields(self.connection, self.cache_info, *args, **kwargs)) + + def __next__(self): + if not self.data: + raise StopIteration + + if self._field_names: + result = self._field_names + self._field_names = None + return result + + try: + row = next(self.data) + except StopIteration: + if self.more: + result = sql_fields_cursor_get_page(self.connection, self.cursor_id, self._field_count) + if result.status != 0: + raise SQLError(result.message) + + self.data, self.more = iter(result.value['data']), result.value['more'] + + row = next(self.data) + else: + raise StopIteration + + return [self.client.unwrap_binary(v) for v in row] + + +class AioSqlFieldsCursor(AbstractSqlFieldsCursor, AioCursorMixin): + """ + Asynchronous SQL fields query cursor. + """ + def __init__(self, client, cache_info, *args, **kwargs): + """ + :param client: Synchronous Apache Ignite client. + :param cache_info: Cache meta info. 
+ """ + super().__init__(client, cache_info) + self._params = (args, kwargs) + + async def __aenter__(self): + await self._initialize(*self._params[0], *self._params[1]) + return self + + async def __anext__(self): + if not self.connection: + raise SQLError("Attempting to use uninitialized aio cursor, please await on it or use with expression.") + + if not self.data: + raise StopAsyncIteration + + if self._field_names: + result = self._field_names + self._field_names = None + return result + + try: + row = next(self.data) + except StopIteration: + if self.more: + result = await sql_fields_cursor_get_page_async(self.connection, self.cursor_id, self._field_count) + if result.status != 0: + raise SQLError(result.message) + + self.data, self.more = iter(result.value['data']), result.value['more'] + try: + row = next(self.data) + except StopIteration: + raise StopAsyncIteration + else: + raise StopAsyncIteration + + return await asyncio.gather(*[self.client.unwrap_binary(v) for v in row]) + + async def _initialize(self, *args, **kwargs): + if self.connection and self.cursor_id: + return + + self.connection = await self.client.random_node() + self._finalize_init(await sql_fields_async(self.connection, self.cache_info, *args, **kwargs)) diff --git a/pyignite/datatypes/__init__.py b/pyignite/datatypes/__init__.py index 5024f79..0ebe56a 100644 --- a/pyignite/datatypes/__init__.py +++ b/pyignite/datatypes/__init__.py @@ -25,3 +25,6 @@ from .primitive_arrays import * from .primitive_objects import * from .standard import * +from .cluster_state import ClusterState +from .expiry_policy import ExpiryPolicy +from .transactions import TransactionIsolation, TransactionConcurrency diff --git a/pyignite/datatypes/base.py b/pyignite/datatypes/base.py index a0522c0..5a4c780 100644 --- a/pyignite/datatypes/base.py +++ b/pyignite/datatypes/base.py @@ -13,12 +13,68 @@ # See the License for the specific language governing permissions and # limitations under the License. -from abc import ABC +class IgniteDataTypeProps: + """ + Add `type_name` and `type_id` properties for all classes and objects + of Ignite type hierarchy. + """ + @property + def type_name(self) -> str: + """ Binary object type name. """ + return getattr(self, '_type_name', None) + + @property + def type_id(self) -> int: + """ Binary object type ID. """ + from pyignite.utils import entity_id + + return getattr( + self, + '_type_id', + entity_id(getattr(self, '_type_name', None)) + ) + + +class IgniteDataTypeMeta(type, IgniteDataTypeProps): + """ + Class variant of Ignate data type properties. + """ + pass -class IgniteDataType(ABC): +class IgniteDataType(metaclass=IgniteDataTypeMeta): """ This is a base class for all Ignite data types, a.k.a. parser/constructor classes, both object and payload varieties. 
""" - pass + @classmethod + async def hashcode_async(cls, value, **kwargs): + return cls.hashcode(value, **kwargs) + + @classmethod + def hashcode(cls, value, **kwargs): + return 0 + + @classmethod + def parse(cls, stream): + raise NotImplementedError + + @classmethod + async def parse_async(cls, stream): + return cls.parse(stream) + + @classmethod + def from_python(cls, stream, value, **kwargs): + raise NotImplementedError + + @classmethod + async def from_python_async(cls, stream, value, **kwargs): + cls.from_python(stream, value, **kwargs) + + @classmethod + def to_python(cls, ctypes_object, **kwargs): + raise NotImplementedError + + @classmethod + async def to_python_async(cls, ctypes_object, **kwargs): + return cls.to_python(ctypes_object, **kwargs) diff --git a/pyignite/datatypes/cache_config.py b/pyignite/datatypes/cache_config.py index 67b353d..4ac28e4 100644 --- a/pyignite/datatypes/cache_config.py +++ b/pyignite/datatypes/cache_config.py @@ -12,15 +12,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +from . import ExpiryPolicy from .standard import String from .internal import AnyDataObject, Struct, StructArray from .primitive import * __all__ = [ - 'cache_config_struct', 'CacheMode', 'PartitionLossPolicy', + 'get_cache_config_struct', 'CacheMode', 'PartitionLossPolicy', 'RebalanceMode', 'WriteSynchronizationMode', 'IndexType', + 'CacheAtomicityMode' ] @@ -118,36 +119,40 @@ class CacheAtomicityMode(Int): ]) -cache_config_struct = Struct([ - ('length', Int), - ('backups_number', Int), - ('cache_mode', CacheMode), - ('cache_atomicity_mode', CacheAtomicityMode), - ('copy_on_read', Bool), - ('data_region_name', String), - ('eager_ttl', Bool), - ('statistics_enabled', Bool), - ('group_name', String), - ('invalidate', Int), - ('default_lock_timeout', Long), - ('max_query_iterators', Int), - ('name', String), - ('is_onheap_cache_enabled', Bool), - ('partition_loss_policy', PartitionLossPolicy), - ('query_detail_metric_size', Int), - ('query_parallelism', Int), - ('read_from_backup', Bool), - ('rebalance_batch_size', Int), - ('rebalance_batches_prefetch_count', Long), - ('rebalance_delay', Long), - ('rebalance_mode', RebalanceMode), - ('rebalance_order', Int), - ('rebalance_throttle', Long), - ('rebalance_timeout', Long), - ('sql_escape_all', Bool), - ('sql_index_inline_max_size', Int), - ('sql_schema', String), - ('write_synchronization_mode', WriteSynchronizationMode), - ('cache_key_configuration', CacheKeyConfiguration), - ('query_entities', QueryEntities), -]) +def get_cache_config_struct(protocol_context): + fields = [ + ('length', Int), + ('cache_atomicity_mode', CacheAtomicityMode), + ('backups_number', Int), + ('cache_mode', CacheMode), + ('copy_on_read', Bool), + ('data_region_name', String), + ('eager_ttl', Bool), + ('statistics_enabled', Bool), + ('group_name', String), + ('default_lock_timeout', Long), + ('max_concurrent_async_operations', Int), + ('max_query_iterators', Int), + ('name', String), + ('is_onheap_cache_enabled', Bool), + ('partition_loss_policy', PartitionLossPolicy), + ('query_detail_metric_size', Int), + ('query_parallelism', Int), + ('read_from_backup', Bool), + ('rebalance_batch_size', Int), + ('rebalance_batches_prefetch_count', Long), + ('rebalance_delay', Long), + ('rebalance_mode', RebalanceMode), + ('rebalance_order', Int), + ('rebalance_throttle', Long), + ('rebalance_timeout', Long), + ('sql_escape_all', Bool), + 
('sql_index_inline_max_size', Int), + ('sql_schema', String), + ('write_synchronization_mode', WriteSynchronizationMode), + ('cache_key_configuration', CacheKeyConfiguration), + ('query_entities', QueryEntities), + ] + if protocol_context.is_expiry_policy_supported(): + fields.append(('expiry_policy', ExpiryPolicy)) + return Struct(fields=fields) diff --git a/pyignite/datatypes/cache_properties.py b/pyignite/datatypes/cache_properties.py index e94db5f..0d7f402 100644 --- a/pyignite/datatypes/cache_properties.py +++ b/pyignite/datatypes/cache_properties.py @@ -14,7 +14,10 @@ # limitations under the License. import ctypes +import math +from typing import Union +from . import ExpiryPolicy from .prop_codes import * from .cache_config import ( CacheMode, CacheAtomicityMode, PartitionLossPolicy, RebalanceMode, @@ -23,7 +26,6 @@ from .primitive import * from .standard import * - __all__ = [ 'PropName', 'PropCacheMode', 'PropCacheAtomicityMode', 'PropBackupsNumber', 'PropWriteSynchronizationMode', 'PropCopyOnRead', 'PropReadFromBackup', @@ -35,7 +37,7 @@ 'PropRebalanceOrder', 'PropRebalanceThrottle', 'PropGroupName', 'PropCacheKeyConfiguration', 'PropDefaultLockTimeout', 'PropMaxConcurrentAsyncOperation', 'PropPartitionLossPolicy', - 'PropEagerTTL', 'PropStatisticsEnabled', 'prop_map', 'AnyProperty', + 'PropEagerTTL', 'PropStatisticsEnabled', 'PropExpiryPolicy', 'prop_map', 'AnyProperty', ] @@ -68,9 +70,10 @@ def prop_map(code: int): PROP_CACHE_KEY_CONFIGURATION: PropCacheKeyConfiguration, PROP_DEFAULT_LOCK_TIMEOUT: PropDefaultLockTimeout, PROP_MAX_CONCURRENT_ASYNC_OPERATIONS: PropMaxConcurrentAsyncOperation, - PROP_PARTITION_LOSS_POLICY: PartitionLossPolicy, + PROP_PARTITION_LOSS_POLICY: PropPartitionLossPolicy, PROP_EAGER_TTL: PropEagerTTL, PROP_STATISTICS_ENABLED: PropStatisticsEnabled, + PROP_EXPIRY_POLICY: PropExpiryPolicy, }[code] @@ -81,7 +84,7 @@ class PropBase: @classmethod def build_header(cls): return type( - cls.__name__+'Header', + cls.__name__ + 'Header', (ctypes.LittleEndianStructure,), { '_pack_': 1, @@ -92,10 +95,11 @@ def build_header(cls): ) @classmethod - def parse(cls, connection: 'Connection'): + def parse(cls, stream): + init_pos = stream.tell() header_class = cls.build_header() - header_buffer = connection.recv(ctypes.sizeof(header_class)) - data_class, data_buffer = cls.prop_data_class.parse(connection) + data_class = cls.prop_data_class.parse(stream) + prop_class = type( cls.__name__, (header_class,), @@ -106,20 +110,47 @@ def parse(cls, connection: 'Connection'): ], } ) - return prop_class, header_buffer + data_buffer + + stream.seek(init_pos + ctypes.sizeof(prop_class)) + return prop_class @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - return cls.prop_data_class.to_python( - ctype_object.data, *args, **kwargs - ) + async def parse_async(cls, stream): + return cls.parse(stream) + + @classmethod + def to_python(cls, ctypes_object, **kwargs): + return cls.prop_data_class.to_python(ctypes_object.data, **kwargs) @classmethod - def from_python(cls, value): + async def to_python_async(cls, ctypes_object, **kwargs): + return cls.to_python(ctypes_object, **kwargs) + + @classmethod + def from_python(cls, stream, value): header_class = cls.build_header() header = header_class() header.prop_code = cls.prop_code - return bytes(header) + cls.prop_data_class.from_python(value) + stream.write(bytes(header)) + cls.prop_data_class.from_python(stream, value) + + @classmethod + async def from_python_async(cls, stream, value): + return cls.from_python(stream, value) + + 
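+
+# Illustrative note: the timeout-flavoured properties below (for example
+# PropDefaultLockTimeout and PropRebalanceTimeout) validate that the supplied
+# value is a non-negative integer before delegating to PropBase, e.g.
+# (hypothetical value):
+#
+#     PropDefaultLockTimeout.from_python(stream, 30000)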
+class TimeoutProp(PropBase): + prop_data_class = Long + + @classmethod + def from_python(cls, stream, value: int): + if not isinstance(value, int) or value < 0: + raise ValueError(f'Timeout value should be a non-negative integer, {value} passed instead') + return super().from_python(stream, value) + + @classmethod + async def from_python_async(cls, stream, value): + return cls.from_python(stream, value) class PropName(PropBase): @@ -212,9 +243,8 @@ class PropRebalanceDelay(PropBase): prop_data_class = Long -class PropRebalanceTimeout(PropBase): +class PropRebalanceTimeout(TimeoutProp): prop_code = PROP_REBALANCE_TIMEOUT - prop_data_class = Long class PropRebalanceBatchSize(PropBase): @@ -247,9 +277,8 @@ class PropCacheKeyConfiguration(PropBase): prop_data_class = CacheKeyConfiguration -class PropDefaultLockTimeout(PropBase): +class PropDefaultLockTimeout(TimeoutProp): prop_code = PROP_DEFAULT_LOCK_TIMEOUT - prop_data_class = Long class PropMaxConcurrentAsyncOperation(PropBase): @@ -272,16 +301,21 @@ class PropStatisticsEnabled(PropBase): prop_data_class = Bool +class PropExpiryPolicy(PropBase): + prop_code = PROP_EXPIRY_POLICY + prop_data_class = ExpiryPolicy + + class AnyProperty(PropBase): @classmethod - def from_python(cls, value): + def from_python(cls, stream, value): raise Exception( 'You must choose a certain type ' 'for your cache configuration property' ) @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - prop_data_class = prop_map(ctype_object.prop_code) - return prop_data_class.to_python(ctype_object.data, *args, **kwargs) + def to_python(cls, ctypes_object, **kwargs): + prop_data_class = prop_map(ctypes_object.prop_code) + return prop_data_class.to_python(ctypes_object.data, **kwargs) diff --git a/tests/test_get_names.py b/pyignite/datatypes/cluster_state.py similarity index 63% rename from tests/test_get_names.py rename to pyignite/datatypes/cluster_state.py index 0e50f3d..def5591 100644 --- a/tests/test_get_names.py +++ b/pyignite/datatypes/cluster_state.py @@ -13,18 +13,20 @@ # See the License for the specific language governing permissions and # limitations under the License. -from pyignite.api import cache_create, cache_get_names +from enum import IntEnum -def test_get_names(client): +class ClusterState(IntEnum): + """ + Cluster states. + """ - bucket_names = ['my_bucket', 'my_bucket_2', 'my_bucket_3'] - for name in bucket_names: - cache_create(client, name) + #: Cluster deactivated. Cache operations aren't allowed. + INACTIVE = 0 - result = cache_get_names(client) - assert result.status == 0 - assert type(result.value) == list - assert len(result.value) >= len(bucket_names) - for i, name in enumerate(bucket_names): - assert name in result.value + #: Cluster activated. All cache operations are allowed. + ACTIVE = 1 + + #: Cluster activated. Cache read operations are allowed, cache data change + #: operations aren't allowed. + ACTIVE_READ_ONLY = 2 diff --git a/pyignite/datatypes/complex.py b/pyignite/datatypes/complex.py index 87e5130..cddf743 100644 --- a/pyignite/datatypes/complex.py +++ b/pyignite/datatypes/complex.py @@ -12,106 +12,132 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
- +import asyncio from collections import OrderedDict import ctypes -import inspect +from io import SEEK_CUR +from typing import Optional from pyignite.constants import * from pyignite.exceptions import ParseError -from pyignite.utils import entity_id, hashcode, is_hinted from .base import IgniteDataType -from .internal import AnyDataObject, infer_from_python +from .internal import AnyDataObject, Struct, infer_from_python, infer_from_python_async from .type_codes import * +from .type_ids import * +from .type_names import * +from .null_object import Null, Nullable +from ..stream import AioBinaryStream, BinaryStream - -__all__ = [ - 'Map', 'ObjectArrayObject', 'CollectionObject', 'MapObject', - 'WrappedDataObject', 'BinaryObject', -] +__all__ = ['Map', 'ObjectArrayObject', 'CollectionObject', 'MapObject', 'WrappedDataObject', 'BinaryObject'] -class ObjectArrayObject(IgniteDataType): +class ObjectArrayObject(Nullable): """ - Array of objects of any type. Its Python representation is - tuple(type_id, iterable of any type). + Array of Ignite objects of any consistent type. Its Python representation + is tuple(type_id, iterable of any type). The only type ID that makes sense + in Python client is :py:attr:`~OBJECT`, that corresponds directly to + the root object type in Java type hierarchy (`java.lang.Object`). """ + OBJECT = -1 + + _type_name = NAME_OBJ_ARR + _type_id = TYPE_OBJ_ARR + _fields = [ + ('type_code', ctypes.c_byte), + ('type_id', ctypes.c_int), + ('length', ctypes.c_int) + ] type_code = TC_OBJECT_ARRAY - type_or_id_name = 'type_id' @classmethod - def build_header(cls): - return type( - cls.__name__+'Header', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('type_code', ctypes.c_byte), - ('type_id', ctypes.c_int), - ('length', ctypes.c_int), - ], - } - ) + def parse_not_null(cls, stream): + length, fields = cls.__get_length(stream), [] + + for i in range(length): + c_type = AnyDataObject.parse(stream) + fields.append((f'element_{i}', c_type)) + + return cls.__build_final_class(fields) @classmethod - def parse(cls, client: 'Client'): - header_class = cls.build_header() - buffer = client.recv(ctypes.sizeof(header_class)) - header = header_class.from_buffer_copy(buffer) - fields = [] + async def parse_not_null_async(cls, stream): + length, fields = cls.__get_length(stream), [] + for i in range(length): + c_type = await AnyDataObject.parse_async(stream) + fields.append((f'element_{i}', c_type)) - for i in range(header.length): - c_type, buffer_fragment = AnyDataObject.parse(client) - buffer += buffer_fragment - fields.append(('element_{}'.format(i), c_type)) + return cls.__build_final_class(fields) - final_class = type( + @classmethod + def __get_length(cls, stream): + int_sz, b_sz = ctypes.sizeof(ctypes.c_int), ctypes.sizeof(ctypes.c_byte) + length = int.from_bytes( + stream.slice(stream.tell() + b_sz + int_sz, int_sz), + byteorder=PROTOCOL_BYTE_ORDER + ) + stream.seek(2 * int_sz + b_sz, SEEK_CUR) + return length + + @classmethod + def __build_final_class(cls, fields): + return type( cls.__name__, - (header_class,), + (ctypes.LittleEndianStructure,), { '_pack_': 1, - '_fields_': fields, + '_fields_': cls._fields + fields, } ) - return final_class, buffer @classmethod - def to_python(cls, ctype_object, *args, **kwargs): + def to_python_not_null(cls, ctypes_object, **kwargs): result = [] - for i in range(ctype_object.length): + for i in range(ctypes_object.length): result.append( AnyDataObject.to_python( - getattr(ctype_object, 'element_{}'.format(i)), - *args, 
**kwargs + getattr(ctypes_object, f'element_{i}'), **kwargs ) ) - return getattr(ctype_object, cls.type_or_id_name), result + return ctypes_object.type_id, result @classmethod - def from_python(cls, value): - type_or_id, value = value - header_class = cls.build_header() - header = header_class() - header.type_code = int.from_bytes( - cls.type_code, - byteorder=PROTOCOL_BYTE_ORDER - ) + async def to_python_not_null_async(cls, ctypes_object, **kwargs): + result = [ + await AnyDataObject.to_python_async( + getattr(ctypes_object, f'element_{i}'), **kwargs + ) + for i in range(ctypes_object.length)] + return ctypes_object.type_id, result + + @classmethod + def from_python_not_null(cls, stream, value, *args, **kwargs): + value = cls.__write_header(stream, value) + for x in value: + infer_from_python(stream, x) + + @classmethod + async def from_python_not_null_async(cls, stream, value, *args, **kwargs): + value = cls.__write_header(stream, value) + for x in value: + await infer_from_python_async(stream, x) + + @classmethod + def __write_header(cls, stream, value): + type_id, value = value try: length = len(value) except TypeError: value = [value] length = 1 - header.length = length - setattr(header, cls.type_or_id_name, type_or_id) - buffer = bytes(header) - for x in value: - buffer += infer_from_python(x) - return buffer + stream.write(cls.type_code) + stream.write(type_id.to_bytes(ctypes.sizeof(ctypes.c_int), byteorder=PROTOCOL_BYTE_ORDER, signed=True)) + stream.write(length.to_bytes(ctypes.sizeof(ctypes.c_int), byteorder=PROTOCOL_BYTE_ORDER)) + return value -class WrappedDataObject(IgniteDataType): + +class WrappedDataObject(Nullable): """ One or more binary objects can be wrapped in an array. This allows reading, storing, passing and writing objects efficiently without understanding @@ -123,202 +149,391 @@ class WrappedDataObject(IgniteDataType): type_code = TC_ARRAY_WRAPPED_OBJECTS @classmethod - def build_header(cls): - return type( - cls.__name__+'Header', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('type_code', ctypes.c_byte), - ('length', ctypes.c_int), - ], - } + def parse_not_null(cls, stream): + int_sz, b_sz = ctypes.sizeof(ctypes.c_int), ctypes.sizeof(ctypes.c_byte) + length = int.from_bytes( + stream.slice(stream.tell() + b_sz, int_sz), + byteorder=PROTOCOL_BYTE_ORDER ) - @classmethod - def parse(cls, client: 'Client'): - header_class = cls.build_header() - buffer = client.recv(ctypes.sizeof(header_class)) - header = header_class.from_buffer_copy(buffer) - final_class = type( cls.__name__, - (header_class,), + (ctypes.LittleEndianStructure,), { '_pack_': 1, '_fields_': [ - ('payload', ctypes.c_byte*header.length), + ('type_code', ctypes.c_byte), + ('length', ctypes.c_int), + ('payload', ctypes.c_byte * length), ('offset', ctypes.c_int), ], } ) - buffer += client.recv( - ctypes.sizeof(final_class) - ctypes.sizeof(header_class) - ) - return final_class, buffer + + stream.seek(ctypes.sizeof(final_class), SEEK_CUR) + return final_class @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - return bytes(ctype_object.payload), ctype_object.offset + def to_python_not_null(cls, ctypes_object, *args, **kwargs): + return bytes(ctypes_object.payload), ctypes_object.offset @classmethod - def from_python(cls, value): + def from_python_not_null(cls, stream, value, *args, **kwargs): raise ParseError('Send unwrapped data.') -class CollectionObject(ObjectArrayObject): +class CollectionObject(Nullable): """ - Just like object array, but contains 
deserialization type hint instead of - type id. This hint is also useless in Python, because the list type along - covers all the use cases. - - Also represented as tuple(type_id, iterable of any type) in Python. + Similar to object array, but contains platform-agnostic deserialization + type hint instead of type ID. + + Represented as tuple(hint, iterable of any type) in Python. Hints are: + + * :py:attr:`~pyignite.datatypes.complex.CollectionObject.USER_SET` − + a set of unique Ignite thin data objects. The exact Java type of a set + is undefined, + * :py:attr:`~pyignite.datatypes.complex.CollectionObject.USER_COL` − + a collection of Ignite thin data objects. The exact Java type + of a collection is undefined, + * :py:attr:`~pyignite.datatypes.complex.CollectionObject.ARR_LIST` − + represents the `java.util.ArrayList` type, + * :py:attr:`~pyignite.datatypes.complex.CollectionObject.LINKED_LIST` − + represents the `java.util.LinkedList` type, + * :py:attr:`~pyignite.datatypes.complex.CollectionObject.HASH_SET`− + represents the `java.util.HashSet` type, + * :py:attr:`~pyignite.datatypes.complex.CollectionObject.LINKED_HASH_SET` − + represents the `java.util.LinkedHashSet` type, + * :py:attr:`~pyignite.datatypes.complex.CollectionObject.SINGLETON_LIST` − + represents the return type of the `java.util.Collection.singletonList` + method. + + It is safe to say that `USER_SET` (`set` in Python) and `USER_COL` (`list`) + can cover all the imaginable use cases from Python perspective. """ + USER_SET = -1 + USER_COL = 0 + ARR_LIST = 1 + LINKED_LIST = 2 + HASH_SET = 3 + LINKED_HASH_SET = 4 + SINGLETON_LIST = 5 + + _type_name = NAME_COL + _type_id = TYPE_COL + _header_class = None type_code = TC_COLLECTION - type_or_id_name = 'type' - pythonic = list - default = [] @classmethod - def build_header(cls): + def parse_not_null(cls, stream): + fields, length = cls.__parse_header(stream) + + for i in range(length): + c_type = AnyDataObject.parse(stream) + fields.append((f'element_{i}', c_type)) + + return cls.__build_final_class(fields) + + @classmethod + async def parse_not_null_async(cls, stream): + fields, length = cls.__parse_header(stream) + + for i in range(length): + c_type = await AnyDataObject.parse_async(stream) + fields.append((f'element_{i}', c_type)) + + return cls.__build_final_class(fields) + + @classmethod + def __parse_header(cls, stream): + int_sz, b_sz = ctypes.sizeof(ctypes.c_int), ctypes.sizeof(ctypes.c_byte) + header_fields = [('type_code', ctypes.c_byte), ('length', ctypes.c_int), ('type', ctypes.c_byte)] + length = int.from_bytes( + stream.slice(stream.tell() + b_sz, int_sz), + byteorder=PROTOCOL_BYTE_ORDER + ) + stream.seek(int_sz + 2 * b_sz, SEEK_CUR) + return header_fields, length + + @classmethod + def __build_final_class(cls, fields): return type( - cls.__name__+'Header', + cls.__name__, (ctypes.LittleEndianStructure,), { '_pack_': 1, - '_fields_': [ - ('type_code', ctypes.c_byte), - ('length', ctypes.c_int), - ('type', ctypes.c_byte), - ], + '_fields_': fields, } ) + @classmethod + def to_python_not_null(cls, ctypes_object, *args, **kwargs): + result = [ + AnyDataObject.to_python(getattr(ctypes_object, f'element_{i}'), **kwargs) + for i in range(ctypes_object.length) + ] + return ctypes_object.type, result -class Map(IgniteDataType): - """ - Dictionary type, payload-only. 
+ @classmethod + async def to_python_not_null_async(cls, ctypes_object, *args, **kwargs): + result_coro = [ + AnyDataObject.to_python_async(getattr(ctypes_object, f'element_{i}'), **kwargs) + for i in range(ctypes_object.length) + ] - Ignite does not track the order of key-value pairs in its caches, hence - the ordinary Python dict type, not the collections.OrderedDict. - """ + return ctypes_object.type, await asyncio.gather(*result_coro) + + @classmethod + def from_python_not_null(cls, stream, value, *args, **kwargs): + type_id, value = value + try: + length = len(value) + except TypeError: + value = [value] + length = 1 + + cls.__write_header(stream, type_id, length) + for x in value: + infer_from_python(stream, x) + + @classmethod + async def from_python_not_null_async(cls, stream, value, *args, **kwargs): + type_id, value = value + try: + length = len(value) + except TypeError: + value = [value] + length = 1 + + cls.__write_header(stream, type_id, length) + for x in value: + await infer_from_python_async(stream, x) + + @classmethod + def __write_header(cls, stream, type_id, length): + stream.write(cls.type_code) + stream.write(length.to_bytes( + ctypes.sizeof(ctypes.c_int), byteorder=PROTOCOL_BYTE_ORDER + )) + stream.write(type_id.to_bytes( + length=ctypes.sizeof(ctypes.c_byte), + byteorder=PROTOCOL_BYTE_ORDER, + signed=True) + ) + + +class _MapBase: HASH_MAP = 1 LINKED_HASH_MAP = 2 @classmethod - def build_header(cls): - return type( - cls.__name__+'Header', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('length', ctypes.c_int), - ], - } - ) + def _parse_header(cls, stream): + raise NotImplementedError @classmethod - def parse(cls, client: 'Client'): - header_class = cls.build_header() - buffer = client.recv(ctypes.sizeof(header_class)) - header = header_class.from_buffer_copy(buffer) - fields = [] + def _parse(cls, stream): + fields, length = cls._parse_header(stream) + for i in range(length << 1): + c_type = AnyDataObject.parse(stream) + fields.append((f'element_{i}', c_type)) + return cls.__build_final_class(fields) - for i in range(header.length << 1): - c_type, buffer_fragment = AnyDataObject.parse(client) - buffer += buffer_fragment - fields.append(('element_{}'.format(i), c_type)) + @classmethod + async def _parse_async(cls, stream): + fields, length = cls._parse_header(stream) + for i in range(length << 1): + c_type = await AnyDataObject.parse_async(stream) + fields.append((f'element_{i}', c_type)) - final_class = type( + return cls.__build_final_class(fields) + + @classmethod + def __build_final_class(cls, fields): + return type( cls.__name__, - (header_class,), + (ctypes.LittleEndianStructure,), { '_pack_': 1, '_fields_': fields, } ) - return final_class, buffer @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - map_type = getattr(ctype_object, 'type', cls.HASH_MAP) - result = OrderedDict() if map_type == cls.LINKED_HASH_MAP else {} + def _to_python(cls, ctypes_object, **kwargs): + map_cls = cls.__get_map_class(ctypes_object) - for i in range(0, ctype_object.length << 1, 2): - k = AnyDataObject.to_python( - getattr(ctype_object, 'element_{}'.format(i)), - *args, **kwargs - ) - v = AnyDataObject.to_python( - getattr(ctype_object, 'element_{}'.format(i + 1)), - *args, **kwargs - ) + result = map_cls() + for i in range(0, ctypes_object.length << 1, 2): + k = AnyDataObject.to_python(getattr(ctypes_object, f'element_{i}'), **kwargs) + v = AnyDataObject.to_python(getattr(ctypes_object, f'element_{i + 1}'), **kwargs) result[k] = v return 
result @classmethod - def from_python(cls, value, type_id=None): - header_class = cls.build_header() - header = header_class() - length = len(value) - header.length = length - if hasattr(header, 'type_code'): - header.type_code = int.from_bytes( - cls.type_code, - byteorder=PROTOCOL_BYTE_ORDER - ) - if hasattr(header, 'type'): - header.type = type_id - buffer = bytes(header) + async def _to_python_async(cls, ctypes_object, **kwargs): + map_cls = cls.__get_map_class(ctypes_object) + + kv_pairs_coro = [ + asyncio.gather( + AnyDataObject.to_python_async( + getattr(ctypes_object, f'element_{i}'), **kwargs + ), + AnyDataObject.to_python_async( + getattr(ctypes_object, f'element_{i + 1}'), **kwargs + ) + ) for i in range(0, ctypes_object.length << 1, 2) + ] + + return map_cls(await asyncio.gather(*kv_pairs_coro)) + + @classmethod + def __get_map_class(cls, ctypes_object): + map_type = getattr(ctypes_object, 'type', cls.HASH_MAP) + return OrderedDict if map_type == cls.LINKED_HASH_MAP else dict + + @classmethod + def _from_python(cls, stream, value, type_id=None): + cls._write_header(stream, type_id, len(value)) + for k, v in value.items(): + infer_from_python(stream, k) + infer_from_python(stream, v) + @classmethod + async def _from_python_async(cls, stream, value, type_id): + cls._write_header(stream, type_id, len(value)) for k, v in value.items(): - buffer += infer_from_python(k) - buffer += infer_from_python(v) - return buffer + await infer_from_python_async(stream, k) + await infer_from_python_async(stream, v) + + @classmethod + def _write_header(cls, stream, type_id, length): + raise NotImplementedError -class MapObject(Map): +class Map(IgniteDataType, _MapBase): """ - This is a dictionary type. Type conversion hint can be a `HASH_MAP` - (ordinary dict) or `LINKED_HASH_MAP` (collections.OrderedDict). + Dictionary type, payload-only. - Keys and values in map are independent data objects, but `count` - counts pairs. Very annoying. + Ignite does not track the order of key-value pairs in its caches, hence + the ordinary Python dict type, not the collections.OrderedDict. 
""" - type_code = TC_MAP - pythonic = dict - default = {} + _type_name = NAME_MAP + _type_id = TYPE_MAP @classmethod - def build_header(cls): - return type( - cls.__name__+'Header', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('type_code', ctypes.c_byte), - ('length', ctypes.c_int), - ('type', ctypes.c_byte), - ], - } + def parse(cls, stream): + return cls._parse(stream) + + @classmethod + async def parse_async(cls, stream): + return await cls._parse_async(stream) + + @classmethod + def _parse_header(cls, stream): + int_sz = ctypes.sizeof(ctypes.c_int) + length = int.from_bytes( + stream.slice(stream.tell(), int_sz), + byteorder=PROTOCOL_BYTE_ORDER ) + stream.seek(int_sz, SEEK_CUR) + return [('length', ctypes.c_int)], length + + @classmethod + def to_python(cls, ctypes_object, **kwargs): + return cls._to_python(ctypes_object, **kwargs) + + @classmethod + async def to_python_async(cls, ctypes_object, **kwargs): + return await cls._to_python_async(ctypes_object, **kwargs) + + @classmethod + def from_python(cls, stream, value, type_id=None): + return cls._from_python(stream, value, type_id) + + @classmethod + async def from_python_async(cls, stream, value, type_id=None): + return await cls._from_python_async(stream, value, type_id) @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - return ctype_object.type, super().to_python( - ctype_object, *args, **kwargs + def _write_header(cls, stream, type_id, length): + stream.write(length.to_bytes( + length=ctypes.sizeof(ctypes.c_int), + byteorder=PROTOCOL_BYTE_ORDER + )) + + +class MapObject(Nullable, _MapBase): + """ + This is a dictionary type. + + Represented as tuple(type_id, value). + + Type ID can be a :py:attr:`~HASH_MAP` (corresponds to an ordinary `dict` + in Python) or a :py:attr:`~LINKED_HASH_MAP` (`collections.OrderedDict`). 
+ """ + _type_name = NAME_MAP + _type_id = TYPE_MAP + type_code = TC_MAP + + @classmethod + def parse_not_null(cls, stream): + return cls._parse(stream) + + @classmethod + async def parse_not_null_async(cls, stream): + return await cls._parse_async(stream) + + @classmethod + def _parse_header(cls, stream): + int_sz, b_sz = ctypes.sizeof(ctypes.c_int), ctypes.sizeof(ctypes.c_byte) + length = int.from_bytes( + stream.slice(stream.tell() + b_sz, int_sz), + byteorder=PROTOCOL_BYTE_ORDER ) + stream.seek(int_sz + 2 * b_sz, SEEK_CUR) + fields = [('type_code', ctypes.c_byte), ('length', ctypes.c_int), ('type', ctypes.c_byte)] + return fields, length + + @classmethod + def to_python_not_null(cls, ctypes_object, **kwargs): + return ctypes_object.type, cls._to_python(ctypes_object, **kwargs) @classmethod - def from_python(cls, value): + async def to_python_not_null_async(cls, ctypes_object, **kwargs): + return ctypes_object.type, await cls._to_python_async(ctypes_object, **kwargs) + + @classmethod + def from_python_not_null(cls, stream, value, **kwargs): type_id, value = value - return super().from_python(value, type_id) + if value is None: + Null.from_python(stream) + else: + cls._from_python(stream, value, type_id) + @classmethod + async def from_python_not_null_async(cls, stream, value, **kwargs): + type_id, value = value + if value is None: + Null.from_python(stream) + else: + await cls._from_python_async(stream, value, type_id) -class BinaryObject(IgniteDataType): + @classmethod + def _write_header(cls, stream, type_id, length): + stream.write(cls.type_code) + stream.write(length.to_bytes( + length=ctypes.sizeof(ctypes.c_int), + byteorder=PROTOCOL_BYTE_ORDER) + ) + stream.write(type_id.to_bytes( + length=ctypes.sizeof(ctypes.c_byte), + byteorder=PROTOCOL_BYTE_ORDER, + signed=True) + ) + + +class BinaryObject(Nullable): + _type_id = TYPE_BINARY_OBJ + _header_class = None type_code = TC_COMPLEX_OBJECT USER_TYPE = 0x0001 @@ -329,24 +544,45 @@ class BinaryObject(IgniteDataType): COMPACT_FOOTER = 0x0020 @classmethod - def build_header(cls): - return type( - cls.__name__, - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('type_code', ctypes.c_byte), - ('version', ctypes.c_byte), - ('flags', ctypes.c_short), - ('type_id', ctypes.c_int), - ('hash_code', ctypes.c_int), - ('length', ctypes.c_int), - ('schema_id', ctypes.c_int), - ('schema_offset', ctypes.c_int), - ], - } - ) + def hashcode(cls, value: object, client: Optional['Client'] = None) -> int: + # binary objects's hashcode implementation is special in the sense + # that you need to fully serialize the object to calculate + # its hashcode + if not value._hashcode and client: + with BinaryStream(client) as stream: + value._from_python(stream, save_to_buf=True) + + return value._hashcode + + @classmethod + async def hashcode_async(cls, value: object, client: Optional['AioClient'] = None) -> int: + if not value._hashcode and client: + with AioBinaryStream(client) as stream: + await value._from_python_async(stream, save_to_buf=True) + + return value._hashcode + + @classmethod + def get_header_class(cls): + if not cls._header_class: + cls._header_class = type( + cls.__name__, + (ctypes.LittleEndianStructure,), + { + '_pack_': 1, + '_fields_': [ + ('type_code', ctypes.c_byte), + ('version', ctypes.c_byte), + ('flags', ctypes.c_short), + ('type_id', ctypes.c_int), + ('hash_code', ctypes.c_int), + ('length', ctypes.c_int), + ('schema_id', ctypes.c_int), + ('schema_offset', ctypes.c_int), + ], + } + ) + return cls._header_class 
@classmethod def offset_c_type(cls, flags: int): @@ -372,36 +608,50 @@ def schema_type(cls, flags: int): }, ) - @staticmethod - def get_dataclass(client: 'Client', header) -> OrderedDict: - # get field names from outer space - temp_conn = client.clone() - result = temp_conn.query_binary_type(header.type_id, header.schema_id) - temp_conn.close() - if not result: - raise ParseError('Binary type is not registered') - return result - @classmethod - def parse(cls, client: 'Client'): - from pyignite.datatypes import Struct + def parse_not_null(cls, stream): + header, header_class = cls.__parse_header(stream) + + # ignore full schema, always retrieve fields' types and order + # from complex types registry + data_class = stream.get_dataclass(header) + object_fields_struct = cls.__build_object_fields_struct(data_class) + object_fields = object_fields_struct.parse(stream) - header_class = cls.build_header() - buffer = client.recv(ctypes.sizeof(header_class)) - header = header_class.from_buffer_copy(buffer) + return cls.__build_final_class(stream, header, header_class, object_fields, + len(object_fields_struct.fields)) + + @classmethod + async def parse_not_null_async(cls, stream): + header, header_class = cls.__parse_header(stream) # ignore full schema, always retrieve fields' types and order # from complex types registry - data_class = cls.get_dataclass(client, header) + data_class = await stream.get_dataclass(header) + object_fields_struct = cls.__build_object_fields_struct(data_class) + object_fields = await object_fields_struct.parse_async(stream) + + return cls.__build_final_class(stream, header, header_class, object_fields, + len(object_fields_struct.fields)) + + @classmethod + def __parse_header(cls, stream): + header_class = cls.get_header_class() + header = stream.read_ctype(header_class) + stream.seek(ctypes.sizeof(header_class), SEEK_CUR) + return header, header_class + + @staticmethod + def __build_object_fields_struct(data_class): fields = data_class.schema.items() - object_fields_struct = Struct(fields) - object_fields, object_fields_buffer = object_fields_struct.parse(client) - buffer += object_fields_buffer - final_class_fields = [('object_fields', object_fields)] + return Struct(fields) + @classmethod + def __build_final_class(cls, stream, header, header_class, object_fields, fields_len): + final_class_fields = [('object_fields', object_fields)] if header.flags & cls.HAS_SCHEMA: - schema = cls.schema_type(header.flags) * len(fields) - buffer += client.recv(ctypes.sizeof(schema)) + schema = cls.schema_type(header.flags) * fields_len + stream.seek(ctypes.sizeof(schema), SEEK_CUR) final_class_fields.append(('schema', schema)) final_class = type( @@ -413,114 +663,75 @@ def parse(cls, client: 'Client'): } ) # register schema encoding approach - client.compact_footer = bool(header.flags & cls.COMPACT_FOOTER) - return final_class, buffer + stream.compact_footer = bool(header.flags & cls.COMPACT_FOOTER) + return final_class @classmethod - def to_python(cls, ctype_object, client: 'Client'=None, *args, **kwargs): - + def to_python_not_null(cls, ctypes_object, client: 'Client' = None, **kwargs): + type_id = ctypes_object.type_id if not client: - raise ParseError( - 'Can not query binary type {}'.format(ctype_object.type_id) - ) + raise ParseError(f'Can not query binary type {type_id}') - data_class = client.query_binary_type( - ctype_object.type_id, - ctype_object.schema_id - ) + data_class = client.query_binary_type(type_id, ctypes_object.schema_id) result = data_class() + result.version = 
ctypes_object.version - result.version = ctype_object.version for field_name, field_type in data_class.schema.items(): setattr( result, field_name, field_type.to_python( - getattr(ctype_object.object_fields, field_name), - client, *args, **kwargs + getattr(ctypes_object.object_fields, field_name), client=client, **kwargs ) ) return result @classmethod - def from_python(cls, value: object): - - def find_client(): - """ - A nice hack. Extracts the nearest `Client` instance from the - call stack. - """ - from pyignite import Client - - frame = None - try: - for rec in inspect.stack()[2:]: - frame = rec[0] - code = frame.f_code - for varname in code.co_varnames: - suspect = frame.f_locals[varname] - if isinstance(suspect, Client): - return suspect - finally: - del frame - - compact_footer = True # this is actually used - client = find_client() - if client: - # if no client can be found, the class of the `value` is discarded - # and the new dataclass is automatically registered later on - client.register_binary_type(value.__class__) - compact_footer = client.compact_footer - else: - raise Warning( - 'Can not register binary type {}'.format(value.type_name) - ) + async def to_python_not_null_async(cls, ctypes_object, client: 'AioClient' = None, **kwargs): + type_id = ctypes_object.type_id + if not client: + raise ParseError(f'Can not query binary type {type_id}') - # prepare header - header_class = cls.build_header() - header = header_class() - header.type_code = int.from_bytes( - cls.type_code, - byteorder=PROTOCOL_BYTE_ORDER - ) + data_class = await client.query_binary_type(type_id, ctypes_object.schema_id) + result = data_class() + result.version = ctypes_object.version - header.flags = cls.USER_TYPE | cls.HAS_SCHEMA - if compact_footer: - header.flags |= cls.COMPACT_FOOTER - header.version = value.version - header.type_id = value.type_id - header.schema_id = value.schema_id - - # create fields and calculate offsets - field_buffer = b'' - offsets = [ctypes.sizeof(header_class)] - schema_items = list(value.schema.items()) - for field_name, field_type in schema_items: - partial_buffer = field_type.from_python( - getattr( - value, field_name, getattr(field_type, 'default', None) + field_values = await asyncio.gather( + *[ + field_type.to_python_async( + getattr(ctypes_object.object_fields, field_name), client=client, **kwargs ) - ) - offsets.append(max(offsets) + len(partial_buffer)) - field_buffer += partial_buffer - - offsets = offsets[:-1] - - # create footer - if max(offsets, default=0) < 255: - header.flags |= cls.OFFSET_ONE_BYTE - elif max(offsets) < 65535: - header.flags |= cls.OFFSET_TWO_BYTES - schema_class = cls.schema_type(header.flags) * len(offsets) - schema = schema_class() - if compact_footer: - for i, offset in enumerate(offsets): - schema[i] = offset - else: - for i, offset in enumerate(offsets): - schema[i].field_id = entity_id(schema_items[i][0]) - schema[i].offset = offset - # calculate size and hash code - header.schema_offset = ctypes.sizeof(header_class) + len(field_buffer) - header.length = header.schema_offset + ctypes.sizeof(schema_class) - header.hash_code = hashcode(field_buffer + bytes(schema)) - - return bytes(header) + field_buffer + bytes(schema) + for field_name, field_type in data_class.schema.items() + ] + ) + + for i, field_name in enumerate(data_class.schema.keys()): + setattr(result, field_name, field_values[i]) + + return result + + @classmethod + def __get_type_id(cls, ctypes_object, client): + type_id = getattr(ctypes_object, "type_id", None) + if 
type_id: + if not client: + raise ParseError(f'Can not query binary type {type_id}') + return type_id + return None + + @classmethod + def from_python_not_null(cls, stream, value, **kwargs): + if cls.__write_fast_path(stream, value): + stream.register_binary_type(value.__class__) + value._from_python(stream) + + @classmethod + async def from_python_not_null_async(cls, stream, value, **kwargs): + if cls.__write_fast_path(stream, value): + await stream.register_binary_type(value.__class__) + await value._from_python_async(stream) + + @classmethod + def __write_fast_path(cls, stream, value): + if getattr(value, '_buffer', None): + stream.write(value._buffer) + return False + return True diff --git a/pyignite/datatypes/expiry_policy.py b/pyignite/datatypes/expiry_policy.py new file mode 100644 index 0000000..95e37db --- /dev/null +++ b/pyignite/datatypes/expiry_policy.py @@ -0,0 +1,115 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import ctypes +import math +from datetime import timedelta +from io import SEEK_CUR +from typing import Union + +import attr + +from pyignite.constants import PROTOCOL_BYTE_ORDER + + +def _positive(_, attrib, value): + if isinstance(value, timedelta): + value = value.total_seconds() * 1000 + + if value < 0 and value not in [ExpiryPolicy.UNCHANGED, ExpiryPolicy.ETERNAL]: + raise ValueError(f"'{attrib.name}' value must not be negative") + + +def _write_duration(stream, value): + if isinstance(value, timedelta): + value = math.floor(value.total_seconds() * 1000) + + stream.write(value.to_bytes(8, byteorder=PROTOCOL_BYTE_ORDER, signed=True)) + + +@attr.s +class ExpiryPolicy: + """ + Set expiry policy for the cache. + """ + #: Set TTL unchanged. + UNCHANGED = -2 + + #: Set TTL eternal. 
+ ETERNAL = -1 + + #: Set TTL for create in milliseconds or :py:class:`~time.timedelta` + create = attr.ib(kw_only=True, default=UNCHANGED, type=Union[int, timedelta], + validator=[attr.validators.instance_of((int, timedelta)), _positive]) + + #: Set TTL for update in milliseconds or :py:class:`~time.timedelta` + update = attr.ib(kw_only=True, default=UNCHANGED, type=Union[int, timedelta], + validator=[attr.validators.instance_of((int, timedelta)), _positive]) + + #: Set TTL for access in milliseconds or :py:class:`~time.timedelta` + access = attr.ib(kw_only=True, default=UNCHANGED, type=Union[int, timedelta], + validator=[attr.validators.instance_of((int, timedelta)), _positive]) + + class _CType(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('not_null', ctypes.c_byte), + ('create', ctypes.c_longlong), + ('update', ctypes.c_longlong), + ('access', ctypes.c_longlong) + ] + + @classmethod + def parse(cls, stream): + init = stream.tell() + not_null = int.from_bytes(stream.slice(init, 1), byteorder=PROTOCOL_BYTE_ORDER) + if not_null: + stream.seek(ctypes.sizeof(ExpiryPolicy._CType), SEEK_CUR) + return ExpiryPolicy._CType + stream.seek(ctypes.sizeof(ctypes.c_byte), SEEK_CUR) + return ctypes.c_byte + + @classmethod + async def parse_async(cls, stream): + return cls.parse(stream) + + @classmethod + def to_python(cls, ctypes_object, **kwargs): + if ctypes_object == 0: + return None + + return ExpiryPolicy(create=ctypes_object.create, update=ctypes_object.update, access=ctypes_object.access) + + @classmethod + async def to_python_async(cls, ctypes_object, **kwargs): + return cls.to_python(ctypes_object) + + @classmethod + def from_python(cls, stream, value): + if not value: + stream.write(b'\x00') + return + + stream.write(b'\x01') + cls.write_policy(stream, value) + + @classmethod + async def from_python_async(cls, stream, value): + return cls.from_python(stream, value) + + @classmethod + def write_policy(cls, stream, value): + _write_duration(stream, value.create) + _write_duration(stream, value.update) + _write_duration(stream, value.access) diff --git a/pyignite/datatypes/internal.py b/pyignite/datatypes/internal.py index 844e0ef..54d72bf 100644 --- a/pyignite/datatypes/internal.py +++ b/pyignite/datatypes/internal.py @@ -12,26 +12,34 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import asyncio from collections import OrderedDict import ctypes import decimal from datetime import date, datetime, timedelta -from typing import Any, Tuple +from io import SEEK_CUR +from typing import Any, Union, Callable, List import uuid import attr -from pyignite.constants import * +from pyignite.constants import PROTOCOL_BYTE_ORDER from pyignite.exceptions import ParseError from pyignite.utils import is_binary, is_hinted, is_iterable from .type_codes import * -__all__ = ['AnyDataArray', 'AnyDataObject', 'Struct', 'StructArray', 'tc_map'] +__all__ = [ + 'AnyDataArray', 'AnyDataObject', 'Struct', 'StructArray', 'tc_map', 'infer_from_python', 'infer_from_python_async' +] + +from ..stream import READ_BACKWARD + + +_tc_map = {} -def tc_map(key: bytes, _memo_map: dict={}): +def tc_map(key: bytes): """ Returns a default parser/generator class for the given type code. @@ -44,7 +52,8 @@ def tc_map(key: bytes, _memo_map: dict={}): of the “type code-type class” mapping, :return: parser/generator class for the type code. 
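+
+    Illustrative example (``TC_STRING`` maps to the ``String`` parser class
+    in the mapping defined below)::
+
+        parser_class = tc_map(TC_STRING)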
""" - if not _memo_map: + global _tc_map + if not _tc_map: from pyignite.datatypes import ( Null, ByteObject, ShortObject, IntObject, LongObject, FloatObject, DoubleObject, CharObject, BoolObject, UUIDObject, DateObject, @@ -59,7 +68,7 @@ def tc_map(key: bytes, _memo_map: dict={}): MapObject, BinaryObject, WrappedDataObject, ) - _memo_map = { + _tc_map = { TC_NULL: Null, TC_BYTE: ByteObject, @@ -105,7 +114,37 @@ def tc_map(key: bytes, _memo_map: dict={}): TC_COMPLEX_OBJECT: BinaryObject, TC_ARRAY_WRAPPED_OBJECTS: WrappedDataObject, } - return _memo_map[key] + return _tc_map[key] + + +class Conditional: + def __init__(self, fields: List, predicate1: Callable[[any], bool], + predicate2: Callable[[any], bool], var1, var2): + self.fields = fields + self.predicate1 = predicate1 + self.predicate2 = predicate2 + self.var1 = var1 + self.var2 = var2 + + def parse(self, stream, context): + if self.predicate1(context): + return self.var1.parse(stream) + return self.var2.parse(stream) + + async def parse_async(self, stream, context): + if self.predicate1(context): + return await self.var1.parse_async(stream) + return await self.var2.parse_async(stream) + + def to_python(self, ctypes_object, context, **kwargs): + if self.predicate2(context): + return self.var1.to_python(ctypes_object, **kwargs) + return self.var2.to_python(ctypes_object, **kwargs) + + async def to_python_async(self, ctypes_object, context, **kwargs): + if self.predicate2(context): + return await self.var1.to_python_async(ctypes_object, **kwargs) + return await self.var2.to_python_async(ctypes_object, **kwargs) @attr.s @@ -115,67 +154,82 @@ class StructArray: counter_type = attr.ib(default=ctypes.c_int) defaults = attr.ib(type=dict, default={}) - def build_header_class(self): - return type( - self.__class__.__name__+'Header', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('length', self.counter_type), - ], - }, - ) + def parse(self, stream): + fields, length = self.__parse_header(stream) - def parse(self, client: 'Client'): - buffer = client.recv(ctypes.sizeof(self.counter_type)) - length = int.from_bytes(buffer, byteorder=PROTOCOL_BYTE_ORDER) - fields = [] + for i in range(length): + c_type = Struct(self.following).parse(stream) + fields.append((f'element_{i}', c_type)) + + return self.build_c_type(fields) + + async def parse_async(self, stream): + fields, length = self.__parse_header(stream) for i in range(length): - c_type, buffer_fragment = Struct(self.following).parse(client) - buffer += buffer_fragment - fields.append(('element_{}'.format(i), c_type)) + c_type = await Struct(self.following).parse_async(stream) + fields.append((f'element_{i}', c_type)) + + return self.build_c_type(fields) - data_class = type( + def __parse_header(self, stream): + counter_sz = ctypes.sizeof(self.counter_type) + length = int.from_bytes( + stream.slice(offset=counter_sz), + byteorder=PROTOCOL_BYTE_ORDER + ) + stream.seek(counter_sz, SEEK_CUR) + return [('length', self.counter_type)], length + + @staticmethod + def build_c_type(fields): + return type( 'StructArray', - (self.build_header_class(),), + (ctypes.LittleEndianStructure,), { '_pack_': 1, '_fields_': fields, }, ) - return data_class, buffer + def to_python(self, ctypes_object, **kwargs): + length = getattr(ctypes_object, 'length', 0) + return [ + Struct(self.following, dict_type=dict).to_python(getattr(ctypes_object, f'element_{i}'), **kwargs) + for i in range(length) + ] - def to_python(self, ctype_object, *args, **kwargs): - result = [] - length = 
getattr(ctype_object, 'length', 0) - for i in range(length): - result.append( - Struct( - self.following, dict_type=dict - ).to_python( - getattr(ctype_object, 'element_{}'.format(i)), - *args, **kwargs - ) - ) - return result + async def to_python_async(self, ctypes_object, **kwargs): + length = getattr(ctypes_object, 'length', 0) + result_coro = [ + Struct(self.following, dict_type=dict).to_python_async(getattr(ctypes_object, f'element_{i}'), **kwargs) + for i in range(length) + ] + return await asyncio.gather(*result_coro) + + def from_python(self, stream, value): + self.__write_header(stream, len(value)) + + for v in value: + for default_key, default_value in self.defaults.items(): + v.setdefault(default_key, default_value) + for name, el_class in self.following: + el_class.from_python(stream, v[name]) - def from_python(self, value): - length = len(value) - header_class = self.build_header_class() - header = header_class() - header.length = length - buffer = bytes(header) + async def from_python_async(self, stream, value): + self.__write_header(stream, len(value)) - for i, v in enumerate(value): + for v in value: for default_key, default_value in self.defaults.items(): v.setdefault(default_key, default_value) for name, el_class in self.following: - buffer += el_class.from_python(v[name]) + await el_class.from_python_async(stream, v[name]) - return buffer + def __write_header(self, stream, length): + stream.write( + length.to_bytes(ctypes.sizeof(self.counter_type), + byteorder=PROTOCOL_BYTE_ORDER) + ) @attr.s @@ -185,17 +239,41 @@ class Struct: dict_type = attr.ib(default=OrderedDict) defaults = attr.ib(type=dict, default={}) - def parse(self, client: 'Client') -> Tuple[type, bytes]: - buffer = b'' - fields = [] + def parse(self, stream): + fields, ctx = [], self.__prepare_conditional_ctx() for name, c_type in self.fields: - c_type, buffer_fragment = c_type.parse(client) - buffer += buffer_fragment + is_cond = isinstance(c_type, Conditional) + c_type = c_type.parse(stream, ctx) if is_cond else c_type.parse(stream) + fields.append((name, c_type)) + if name in ctx: + ctx[name] = stream.read_ctype(c_type, direction=READ_BACKWARD) + return self.build_c_type(fields) + + async def parse_async(self, stream): + fields, ctx = [], self.__prepare_conditional_ctx() + + for name, c_type in self.fields: + is_cond = isinstance(c_type, Conditional) + c_type = await c_type.parse_async(stream, ctx) if is_cond else await c_type.parse_async(stream) fields.append((name, c_type)) + if name in ctx: + ctx[name] = stream.read_ctype(c_type, direction=READ_BACKWARD) + + return self.build_c_type(fields) - data_class = type( + def __prepare_conditional_ctx(self): + ctx = {} + for _, c_type in self.fields: + if isinstance(c_type, Conditional): + for name in c_type.fields: + ctx[name] = None + return ctx + + @staticmethod + def build_c_type(fields): + return type( 'Struct', (ctypes.LittleEndianStructure,), { @@ -204,27 +282,54 @@ def parse(self, client: 'Client') -> Tuple[type, bytes]: }, ) - return data_class, buffer - - def to_python(self, ctype_object, *args, **kwargs) -> Any: + def to_python(self, ctypes_object, **kwargs) -> Union[dict, OrderedDict]: result = self.dict_type() for name, c_type in self.fields: + is_cond = isinstance(c_type, Conditional) result[name] = c_type.to_python( - getattr(ctype_object, name), - *args, **kwargs + getattr(ctypes_object, name), + result, + **kwargs + ) if is_cond else c_type.to_python( + getattr(ctypes_object, name), + **kwargs ) return result - def from_python(self, value) -> 
bytes: - buffer = b'' + async def to_python_async(self, ctypes_object, **kwargs) -> Union[dict, OrderedDict]: + result = self.dict_type() + for name, c_type in self.fields: + is_cond = isinstance(c_type, Conditional) - for default_key, default_value in self.defaults.items(): - value.setdefault(default_key, default_value) + if is_cond: + value = await c_type.to_python_async( + getattr(ctypes_object, name), + result, + **kwargs + ) + else: + value = await c_type.to_python_async( + getattr(ctypes_object, name), + **kwargs + ) + result[name] = value + return result + + def from_python(self, stream, value): + self.__set_defaults(value) for name, el_class in self.fields: - buffer += el_class.from_python(value[name]) + el_class.from_python(stream, value[name]) - return buffer + async def from_python_async(self, stream, value): + self.__set_defaults(value) + + for name, el_class in self.fields: + await el_class.from_python_async(stream, value[name]) + + def __set_defaults(self, value): + for default_key, default_value in self.defaults.items(): + value.setdefault(default_key, default_value) class AnyDataObject: @@ -234,6 +339,9 @@ class AnyDataObject: """ _python_map = None _python_array_map = None + _map_obj_type = None + _collection_obj_type = None + _binary_obj_type = None @staticmethod def get_subtype(iterable, allow_none=False): @@ -262,48 +370,65 @@ def get_subtype(iterable, allow_none=False): # if an iterable contains items of more than one non-nullable type, # return None - if all([ - isinstance(x, type_first) - or ((x is None) and allow_none) for x in iterator - ]): + if all(isinstance(x, type_first) or ((x is None) and allow_none) for x in iterator): return type_first @classmethod - def parse(cls, client: 'Client'): - type_code = client.recv(ctypes.sizeof(ctypes.c_byte)) + def parse(cls, stream): + data_class = cls.__data_class_parse(stream) + return data_class.parse(stream) + + @classmethod + async def parse_async(cls, stream): + data_class = cls.__data_class_parse(stream) + return await data_class.parse_async(stream) + + @classmethod + def __data_class_parse(cls, stream): + type_code = stream.slice(offset=ctypes.sizeof(ctypes.c_byte)) try: - data_class = tc_map(type_code) + return tc_map(type_code) except KeyError: raise ParseError('Unknown type code: `{}`'.format(type_code)) - client.prefetch += type_code - return data_class.parse(client) @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - type_code = ctype_object.type_code.to_bytes( + def to_python(cls, ctypes_object, **kwargs): + data_class = cls.__data_class_from_ctype(ctypes_object) + return data_class.to_python(ctypes_object, **kwargs) + + @classmethod + async def to_python_async(cls, ctypes_object, **kwargs): + data_class = cls.__data_class_from_ctype(ctypes_object) + return await data_class.to_python_async(ctypes_object, **kwargs) + + @classmethod + def __data_class_from_ctype(cls, ctypes_object): + type_code = ctypes_object.type_code.to_bytes( ctypes.sizeof(ctypes.c_byte), byteorder=PROTOCOL_BYTE_ORDER ) - data_class = tc_map(type_code) - return data_class.to_python(ctype_object) + return tc_map(type_code) @classmethod - def _init_python_map(cls): + def _init_python_mapping(cls): """ Optimizes Python types→Ignite types map creation for speed. Local imports seem inevitable here. 
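        The resulting lookup tables drive ``map_python_type``; for example
        (illustrative)::

            AnyDataObject.map_python_type(42)               # -> LongObject
            AnyDataObject.map_python_type(b'\x01\x02')      # -> ByteArrayObject
            AnyDataObject.map_python_type([1, 2, 3])        # -> LongArrayObject
            AnyDataObject.map_python_type((1, {'k': 'v'}))  # -> MapObject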
""" from pyignite.datatypes import ( - LongObject, DoubleObject, String, BoolObject, Null, UUIDObject, - DateObject, TimeObject, DecimalObject, + LongObject, DoubleObject, String, BoolObject, Null, UUIDObject, DateObject, TimeObject, + DecimalObject, ByteArrayObject, LongArrayObject, DoubleArrayObject, StringArrayObject, + BoolArrayObject, UUIDArrayObject, DateArrayObject, TimeArrayObject, DecimalArrayObject, + MapObject, CollectionObject, BinaryObject ) cls._python_map = { int: LongObject, float: DoubleObject, str: String, - bytes: String, + bytes: ByteArrayObject, + bytearray: ByteArrayObject, bool: BoolObject, type(None): Null, uuid.UUID: UUIDObject, @@ -313,22 +438,10 @@ def _init_python_map(cls): decimal.Decimal: DecimalObject, } - @classmethod - def _init_python_array_map(cls): - """ - Optimizes Python types→Ignite array types map creation for speed. - """ - from pyignite.datatypes import ( - LongArrayObject, DoubleArrayObject, StringArrayObject, - BoolArrayObject, UUIDArrayObject, DateArrayObject, TimeArrayObject, - DecimalArrayObject, - ) - cls._python_array_map = { int: LongArrayObject, float: DoubleArrayObject, str: StringArrayObject, - bytes: StringArrayObject, bool: BoolArrayObject, uuid.UUID: UUIDArrayObject, datetime: DateArrayObject, @@ -337,31 +450,33 @@ def _init_python_array_map(cls): decimal.Decimal: DecimalArrayObject, } + cls._map_obj_type = MapObject + cls._collection_obj_type = CollectionObject + cls._binary_obj_type = BinaryObject + @classmethod def map_python_type(cls, value): - from pyignite.datatypes import ( - MapObject, ObjectArrayObject, BinaryObject, - ) - - if cls._python_map is None: - cls._init_python_map() - if cls._python_array_map is None: - cls._init_python_array_map() + if cls._python_map is None or cls._python_array_map is None: + cls._init_python_mapping() value_type = type(value) - if is_iterable(value) and value_type is not str: + + if value_type in cls._python_map: + return cls._python_map[value_type] + + if is_iterable(value) and value_type not in (str, bytearray, bytes): value_subtype = cls.get_subtype(value) if value_subtype in cls._python_array_map: return cls._python_array_map[value_subtype] - # a little heuristics (order may be important) + # a little heuristics (order is important) if all([ value_subtype is None, len(value) == 2, isinstance(value[0], int), isinstance(value[1], dict), ]): - return MapObject + return cls._map_obj_type if all([ value_subtype is None, @@ -369,38 +484,57 @@ def map_python_type(cls, value): isinstance(value[0], int), is_iterable(value[1]), ]): - return ObjectArrayObject + return cls._collection_obj_type + + # no default for ObjectArrayObject, sorry raise TypeError( 'Type `array of {}` is invalid'.format(value_subtype) ) if is_binary(value): - return BinaryObject + return cls._binary_obj_type - if value_type in cls._python_map: - return cls._python_map[value_type] raise TypeError( 'Type `{}` is invalid.'.format(value_type) ) @classmethod - def from_python(cls, value): - return cls.map_python_type(value).from_python(value) + def from_python(cls, stream, value): + p_type = cls.map_python_type(value) + p_type.from_python(stream, value) + + @classmethod + async def from_python_async(cls, stream, value): + p_type = cls.map_python_type(value) + await p_type.from_python_async(stream, value) -def infer_from_python(value: Any): +def infer_from_python(stream, value: Any): """ Convert pythonic value to ctypes buffer, type hint-aware. :param value: pythonic value or (value, type_hint) tuple, :return: bytes. 
""" + value, data_type = __unpack_hinted(value) + + data_type.from_python(stream, value) + + +async def infer_from_python_async(stream, value: Any): + """ + Async version of infer_from_python + """ + value, data_type = __unpack_hinted(value) + + await data_type.from_python_async(stream, value) + + +def __unpack_hinted(value): if is_hinted(value): - value, data_type = value - else: - data_type = AnyDataObject - return data_type.from_python(value) + return value + return value, AnyDataObject @attr.s @@ -410,63 +544,84 @@ class AnyDataArray(AnyDataObject): """ counter_type = attr.ib(default=ctypes.c_int) - def build_header(self): - return type( - self.__class__.__name__+'Header', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('length', self.counter_type), - ], - } - ) - - def parse(self, client: 'Client'): - header_class = self.build_header() - buffer = client.recv(ctypes.sizeof(header_class)) - header = header_class.from_buffer_copy(buffer) - fields = [] + def parse(self, stream): + fields, length = self.__parse_header(stream) + for i in range(length): + c_type = super().parse(stream) + fields.append((f'element_{i}', c_type)) + return self.build_c_type(fields) - for i in range(header.length): - c_type, buffer_fragment = super().parse(client) - buffer += buffer_fragment - fields.append(('element_{}'.format(i), c_type)) + async def parse_async(self, stream): + fields, length = self.__parse_header(stream) + for i in range(length): + c_type = await super().parse_async(stream) + fields.append((f'element_{i}', c_type)) + return self.build_c_type(fields) + + def __parse_header(self, stream): + cnt_sz = ctypes.sizeof(self.counter_type) + length = int.from_bytes( + stream.slice(stream.tell(), cnt_sz), + byteorder=PROTOCOL_BYTE_ORDER + ) + stream.seek(cnt_sz, SEEK_CUR) + return [('length', self.counter_type)], length - final_class = type( + def build_c_type(self, fields): + return type( self.__class__.__name__, - (header_class,), + (ctypes.LittleEndianStructure,), { '_pack_': 1, '_fields_': fields, } ) - return final_class, buffer @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - result = [] - for i in range(ctype_object.length): - result.append( + def to_python(cls, ctypes_object, **kwargs): + length = getattr(ctypes_object, "length", 0) + + return [ + super().to_python(getattr(ctypes_object, f'element_{i}'), **kwargs) + for i in range(length) + ] + + @classmethod + async def to_python_async(cls, ctypes_object, **kwargs): + length = getattr(ctypes_object, "length", 0) + + values = asyncio.gather( + *[ super().to_python( - getattr(ctype_object, 'element_{}'.format(i)), + getattr(ctypes_object, f'element_{i}'), *args, **kwargs - ) - ) - return result + ) for i in range(length) + ] + ) + return await values - def from_python(self, value): - header_class = self.build_header() - header = header_class() + def from_python(self, stream, value): + value = self.__write_header_and_process_value(stream, value) + for x in value: + infer_from_python(stream, x) + + async def from_python_async(self, stream, value): + value = self.__write_header_and_process_value(stream, value) + + for x in value: + await infer_from_python_async(stream, x) + + def __write_header_and_process_value(self, stream, value): try: length = len(value) except TypeError: value = [value] length = 1 - header.length = length - buffer = bytes(header) - for x in value: - buffer += infer_from_python(x) - return buffer + stream.write(length.to_bytes( + ctypes.sizeof(self.counter_type), + 
byteorder=PROTOCOL_BYTE_ORDER + )) + + return value diff --git a/pyignite/datatypes/key_value.py b/pyignite/datatypes/key_value.py index 0f21ac6..46ac07d 100644 --- a/pyignite/datatypes/key_value.py +++ b/pyignite/datatypes/key_value.py @@ -13,12 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .primitive_arrays import ByteArray +from enum import IntEnum -class PeekModes(ByteArray): - +class PeekModes(IntEnum): ALL = 0 NEAR = 1 PRIMARY = 2 BACKUP = 3 + ONHEAP = 4 + OFFHEAP = 5 diff --git a/pyignite/datatypes/null_object.py b/pyignite/datatypes/null_object.py index a648e30..d51e5fb 100644 --- a/pyignite/datatypes/null_object.py +++ b/pyignite/datatypes/null_object.py @@ -20,12 +20,15 @@ """ import ctypes +from io import SEEK_CUR from .base import IgniteDataType from .type_codes import TC_NULL -__all__ = ['Null'] +__all__ = ['Null', 'Nullable'] + +from ..constants import PROTOCOL_BYTE_ORDER class Null(IgniteDataType): @@ -49,16 +52,100 @@ def build_c_type(cls): return cls._object_c_type @classmethod - def parse(cls, client: 'Client'): - buffer = client.recv(ctypes.sizeof(ctypes.c_byte)) - data_type = cls.build_c_type() - return data_type, buffer + def parse(cls, stream): + stream.seek(ctypes.sizeof(ctypes.c_byte), SEEK_CUR) + return cls.build_c_type() - @staticmethod - def to_python(*args, **kwargs): + @classmethod + def to_python(cls, ctypes_object, **kwargs): return None - @staticmethod - def from_python(*args): - return TC_NULL + @classmethod + def from_python(cls, stream, *args): + stream.write(TC_NULL) + + +class Nullable(IgniteDataType): + @classmethod + def parse_not_null(cls, stream): + raise NotImplementedError + + @classmethod + async def parse_not_null_async(cls, stream): + return cls.parse_not_null(stream) + + @classmethod + def parse(cls, stream): + is_null, null_type = cls.__check_null_input(stream) + + if is_null: + return null_type + + return cls.parse_not_null(stream) + + @classmethod + async def parse_async(cls, stream): + is_null, null_type = cls.__check_null_input(stream) + + if is_null: + return null_type + + return await cls.parse_not_null_async(stream) + + @classmethod + def from_python_not_null(cls, stream, value, **kwargs): + raise NotImplementedError + + @classmethod + async def from_python_not_null_async(cls, stream, value, **kwargs): + return cls.from_python_not_null(stream, value, **kwargs) + @classmethod + def from_python(cls, stream, value, **kwargs): + if value is None: + Null.from_python(stream) + else: + cls.from_python_not_null(stream, value, **kwargs) + + @classmethod + async def from_python_async(cls, stream, value, **kwargs): + if value is None: + Null.from_python(stream) + else: + await cls.from_python_not_null_async(stream, value, **kwargs) + + @classmethod + def to_python_not_null(cls, ctypes_object, **kwargs): + raise NotImplementedError + + @classmethod + async def to_python_not_null_async(cls, ctypes_object, **kwargs): + return cls.to_python_not_null(ctypes_object, **kwargs) + + @classmethod + def to_python(cls, ctypes_object, **kwargs): + if cls.__is_null(ctypes_object): + return None + + return cls.to_python_not_null(ctypes_object, **kwargs) + + @classmethod + async def to_python_async(cls, ctypes_object, **kwargs): + if cls.__is_null(ctypes_object): + return None + + return await cls.to_python_not_null_async(ctypes_object, **kwargs) + + @classmethod + def __check_null_input(cls, stream): + type_len = ctypes.sizeof(ctypes.c_byte) + + if stream.slice(offset=type_len) == TC_NULL: 
+ stream.seek(type_len, SEEK_CUR) + return True, Null.build_c_type() + + return False, None + + @classmethod + def __is_null(cls, ctypes_object): + return ctypes_object.type_code == int.from_bytes(TC_NULL, byteorder=PROTOCOL_BYTE_ORDER) diff --git a/pyignite/datatypes/primitive.py b/pyignite/datatypes/primitive.py index d1e9f4e..2213f3d 100644 --- a/pyignite/datatypes/primitive.py +++ b/pyignite/datatypes/primitive.py @@ -14,9 +14,13 @@ # limitations under the License. import ctypes +import struct +from io import SEEK_CUR from pyignite.constants import * from .base import IgniteDataType +from .type_ids import * +from .type_names import * __all__ = [ @@ -38,69 +42,114 @@ class Primitive(IgniteDataType): - Char, - Bool. """ - + _type_name = None + _type_id = None c_type = None @classmethod - def parse(cls, client: 'Client'): - return cls.c_type, client.recv(ctypes.sizeof(cls.c_type)) - - @staticmethod - def to_python(ctype_object, *args, **kwargs): - return ctype_object + def parse(cls, stream): + stream.seek(ctypes.sizeof(cls.c_type), SEEK_CUR) + return cls.c_type @classmethod - def from_python(cls, value): - return bytes(cls.c_type(value)) + def to_python(cls, ctypes_object, **kwargs): + return ctypes_object class Byte(Primitive): + _type_name = NAME_BYTE + _type_id = TYPE_BYTE c_type = ctypes.c_byte + @classmethod + def from_python(cls, stream, value): + stream.write(struct.pack(" int: + return value + class ShortObject(DataObject): + _type_name = NAME_SHORT + _type_id = TYPE_SHORT c_type = ctypes.c_short type_code = TC_SHORT pythonic = int default = 0 + @classmethod + def hashcode(cls, value: int, **kwargs) -> int: + return value + class IntObject(DataObject): + _type_name = NAME_INT + _type_id = TYPE_INT c_type = ctypes.c_int type_code = TC_INT pythonic = int default = 0 + @classmethod + def hashcode(cls, value: int, **kwargs) -> int: + return value + class LongObject(DataObject): + _type_name = NAME_LONG + _type_id = TYPE_LONG c_type = ctypes.c_longlong type_code = TC_LONG pythonic = int default = 0 + @classmethod + def hashcode(cls, value: int, **kwargs) -> int: + return value ^ (unsigned(value, ctypes.c_ulonglong) >> 32) + class FloatObject(DataObject): + _type_name = NAME_FLOAT + _type_id = TYPE_FLOAT c_type = ctypes.c_float type_code = TC_FLOAT pythonic = float default = 0.0 + @classmethod + def hashcode(cls, value: float, **kwargs) -> int: + return ctypes.cast( + ctypes.pointer(ctypes.c_float(value)), + ctypes.POINTER(ctypes.c_int) + ).contents.value + class DoubleObject(DataObject): + _type_name = NAME_DOUBLE + _type_id = TYPE_DOUBLE c_type = ctypes.c_double type_code = TC_DOUBLE pythonic = float default = 0.0 + @classmethod + def hashcode(cls, value: float, **kwargs) -> int: + bits = ctypes.cast( + ctypes.pointer(ctypes.c_double(value)), + ctypes.POINTER(ctypes.c_longlong) + ).contents.value + return (bits & 0xffffffff) ^ (unsigned(bits, ctypes.c_longlong) >> 32) + class CharObject(DataObject): """ @@ -125,34 +172,51 @@ class CharObject(DataObject): to/from UTF-8 to keep the coding hassle to minimum. Bear in mind though: decoded character may take 1..4 bytes in UTF-8. 
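    For example (illustrative), the Java-compatible ``hashcode`` helpers
    defined on these data objects give::

        CharObject.hashcode('A')   # -> 65, i.e. ord('A')
        IntObject.hashcode(42)     # -> 42
        BoolObject.hashcode(True)  # -> 1231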
""" + _type_name = NAME_CHAR + _type_id = TYPE_CHAR c_type = ctypes.c_short type_code = TC_CHAR pythonic = str default = ' ' @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - return ctype_object.value.to_bytes( + def hashcode(cls, value: str, **kwargs) -> int: + return ord(value) + + @classmethod + def to_python_not_null(cls, ctypes_object, **kwargs): + value = ctypes_object.value + return value.to_bytes( ctypes.sizeof(cls.c_type), byteorder=PROTOCOL_BYTE_ORDER ).decode(PROTOCOL_CHAR_ENCODING) @classmethod - def from_python(cls, value): + def from_python_not_null(cls, stream, value, **kwargs): if type(value) is str: value = value.encode(PROTOCOL_CHAR_ENCODING) # assuming either a bytes or an integer if type(value) is bytes: value = int.from_bytes(value, byteorder=PROTOCOL_BYTE_ORDER) # assuming a valid integer - return cls.type_code + value.to_bytes( - ctypes.sizeof(cls.c_type), - byteorder=PROTOCOL_BYTE_ORDER + stream.write(cls.type_code) + stream.write( + value.to_bytes(ctypes.sizeof(cls.c_type), byteorder=PROTOCOL_BYTE_ORDER) ) class BoolObject(DataObject): - c_type = ctypes.c_bool + _type_name = NAME_BOOLEAN + _type_id = TYPE_BOOLEAN + c_type = ctypes.c_byte # Use c_byte because c_bool throws endianness conversion error on BE systems. type_code = TC_BOOL pythonic = bool default = False + + @classmethod + def hashcode(cls, value: bool, **kwargs) -> int: + return 1231 if value else 1237 + + @classmethod + def to_python_not_null(cls, ctypes_object, **kwargs): + return ctypes_object.value != 0 diff --git a/pyignite/datatypes/prop_codes.py b/pyignite/datatypes/prop_codes.py index adea281..9709313 100644 --- a/pyignite/datatypes/prop_codes.py +++ b/pyignite/datatypes/prop_codes.py @@ -47,5 +47,4 @@ PROP_PARTITION_LOSS_POLICY = 404 PROP_EAGER_TTL = 405 PROP_STATISTICS_ENABLED = 406 - -PROP_INVALIDATE = -1 +PROP_EXPIRY_POLICY = 407 diff --git a/pyignite/datatypes/standard.py b/pyignite/datatypes/standard.py index 8808da2..9357e8f 100644 --- a/pyignite/datatypes/standard.py +++ b/pyignite/datatypes/standard.py @@ -16,14 +16,18 @@ import ctypes from datetime import date, datetime, time, timedelta import decimal +from io import SEEK_CUR from math import ceil +from typing import Tuple, Union import uuid from pyignite.constants import * +from pyignite.utils import datetime_hashcode, decimal_hashcode, hashcode from .base import IgniteDataType from .type_codes import * -from .null_object import Null - +from .type_ids import * +from .type_names import * +from .null_object import Nullable __all__ = [ 'String', 'DecimalObject', 'UUIDObject', 'TimestampObject', 'DateObject', @@ -40,7 +44,9 @@ ] -class StandardObject(IgniteDataType): +class StandardObject(Nullable): + _type_name = None + _type_id = None type_code = None @classmethod @@ -48,25 +54,26 @@ def build_c_type(cls): raise NotImplementedError('This object is generic') @classmethod - def parse(cls, client: 'Client'): - tc_type = client.recv(ctypes.sizeof(ctypes.c_byte)) - - if tc_type == TC_NULL: - return Null.build_c_type(), tc_type - - c_type = cls.build_c_type() - buffer = tc_type + client.recv(ctypes.sizeof(c_type) - len(tc_type)) - return c_type, buffer + def parse_not_null(cls, stream): + data_type = cls.build_c_type() + stream.seek(ctypes.sizeof(data_type), SEEK_CUR) + return data_type -class String(IgniteDataType): +class String(Nullable): """ Pascal-style string: `c_int` counter, followed by count*bytes. UTF-8-encoded, so that one character may take 1 to 4 bytes. 
""" + _type_name = NAME_STRING + _type_id = TYPE_STRING type_code = TC_STRING pythonic = str + @classmethod + def hashcode(cls, value: str, **kwargs) -> int: + return hashcode(value) + @classmethod def build_c_type(cls, length: int): return type( @@ -83,35 +90,25 @@ def build_c_type(cls, length: int): ) @classmethod - def parse(cls, client: 'Client'): - tc_type = client.recv(ctypes.sizeof(ctypes.c_byte)) - # String or Null - if tc_type == TC_NULL: - return Null.build_c_type(), tc_type - - buffer = tc_type + client.recv(ctypes.sizeof(ctypes.c_int)) - length = int.from_bytes(buffer[1:], byteorder=PROTOCOL_BYTE_ORDER) + def parse_not_null(cls, stream): + length = int.from_bytes( + stream.slice(stream.tell() + ctypes.sizeof(ctypes.c_byte), ctypes.sizeof(ctypes.c_int)), + byteorder=PROTOCOL_BYTE_ORDER + ) data_type = cls.build_c_type(length) - buffer += client.recv(ctypes.sizeof(data_type) - len(buffer)) + stream.seek(ctypes.sizeof(data_type), SEEK_CUR) + return data_type - return data_type, buffer + @classmethod + def to_python_not_null(cls, ctypes_object, **kwargs): + if ctypes_object.length > 0: + return ctypes_object.data.decode(PROTOCOL_STRING_ENCODING) - @staticmethod - def to_python(ctype_object, *args, **kwargs): - length = getattr(ctype_object, 'length', None) - if length is None: - return None - elif length > 0: - return ctype_object.data.decode(PROTOCOL_STRING_ENCODING) - else: - return '' + return '' @classmethod - def from_python(cls, value): - if value is None: - return Null.from_python() - + def from_python_not_null(cls, stream, value, **kwargs): if isinstance(value, str): value = value.encode(PROTOCOL_STRING_ENCODING) length = len(value) @@ -123,16 +120,23 @@ def from_python(cls, value): ) data_object.length = length data_object.data = value - return bytes(data_object) + + stream.write(data_object) -class DecimalObject(IgniteDataType): +class DecimalObject(Nullable): + _type_name = NAME_DECIMAL + _type_id = TYPE_DECIMAL type_code = TC_DECIMAL pythonic = decimal.Decimal default = decimal.Decimal('0.00') @classmethod - def build_c_header(cls): + def hashcode(cls, value: decimal.Decimal, **kwargs) -> int: + return decimal_hashcode(value) + + @classmethod + def build_c_type(cls, length): return type( cls.__name__, (ctypes.LittleEndianStructure,), @@ -142,72 +146,45 @@ def build_c_header(cls): ('type_code', ctypes.c_byte), ('scale', ctypes.c_int), ('length', ctypes.c_int), - ], + ('data', ctypes.c_ubyte * length) + ] } ) @classmethod - def parse(cls, client: 'Client'): - tc_type = client.recv(ctypes.sizeof(ctypes.c_byte)) - # Decimal or Null - if tc_type == TC_NULL: - return Null.build_c_type(), tc_type - - header_class = cls.build_c_header() - buffer = tc_type + client.recv( - ctypes.sizeof(header_class) - - len(tc_type) - ) - header = header_class.from_buffer_copy(buffer) - data_type = type( - cls.__name__, - (header_class,), - { - '_pack_': 1, - '_fields_': [ - ('data', ctypes.c_ubyte * header.length), - ], - } - ) - buffer += client.recv( - ctypes.sizeof(data_type) - - ctypes.sizeof(header_class) + def parse_not_null(cls, stream): + int_sz, b_sz = ctypes.sizeof(ctypes.c_int), ctypes.sizeof(ctypes.c_byte) + length = int.from_bytes( + stream.slice(stream.tell() + int_sz + b_sz, int_sz), + byteorder=PROTOCOL_BYTE_ORDER ) - return data_type, buffer + data_type = cls.build_c_type(length) + stream.seek(ctypes.sizeof(data_type), SEEK_CUR) + return data_type @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - if getattr(ctype_object, 'length', None) is None: - return None 
- - sign = 1 if ctype_object.data[0] & 0x80 else 0 - data = ctype_object.data[1:] - data.insert(0, ctype_object.data[0] & 0x7f) + def to_python_not_null(cls, ctypes_object, **kwargs): + sign = 1 if ctypes_object.data[0] & 0x80 else 0 + data = ctypes_object.data[1:] + data.insert(0, ctypes_object.data[0] & 0x7f) # decode n-byte integer result = sum([ [x for x in reversed(data)][i] * 0x100 ** i for i in range(len(data)) ]) # apply scale - result = ( - result - / decimal.Decimal('10') - ** decimal.Decimal(ctype_object.scale) - ) + result = result / decimal.Decimal('10') ** decimal.Decimal(ctypes_object.scale) if sign: # apply sign result = -result return result @classmethod - def from_python(cls, value: decimal.Decimal): - if value is None: - return Null.from_python() - + def from_python_not_null(cls, stream, value: decimal.Decimal, **kwargs): sign, digits, scale = value.normalize().as_tuple() integer = int(''.join([str(d) for d in digits])) # calculate number of bytes (at least one, and not forget the sign bit) - length = ceil((integer.bit_length() + 1)/8) + length = ceil((integer.bit_length() + 1) / 8) # write byte string data = [] for i in range(length): @@ -219,17 +196,7 @@ def from_python(cls, value: decimal.Decimal): data[0] |= 0x80 else: data[0] &= 0x7f - header_class = cls.build_c_header() - data_class = type( - cls.__name__, - (header_class,), - { - '_pack_': 1, - '_fields_': [ - ('data', ctypes.c_ubyte * length), - ], - } - ) + data_class = cls.build_c_type(length) data_object = data_class() data_object.type_code = int.from_bytes( cls.type_code, @@ -239,7 +206,8 @@ def from_python(cls, value: decimal.Decimal): data_object.scale = -scale for i in range(length): data_object.data[i] = data[i] - return bytes(data_object) + + stream.write(data_object) class UUIDObject(StandardObject): @@ -251,11 +219,20 @@ class UUIDObject(StandardObject): and :py:meth:`~pyignite.datatypes.standard.UUIDObject.from_python` methods is changed for compatibility with `java.util.UUID`. """ - type_code = TC_UUID + _type_name = NAME_UUID + _type_id = TYPE_UUID _object_c_type = None + type_code = TC_UUID UUID_BYTE_ORDER = (7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8) + @classmethod + def hashcode(cls, value: 'UUID', **kwargs) -> int: + msb = value.int >> 64 + lsb = value.int & 0xffffffffffffffff + hilo = msb ^ lsb + return (hilo >> 32) ^ (hilo & 0xffffffff) + @classmethod def build_c_type(cls): if cls._object_c_type is None: @@ -273,7 +250,7 @@ def build_c_type(cls): return cls._object_c_type @classmethod - def from_python(cls, value: uuid.UUID): + def from_python_not_null(cls, stream, value: uuid.UUID, **kwargs): data_type = cls.build_c_type() data_object = data_type() data_object.type_code = int.from_bytes( @@ -282,15 +259,11 @@ def from_python(cls, value: uuid.UUID): ) for i, byte in zip(cls.UUID_BYTE_ORDER, bytearray(value.bytes)): data_object.value[i] = byte - return bytes(data_object) + + stream.write(data_object) @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): - if ctypes_object.type_code == int.from_bytes( - TC_NULL, - byteorder=PROTOCOL_BYTE_ORDER - ): - return None + def to_python_not_null(cls, ctypes_object, **kwargs): uuid_array = bytearray(ctypes_object.value) return uuid.UUID( bytes=bytes([uuid_array[i] for i in cls.UUID_BYTE_ORDER]) @@ -308,10 +281,16 @@ class TimestampObject(StandardObject): `epoch` and `fraction` stored separately and represented as tuple(datetime.datetime, integer). 
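    Example (illustrative; the integer part is assumed to carry the
    sub-millisecond remainder, in nanoseconds)::

        from datetime import datetime

        value = (datetime(2021, 6, 1, 12, 30), 500)
        # serialized as epoch = int(value[0].timestamp() * 1000), fraction = 500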
""" + _type_name = NAME_TIMESTAMP + _type_id = TYPE_TIMESTAMP + _object_c_type = None type_code = TC_TIMESTAMP pythonic = tuple default = (datetime(1970, 1, 1), 0) - _object_c_type = None + + @classmethod + def hashcode(cls, value: Tuple[datetime, int], **kwargs) -> int: + return datetime_hashcode(int(value[0].timestamp() * 1000)) @classmethod def build_c_type(cls): @@ -331,9 +310,7 @@ def build_c_type(cls): return cls._object_c_type @classmethod - def from_python(cls, value: tuple): - if value is None: - return Null.from_python() + def from_python_not_null(cls, stream, value: tuple, **kwargs): data_type = cls.build_c_type() data_object = data_type() data_object.type_code = int.from_bytes( @@ -342,17 +319,13 @@ def from_python(cls, value: tuple): ) data_object.epoch = int(value[0].timestamp() * 1000) data_object.fraction = value[1] - return bytes(data_object) + + stream.write(data_object) @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): - if ctypes_object.type_code == int.from_bytes( - TC_NULL, - byteorder=PROTOCOL_BYTE_ORDER - ): - return None + def to_python_not_null(cls, ctypes_object, **kwargs): return ( - datetime.fromtimestamp(ctypes_object.epoch/1000), + datetime.fromtimestamp(ctypes_object.epoch / 1000), ctypes_object.fraction ) @@ -364,10 +337,16 @@ class DateObject(StandardObject): Represented as a naive datetime.datetime in Python. """ + _type_name = NAME_DATE + _type_id = TYPE_DATE + _object_c_type = None type_code = TC_DATE pythonic = datetime default = datetime(1970, 1, 1) - _object_c_type = None + + @classmethod + def hashcode(cls, value: datetime, **kwargs) -> int: + return datetime_hashcode(int(value.timestamp() * 1000)) @classmethod def build_c_type(cls): @@ -386,9 +365,7 @@ def build_c_type(cls): return cls._object_c_type @classmethod - def from_python(cls, value: [date, datetime]): - if value is None: - return Null.from_python() + def from_python_not_null(cls, stream, value: Union[date, datetime], **kwargs): if type(value) is date: value = datetime.combine(value, time()) data_type = cls.build_c_type() @@ -398,16 +375,12 @@ def from_python(cls, value: [date, datetime]): byteorder=PROTOCOL_BYTE_ORDER ) data_object.epoch = int(value.timestamp() * 1000) - return bytes(data_object) + + stream.write(data_object) @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): - if ctypes_object.type_code == int.from_bytes( - TC_NULL, - byteorder=PROTOCOL_BYTE_ORDER - ): - return None - return datetime.fromtimestamp(ctypes_object.epoch/1000) + def to_python_not_null(cls, ctypes_object, **kwargs): + return datetime.fromtimestamp(ctypes_object.epoch / 1000) class TimeObject(StandardObject): @@ -416,10 +389,16 @@ class TimeObject(StandardObject): Represented as a datetime.timedelta in Python. 
""" + _type_name = NAME_TIME + _type_id = TYPE_TIME + _object_c_type = None type_code = TC_TIME pythonic = timedelta default = timedelta() - _object_c_type = None + + @classmethod + def hashcode(cls, value: timedelta, **kwargs) -> int: + return datetime_hashcode(int(value.total_seconds() * 1000)) @classmethod def build_c_type(cls): @@ -438,9 +417,7 @@ def build_c_type(cls): return cls._object_c_type @classmethod - def from_python(cls, value: timedelta): - if value is None: - return Null.from_python() + def from_python_not_null(cls, stream, value: timedelta, **kwargs): data_type = cls.build_c_type() data_object = data_type() data_object.type_code = int.from_bytes( @@ -448,15 +425,11 @@ def from_python(cls, value: timedelta): byteorder=PROTOCOL_BYTE_ORDER ) data_object.value = int(value.total_seconds() * 1000) - return bytes(data_object) + + stream.write(data_object) @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): - if ctypes_object.type_code == int.from_bytes( - TC_NULL, - byteorder=PROTOCOL_BYTE_ORDER - ): - return None + def to_python_not_null(cls, ctypes_object, **kwargs): return timedelta(milliseconds=ctypes_object.value) @@ -468,8 +441,10 @@ class EnumObject(StandardObject): (using language-specific type serialization is a good way to kill the interoperability though), so it represented by tuple(int, int) in Python. """ - type_code = TC_ENUM + _type_name = 'Enum' + _type_id = TYPE_ENUM _object_c_type = None + type_code = TC_ENUM @classmethod def build_c_type(cls): @@ -489,28 +464,19 @@ def build_c_type(cls): return cls._object_c_type @classmethod - def from_python(cls, value: tuple): - if value is None: - return Null.from_python() - + def from_python_not_null(cls, stream, value: tuple, **kwargs): data_type = cls.build_c_type() data_object = data_type() data_object.type_code = int.from_bytes( cls.type_code, byteorder=PROTOCOL_BYTE_ORDER ) - if value is None: - return Null.from_python(value) data_object.type_id, data_object.ordinal = value - return bytes(data_object) + + stream.write(data_object) @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): - if ctypes_object.type_code == int.from_bytes( - TC_NULL, - byteorder=PROTOCOL_BYTE_ORDER - ): - return None + def to_python_not_null(cls, ctypes_object, **kwargs): return ctypes_object.type_id, ctypes_object.ordinal @@ -518,78 +484,94 @@ class BinaryEnumObject(EnumObject): """ Another way of representing the enum type. Same, but different. """ + _type_name = 'Enum' + _type_id = TYPE_BINARY_ENUM type_code = TC_BINARY_ENUM -class StandardArray(IgniteDataType): - """ - Base class for array of primitives. Payload-only. 
- """ +class _StandardArrayBase: standard_type = None - type_code = None @classmethod - def build_header_class(cls): + def _parse_header(cls, stream): + raise NotImplementedError + + @classmethod + def _parse(cls, stream): + fields, length = cls._parse_header(stream) + + for i in range(length): + c_type = cls.standard_type.parse(stream) + fields.append((f'element_{i}', c_type)) + return type( - cls.__name__+'Header', + cls.__name__, (ctypes.LittleEndianStructure,), { '_pack_': 1, - '_fields_': [ - ('length', ctypes.c_int), - ], + '_fields_': fields, } ) @classmethod - def parse(cls, client: 'Client'): - header_class = cls.build_header_class() - buffer = client.recv(ctypes.sizeof(header_class)) - header = header_class.from_buffer_copy(buffer) - fields = [] - for i in range(header.length): - c_type, buffer_fragment = cls.standard_type.parse(client) - buffer += buffer_fragment - fields.append(('element_{}'.format(i), c_type)) + def _write_header(cls, stream, value, **kwargs): + raise NotImplementedError - final_class = type( - cls.__name__, - (header_class,), - { - '_pack_': 1, - '_fields_': fields, - } + @classmethod + def _from_python(cls, stream, value, **kwargs): + cls._write_header(stream, value, **kwargs) + for x in value: + cls.standard_type.from_python(stream, x) + + @classmethod + def _to_python(cls, ctypes_object, *args, **kwargs): + length = ctypes_object.length + return [ + cls.standard_type.to_python( + getattr(ctypes_object, f'element_{i}'), *args, **kwargs + ) for i in range(length) + ] + + +class StandardArray(IgniteDataType, _StandardArrayBase): + """ + Base class for array of primitives. Payload-only. + """ + _type_name = None + _type_id = None + type_code = None + + @classmethod + def _parse_header(cls, stream): + int_sz = ctypes.sizeof(ctypes.c_int) + length = int.from_bytes( + stream.slice(stream.tell(), int_sz), + byteorder=PROTOCOL_BYTE_ORDER ) - return final_class, buffer + stream.seek(int_sz, SEEK_CUR) + + return [('length', ctypes.c_int)], length @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - result = [] - for i in range(ctype_object.length): - result.append( - cls.standard_type.to_python( - getattr(ctype_object, 'element_{}'.format(i)), - *args, **kwargs - ) - ) - return result + def parse(cls, stream): + return cls._parse(stream) @classmethod - def from_python(cls, value): - header_class = cls.build_header_class() - header = header_class() - if hasattr(header, 'type_code'): - header.type_code = int.from_bytes( - cls.type_code, + def _write_header(cls, stream, value, **kwargs): + stream.write( + len(value).to_bytes( + length=ctypes.sizeof(ctypes.c_int), byteorder=PROTOCOL_BYTE_ORDER ) - length = len(value) - header.length = length - buffer = bytes(header) + ) - for x in value: - buffer += cls.standard_type.from_python(x) - return buffer + @classmethod + def from_python(cls, stream, value, **kwargs): + cls._from_python(stream, value, **kwargs) + + @classmethod + def to_python(cls, ctypes_object, **kwargs): + return cls._to_python(ctypes_object, **kwargs) class StringArray(StandardArray): @@ -599,66 +581,109 @@ class StringArray(StandardArray): List(str) in Python. 
""" + _type_name = NAME_STRING_ARR + _type_id = TYPE_STRING_ARR standard_type = String class DecimalArray(StandardArray): + _type_name = NAME_DECIMAL_ARR + _type_id = TYPE_DECIMAL_ARR standard_type = DecimalObject class UUIDArray(StandardArray): + _type_name = NAME_UUID_ARR + _type_id = TYPE_UUID_ARR standard_type = UUIDObject class TimestampArray(StandardArray): + _type_name = NAME_TIMESTAMP_ARR + _type_id = TYPE_TIMESTAMP_ARR standard_type = TimestampObject class DateArray(StandardArray): + _type_name = NAME_DATE_ARR + _type_id = TYPE_DATE_ARR standard_type = DateObject class TimeArray(StandardArray): + _type_name = NAME_TIME_ARR + _type_id = TYPE_TIME_ARR standard_type = TimeObject class EnumArray(StandardArray): + _type_name = 'Enum[]' + _type_id = TYPE_ENUM_ARR standard_type = EnumObject -class StandardArrayObject(StandardArray): +class StandardArrayObject(Nullable, _StandardArrayBase): + _type_name = None + _type_id = None + standard_type = None + type_code = None pythonic = list default = [] @classmethod - def build_header_class(cls): - return type( - cls.__name__+'Header', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('type_code', ctypes.c_byte), - ('length', ctypes.c_int), - ], - } + def _parse_header(cls, stream): + int_sz, b_sz = ctypes.sizeof(ctypes.c_int), ctypes.sizeof(ctypes.c_byte) + length = int.from_bytes( + stream.slice(stream.tell() + b_sz, int_sz), + byteorder=PROTOCOL_BYTE_ORDER ) + stream.seek(int_sz + b_sz, SEEK_CUR) + + return [('type_code', ctypes.c_byte), ('length', ctypes.c_int)], length + + @classmethod + def parse_not_null(cls, stream): + return cls._parse(stream) + + @classmethod + def _write_header(cls, stream, value, **kwargs): + stream.write(cls.type_code) + stream.write( + len(value).to_bytes( + length=ctypes.sizeof(ctypes.c_int), + byteorder=PROTOCOL_BYTE_ORDER + ) + ) + + @classmethod + def from_python_not_null(cls, stream, value, **kwargs): + cls._from_python(stream, value, **kwargs) + + @classmethod + def to_python_not_null(cls, ctypes_object, **kwargs): + return cls._to_python(ctypes_object, **kwargs) class StringArrayObject(StandardArrayObject): """ List of strings. """ + _type_name = NAME_STRING_ARR + _type_id = TYPE_STRING_ARR standard_type = String type_code = TC_STRING_ARRAY class DecimalArrayObject(StandardArrayObject): """ List of decimal.Decimal objects. """ + _type_name = NAME_DECIMAL_ARR + _type_id = TYPE_DECIMAL_ARR standard_type = DecimalObject type_code = TC_DECIMAL_ARRAY class UUIDArrayObject(StandardArrayObject): - """ Translated into Python as a list(uuid.UUID)""" + """ Translated into Python as a list(uuid.UUID). """ + _type_name = NAME_UUID_ARR + _type_id = TYPE_UUID_ARR standard_type = UUIDObject type_code = TC_UUID_ARRAY @@ -667,18 +692,24 @@ class TimestampArrayObject(StandardArrayObject): """ Translated into Python as a list of (datetime.datetime, integer) tuples. """ + _type_name = NAME_TIMESTAMP_ARR + _type_id = TYPE_TIMESTAMP_ARR standard_type = TimestampObject type_code = TC_TIMESTAMP_ARRAY class DateArrayObject(StandardArrayObject): """ List of datetime.datetime type values. """ + _type_name = NAME_DATE_ARR + _type_id = TYPE_DATE_ARR standard_type = DateObject type_code = TC_DATE_ARRAY class TimeArrayObject(StandardArrayObject): """ List of datetime.timedelta type values. 
""" + _type_name = NAME_TIME_ARR + _type_id = TYPE_TIME_ARR standard_type = TimeObject type_code = TC_TIME_ARRAY @@ -688,47 +719,48 @@ class EnumArrayObject(StandardArrayObject): Array of (int, int) tuples, plus it holds a `type_id` in its header. The only `type_id` value of -1 (user type) works from Python perspective. """ + _type_name = 'Enum[]' + _type_id = TYPE_ENUM_ARR standard_type = EnumObject type_code = TC_ENUM_ARRAY + OBJECT = -1 + @classmethod - def build_header_class(cls): - return type( - cls.__name__+'Header', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('type_code', ctypes.c_byte), - ('type_id', ctypes.c_int), - ('length', ctypes.c_int), - ], - } + def _parse_header(cls, stream): + int_sz, b_sz = ctypes.sizeof(ctypes.c_int), ctypes.sizeof(ctypes.c_byte) + length = int.from_bytes( + stream.slice(stream.tell() + b_sz + int_sz, int_sz), + byteorder=PROTOCOL_BYTE_ORDER ) + stream.seek(2 * int_sz + b_sz, SEEK_CUR) + return [('type_code', ctypes.c_byte), ('type_id', ctypes.c_int), ('length', ctypes.c_int)], length @classmethod - def from_python(cls, value): - type_id, value = value - header_class = cls.build_header_class() - header = header_class() - if hasattr(header, 'type_code'): - header.type_code = int.from_bytes( - cls.type_code, + def _write_header(cls, stream, value, type_id=-1): + stream.write(cls.type_code) + stream.write( + type_id.to_bytes( + length=ctypes.sizeof(ctypes.c_int), + byteorder=PROTOCOL_BYTE_ORDER, + signed=True + ) + ) + stream.write( + len(value).to_bytes( + length=ctypes.sizeof(ctypes.c_int), byteorder=PROTOCOL_BYTE_ORDER ) - length = len(value) - header.length = length - header.type_id = type_id - buffer = bytes(header) + ) - for x in value: - buffer += cls.standard_type.from_python(x) - return buffer + @classmethod + def from_python_not_null(cls, stream, value, **kwargs): + type_id, value = value + super().from_python_not_null(stream, value, type_id=type_id) @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - type_id = ctype_object.type_id - return type_id, super().to_python(ctype_object, *args, **kwargs) + def to_python_not_null(cls, ctypes_object, **kwargs): + return ctypes_object.type_id, cls._to_python(ctypes_object, **kwargs) class BinaryEnumArrayObject(EnumArrayObject): diff --git a/pyignite/datatypes/transactions.py b/pyignite/datatypes/transactions.py new file mode 100644 index 0000000..83e6c06 --- /dev/null +++ b/pyignite/datatypes/transactions.py @@ -0,0 +1,42 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from enum import IntEnum + + +class TransactionConcurrency(IntEnum): + """ + Defines different cache transaction concurrency control. + """ + + #: Optimistic concurrency control. + OPTIMISTIC = 0 + + #: Pessimistic concurrency control. 
+    PESSIMISTIC = 1
+
+
+class TransactionIsolation(IntEnum):
+    """
+    Defines different cache transaction isolation levels.
+    """
+
+    #: Read committed isolation level.
+    READ_COMMITTED = 0
+
+    #: Repeatable read isolation level.
+    REPEATABLE_READ = 1
+
+    #: Serializable isolation level.
+    SERIALIZABLE = 2
diff --git a/pyignite/datatypes/type_ids.py b/pyignite/datatypes/type_ids.py
new file mode 100644
index 0000000..be2d9c3
--- /dev/null
+++ b/pyignite/datatypes/type_ids.py
@@ -0,0 +1,52 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+TYPE_BYTE = 1
+TYPE_SHORT = 2
+TYPE_INT = 3
+TYPE_LONG = 4
+TYPE_FLOAT = 5
+TYPE_DOUBLE = 6
+TYPE_CHAR = 7
+TYPE_BOOLEAN = 8
+TYPE_STRING = 9
+TYPE_UUID = 10
+TYPE_DATE = 11
+TYPE_BYTE_ARR = 12
+TYPE_SHORT_ARR = 13
+TYPE_INT_ARR = 14
+TYPE_LONG_ARR = 15
+TYPE_FLOAT_ARR = 16
+TYPE_DOUBLE_ARR = 17
+TYPE_CHAR_ARR = 18
+TYPE_BOOLEAN_ARR = 19
+TYPE_STRING_ARR = 20
+TYPE_UUID_ARR = 21
+TYPE_DATE_ARR = 22
+TYPE_OBJ_ARR = 23
+TYPE_COL = 24
+TYPE_MAP = 25
+TYPE_BINARY_OBJ = 27
+TYPE_ENUM = 28
+TYPE_ENUM_ARR = 29
+TYPE_DECIMAL = 30
+TYPE_DECIMAL_ARR = 31
+TYPE_CLASS = 32
+TYPE_TIMESTAMP = 33
+TYPE_TIMESTAMP_ARR = 34
+TYPE_PROXY = 35
+TYPE_TIME = 36
+TYPE_TIME_ARR = 37
+TYPE_BINARY_ENUM = 38
diff --git a/pyignite/datatypes/type_names.py b/pyignite/datatypes/type_names.py
new file mode 100644
index 0000000..08ce75d
--- /dev/null
+++ b/pyignite/datatypes/type_names.py
@@ -0,0 +1,46 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +NAME_BYTE = 'java.lang.Byte' +NAME_SHORT = 'java.lang.Short' +NAME_INT = 'java.lang.Integer' +NAME_LONG = 'java.lang.Long' +NAME_FLOAT = 'java.lang.Float' +NAME_DOUBLE = 'java.land.Double' +NAME_CHAR = 'java.lang.Character' +NAME_BOOLEAN = 'java.lang.Boolean' +NAME_STRING = 'java.lang.String' +NAME_UUID = 'java.util.UUID' +NAME_DATE = 'java.util.Date' +NAME_BYTE_ARR = 'class [B' +NAME_SHORT_ARR = 'class [S' +NAME_INT_ARR = 'class [I' +NAME_LONG_ARR = 'class [J' +NAME_FLOAT_ARR = 'class [F' +NAME_DOUBLE_ARR = 'class [D' +NAME_CHAR_ARR = 'class [C' +NAME_BOOLEAN_ARR = 'class [Z' +NAME_STRING_ARR = 'class [Ljava.lang.String;' +NAME_UUID_ARR = 'class [Ljava.util.UUID;' +NAME_DATE_ARR = 'class [Ljava.util.Date;' +NAME_OBJ_ARR = 'class [Ljava.lang.Object;' +NAME_COL = 'java.util.Collection' +NAME_MAP = 'java.util.Map' +NAME_DECIMAL = 'java.math.BigDecimal' +NAME_DECIMAL_ARR = 'class [Ljava.math.BigDecimal;' +NAME_TIMESTAMP = 'java.sql.Timestamp' +NAME_TIMESTAMP_ARR = 'class [Ljava.sql.Timestamp;' +NAME_TIME = 'java.sql.Time' +NAME_TIME_ARR = 'class [Ljava.sql.Time;' diff --git a/pyignite/exceptions.py b/pyignite/exceptions.py index 2bc5996..7419512 100644 --- a/pyignite/exceptions.py +++ b/pyignite/exceptions.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Tuple from socket import error as SocketError @@ -24,13 +25,25 @@ class ParseError(Exception): pass +class AuthenticationError(Exception): + """ + This exception is raised on authentication failure. + """ + + def __init__(self, message: str): + self.message = message + + class HandshakeError(SocketError): """ This exception is raised on Ignite binary protocol handshake failure, as defined in - https://apacheignite.readme.io/docs/binary-client-protocol#section-handshake + https://ignite.apache.org/docs/latest/binary-client-protocol/binary-client-protocol#connection-handshake """ - pass + + def __init__(self, expected_version: Tuple[int, int, int], message: str): + self.expected_version = expected_version + self.message = message class ReconnectError(Exception): @@ -52,7 +65,7 @@ class ParameterError(Exception): class CacheError(Exception): """ - This exception is raised, whenever any remote Thin client operation + This exception is raised, whenever any remote Thin client cache operation returns an error. """ pass @@ -78,3 +91,30 @@ class SQLError(CacheError): An error in SQL query. """ pass + + +class ClusterError(Exception): + """ + This exception is raised, whenever any remote Thin client cluster operation + returns an error. + """ + pass + + +class NotSupportedByClusterError(Exception): + """ + This exception is raised, whenever cluster does not supported specific + operation probably because it is outdated. + """ + pass + + +class NotSupportedError(Exception): + """ + This exception is raised, whenever client does not support specific + operation. + """ + pass + + +connection_errors = (IOError, OSError, EOFError) diff --git a/pyignite/monitoring.py b/pyignite/monitoring.py new file mode 100644 index 0000000..997a5f8 --- /dev/null +++ b/pyignite/monitoring.py @@ -0,0 +1,455 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" Tools to monitor client's events. + +For example, a simple query logger might be implemented like this:: + + import logging + + from pyignite import monitoring + + class QueryLogger(monitoring.QueryEventListener): + + def on_query_start(self, event): + logging.info(f"Query {event.op_name} with query id " + f"{event.query_id} started on server " + f"{event.host}:{event.port}") + + def on_query_fail(self, event): + logging.info(f"Query {event.op_name} with query id " + f"{event.query_id} on server " + f"{event.host}:{event.port} " + f"failed in {event.duration}ms " + f"with error {event.error_msg}") + + def on_query_success(self, event): + logging.info(f"Query {event.op_name} with query id " + f"{event.query_id} on server " \ + f"{event.host}:{event.port} " \ + f"succeeded in {event.duration}ms") + +:class:`~ConnectionEventListener` is also available. + +Event listeners can be registered by passing parameter to :class:`~pyignite.client.Client` or +:class:`~pyignite.aio_client.AioClient` constructor:: + + client = Client(event_listeners=[QueryLogger()]) + with client.connect('127.0.0.1', 10800): + .... + +.. note:: Events are delivered **synchronously**. Application threads block + waiting for event handlers. Care must be taken to ensure that your event handlers are efficient + enough to not adversely affect overall application performance. + +.. note:: Debug logging is also available, standard ``logging`` is used. Just set ``DEBUG`` level to + *pyignite* logger. +""" +from typing import Optional, Sequence + + +class _BaseEvent: + def __init__(self, **kwargs): + if kwargs: + for k, v in kwargs.items(): + object.__setattr__(self, k, v) + + def __setattr__(self, name, value): + raise TypeError(f'{self.__class__.__name__} is immutable') + + def __repr__(self): + pass + + +class _ConnectionEvent(_BaseEvent): + __slots__ = ('host', 'port') + host: str + port: int + + def __init__(self, host, port, **kwargs): + super().__init__(host=host, port=port, **kwargs) + + +class _HandshakeEvent(_ConnectionEvent): + __slots__ = ('protocol_context',) + protocol_context: Optional['ProtocolContext'] + + def __init__(self, host, port, protocol_context=None, **kwargs): + super().__init__(host, port, protocol_context=protocol_context.copy() if protocol_context else None, **kwargs) + + def __repr__(self): + return f"{self.__class__.__name__}(host={self.host}, port={self.port}, " \ + f"protocol_context={self.protocol_context})" + + +class HandshakeStartEvent(_HandshakeEvent): + """ + Published when a handshake started. + + :ivar host: Address of the node to connect, + :ivar port: Port number of the node to connect, + :ivar protocol_context: Client's protocol context. + """ + def __init__(self, host, port, protocol_context=None, **kwargs): + """ + This class is not supposed to be constructed by user. 
+        """
+        super().__init__(host, port, protocol_context, **kwargs)
+
+
+class HandshakeFailedEvent(_HandshakeEvent):
+    """
+    Published when a handshake failed.
+
+    :ivar host: Address of the node to connect,
+    :ivar port: Port number of the node to connect,
+    :ivar protocol_context: Client's protocol context,
+    :ivar error_msg: Error message.
+    """
+    __slots__ = ('error_msg',)
+    error_msg: str
+
+    def __init__(self, host, port, protocol_context=None, err=None, **kwargs):
+        """
+        This class is not supposed to be constructed by user.
+        """
+        super().__init__(host, port, protocol_context, error_msg=repr(err) if err else '', **kwargs)
+
+    def __repr__(self):
+        return f"{self.__class__.__name__}(host={self.host}, port={self.port}, " \
+               f"protocol_context={self.protocol_context}, error_msg={self.error_msg})"
+
+
+class AuthenticationFailedEvent(HandshakeFailedEvent):
+    """
+    Published when authentication failed.
+
+    :ivar host: Address of the node to connect,
+    :ivar port: Port number of the node to connect,
+    :ivar protocol_context: Client's protocol context,
+    :ivar error_msg: Error message.
+    """
+    pass
+
+
+class HandshakeSuccessEvent(_HandshakeEvent):
+    """
+    Published when a handshake succeeded.
+
+    :ivar host: Address of the node to connect,
+    :ivar port: Port number of the node to connect,
+    :ivar protocol_context: Client's protocol context,
+    :ivar node_uuid: Node's uuid, string.
+    """
+    __slots__ = ('node_uuid',)
+    node_uuid: str
+
+    def __init__(self, host, port, protocol_context, node_uuid, **kwargs):
+        """
+        This class is not supposed to be constructed by user.
+        """
+        super().__init__(host, port, protocol_context, node_uuid=str(node_uuid) if node_uuid else '', **kwargs)
+
+    def __repr__(self):
+        return f"{self.__class__.__name__}(host={self.host}, port={self.port}, " \
+               f"node_uuid={self.node_uuid}, protocol_context={self.protocol_context})"
+
+
+class ConnectionClosedEvent(_ConnectionEvent):
+    """
+    Published when a connection to the node is closed gracefully.
+
+    :ivar host: Address of the node to connect,
+    :ivar port: Port number of the node to connect,
+    :ivar node_uuid: Node's uuid, string.
+    """
+    __slots__ = ('node_uuid',)
+    node_uuid: str
+
+    def __init__(self, host, port, node_uuid, **kwargs):
+        """
+        This class is not supposed to be constructed by user.
+        """
+        super().__init__(host, port, node_uuid=str(node_uuid) if node_uuid else '', **kwargs)
+
+    def __repr__(self):
+        return f"{self.__class__.__name__}(host={self.host}, port={self.port}, node_uuid={self.node_uuid})"
+
+
+class ConnectionLostEvent(ConnectionClosedEvent):
+    """
+    Published when a connection to the node is lost.
+
+    :ivar host: Address of the node to connect,
+    :ivar port: Port number of the node to connect,
+    :ivar node_uuid: Node's uuid, string,
+    :ivar error_msg: Error message.
+    """
+    __slots__ = ('error_msg',)
+    node_uuid: str
+    error_msg: str
+
+    def __init__(self, host, port, node_uuid, err, **kwargs):
+        """
+        This class is not supposed to be constructed by user.
+        """
+        super().__init__(host, port, node_uuid, error_msg=repr(err) if err else '', **kwargs)
+
+    def __repr__(self):
+        return f"{self.__class__.__name__}(host={self.host}, port={self.port}, " \
+               f"node_uuid={self.node_uuid}, error_msg={self.error_msg})"
+
+
+class _EventListener:
+    pass
+
+
+class ConnectionEventListener(_EventListener):
+    """
+    Base class for connection event listeners.
+    """
+    def on_handshake_start(self, event: HandshakeStartEvent):
+        """
+        Handle handshake start event.
+ + :param event: Instance of :class:`HandshakeStartEvent`. + """ + pass + + def on_handshake_success(self, event: HandshakeSuccessEvent): + """ + Handle handshake success event. + + :param event: Instance of :class:`HandshakeSuccessEvent`. + """ + pass + + def on_handshake_fail(self, event: HandshakeFailedEvent): + """ + Handle handshake failed event. + + :param event: Instance of :class:`HandshakeFailedEvent`. + """ + pass + + def on_authentication_fail(self, event: AuthenticationFailedEvent): + """ + Handle authentication failed event. + + :param event: Instance of :class:`AuthenticationFailedEvent`. + """ + pass + + def on_connection_closed(self, event: ConnectionClosedEvent): + """ + Handle connection closed event. + + :param event: Instance of :class:`ConnectionClosedEvent`. + """ + pass + + def on_connection_lost(self, event: ConnectionLostEvent): + """ + Handle connection lost event. + + :param event: Instance of :class:`ConnectionLostEvent`. + """ + pass + + +class _QueryEvent(_BaseEvent): + __slots__ = ('host', 'port', 'node_uuid', 'query_id', 'op_code', 'op_name') + host: str + port: int + node_uuid: str + query_id: int + op_code: int + op_name: str + + def __init__(self, host, port, node_uuid, query_id, op_code, op_name, **kwargs): + """ + This class is not supposed to be constructed by user. + """ + super().__init__(host=host, port=port, node_uuid=str(node_uuid) if node_uuid else '', + query_id=query_id, op_code=op_code, op_name=op_name, **kwargs) + + def __repr__(self): + return f"{self.__class__.__name__}(host={self.host}, port={self.port}, " \ + f"node_uuid={self.node_uuid}, query_id={self.query_id}, " \ + f"op_code={self.op_code}, op_name={self.op_name})" + + +class QueryStartEvent(_QueryEvent): + """ + Published when a client's query started. + + :ivar host: Address of the node on which the query is executed, + :ivar port: Port number of the node on which the query is executed, + :ivar node_uuid: Node's uuid, string, + :ivar query_id: Query's id, + :ivar op_code: Operation's id, + :ivar op_name: Operation's name. + """ + pass + + +class QuerySuccessEvent(_QueryEvent): + """ + Published when a client's query finished successfully. + + :ivar host: Address of the node on which the query is executed, + :ivar port: Port number of the node on which the query is executed, + :ivar node_uuid: Node's uuid, string, + :ivar query_id: Query's id, + :ivar op_code: Operation's id, + :ivar op_name: Operation's name, + :ivar duration: Query's duration in milliseconds. + """ + __slots__ = ('duration', ) + duration: int + + def __init__(self, host, port, node_uuid, query_id, op_code, op_name, duration, **kwargs): + super().__init__(host, port, node_uuid, query_id, op_code, op_name, duration=duration, **kwargs) + + def __repr__(self): + return f"{self.__class__.__name__}(host={self.host}, port={self.port}, " \ + f"node_uuid={self.node_uuid}, query_id={self.query_id}, " \ + f"op_code={self.op_code}, op_name={self.op_name}, duration={self.duration})" + + +class QueryFailEvent(_QueryEvent): + """ + Published when a client's query failed. + + :ivar host: Address of the node on which the query is executed, + :ivar port: Port number of the node on which the query is executed, + :ivar node_uuid: Node's uuid, string, + :ivar query_id: Query's id, + :ivar op_code: Operation's id, + :ivar op_name: Operation's name, + :ivar duration: Query's duration in milliseconds, + :ivar error_msg: Error message. 
+ """ + __slots__ = ('duration', 'err_msg') + duration: int + err_msg: str + + def __init__(self, host, port, node_uuid, query_id, op_code, op_name, duration, err, **kwargs): + super().__init__(host, port, node_uuid, query_id, op_code, op_name, duration=duration, + err_msg=repr(err) if err else '', **kwargs) + + def __repr__(self): + return f"{self.__class__.__name__}(host={self.host}, port={self.port}, " \ + f"node_uuid={self.node_uuid}, query_id={self.query_id}, op_code={self.op_code}, " \ + f"op_name={self.op_name}, duration={self.duration}, err_msg={self.err_msg})" + + +class QueryEventListener(_EventListener): + """ + Base class for query event listeners. + """ + def on_query_start(self, event: QueryStartEvent): + """ + Handle query start event. + + :param event: Instance of :class:`QueryStartEvent`. + """ + pass + + def on_query_success(self, event: QuerySuccessEvent): + """ + Handle query success event. + + :param event: Instance of :class:`QuerySuccessEvent`. + """ + pass + + def on_query_fail(self, event: QueryFailEvent): + """ + Handle query fail event. + + :param event: Instance of :class:`QueryFailEvent`. + """ + pass + + +class _EventListeners: + def __init__(self, listeners: Optional[Sequence]): + self.__connection_listeners = [] + self.__query_listeners = [] + if listeners: + for listener in listeners: + if isinstance(listener, ConnectionEventListener): + self.__connection_listeners.append(listener) + elif isinstance(listener, QueryEventListener): + self.__query_listeners.append(listener) + + @property + def enabled_connection_listener(self): + return bool(self.__connection_listeners) + + @property + def enabled_query_listener(self): + return bool(self.__query_listeners) + + def publish_handshake_start(self, host, port, protocol_context): + evt = HandshakeStartEvent(host, port, protocol_context) + self.__publish_connection_events(lambda listener: listener.on_handshake_start(evt)) + + def publish_handshake_success(self, host, port, protocol_context, node_uuid): + evt = HandshakeSuccessEvent(host, port, protocol_context, node_uuid) + self.__publish_connection_events(lambda listener: listener.on_handshake_success(evt)) + + def publish_handshake_fail(self, host, port, protocol_context, err): + evt = HandshakeFailedEvent(host, port, protocol_context, err) + self.__publish_connection_events(lambda listener: listener.on_handshake_fail(evt)) + + def publish_authentication_fail(self, host, port, protocol_context, err): + evt = AuthenticationFailedEvent(host, port, protocol_context, err) + self.__publish_connection_events(lambda listener: listener.on_authentication_fail(evt)) + + def publish_connection_closed(self, host, port, node_uuid): + evt = ConnectionClosedEvent(host, port, node_uuid) + self.__publish_connection_events(lambda listener: listener.on_connection_closed(evt)) + + def publish_connection_lost(self, host, port, node_uuid, err): + evt = ConnectionLostEvent(host, port, node_uuid, err) + self.__publish_connection_events(lambda listener: listener.on_connection_lost(evt)) + + def publish_query_start(self, host, port, node_uuid, query_id, op_code, op_name): + evt = QueryStartEvent(host, port, node_uuid, query_id, op_code, op_name) + self.__publish_query_events(lambda listener: listener.on_query_start(evt)) + + def publish_query_success(self, host, port, node_uuid, query_id, op_code, op_name, duration): + evt = QuerySuccessEvent(host, port, node_uuid, query_id, op_code, op_name, duration) + self.__publish_query_events(lambda listener: listener.on_query_success(evt)) + + def 
publish_query_fail(self, host, port, node_uuid, query_id, op_code, op_name, duration, err): + evt = QueryFailEvent(host, port, node_uuid, query_id, op_code, op_name, duration, err) + self.__publish_query_events(lambda listener: listener.on_query_fail(evt)) + + def __publish_connection_events(self, callback): + try: + for listener in self.__connection_listeners: + callback(listener) + except: # noqa: 13 + pass + + def __publish_query_events(self, callback): + try: + for listener in self.__query_listeners: + callback(listener) + except: # noqa: 13 + pass diff --git a/pyignite/queries/__init__.py b/pyignite/queries/__init__.py index 2c2d254..56c6347 100644 --- a/pyignite/queries/__init__.py +++ b/pyignite/queries/__init__.py @@ -21,319 +21,4 @@ :mod:`pyignite.datatypes` binary parser/generator classes. """ -from collections import OrderedDict -import ctypes -from random import randint - -import attr - -from pyignite.api.result import APIResult -from pyignite.constants import * -from pyignite.datatypes import ( - AnyDataObject, Bool, Int, Long, String, StringArray, Struct, -) -from .op_codes import * - - -@attr.s -class Response: - following = attr.ib(type=list, factory=list) - _response_header = None - - def __attrs_post_init__(self): - # replace None with empty list - self.following = self.following or [] - - @classmethod - def build_header(cls): - if cls._response_header is None: - cls._response_header = type( - 'ResponseHeader', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('length', ctypes.c_int), - ('query_id', ctypes.c_longlong), - ('status_code', ctypes.c_int), - ], - }, - ) - return cls._response_header - - def parse(self, client: 'Client'): - header_class = self.build_header() - buffer = client.recv(ctypes.sizeof(header_class)) - header = header_class.from_buffer_copy(buffer) - fields = [] - - if header.status_code == OP_SUCCESS: - for name, ignite_type in self.following: - c_type, buffer_fragment = ignite_type.parse(client) - buffer += buffer_fragment - fields.append((name, c_type)) - else: - c_type, buffer_fragment = String.parse(client) - buffer += buffer_fragment - fields.append(('error_message', c_type)) - - response_class = type( - 'Response', - (header_class,), - { - '_pack_': 1, - '_fields_': fields, - } - ) - return response_class, buffer - - def to_python(self, ctype_object, *args, **kwargs): - result = OrderedDict() - - for name, c_type in self.following: - result[name] = c_type.to_python( - getattr(ctype_object, name), - *args, **kwargs - ) - - return result if result else None - - -@attr.s -class SQLResponse(Response): - """ - The response class of SQL functions is special in the way the row-column - data is counted in it. Basically, Ignite thin client API is following a - “counter right before the counted objects” rule in most of its parts. - SQL ops are breaking this rule. 
- """ - include_field_names = attr.ib(type=bool, default=False) - has_cursor = attr.ib(type=bool, default=False) - - def fields_or_field_count(self): - if self.include_field_names: - return 'fields', StringArray - return 'field_count', Int - - def parse(self, client: 'Client'): - header_class = self.build_header() - buffer = client.recv(ctypes.sizeof(header_class)) - header = header_class.from_buffer_copy(buffer) - fields = [] - - if header.status_code == OP_SUCCESS: - following = [ - self.fields_or_field_count(), - ('row_count', Int), - ] - if self.has_cursor: - following.insert(0, ('cursor', Long)) - body_struct = Struct(following) - body_class, body_buffer = body_struct.parse(client) - body = body_class.from_buffer_copy(body_buffer) - - if self.include_field_names: - field_count = body.fields.length - else: - field_count = body.field_count - - data_fields = [] - data_buffer = b'' - for i in range(body.row_count): - row_fields = [] - row_buffer = b'' - for j in range(field_count): - field_class, field_buffer = AnyDataObject.parse(client) - row_fields.append(('column_{}'.format(j), field_class)) - row_buffer += field_buffer - - row_class = type( - 'SQLResponseRow', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': row_fields, - } - ) - data_fields.append(('row_{}'.format(i), row_class)) - data_buffer += row_buffer - - data_class = type( - 'SQLResponseData', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': data_fields, - } - ) - fields += body_class._fields_ + [ - ('data', data_class), - ('more', ctypes.c_bool), - ] - buffer += body_buffer + data_buffer - else: - c_type, buffer_fragment = String.parse(client) - buffer += buffer_fragment - fields.append(('error_message', c_type)) - - final_class = type( - 'SQLResponse', - (header_class,), - { - '_pack_': 1, - '_fields_': fields, - } - ) - buffer += client.recv(ctypes.sizeof(final_class) - len(buffer)) - return final_class, buffer - - def to_python(self, ctype_object, *args, **kwargs): - if ctype_object.status_code == 0: - result = { - 'more': Bool.to_python( - ctype_object.more, *args, **kwargs - ), - 'data': [], - } - if hasattr(ctype_object, 'fields'): - result['fields'] = StringArray.to_python( - ctype_object.fields, *args, **kwargs - ) - else: - result['field_count'] = Int.to_python( - ctype_object.field_count, *args, **kwargs - ) - if hasattr(ctype_object, 'cursor'): - result['cursor'] = Long.to_python( - ctype_object.cursor, *args, **kwargs - ) - for row_item in ctype_object.data._fields_: - row_name = row_item[0] - row_object = getattr(ctype_object.data, row_name) - row = [] - for col_item in row_object._fields_: - col_name = col_item[0] - col_object = getattr(row_object, col_name) - row.append( - AnyDataObject.to_python(col_object, *args, **kwargs) - ) - result['data'].append(row) - return result - - -@attr.s -class Query: - op_code = attr.ib(type=int) - following = attr.ib(type=list, factory=list) - query_id = attr.ib(type=int, default=None) - _query_c_type = None - - @classmethod - def build_c_type(cls): - if cls._query_c_type is None: - cls._query_c_type = type( - cls.__name__, - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('length', ctypes.c_int), - ('op_code', ctypes.c_short), - ('query_id', ctypes.c_longlong), - ], - }, - ) - return cls._query_c_type - - def from_python(self, values: dict=None): - if values is None: - values = {} - buffer = b'' - - header_class = self.build_c_type() - header = header_class() - header.op_code = self.op_code - if self.query_id is 
None: - header.query_id = randint(MIN_LONG, MAX_LONG) - - for name, c_type in self.following: - buffer += c_type.from_python(values[name]) - - header.length = ( - len(buffer) - + ctypes.sizeof(header_class) - - ctypes.sizeof(ctypes.c_int) - ) - return header.query_id, bytes(header) + buffer - - def perform( - self, conn: 'Connection', query_params: dict=None, - response_config: list=None, - ) -> APIResult: - """ - Perform query and process result. - - :param conn: connection to Ignite server, - :param query_params: (optional) dict of named query parameters. - Defaults to no parameters, - :param response_config: (optional) response configuration − list of - (name, type_hint) tuples. Defaults to empty return value, - :return: instance of :class:`~pyignite.api.result.APIResult` with raw - value (may undergo further processing in API functions). - """ - _, send_buffer = self.from_python(query_params) - conn.send(send_buffer) - response_struct = Response(response_config) - response_ctype, recv_buffer = response_struct.parse(conn) - response = response_ctype.from_buffer_copy(recv_buffer) - result = APIResult(response) - if result.status == 0: - result.value = response_struct.to_python(response) - return result - - -class ConfigQuery(Query): - """ - This is a special query, used for creating caches with configuration. - """ - _query_c_type = None - - @classmethod - def build_c_type(cls): - if cls._query_c_type is None: - cls._query_c_type = type( - cls.__name__, - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('length', ctypes.c_int), - ('op_code', ctypes.c_short), - ('query_id', ctypes.c_longlong), - ('config_length', ctypes.c_int), - ], - }, - ) - return cls._query_c_type - - def from_python(self, values: dict = None): - if values is None: - values = {} - buffer = b'' - - header_class = self.build_c_type() - header = header_class() - header.op_code = self.op_code - if self.query_id is None: - header.query_id = randint(MIN_LONG, MAX_LONG) - - for name, c_type in self.following: - buffer += c_type.from_python(values[name]) - - header.length = ( - len(buffer) - + ctypes.sizeof(header_class) - - ctypes.sizeof(ctypes.c_int) - ) - header.config_length = header.length - ctypes.sizeof(header_class) - return header.query_id, bytes(header) + buffer +from .query import Query, ConfigQuery, query_perform diff --git a/pyignite/queries/cache_info.py b/pyignite/queries/cache_info.py new file mode 100644 index 0000000..6caf3ce --- /dev/null +++ b/pyignite/queries/cache_info.py @@ -0,0 +1,61 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
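The `CacheInfo` helper added below is what prefixes every cache request on the wire: the cache id, a flags byte, then an optional expiry policy and transaction id. A minimal sketch of just the flags byte, with mask values mirroring the constants defined in `cache_info.py` (the rest of the wire format is omitted here):

```python
# A sketch of the flags byte CacheInfo.from_python writes after the cache id.
# Mask values mirror cache_info.py; the stream handling itself is omitted.
TRANSACTIONS_MASK = 0x02
EXPIRY_POLICY_MASK = 0x04


def cache_flags(has_expiry_policy: bool, in_transaction: bool) -> int:
    flags = 0
    if has_expiry_policy:
        flags |= EXPIRY_POLICY_MASK  # an ExpiryPolicy payload follows the flags byte
    if in_transaction:
        flags |= TRANSACTIONS_MASK   # a signed 4-byte transaction id follows
    return flags


assert cache_flags(True, True) == 0x06
```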
+ +import attr + +from pyignite.api.tx_api import get_tx_id +from pyignite.connection.protocol_context import ProtocolContext +from pyignite.constants import PROTOCOL_BYTE_ORDER +from pyignite.datatypes import ExpiryPolicy +from pyignite.exceptions import NotSupportedByClusterError + + +@attr.s +class CacheInfo: + cache_id = attr.ib(kw_only=True, type=int, default=0) + expiry_policy = attr.ib(kw_only=True, type=ExpiryPolicy, default=None) + protocol_context = attr.ib(kw_only=True, type=ProtocolContext) + + TRANSACTIONS_MASK = 0x02 + EXPIRY_POLICY_MASK = 0x04 + + @classmethod + async def from_python_async(cls, stream, value): + return cls.from_python(stream, value) + + @classmethod + def from_python(cls, stream, value): + cache_id = value.cache_id if value else 0 + expiry_policy = value.expiry_policy if value else None + flags = 0 + + stream.write(cache_id.to_bytes(4, byteorder=PROTOCOL_BYTE_ORDER, signed=True)) + + if expiry_policy: + if not value.protocol_context.is_expiry_policy_supported(): + raise NotSupportedByClusterError("'ExpiryPolicy' API is not supported by the cluster") + flags |= cls.EXPIRY_POLICY_MASK + + tx_id = get_tx_id() + if value.protocol_context.is_transactions_supported() and tx_id: + flags |= cls.TRANSACTIONS_MASK + + stream.write(flags.to_bytes(1, byteorder=PROTOCOL_BYTE_ORDER)) + + if expiry_policy: + ExpiryPolicy.write_policy(stream, expiry_policy) + + if flags & cls.TRANSACTIONS_MASK: + stream.write(tx_id.to_bytes(4, byteorder=PROTOCOL_BYTE_ORDER, signed=True)) diff --git a/pyignite/queries/op_codes.py b/pyignite/queries/op_codes.py index 1396e83..cf19b11 100644 --- a/pyignite/queries/op_codes.py +++ b/pyignite/queries/op_codes.py @@ -43,6 +43,7 @@ OP_CACHE_REMOVE_KEYS = 1018 OP_CACHE_REMOVE_ALL = 1019 OP_CACHE_GET_SIZE = 1020 +OP_CACHE_LOCAL_PEEK = 1021 OP_CACHE_GET_NAMES = 1050 OP_CACHE_CREATE_WITH_NAME = 1051 @@ -51,6 +52,7 @@ OP_CACHE_GET_OR_CREATE_WITH_CONFIGURATION = 1054 OP_CACHE_GET_CONFIGURATION = 1055 OP_CACHE_DESTROY = 1056 +OP_CACHE_PARTITIONS = 1101 OP_QUERY_SCAN = 2000 OP_QUERY_SCAN_CURSOR_GET_PAGE = 2001 @@ -59,7 +61,13 @@ OP_QUERY_SQL_FIELDS = 2004 OP_QUERY_SQL_FIELDS_CURSOR_GET_PAGE = 2005 -P_GET_BINARY_TYPE_NAME = 3000 +OP_GET_BINARY_TYPE_NAME = 3000 OP_REGISTER_BINARY_TYPE_NAME = 3001 OP_GET_BINARY_TYPE = 3002 OP_PUT_BINARY_TYPE = 3003 + +OP_TX_START = 4000 +OP_TX_END = 4001 + +OP_CLUSTER_GET_STATE = 5000 +OP_CLUSTER_CHANGE_STATE = 5001 diff --git a/pyignite/queries/query.py b/pyignite/queries/query.py new file mode 100644 index 0000000..c141b26 --- /dev/null +++ b/pyignite/queries/query.py @@ -0,0 +1,299 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
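The `query.py` module added below is the internal request machinery the API layer drives. As a hedged sketch of how an API-level function might wrap it (the op code is taken from `op_codes.py`; the response field name and post-processing are illustrative, not copied from a real call site):

```python
# A sketch only: wrapping Query/query_perform the way api-level helpers do.
from pyignite.datatypes import StringArray
from pyignite.queries import Query, query_perform
from pyignite.queries.op_codes import OP_CACHE_GET_NAMES


def cache_get_names(conn):
    # No request body is needed for this op, so `following` stays empty.
    query_struct = Query(OP_CACHE_GET_NAMES)
    # query_perform dispatches to perform() or perform_async() depending on
    # whether `conn` is a synchronous or an asyncio connection.
    return query_perform(
        query_struct, conn,
        response_config=[('cache_names', StringArray)],
    )
```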
+ +import ctypes +import inspect +import logging +import time +from io import SEEK_CUR + +import attr + +from pyignite.api.result import APIResult +from pyignite.connection import Connection, AioConnection +from pyignite.constants import MAX_LONG, RHF_TOPOLOGY_CHANGED +from pyignite.queries import op_codes +from pyignite.queries.response import Response +from pyignite.stream import AioBinaryStream, BinaryStream, READ_BACKWARD + +logger = logging.getLogger('.'.join(__name__.split('.')[:-1])) + + +def query_perform(query_struct, conn, post_process_fun=None, **kwargs): + async def _async_internal(): + result = await query_struct.perform_async(conn, **kwargs) + if post_process_fun: + return post_process_fun(result) + return result + + def _internal(): + result = query_struct.perform(conn, **kwargs) + if post_process_fun: + return post_process_fun(result) + return result + + if isinstance(conn, AioConnection): + return _async_internal() + return _internal() + + +_QUERY_COUNTER = 0 + + +def _get_query_id(): + global _QUERY_COUNTER + if _QUERY_COUNTER >= MAX_LONG: + return 0 + _QUERY_COUNTER += 1 + return _QUERY_COUNTER + + +_OP_CODES = {code: name for name, code in inspect.getmembers(op_codes) if name.startswith('OP_')} + + +def _get_op_code_name(code): + global _OP_CODES + return _OP_CODES.get(code) + + +def _sec_to_millis(secs): + return int(secs * 1000) + + +@attr.s +class Query: + op_code = attr.ib(type=int) + following = attr.ib(type=list, factory=list) + query_id = attr.ib(type=int) + response_type = attr.ib(type=type(Response), default=Response) + _query_c_type = None + _start_ts = 0.0 + + @query_id.default + def _set_query_id(self): + return _get_query_id() + + @classmethod + def build_c_type(cls): + if cls._query_c_type is None: + cls._query_c_type = type( + cls.__name__, + (ctypes.LittleEndianStructure,), + { + '_pack_': 1, + '_fields_': [ + ('length', ctypes.c_int), + ('op_code', ctypes.c_short), + ('query_id', ctypes.c_longlong), + ], + }, + ) + return cls._query_c_type + + def from_python(self, stream, values: dict = None): + init_pos, header = stream.tell(), self._build_header(stream) + values = values if values else None + + for name, c_type in self.following: + c_type.from_python(stream, values[name]) + + self.__write_header(stream, header, init_pos) + + async def from_python_async(self, stream, values: dict = None): + init_pos, header = stream.tell(), self._build_header(stream) + values = values if values else None + + for name, c_type in self.following: + await c_type.from_python_async(stream, values[name]) + + self.__write_header(stream, header, init_pos) + + def _build_header(self, stream): + global _QUERY_COUNTER + header_class = self.build_c_type() + header_len = ctypes.sizeof(header_class) + stream.seek(header_len, SEEK_CUR) + + header = header_class() + header.op_code = self.op_code + header.query_id = self.query_id + + return header + + @staticmethod + def __write_header(stream, header, init_pos): + header.length = stream.tell() - init_pos - ctypes.sizeof(ctypes.c_int) + stream.seek(init_pos) + stream.write(header) + + def perform( + self, conn: Connection, query_params: dict = None, + response_config: list = None, **kwargs, + ) -> APIResult: + """ + Perform query and process result. + + :param conn: connection to Ignite server, + :param query_params: (optional) dict of named query parameters. + Defaults to no parameters, + :param response_config: (optional) response configuration − list of + (name, type_hint) tuples. 
Defaults to empty return value, + :return: instance of :class:`~pyignite.api.result.APIResult` with raw + value (may undergo further processing in API functions). + """ + try: + self._on_query_started(conn) + + with BinaryStream(conn.client) as stream: + self.from_python(stream, query_params) + response_data = conn.request(stream.getvalue()) + + response_struct = self.response_type(protocol_context=conn.protocol_context, + following=response_config, **kwargs) + + with BinaryStream(conn.client, response_data) as stream: + response_ctype = response_struct.parse(stream) + response = stream.read_ctype(response_ctype, direction=READ_BACKWARD) + + result = self.__post_process_response(conn, response_struct, response) + if result.status == 0: + result.value = response_struct.to_python(response) + self._on_query_finished(conn, result=result) + return result + except Exception as e: + self._on_query_finished(conn, err=e) + raise e + + async def perform_async( + self, conn: AioConnection, query_params: dict = None, + response_config: list = None, **kwargs, + ) -> APIResult: + """ + Perform query and process result. + + :param conn: connection to Ignite server, + :param query_params: (optional) dict of named query parameters. + Defaults to no parameters, + :param response_config: (optional) response configuration − list of + (name, type_hint) tuples. Defaults to empty return value, + :return: instance of :class:`~pyignite.api.result.APIResult` with raw + value (may undergo further processing in API functions). + """ + try: + self._on_query_started(conn) + + with AioBinaryStream(conn.client) as stream: + await self.from_python_async(stream, query_params) + data = await conn.request(self.query_id, stream.getvalue()) + + response_struct = self.response_type(protocol_context=conn.protocol_context, + following=response_config, **kwargs) + + with AioBinaryStream(conn.client, data) as stream: + response_ctype = await response_struct.parse_async(stream) + response = stream.read_ctype(response_ctype, direction=READ_BACKWARD) + + result = self.__post_process_response(conn, response_struct, response) + if result.status == 0: + result.value = await response_struct.to_python_async(response) + self._on_query_finished(conn, result=result) + return result + except Exception as e: + self._on_query_finished(conn, err=e) + raise e + + @staticmethod + def __post_process_response(conn, response_struct, response): + if getattr(response, 'flags', False) & RHF_TOPOLOGY_CHANGED: + # update latest affinity version + new_affinity = (response.affinity_version, response.affinity_minor) + old_affinity = conn.client.affinity_version + + if new_affinity > old_affinity: + conn.client.affinity_version = new_affinity + + # build result + return APIResult(response) + + @staticmethod + def _enabled_query_listener(conn): + client = conn.client + return client._event_listeners and client._event_listeners.enabled_query_listener + + @staticmethod + def _event_listener(conn): + return conn.client._event_listeners + + def _on_query_started(self, conn): + self._start_ts = time.monotonic() + if logger.isEnabledFor(logging.DEBUG): + logger.debug("Start query(query_id=%d, op_type=%s, host=%s, port=%d, node_id=%s)", + self.query_id, _get_op_code_name(self.op_code), conn.host, conn.port, conn.uuid) + + if self._enabled_query_listener(conn): + self._event_listener(conn).publish_query_start(conn.host, conn.port, conn.uuid, self.query_id, + self.op_code, _get_op_code_name(self.op_code)) + + def _on_query_finished(self, conn, result=None, err=None): + 
if logger.isEnabledFor(logging.DEBUG): + dur_ms = _sec_to_millis(time.monotonic() - self._start_ts) + if result and result.status != 0: + err = result.message + if err: + logger.debug("Failed to perform query(query_id=%d, op_type=%s, host=%s, port=%d, node_id=%s) " + "in %d ms: %s", self.query_id, _get_op_code_name(self.op_code), + conn.host, conn.port, conn.uuid, dur_ms, err) + if self._enabled_query_listener(conn): + self._event_listener(conn).publish_query_fail(conn.host, conn.port, conn.uuid, self.query_id, + self.op_code, _get_op_code_name(self.op_code), + dur_ms, err) + else: + logger.debug("Finished query(query_id=%d, op_type=%s, host=%s, port=%d, node_id=%s) " + "successfully in %d ms", self.query_id, _get_op_code_name(self.op_code), + conn.host, conn.port, conn.uuid, dur_ms) + if self._enabled_query_listener(conn): + self._event_listener(conn).publish_query_success(conn.host, conn.port, conn.uuid, self.query_id, + self.op_code, _get_op_code_name(self.op_code), + dur_ms) + + +class ConfigQuery(Query): + """ + This is a special query, used for creating caches with configuration. + """ + _query_c_type = None + + @classmethod + def build_c_type(cls): + if cls._query_c_type is None: + cls._query_c_type = type( + cls.__name__, + (ctypes.LittleEndianStructure,), + { + '_pack_': 1, + '_fields_': [ + ('length', ctypes.c_int), + ('op_code', ctypes.c_short), + ('query_id', ctypes.c_longlong), + ('config_length', ctypes.c_int), + ], + }, + ) + return cls._query_c_type + + def _build_header(self, stream): + header = super()._build_header(stream) + header.config_length = header.length - ctypes.sizeof(type(header)) + return header diff --git a/pyignite/queries/response.py b/pyignite/queries/response.py new file mode 100644 index 0000000..11e71a7 --- /dev/null +++ b/pyignite/queries/response.py @@ -0,0 +1,350 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
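The response parser added below starts from one of two fixed little-endian headers, depending on whether the server supports status flags. A self-contained sketch of the newer layout, using plain `ctypes` and no live connection (the byte values are made up for illustration):

```python
import ctypes


# Mirrors the StatusFlagResponseHeader layout defined in response.py:
# 4-byte length, 8-byte query id, 2-byte flags, packed little-endian.
class _Header(ctypes.LittleEndianStructure):
    _pack_ = 1
    _fields_ = [
        ('length', ctypes.c_int),
        ('query_id', ctypes.c_longlong),
        ('flags', ctypes.c_short),
    ]


raw = (10).to_bytes(4, 'little') + (42).to_bytes(8, 'little') + (0).to_bytes(2, 'little')
header = _Header.from_buffer_copy(raw)
assert (header.length, header.query_id, header.flags) == (10, 42, 0)
```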
+import asyncio +from io import SEEK_CUR + +import attr +from collections import OrderedDict +import ctypes + +from pyignite.connection.protocol_context import ProtocolContext +from pyignite.constants import RHF_TOPOLOGY_CHANGED, RHF_ERROR +from pyignite.datatypes import AnyDataObject, Bool, Int, Long, String, StringArray, Struct +from pyignite.datatypes.binary import body_struct, enum_struct, schema_struct +from pyignite.queries.op_codes import OP_SUCCESS +from pyignite.stream import READ_BACKWARD + + +class StatusFlagResponseHeader(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('length', ctypes.c_int), + ('query_id', ctypes.c_longlong), + ('flags', ctypes.c_short) + ] + + +class ResponseHeader(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('length', ctypes.c_int), + ('query_id', ctypes.c_longlong), + ('status_code', ctypes.c_int) + ] + + +@attr.s +class Response: + following = attr.ib(type=list, factory=list) + protocol_context = attr.ib(type=type(ProtocolContext), default=None) + _response_class_name = 'Response' + + def __attrs_post_init__(self): + # replace None with empty list + self.following = self.following or [] + + def __parse_header(self, stream): + init_pos = stream.tell() + + if self.protocol_context.is_status_flags_supported(): + header_class = StatusFlagResponseHeader + else: + header_class = ResponseHeader + + header_len = ctypes.sizeof(header_class) + header = stream.read_ctype(header_class) + stream.seek(header_len, SEEK_CUR) + + fields = [] + has_error = False + if self.protocol_context.is_status_flags_supported(): + if header.flags & RHF_TOPOLOGY_CHANGED: + fields = [ + ('affinity_version', ctypes.c_longlong), + ('affinity_minor', ctypes.c_int), + ] + + if header.flags & RHF_ERROR: + fields.append(('status_code', ctypes.c_int)) + has_error = True + else: + has_error = header.status_code != OP_SUCCESS + + if fields: + stream.seek(sum(ctypes.sizeof(c_type) for _, c_type in fields), SEEK_CUR) + + if has_error: + msg_type = String.parse(stream) + fields.append(('error_message', msg_type)) + + return not has_error, init_pos, header_class, fields + + def __build_response_class(self, stream, init_pos, header_class, fields): + response_class = type( + self._response_class_name, + (header_class,), + { + '_pack_': 1, + '_fields_': fields, + } + ) + + stream.seek(init_pos + ctypes.sizeof(response_class)) + return response_class + + def parse(self, stream): + success, init_pos, header_class, fields = self.__parse_header(stream) + if success: + self._parse_success(stream, fields) + + return self.__build_response_class(stream, init_pos, header_class, fields) + + async def parse_async(self, stream): + success, init_pos, header_class, fields = self.__parse_header(stream) + if success: + await self._parse_success_async(stream, fields) + + return self.__build_response_class(stream, init_pos, header_class, fields) + + def _parse_success(self, stream, fields: list): + for name, ignite_type in self.following: + c_type = ignite_type.parse(stream) + fields.append((name, c_type)) + + async def _parse_success_async(self, stream, fields: list): + for name, ignite_type in self.following: + c_type = await ignite_type.parse_async(stream) + fields.append((name, c_type)) + + def to_python(self, ctypes_object, **kwargs): + if not self.following: + return None + + result = OrderedDict() + for name, c_type in self.following: + result[name] = c_type.to_python(getattr(ctypes_object, name), **kwargs) + + return result + + async def to_python_async(self, ctypes_object, 
**kwargs): + if not self.following: + return None + + values = await asyncio.gather( + *[c_type.to_python_async(getattr(ctypes_object, name), **kwargs) for name, c_type in self.following] + ) + + return OrderedDict([(name, values[i]) for i, (name, _) in enumerate(self.following)]) + + +@attr.s +class SQLResponse(Response): + """ + The response class of SQL functions is special in the way the row-column + data is counted in it. Basically, Ignite thin client API is following a + “counter right before the counted objects” rule in most of its parts. + SQL ops are breaking this rule. + """ + include_field_names = attr.ib(type=bool, default=False) + has_cursor = attr.ib(type=bool, default=False) + _response_class_name = 'SQLResponse' + + def fields_or_field_count(self): + if self.include_field_names: + return 'fields', StringArray + return 'field_count', Int + + def _parse_success(self, stream, fields: list): + body_struct = self.__create_body_struct() + body_class = body_struct.parse(stream) + body = stream.read_ctype(body_class, direction=READ_BACKWARD) + + data_fields, field_count = [], self.__get_fields_count(body) + for i in range(body.row_count): + row_fields = [] + for j in range(field_count): + field_class = AnyDataObject.parse(stream) + row_fields.append(('column_{}'.format(j), field_class)) + + self.__row_post_process(i, row_fields, data_fields) + + self.__body_class_post_process(body_class, fields, data_fields) + + async def _parse_success_async(self, stream, fields: list): + body_struct = self.__create_body_struct() + body_class = await body_struct.parse_async(stream) + body = stream.read_ctype(body_class, direction=READ_BACKWARD) + + data_fields, field_count = [], self.__get_fields_count(body) + for i in range(body.row_count): + row_fields = [] + for j in range(field_count): + field_class = await AnyDataObject.parse_async(stream) + row_fields.append(('column_{}'.format(j), field_class)) + + self.__row_post_process(i, row_fields, data_fields) + + self.__body_class_post_process(body_class, fields, data_fields) + + def __create_body_struct(self): + following = [self.fields_or_field_count(), ('row_count', Int)] + if self.has_cursor: + following.insert(0, ('cursor', Long)) + return Struct(following) + + def __get_fields_count(self, body): + if self.include_field_names: + return body.fields.length + return body.field_count + + @staticmethod + def __row_post_process(idx, row_fields, data_fields): + row_class = type( + 'SQLResponseRow', + (ctypes.LittleEndianStructure,), + { + '_pack_': 1, + '_fields_': row_fields, + } + ) + data_fields.append((f'row_{idx}', row_class)) + + @staticmethod + def __body_class_post_process(body_class, fields, data_fields): + data_class = type( + 'SQLResponseData', + (ctypes.LittleEndianStructure,), + { + '_pack_': 1, + '_fields_': data_fields, + } + ) + fields += body_class._fields_ + [ + ('data', data_class), + ('more', ctypes.c_byte), + ] + + def to_python(self, ctypes_object, **kwargs): + if getattr(ctypes_object, 'status_code', 0) == 0: + result = self.__to_python_result_header(ctypes_object, **kwargs) + + for row_item in ctypes_object.data._fields_: + row_name = row_item[0] + row_object = getattr(ctypes_object.data, row_name) + row = [] + for col_item in row_object._fields_: + col_name = col_item[0] + col_object = getattr(row_object, col_name) + row.append(AnyDataObject.to_python(col_object, **kwargs)) + result['data'].append(row) + return result + + async def to_python_async(self, ctypes_object, **kwargs): + if getattr(ctypes_object, 'status_code', 0) == 
0: + result = self.__to_python_result_header(ctypes_object, **kwargs) + + data_coro = [] + for row_item in ctypes_object.data._fields_: + row_name = row_item[0] + row_object = getattr(ctypes_object.data, row_name) + row_coro = [] + for col_item in row_object._fields_: + col_name = col_item[0] + col_object = getattr(row_object, col_name) + row_coro.append(AnyDataObject.to_python_async(col_object, **kwargs)) + + data_coro.append(asyncio.gather(*row_coro)) + + result['data'] = await asyncio.gather(*data_coro) + return result + + @staticmethod + def __to_python_result_header(ctypes_object, *args, **kwargs): + result = { + 'more': Bool.to_python(ctypes_object.more, *args, **kwargs), + 'data': [], + } + if hasattr(ctypes_object, 'fields'): + result['fields'] = StringArray.to_python(ctypes_object.fields, *args, **kwargs) + else: + result['field_count'] = Int.to_python(ctypes_object.field_count, *args, **kwargs) + + if hasattr(ctypes_object, 'cursor'): + result['cursor'] = Long.to_python(ctypes_object.cursor, *args, **kwargs) + return result + + +class BinaryTypeResponse(Response): + _response_class_name = 'GetBinaryTypeResponse' + + def _parse_success(self, stream, fields: list): + type_exists = self.__process_type_exists(stream, fields) + + if type_exists.value: + resp_body_type = body_struct.parse(stream) + fields.append(('body', resp_body_type)) + resp_body = stream.read_ctype(resp_body_type, direction=READ_BACKWARD) + if resp_body.is_enum: + resp_enum = enum_struct.parse(stream) + fields.append(('enums', resp_enum)) + + resp_schema_type = schema_struct.parse(stream) + fields.append(('schema', resp_schema_type)) + + async def _parse_success_async(self, stream, fields: list): + type_exists = self.__process_type_exists(stream, fields) + + if type_exists.value: + resp_body_type = await body_struct.parse_async(stream) + fields.append(('body', resp_body_type)) + resp_body = stream.read_ctype(resp_body_type, direction=READ_BACKWARD) + if resp_body.is_enum: + resp_enum = await enum_struct.parse_async(stream) + fields.append(('enums', resp_enum)) + + resp_schema_type = await schema_struct.parse_async(stream) + fields.append(('schema', resp_schema_type)) + + @staticmethod + def __process_type_exists(stream, fields): + fields.append(('type_exists', ctypes.c_byte)) + type_exists = stream.read_ctype(ctypes.c_byte) + stream.seek(ctypes.sizeof(ctypes.c_byte), SEEK_CUR) + + return type_exists + + def to_python(self, ctypes_object, **kwargs): + if getattr(ctypes_object, 'status_code', 0) == 0: + result = { + 'type_exists': Bool.to_python(ctypes_object.type_exists) + } + + if hasattr(ctypes_object, 'body'): + result.update(body_struct.to_python(ctypes_object.body)) + + if hasattr(ctypes_object, 'enums'): + result['enums'] = enum_struct.to_python(ctypes_object.enums) + + if hasattr(ctypes_object, 'schema'): + result['schema'] = { + x['schema_id']: [ + z['schema_field_id'] for z in x['schema_fields'] + ] + for x in schema_struct.to_python(ctypes_object.schema) + } + return result + + async def to_python_async(self, ctypes_object, **kwargs): + return self.to_python(ctypes_object, **kwargs) diff --git a/pyignite/stream/__init__.py b/pyignite/stream/__init__.py new file mode 100644 index 0000000..76d171d --- /dev/null +++ b/pyignite/stream/__init__.py @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .binary_stream import BinaryStream, AioBinaryStream, READ_FORWARD, READ_BACKWARD + +__all__ = ['BinaryStream', 'AioBinaryStream', 'READ_BACKWARD', 'READ_FORWARD'] diff --git a/pyignite/stream/binary_stream.py b/pyignite/stream/binary_stream.py new file mode 100644 index 0000000..3923a3b --- /dev/null +++ b/pyignite/stream/binary_stream.py @@ -0,0 +1,144 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import ctypes +from io import BytesIO +from typing import Union, Optional + +import pyignite +import pyignite.utils as ignite_utils + +READ_FORWARD = 0 +READ_BACKWARD = 1 + + +class BinaryStreamBase: + def __init__(self, client, buf=None): + self.client = client + self.stream = BytesIO(buf) if buf else BytesIO() + self._buffer = None + + @property + def compact_footer(self) -> bool: + return self.client.compact_footer + + @compact_footer.setter + def compact_footer(self, value: bool): + self.client.compact_footer = value + + def read(self, size): + buf = bytearray(size) + self.stream.readinto(buf) + return buf + + def read_ctype(self, ctype_class, position=None, direction=READ_FORWARD): + ctype_len = ctypes.sizeof(ctype_class) + + if position is not None and position >= 0: + init_position = position + else: + init_position = self.tell() + + if direction == READ_FORWARD: + start, end = init_position, init_position + ctype_len + else: + start, end = init_position - ctype_len, init_position + + with self.getbuffer()[start:end] as buf: + return ctype_class.from_buffer_copy(buf) + + def write(self, buf): + self._release_buffer() + return self.stream.write(buf) + + def tell(self): + return self.stream.tell() + + def seek(self, *args, **kwargs): + return self.stream.seek(*args, **kwargs) + + def getbuffer(self): + if self._buffer: + return self._buffer + + self._buffer = self.stream.getbuffer() + return self._buffer + + def getvalue(self): + return self.stream.getvalue() + + def slice(self, start=-1, offset=0): + start = start if start >= 0 else self.tell() + with self.getbuffer()[start:start + offset] as buf: + return bytes(buf) + + def hashcode(self, start, bytes_len): + with self.getbuffer()[start:start + bytes_len] as buf: + return ignite_utils.hashcode(buf) + + def _release_buffer(self): + if 
self._buffer: + self._buffer.release() + self._buffer = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self._release_buffer() + self.stream.close() + + +class BinaryStream(BinaryStreamBase): + """ + Synchronous binary stream. + """ + def __init__(self, client: 'pyignite.Client', buf: Optional[Union[bytes, bytearray, memoryview]] = None): + """ + :param client: Client instance, required. + :param buf: Buffer, optional parameter. If not passed, creates empty BytesIO. + """ + super().__init__(client, buf) + + def get_dataclass(self, header): + result = self.client.query_binary_type(header.type_id, header.schema_id) + if not result: + raise RuntimeError('Binary type is not registered') + return result + + def register_binary_type(self, *args, **kwargs): + self.client.register_binary_type(*args, **kwargs) + + +class AioBinaryStream(BinaryStreamBase): + """ + Asyncio binary stream. + """ + def __init__(self, client: 'pyignite.AioClient', buf: Optional[Union[bytes, bytearray, memoryview]] = None): + """ + Initialize binary stream around buffers. + + :param client: AioClient instance, required. + :param buf: Buffer, optional parameter. If not passed, creates empty BytesIO. + """ + super().__init__(client, buf) + + async def get_dataclass(self, header): + result = await self.client.query_binary_type(header.type_id, header.schema_id) + if not result: + raise RuntimeError('Binary type is not registered') + return result + + async def register_binary_type(self, *args, **kwargs): + await self.client.register_binary_type(*args, **kwargs) diff --git a/pyignite/transaction.py b/pyignite/transaction.py new file mode 100644 index 0000000..3003eb6 --- /dev/null +++ b/pyignite/transaction.py @@ -0,0 +1,148 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
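The transaction API added below is a thin context-manager wrapper over the `OP_TX_START`/`OP_TX_END` operations. A minimal usage sketch, assuming a running node that supports transactions and a cache created in `TRANSACTIONAL` atomicity mode (the cache operations themselves are elided):

```python
# A usage sketch, not taken from the test suite: start a pessimistic,
# repeatable-read transaction and commit it explicitly; leaving the block
# without commit() rolls the transaction back via close().
from pyignite import Client
from pyignite.datatypes import TransactionConcurrency, TransactionIsolation
from pyignite.transaction import Transaction

client = Client()
with client.connect('127.0.0.1', 10800):
    with Transaction(client,
                     concurrency=TransactionConcurrency.PESSIMISTIC,
                     isolation=TransactionIsolation.REPEATABLE_READ,
                     timeout=3000, label='example-tx') as tx:
        # ... operations on a TRANSACTIONAL cache go here ...
        tx.commit()
```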
+ +from enum import IntEnum +from typing import Union, Type + +from pyignite.api.tx_api import tx_end, tx_start, tx_end_async, tx_start_async +from pyignite.datatypes import TransactionIsolation, TransactionConcurrency +from pyignite.exceptions import CacheError +from pyignite.utils import status_to_exception + + +def _validate_int_enum_param(value: Union[int, IntEnum], cls: Type[IntEnum]): + if value not in set(v.value for v in cls): # Use this trick to disable warning on python 3.7 + raise ValueError(f'{value} not in {cls}') + return value + + +def _validate_timeout(value): + if not isinstance(value, int) or value < 0: + raise ValueError(f'Timeout value should be a positive integer, {value} passed instead') + return value + + +def _validate_label(value): + if value and not isinstance(value, str): + raise ValueError(f'Label should be str, {type(value)} passed instead') + return value + + +class _BaseTransaction: + def __init__(self, client, concurrency=TransactionConcurrency.PESSIMISTIC, + isolation=TransactionIsolation.REPEATABLE_READ, timeout=0, label=None): + self.client = client + self.concurrency = _validate_int_enum_param(concurrency, TransactionConcurrency) + self.isolation = _validate_int_enum_param(isolation, TransactionIsolation) + self.timeout = _validate_timeout(timeout) + self.label, self.closed = _validate_label(label), False + + +class Transaction(_BaseTransaction): + """ + Thin client transaction. + """ + def __init__(self, client, concurrency=TransactionConcurrency.PESSIMISTIC, + isolation=TransactionIsolation.REPEATABLE_READ, timeout=0, label=None): + super().__init__(client, concurrency, isolation, timeout, label) + self.tx_id = self.__start_tx() + + def commit(self) -> None: + """ + Commit transaction. + """ + if not self.closed: + self.closed = True + return self.__end_tx(True) + + def rollback(self) -> None: + """ + Rollback transaction. + """ + self.close() + + def close(self) -> None: + """ + Close transaction. + """ + if not self.closed: + self.closed = True + return self.__end_tx(False) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + + @status_to_exception(CacheError) + def __start_tx(self): + conn = self.client.random_node + return tx_start(conn, self.concurrency, self.isolation, self.timeout, self.label) + + @status_to_exception(CacheError) + def __end_tx(self, committed): + return tx_end(self.tx_id, committed) + + +class AioTransaction(_BaseTransaction): + """ + Async thin client transaction. + """ + def __init__(self, client, concurrency=TransactionConcurrency.PESSIMISTIC, + isolation=TransactionIsolation.REPEATABLE_READ, timeout=0, label=None): + super().__init__(client, concurrency, isolation, timeout, label) + + def __await__(self): + return (yield from self.__aenter__().__await__()) + + async def commit(self) -> None: + """ + Commit transaction. + """ + if not self.closed: + self.closed = True + return await self.__end_tx(True) + + async def rollback(self) -> None: + """ + Rollback transaction. + """ + await self.close() + + async def close(self) -> None: + """ + Close transaction. 
+ """ + if not self.closed: + self.closed = True + return await self.__end_tx(False) + + async def __aenter__(self): + self.tx_id = await self.__start_tx() + self.closed = False + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.close() + + @status_to_exception(CacheError) + async def __start_tx(self): + conn = await self.client.random_node() + return await tx_start_async(conn, self.concurrency, self.isolation, self.timeout, self.label) + + @status_to_exception(CacheError) + async def __end_tx(self, committed): + return await tx_end_async(self.tx_id, committed) diff --git a/pyignite/utils.py b/pyignite/utils.py index 1d4298e..5fcbd38 100644 --- a/pyignite/utils.py +++ b/pyignite/utils.py @@ -13,14 +13,35 @@ # See the License for the specific language governing permissions and # limitations under the License. +import ctypes +import decimal +import inspect +import warnings + from functools import wraps -from typing import Any, Type, Union +from typing import Any, Optional, Type, Tuple, Union from pyignite.datatypes.base import IgniteDataType from .constants import * +FALLBACK = False + +try: + from pyignite import _cutils +except ImportError: + FALLBACK = True + + +LONG_MASK = 0xffffffff +DIGITS_PER_INT = 9 + + +def is_pow2(value: int) -> bool: + """ Check if value is power of two. """ + return value > 0 and ((value & (value - 1)) == 0) + -def is_iterable(value): +def is_iterable(value: Any) -> bool: """ Check if value is iterable. """ try: iter(value) @@ -45,23 +66,8 @@ def is_hinted(value): """ Check if a value is a tuple of data item and its type hint. """ - return ( - isinstance(value, tuple) - and len(value) == 2 - and issubclass(value[1], IgniteDataType) - ) - - -def is_wrapped(value: Any) -> bool: - """ - Check if a value is of WrappedDataObject type. - """ - return ( - type(value) is tuple - and len(value) == 2 - and type(value[0]) is bytes - and type(value[1]) is int - ) + return isinstance(value, tuple) and len(value) == 2 and inspect.isclass(value[1]) and \ + issubclass(value[1], IgniteDataType) def int_overflow(value: int) -> int: @@ -71,40 +77,47 @@ def int_overflow(value: int) -> int: return ((value ^ 0x80000000) & 0xffffffff) - 0x80000000 -def unwrap_binary(client: 'Client', wrapped: tuple): +def hashcode(data: Union[str, bytes, bytearray, memoryview]) -> int: """ - Unwrap wrapped BinaryObject and convert it to Python data. + Calculate hash code used for identifying objects in Ignite binary API. - :param client: connection to Ignite cluster, - :param wrapped: `WrappedDataObject` value, - :return: dict representing wrapped BinaryObject. + :param data: UTF-8-encoded string identifier of binary buffer or byte array + :return: hash code. """ - from pyignite.datatypes.complex import BinaryObject + if FALLBACK: + return __hashcode_fallback(data) - blob, offset = wrapped - client_clone = client.clone(prefetch=blob) - client_clone.pos = offset - data_class, data_bytes = BinaryObject.parse(client_clone) - return BinaryObject.to_python( - data_class.from_buffer_copy(data_bytes), - client, - ) + return _cutils.hashcode(data) -def hashcode(string: Union[str, bytes]) -> int: - """ - Calculate hash code used for identifying objects in Ignite binary API. +def __hashcode_fallback(data: Union[str, bytes, bytearray, memoryview]) -> int: + if data is None: + return 0 - :param string: UTF-8-encoded string identifier of binary buffer, - :return: hash code. 
- """ - result = 0 - for char in string: - try: - char = ord(char) - except TypeError: - pass - result = int_overflow(31 * result + char) + if isinstance(data, str): + """ + For strings we iterate over code point which are of the int type + and can take up to 4 bytes and can only be positive. + """ + result = 0 + for char in data: + try: + char_val = ord(char) + result = int_overflow(31 * result + char_val) + except TypeError: + pass + else: + """ + For byte array we iterate over bytes which only take 1 byte. But + according to protocol, bytes during hashing should be treated as signed + integer numbers 8 bits long. On other hand elements in Python's `bytes` + are unsigned. For this reason we use ctypes.c_byte() to make them + signed. + """ + result = 1 + for byte in data: + byte = ctypes.c_byte(byte).value + result = int_overflow(31 * result + byte) return result @@ -118,13 +131,15 @@ def cache_id(cache: Union[str, int]) -> int: return cache if type(cache) is int else hashcode(cache) -def entity_id(cache: Union[str, int]) -> int: +def entity_id(cache: Union[str, int]) -> Optional[int]: """ Create a type ID from type name or field ID from field name. :param cache: entity name or ID, :return: entity ID. """ + if cache is None: + return None return cache if type(cache) is int else hashcode(cache.lower()) @@ -135,13 +150,21 @@ def schema_id(schema: Union[int, dict]) -> int: :param schema: a dict of field names: field types, :return: schema ID. """ - if type(schema) is int: + if FALLBACK: + return __schema_id_fallback(schema) + return _cutils.schema_id(schema) + + +def __schema_id_fallback(schema: Union[int, dict]) -> int: + if isinstance(schema, int): return schema + if schema is None: return 0 + s_id = FNV1_OFFSET_BASIS if schema else 0 for field_name in schema.keys(): - field_id = entity_id(field_name) + field_id = __hashcode_fallback(field_name.lower()) s_id ^= (field_id & 0xff) s_id = int_overflow(s_id * FNV1_PRIME) s_id ^= ((field_id >> 8) & 0xff) @@ -153,20 +176,120 @@ def schema_id(schema: Union[int, dict]) -> int: return s_id +def decimal_hashcode(value: decimal.Decimal) -> int: + """ + This is a translation of `java.math.BigDecimal` class `hashCode()` method + to Python. + + :param value: pythonic decimal value, + :return: hashcode. + """ + sign, digits, scale = value.normalize().as_tuple() + sign = -1 if sign else 1 + value = int(''.join([str(d) for d in digits])) + + if value < MAX_LONG: + # this is the case when Java BigDecimal digits are stored + # compactly, in the internal 64-bit integer field + int_hash = ( + (unsigned(value, ctypes.c_ulonglong) >> 32) * 31 + (value & LONG_MASK) + ) & LONG_MASK + else: + # digits are not fit in the 64-bit long, so they get split internally + # to an array of values within 32-bit integer range each (it is really + # a part of `java.math.BigInteger` class internals) + magnitude = [] + order = 0 + while True: + elem = value >> order + if elem > 1: + magnitude.insert(0, ctypes.c_int(elem).value) + order += 32 + else: + break + + int_hash = 0 + for v in magnitude: + int_hash = (31 * int_hash + (v & LONG_MASK)) & LONG_MASK + + return ctypes.c_int(31 * int_hash * sign - scale).value + + +def datetime_hashcode(value: int) -> int: + """ + Calculates hashcode from UNIX epoch. + + :param value: UNIX time, + :return: Java hashcode. + """ + return (value & LONG_MASK) ^ (unsigned(value, ctypes.c_ulonglong) >> 32) + + def status_to_exception(exc: Type[Exception]): """ Converts erroneous status code with error message to an exception - of the given class. 
+ of the given class. Supports coroutines.
 :param exc: the class of exception to raise,
- :return: decorator.
+ :return: decorated function.
 """
+ def process_result(result):
+ if result.status != 0:
+ raise exc(result.message)
+ return result.value
+
 def ste_decorator(fn):
- @wraps(fn)
- def ste_wrapper(*args, **kwargs):
- result = fn(*args, **kwargs)
- if result.status != 0:
- raise exc(result.message)
- return result.value
- return ste_wrapper
+ if inspect.iscoroutinefunction(fn):
+ @wraps(fn)
+ async def ste_wrapper_async(*args, **kwargs):
+ return process_result(await fn(*args, **kwargs))
+ return ste_wrapper_async
+ else:
+ @wraps(fn)
+ def ste_wrapper(*args, **kwargs):
+ return process_result(fn(*args, **kwargs))
+ return ste_wrapper
 return ste_decorator
+
+
+def get_field_by_id(obj: 'GenericObjectMeta', field_id: int) -> Tuple[Any, IgniteDataType]:
+ """
+ Returns a complex object's field value, given the field's entity ID.
+
+ :param obj: complex object,
+ :param field_id: field ID,
+ :return: complex object field's value and type.
+ """
+ for fname, ftype in obj._schema.items():
+ if entity_id(fname) == field_id:
+ return getattr(obj, fname, getattr(ftype, 'default')), ftype
+
+
+def unsigned(value: int, c_type: ctypes._SimpleCData = ctypes.c_uint) -> int:
+ """ Convert signed integer value to unsigned. """
+ return c_type(value).value
+
+
+def capitalize(string: str) -> str:
+ """
+ Capitalizes the string, assuming the first character is a letter.
+ Does not touch any other characters, unlike the standard `str.capitalize()`.
+ """
+ return string[:1].upper() + string[1:]
+
+
+def process_delimiter(name: str, delimiter: str) -> str:
+ """
+ Splits the name by the delimiter, capitalizes each part and merges them.
+ """
+ return ''.join([capitalize(x) for x in name.split(delimiter)])
+
+
+def deprecated(version, reason):
+ def decorator_deprecated(fn):
+ @wraps(fn)
+ def wrapper_deprecated(*args, **kwds):
+ warnings.warn(f'Deprecated since {version}.
The reason: {reason}', category=DeprecationWarning) + return fn(*args, **kwds) + return wrapper_deprecated + return decorator_deprecated diff --git a/requirements/docs.txt b/requirements/docs.txt index 75ab231..d088fff 100644 --- a/requirements/docs.txt +++ b/requirements/docs.txt @@ -2,5 +2,6 @@ # (look up the prebuilt docs in `docs/generated`) -r install.txt -Sphinx==1.7.5 -sphinxcontrib-fulltoc==1.2.0 +wheel==0.36.2 +Sphinx==4.3.2 +sphinxcontrib-fulltoc==1.2.0 \ No newline at end of file diff --git a/requirements/install.txt b/requirements/install.txt index 9b87ae8..aa8290f 100644 --- a/requirements/install.txt +++ b/requirements/install.txt @@ -1,4 +1,4 @@ # these pip packages are necessary for the pyignite to run -typing==3.6.6; python_version<'3.5' -attrs==18.1.0 +attrs>=20.3.0 +contextvars>=2.4;python_version<"3.7" diff --git a/requirements/setup.txt b/requirements/setup.txt deleted file mode 100644 index 7c55f83..0000000 --- a/requirements/setup.txt +++ /dev/null @@ -1,3 +0,0 @@ -# additional package for integrating pytest in setuptools - -pytest-runner==4.2 diff --git a/requirements/tests.txt b/requirements/tests.txt index c107c8b..5dc815a 100644 --- a/requirements/tests.txt +++ b/requirements/tests.txt @@ -1,5 +1,10 @@ # these packages are used for testing -pytest==3.6.1 -pytest-cov==2.5.1 -teamcity-messages==1.21 +pytest==6.2.5 +pytest-cov==2.11.1 +pytest-asyncio==0.14.0 +teamcity-messages==1.28 +psutil==5.8.0 +jinja2==3.0.3 +markupsafe==2.0.1 +flake8==3.8.4 diff --git a/scripts/BuildWheels.ps1 b/scripts/BuildWheels.ps1 new file mode 100644 index 0000000..9098d58 --- /dev/null +++ b/scripts/BuildWheels.ps1 @@ -0,0 +1,33 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +$PyVers="37","38","39","310" + +[System.Collections.ArrayList]$PyVersFull = $PyVers +foreach ($Ver in $PyVers) +{ + [Void]$PyVersFull.Add("$Ver-32") +} + +foreach ($Ver in $PyVersFull) +{ + & "$env:LOCALAPPDATA\Programs\Python\Python$Ver\python.exe" -m venv epy$Ver + + . ".\epy$Ver\Scripts\Activate.ps1" + pip install -e . + pip install wheel + pip wheel . --no-deps -w distr +} + diff --git a/scripts/apply_pull_request.sh b/scripts/apply_pull_request.sh new file mode 100755 index 0000000..ba05a82 --- /dev/null +++ b/scripts/apply_pull_request.sh @@ -0,0 +1,239 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Pull request applier.
+#
+
+#
+# Start of Functions.
+#
+
+#
+# Prints usage.
+#
+usage () {
+ echo 'Usage: scripts/apply-pull-request.sh <pull-request-id> [-tb|--targetbranch <branch-name>] [--with-gpg] [-s|--sign-off]'
+ echo 'The script takes pull-request by given id and merges (with squash) all changes to target branch (master by default).'
+ echo "Argument 'pull-request-id' is mandatory."
+ echo "Target branch can be overwritten by using [-tb|--targetbranch <branch-name>] argument parameters."
+}
+
+#
+# End of Functions.
+#
+
+if [ "${GIT_HOME}" = "" ]; then
+ GIT_HOME="$(dirname "$(cd "$(dirname "$0")"; "pwd")")";
+fi
+
+cd "${GIT_HOME}" || { echo "failed to change directory to ${GIT_HOME}"; exit 1; }
+
+if [ "${SCRIPTS_HOME}" = "" ]; then
+ SCRIPTS_HOME="${GIT_HOME}/scripts/"
+fi
+
+. "${SCRIPTS_HOME}"/git_patch_functions.sh # Import patch functions.
+
+PR_ID=$1
+
+#
+# Start reading of command line params.
+#
+if [ "${PR_ID}" = "" ]; then
+ echo "$0, ERROR:"
+ echo >&2 "You have to specify 'pull-request-id'."
+ echo
+ usage
+ exit 1
+fi
+
+if [ "${PR_ID}" = "-h" ]; then
+ usage
+ exit 0
+fi
+
+if [ "${PR_ID}" = "--help" ]; then
+ usage
+ exit 0
+fi
+
+
+while [[ $# -ge 2 ]]
+do
+ key="$2"
+
+ case $key in
+ -tb|--targetbranch)
+ TARGET_BRANCH="$3"
+ shift 2
+ ;;
+
+ --with-gpg)
+ WITH_GPG="true"
+ shift
+ ;;
+
+ -s|--sign-off)
+ WITH_SIGN_OFF="true"
+ shift
+ ;;
+
+ *)
+ echo "Unknown parameter: ${key}"
+ echo
+ usage
+ exit 1
+ ;;
+ esac
+done
+#
+# End reading of command line params.
+#
+
+
+# Script variables.
+if [ "${APACHE_GIT}" = "" ]; then
+ APACHE_GIT="https://gitbox.apache.org/repos/asf/ignite-python-thin-client.git"
+fi
+
+if [ "${GITHUB_MIRROR}" = "" ]; then
+ GITHUB_MIRROR="git@github.com:apache/ignite-python-thin-client.git"
+fi
+
+if [ "${TARGET_BRANCH}" = "" ]; then
+ TARGET_BRANCH="master"
+fi
+
+requireCleanWorkTree "${GIT_HOME}"
+
+CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
+
+if [ "$CURRENT_BRANCH" != "${TARGET_BRANCH}" ]; then
+ echo "$0, ERROR:"
+ echo "You have to be on ${TARGET_BRANCH} branch."
+
+ exit 1
+fi
+
+# Check that target branch is up-to-date.
+APACHE_GIT_TARGET_BRANCH="apache-git-target-br-tmp"
+
+git fetch ${APACHE_GIT} ${TARGET_BRANCH}:${APACHE_GIT_TARGET_BRANCH} &> /dev/null
+if test $? != 0; then
+ echo "$0, ERROR:"
+ echo >&2 "Couldn't fetch '${TARGET_BRANCH}' branch from ${APACHE_GIT}."
+ exit 1
+fi
+
+LOCAL_TARGET_BR_HASH=$(git rev-parse @)
+REMOTE_TARGET_BR_HASH=$(git rev-parse ${APACHE_GIT_TARGET_BRANCH})
+BASE_HASH=$(git merge-base @ ${APACHE_GIT_TARGET_BRANCH})
+
+git branch -D ${APACHE_GIT_TARGET_BRANCH} &> /dev/null
+
+if [ "$LOCAL_TARGET_BR_HASH" != "$REMOTE_TARGET_BR_HASH" ]; then
+ echo "$0, ERROR:"
+
+ if [ "$LOCAL_TARGET_BR_HASH" = "$BASE_HASH" ]; then
+ echo "Your local ${TARGET_BRANCH} branch is not up-to-date. You need to pull."
+ elif [ "$REMOTE_TARGET_BR_HASH" = "$BASE_HASH" ]; then
+ echo "Your local ${TARGET_BRANCH} branch is ahead of ${TARGET_BRANCH} branch at Apache git. You need to push."
+ else
+ echo "Your local ${TARGET_BRANCH} and Apache git ${TARGET_BRANCH} branches diverged. You need to pull, merge and push."
+ fi
+
+ exit 1
+fi
+
+echo "Local ${TARGET_BRANCH} is up-to-date."
+echo
+
+# Checkout pull-request branch.
+PR_BRANCH_NAME="pull-${PR_ID}-head"
+
+git fetch "${GITHUB_MIRROR}" "pull/${PR_ID}/head:${PR_BRANCH_NAME}" &> /dev/null
+if test $? != 0; then
+ echo "$0, ERROR:"
+ echo >&2 "No pull request was found with ID = '${PR_ID}'."
+ exit 1
+fi
+
+# Get the author name.
+git checkout "${PR_BRANCH_NAME}" &> /dev/null
+if test $? != 0; then
+ echo "$0, ERROR:"
+ echo >&2 "Failed to checkout '${PR_BRANCH_NAME}' branch (the branch was not found or already exists)."
+ exit 1
+fi
+
+AUTHOR="$(git --no-pager show -s --format="%aN <%aE>" HEAD)"
+ORIG_COMMENT="$(git log -1 --pretty=%B)"
+
+echo "Author of pull-request: '$AUTHOR'."
+echo
+
+# Update local target branch.
+git checkout ${TARGET_BRANCH} &> /dev/null
+
+# Take changes.
+git merge --squash "${PR_BRANCH_NAME}" &> /dev/null
+if test $? != 0; then
+ git reset --hard &> /dev/null
+
+ echo "$0, ERROR:"
+ echo >&2 "Could not merge the pull-request to ${TARGET_BRANCH} without conflicts. All local changes have been discarded. You're on ${TARGET_BRANCH} branch."
+ exit 1
+fi
+
+echo "Original comment is"
+echo "\"${ORIG_COMMENT}\""
+echo "Press [ENTER] if you agree with the comment or type your comment and press [ENTER]:"
+read -r COMMENT
+echo
+
+if [ "${COMMENT}" == "" ]; then
+ COMMENT=${ORIG_COMMENT}
+fi
+
+COMMENT="${COMMENT} - Fixes #${PR_ID}."
+
+if [ "${EXCLUDE_SPECIAL_FILE}" = "true" ]; then
+ git checkout HEAD ignite-pull-request-id
+fi
+
+SIGN_OPTION=""
+if [ -n "${WITH_GPG}" ]; then
+ SIGN_OPTION="-S"
+fi
+
+if [ -n "${WITH_SIGN_OFF}" ]; then
+ SIGN_OPTION="${SIGN_OPTION} -s"
+fi
+
+git commit --author "${AUTHOR}" -a ${SIGN_OPTION} -m "${COMMENT}" &> /dev/null
+
+echo "Squash commit for pull request with id='${PR_ID}' has been added. The commit has been added with comment '${COMMENT}'."
+echo "Now you can review the changes of the last commit on ${TARGET_BRANCH} and push it to ${APACHE_GIT} afterwards."
+echo "If you want to decline the changes, you can remove the last commit from your repo by 'git reset --hard HEAD^'."
+echo
+
+# Clean-up.
+git branch -D "${PR_BRANCH_NAME}" &> /dev/null
+
+echo 'Successfully completed.'
diff --git a/scripts/build_wheels.sh b/scripts/build_wheels.sh
new file mode 100755
index 0000000..b30c3b7
--- /dev/null
+++ b/scripts/build_wheels.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e -u -x
+
+function repair_wheel {
+ wheel="$1"
+ if !
auditwheel show "$wheel"; then + echo "Skipping non-platform wheel $wheel" + else + auditwheel repair "$wheel" --plat "$PLAT" -w /wheels + fi +} + +# Compile wheels +for PYBIN in /opt/python/*/bin; do + if [[ $PYBIN =~ ^(.*)cp3[7891](.*)$ ]]; then + "${PYBIN}/pip" wheel /pyignite/ --no-deps -w /wheels + fi +done + +# Bundle external shared libraries into the wheels +for whl in /wheels/*.whl; do + repair_wheel "$whl" +done + +for whl in /wheels/*.whl; do + if [[ ! $whl =~ ^(.*)manylinux(.*)$ ]]; then + rm "$whl" + else + chmod 666 "$whl" + fi +done + +rm -rf /pyignite/*.egg-info +rm -rf /pyignite/.eggs diff --git a/scripts/create_distr.sh b/scripts/create_distr.sh new file mode 100755 index 0000000..b86ac1e --- /dev/null +++ b/scripts/create_distr.sh @@ -0,0 +1,86 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DISTR_DIR="$(pwd)/distr/" +SRC_DIR="$(pwd)" +DEFAULT_DOCKER_IMAGE="quay.io/pypa/manylinux2010_x86_64" + +usage() { + cat < ${PATCH_FILE} + echo "Patch file created." + + git checkout ${PATCHED_BRANCH} + + git branch -D tmppatch # Delete tmp branch. + + echo + echo "Patch created: ${PATCH_FILE}" +} + +# +# Determines Current branch. +# +# Params: +# - Git home. +# Return - Current branch. +# +determineCurrentBranch () { + GIT_HOME=$1 + + cd ${GIT_HOME} || { echo "failed to change directory to $1"; exit 1; } + + CURRENT_BRANCH=`git rev-parse --abbrev-ref HEAD` + + echo "$CURRENT_BRANCH" +} + +# +# Checks that given git repository has clean work tree (there is no uncommited changes). +# Exit with code 1 in error case. +# +# Params: +# - Git home. +# +requireCleanWorkTree () { + cd "$1" || { echo "failed to change directory to $1"; exit 1; } # At git home. + + # Update the index + git update-index -q --ignore-submodules --refresh + err=0 + + # Disallow unstaged changes in the working tree + if ! git diff-files --quiet --ignore-submodules -- + then + echo "$0, ERROR:" + echo >&2 "You have unstaged changes." + git diff-files --name-status -r --ignore-submodules -- >&2 + err=1 + fi + + # Disallow uncommitted changes in the index + if ! git diff-index --cached --quiet HEAD --ignore-submodules -- + then + echo "$0, ERROR:" + echo >&2 "Your index contains uncommitted changes." + git diff-index --cached --name-status -r --ignore-submodules HEAD -- >&2 + err=1 + fi + + if [ $err = 1 ] + then + echo >&2 "Please commit or stash them." + exit 1 + fi +} diff --git a/setup.py b/setup.py index 583eaa3..827066a 100644 --- a/setup.py +++ b/setup.py @@ -12,30 +12,46 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import re +from distutils.command.build_ext import build_ext +from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError -from collections import defaultdict import setuptools import sys -PYTHON_REQUIRED = (3, 4) -PYTHON_INSTALLED = sys.version_info[:2] +cext = setuptools.Extension( + "pyignite._cutils", + sources=[ + "./cext/cutils.c" + ], + include_dirs=["./cext"] +) -if PYTHON_INSTALLED < PYTHON_REQUIRED: - sys.stderr.write(''' +if sys.platform == 'win32': + ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError, IOError, ValueError) +else: + ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError) -`pyignite` is not compatible with Python {}.{}! -Use Python {}.{} or above. +class BuildFailed(Exception): + pass -'''.format( - PYTHON_INSTALLED[0], - PYTHON_INSTALLED[1], - PYTHON_REQUIRED[0], - PYTHON_REQUIRED[1], - ) - ) - sys.exit(1) + +class ve_build_ext(build_ext): + # This class allows C extension building to fail. + + def run(self): + try: + build_ext.run(self) + except DistutilsPlatformError: + raise BuildFailed() + + def build_extension(self, ext): + try: + build_ext.build_extension(self, ext) + except ext_errors: + raise BuildFailed() def is_a_requirement(line): @@ -46,59 +62,80 @@ def is_a_requirement(line): ]) -requirement_sections = [ - 'install', - 'setup', - 'tests', - 'docs', -] -requirements = defaultdict(list) - -for section in requirement_sections: - with open( - 'requirements/{}.txt'.format(section), - 'r', - encoding='utf-8', - ) as requirements_file: - for line in requirements_file.readlines(): - line = line.strip('\n') - if is_a_requirement(line): - requirements[section].append(line) +install_requirements = [] +with open('requirements/install.txt', 'r', encoding='utf-8') as requirements_file: + for line in requirements_file.readlines(): + line = line.strip('\n') + if is_a_requirement(line): + install_requirements.append(line) with open('README.md', 'r', encoding='utf-8') as readme_file: long_description = readme_file.read() -setuptools.setup( - name='pyignite', - version='0.3.4', - python_requires='>={}.{}'.format(*PYTHON_REQUIRED), - author='Dmitry Melnichuk', - author_email='dmitry.melnichuk@nobitlost.com', - description='Apache Ignite binary client Python API', - long_description=long_description, - long_description_content_type='text/markdown', - url=( - 'https://github.com/apache/ignite/tree/master' - '/modules/platforms/python' - ), - packages=setuptools.find_packages(), - install_requires=requirements['install'], - tests_require=requirements['tests'], - setup_requires=requirements['setup'], - extras_require={ - 'docs': requirements['docs'], - }, - classifiers=[ - 'Programming Language :: Python', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.4', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3 :: Only', - 'Intended Audience :: Developers', - 'Topic :: Database :: Front-Ends', - 'Topic :: Software Development :: Libraries :: Python Modules', - 'License :: OSI Approved :: Apache Software License', - 'Operating System :: OS Independent', - ], -) +with open('pyignite/__init__.py', 'r') as fd: + version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', + fd.read(), re.MULTILINE).group(1) + +if not version: + raise RuntimeError('Cannot find version information') + + +def run_setup(with_binary=True): + if with_binary: + kw = dict( + ext_modules=[cext], + 
cmdclass=dict(build_ext=ve_build_ext), + ) + else: + kw = dict() + + setuptools.setup( + name='pyignite', + version=version, + python_requires='>=3.7', + author='The Apache Software Foundation', + author_email='dev@ignite.apache.org', + description='Apache Ignite binary client Python API', + long_description=long_description, + long_description_content_type='text/markdown', + url='https://github.com/apache/ignite-python-thin-client', + packages=setuptools.find_packages(), + install_requires=install_requirements, + license="Apache License 2.0", + license_files=('LICENSE', 'NOTICE'), + classifiers=[ + 'Programming Language :: Python', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + 'Programming Language :: Python :: 3 :: Only', + 'Intended Audience :: Developers', + 'Topic :: Database :: Front-Ends', + 'Topic :: Software Development :: Libraries :: Python Modules', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + ], + **kw + ) + + +try: + run_setup() +except BuildFailed: + BUILD_EXT_WARNING = ("WARNING: The C extension could not be compiled, " + "speedups are not enabled.") + print('*' * 75) + print(BUILD_EXT_WARNING) + print("Failure information, if any, is above.") + print("I'm retrying the build without the C extension now.") + print('*' * 75) + + run_setup(False) + + print('*' * 75) + print(BUILD_EXT_WARNING) + print("Plain python installation succeeded.") + print('*' * 75) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..03803a9 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,14 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/affinity/conftest.py b/tests/affinity/conftest.py new file mode 100644 index 0000000..eca31b2 --- /dev/null +++ b/tests/affinity/conftest.py @@ -0,0 +1,71 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest + +from pyignite import Client, AioClient +from tests.util import start_ignite_gen + +# Sometimes on slow testing servers and unstable topology +# default timeout is not enough for cache ops. +CLIENT_SOCKET_TIMEOUT = 20.0 + + +@pytest.fixture(scope='module', autouse=True) +def server1(): + yield from start_ignite_gen(1) + + +@pytest.fixture(scope='module', autouse=True) +def server2(): + yield from start_ignite_gen(2) + + +@pytest.fixture(scope='module', autouse=True) +def server3(): + yield from start_ignite_gen(3) + + +@pytest.fixture +def connection_param(): + return [('127.0.0.1', 10800 + i) for i in range(1, 4)] + + +@pytest.fixture +def client(connection_param): + client = Client(partition_aware=True, timeout=CLIENT_SOCKET_TIMEOUT) + try: + client.connect(connection_param) + yield client + finally: + client.close() + + +@pytest.fixture +async def async_client(connection_param, event_loop): + client = AioClient(partition_aware=True) + try: + await client.connect(connection_param) + yield client + finally: + await client.close() + + +@pytest.fixture(scope='module', autouse=True) +def skip_if_no_affinity(request, server1): + client = Client(partition_aware=True) + with client.connect('127.0.0.1', 10801): + if not client.partition_awareness_supported_by_protocol: + pytest.skip(f'skipped {request.node.name}, partition awareness is not supported.') diff --git a/tests/affinity/test_affinity.py b/tests/affinity/test_affinity.py new file mode 100644 index 0000000..c9a6b60 --- /dev/null +++ b/tests/affinity/test_affinity.py @@ -0,0 +1,330 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import asyncio +import decimal +from datetime import datetime, timedelta +from uuid import UUID, uuid4 + +import pytest + +from pyignite import GenericObjectMeta, AioClient +from pyignite.api import ( + cache_get_node_partitions, cache_get_node_partitions_async, cache_local_peek, cache_local_peek_async +) +from pyignite.constants import MAX_INT +from pyignite.datatypes import ( + BinaryObject, ByteArray, ByteObject, IntObject, ShortObject, LongObject, FloatObject, DoubleObject, BoolObject, + CharObject, String, UUIDObject, DecimalObject, TimestampObject, TimeObject +) +from pyignite.datatypes.cache_config import CacheMode +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_CACHE_MODE, PROP_CACHE_KEY_CONFIGURATION +from tests.util import wait_for_condition, wait_for_condition_async + + +def test_get_node_partitions(client, caches): + cache_ids = [cache.cache_id for cache in caches] + mappings = __get_mappings(client, cache_ids) + __check_mappings(mappings, cache_ids) + + +@pytest.mark.asyncio +async def test_get_node_partitions_async(async_client, async_caches): + cache_ids = [cache.cache_id for cache in async_caches] + mappings = await __get_mappings(async_client, cache_ids) + __check_mappings(mappings, cache_ids) + + +def __wait_for_ready_affinity(client, cache_ids): + def inner(): + def condition(): + result = __get_mappings(client, cache_ids) + return len(result.value['partition_mapping']) == len(cache_ids) + + wait_for_condition(condition) + + async def inner_async(): + async def condition(): + result = await __get_mappings(client, cache_ids) + return len(result.value['partition_mapping']) == len(cache_ids) + + await wait_for_condition_async(condition) + + return inner_async() if isinstance(client, AioClient) else inner() + + +def __get_mappings(client, cache_ids): + def inner(): + conn = client.random_node + result = cache_get_node_partitions(conn, cache_ids) + assert result.status == 0, result.message + return result + + async def inner_async(): + conn = await client.random_node() + result = await cache_get_node_partitions_async(conn, cache_ids) + assert result.status == 0, result.message + return result + + return inner_async() if isinstance(client, AioClient) else inner() + + +def __check_mappings(result, cache_ids): + partition_mapping = result.value['partition_mapping'] + + for i, cache_id in enumerate(cache_ids): + cache_mapping = partition_mapping[cache_id] + assert 'is_applicable' in cache_mapping + + # Check replicated cache + if i == 3: + assert not cache_mapping['is_applicable'] + assert 'node_mapping' not in cache_mapping + assert cache_mapping['number_of_partitions'] == 0 + else: + # Check cache config + if i == 2: + assert cache_mapping['cache_config'] + + assert cache_mapping['is_applicable'] + assert cache_mapping['node_mapping'] + assert cache_mapping['number_of_partitions'] == 1024 + + +@pytest.fixture +def caches(client): + yield from __create_caches_fixture(client) + + +@pytest.fixture +async def async_caches(async_client): + async for caches in __create_caches_fixture(async_client): + yield caches + + +def __create_caches_fixture(client): + caches_to_create = [] + for i in range(0, 5): + cache_name = f'test_cache_{i}' + if i == 2: + caches_to_create.append(( + cache_name, + { + PROP_NAME: cache_name, + PROP_CACHE_KEY_CONFIGURATION: [ + { + 'type_name': ByteArray.type_name, + 'affinity_key_field_name': 'byte_affinity', + } + ] + })) + elif i == 3: + caches_to_create.append(( + cache_name, + { + PROP_NAME: cache_name, + PROP_CACHE_MODE: 
CacheMode.REPLICATED + } + )) + else: + caches_to_create.append((cache_name, None)) + + def generate_caches(): + caches = [] + for name, config in caches_to_create: + if config: + cache = client.get_or_create_cache(config) + else: + cache = client.get_or_create_cache(name) + caches.append(cache) + return asyncio.gather(*caches) if isinstance(client, AioClient) else caches + + def inner(): + caches = [] + try: + caches = generate_caches() + __wait_for_ready_affinity(client, [cache.cache_id for cache in caches]) + yield caches + finally: + for cache in caches: + cache.destroy() + + async def inner_async(): + caches = [] + try: + caches = await generate_caches() + await __wait_for_ready_affinity(client, [cache.cache_id for cache in caches]) + yield caches + finally: + await asyncio.gather(*[cache.destroy() for cache in caches]) + + return inner_async() if isinstance(client, AioClient) else inner() + + +@pytest.fixture +def cache(client): + cache = client.get_or_create_cache({ + PROP_NAME: 'test_cache_1', + PROP_CACHE_MODE: CacheMode.PARTITIONED, + }) + try: + __wait_for_ready_affinity(client, [cache.cache_id]) + yield cache + finally: + cache.destroy() + + +@pytest.fixture +async def async_cache(async_client): + cache = await async_client.get_or_create_cache({ + PROP_NAME: 'test_cache_1', + PROP_CACHE_MODE: CacheMode.PARTITIONED, + }) + try: + await __wait_for_ready_affinity(async_client, [cache.cache_id]) + yield cache + finally: + await cache.destroy() + + +affinity_primitives_params = [ + # integers + (42, None), + (43, ByteObject), + (-44, ByteObject), + (45, IntObject), + (-46, IntObject), + (47, ShortObject), + (-48, ShortObject), + (49, LongObject), + (MAX_INT - 50, LongObject), + (MAX_INT + 51, LongObject), + + # floating point + (5.2, None), + (5.354, FloatObject), + (-5.556, FloatObject), + (-57.58, DoubleObject), + + # boolean + (True, None), + (True, BoolObject), + (False, BoolObject), + + # char + ('A', CharObject), + ('Z', CharObject), + ('⅓', CharObject), + ('á', CharObject), + ('ы', CharObject), + ('カ', CharObject), + ('Ø', CharObject), + ('ß', CharObject), + + # string + ('This is a test string', None), + ('Кириллица', None), + ('Little Mary had a lamb', String), + + # UUID + (UUID('12345678123456789876543298765432'), None), + (UUID('74274274274274274274274274274274'), UUIDObject), + (uuid4(), None), + + # decimal (long internal representation in Java) + (decimal.Decimal('-234.567'), None), + (decimal.Decimal('200.0'), None), + (decimal.Decimal('123.456'), DecimalObject), + (decimal.Decimal('1.0'), None), + (decimal.Decimal('0.02'), None), + + # decimal (BigInteger internal representation in Java) + (decimal.Decimal('12345671234567123.45671234567'), None), + (decimal.Decimal('-845678456.7845678456784567845'), None), + + # date and time + (datetime(1980, 1, 1), None), + ((datetime(1980, 1, 1), 999), TimestampObject), + (timedelta(days=99), TimeObject) +] + + +@pytest.mark.parametrize('key, key_hint', affinity_primitives_params) +def test_affinity(client, cache, key, key_hint): + __check_best_node_calculation(client, cache, key, 42, key_hint=key_hint) + + +@pytest.mark.parametrize('key, key_hint', affinity_primitives_params) +@pytest.mark.asyncio +async def test_affinity_async(async_client, async_cache, key, key_hint): + await __check_best_node_calculation(async_client, async_cache, key, 42, key_hint=key_hint) + + +@pytest.fixture +def key_generic_object(): + class KeyClass( + metaclass=GenericObjectMeta, + schema={ + 'NO': IntObject, + 'NAME': String, + }, + ): + pass + + key 
= KeyClass() + key.NO = 1 + key.NAME = 'test_string' + yield key + + +@pytest.mark.parametrize('with_type_hint', [True, False]) +def test_affinity_for_generic_object(client, cache, key_generic_object, with_type_hint): + key_hint = BinaryObject if with_type_hint else None + __check_best_node_calculation(client, cache, key_generic_object, 42, key_hint=key_hint) + + +@pytest.mark.parametrize('with_type_hint', [True, False]) +@pytest.mark.asyncio +async def test_affinity_for_generic_object_async(async_client, async_cache, key_generic_object, with_type_hint): + key_hint = BinaryObject if with_type_hint else None + await __check_best_node_calculation(async_client, async_cache, key_generic_object, 42, key_hint=key_hint) + + +def __check_best_node_calculation(client, cache, key, value, key_hint=None): + def check_peek_value(node, best_node, result): + if node is best_node: + assert result.value == value, f'Affinity calculation error for {key}' + else: + assert result.value is None, f'Affinity calculation error for {key}' + + def inner(): + cache.put(key, value, key_hint=key_hint) + best_node = client.get_best_node(cache, key, key_hint=key_hint) + + for node in filter(lambda n: n.alive, client._nodes): + result = cache_local_peek(node, cache.cache_info, key, key_hint=key_hint) + + check_peek_value(node, best_node, result) + + async def inner_async(): + await cache.put(key, value, key_hint=key_hint) + best_node = await client.get_best_node(cache, key, key_hint=key_hint) + + for node in filter(lambda n: n.alive, client._nodes): + result = await cache_local_peek_async(node, cache.cache_info, key, key_hint=key_hint) + + check_peek_value(node, best_node, result) + + return inner_async() if isinstance(client, AioClient) else inner() diff --git a/tests/affinity/test_affinity_bad_servers.py b/tests/affinity/test_affinity_bad_servers.py new file mode 100644 index 0000000..f5eec21 --- /dev/null +++ b/tests/affinity/test_affinity_bad_servers.py @@ -0,0 +1,133 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
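The `__check_best_node_calculation` helper above relies on the partition-aware client computing the primary node for a key, including binary-object keys built with `GenericObjectMeta`. A minimal sketch of that flow, assuming a cluster started as in the fixtures above (nodes on 127.0.0.1:10801-10803) and a hypothetical cache name `affinity_sketch`:

```python
from pyignite import Client, GenericObjectMeta
from pyignite.datatypes import BinaryObject, IntObject, String


class KeyClass(metaclass=GenericObjectMeta, schema={'NO': IntObject, 'NAME': String}):
    pass


client = Client(partition_aware=True)
with client.connect([('127.0.0.1', 10800 + i) for i in range(1, 4)]):
    cache = client.get_or_create_cache('affinity_sketch')  # hypothetical name
    key = KeyClass()
    key.NO = 1
    key.NAME = 'test_string'

    # Put the value, then ask which node is primary for this key;
    # the tests above verify the value is locally peekable only on that node.
    cache.put(key, 42, key_hint=BinaryObject)
    best_node = client.get_best_node(cache, key, key_hint=BinaryObject)
    print(best_node.alive)

    cache.destroy()
```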
+ +import pytest + +from pyignite import Client, AioClient +from pyignite.exceptions import ReconnectError, connection_errors +from tests.affinity.conftest import CLIENT_SOCKET_TIMEOUT +from tests.util import start_ignite, kill_process_tree + + +@pytest.fixture(params=['with-partition-awareness', 'without-partition-awareness']) +def with_partition_awareness(request): + yield request.param == 'with-partition-awareness' + + +def test_client_with_multiple_bad_servers(with_partition_awareness): + with pytest.raises(ReconnectError, match="Can not connect."): + client = Client(partition_aware=with_partition_awareness) + with client.connect([("127.0.0.1", 10900), ("127.0.0.1", 10901)]): + pass + + +@pytest.mark.asyncio +async def test_client_with_multiple_bad_servers_async(with_partition_awareness): + with pytest.raises(ReconnectError, match="Can not connect."): + client = AioClient(partition_aware=with_partition_awareness) + async with client.connect([("127.0.0.1", 10900), ("127.0.0.1", 10901)]): + pass + + +def test_client_with_failed_server(request, with_partition_awareness): + srv = start_ignite(idx=4) + try: + client = Client(partition_aware=with_partition_awareness) + with client.connect([("127.0.0.1", 10804)]): + cache = client.get_or_create_cache(request.node.name) + cache.put(1, 1) + kill_process_tree(srv.pid) + + if with_partition_awareness: + ex_class = (ReconnectError, ConnectionResetError) + else: + ex_class = ConnectionResetError + + with pytest.raises(ex_class): + cache.get(1) + finally: + kill_process_tree(srv.pid) + + +@pytest.mark.asyncio +async def test_client_with_failed_server_async(request, with_partition_awareness): + srv = start_ignite(idx=4) + try: + client = AioClient(partition_aware=with_partition_awareness) + async with client.connect([("127.0.0.1", 10804)]): + cache = await client.get_or_create_cache(request.node.name) + await cache.put(1, 1) + kill_process_tree(srv.pid) + + if with_partition_awareness: + ex_class = (ReconnectError, ConnectionResetError) + else: + ex_class = ConnectionResetError + + with pytest.raises(ex_class): + await cache.get(1) + finally: + kill_process_tree(srv.pid) + + +def test_client_with_recovered_server(request, with_partition_awareness): + srv = start_ignite(idx=4) + try: + client = Client(partition_aware=with_partition_awareness, timeout=CLIENT_SOCKET_TIMEOUT) + with client.connect([("127.0.0.1", 10804)]): + cache = client.get_or_create_cache(request.node.name) + cache.put(1, 1) + + # Kill and restart server + kill_process_tree(srv.pid) + srv = start_ignite(idx=4) + + # First request may fail. + try: + cache.put(1, 2) + except connection_errors: + pass + + # Retry succeeds + cache.put(1, 2) + assert cache.get(1) == 2 + finally: + kill_process_tree(srv.pid) + + +@pytest.mark.asyncio +async def test_client_with_recovered_server_async(request, with_partition_awareness): + srv = start_ignite(idx=4) + try: + client = AioClient(partition_aware=with_partition_awareness) + async with client.connect([("127.0.0.1", 10804)]): + cache = await client.get_or_create_cache(request.node.name) + await cache.put(1, 1) + + # Kill and restart server + kill_process_tree(srv.pid) + srv = start_ignite(idx=4) + + # First request may fail. 
+ try: + await cache.put(1, 2) + except connection_errors: + pass + + # Retry succeeds + await cache.put(1, 2) + assert await cache.get(1) == 2 + finally: + kill_process_tree(srv.pid) diff --git a/tests/affinity/test_affinity_request_routing.py b/tests/affinity/test_affinity_request_routing.py new file mode 100644 index 0000000..b73eff3 --- /dev/null +++ b/tests/affinity/test_affinity_request_routing.py @@ -0,0 +1,497 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +import contextlib +from collections import OrderedDict, deque +import random + +import pytest + +from pyignite import GenericObjectMeta, AioClient, Client +from pyignite.aio_cache import AioCache +from pyignite.datatypes import String, LongObject +from pyignite.datatypes.cache_config import CacheMode +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_BACKUPS_NUMBER, PROP_CACHE_KEY_CONFIGURATION, PROP_CACHE_MODE +from pyignite.monitoring import QueryEventListener +from tests.util import wait_for_condition, wait_for_condition_async, start_ignite, kill_process_tree + +try: + from contextlib import asynccontextmanager +except ImportError: + from async_generator import asynccontextmanager + +requests = deque() + + +class QueryRouteListener(QueryEventListener): + def on_query_start(self, event): + if 1000 <= event.op_code < 1100: + requests.append(event.port % 100) + + +client_connection_string = [('127.0.0.1', 10800 + idx) for idx in range(1, 5)] + + +@pytest.fixture +def client(): + client = Client(partition_aware=True, event_listeners=[QueryRouteListener()]) + try: + client.connect(client_connection_string) + yield client + finally: + requests.clear() + client.close() + + +@pytest.fixture +async def async_client(event_loop): + client = AioClient(partition_aware=True, event_listeners=[QueryRouteListener()]) + try: + await client.connect(client_connection_string) + yield client + finally: + requests.clear() + await client.close() + + +def wait_for_affinity_distribution(cache, key, node_idx, timeout=30): + real_node_idx = 0 + + def check_grid_idx(): + nonlocal real_node_idx + try: + cache.get(key) + real_node_idx = requests.pop() + except (OSError, IOError): + return False + return real_node_idx == node_idx + + res = wait_for_condition(check_grid_idx, timeout=timeout) + + if not res: + raise TimeoutError(f"failed to wait for affinity distribution, expected node_idx {node_idx}," + f"got {real_node_idx} instead") + + +async def wait_for_affinity_distribution_async(cache, key, node_idx, timeout=30): + real_node_idx = 0 + + async def check_grid_idx(): + nonlocal real_node_idx + try: + await cache.get(key) + real_node_idx = requests.pop() + except (OSError, IOError): + return False + return real_node_idx == node_idx + + res = await wait_for_condition_async(check_grid_idx, 
timeout=timeout) + + if not res: + raise TimeoutError(f"failed to wait for affinity distribution, expected node_idx {node_idx}," + f"got {real_node_idx} instead") + + +@pytest.mark.parametrize("key,grid_idx", [(1, 1), (2, 2), (3, 3), (4, 1), (5, 1), (6, 2), (11, 1), (13, 1), (19, 1)]) +@pytest.mark.parametrize("backups", [0, 1, 2, 3]) +def test_cache_operation_on_primitive_key_routes_request_to_primary_node(request, key, grid_idx, backups, + client): + cache = client.get_or_create_cache({ + PROP_NAME: request.node.name + str(backups), + PROP_BACKUPS_NUMBER: backups, + }) + try: + __perform_operations_on_primitive_key(client, cache, key, grid_idx) + finally: + cache.destroy() + + +@pytest.mark.parametrize("key,grid_idx", [(1, 1), (2, 2), (3, 3), (4, 1), (5, 1), (6, 2), (11, 1), (13, 1), (19, 1)]) +@pytest.mark.parametrize("backups", [0, 1, 2, 3]) +@pytest.mark.asyncio +async def test_cache_operation_on_primitive_key_routes_request_to_primary_node_async( + request, key, grid_idx, backups, async_client): + cache = await async_client.get_or_create_cache({ + PROP_NAME: request.node.name + str(backups), + PROP_BACKUPS_NUMBER: backups, + }) + try: + await __perform_operations_on_primitive_key(async_client, cache, key, grid_idx) + finally: + await cache.destroy() + + +def __perform_operations_on_primitive_key(client, cache, key, grid_idx): + operations = [ + ('get', 1), ('put', 2), ('replace', 2), ('clear_key', 1), ('contains_key', 1), ('get_and_put', 2), + ('get_and_put_if_absent', 2), ('put_if_absent', 2), ('get_and_remove', 1), ('get_and_replace', 2), + ('remove_key', 1), ('remove_if_equals', 2), ('replace', 2), ('replace_if_equals', 3) + ] + + def inner(): + cache.put(key, key) + wait_for_affinity_distribution(cache, key, grid_idx) + + for op_name, param_nums in operations: + op = getattr(cache, op_name) + args = [random.randint(-100, 100) for _ in range(0, param_nums - 1)] + op(key, *args) + assert requests.pop() == grid_idx + + async def inner_async(): + await cache.put(key, key) + await wait_for_affinity_distribution_async(cache, key, grid_idx) + + for op_name, param_nums in operations: + op = getattr(cache, op_name) + args = [random.randint(-100, 100) for _ in range(0, param_nums - 1)] + await op(key, *args) + + assert requests.pop() == grid_idx + + return inner_async() if isinstance(client, AioClient) else inner() + + +@pytest.mark.skip(reason="Custom key objects are not supported yet") +def test_cache_operation_on_complex_key_routes_request_to_primary_node(): + pass + + +@pytest.mark.parametrize("key,grid_idx", [(1, 2), (2, 1), (3, 1), (4, 2), (5, 2), (6, 3)]) +@pytest.mark.skip(reason="Custom key objects are not supported yet") +def test_cache_operation_on_custom_affinity_key_routes_request_to_primary_node(request, client, key, grid_idx): + class AffinityTestType1( + metaclass=GenericObjectMeta, + type_name='AffinityTestType1', + schema=OrderedDict([ + ('test_str', String), + ('test_int', LongObject) + ]) + ): + pass + + cache_config = { + PROP_NAME: request.node.name, + PROP_CACHE_KEY_CONFIGURATION: [ + { + 'type_name': 'AffinityTestType1', + 'affinity_key_field_name': 'test_int', + }, + ], + } + cache = client.create_cache(cache_config) + + # noinspection PyArgumentList + key_obj = AffinityTestType1( + test_str="abc", + test_int=key + ) + + cache.put(key_obj, 1) + cache.put(key_obj, 2) + + assert requests.pop() == grid_idx + + +@pytest.fixture +def client_cache(client, request): + yield client.get_or_create_cache(request.node.name) + + +@pytest.fixture +async def 
async_client_cache(async_client, request): + cache = await async_client.get_or_create_cache(request.node.name) + yield cache + + +def test_cache_operation_routed_to_new_cluster_node(client_cache): + __perform_cache_operation_routed_to_new_node(client_cache) + + +@pytest.mark.asyncio +async def test_cache_operation_routed_to_new_cluster_node_async(async_client_cache): + await __perform_cache_operation_routed_to_new_node(async_client_cache) + + +def __perform_cache_operation_routed_to_new_node(cache): + key = 12 + + def inner(): + wait_for_affinity_distribution(cache, key, 3) + cache.put(key, key) + cache.put(key, key) + assert requests.pop() == 3 + + srv = start_ignite(idx=4) + try: + # Wait for rebalance and partition map exchange + wait_for_affinity_distribution(cache, key, 4) + + # Response is correct and comes from the new node + res = cache.get_and_remove(key) + assert res == key + assert requests.pop() == 4 + finally: + kill_process_tree(srv.pid) + + async def inner_async(): + await wait_for_affinity_distribution_async(cache, key, 3) + await cache.put(key, key) + await cache.put(key, key) + assert requests.pop() == 3 + + srv = start_ignite(idx=4) + try: + # Wait for rebalance and partition map exchange + await wait_for_affinity_distribution_async(cache, key, 4) + + # Response is correct and comes from the new node + res = await cache.get_and_remove(key) + assert res == key + assert requests.pop() == 4 + finally: + kill_process_tree(srv.pid) + + return inner_async() if isinstance(cache, AioCache) else inner() + + +@pytest.fixture +def replicated_cache(request, client): + cache = client.get_or_create_cache({ + PROP_NAME: request.node.name, + PROP_CACHE_MODE: CacheMode.REPLICATED, + }) + try: + yield cache + finally: + cache.destroy() + + +@pytest.fixture +async def async_replicated_cache(request, async_client): + cache = await async_client.get_or_create_cache({ + PROP_NAME: request.node.name, + PROP_CACHE_MODE: CacheMode.REPLICATED, + }) + try: + yield cache + finally: + await cache.destroy() + + +def test_replicated_cache_operation_routed_to_random_node(replicated_cache): + verify_random_node(replicated_cache) + + +@pytest.mark.asyncio +async def test_replicated_cache_operation_routed_to_random_node_async(async_replicated_cache): + await verify_random_node(async_replicated_cache) + + +def test_replicated_cache_operation_not_routed_to_failed_node(replicated_cache): + srv = start_ignite(idx=4) + try: + while True: + replicated_cache.put(1, 1) + + if requests.pop() == 4: + break + + kill_process_tree(srv.pid) + + num_failures = 0 + for i in range(100): + # Request may fail one time, because query can be requested before affinity update or connection + # lost will be detected. + try: + replicated_cache.put(1, 1) + except: # noqa 13 + num_failures += 1 + assert num_failures <= 1, "Expected no more than 1 failure." + finally: + kill_process_tree(srv.pid) + + +@pytest.mark.asyncio +async def test_replicated_cache_operation_not_routed_to_failed_node_async(async_replicated_cache): + srv = start_ignite(idx=4) + try: + while True: + await async_replicated_cache.put(1, 1) + + if requests.pop() == 4: + break + + kill_process_tree(srv.pid) + + num_failures = 0 + for i in range(100): + # Request may fail one time, because query can be requested before affinity update or connection + # lost will be detected. + try: + await async_replicated_cache.put(1, 1) + except: # noqa 13 + num_failures += 1 + assert num_failures <= 1, "Expected no more than 1 failure." 
+ finally: + kill_process_tree(srv.pid) + + +def verify_random_node(cache): + key = 1 + + def inner(): + cache.put(key, key) + + idx1 = requests.pop() + idx2 = idx1 + + # Try 10 times - random node may end up being the same + for _ in range(1, 10): + cache.put(key, key) + idx2 = requests.pop() + if idx2 != idx1: + break + assert idx1 != idx2 + + async def inner_async(): + await cache.put(key, key) + + idx1 = requests.pop() + + idx2 = idx1 + + # Try 10 times - random node may end up being the same + for _ in range(1, 10): + await cache.put(key, key) + idx2 = requests.pop() + + if idx2 != idx1: + break + assert idx1 != idx2 + + return inner_async() if isinstance(cache, AioCache) else inner() + + +@contextlib.contextmanager +def create_caches(client): + caches = [] + try: + caches = [client.create_cache(f'test_cache_{i}') for i in range(0, 10)] + yield caches + finally: + for cache in caches: + try: + cache.destroy() + except: # noqa: 13 + cache.destroy() # Retry if connection failed. + pass + + +@asynccontextmanager +async def create_caches_async(client): + caches = [] + try: + caches = await asyncio.gather(*[client.create_cache(f'test_cache_{i}') for i in range(0, 10)]) + yield caches + finally: + for cache in caches: + try: + await cache.destroy() + except: # noqa: 13 + await cache.destroy() # Retry if connection failed. + pass + + +def test_new_registered_cache_affinity(client): + with create_caches(client) as caches: + key = 12 + test_cache = random.choice(caches) + test_cache.put(key, key) + wait_for_affinity_distribution(test_cache, key, 3) + + caches.append(client.create_cache('new_cache')) + + for cache in caches: + cache.get(key) + assert requests.pop() == 3 + + +@pytest.mark.asyncio +async def test_new_registered_cache_affinity_async(async_client): + async with create_caches_async(async_client) as caches: + key = 12 + test_cache = random.choice(caches) + await test_cache.put(key, key) + await wait_for_affinity_distribution_async(test_cache, key, 3) + + caches.append(await async_client.create_cache('new_cache')) + + for cache in caches: + await cache.get(key) + assert requests.pop() == 3 + + +def test_all_registered_cache_updated_on_new_server(client): + with create_caches(client) as caches: + key = 12 + test_cache = random.choice(caches) + wait_for_affinity_distribution(test_cache, key, 3) + test_cache.put(key, key) + assert requests.pop() == 3 + + srv = start_ignite(idx=4) + try: + # Wait for rebalance and partition map exchange + wait_for_affinity_distribution(test_cache, key, 4) + + for cache in caches: + cache.get(key) + assert requests.pop() == 4 + finally: + kill_process_tree(srv.pid) + + +@pytest.mark.asyncio +async def test_all_registered_cache_updated_on_new_server_async(async_client): + async with create_caches_async(async_client) as caches: + key = 12 + test_cache = random.choice(caches) + await wait_for_affinity_distribution_async(test_cache, key, 3) + await test_cache.put(key, key) + assert requests.pop() == 3 + + srv = start_ignite(idx=4) + try: + # Wait for rebalance and partition map exchange + await wait_for_affinity_distribution_async(test_cache, key, 4) + + for cache in caches: + await cache.get(key) + assert requests.pop() == 4 + finally: + kill_process_tree(srv.pid) + + +@pytest.mark.asyncio +async def test_update_affinity_concurrently(async_client): + async with create_caches_async(async_client) as caches: + key = 12 + await asyncio.gather(*[cache.put(key, key) for cache in caches]) + + for cache in caches: + await cache.get(key) + assert requests.pop() == 3 
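The routing tests above all follow the same observation pattern: a `QueryEventListener` records which node (by port) served each cache operation, which is what makes partition-aware routing assertable. A condensed sketch of that pattern, assuming nodes on 127.0.0.1:10801-10804 as in the fixtures and a hypothetical cache name `routing_sketch`:

```python
from pyignite import Client
from pyignite.monitoring import QueryEventListener

served_ports = []


class RouteListener(QueryEventListener):
    def on_query_start(self, event):
        # Cache operations use op codes in the 1000..1099 range (see QueryRouteListener above).
        if 1000 <= event.op_code < 1100:
            served_ports.append(event.port)


client = Client(partition_aware=True, event_listeners=[RouteListener()])
with client.connect([('127.0.0.1', 10800 + i) for i in range(1, 5)]):
    cache = client.get_or_create_cache('routing_sketch')  # hypothetical name
    cache.put(1, 1)
    cache.get(1)
    cache.destroy()

print(served_ports)  # ports of the nodes that served each operation
```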
diff --git a/tests/affinity/test_affinity_single_connection.py b/tests/affinity/test_affinity_single_connection.py new file mode 100644 index 0000000..c679bdd --- /dev/null +++ b/tests/affinity/test_affinity_single_connection.py @@ -0,0 +1,207 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest + +from pyignite import Client, AioClient + + +@pytest.fixture +def client(): + client = Client(partition_aware=True) + try: + client.connect('127.0.0.1', 10801) + yield client + finally: + client.close() + + +@pytest.fixture +async def async_client(event_loop): + client = AioClient(partition_aware=True) + try: + await client.connect('127.0.0.1', 10801) + yield client + finally: + await client.close() + + +def test_all_cache_operations_with_partition_aware_client_on_single_server(request, client): + cache = client.get_or_create_cache(request.node.name) + key = 1 + key2 = 2 + + # Put/Get + cache.put(key, key) + assert cache.get(key) == key + + # Replace + res = cache.replace(key, key2) + assert res + assert cache.get(key) == key2 + + # Clear + cache.put(key2, key2) + cache.clear_key(key2) + assert cache.get(key2) is None + + # ContainsKey + assert cache.contains_key(key) + assert not cache.contains_key(key2) + + # GetAndPut + cache.put(key, key) + res = cache.get_and_put(key, key2) + assert res == key + assert cache.get(key) == key2 + + # GetAndPutIfAbsent + cache.clear_key(key) + res = cache.get_and_put_if_absent(key, key) + res2 = cache.get_and_put_if_absent(key, key2) + assert res is None + assert res2 == key + assert cache.get(key) == key + + # PutIfAbsent + cache.clear_key(key) + res = cache.put_if_absent(key, key) + res2 = cache.put_if_absent(key, key2) + assert res + assert not res2 + assert cache.get(key) == key + + # GetAndRemove + cache.put(key, key) + res = cache.get_and_remove(key) + assert res == key + assert cache.get(key) is None + + # GetAndReplace + cache.put(key, key) + res = cache.get_and_replace(key, key2) + assert res == key + assert cache.get(key) == key2 + + # RemoveKey + cache.put(key, key) + cache.remove_key(key) + assert cache.get(key) is None + + # RemoveIfEquals + cache.put(key, key) + res = cache.remove_if_equals(key, key2) + res2 = cache.remove_if_equals(key, key) + assert not res + assert res2 + assert cache.get(key) is None + + # Replace + cache.put(key, key) + cache.replace(key, key2) + assert cache.get(key) == key2 + + # ReplaceIfEquals + cache.put(key, key) + res = cache.replace_if_equals(key, key2, key2) + res2 = cache.replace_if_equals(key, key, key2) + assert not res + assert res2 + assert cache.get(key) == key2 + + +@pytest.mark.asyncio +async def test_all_cache_operations_with_partition_aware_client_on_single_server_async(request, async_client): + cache = await async_client.get_or_create_cache(request.node.name) + key 
= 1 + key2 = 2 + + # Put/Get + await cache.put(key, key) + assert await cache.get(key) == key + + # Replace + res = await cache.replace(key, key2) + assert res + assert await cache.get(key) == key2 + + # Clear + await cache.put(key2, key2) + await cache.clear_key(key2) + assert await cache.get(key2) is None + + # ContainsKey + assert await cache.contains_key(key) + assert not await cache.contains_key(key2) + + # GetAndPut + await cache.put(key, key) + res = await cache.get_and_put(key, key2) + assert res == key + assert await cache.get(key) == key2 + + # GetAndPutIfAbsent + await cache.clear_key(key) + res = await cache.get_and_put_if_absent(key, key) + res2 = await cache.get_and_put_if_absent(key, key2) + assert res is None + assert res2 == key + assert await cache.get(key) == key + + # PutIfAbsent + await cache.clear_key(key) + res = await cache.put_if_absent(key, key) + res2 = await cache.put_if_absent(key, key2) + assert res + assert not res2 + assert await cache.get(key) == key + + # GetAndRemove + await cache.put(key, key) + res = await cache.get_and_remove(key) + assert res == key + assert await cache.get(key) is None + + # GetAndReplace + await cache.put(key, key) + res = await cache.get_and_replace(key, key2) + assert res == key + assert await cache.get(key) == key2 + + # RemoveKey + await cache.put(key, key) + await cache.remove_key(key) + assert await cache.get(key) is None + + # RemoveIfEquals + await cache.put(key, key) + res = await cache.remove_if_equals(key, key2) + res2 = await cache.remove_if_equals(key, key) + assert not res + assert res2 + assert await cache.get(key) is None + + # Replace + await cache.put(key, key) + await cache.replace(key, key2) + assert await cache.get(key) == key2 + + # ReplaceIfEquals + await cache.put(key, key) + res = await cache.replace_if_equals(key, key2, key2) + res2 = await cache.replace_if_equals(key, key, key2) + assert not res + assert res2 + assert await cache.get(key) == key2 diff --git a/tests/affinity/test_connection_context_manager.py b/tests/affinity/test_connection_context_manager.py new file mode 100644 index 0000000..8056c7d --- /dev/null +++ b/tests/affinity/test_connection_context_manager.py @@ -0,0 +1,83 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest + +from pyignite import Client, AioClient + + +@pytest.fixture +def connection_param(): + return [('127.0.0.1', 10800 + i) for i in range(1, 4)] + + +@pytest.mark.parametrize('partition_aware', ['with_partition_aware', 'wo_partition_aware']) +def test_connection_context(connection_param, partition_aware): + is_partition_aware = partition_aware == 'with_partition_aware' + client = Client(partition_aware=is_partition_aware) + + # Check context manager + with client.connect(connection_param): + __check_open(client, is_partition_aware) + __check_closed(client) + + # Check standard way + try: + client.connect(connection_param) + __check_open(client, is_partition_aware) + finally: + client.close() + __check_closed(client) + + +@pytest.mark.asyncio +@pytest.mark.parametrize('partition_aware', ['with_partition_aware', 'wo_partition_aware']) +async def test_connection_context_async(connection_param, partition_aware): + is_partition_aware = partition_aware == 'with_partition_aware' + client = AioClient(partition_aware=is_partition_aware) + + # Check async context manager. + async with client.connect(connection_param): + await __check_open(client, is_partition_aware) + __check_closed(client) + + # Check standard way. + try: + await client.connect(connection_param) + await __check_open(client, is_partition_aware) + finally: + await client.close() + __check_closed(client) + + +def __check_open(client, is_partition_aware): + def inner_sync(): + if is_partition_aware: + assert client.random_node.alive + else: + assert all(n.alive for n in client._nodes) + + async def inner_async(): + if is_partition_aware: + random_node = await client.random_node() + assert random_node.alive + else: + assert all(n.alive for n in client._nodes) + + return inner_sync() if isinstance(client, Client) else inner_async() + + +def __check_closed(client): + assert all(not n.alive for n in client._nodes) diff --git a/tests/common/conftest.py b/tests/common/conftest.py new file mode 100644 index 0000000..0f28f7e --- /dev/null +++ b/tests/common/conftest.py @@ -0,0 +1,83 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest + +from pyignite import Client, AioClient +from tests.util import start_ignite_gen + + +@pytest.fixture(scope='module', autouse=True) +def server1(): + yield from start_ignite_gen(1) + + +@pytest.fixture(scope='module', autouse=True) +def server2(): + yield from start_ignite_gen(2) + + +@pytest.fixture(scope='module', autouse=True) +def server3(): + yield from start_ignite_gen(3) + + +@pytest.fixture(scope='module') +def client(): + client = Client() + try: + client.connect('127.0.0.1', 10801) + yield client + finally: + client.close() + + +@pytest.fixture(scope='module') +async def async_client(event_loop): + client = AioClient() + try: + await client.connect('127.0.0.1', 10801) + yield client + finally: + await client.close() + + +@pytest.fixture +async def async_cache(async_client: 'AioClient'): + cache = await async_client.create_cache('my_bucket') + try: + yield cache + finally: + await cache.destroy() + + +@pytest.fixture +def cache(client): + cache = client.create_cache('my_bucket') + try: + yield cache + finally: + cache.destroy() + + +@pytest.fixture(autouse=True) +def expiry_policy_supported(request, server1): + client = Client() + with client.connect('127.0.0.1', 10801): + result = client.protocol_context.is_expiry_policy_supported() + if not result and request.node.get_closest_marker('skip_if_no_expiry_policy'): + pytest.skip(f'skipped {request.node.name}, ExpiryPolicy API is not supported.') + + return result diff --git a/tests/common/test_binary.py b/tests/common/test_binary.py new file mode 100644 index 0000000..449709e --- /dev/null +++ b/tests/common/test_binary.py @@ -0,0 +1,558 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import re +from collections import OrderedDict +from decimal import Decimal + +import pytest + +from pyignite import GenericObjectMeta +from pyignite.aio_cache import AioCache +from pyignite.datatypes import ( + BinaryObject, BoolObject, IntObject, DecimalObject, LongObject, String, ByteObject, ShortObject, FloatObject, + DoubleObject, CharObject, UUIDObject, DateObject, TimestampObject, TimeObject, EnumObject, BinaryEnumObject, + ByteArrayObject, ShortArrayObject, IntArrayObject, LongArrayObject, FloatArrayObject, DoubleArrayObject, + CharArrayObject, BoolArrayObject, UUIDArrayObject, DateArrayObject, TimestampArrayObject, TimeArrayObject, + EnumArrayObject, StringArrayObject, DecimalArrayObject, ObjectArrayObject, CollectionObject, MapObject) +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_SQL_SCHEMA, PROP_QUERY_ENTITIES + +insert_data = [ + [1, True, 'asdf', 42, Decimal('2.4')], + [2, False, 'zxcvb', 43, Decimal('2.5')], + [3, True, 'qwerty', 44, Decimal('2.6')], +] + +page_size = 100 + +scheme_name = 'PUBLIC' + +table_sql_name = 'AllDataType' +table_cache_name = 'SQL_{}_{}'.format( + scheme_name, + table_sql_name.upper(), +) + +create_query = ''' +CREATE TABLE {} ( + test_pk INTEGER(11) PRIMARY KEY, + test_bool BOOLEAN, + test_str VARCHAR(24), + test_int INTEGER(11), + test_decimal DECIMAL(11, 5), +) +'''.format(table_sql_name) + +insert_query = ''' +INSERT INTO {} ( + test_pk, test_bool, test_str, test_int, test_decimal, +) VALUES (?, ?, ?, ?, ?)'''.format(table_sql_name) + +select_query = '''SELECT * FROM {}'''.format(table_sql_name) + +drop_query = 'DROP TABLE {} IF EXISTS'.format(table_sql_name) + + +@pytest.fixture +def table_cache_read(client): + client.sql(drop_query) + client.sql(create_query) + + for line in insert_data: + client.sql(insert_query, query_args=line) + + cache = client.get_cache(table_cache_name) + yield cache + client.sql(drop_query) + + +@pytest.fixture +async def table_cache_read_async(async_client): + await async_client.sql(drop_query) + await async_client.sql(create_query) + + for line in insert_data: + await async_client.sql(insert_query, query_args=line) + + cache = await async_client.get_cache(table_cache_name) + yield cache + await async_client.sql(drop_query) + + +def test_sql_read_as_binary(table_cache_read): + with table_cache_read.scan() as cursor: + # convert Binary object fields' values to a tuple + # to compare it with the initial data + for key, value in cursor: + assert key in {x[0] for x in insert_data} + assert (value.TEST_BOOL, value.TEST_STR, value.TEST_INT, value.TEST_DECIMAL) \ + in {tuple(x[1:]) for x in insert_data} + + +@pytest.mark.asyncio +async def test_sql_read_as_binary_async(table_cache_read_async): + async with table_cache_read_async.scan() as cursor: + # convert Binary object fields' values to a tuple + # to compare it with the initial data + async for key, value in cursor: + assert key in {x[0] for x in insert_data} + assert (value.TEST_BOOL, value.TEST_STR, value.TEST_INT, value.TEST_DECIMAL) \ + in {tuple(x[1:]) for x in insert_data} + + +class AllDataType( + metaclass=GenericObjectMeta, + type_name=table_cache_name, + schema=OrderedDict([ + ('TEST_BOOL', BoolObject), + ('TEST_STR', String), + ('TEST_INT', IntObject), + ('TEST_DECIMAL', DecimalObject), + ]), +): + pass + + +@pytest.fixture +def table_cache_write_settings(): + return { + PROP_NAME: table_cache_name, + PROP_SQL_SCHEMA: scheme_name, + PROP_QUERY_ENTITIES: [ + { + 'table_name': table_sql_name.upper(), + 'key_field_name': 'TEST_PK', + 'key_type_name': 
'java.lang.Integer', + 'field_name_aliases': [], + 'query_fields': [ + { + 'name': 'TEST_PK', + 'type_name': 'java.lang.Integer', + 'is_notnull_constraint_field': True, + }, + { + 'name': 'TEST_BOOL', + 'type_name': 'java.lang.Boolean', + }, + { + 'name': 'TEST_STR', + 'type_name': 'java.lang.String', + }, + { + 'name': 'TEST_INT', + 'type_name': 'java.lang.Integer', + }, + { + 'name': 'TEST_DECIMAL', + 'type_name': 'java.math.BigDecimal', + 'default_value': Decimal('0.00'), + 'precision': 11, + 'scale': 2, + }, + ], + 'query_indexes': [], + 'value_type_name': table_cache_name, + 'value_field_name': None, + }, + ], + } + + +@pytest.fixture +def table_cache_write(client, table_cache_write_settings): + cache = client.get_or_create_cache(table_cache_write_settings) + assert cache.settings, 'SQL table cache settings are empty' + + for row in insert_data: + value = AllDataType() + ( + value.TEST_BOOL, + value.TEST_STR, + value.TEST_INT, + value.TEST_DECIMAL, + ) = row[1:] + cache.put(row[0], value, key_hint=IntObject) + + data = cache.scan() + assert len(list(data)) == len(insert_data), 'Not all data was read as key-value' + + yield cache + cache.destroy() + + +@pytest.fixture +async def async_table_cache_write(async_client, table_cache_write_settings): + cache = await async_client.get_or_create_cache(table_cache_write_settings) + assert await cache.settings(), 'SQL table cache settings are empty' + + for row in insert_data: + value = AllDataType() + ( + value.TEST_BOOL, + value.TEST_STR, + value.TEST_INT, + value.TEST_DECIMAL, + ) = row[1:] + await cache.put(row[0], value, key_hint=IntObject) + + async with cache.scan() as cursor: + data = [a async for a in cursor] + assert len(data) == len(insert_data), 'Not all data was read as key-value' + + yield cache + await cache.destroy() + + +def test_sql_write_as_binary(client, table_cache_write): + # read rows as SQL + data = client.sql(select_query, include_field_names=True) + + header_row = next(data) + for field_name in AllDataType.schema.keys(): + assert field_name in header_row, 'Not all field names in header row' + + data = list(data) + assert len(data) == len(insert_data), 'Not all data was read as SQL rows' + + +@pytest.mark.asyncio +async def test_sql_write_as_binary_async(async_client, async_table_cache_write): + # read rows as SQL + async with async_client.sql(select_query, include_field_names=True) as cursor: + header_row = await cursor.__anext__() + for field_name in AllDataType.schema.keys(): + assert field_name in header_row, 'Not all field names in header row' + + data = [v async for v in cursor] + assert len(data) == len(insert_data), 'Not all data was read as SQL rows' + + +def test_nested_binary_objects(cache): + __check_nested_binary_objects(cache) + + +@pytest.mark.asyncio +async def test_nested_binary_objects_async(async_cache): + await __check_nested_binary_objects(async_cache) + + +def __check_nested_binary_objects(cache): + class InnerType( + metaclass=GenericObjectMeta, + schema=OrderedDict([ + ('inner_int', LongObject), + ('inner_str', String), + ]), + ): + pass + + class OuterType( + metaclass=GenericObjectMeta, + schema=OrderedDict([ + ('outer_int', LongObject), + ('nested_binary', BinaryObject), + ('outer_str', String), + ]), + ): + pass + + def prepare_obj(): + inner = InnerType(inner_int=42, inner_str='This is a test string') + + return OuterType( + outer_int=43, + nested_binary=inner, + outer_str='This is another test string' + ) + + def check_obj(result): + assert result.outer_int == 43 + assert result.outer_str == 
'This is another test string' + assert result.nested_binary.inner_int == 42 + assert result.nested_binary.inner_str == 'This is a test string' + + async def inner_async(): + await cache.put(1, prepare_obj()) + check_obj(await cache.get(1)) + + def inner(): + cache.put(1, prepare_obj()) + check_obj(cache.get(1)) + + return inner_async() if isinstance(cache, AioCache) else inner() + + +def test_add_schema_to_binary_object(cache): + __check_add_schema_to_binary_object(cache) + + +@pytest.mark.asyncio +async def test_add_schema_to_binary_object_async(async_cache): + await __check_add_schema_to_binary_object(async_cache) + + +def __check_add_schema_to_binary_object(cache): + class MyBinaryType( + metaclass=GenericObjectMeta, + schema=OrderedDict([ + ('test_str', String), + ('test_int', LongObject), + ('test_bool', BoolObject), + ]), + ): + pass + + def prepare_bo_v1(): + return MyBinaryType(test_str='Test string', test_int=42, test_bool=True) + + def check_bo_v1(result): + assert result.test_str == 'Test string' + assert result.test_int == 42 + assert result.test_bool is True + + def prepare_bo_v2(): + modified_schema = MyBinaryType.schema.copy() + modified_schema['test_decimal'] = DecimalObject + del modified_schema['test_bool'] + + class MyBinaryTypeV2( + metaclass=GenericObjectMeta, + type_name='MyBinaryType', + schema=modified_schema, + ): + pass + + assert MyBinaryType.type_id == MyBinaryTypeV2.type_id + assert MyBinaryType.schema_id != MyBinaryTypeV2.schema_id + + return MyBinaryTypeV2(test_str='Another test', test_int=43, test_decimal=Decimal('2.34')) + + def check_bo_v2(result): + assert result.test_str == 'Another test' + assert result.test_int == 43 + assert result.test_decimal == Decimal('2.34') + assert not hasattr(result, 'test_bool') + + async def inner_async(): + await cache.put(1, prepare_bo_v1()) + check_bo_v1(await cache.get(1)) + await cache.put(2, prepare_bo_v2()) + check_bo_v2(await cache.get(2)) + + def inner(): + cache.put(1, prepare_bo_v1()) + check_bo_v1(cache.get(1)) + cache.put(2, prepare_bo_v2()) + check_bo_v2(cache.get(2)) + + return inner_async() if isinstance(cache, AioCache) else inner() + + +def test_complex_object_names(cache): + """ + Test the ability to work with Complex types whose names contain symbols + not suitable for use in Python identifiers. 
+ """ + __check_complex_object_names(cache) + + +@pytest.mark.asyncio +async def test_complex_object_names_async(async_cache): + await __check_complex_object_names(async_cache) + + +def __check_complex_object_names(cache): + type_name = 'Non.Pythonic#type-name$' + key = 'key' + data = 'test' + + class NonPythonicallyNamedType( + metaclass=GenericObjectMeta, + type_name=type_name, + schema=OrderedDict([ + ('field', String), + ]) + ): + pass + + def check(obj): + assert obj.type_name == type_name, 'Complex type name mismatch' + assert obj.field == data, 'Complex object data failure' + + async def inner_async(): + await cache.put(key, NonPythonicallyNamedType(field=data)) + check(await cache.get(key)) + + def inner(): + cache.put(key, NonPythonicallyNamedType(field=data)) + check(cache.get(key)) + + return inner_async() if isinstance(cache, AioCache) else inner() + + +class Internal( + metaclass=GenericObjectMeta, type_name='Internal', + schema=OrderedDict([ + ('id', IntObject), + ('str', String) + ]) +): + pass + + +class NestedObject( + metaclass=GenericObjectMeta, type_name='NestedObject', + schema=OrderedDict([ + ('id', IntObject), + ('str', String), + ('internal', BinaryObject) + ]) +): + pass + + +@pytest.fixture +def complex_objects(): + fixtures = [] + + obj_ascii = NestedObject() + obj_ascii.id = 1 + obj_ascii.str = 'test_string' + + obj_ascii.internal = Internal() + obj_ascii.internal.id = 2 + obj_ascii.internal.str = 'lorem ipsum' + + fixtures.append((obj_ascii, -1314567146)) + + obj_utf8 = NestedObject() + obj_utf8.id = 1 + obj_utf8.str = 'юникод' + + obj_utf8.internal = Internal() + obj_utf8.internal.id = 2 + obj_utf8.internal.str = 'ユニコード' + + fixtures.append((obj_utf8, -1945378474)) + + yield fixtures + + +def test_complex_object_hash(client, complex_objects): + for obj, hash in complex_objects: + assert hash == BinaryObject.hashcode(obj, client=client) + + +@pytest.mark.asyncio +async def test_complex_object_hash_async(async_client, complex_objects): + for obj, hash in complex_objects: + assert hash == await BinaryObject.hashcode_async(obj, client=async_client) + + +def camel_to_snake(name): + return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower() + + +fields = {camel_to_snake(type_.__name__): type_ for type_ in [ + ByteObject, ShortObject, IntObject, LongObject, FloatObject, DoubleObject, CharObject, BoolObject, UUIDObject, + DateObject, TimestampObject, TimeObject, EnumObject, BinaryEnumObject, ByteArrayObject, ShortArrayObject, + IntArrayObject, LongArrayObject, FloatArrayObject, DoubleArrayObject, CharArrayObject, BoolArrayObject, + UUIDArrayObject, DateArrayObject, TimestampArrayObject, TimeArrayObject, EnumArrayObject, String, + StringArrayObject, DecimalObject, DecimalArrayObject, ObjectArrayObject, CollectionObject, MapObject, + BinaryObject]} + + +class AllTypesObject(metaclass=GenericObjectMeta, type_name='AllTypesObject', schema=fields): + pass + + +@pytest.fixture +def null_fields_object(): + res = AllTypesObject() + + for field in fields.keys(): + setattr(res, field, None) + + yield res + + +def test_complex_object_null_fields(cache, null_fields_object): + """ + Test that Python client can correctly write and read binary object that + contains null fields. 
+ """ + cache.put(1, null_fields_object) + assert cache.get(1) == null_fields_object, 'Objects mismatch' + + +@pytest.mark.asyncio +async def test_complex_object_null_fields_async(async_cache, null_fields_object): + """ + Test that Python client can correctly write and read binary object that + contains null fields. + """ + await async_cache.put(1, null_fields_object) + assert await async_cache.get(1) == null_fields_object, 'Objects mismatch' + + +def test_object_with_collections_of_binary_objects(cache): + __check_object_with_collections_of_binary_objects(cache) + + +@pytest.mark.asyncio +async def test_object_with_collections_of_binary_objects_async(async_cache): + await __check_object_with_collections_of_binary_objects(async_cache) + + +def __check_object_with_collections_of_binary_objects(cache): + class Container( + metaclass=GenericObjectMeta, + schema={ + 'id': IntObject, + 'collection': CollectionObject, + 'array': ObjectArrayObject, + 'map': MapObject + } + ): + pass + + class Value( + metaclass=GenericObjectMeta, + schema={ + 'id': IntObject, + 'name': String + } + ): + pass + + def fixtures(): + map_obj = (MapObject.HASH_MAP, {i: Value(i, f'val_{i}') for i in range(10)}) + col_obj = (CollectionObject.ARR_LIST, [Value(i, f'val_{i}') for i in range(10)]) + arr_obj = (ObjectArrayObject.OBJECT, [Value(i, f'val_{i}') for i in range(10)]) + return [ + Container(1, map=map_obj, collection=col_obj, array=arr_obj), + Container(2), # Check if collections are not set + ] + + async def inner_async(): + for i, val in enumerate(fixtures()): + await cache.put(i, val) + assert await cache.get(i) == val + + def inner(): + for i, val in enumerate(fixtures()): + cache.put(i, val) + assert cache.get(i) == val + + return inner_async() if isinstance(cache, AioCache) else inner() diff --git a/tests/common/test_cache_class.py b/tests/common/test_cache_class.py new file mode 100644 index 0000000..b035d8f --- /dev/null +++ b/tests/common/test_cache_class.py @@ -0,0 +1,215 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from collections import OrderedDict +from decimal import Decimal + +import pytest + +from pyignite import GenericObjectMeta +from pyignite.datatypes import BoolObject, DecimalObject, FloatObject, IntObject, String +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_CACHE_KEY_CONFIGURATION +from pyignite.exceptions import CacheError, ParameterError + + +def test_cache_create(client): + cache = client.get_or_create_cache('my_oop_cache') + try: + assert cache.name == cache.settings[PROP_NAME] == 'my_oop_cache' + finally: + cache.destroy() + + +@pytest.mark.asyncio +async def test_cache_create_async(async_client): + cache = await async_client.get_or_create_cache('my_oop_cache') + try: + assert cache.name == (await cache.settings())[PROP_NAME] == 'my_oop_cache' + finally: + await cache.destroy() + + +def test_get_cache(client): + my_cache = client.get_or_create_cache('my_cache') + try: + assert my_cache.settings[PROP_NAME] == 'my_cache' + finally: + my_cache.destroy() + + my_cache = client.get_cache('my_cache') + with pytest.raises(CacheError): + _ = my_cache.settings[PROP_NAME] + + +@pytest.mark.asyncio +async def test_get_cache_async(async_client): + my_cache = await async_client.get_or_create_cache('my_cache') + try: + assert (await my_cache.settings())[PROP_NAME] == 'my_cache' + finally: + await my_cache.destroy() + + my_cache = await async_client.get_cache('my_cache') + with pytest.raises(CacheError): + _ = (await my_cache.settings())[PROP_NAME] + + +@pytest.fixture +def cache_config(): + yield { + PROP_NAME: 'my_oop_cache', + PROP_CACHE_KEY_CONFIGURATION: [ + { + 'type_name': 'blah', + 'affinity_key_field_name': 'abc1234', + }, + ], + } + + +def test_cache_config(client, cache_config): + client.create_cache(cache_config) + cache = client.get_or_create_cache('my_oop_cache') + try: + assert cache.name == cache_config[PROP_NAME] + assert cache.settings[PROP_CACHE_KEY_CONFIGURATION] == cache_config[PROP_CACHE_KEY_CONFIGURATION] + finally: + cache.destroy() + + +@pytest.mark.asyncio +async def test_cache_config_async(async_client, cache_config): + await async_client.create_cache(cache_config) + cache = await async_client.get_or_create_cache('my_oop_cache') + try: + assert cache.name == cache_config[PROP_NAME] + assert (await cache.settings())[PROP_CACHE_KEY_CONFIGURATION] == cache_config[PROP_CACHE_KEY_CONFIGURATION] + finally: + await cache.destroy() + + +@pytest.fixture +def binary_type_fixture(): + class TestBinaryType( + metaclass=GenericObjectMeta, + schema=OrderedDict([ + ('test_bool', BoolObject), + ('test_str', String), + ('test_int', IntObject), + ('test_decimal', DecimalObject), + ]), + ): + pass + + return TestBinaryType( + test_bool=True, + test_str='This is a test', + test_int=42, + test_decimal=Decimal('34.56'), + ) + + +def test_cache_binary_get_put(cache, binary_type_fixture): + cache.put('my_key', binary_type_fixture) + value = cache.get('my_key') + assert value.test_bool == binary_type_fixture.test_bool + assert value.test_str == binary_type_fixture.test_str + assert value.test_int == binary_type_fixture.test_int + assert value.test_decimal == binary_type_fixture.test_decimal + + +@pytest.mark.asyncio +async def test_cache_binary_get_put_async(async_cache, binary_type_fixture): + await async_cache.put('my_key', binary_type_fixture) + + value = await async_cache.get('my_key') + assert value.test_bool == binary_type_fixture.test_bool + assert value.test_str == binary_type_fixture.test_str + assert value.test_int == binary_type_fixture.test_int + assert 
value.test_decimal == binary_type_fixture.test_decimal + + +@pytest.fixture +def binary_type_schemas_fixture(): + schemas = [ + OrderedDict([ + ('TEST_BOOL', BoolObject), + ('TEST_STR', String), + ('TEST_INT', IntObject), + ]), + OrderedDict([ + ('TEST_BOOL', BoolObject), + ('TEST_STR', String), + ('TEST_INT', IntObject), + ('TEST_FLOAT', FloatObject), + ]), + OrderedDict([ + ('TEST_BOOL', BoolObject), + ('TEST_STR', String), + ('TEST_INT', IntObject), + ('TEST_DECIMAL', DecimalObject), + ]) + ] + yield 'TestBinaryType', schemas + + +def test_get_binary_type(client, binary_type_schemas_fixture): + type_name, schemas = binary_type_schemas_fixture + + for schema in schemas: + client.put_binary_type(type_name, schema=schema) + + binary_type_info = client.get_binary_type('TestBinaryType') + assert len(binary_type_info['schemas']) == 3 + + binary_type_info = client.get_binary_type('NonExistentType') + assert binary_type_info['type_exists'] is False + assert len(binary_type_info) == 1 + + +@pytest.mark.asyncio +async def test_get_binary_type_async(async_client, binary_type_schemas_fixture): + type_name, schemas = binary_type_schemas_fixture + + for schema in schemas: + await async_client.put_binary_type(type_name, schema=schema) + + binary_type_info = await async_client.get_binary_type('TestBinaryType') + assert len(binary_type_info['schemas']) == 3 + + binary_type_info = await async_client.get_binary_type('NonExistentType') + assert binary_type_info['type_exists'] is False + assert len(binary_type_info) == 1 + + +def test_get_cache_errors(client): + cache = client.get_cache('missing-cache') + + with pytest.raises(CacheError, match=r'Cache does not exist \[cacheId='): + cache.put(1, 1) + + with pytest.raises(ParameterError, match="You should supply at least cache name"): + client.create_cache(None) + + +@pytest.mark.asyncio +async def test_get_cache_errors_async(async_client): + cache = await async_client.get_cache('missing-cache') + + with pytest.raises(CacheError, match=r'Cache does not exist \[cacheId='): + await cache.put(1, 1) + + with pytest.raises(ParameterError, match="You should supply at least cache name"): + await async_client.create_cache(None) diff --git a/tests/common/test_cache_config.py b/tests/common/test_cache_config.py new file mode 100644 index 0000000..e5ed33c --- /dev/null +++ b/tests/common/test_cache_config.py @@ -0,0 +1,192 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from inspect import getmembers + +import pyignite +import pytest + +from pyignite.datatypes.cache_config import ( + CacheMode, CacheAtomicityMode, WriteSynchronizationMode, PartitionLossPolicy, RebalanceMode +) +from pyignite.datatypes.prop_codes import ( + PROP_NAME, PROP_CACHE_KEY_CONFIGURATION, PROP_CACHE_MODE, PROP_CACHE_ATOMICITY_MODE, PROP_BACKUPS_NUMBER, + PROP_WRITE_SYNCHRONIZATION_MODE, PROP_COPY_ON_READ, PROP_READ_FROM_BACKUP, PROP_DATA_REGION_NAME, + PROP_IS_ONHEAP_CACHE_ENABLED, PROP_GROUP_NAME, PROP_DEFAULT_LOCK_TIMEOUT, PROP_MAX_CONCURRENT_ASYNC_OPERATIONS, + PROP_PARTITION_LOSS_POLICY, PROP_EAGER_TTL, PROP_STATISTICS_ENABLED, PROP_REBALANCE_MODE, PROP_REBALANCE_DELAY, + PROP_REBALANCE_TIMEOUT, PROP_REBALANCE_BATCH_SIZE, PROP_REBALANCE_BATCHES_PREFETCH_COUNT, PROP_REBALANCE_ORDER, + PROP_REBALANCE_THROTTLE, PROP_QUERY_ENTITIES, PROP_QUERY_PARALLELISM, PROP_QUERY_DETAIL_METRIC_SIZE, + PROP_SQL_SCHEMA, PROP_SQL_INDEX_INLINE_MAX_SIZE, PROP_SQL_ESCAPE_ALL, PROP_MAX_QUERY_ITERATORS, PROP_EXPIRY_POLICY +) +from pyignite.exceptions import CacheError + +cache_name = 'config_cache' + + +@pytest.fixture +def test_cache_settings(expiry_policy_supported): + settings = { + PROP_NAME: cache_name, + PROP_CACHE_MODE: CacheMode.PARTITIONED, + PROP_CACHE_ATOMICITY_MODE: CacheAtomicityMode.TRANSACTIONAL, + PROP_BACKUPS_NUMBER: 2, + PROP_WRITE_SYNCHRONIZATION_MODE: WriteSynchronizationMode.FULL_SYNC, + PROP_COPY_ON_READ: True, + PROP_READ_FROM_BACKUP: True, + PROP_DATA_REGION_NAME: 'SmallDataRegion', + PROP_IS_ONHEAP_CACHE_ENABLED: True, + PROP_QUERY_ENTITIES: [{ + 'table_name': cache_name + '_table', + 'key_field_name': 'KEY', + 'key_type_name': 'java.lang.String', + 'value_field_name': 'VAL', + 'value_type_name': 'java.lang.String', + 'field_name_aliases': [ + {'alias': 'val', 'field_name': 'VAL'}, + {'alias': 'key', 'field_name': 'KEY'} + ], + 'query_fields': [ + { + 'name': 'KEY', + 'type_name': 'java.lang.String' + }, + { + 'name': 'VAL', + 'type_name': 'java.lang.String' + } + ], + 'query_indexes': [] + }], + PROP_QUERY_PARALLELISM: 20, + PROP_QUERY_DETAIL_METRIC_SIZE: 10, + PROP_SQL_SCHEMA: 'PUBLIC', + PROP_SQL_INDEX_INLINE_MAX_SIZE: 1024, + PROP_SQL_ESCAPE_ALL: True, + PROP_MAX_QUERY_ITERATORS: 200, + PROP_REBALANCE_MODE: RebalanceMode.SYNC, + PROP_REBALANCE_DELAY: 1000, + PROP_REBALANCE_TIMEOUT: 5000, + PROP_REBALANCE_BATCH_SIZE: 100, + PROP_REBALANCE_BATCHES_PREFETCH_COUNT: 10, + PROP_REBALANCE_ORDER: 3, + PROP_REBALANCE_THROTTLE: 10, + PROP_GROUP_NAME: cache_name + '_group', + PROP_CACHE_KEY_CONFIGURATION: [ + { + 'type_name': 'java.lang.String', + 'affinity_key_field_name': 'abc1234', + } + ], + PROP_DEFAULT_LOCK_TIMEOUT: 3000, + PROP_MAX_CONCURRENT_ASYNC_OPERATIONS: 100, + PROP_PARTITION_LOSS_POLICY: PartitionLossPolicy.READ_WRITE_ALL, + PROP_EAGER_TTL: True, + PROP_STATISTICS_ENABLED: True + } + + if expiry_policy_supported: + settings[PROP_EXPIRY_POLICY] = None + elif 'PROP_EXPIRY_POLICY' in ALL_PROPS: + del ALL_PROPS['PROP_EXPIRY_POLICY'] + + return settings + + +@pytest.fixture +def cache(client): + cache = client.get_or_create_cache(cache_name) + yield cache + cache.destroy() + + +@pytest.fixture +async def async_cache(async_client): + cache = await async_client.get_or_create_cache(cache_name) + yield cache + await cache.destroy() + + +@pytest.fixture +def cache_with_config(client, test_cache_settings): + cache = client.get_or_create_cache(test_cache_settings) + yield cache + cache.destroy() + + +@pytest.fixture +async def async_cache_with_config(async_client, 
test_cache_settings): + cache = await async_client.get_or_create_cache(test_cache_settings) + yield cache + await cache.destroy() + + +def test_cache_get_configuration(client, cache): + assert cache_name in client.get_cache_names() + assert cache.settings[PROP_NAME] == cache_name + + +@pytest.mark.asyncio +async def test_cache_get_configuration_async(async_client, async_cache): + assert cache_name in (await async_client.get_cache_names()) + assert (await async_cache.settings())[PROP_NAME] == cache_name + + +def test_get_or_create_with_config_existing(client, cache_with_config, test_cache_settings): + assert cache_name in client.get_cache_names() + + with pytest.raises(CacheError): + client.create_cache(test_cache_settings) + + cache = client.get_or_create_cache(test_cache_settings) + assert cache.settings == cache_with_config.settings + + +@pytest.mark.asyncio +async def test_get_or_create_with_config_existing_async(async_client, async_cache_with_config, test_cache_settings): + assert cache_name in (await async_client.get_cache_names()) + + with pytest.raises(CacheError): + await async_client.create_cache(test_cache_settings) + + cache = await async_client.get_or_create_cache(test_cache_settings) + assert (await cache.settings()) == (await async_cache_with_config.settings()) + +ALL_PROPS = {name: value for name, value in getmembers(pyignite.datatypes.prop_codes) if name.startswith('PROP')} + + +def test_get_or_create_with_config_new(client, test_cache_settings): + assert cache_name not in client.get_cache_names() + cache = client.get_or_create_cache(test_cache_settings) + try: + assert cache_name in client.get_cache_names() + real_cache_settings = cache.settings + assert real_cache_settings == test_cache_settings + assert set(real_cache_settings.keys()) == set(ALL_PROPS.values()) + finally: + cache.destroy() + + +@pytest.mark.asyncio +async def test_get_or_create_with_config_new_async(async_client, test_cache_settings): + assert cache_name not in (await async_client.get_cache_names()) + + cache = await async_client.get_or_create_cache(test_cache_settings) + try: + assert cache_name in (await async_client.get_cache_names()) + real_cache_settings = await cache.settings() + assert real_cache_settings == test_cache_settings + assert set(real_cache_settings.keys()) == set(ALL_PROPS.values()) + finally: + await cache.destroy() diff --git a/tests/common/test_cache_size.py b/tests/common/test_cache_size.py new file mode 100644 index 0000000..f2ec3ed --- /dev/null +++ b/tests/common/test_cache_size.py @@ -0,0 +1,64 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest + +from pyignite.datatypes.cache_config import WriteSynchronizationMode +from pyignite.datatypes.key_value import PeekModes +from pyignite.datatypes.prop_codes import ( + PROP_NAME, PROP_IS_ONHEAP_CACHE_ENABLED, PROP_BACKUPS_NUMBER, PROP_WRITE_SYNCHRONIZATION_MODE +) +from tests.util import get_or_create_cache, get_or_create_cache_async + +test_params = [ + [ + { + PROP_NAME: 'cache_onheap_backups_2', + PROP_IS_ONHEAP_CACHE_ENABLED: True, + PROP_BACKUPS_NUMBER: 2, + PROP_WRITE_SYNCHRONIZATION_MODE: WriteSynchronizationMode.FULL_SYNC + }, + [ + [None, 1], + [PeekModes.PRIMARY, 1], + [PeekModes.BACKUP, 2], + [PeekModes.ALL, 3], + [[PeekModes.PRIMARY, PeekModes.BACKUP], 3], + [PeekModes.ONHEAP, 1], + [PeekModes.OFFHEAP, 1] + ] + ] +] + + +@pytest.mark.parametrize("cache_settings, cache_sizes", test_params) +def test_cache_size(client, cache_settings, cache_sizes): + with get_or_create_cache(client, cache_settings) as cache: + cache.put(1, 1) + + for props, exp_value in cache_sizes: + value = cache.get_size(props) + assert value == exp_value, f"expected {exp_value} for {props}, got {value} instead." + + +@pytest.mark.asyncio +@pytest.mark.parametrize("cache_settings, cache_sizes", test_params) +async def test_cache_size_async(async_client, cache_settings, cache_sizes): + async with get_or_create_cache_async(async_client, cache_settings) as cache: + await cache.put(1, 1) + + for props, exp_value in cache_sizes: + value = await cache.get_size(props) + assert value == exp_value, f"expected {exp_value} for {props}, got {value} instead." diff --git a/tests/common/test_datatypes.py b/tests/common/test_datatypes.py new file mode 100644 index 0000000..3a0ee51 --- /dev/null +++ b/tests/common/test_datatypes.py @@ -0,0 +1,312 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from collections import OrderedDict +import ctypes +from datetime import datetime, timedelta +import decimal +import pytest +import uuid + +from pyignite import GenericObjectMeta +from pyignite.datatypes import ( + ByteObject, IntObject, FloatObject, CharObject, ShortObject, BoolObject, ByteArrayObject, IntArrayObject, + ShortArrayObject, FloatArrayObject, BoolArrayObject, CharArrayObject, TimestampObject, String, BinaryEnumObject, + TimestampArrayObject, BinaryEnumArrayObject, ObjectArrayObject, CollectionObject, MapObject +) +from pyignite.utils import unsigned + + +class Value( + metaclass=GenericObjectMeta, + schema={ + 'id': IntObject, + 'name': String, + } +): + pass + + +put_get_data_params = [ + # integers + (42, None), + (42, ByteObject), + (42, ShortObject), + (42, IntObject), + + # floats + (3.1415, None), # True for Double but not Float + (3.5, FloatObject), + + # char is never autodetected + ('ы', CharObject), + ('カ', CharObject), + + # bool + (True, None), + (False, None), + (True, BoolObject), + (False, BoolObject), + + # arrays of integers + ([1, 2, 3, 5], None), + (b'buzz', None), + (b'buzz', ByteArrayObject), + (bytearray([7, 8, 8, 11]), None), + (bytearray([7, 8, 8, 11]), ByteArrayObject), + ([1, 2, 3, 5], ShortArrayObject), + ([1, 2, 3, 5], IntArrayObject), + + # arrays of floats + ([2.2, 4.4, 6.6], None), + ([2.5, 6.5], FloatArrayObject), + + # array of char + (['ы', 'カ'], CharArrayObject), + + # array of bool + ([True, False, True], None), + ([True, False], BoolArrayObject), + ([False, True], BoolArrayObject), + ([True, False, True, False], BoolArrayObject), + + # string + ('Little Mary had a lamb', None), + ('This is a test', String), + + # decimals + (decimal.Decimal('2.5'), None), + (decimal.Decimal('-1.3'), None), + + # uuid + (uuid.uuid4(), None), + + # date + (datetime(year=1998, month=4, day=6, hour=18, minute=30), None), + + # no autodetection for timestamp either + ( + (datetime(year=1998, month=4, day=6, hour=18, minute=30), 1000), + TimestampObject + ), + + # time + (timedelta(days=4, hours=4, minutes=24), None), + + # enum is useless in Python, except for interoperability with Java. 
+ # Also no autodetection + ((5, 6), BinaryEnumObject), + + # arrays of standard types + (['String 1', 'String 2'], None), + (['Some of us are empty', None, 'But not the others'], None), + + ([decimal.Decimal('2.71828'), decimal.Decimal('100')], None), + ([decimal.Decimal('2.1'), None, decimal.Decimal('3.1415')], None), + + ([uuid.uuid4(), uuid.uuid4()], None), + ( + [ + datetime(year=2010, month=1, day=1), + datetime(year=2010, month=12, day=31), + ], + None, + ), + ([timedelta(minutes=30), timedelta(hours=2)], None), + ( + [ + (datetime(year=2010, month=1, day=1), 1000), + (datetime(year=2010, month=12, day=31), 200), + ], + TimestampArrayObject + ), + ((-1, [(6001, 1), (6002, 2), (6003, 3)]), BinaryEnumArrayObject), + + # object array + ((ObjectArrayObject.OBJECT, [1, 2, decimal.Decimal('3'), bytearray(b'\x10\x20')]), ObjectArrayObject), + ((ObjectArrayObject.OBJECT, [Value(id=i, name=f'val_{i}') for i in range(10)]), ObjectArrayObject), + + # collection + ((CollectionObject.LINKED_LIST, [1, 2, 3]), None), + + # map + ((MapObject.HASH_MAP, {'key': 4, 5: 6.0}), None), + ((MapObject.LINKED_HASH_MAP, OrderedDict([('key', 4), (5, 6.0)])), None), +] + + +@pytest.mark.parametrize( + 'value, value_hint', + put_get_data_params +) +def test_put_get_data(cache, value, value_hint): + cache.put('my_key', value, value_hint=value_hint) + assert cache.get('my_key') == value + + +@pytest.mark.parametrize( + 'value, value_hint', + put_get_data_params +) +@pytest.mark.asyncio +async def test_put_get_data_async(async_cache, value, value_hint): + await async_cache.put('my_key', value, value_hint=value_hint) + assert await async_cache.get('my_key') == value + + +nested_array_objects_params = [ + [ + (ObjectArrayObject.OBJECT, [ + ((ObjectArrayObject.OBJECT, [ + 'test', 1, Value(1, 'test'), + ((ObjectArrayObject.OBJECT, ['test', 1, Value(1, 'test')]), ObjectArrayObject) + ]), ObjectArrayObject) + ]), + (ObjectArrayObject.OBJECT, [ + (ObjectArrayObject.OBJECT, ['test', 1, Value(1, 'test'), + (ObjectArrayObject.OBJECT, ['test', 1, Value(1, 'test')])]) + ]) + ], +] + + +@pytest.mark.parametrize( + 'hinted_value, value', + nested_array_objects_params +) +def test_put_get_nested_array_objects(cache, hinted_value, value): + cache.put('my_key', hinted_value, value_hint=ObjectArrayObject) + assert cache.get('my_key') == value + + +@pytest.mark.parametrize( + 'hinted_value, value', + nested_array_objects_params +) +@pytest.mark.asyncio +async def test_put_get_nested_array_objects_async(async_cache, hinted_value, value): + await async_cache.put('my_key', hinted_value, value_hint=ObjectArrayObject) + assert await async_cache.get('my_key') == value + + +bytearray_params = [ + ([1, 2, 3, 5], ByteArrayObject), + ((7, 8, 13, 18), ByteArrayObject), + ((-128, -1, 0, 1, 127, 255), ByteArrayObject), + (b'\x01\x03\x10', None), + (bytearray(b'\x01\x30'), None) +] + + +@pytest.mark.parametrize( + 'value,type_hint', + bytearray_params +) +def test_bytearray_from_different_input(cache, value, type_hint): + """ + ByteArrayObject's pythonic type is `bytearray`, but it should also accept + lists or tuples as a content. + """ + cache.put('my_key', value, value_hint=type_hint) + __check_bytearray_from_different_input(cache.get('my_key'), value) + + +@pytest.mark.parametrize( + 'value,type_hint', + bytearray_params +) +@pytest.mark.asyncio +async def test_bytearray_from_different_input_async(async_cache, value, type_hint): + """ + ByteArrayObject's pythonic type is `bytearray`, but it should also accept + lists or tuples as a content. 
+ """ + await async_cache.put('my_key', value, value_hint=ByteArrayObject) + __check_bytearray_from_different_input(await async_cache.get('my_key'), value) + + +def __check_bytearray_from_different_input(result, value): + if isinstance(value, (bytes, bytearray)): + assert isinstance(result, bytes) + assert value == result + else: + assert result == bytearray([unsigned(ch, ctypes.c_ubyte) for ch in value]) + + +uuid_params = [ + 'd57babad-7bc1-4c82-9f9c-e72841b92a85', + '5946c0c0-2b76-479d-8694-a2e64a3968da', + 'a521723d-ad5d-46a6-94ad-300f850ef704', +] + +uuid_table_create_sql = "CREATE TABLE test_uuid_repr (id INTEGER PRIMARY KEY, uuid_field UUID)" +uuid_table_drop_sql = "DROP TABLE test_uuid_repr IF EXISTS" +uuid_table_insert_sql = "INSERT INTO test_uuid_repr(id, uuid_field) VALUES (?, ?)" +uuid_table_query_sql = "SELECT * FROM test_uuid_repr WHERE uuid_field=?" + + +@pytest.fixture() +async def uuid_table(client): + client.sql(uuid_table_drop_sql) + client.sql(uuid_table_create_sql) + yield None + client.sql(uuid_table_drop_sql) + + +@pytest.fixture() +async def uuid_table_async(async_client): + await async_client.sql(uuid_table_drop_sql) + await async_client.sql(uuid_table_create_sql) + yield None + await async_client.sql(uuid_table_drop_sql) + + +@pytest.mark.parametrize( + 'uuid_string', + uuid_params +) +def test_uuid_representation(client, uuid_string, uuid_table): + """ Test if textual UUID representation is correct. """ + uuid_value = uuid.UUID(uuid_string) + + # use uuid.UUID class to insert data + client.sql(uuid_table_insert_sql, query_args=[1, uuid_value]) + # use hex string to retrieve data + with client.sql(uuid_table_query_sql, query_args=[str(uuid_value)]) as cursor: + result = list(cursor) + + # if a line was retrieved, our test was successful + assert len(result) == 1 + assert result[0][1] == uuid_value + + +@pytest.mark.parametrize( + 'uuid_string', + uuid_params +) +@pytest.mark.asyncio +async def test_uuid_representation_async(async_client, uuid_string, uuid_table_async): + """ Test if textual UUID representation is correct. """ + uuid_value = uuid.UUID(uuid_string) + + # use uuid.UUID class to insert data + await async_client.sql(uuid_table_insert_sql, query_args=[1, uuid_value]) + # use hex string to retrieve data + async with async_client.sql(uuid_table_query_sql, query_args=[str(uuid_value)]) as cursor: + result = [row async for row in cursor] + + # if a line was retrieved, our test was successful + assert len(result) == 1 + assert result[0][1] == uuid_value diff --git a/tests/common/test_expiry_policy.py b/tests/common/test_expiry_policy.py new file mode 100644 index 0000000..939a380 --- /dev/null +++ b/tests/common/test_expiry_policy.py @@ -0,0 +1,186 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import asyncio +import time +from datetime import timedelta + +import pytest + +from pyignite.datatypes import ExpiryPolicy +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_EXPIRY_POLICY + + +@pytest.mark.skip_if_no_expiry_policy +def test_expiry_policy(cache): + ttl, num_retries = timedelta(seconds=0.6), 10 + cache_eternal = cache.with_expire_policy(create=ExpiryPolicy.ETERNAL) + cache_created = cache.with_expire_policy(create=ttl) + cache_updated = cache.with_expire_policy(update=ttl) + cache_accessed = cache.with_expire_policy(access=ttl) + + for _ in range(num_retries): + cache.clear() + + start = time.time() + + cache_eternal.put(0, 0) + cache_created.put(1, 1) + cache_updated.put(2, 2) + cache_accessed.put(3, 3) + + time.sleep(ttl.total_seconds() * 2 / 3) + + result = [cache.contains_key(k) for k in range(4)] + + if time.time() - start >= ttl.total_seconds(): + continue + + assert all(result) + + start = time.time() + + cache_created.put(1, 2) # Check that update doesn't matter for created policy + cache_created.get(1) # Check that access doesn't matter for created policy + cache_updated.put(2, 3) # Check that update policy works. + cache_accessed.get(3) # Check that access policy works. + + time.sleep(ttl.total_seconds() * 2 / 3) + + result = [cache.contains_key(k) for k in range(4)] + + if time.time() - start >= ttl.total_seconds(): + continue + + assert result == [True, False, True, True] + + time.sleep(ttl.total_seconds() * 2 / 3) + + cache_updated.get(2) # Check that access doesn't matter for updated policy. + + time.sleep(ttl.total_seconds() * 2 / 3) + + result = [cache.contains_key(k) for k in range(0, 4)] + assert result == [True, False, False, False] + + +@pytest.mark.asyncio +@pytest.mark.skip_if_no_expiry_policy +async def test_expiry_policy_async(async_cache): + ttl, num_retries = timedelta(seconds=0.6), 10 + cache_eternal = async_cache.with_expire_policy(create=ExpiryPolicy.ETERNAL) + cache_created = async_cache.with_expire_policy(create=ttl) + cache_updated = async_cache.with_expire_policy(update=ttl) + cache_accessed = async_cache.with_expire_policy(access=ttl) + + for _ in range(num_retries): + await async_cache.clear() + + start = time.time() + + await asyncio.gather( + cache_eternal.put(0, 0), + cache_created.put(1, 1), + cache_updated.put(2, 2), + cache_accessed.put(3, 3) + ) + + await asyncio.sleep(ttl.total_seconds() * 2 / 3) + + result = await asyncio.gather(*[async_cache.contains_key(k) for k in range(4)]) + + if time.time() - start >= ttl.total_seconds(): + continue + + assert all(result) + + start = time.time() + + await asyncio.gather( + cache_created.put(1, 2), # Check that update doesn't matter for created policy + cache_created.get(1), # Check that access doesn't matter for created policy + cache_updated.put(2, 3), # Check that update policy works. + cache_accessed.get(3) # Check that access policy works. + ) + + await asyncio.sleep(ttl.total_seconds() * 2 / 3) + + result = await asyncio.gather(*[async_cache.contains_key(k) for k in range(4)]) + + if time.time() - start >= ttl.total_seconds(): + continue + + assert result == [True, False, True, True] + + await asyncio.sleep(ttl.total_seconds() * 2 / 3) + + await cache_updated.get(2) # Check that access doesn't matter for updated policy. 
+ + await asyncio.sleep(ttl.total_seconds() * 2 / 3) + + result = await asyncio.gather(*[async_cache.contains_key(k) for k in range(4)]) + assert result == [True, False, False, False] + +create_cache_with_expiry_params = ( + 'expiry_policy', + [ + None, + ExpiryPolicy(), + ExpiryPolicy(create=ExpiryPolicy.ETERNAL), + ExpiryPolicy(create=2000, update=4000, access=6000) + ] +) + + +@pytest.mark.parametrize(*create_cache_with_expiry_params) +@pytest.mark.skip_if_no_expiry_policy +def test_create_cache_with_expiry_policy(client, expiry_policy): + cache = client.create_cache({ + PROP_NAME: 'expiry_cache', + PROP_EXPIRY_POLICY: expiry_policy + }) + try: + settings = cache.settings + assert settings[PROP_EXPIRY_POLICY] == expiry_policy + finally: + cache.destroy() + + +@pytest.mark.parametrize(*create_cache_with_expiry_params) +@pytest.mark.skip_if_no_expiry_policy +@pytest.mark.asyncio +async def test_create_cache_with_expiry_policy_async(async_client, expiry_policy): + cache = await async_client.create_cache({ + PROP_NAME: 'expiry_cache', + PROP_EXPIRY_POLICY: expiry_policy + }) + try: + settings = await cache.settings() + assert settings[PROP_EXPIRY_POLICY] == expiry_policy + finally: + await cache.destroy() + + +@pytest.mark.skip_if_no_expiry_policy +@pytest.mark.parametrize( + 'params', + [ + {'create': timedelta(seconds=-1), 'update': timedelta(seconds=-1), 'delete': timedelta(seconds=-1)}, + {'create': 0.6}, + {'create': -3} + ] +) +def test_expiry_policy_param_validation(params): + with pytest.raises((TypeError, ValueError)): + ExpiryPolicy(**params) diff --git a/tests/test_generic_object.py b/tests/common/test_generic_object.py similarity index 95% rename from tests/test_generic_object.py rename to tests/common/test_generic_object.py index 73dc870..d6c0ee1 100644 --- a/tests/test_generic_object.py +++ b/tests/common/test_generic_object.py @@ -14,11 +14,10 @@ # limitations under the License. from pyignite import GenericObjectMeta -from pyignite.datatypes import * +from pyignite.datatypes import IntObject, String def test_go(): - class GenericObject( metaclass=GenericObjectMeta, schema={ diff --git a/tests/common/test_get_names.py b/tests/common/test_get_names.py new file mode 100644 index 0000000..7fcb499 --- /dev/null +++ b/tests/common/test_get_names.py @@ -0,0 +1,33 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import asyncio + +import pytest + + +def test_get_names(client): + bucket_names = {'my_bucket', 'my_bucket_2', 'my_bucket_3'} + for name in bucket_names: + client.get_or_create_cache(name) + + assert set(client.get_cache_names()) == bucket_names + + +@pytest.mark.asyncio +async def test_get_names_async(async_client): + bucket_names = {'my_bucket', 'my_bucket_2', 'my_bucket_3'} + await asyncio.gather(*[async_client.get_or_create_cache(name) for name in bucket_names]) + + assert set(await async_client.get_cache_names()) == bucket_names diff --git a/tests/common/test_key_value.py b/tests/common/test_key_value.py new file mode 100644 index 0000000..e26d373 --- /dev/null +++ b/tests/common/test_key_value.py @@ -0,0 +1,471 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from datetime import datetime + +import pytest + +from pyignite import GenericObjectMeta +from pyignite.datatypes import CollectionObject, IntObject, MapObject, TimestampObject, String + + +def test_put_get(cache): + cache.put('my_key', 5) + + assert cache.get('my_key') == 5 + + +@pytest.mark.asyncio +async def test_put_get_async(async_cache): + await async_cache.put('my_key', 5) + + assert await async_cache.get('my_key') == 5 + + +def test_get_all(cache): + assert cache.get_all(['key_1', 2, (3, IntObject)]) == {} + + cache.put('key_1', 4) + cache.put(3, 18, key_hint=IntObject) + + assert cache.get_all(['key_1', 2, (3, IntObject)]) == {'key_1': 4, 3: 18} + + +@pytest.mark.asyncio +async def test_get_all_async(async_cache): + assert await async_cache.get_all(['key_1', 2, (3, IntObject)]) == {} + + await async_cache.put('key_1', 4) + await async_cache.put(3, 18, key_hint=IntObject) + + assert await async_cache.get_all(['key_1', 2, (3, IntObject)]) == {'key_1': 4, 3: 18} + + +def test_put_all(cache): + test_dict = { + 1: 2, + 'key_1': 4, + (3, IntObject): 18, + } + cache.put_all(test_dict) + + result = cache.get_all(list(test_dict.keys())) + + assert len(result) == len(test_dict) + for k, v in test_dict.items(): + k = k[0] if isinstance(k, tuple) else k + assert result[k] == v + + +@pytest.mark.asyncio +async def test_put_all_async(async_cache): + test_dict = { + 1: 2, + 'key_1': 4, + (3, IntObject): 18, + } + await async_cache.put_all(test_dict) + + result = await async_cache.get_all(list(test_dict.keys())) + + assert len(result) == len(test_dict) + for k, v in test_dict.items(): + k = k[0] if isinstance(k, tuple) else k + assert result[k] == v + + +def test_contains_key(cache): + cache.put('test_key', 42) + + assert cache.contains_key('test_key') + assert not cache.contains_key('non-existent-key') + + +@pytest.mark.asyncio +async def test_contains_key_async(async_cache): + await async_cache.put('test_key', 42) + + assert await async_cache.contains_key('test_key') + assert not await 
async_cache.contains_key('non-existent-key') + + +def test_contains_keys(cache): + cache.put(5, 6) + cache.put('test_key', 42) + + assert cache.contains_keys([5, 'test_key']) + assert not cache.contains_keys([5, 'non-existent-key']) + + +@pytest.mark.asyncio +async def test_contains_keys_async(async_cache): + await async_cache.put(5, 6) + await async_cache.put('test_key', 42) + + assert await async_cache.contains_keys([5, 'test_key']) + assert not await async_cache.contains_keys([5, 'non-existent-key']) + + +def test_get_and_put(cache): + assert cache.get_and_put('test_key', 42) is None + assert cache.get('test_key') == 42 + assert cache.get_and_put('test_key', 1234) == 42 + assert cache.get('test_key') == 1234 + + +@pytest.mark.asyncio +async def test_get_and_put_async(async_cache): + assert await async_cache.get_and_put('test_key', 42) is None + assert await async_cache.get('test_key') == 42 + assert await async_cache.get_and_put('test_key', 1234) == 42 + assert await async_cache.get('test_key') == 1234 + + +def test_get_and_replace(cache): + assert cache.get_and_replace('test_key', 42) is None + assert cache.get('test_key') is None + cache.put('test_key', 42) + assert cache.get_and_replace('test_key', 1234) == 42 + + +@pytest.mark.asyncio +async def test_get_and_replace_async(async_cache): + assert await async_cache.get_and_replace('test_key', 42) is None + assert await async_cache.get('test_key') is None + await async_cache.put('test_key', 42) + assert await async_cache.get_and_replace('test_key', 1234) == 42 + + +def test_get_and_remove(cache): + assert cache.get_and_remove('test_key') is None + cache.put('test_key', 42) + assert cache.get_and_remove('test_key') == 42 + assert cache.get_and_remove('test_key') is None + + +@pytest.mark.asyncio +async def test_get_and_remove_async(async_cache): + assert await async_cache.get_and_remove('test_key') is None + await async_cache.put('test_key', 42) + assert await async_cache.get_and_remove('test_key') == 42 + assert await async_cache.get_and_remove('test_key') is None + + +def test_put_if_absent(cache): + assert cache.put_if_absent('test_key', 42) + assert not cache.put_if_absent('test_key', 1234) + + +@pytest.mark.asyncio +async def test_put_if_absent_async(async_cache): + assert await async_cache.put_if_absent('test_key', 42) + assert not await async_cache.put_if_absent('test_key', 1234) + + +def test_get_and_put_if_absent(cache): + assert cache.get_and_put_if_absent('test_key', 42) is None + assert cache.get_and_put_if_absent('test_key', 1234) == 42 + assert cache.get_and_put_if_absent('test_key', 5678) == 42 + assert cache.get('test_key') == 42 + + +@pytest.mark.asyncio +async def test_get_and_put_if_absent_async(async_cache): + assert await async_cache.get_and_put_if_absent('test_key', 42) is None + assert await async_cache.get_and_put_if_absent('test_key', 1234) == 42 + assert await async_cache.get_and_put_if_absent('test_key', 5678) == 42 + assert await async_cache.get('test_key') == 42 + + +def test_replace(cache): + assert cache.replace('test_key', 42) is False + cache.put('test_key', 1234) + assert cache.replace('test_key', 42) is True + assert cache.get('test_key') == 42 + + +@pytest.mark.asyncio +async def test_replace_async(async_cache): + assert await async_cache.replace('test_key', 42) is False + await async_cache.put('test_key', 1234) + assert await async_cache.replace('test_key', 42) is True + assert await async_cache.get('test_key') == 42 + + +def test_replace_if_equals(cache): + assert cache.replace_if_equals('my_test', 
42, 1234) is False + cache.put('my_test', 42) + assert cache.replace_if_equals('my_test', 42, 1234) is True + assert cache.get('my_test') == 1234 + + +@pytest.mark.asyncio +async def test_replace_if_equals_async(async_cache): + assert await async_cache.replace_if_equals('my_test', 42, 1234) is False + await async_cache.put('my_test', 42) + assert await async_cache.replace_if_equals('my_test', 42, 1234) is True + assert await async_cache.get('my_test') == 1234 + + +def test_clear(cache): + cache.put('my_test', 42) + cache.clear() + assert cache.get('my_test') is None + + +@pytest.mark.asyncio +async def test_clear_async(async_cache): + await async_cache.put('my_test', 42) + await async_cache.clear() + assert await async_cache.get('my_test') is None + + +def test_clear_key(cache): + cache.put('my_test', 42) + cache.put('another_test', 24) + + cache.clear_key('my_test') + + assert cache.get('my_test') is None + assert cache.get('another_test') == 24 + + +@pytest.mark.asyncio +async def test_clear_key_async(async_cache): + await async_cache.put('my_test', 42) + await async_cache.put('another_test', 24) + + await async_cache.clear_key('my_test') + + assert await async_cache.get('my_test') is None + assert await async_cache.get('another_test') == 24 + + +def test_clear_keys(cache): + cache.put('my_test_key', 42) + cache.put('another_test', 24) + + cache.clear_keys(['my_test_key', 'nonexistent_key']) + + assert cache.get('my_test_key') is None + assert cache.get('another_test') == 24 + + +@pytest.mark.asyncio +async def test_clear_keys_async(async_cache): + await async_cache.put('my_test_key', 42) + await async_cache.put('another_test', 24) + + await async_cache.clear_keys(['my_test_key', 'nonexistent_key']) + + assert await async_cache.get('my_test_key') is None + assert await async_cache.get('another_test') == 24 + + +def test_remove_key(cache): + cache.put('my_test_key', 42) + assert cache.remove_key('my_test_key') is True + assert cache.remove_key('non_existent_key') is False + + +@pytest.mark.asyncio +async def test_remove_key_async(async_cache): + await async_cache.put('my_test_key', 42) + assert await async_cache.remove_key('my_test_key') is True + assert await async_cache.remove_key('non_existent_key') is False + + +def test_remove_if_equals(cache): + cache.put('my_test', 42) + assert cache.remove_if_equals('my_test', 1234) is False + assert cache.remove_if_equals('my_test', 42) is True + assert cache.get('my_test') is None + + +@pytest.mark.asyncio +async def test_remove_if_equals_async(async_cache): + await async_cache.put('my_test', 42) + assert await async_cache.remove_if_equals('my_test', 1234) is False + assert await async_cache.remove_if_equals('my_test', 42) is True + assert await async_cache.get('my_test') is None + + +def test_remove_keys(cache): + cache.put('my_test', 42) + + cache.put('another_test', 24) + cache.remove_keys(['my_test', 'non_existent']) + + assert cache.get('my_test') is None + assert cache.get('another_test') == 24 + + +@pytest.mark.asyncio +async def test_remove_keys_async(async_cache): + await async_cache.put('my_test', 42) + + await async_cache.put('another_test', 24) + await async_cache.remove_keys(['my_test', 'non_existent']) + + assert await async_cache.get('my_test') is None + assert await async_cache.get('another_test') == 24 + + +def test_remove_all(cache): + cache.put('my_test', 42) + cache.put('another_test', 24) + cache.remove_all() + + assert cache.get('my_test') is None + assert cache.get('another_test') is None + + +@pytest.mark.asyncio +async 
def test_remove_all_async(async_cache): + await async_cache.put('my_test', 42) + await async_cache.put('another_test', 24) + await async_cache.remove_all() + + assert await async_cache.get('my_test') is None + assert await async_cache.get('another_test') is None + + +def test_cache_get_size(cache): + cache.put('my_test', 42) + assert cache.get_size() == 1 + + +@pytest.mark.asyncio +async def test_cache_get_size_async(async_cache): + await async_cache.put('my_test', 42) + assert await async_cache.get_size() == 1 + + +class Value( + metaclass=GenericObjectMeta, + schema={ + 'id': IntObject, + 'name': String, + } +): + pass + + +collection_params = [ + [ + 'simple', + (CollectionObject.ARR_LIST, [ + (123, IntObject), 678, None, 55.2, ((datetime(year=1996, month=3, day=1), 0), TimestampObject) + ]), + (CollectionObject.ARR_LIST, [123, 678, None, 55.2, (datetime(year=1996, month=3, day=1), 0)]) + ], + [ + 'nested', + (CollectionObject.ARR_LIST, [ + 123, ((1, [456, 'inner_test_string', 789]), CollectionObject), 'outer_test_string' + ]), + (CollectionObject.ARR_LIST, [123, (1, [456, 'inner_test_string', 789]), 'outer_test_string']) + ], + [ + 'binary', + (CollectionObject.ARR_LIST, [Value(id=i, name=f'val_{i}') for i in range(0, 10)]), + (CollectionObject.ARR_LIST, [Value(id=i, name=f'val_{i}') for i in range(0, 10)]), + ], + [ + 'hash_map', + ( + MapObject.HASH_MAP, + { + (123, IntObject): 'test_data', + 456: ((1, [456, 'inner_test_string', 789]), CollectionObject), + 'test_key': 32.4, + 'simple_strings': ['string_1', 'string_2'] + } + ), + ( + MapObject.HASH_MAP, + { + 123: 'test_data', + 456: (1, [456, 'inner_test_string', 789]), + 'test_key': 32.4, + 'simple_strings': ['string_1', 'string_2'] + } + ) + ], + [ + 'linked_hash_map', + ( + MapObject.LINKED_HASH_MAP, + { + 'test_data': 12345, + 456: ['string_1', 'string_2'], + 'test_key': 32.4 + } + ), + ( + MapObject.LINKED_HASH_MAP, + { + 'test_data': 12345, + 456: ['string_1', 'string_2'], + 'test_key': 32.4 + } + ) + ], + [ + 'binary_map', + (MapObject.HASH_MAP, {i: Value(id=i, name=f"val_{i}") for i in range(10)}), + (MapObject.HASH_MAP, {i: Value(id=i, name=f"val_{i}") for i in range(10)}) + ] +] + + +@pytest.mark.parametrize(['key', 'hinted_value', 'value'], collection_params) +def test_put_get_collection(cache, key, hinted_value, value): + cache.put(key, hinted_value) + assert cache.get(key) == value + + +@pytest.mark.parametrize(['key', 'hinted_value', 'value'], collection_params) +@pytest.mark.asyncio +async def test_put_get_collection_async(async_cache, key, hinted_value, value): + await async_cache.put(key, hinted_value) + assert await async_cache.get(key) == value + + +@pytest.fixture +def complex_map(): + return {"test" + str(i): ((MapObject.HASH_MAP, + {"key_1": ((CollectionObject.ARR_LIST, ["value_1", 1.0]), CollectionObject), + "key_2": ((CollectionObject.ARR_LIST, [["value_2_1", "1.0"], ["value_2_2", "0.25"]]), + CollectionObject), + "key_3": ((CollectionObject.ARR_LIST, [["value_3_1", "1.0"], ["value_3_2", "0.25"]]), + CollectionObject), + "key_4": ((CollectionObject.ARR_LIST, [["value_4_1", "1.0"], ["value_4_2", "0.25"]]), + CollectionObject), + 'key_5': False, + "key_6": "value_6"}), MapObject) for i in range(10000)} + + +def test_put_all_large_complex_map(cache, complex_map): + cache.put_all(complex_map) + values = cache.get_all(complex_map.keys()) + assert len(values) == len(complex_map) + + +@pytest.mark.asyncio +async def test_put_all_large_complex_map_async(async_cache, complex_map): + await 
async_cache.put_all(complex_map) + values = await async_cache.get_all(complex_map.keys()) + assert len(values) == len(complex_map) diff --git a/tests/common/test_query_listener.py b/tests/common/test_query_listener.py new file mode 100644 index 0000000..8310117 --- /dev/null +++ b/tests/common/test_query_listener.py @@ -0,0 +1,127 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import pytest + +from pyignite import Client, AioClient +from pyignite.exceptions import CacheError +from pyignite.monitoring import QueryEventListener, QueryStartEvent, QueryFailEvent, QuerySuccessEvent +from pyignite.queries.op_codes import OP_CACHE_PUT, OP_CACHE_PARTITIONS, OP_CACHE_GET_NAMES + +events = [] + + +class QueryRouteListener(QueryEventListener): + def on_query_start(self, event): + if event.op_code != OP_CACHE_PARTITIONS: + events.append(event) + + def on_query_fail(self, event): + if event.op_code != OP_CACHE_PARTITIONS: + events.append(event) + + def on_query_success(self, event): + if event.op_code != OP_CACHE_PARTITIONS: + events.append(event) + + +@pytest.fixture +def client(): + client = Client(event_listeners=[QueryRouteListener()]) + try: + client.connect('127.0.0.1', 10801) + yield client + finally: + client.close() + events.clear() + + +@pytest.fixture +async def async_client(event_loop): + client = AioClient(event_listeners=[QueryRouteListener()]) + try: + await client.connect('127.0.0.1', 10801) + yield client + finally: + await client.close() + events.clear() + + +def test_query_fail_events(request, client): + with pytest.raises(CacheError): + cache = client.get_cache(request.node.name) + cache.put(1, 1) + + __assert_fail_events(client) + + +@pytest.mark.asyncio +async def test_query_fail_events_async(request, async_client): + with pytest.raises(CacheError): + cache = await async_client.get_cache(request.node.name) + await cache.put(1, 1) + + __assert_fail_events(async_client) + + +def __assert_fail_events(client): + assert len(events) == 2 + conn = client._nodes[0] + for ev in events: + if isinstance(ev, QueryStartEvent): + assert ev.op_code == OP_CACHE_PUT + assert ev.op_name == 'OP_CACHE_PUT' + assert ev.host == conn.host + assert ev.port == conn.port + assert ev.node_uuid == str(conn.uuid if conn.uuid else '') + + if isinstance(ev, QueryFailEvent): + assert ev.op_code == OP_CACHE_PUT + assert ev.op_name == 'OP_CACHE_PUT' + assert ev.host == conn.host + assert ev.port == conn.port + assert ev.node_uuid == str(conn.uuid if conn.uuid else '') + assert 'Cache does not exist' in ev.err_msg + assert ev.duration >= 0 + + +def test_query_success_events(client): + client.get_cache_names() + __assert_success_events(client) + + +@pytest.mark.asyncio +async def test_query_success_events_async(async_client): + await async_client.get_cache_names() + 
__assert_success_events(async_client) + + +def __assert_success_events(client): + assert len(events) == 2 + conn = client._nodes[0] + for ev in events: + if isinstance(ev, QueryStartEvent): + assert ev.op_code == OP_CACHE_GET_NAMES + assert ev.op_name == 'OP_CACHE_GET_NAMES' + assert ev.host == conn.host + assert ev.port == conn.port + assert ev.node_uuid == str(conn.uuid if conn.uuid else '') + + if isinstance(ev, QuerySuccessEvent): + assert ev.op_code == OP_CACHE_GET_NAMES + assert ev.op_name == 'OP_CACHE_GET_NAMES' + assert ev.host == conn.host + assert ev.port == conn.port + assert ev.node_uuid == str(conn.uuid if conn.uuid else '') + assert ev.duration >= 0 diff --git a/tests/common/test_scan.py b/tests/common/test_scan.py new file mode 100644 index 0000000..d55fd3e --- /dev/null +++ b/tests/common/test_scan.py @@ -0,0 +1,164 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from collections import OrderedDict + +import pytest + +from pyignite import GenericObjectMeta +from pyignite.api import resource_close, resource_close_async +from pyignite.connection import AioConnection +from pyignite.datatypes import IntObject, String +from pyignite.exceptions import CacheError + + +class SimpleObject( + metaclass=GenericObjectMeta, + type_name='SimpleObject', + schema=OrderedDict([ + ('id', IntObject), + ('str', String), + ]) +): + pass + + +page_size = 10 + + +@pytest.fixture +def test_objects_data(): + yield {i: SimpleObject(id=i, str=f'str_{i}') for i in range(page_size * 2)} + + +@pytest.mark.asyncio +def test_scan_objects(cache, test_objects_data): + cache.put_all(test_objects_data) + + for p_sz in [page_size, page_size * 2, page_size * 3, page_size + 5]: + with cache.scan(p_sz) as cursor: + result = {k: v for k, v in cursor} + assert result == test_objects_data + + __check_cursor_closed(cursor) + + with pytest.raises(Exception): + with cache.scan(p_sz) as cursor: + for _ in cursor: + raise Exception + + __check_cursor_closed(cursor) + + cursor = cache.scan(page_size) + assert {k: v for k, v in cursor} == test_objects_data + __check_cursor_closed(cursor) + + +@pytest.mark.asyncio +async def test_scan_objects_async(async_cache, test_objects_data): + await async_cache.put_all(test_objects_data) + + for p_sz in [page_size, page_size * 2, page_size * 3, page_size + 5]: + async with async_cache.scan(p_sz) as cursor: + result = {k: v async for k, v in cursor} + assert result == test_objects_data + + await __check_cursor_closed(cursor) + + with pytest.raises(Exception): + async with async_cache.scan(p_sz) as cursor: + async for _ in cursor: + raise Exception + + await __check_cursor_closed(cursor) + + cursor = await async_cache.scan(page_size) + assert {k: v async for k, v in cursor} == test_objects_data + + await __check_cursor_closed(cursor) + + +@pytest.fixture 
+def cache_scan_data(): + yield { + 1: 'This is a test', + 2: 'One more test', + 3: 'Foo', + 4: 'Buzz', + 5: 'Bar', + 6: 'Lorem ipsum', + 7: 'dolor sit amet', + 8: 'consectetur adipiscing elit', + 9: 'Nullam aliquet', + 10: 'nisl at ante', + 11: 'suscipit', + 12: 'ut cursus', + 13: 'metus interdum', + 14: 'Nulla tincidunt', + 15: 'sollicitudin iaculis', + } + + +@pytest.mark.parametrize('page_size', range(1, 17, 5)) +def test_cache_scan(cache, cache_scan_data, page_size): + cache.put_all(cache_scan_data) + + with cache.scan(page_size=page_size) as cursor: + assert {k: v for k, v in cursor} == cache_scan_data + + +@pytest.mark.parametrize('page_size', range(1, 17, 5)) +@pytest.mark.asyncio +async def test_cache_scan_async(async_cache, cache_scan_data, page_size): + await async_cache.put_all(cache_scan_data) + + async with async_cache.scan(page_size=page_size) as cursor: + assert {k: v async for k, v in cursor} == cache_scan_data + + +def test_uninitialized_cursor(cache, test_objects_data): + cache.put_all(test_objects_data) + + cursor = cache.scan(page_size) + for _ in cursor: + break + + cursor.close() + __check_cursor_closed(cursor) + + +@pytest.mark.asyncio +async def test_uninitialized_cursor_async(async_cache, test_objects_data): + await async_cache.put_all(test_objects_data) + + # iterating of non-awaited cursor. + with pytest.raises(CacheError): + cursor = async_cache.scan(page_size) + assert {k: v async for k, v in cursor} == test_objects_data + + cursor = await async_cache.scan(page_size) + assert {k: v async for k, v in cursor} == test_objects_data + await __check_cursor_closed(cursor) + + +def __check_cursor_closed(cursor): + async def check_async(): + result = await resource_close_async(cursor.connection, cursor.cursor_id) + assert result.status != 0 + + def check(): + result = resource_close(cursor.connection, cursor.cursor_id) + assert result.status != 0 + + return check_async() if isinstance(cursor.connection, AioConnection) else check() diff --git a/tests/common/test_sql.py b/tests/common/test_sql.py new file mode 100644 index 0000000..b947fbc --- /dev/null +++ b/tests/common/test_sql.py @@ -0,0 +1,455 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
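+"""Tests for SQL queries over the sync and async clients: field queries, multipage cursors, schema/cache arguments and VARBINARY handling."""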
+ +import pytest + +from pyignite import AioClient +from pyignite.aio_cache import AioCache +from pyignite.datatypes.cache_config import CacheMode +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_SQL_SCHEMA, PROP_QUERY_ENTITIES, PROP_CACHE_MODE +from pyignite.exceptions import SQLError +from pyignite.utils import entity_id + +student_table_data = [ + ('John', 'Doe', 5), + ('Jane', 'Roe', 4), + ('Joe', 'Bloggs', 4), + ('Richard', 'Public', 3), + ('Negidius', 'Numerius', 3), +] + +student_table_select_query = 'SELECT id, first_name, last_name, grade FROM Student ORDER BY ID ASC' + + +@pytest.fixture +def student_table_fixture(client): + yield from __create_student_table_fixture(client) + + +@pytest.fixture +async def async_student_table_fixture(async_client): + async for _ in __create_student_table_fixture(async_client): + yield + + +def __create_student_table_fixture(client): + create_query = '''CREATE TABLE Student ( + id INT(11) PRIMARY KEY, + first_name CHAR(24), + last_name CHAR(32), + grade INT(11))''' + + insert_query = '''INSERT INTO Student(id, first_name, last_name, grade) + VALUES (?, ?, ?, ?)''' + + drop_query = 'DROP TABLE Student IF EXISTS' + + def inner(): + client.sql(drop_query) + client.sql(create_query) + + for i, data_line in enumerate(student_table_data): + fname, lname, grade = data_line + client.sql(insert_query, query_args=[i, fname, lname, grade]) + + yield None + client.sql(drop_query) + + async def inner_async(): + await client.sql(drop_query) + await client.sql(create_query) + + for i, data_line in enumerate(student_table_data): + fname, lname, grade = data_line + await client.sql(insert_query, query_args=[i, fname, lname, grade]) + + yield None + await client.sql(drop_query) + + return inner_async() if isinstance(client, AioClient) else inner() + + +@pytest.mark.parametrize('page_size', range(1, 6, 2)) +def test_sql(client, student_table_fixture, page_size): + cache = client.get_cache('SQL_PUBLIC_STUDENT') + cache_config = cache.settings + + binary_type_name = cache_config[PROP_QUERY_ENTITIES][0]['value_type_name'] + + with cache.select_row('ORDER BY ID ASC', page_size=4) as cursor: + for i, row in enumerate(cursor): + k, v = row + assert k == i + + assert (v.FIRST_NAME, v.LAST_NAME, v.GRADE) == student_table_data[i] + assert v.type_id == entity_id(binary_type_name) + + +@pytest.mark.parametrize('page_size', range(1, 6, 2)) +def test_sql_fields(client, student_table_fixture, page_size): + with client.sql(student_table_select_query, page_size=page_size, include_field_names=True) as cursor: + for i, row in enumerate(cursor): + if i > 0: + assert tuple(row) == (i - 1,) + student_table_data[i - 1] + else: + assert row == ['ID', 'FIRST_NAME', 'LAST_NAME', 'GRADE'] + + +@pytest.mark.asyncio +@pytest.mark.parametrize('page_size', range(1, 6, 2)) +async def test_sql_fields_async(async_client, async_student_table_fixture, page_size): + async with async_client.sql(student_table_select_query, page_size=page_size, include_field_names=True) as cursor: + i = 0 + async for row in cursor: + if i > 0: + assert tuple(row) == (i - 1,) + student_table_data[i - 1] + else: + assert row == ['ID', 'FIRST_NAME', 'LAST_NAME', 'GRADE'] + i += 1 + + cursor = await async_client.sql(student_table_select_query, page_size=page_size, include_field_names=True) + try: + i = 0 + async for row in cursor: + if i > 0: + assert tuple(row) == (i - 1,) + student_table_data[i - 1] + else: + assert row == ['ID', 'FIRST_NAME', 'LAST_NAME', 'GRADE'] + i += 1 + finally: + await cursor.close() + + 
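+# Column names for the multipage query test table: "id" is the primary key, the remaining 12 INT columns hold multiples of the id.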
+multipage_fields = ["id", "abc", "ghi", "def", "jkl", "prs", "mno", "tuw", "zyz", "abc1", "def1", "jkl1", "prs1"] + + +@pytest.fixture +def long_multipage_table_fixture(client): + yield from __long_multipage_table_fixture(client) + + +@pytest.fixture +async def async_long_multipage_table_fixture(async_client): + async for _ in __long_multipage_table_fixture(async_client): + yield + + +def __long_multipage_table_fixture(client): + drop_query = 'DROP TABLE LongMultipageQuery IF EXISTS' + + create_query = "CREATE TABLE LongMultiPageQuery (%s, %s)" % ( + multipage_fields[0] + " INT(11) PRIMARY KEY", ",".join(map(lambda f: f + " INT(11)", multipage_fields[1:]))) + + insert_query = "INSERT INTO LongMultipageQuery (%s) VALUES (%s)" % ( + ",".join(multipage_fields), ",".join("?" * len(multipage_fields))) + + def query_args(_id): + return [_id] + list(i * _id for i in range(1, len(multipage_fields))) + + def inner(): + client.sql(drop_query) + client.sql(create_query) + + for i in range(1, 21): + client.sql(insert_query, query_args=query_args(i)) + yield None + + client.sql(drop_query) + + async def inner_async(): + await client.sql(drop_query) + await client.sql(create_query) + + for i in range(1, 21): + await client.sql(insert_query, query_args=query_args(i)) + yield None + + await client.sql(drop_query) + + return inner_async() if isinstance(client, AioClient) else inner() + + +def test_long_multipage_query(client, long_multipage_table_fixture): + """ + The test creates a table with 13 columns (id and 12 enumerated columns) + and 20 records with id in range from 1 to 20. Values of enumerated columns + are = column number * id. + + The goal is to ensure that all the values are selected in a right order. + """ + + with client.sql('SELECT * FROM LongMultipageQuery', page_size=1) as cursor: + for page in cursor: + assert len(page) == len(multipage_fields) + for field_number, value in enumerate(page[1:], start=1): + assert value == field_number * page[0] + + +@pytest.mark.asyncio +async def test_long_multipage_query_async(async_client, async_long_multipage_table_fixture): + async with async_client.sql('SELECT * FROM LongMultipageQuery', page_size=1) as cursor: + async for page in cursor: + assert len(page) == len(multipage_fields) + for field_number, value in enumerate(page[1:], start=1): + assert value == field_number * page[0] + + +def test_sql_not_create_cache_with_schema(client): + with pytest.raises(SQLError, match=r".*Cache does not exist.*"): + client.sql(schema=None, cache='NOT_EXISTING', query_str='select * from NotExisting') + + +@pytest.mark.asyncio +async def test_sql_not_create_cache_with_schema_async(async_client): + with pytest.raises(SQLError, match=r".*Cache does not exist.*"): + await async_client.sql(schema=None, cache='NOT_EXISTING_ASYNC', query_str='select * from NotExistingAsync') + + +def test_sql_not_create_cache_with_cache(client): + with pytest.raises(SQLError, match=r".*Failed to set schema.*"): + client.sql(schema='NOT_EXISTING', query_str='select * from NotExisting') + + +@pytest.mark.asyncio +async def test_sql_not_create_cache_with_cache_async(async_client): + with pytest.raises(SQLError, match=r".*Failed to set schema.*"): + await async_client.sql(schema='NOT_EXISTING_ASYNC', query_str='select * from NotExistingAsync') + + +@pytest.fixture +def indexed_cache_settings(): + cache_name = 'indexed_cache' + schema_name = f'{cache_name}_schema'.upper() + table_name = f'{cache_name}_table'.upper() + + yield { + PROP_NAME: cache_name, + PROP_SQL_SCHEMA: schema_name, + 
PROP_CACHE_MODE: CacheMode.PARTITIONED, + PROP_QUERY_ENTITIES: [ + { + 'table_name': table_name, + 'key_field_name': 'KEY', + 'value_field_name': 'VALUE', + 'key_type_name': 'java.lang.Long', + 'value_type_name': 'java.lang.String', + 'query_indexes': [], + 'field_name_aliases': [], + 'query_fields': [ + { + 'name': 'KEY', + 'type_name': 'java.lang.Long', + 'is_key_field': True, + 'is_notnull_constraint_field': True, + }, + { + 'name': 'VALUE', + 'type_name': 'java.lang.String', + }, + ], + }, + ], + } + + +@pytest.fixture +def indexed_cache_fixture(client, indexed_cache_settings): + cache_name = indexed_cache_settings[PROP_NAME] + schema_name = indexed_cache_settings[PROP_SQL_SCHEMA] + table_name = indexed_cache_settings[PROP_QUERY_ENTITIES][0]['table_name'] + + cache = client.create_cache(indexed_cache_settings) + + yield cache, cache_name, schema_name, table_name + cache.destroy() + + +@pytest.fixture +async def async_indexed_cache_fixture(async_client, indexed_cache_settings): + cache_name = indexed_cache_settings[PROP_NAME] + schema_name = indexed_cache_settings[PROP_SQL_SCHEMA] + table_name = indexed_cache_settings[PROP_QUERY_ENTITIES][0]['table_name'] + + cache = await async_client.create_cache(indexed_cache_settings) + + yield cache, cache_name, schema_name, table_name + await cache.destroy() + + +def test_query_with_cache(client, indexed_cache_fixture): + return __check_query_with_cache(client, indexed_cache_fixture) + + +@pytest.mark.asyncio +async def test_query_with_cache_async(async_client, async_indexed_cache_fixture): + return await __check_query_with_cache(async_client, async_indexed_cache_fixture) + + +def __check_query_with_cache(client, cache_fixture): + test_key, test_value = 42, 'Lorem ipsum' + cache, cache_name, schema_name, table_name = cache_fixture + query = f'select value from {table_name}' + + args_to_check = [ + ('schema', schema_name), + ('cache', cache), + ('cache', cache_name), + ('cache', cache.cache_id) + ] + + def inner(): + cache.put(test_key, test_value) + for param, value in args_to_check: + with client.sql(query, **{param: value}) as cursor: + received = next(cursor)[0] + assert test_value == received + + async def async_inner(): + await cache.put(test_key, test_value) + for param, value in args_to_check: + async with client.sql(query, **{param: value}) as cursor: + row = await cursor.__anext__() + received = row[0] + assert test_value == received + + return async_inner() if isinstance(cache, AioCache) else inner() + + +VARBIN_CREATE_QUERY = 'CREATE TABLE VarbinTable(id int primary key, varbin VARBINARY)' +VARBIN_DROP_QUERY = 'DROP TABLE VarbinTable' +VARBIN_MERGE_QUERY = 'MERGE INTO VarbinTable(id, varbin) VALUES (?, ?)' +VARBIN_SELECT_QUERY = 'SELECT * FROM VarbinTable' + +VARBIN_TEST_PARAMS = [ + bytearray('Test message', 'UTF-8'), + bytes('Test message', 'UTF-8') +] + + +@pytest.fixture +def varbin_table(client): + client.sql(VARBIN_CREATE_QUERY) + yield None + client.sql(VARBIN_DROP_QUERY) + + +@pytest.mark.parametrize( + 'value', VARBIN_TEST_PARAMS +) +def test_sql_cache_varbinary_handling(client, varbin_table, value): + client.sql(VARBIN_MERGE_QUERY, query_args=(1, value)) + with client.sql(VARBIN_SELECT_QUERY) as cursor: + for row in cursor: + assert isinstance(row[1], bytes) + assert row[1] == value + break + + +@pytest.fixture +async def varbin_table_async(async_client): + await async_client.sql(VARBIN_CREATE_QUERY) + yield None + await async_client.sql(VARBIN_DROP_QUERY) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'value', 
VARBIN_TEST_PARAMS +) +async def test_sql_cache_varbinary_handling_async(async_client, varbin_table_async, value): + await async_client.sql(VARBIN_MERGE_QUERY, query_args=(1, value)) + async with async_client.sql(VARBIN_SELECT_QUERY) as cursor: + async for row in cursor: + assert isinstance(row[1], bytes) + assert row[1] == value + break + + +@pytest.fixture +def varbin_cache_settings(): + cache_name = 'varbin_cache' + table_name = f'{cache_name}_table'.upper() + + yield { + PROP_NAME: cache_name, + PROP_SQL_SCHEMA: 'PUBLIC', + PROP_CACHE_MODE: CacheMode.PARTITIONED, + PROP_QUERY_ENTITIES: [ + { + 'table_name': table_name, + 'key_field_name': 'ID', + 'value_field_name': 'VALUE', + 'key_type_name': 'java.lang.Long', + 'value_type_name': 'byte[]', + 'query_indexes': [], + 'field_name_aliases': [], + 'query_fields': [ + { + 'name': 'ID', + 'type_name': 'java.lang.Long', + 'is_key_field': True, + 'is_notnull_constraint_field': True, + }, + { + 'name': 'VALUE', + 'type_name': 'byte[]', + }, + ], + }, + ], + } + + +VARBIN_CACHE_TABLE_NAME = 'varbin_cache_table'.upper() +VARBIN_CACHE_SELECT_QUERY = f'SELECT * FROM {VARBIN_CACHE_TABLE_NAME}' + + +@pytest.fixture +def varbin_cache(client, varbin_cache_settings): + cache = client.get_or_create_cache(varbin_cache_settings) + yield cache + cache.destroy() + + +@pytest.mark.parametrize( + 'value', VARBIN_TEST_PARAMS +) +def test_cache_varbinary_handling(client, varbin_cache, value): + varbin_cache.put(1, value) + with client.sql(VARBIN_CACHE_SELECT_QUERY) as cursor: + for row in cursor: + assert isinstance(row[1], bytes) + assert row[1] == value + break + + +@pytest.fixture +async def varbin_cache_async(async_client, varbin_cache_settings): + cache = await async_client.get_or_create_cache(varbin_cache_settings) + yield cache + await cache.destroy() + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'value', VARBIN_TEST_PARAMS +) +async def test_cache_varbinary_handling_async(async_client, varbin_cache_async, value): + await varbin_cache_async.put(1, value) + async with async_client.sql(VARBIN_CACHE_SELECT_QUERY) as cursor: + async for row in cursor: + assert isinstance(row[1], bytes) + assert row[1] == value + break diff --git a/tests/common/test_sql_composite_key.py b/tests/common/test_sql_composite_key.py new file mode 100644 index 0000000..76de77e --- /dev/null +++ b/tests/common/test_sql_composite_key.py @@ -0,0 +1,168 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
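+"""Tests that a table with a composite primary key can be populated and read back consistently via both SQL and the cache API."""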
+ +from collections import OrderedDict +from enum import Enum + +import pytest + +from pyignite import GenericObjectMeta, AioClient +from pyignite.datatypes import IntObject, String + + +class StudentKey( + metaclass=GenericObjectMeta, + type_name='test.model.StudentKey', + schema=OrderedDict([ + ('ID', IntObject), + ('DEPT', String) + ]) +): + pass + + +class Student( + metaclass=GenericObjectMeta, + type_name='test.model.Student', + schema=OrderedDict([ + ('NAME', String), + ]) +): + pass + + +create_query = '''CREATE TABLE StudentTable ( + id INT(11), + dept VARCHAR, + name CHAR(24), + PRIMARY KEY (id, dept)) + WITH "CACHE_NAME=StudentCache, KEY_TYPE=test.model.StudentKey, VALUE_TYPE=test.model.Student"''' + +insert_query = '''INSERT INTO StudentTable (id, dept, name) VALUES (?, ?, ?)''' + +select_query = 'SELECT id, dept, name FROM StudentTable' + +select_kv_query = 'SELECT _key, _val FROM StudentTable' + +drop_query = 'DROP TABLE StudentTable IF EXISTS' + + +@pytest.fixture +def student_table_fixture(client): + yield from __create_student_table_fixture(client) + + +@pytest.fixture +async def async_student_table_fixture(async_client): + async for _ in __create_student_table_fixture(async_client): + yield + + +def __create_student_table_fixture(client): + def inner(): + client.sql(drop_query) + client.sql(create_query) + yield None + client.sql(drop_query) + + async def inner_async(): + await client.sql(drop_query) + await client.sql(create_query) + yield None + await client.sql(drop_query) + + return inner_async() if isinstance(client, AioClient) else inner() + + +class InsertMode(Enum): + SQL = 1 + CACHE = 2 + + +@pytest.mark.parametrize('insert_mode', [InsertMode.SQL, InsertMode.CACHE]) +def test_sql_composite_key(client, insert_mode, student_table_fixture): + __perform_test(client, insert_mode) + + +@pytest.mark.asyncio +@pytest.mark.parametrize('insert_mode', [InsertMode.SQL, InsertMode.CACHE]) +async def test_sql_composite_key_async(async_client, insert_mode, async_student_table_fixture): + await __perform_test(async_client, insert_mode) + + +def __perform_test(client, insert=InsertMode.SQL): + student_key = StudentKey(2, 'Business') + student_val = Student('Abe') + + def validate_query_result(key, val, query_result): + """ + Compare query result with expected key and value. + """ + assert len(query_result) == 2 + sql_row = dict(zip(query_result[0], query_result[1])) + + assert sql_row['ID'] == key.ID + assert sql_row['DEPT'] == key.DEPT + assert sql_row['NAME'] == val.NAME + + def validate_kv_query_result(key, val, query_result): + """ + Compare query result with expected key and value. 
+ """ + assert len(query_result) == 2 + sql_row = dict(zip(query_result[0], query_result[1])) + + sql_key, sql_val = sql_row['_KEY'], sql_row['_VAL'] + assert sql_key.ID == key.ID + assert sql_key.DEPT == key.DEPT + assert sql_val.NAME == val.NAME + + def inner(): + if insert == InsertMode.SQL: + result = client.sql(insert_query, query_args=[student_key.ID, student_key.DEPT, student_val.NAME]) + assert next(result)[0] == 1 + else: + studentCache = client.get_cache('StudentCache') + studentCache.put(student_key, student_val) + val = studentCache.get(student_key) + assert val is not None + assert val.NAME == student_val.NAME + + query_result = list(client.sql(select_query, include_field_names=True)) + validate_query_result(student_key, student_val, query_result) + + query_result = list(client.sql(select_kv_query, include_field_names=True)) + validate_kv_query_result(student_key, student_val, query_result) + + async def inner_async(): + if insert == InsertMode.SQL: + result = await client.sql(insert_query, query_args=[student_key.ID, student_key.DEPT, student_val.NAME]) + assert (await result.__anext__())[0] == 1 + else: + studentCache = await client.get_cache('StudentCache') + await studentCache.put(student_key, student_val) + val = await studentCache.get(student_key) + assert val is not None + assert val.NAME == student_val.NAME + + async with client.sql(select_query, include_field_names=True) as cursor: + query_result = [r async for r in cursor] + validate_query_result(student_key, student_val, query_result) + + async with client.sql(select_kv_query, include_field_names=True) as cursor: + query_result = [r async for r in cursor] + validate_kv_query_result(student_key, student_val, query_result) + + return inner_async() if isinstance(client, AioClient) else inner() diff --git a/tests/common/test_sync_socket.py b/tests/common/test_sync_socket.py new file mode 100644 index 0000000..cd41809 --- /dev/null +++ b/tests/common/test_sync_socket.py @@ -0,0 +1,42 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
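+"""Tests that large values are received intact when socket.recv_into is patched to return data in small chunks."""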
+ +import secrets +import socket +import unittest.mock as mock + +import pytest + +from pyignite import Client +from tests.util import get_or_create_cache + +old_recv_into = socket.socket.recv_into + + +def patched_recv_into_factory(buf_len): + def patched_recv_into(self, buffer, nbytes, **kwargs): + return old_recv_into(self, buffer, min(nbytes, buf_len) if buf_len else nbytes, **kwargs) + return patched_recv_into + + +@pytest.mark.parametrize('buf_len', [0, 1, 4, 16, 32, 64, 128, 256, 512, 1024]) +def test_get_large_value(buf_len): + with mock.patch.object(socket.socket, 'recv_into', new=patched_recv_into_factory(buf_len)): + c = Client() + with c.connect("127.0.0.1", 10801): + with get_or_create_cache(c, 'test') as cache: + value = secrets.token_hex((1 << 16) + 1) + cache.put(1, value) + assert value == cache.get(1) diff --git a/tests/common/test_transactions.py b/tests/common/test_transactions.py new file mode 100644 index 0000000..e879f60 --- /dev/null +++ b/tests/common/test_transactions.py @@ -0,0 +1,252 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
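+"""Tests for the client transactions API (sync and async); skipped when the server does not support transactions over the thin protocol."""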
+ +import asyncio +import itertools +import sys +import time + +import pytest + +from pyignite import AioClient, Client +from pyignite.datatypes import TransactionIsolation, TransactionConcurrency +from pyignite.datatypes.cache_config import CacheAtomicityMode +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_CACHE_ATOMICITY_MODE +from pyignite.exceptions import CacheError +from pyignite.transaction import Transaction, AioTransaction + + +@pytest.fixture +def connection_param(): + return [('127.0.0.1', 10800 + i) for i in range(1, 4)] + + +@pytest.fixture(params=['with-partition-awareness', 'without-partition-awareness']) +async def async_client(request, connection_param, event_loop): + client = AioClient(partition_aware=request.param == 'with-partition-awareness') + try: + await client.connect(connection_param) + if not client.protocol_context.is_transactions_supported(): + pytest.skip(f'skipped {request.node.name}, transaction api is not supported.') + elif sys.version_info < (3, 7): + pytest.skip(f'skipped {request.node.name}, transaction api is not supported' + f'for async client on python {sys.version}') + else: + yield client + finally: + await client.close() + + +@pytest.fixture(params=['with-partition-awareness', 'without-partition-awareness']) +def client(request, connection_param): + client = Client(partition_aware=request.param == 'with-partition-awareness') + try: + client.connect(connection_param) + if not client.protocol_context.is_transactions_supported(): + pytest.skip(f'skipped {request.node.name}, transaction api is not supported.') + else: + yield client + finally: + client.close() + + +@pytest.fixture +def tx_cache(client): + cache = client.get_or_create_cache({ + PROP_NAME: 'tx_cache', + PROP_CACHE_ATOMICITY_MODE: CacheAtomicityMode.TRANSACTIONAL + }) + time.sleep(1.0) # Need to sleep because of https://issues.apache.org/jira/browse/IGNITE-14868 + yield cache + cache.destroy() + + +@pytest.fixture +async def async_tx_cache(async_client): + cache = await async_client.get_or_create_cache({ + PROP_NAME: 'tx_cache', + PROP_CACHE_ATOMICITY_MODE: CacheAtomicityMode.TRANSACTIONAL + }) + await asyncio.sleep(1.0) # Need to sleep because of https://issues.apache.org/jira/browse/IGNITE-14868 + yield cache + await cache.destroy() + + +@pytest.mark.parametrize( + ['iso_level', 'concurrency'], + itertools.product( + [iso_level for iso_level in TransactionIsolation], + [concurrency for concurrency in TransactionConcurrency] + ) +) +def test_simple_transaction(client, tx_cache, iso_level, concurrency): + with client.tx_start(isolation=iso_level, concurrency=concurrency) as tx: + tx_cache.put(1, 1) + tx.commit() + + assert tx_cache.get(1) == 1 + + with client.tx_start(isolation=iso_level, concurrency=concurrency) as tx: + tx_cache.put(1, 10) + tx.rollback() + + assert tx_cache.get(1) == 1 + + with client.tx_start(isolation=iso_level, concurrency=concurrency) as tx: + tx_cache.put(1, 10) + + assert tx_cache.get(1) == 1 + + +@pytest.mark.parametrize( + ['iso_level', 'concurrency'], + itertools.product( + [iso_level for iso_level in TransactionIsolation], + [concurrency for concurrency in TransactionConcurrency] + ) +) +@pytest.mark.asyncio +async def test_simple_transaction_async(async_client, async_tx_cache, iso_level, concurrency): + async with async_client.tx_start(isolation=iso_level, concurrency=concurrency) as tx: + await async_tx_cache.put(1, 1) + await tx.commit() + + assert await async_tx_cache.get(1) == 1 + + async with async_client.tx_start(isolation=iso_level, 
concurrency=concurrency) as tx: + await async_tx_cache.put(1, 10) + await tx.rollback() + + assert await async_tx_cache.get(1) == 1 + + async with async_client.tx_start(isolation=iso_level, concurrency=concurrency) as tx: + await async_tx_cache.put(1, 10) + + assert await async_tx_cache.get(1) == 1 + + +def test_transactions_timeout(client, tx_cache): + with client.tx_start(timeout=2000, label='tx-sync') as tx: + tx_cache.put(1, 1) + time.sleep(3.0) + with pytest.raises(CacheError) as to_error: + tx.commit() + assert 'tx-sync' in str(to_error) and 'timed out' in str(to_error) + + +@pytest.mark.asyncio +async def test_transactions_timeout_async(async_client, async_tx_cache): + async def update(i, timeout): + async with async_client.tx_start( + label=f'tx-{i}', timeout=timeout, isolation=TransactionIsolation.READ_COMMITTED, + concurrency=TransactionConcurrency.PESSIMISTIC + ) as tx: + k1, k2 = (1, 2) if i % 2 == 0 else (2, 1) + v = f'value-{i}' + + await async_tx_cache.put(k1, v) + await async_tx_cache.put(k2, v) + + await tx.commit() + + task = asyncio.gather(*[update(i, 2000) for i in range(20)], return_exceptions=True) + await asyncio.sleep(5.0) + assert task.done() # Check that all transactions completed or rolled-back on timeout + for i, ex in enumerate(task.result()): + if ex: + assert 'TransactionTimeoutException' in str(ex) or \ + 'Cache transaction timed out' # check that transaction was rolled back. + assert f'tx-{i}' in str(ex) # check that tx label presents in error + + +@pytest.mark.asyncio +@pytest.mark.parametrize('iso_level', [iso_level for iso_level in TransactionIsolation]) +async def test_concurrent_pessimistic_transactions_same_key(async_client, async_tx_cache, iso_level): + async def update(i): + async with async_client.tx_start( + label=f'tx_lbl_{i}', isolation=iso_level, concurrency=TransactionConcurrency.PESSIMISTIC + ) as tx: + await async_tx_cache.put(1, f'test-{i}') + await tx.commit() + + res = await asyncio.gather(*[update(i) for i in range(20)], return_exceptions=True) + assert not any(res) # Checks that all transactions proceeds + + +@pytest.mark.asyncio +async def test_concurrent_optimistic_transactions_no_deadlock(async_client, async_tx_cache, event_loop): + """ + Check that optimistic transactions are deadlock safe. + """ + async def update(i): + async with async_client.tx_start( + label=f'tx-{i}', isolation=TransactionIsolation.SERIALIZABLE, + concurrency=TransactionConcurrency.OPTIMISTIC + ) as tx: + k1, k2 = (1, 2) if i % 2 == 0 else (2, 1) + v = f'value-{i}' + + await async_tx_cache.put(k1, v) + await async_tx_cache.put(k2, v) + + await tx.commit() + + task = asyncio.gather(*[update(i) for i in range(20)], return_exceptions=True) + await asyncio.sleep(2.0) + assert task.done() # Check that there are not any deadlock. + assert not all(task.result()) # Check that some (or all) transactions proceeds. 
+ for i, ex in enumerate(task.result()): + if ex: + assert 'lock conflict' in str(ex) # check optimistic prepare phase failed + assert f'tx-{i}' in str(ex) # check that tx label presents in error + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + ['iso_level', 'concurrency'], + itertools.product( + [iso_level for iso_level in TransactionIsolation], + [concurrency for concurrency in TransactionConcurrency] + ) +) +async def test_concurrent_transactions(async_client, async_tx_cache, iso_level, concurrency): + async def update(i): + async with async_client.tx_start(isolation=iso_level, concurrency=concurrency) as tx: + await async_tx_cache.put(i, f'test-{i}') + if i % 2 == 0: + await tx.commit() + else: + await tx.rollback() + + await asyncio.gather(*[update(i) for i in range(20)], return_exceptions=True) + assert await async_tx_cache.get_all(list(range(20))) == {i: f'test-{i}' for i in range(20) if i % 2 == 0} + + +@pytest.mark.parametrize( + "params", + [ + {'isolation': 25}, + {'concurrency': 45}, + {'timeout': 2.0}, + {'timeout': -10}, + {'label': 100500} + ] +) +def test_tx_parameter_validation(params): + with pytest.raises((TypeError, ValueError)): + Transaction(None, **params) + + with pytest.raises((TypeError, ValueError)): + AioTransaction(None, **params) diff --git a/tests/config/ignite-config.xml.jinja2 b/tests/config/ignite-config.xml.jinja2 new file mode 100644 index 0000000..22b103e --- /dev/null +++ b/tests/config/ignite-config.xml.jinja2 @@ -0,0 +1,115 @@ + + + + + + + + + + + + {% if use_persistence %} + + {% endif %} + + + + + + + + + + + + + + {% if use_auth %} + + {% endif %} + + {% if use_ssl %} + + {% endif %} + + + + + + + + {% if use_ssl %} + + + + + + + + + + + + + {% endif %} + + + + + + + + + + + + + + + + 127.0.0.1:48500..48510 + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/config/log4j.xml.jinja2 b/tests/config/log4j.xml.jinja2 new file mode 100644 index 0000000..983ae9e --- /dev/null +++ b/tests/config/log4j.xml.jinja2 @@ -0,0 +1,41 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/config/ssl.xml b/tests/config/ssl.xml deleted file mode 100644 index d9d406f..0000000 --- a/tests/config/ssl.xml +++ /dev/null @@ -1,58 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/tests/config/ssl/README.txt b/tests/config/ssl/README.txt index eca07ea..da169fa 100644 --- a/tests/config/ssl/README.txt +++ b/tests/config/ssl/README.txt @@ -1,3 +1,3 @@ These files generated using script -`$IGNITE_SRC/modules/platforms/cpp/thin-client-test/config/ssl/generate_certificates.sh` +`./tests/config/ssl/generate_certificates.sh` To update them just run script and move files to this folder. \ No newline at end of file diff --git a/tests/config/ssl/generate_certificates.sh b/tests/config/ssl/generate_certificates.sh new file mode 100755 index 0000000..e4f41e2 --- /dev/null +++ b/tests/config/ssl/generate_certificates.sh @@ -0,0 +1,127 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +OSSL=$(command -v openssl11) + +if [ -z "$OSSL" ] +then + OSSL=$(command -v openssl) +fi + +echo "Using following openssl: $OSSL" + +function generate_ca { + CA_KEY="$1.key" + CA_CRT="$1.crt" + OU="$2" + + # Generating CA private key and self-signed certificate + $OSSL req \ + -newkey rsa:2048 -nodes -sha256 -keyout $CA_KEY \ + -subj "/C=US/ST=Massachusetts/L=Wakefield/CN=ignite.apache.org/O=The Apache Software Foundation/OU=$OU/emailAddress=dev@ignite.apache.org" \ + -x509 -days 3650 -out $CA_CRT +} + +function generate_client_key_and_crt { + CA_KEY="$1.key" + CA_CRT="$1.crt" + CA_SRL="$1.srl" + CLIENT_KEY="$2.key" + CLIENT_CSR="$2.scr" + CLIENT_CRT="$2.crt" + OU="$3" + + # Generating client private key and certificate signature request to be used for certificate signing + $OSSL req \ + -newkey rsa:2048 -nodes -sha256 -keyout $CLIENT_KEY \ + -subj "/C=US/ST=Massachusetts/L=Wakefield/CN=ignite.apache.org/O=The Apache Software Foundation/OU=$OU/emailAddress=dev@ignite.apache.org" \ + -out $CLIENT_CSR + + # Signing client cerificate + $OSSL x509 -req \ + -in $CLIENT_CSR -CA $CA_CRT -CAkey $CA_KEY -CAcreateserial \ + -days 3650 -sha256 -out $CLIENT_CRT + + # Cleaning up. + rm -f $CLIENT_CSR + + # Protecting key with the password if required + if [ "$4" == "1" ]; then + openssl rsa -aes256 -in $CLIENT_KEY -passout pass:654321 -out $CLIENT_KEY + fi +} + +function generate_jks { + CA_CRT="$1.crt" + CA_JKS="$1.jks" + SERVER_KEY="$2.key" + SERVER_CRT="$2.crt" + SERVER_PEM="$2.pem" + SERVER_P12="$2.pkcs12" + SERVER_JKS="$2.jks" + + rm -f $CA_JKS $SERVER_JKS + + cat $SERVER_KEY $SERVER_CRT > $SERVER_PEM + + $OSSL pkcs12 -export -passout pass:123456 -out $SERVER_P12 -in $SERVER_PEM + + keytool -import -v -trustcacerts \ + -file $CA_CRT -alias certificateauthority -noprompt \ + -keystore $CA_JKS -deststorepass 123456 + + keytool -v -importkeystore \ + -srckeystore $SERVER_P12 -srcstoretype PKCS12 -srcstorepass 123456 \ + -destkeystore $SERVER_JKS -deststoretype JKS -deststorepass 123456 + + rm -f $SERVER_P12 $SERVER_PEM +} + +CA='ca' +CLIENT='client' +CLIENT_WITH_PASS='client_with_pass' +SERVER='server' +CA_UNKNOWN='ca_unknown' +CLIENT_UNKNOWN='client_unknown' + +generate_ca $CA 'Apache Ignite CA' +generate_client_key_and_crt $CA $CLIENT 'Apache Ignite Client Test' +generate_client_key_and_crt $CA $CLIENT_WITH_PASS 'Apache Ignite Client Test' 1 +generate_client_key_and_crt $CA $SERVER 'Apache Ignite Server Test' + +# We won't sign up any other certs so we do not need CA key or srl +rm -f "$CA.key" "$CA.srl" + +generate_jks $CA $SERVER + +generate_ca $CA_UNKNOWN 'Unknown CA' +generate_client_key_and_crt $CA_UNKNOWN $CLIENT_UNKNOWN 'Unknown Client' + +# We do not need this CA anymore +rm -f $CA_UNKNOWN* + +# Re-naming everything as needed +cat $CLIENT.key $CLIENT.crt > "$CLIENT"_full.pem +cat $CLIENT_WITH_PASS.key $CLIENT_WITH_PASS.crt > "$CLIENT_WITH_PASS"_full.pem +cat $CLIENT_UNKNOWN.key $CLIENT_UNKNOWN.crt > $CLIENT_UNKNOWN.pem +mv $CA.jks trust.jks +mv $CA.crt ca.pem + +rm -f $CLIENT.crt $CLIENT.key $CLIENT_WITH_PASS.key $CLIENT_WITH_PASS.crt $CLIENT_UNKNOWN.key $CLIENT_UNKNOWN.crt 
$SERVER_KEY $SERVER_CRT + + diff --git a/tests/conftest.py b/tests/conftest.py index f7e2e1f..6f92f0c 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -12,216 +12,66 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -import argparse -from distutils.util import strtobool -import ssl +import asyncio +import logging +import sys import pytest -from pyignite import Client -from pyignite.constants import * -from pyignite.api import cache_create, cache_get_names, cache_destroy - - -class UseSSLParser(argparse.Action): +logger = logging.getLogger('pyignite') +logger.setLevel(logging.DEBUG) - def __call__(self, parser, namespace, values, option_string=None): - values = True if values is None else bool(strtobool(values)) - setattr(namespace, self.dest, values) +@pytest.fixture(autouse=True) +def run_examples(request): + run_examples = request.config.getoption("--examples") + if request.node.get_closest_marker('examples'): + if not run_examples: + pytest.skip('skipped examples: --examples is not passed') -class CertReqsParser(argparse.Action): - conv_map = { - 'NONE': ssl.CERT_NONE, - 'OPTIONAL': ssl.CERT_OPTIONAL, - 'REQUIRED': ssl.CERT_REQUIRED, - } - def __call__(self, parser, namespace, values, option_string=None): - value = values.upper() - if value in self.conv_map: - setattr(namespace, self.dest, self.conv_map[value]) - else: - raise ValueError( - 'Undefined argument: --ssl-cert-reqs={}'.format(value) - ) +@pytest.fixture(autouse=True) +def skip_if_no_cext(request): + skip = False + try: + from pyignite import _cutils # noqa: F401 + except ImportError: + if request.config.getoption('--force-cext'): + pytest.fail("C extension failed to build, fail test because of --force-cext is set.") + return + skip = True + if skip and request.node.get_closest_marker('skip_if_no_cext'): + pytest.skip('skipped c extensions test, c extension is not available.') -class SSLVersionParser(argparse.Action): - conv_map = { - 'TLSV1_1': ssl.PROTOCOL_TLSv1_1, - 'TLSV1_2': ssl.PROTOCOL_TLSv1_2, - } - def __call__(self, parser, namespace, values, option_string=None): - value = values.upper() - if value in self.conv_map: - setattr(namespace, self.dest, self.conv_map[value]) - else: - raise ValueError( - 'Undefined argument: --ssl-version={}'.format(value) - ) - - -@pytest.fixture(scope='module') -def client( - ignite_host, ignite_port, timeout, use_ssl, ssl_keyfile, ssl_keyfile_password, ssl_certfile, - ssl_ca_certfile, ssl_cert_reqs, ssl_ciphers, ssl_version, - username, password, -): - client = Client( - timeout=timeout, - use_ssl=use_ssl, - ssl_keyfile=ssl_keyfile, - ssl_keyfile_password=ssl_keyfile_password, - ssl_certfile=ssl_certfile, - ssl_ca_certfile=ssl_ca_certfile, - ssl_cert_reqs=ssl_cert_reqs, - ssl_ciphers=ssl_ciphers, - ssl_version=ssl_version, - username=username, - password=password, - ) - client.connect(ignite_host, ignite_port) - yield client - for cache_name in cache_get_names(client).value: - cache_destroy(client, cache_name) - client.close() - - -@pytest.fixture -def cache(client): - cache_name = 'my_bucket' - cache_create(client, cache_name) - yield cache_name - cache_destroy(client, cache_name) +@pytest.fixture(scope='session') +def event_loop(): + """Create an instance of the default event loop for each test case.""" + loop = asyncio.get_event_loop_policy().new_event_loop() + yield loop + loop.close() def pytest_addoption(parser): parser.addoption( - 
'--ignite-host', - action='append', - default=[IGNITE_DEFAULT_HOST], - help='Ignite binary protocol test server host (default: localhost)' - ) - parser.addoption( - '--ignite-port', - action='append', - default=[IGNITE_DEFAULT_PORT], - type=int, - help='Ignite binary protocol test server port (default: 10800)' - ) - parser.addoption( - '--timeout', - action='store', - type=float, - default=None, - help=( - 'Timeout (in seconds) for each socket operation. Can accept ' - 'integer or float value. Default is None' - ) - ) - parser.addoption( - '--use-ssl', - action=UseSSLParser, - nargs='?', - default=False, - help='Use SSL encryption' - ) - parser.addoption( - '--ssl-keyfile', - action='store', - default=None, - type=str, - help='a path to SSL key file to identify local party' - ) - parser.addoption( - '--ssl-keyfile-password', - action='store', - default=None, - type=str, - help='password for SSL key file' - ) - parser.addoption( - '--ssl-certfile', - action='store', - default=None, - type=str, - help='a path to ssl certificate file to identify local party' - ) - parser.addoption( - '--ssl-ca-certfile', - action='store', - default=None, - type=str, - help='a path to a trusted certificate or a certificate chain' - ) - parser.addoption( - '--ssl-cert-reqs', - action=CertReqsParser, - default=ssl.CERT_NONE, - help=( - 'determines how the remote side certificate is treated: ' - 'NONE (ignore, default), ' - 'OPTIONAL (validate, if provided) or ' - 'REQUIRED (valid remote certificate is required)' - ) - ) - parser.addoption( - '--ssl-ciphers', - action='store', - default=SSL_DEFAULT_CIPHERS, - type=str, - help='ciphers to use' - ) - parser.addoption( - '--ssl-version', - action=SSLVersionParser, - default=SSL_DEFAULT_VERSION, - help='SSL version: TLSV1_1 or TLSV1_2' - ) - parser.addoption( - '--username', - action='store', - type=str, - help='user name' - ) - parser.addoption( - '--password', - action='store', - type=str, - help='password' + '--examples', + action='store_true', + help='check if examples can be run', ) parser.addoption( - '--examples', + '--force-cext', action='store_true', help='check if examples can be run', ) -def pytest_generate_tests(metafunc): - session_parameters = { - 'ignite_host': IGNITE_DEFAULT_HOST, - 'ignite_port': IGNITE_DEFAULT_PORT, - 'timeout': None, - 'use_ssl': False, - 'ssl_keyfile': None, - 'ssl_keyfile_password': None, - 'ssl_certfile': None, - 'ssl_ca_certfile': None, - 'ssl_cert_reqs': ssl.CERT_NONE, - 'ssl_ciphers': SSL_DEFAULT_CIPHERS, - 'ssl_version': SSL_DEFAULT_VERSION, - 'username': None, - 'password': None, - } +def pytest_configure(config): + marker_docs = [ + "skip_if_no_cext: mark test to run only if c extension is available", + "skip_if_no_expiry_policy: mark test to run only if expiry policy is supported by server", + "examples: mark test to run only if --examples are set" + ] - for param_name in session_parameters: - if param_name in metafunc.fixturenames: - param = metafunc.config.getoption(param_name) - if param is None: - param = session_parameters[param_name] - if type(param) is not list: - param = [param] - metafunc.parametrize(param_name, param, scope='session') + for marker_doc in marker_docs: + config.addinivalue_line("markers", marker_doc) diff --git a/tests/custom/test_cluster.py b/tests/custom/test_cluster.py new file mode 100644 index 0000000..ae83ecd --- /dev/null +++ b/tests/custom/test_cluster.py @@ -0,0 +1,133 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest + +from pyignite import Client, AioClient +from pyignite.exceptions import CacheError +from tests.util import clear_ignite_work_dir, start_ignite_gen + +from pyignite.datatypes import ClusterState + + +@pytest.fixture(params=['with-persistence', 'without-persistence']) +def with_persistence(request): + yield request.param == 'with-persistence' + + +@pytest.fixture(autouse=True) +def cleanup(): + clear_ignite_work_dir() + yield None + clear_ignite_work_dir() + + +@pytest.fixture(autouse=True) +def server1(with_persistence, cleanup): + yield from start_ignite_gen(idx=1, use_persistence=with_persistence) + + +@pytest.fixture(autouse=True) +def server2(with_persistence, cleanup): + yield from start_ignite_gen(idx=2, use_persistence=with_persistence) + + +@pytest.fixture(autouse=True) +def cluster_api_supported(request, server1): + client = Client() + with client.connect('127.0.0.1', 10801): + if not client.protocol_context.is_cluster_api_supported(): + pytest.skip(f'skipped {request.node.name}, Cluster API is not supported.') + + +def test_cluster_set_active(with_persistence): + key = 42 + val = 42 + start_state = ClusterState.INACTIVE if with_persistence else ClusterState.ACTIVE + + client = Client() + with client.connect([("127.0.0.1", 10801), ("127.0.0.1", 10802)]): + cluster = client.get_cluster() + assert cluster.get_state() == start_state + + cluster.set_state(ClusterState.ACTIVE) + assert cluster.get_state() == ClusterState.ACTIVE + + cache = client.get_or_create_cache("test_cache") + cache.put(key, val) + assert cache.get(key) == val + + cluster.set_state(ClusterState.ACTIVE_READ_ONLY) + assert cluster.get_state() == ClusterState.ACTIVE_READ_ONLY + + assert cache.get(key) == val + with pytest.raises(CacheError): + cache.put(key, val + 1) + + cluster.set_state(ClusterState.INACTIVE) + assert cluster.get_state() == ClusterState.INACTIVE + + with pytest.raises(CacheError): + cache.get(key) + + with pytest.raises(CacheError): + cache.put(key, val + 1) + + cluster.set_state(ClusterState.ACTIVE) + assert cluster.get_state() == ClusterState.ACTIVE + + cache.put(key, val + 2) + assert cache.get(key) == val + 2 + + +@pytest.mark.asyncio +async def test_cluster_set_active_async(with_persistence): + key = 42 + val = 42 + start_state = ClusterState.INACTIVE if with_persistence else ClusterState.ACTIVE + + client = AioClient() + async with client.connect([("127.0.0.1", 10801), ("127.0.0.1", 10802)]): + cluster = client.get_cluster() + assert await cluster.get_state() == start_state + + await cluster.set_state(ClusterState.ACTIVE) + assert await cluster.get_state() == ClusterState.ACTIVE + + cache = await client.get_or_create_cache("test_cache") + await cache.put(key, val) + assert await cache.get(key) == val + + await cluster.set_state(ClusterState.ACTIVE_READ_ONLY) + assert await cluster.get_state() == 
ClusterState.ACTIVE_READ_ONLY + + assert await cache.get(key) == val + with pytest.raises(CacheError): + await cache.put(key, val + 1) + + await cluster.set_state(ClusterState.INACTIVE) + assert await cluster.get_state() == ClusterState.INACTIVE + + with pytest.raises(CacheError): + await cache.get(key) + + with pytest.raises(CacheError): + await cache.put(key, val + 1) + + await cluster.set_state(ClusterState.ACTIVE) + assert await cluster.get_state() == ClusterState.ACTIVE + + await cache.put(key, val + 2) + assert await cache.get(key) == val + 2 diff --git a/tests/custom/test_connection_events.py b/tests/custom/test_connection_events.py new file mode 100644 index 0000000..f49ad61 --- /dev/null +++ b/tests/custom/test_connection_events.py @@ -0,0 +1,136 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import pytest + +from pyignite import Client, AioClient +from pyignite.datatypes.cache_config import CacheMode +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_CACHE_MODE +from pyignite.monitoring import ConnectionEventListener, ConnectionLostEvent, ConnectionClosedEvent, \ + HandshakeSuccessEvent, HandshakeFailedEvent, HandshakeStartEvent + +from tests.util import start_ignite_gen, kill_process_tree + + +@pytest.fixture(autouse=True) +def server1(): + yield from start_ignite_gen(idx=1) + + +@pytest.fixture(autouse=True) +def server2(): + yield from start_ignite_gen(idx=2) + + +events = [] + + +def teardown_function(): + events.clear() + + +class RecordingConnectionEventListener(ConnectionEventListener): + def on_handshake_start(self, event): + events.append(event) + + def on_handshake_success(self, event): + events.append(event) + + def on_handshake_fail(self, event): + events.append(event) + + def on_authentication_fail(self, event): + events.append(event) + + def on_connection_closed(self, event): + events.append(event) + + def on_connection_lost(self, event): + events.append(event) + + +def test_events(request, server2): + client = Client(event_listeners=[RecordingConnectionEventListener()]) + with client.connect([('127.0.0.1', 10800 + idx) for idx in range(1, 3)]): + protocol_context = client.protocol_context + nodes = {conn.port: conn for conn in client._nodes} + cache = client.get_or_create_cache({ + PROP_NAME: request.node.name, + PROP_CACHE_MODE: CacheMode.REPLICATED, + }) + + kill_process_tree(server2.pid) + + for _ in range(0, 100): + try: + cache.put(1, 1) + except: # noqa 13 + pass + + if any(isinstance(e, ConnectionLostEvent) for e in events): + break + + __assert_events(nodes, protocol_context) + + +@pytest.mark.asyncio +async def test_events_async(request, server2): + client = AioClient(event_listeners=[RecordingConnectionEventListener()]) + async with client.connect([('127.0.0.1', 10800 + idx) for idx in range(1, 3)]): + 
protocol_context = client.protocol_context + nodes = {conn.port: conn for conn in client._nodes} + cache = await client.get_or_create_cache({ + PROP_NAME: request.node.name, + PROP_CACHE_MODE: CacheMode.REPLICATED, + }) + kill_process_tree(server2.pid) + + for _ in range(0, 100): + try: + await cache.put(1, 1) + except: # noqa 13 + pass + + if any(isinstance(e, ConnectionLostEvent) for e in events): + break + + __assert_events(nodes, protocol_context) + + +def __assert_events(nodes, protocol_context): + assert len([e for e in events if isinstance(e, ConnectionLostEvent)]) == 1 + # ConnectionLostEvent is a subclass of ConnectionClosedEvent + assert 1 <= len([e for e in events if type(e) == ConnectionClosedEvent and e.node_uuid]) <= 2 + assert len([e for e in events if isinstance(e, HandshakeSuccessEvent)]) == 2 + + for ev in events: + assert ev.host == '127.0.0.1' + if isinstance(ev, ConnectionLostEvent): + assert ev.port == 10802 + assert ev.node_uuid == str(nodes[ev.port].uuid) + assert ev.error_msg + elif isinstance(ev, HandshakeStartEvent): + assert ev.port in {10801, 10802} + elif isinstance(ev, HandshakeFailedEvent): + assert ev.port == 10802 + assert ev.protocol_context == protocol_context + assert ev.error_msg + elif isinstance(ev, HandshakeSuccessEvent): + assert ev.port in {10801, 10802} + assert ev.node_uuid == str(nodes[ev.port].uuid) + assert ev.protocol_context == protocol_context + elif isinstance(ev, ConnectionClosedEvent): + assert ev.port in {10801, 10802} + if ev.node_uuid: # Possible if protocol negotiation occurred. + assert ev.node_uuid == str(nodes[ev.port].uuid) diff --git a/tests/custom/test_handshake_timeout.py b/tests/custom/test_handshake_timeout.py new file mode 100644 index 0000000..bae184d --- /dev/null +++ b/tests/custom/test_handshake_timeout.py @@ -0,0 +1,212 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import asyncio +import logging +import socket +import struct +import time +from concurrent.futures import ThreadPoolExecutor + +import pytest + +from pyignite import Client, AioClient +from pyignite import monitoring +from pyignite.exceptions import ReconnectError, ParameterError +from pyignite.monitoring import HandshakeFailedEvent + +logger = logging.getLogger('fake_ignite') +logger.setLevel(logging.DEBUG) + +DEFAULT_HOST = '127.0.0.1' +DEFAULT_PORT = 10800 + + +class FakeIgniteProtocol(asyncio.Protocol): + def __init__(self, server): + self._transport = None + self._server = server + self._buf = bytearray() + self._done_handshake = False + + def connection_made(self, transport): + sock = transport.get_extra_info('socket') + if sock is not None: + logger.debug('Connecting from %s', sock) + self._server.add_client(transport) + self._transport = transport + + def _handshake_response(self, error=True): + if error: + return struct.pack(' (1, 3, 0): + response = self._handshake_response(True) + logger.debug(f'Writing handshake response {response}') + self._transport.write(response) + self._transport.close() + else: + response = self._handshake_response(False) + logger.debug(f'Writing handshake response {response}') + self._transport.write(response) + self._done_handshake = True + self._buf = bytearray() + + +class FakeIgniteServer: + def __init__(self, do_handshake=False): + self.clients = [] + self.server = None + self.do_handshake = do_handshake + self.loop = asyncio.get_event_loop() + + async def start(self): + self.server = await self.loop.create_server(lambda: FakeIgniteProtocol(self), DEFAULT_HOST, DEFAULT_PORT) + + def add_client(self, client): + self.clients.append(client) + + async def close(self): + for client in self.clients: + client.close() + + if self.server: + self.server.close() + await self.server.wait_closed() + + +class HandshakeTimeoutListener(monitoring.ConnectionEventListener): + def __init__(self): + self.events = [] + + def on_handshake_fail(self, event: HandshakeFailedEvent): + self.events.append(event) + + +@pytest.fixture +async def server(): + server = FakeIgniteServer() + try: + await server.start() + yield server + finally: + await server.close() + + +@pytest.fixture +async def server_with_handshake(): + server = FakeIgniteServer(do_handshake=True) + try: + await server.start() + yield server + finally: + await server.close() + + +@pytest.mark.asyncio +async def test_handshake_timeout(server, event_loop): + def sync_client_connect(): + hs_to_listener = HandshakeTimeoutListener() + client = Client(handshake_timeout=3.0, event_listeners=[hs_to_listener]) + start = time.monotonic() + try: + client.connect(DEFAULT_HOST, DEFAULT_PORT) + except Exception as e: + return time.monotonic() - start, hs_to_listener.events, e + return time.monotonic() - start, hs_to_listener.events, None + + duration, events, err = await event_loop.run_in_executor(ThreadPoolExecutor(), sync_client_connect) + + assert isinstance(err, ReconnectError) + assert 3.0 <= duration < 4.0 + assert len(events) > 0 + for ev in events: + assert isinstance(ev, HandshakeFailedEvent) + assert 'timed out' in ev.error_msg + + +@pytest.mark.asyncio +async def test_handshake_timeout_async(server): + hs_to_listener = HandshakeTimeoutListener() + client = AioClient(handshake_timeout=3.0, event_listeners=[hs_to_listener]) + with pytest.raises(ReconnectError): + start = time.monotonic() + await client.connect(DEFAULT_HOST, DEFAULT_PORT) + + assert 3.0 <= time.monotonic() - start < 4.0 + assert 
len(hs_to_listener.events) > 0 + for ev in hs_to_listener.events: + assert isinstance(ev, HandshakeFailedEvent) + assert 'timed out' in ev.error_msg + + +@pytest.mark.asyncio +async def test_socket_timeout_applied_sync(server_with_handshake, event_loop): + def sync_client_connect(): + hs_to_listener = HandshakeTimeoutListener() + client = Client(timeout=5.0, handshake_timeout=3.0, event_listeners=[hs_to_listener]) + start = time.monotonic() + try: + client.connect(DEFAULT_HOST, DEFAULT_PORT) + assert all(n.alive for n in client._nodes) + client.get_cache_names() + except Exception as e: + return time.monotonic() - start, hs_to_listener.events, e + return time.monotonic() - start, hs_to_listener.events, None + + duration, events, err = await event_loop.run_in_executor(ThreadPoolExecutor(), sync_client_connect) + + assert isinstance(err, socket.timeout) + assert 5.0 <= duration < 6.0 + assert len(events) == 0 + + +@pytest.mark.asyncio +async def test_handshake_timeout_not_affected_for_others_requests_async(server_with_handshake): + hs_to_listener = HandshakeTimeoutListener() + client = AioClient(handshake_timeout=3.0, event_listeners=[hs_to_listener]) + with pytest.raises(asyncio.TimeoutError): + await client.connect(DEFAULT_HOST, DEFAULT_PORT) + assert all(n.alive for n in client._nodes) + await asyncio.wait_for(client.get_cache_names(), 5.0) + + +@pytest.mark.parametrize( + 'handshake_timeout', + [0.0, -10.0, -0.01] +) +@pytest.mark.asyncio +async def test_handshake_timeout_param_validation(handshake_timeout): + with pytest.raises(ParameterError): + await AioClient(handshake_timeout=handshake_timeout).connect(DEFAULT_HOST, DEFAULT_PORT) + + with pytest.raises(ParameterError): + Client(handshake_timeout=handshake_timeout).connect(DEFAULT_HOST, DEFAULT_PORT) diff --git a/tests/custom/test_timeouts.py b/tests/custom/test_timeouts.py new file mode 100644 index 0000000..e70fe70 --- /dev/null +++ b/tests/custom/test_timeouts.py @@ -0,0 +1,209 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import asyncio +import sys +from asyncio import TimeoutError, InvalidStateError + +import pytest + +from pyignite import AioClient +from tests.util import start_ignite_gen + + +@pytest.fixture(autouse=True) +def server1(): + yield from start_ignite_gen(idx=1) + + +@pytest.fixture(autouse=True) +async def proxy(event_loop, server1, cache): + proxy = ProxyServer(("127.0.0.1", 10802), ("127.0.0.1", 10801)) + try: + await proxy.start() + yield proxy + finally: + await proxy.close() + + +@pytest.fixture(autouse=True) +async def cache(server1): + c = AioClient(partition_aware=False) + async with c.connect("127.0.0.1", 10801): + try: + cache = await c.get_or_create_cache("test") + yield cache + finally: + await cache.destroy() + + +@pytest.fixture(autouse=True) +def invalid_states_errors(): + errors = [] + + def trace(_, event, arg): + if event == 'exception': + etype, _, _ = arg + if etype is InvalidStateError: + errors.append(arg) + + return trace + + try: + sys.settrace(trace) + yield errors + finally: + sys.settrace(None) + + +@pytest.mark.asyncio +async def test_cancellation_on_slow_response(event_loop, proxy, invalid_states_errors): + c = AioClient(partition_aware=False) + async with c.connect("127.0.0.1", 10802): + cache = await c.get_cache("test") + proxy.discard_response = True # Simulate slow response by discarding it + + with pytest.raises(TimeoutError): + await asyncio.wait_for(cache.put(1, 2), 0.1) + + proxy.discard_response = False + assert len(invalid_states_errors) == 0 + + +@pytest.mark.asyncio +async def test_cancellation_on_disconnect(event_loop, proxy, invalid_states_errors): + c = AioClient(partition_aware=False) + async with c.connect("127.0.0.1", 10802): + cache = await c.get_cache("test") + proxy.discard_response = True + + asyncio.ensure_future(asyncio.wait_for(cache.put(1, 2), 0.1)) + await asyncio.sleep(0.2) + await proxy.disconnect_peers() + + assert len(invalid_states_errors) == 0 + + +class ProxyServer: + """ + Proxy for simulating discarding response from ignite server + Set `discard_response` to `True` to simulate this condition. + Call `disconnect_peers()` in order to simulate lost connection to Ignite server. 
+ """ + def __init__(self, local_host, remote_host): + self.local_host = local_host + self.remote_host = remote_host + self.peers = {} + self.discard_response = False + self.server = None + + async def start(self): + loop = asyncio.get_event_loop() + host, port = self.local_host + self.server = await loop.create_server( + lambda: ProxyTcpProtocol(self), host=host, port=port) + + async def disconnect_peers(self): + peers = dict(self.peers) + for k, v in peers.items(): + if not v: + return + + local, remote = v + if local: + await remote.close() + if remote: + await local.close() + + async def close(self): + try: + await self.disconnect_peers() + except TimeoutError: + pass + + self.server.close() + + +class ProxyTcpProtocol(asyncio.Protocol): + def __init__(self, proxy): + self.addr, self.port = proxy.remote_host + self.proxy = proxy + self.transport, self.remote_protocol, self.conn_info, self.close_fut = None, None, None, None + super().__init__() + + def connection_made(self, transport): + self.transport = transport + self.conn_info = transport.get_extra_info("peername") + + def data_received(self, data): + if self.remote_protocol and self.remote_protocol.transport: + self.remote_protocol.transport.write(data) + return + + loop = asyncio.get_event_loop() + self.remote_protocol = RemoteTcpProtocol(self.proxy, self, data) + coro = loop.create_connection(lambda: self.remote_protocol, host=self.addr, port=self.port) + asyncio.ensure_future(coro) + + self.proxy.peers[self.conn_info] = (self, self.remote_protocol) + + async def close(self): + if not self.transport: + return + + self.close_fut = asyncio.get_event_loop().create_future() + self.transport.close() + + try: + await asyncio.wait_for(self.close_fut, 0.1) + except TimeoutError: + pass + + def connection_lost(self, exc): + if self.close_fut: + self.close_fut.done() + + +class RemoteTcpProtocol(asyncio.Protocol): + def __init__(self, proxy, proxy_protocol, data): + self.proxy = proxy + self.proxy_protocol = proxy_protocol + self.data = data + self.transport, self.close_fut = None, None + super().__init__() + + def connection_made(self, transport): + self.transport = transport + self.transport.write(self.data) + + async def close(self): + if not self.transport: + return + + self.close_fut = asyncio.get_event_loop().create_future() + self.transport.close() + try: + await asyncio.wait_for(self.close_fut, 0.1) + except TimeoutError: + pass + + def connection_lost(self, exc): + if self.close_fut: + self.close_fut.done() + + def data_received(self, data): + if self.proxy.discard_response: + return + + self.proxy_protocol.transport.write(data) diff --git a/tests/security/conftest.py b/tests/security/conftest.py new file mode 100644 index 0000000..8845c31 --- /dev/null +++ b/tests/security/conftest.py @@ -0,0 +1,73 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import os + +import pytest + +from pyignite import monitoring +from tests.util import get_test_dir + + +@pytest.fixture +def ssl_params(): + yield __create_ssl_param(False) + + +@pytest.fixture +def ssl_params_with_password(): + yield __create_ssl_param(True) + + +def __create_ssl_param(with_password=False): + cert_path = os.path.join(get_test_dir(), 'config', 'ssl') + + if with_password: + cert = os.path.join(cert_path, 'client_with_pass_full.pem') + return { + 'ssl_keyfile': cert, + 'ssl_keyfile_password': '654321', + 'ssl_certfile': cert, + 'ssl_ca_certfile': cert, + } + else: + cert = os.path.join(cert_path, 'client_full.pem') + return { + 'ssl_keyfile': cert, + 'ssl_certfile': cert, + 'ssl_ca_certfile': cert + } + + +class AccumulatingConnectionListener(monitoring.ConnectionEventListener): + def __init__(self): + self.events = [] + + def on_handshake_start(self, event): + self.events.append(event) + + def on_handshake_success(self, event): + self.events.append(event) + + def on_handshake_fail(self, event): + self.events.append(event) + + def on_authentication_fail(self, event): + self.events.append(event) + + def on_connection_closed(self, event): + self.events.append(event) + + def on_connection_lost(self, event): + self.events.append(event) diff --git a/tests/security/test_auth.py b/tests/security/test_auth.py new file mode 100644 index 0000000..f4ca29b --- /dev/null +++ b/tests/security/test_auth.py @@ -0,0 +1,182 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import logging +import re + +import pytest + +from pyignite import Client, AioClient +from pyignite.monitoring import ( + HandshakeStartEvent, HandshakeSuccessEvent, ConnectionClosedEvent, AuthenticationFailedEvent +) +from pyignite.exceptions import AuthenticationError +from tests.security.conftest import AccumulatingConnectionListener +from tests.util import start_ignite_gen, clear_ignite_work_dir + +DEFAULT_IGNITE_USERNAME = 'ignite' +DEFAULT_IGNITE_PASSWORD = 'ignite' + + +@pytest.fixture(params=['with-ssl', 'without-ssl']) +def with_ssl(request): + return request.param == 'with-ssl' + + +@pytest.fixture(autouse=True) +def server(with_ssl, cleanup): + yield from start_ignite_gen(use_ssl=with_ssl, use_auth=True) + + +@pytest.fixture(scope='module', autouse=True) +def cleanup(): + clear_ignite_work_dir() + yield None + clear_ignite_work_dir() + + +def check_auth_success(ssl_params, caplog): + listener = AccumulatingConnectionListener() + client = Client(username=DEFAULT_IGNITE_USERNAME, password=DEFAULT_IGNITE_PASSWORD, + event_listeners=[listener], **ssl_params) + with caplog.at_level(logger='pyignite', level=logging.DEBUG): + with client.connect("127.0.0.1", 10801): + assert all(node.alive for node in client._nodes) + conn = client._nodes[0] + + __assert_successful_connect_log(conn, caplog) + __assert_successful_connect_events(conn, listener) + + +def test_auth_success_no_explicit_ssl(with_ssl, ssl_params, caplog): + if with_ssl: + ssl_params['use_ssl'] = with_ssl + check_auth_success(ssl_params, caplog) + + +def test_auth_success(with_ssl, ssl_params, caplog): + ssl_params['use_ssl'] = with_ssl + check_auth_success(ssl_params, caplog) + + +async def check_auth_success_async(ssl_params, caplog): + listener = AccumulatingConnectionListener() + client = AioClient(username=DEFAULT_IGNITE_USERNAME, password=DEFAULT_IGNITE_PASSWORD, + event_listeners=[listener], **ssl_params) + with caplog.at_level(logger='pyignite', level=logging.DEBUG): + async with client.connect("127.0.0.1", 10801): + assert all(node.alive for node in client._nodes) + conn = client._nodes[0] + + __assert_successful_connect_log(conn, caplog) + __assert_successful_connect_events(conn, listener) + + +@pytest.mark.asyncio +async def test_auth_success_no_explicit_ssl_async(with_ssl, ssl_params, caplog): + if with_ssl: + ssl_params['use_ssl'] = with_ssl + await check_auth_success_async(ssl_params, caplog) + + +@pytest.mark.asyncio +async def test_auth_success_async(with_ssl, ssl_params, caplog): + ssl_params['use_ssl'] = with_ssl + await check_auth_success_async(ssl_params, caplog) + + +def __assert_successful_connect_log(conn, caplog): + assert any(re.match(rf'Connecting to node\(address={conn.host},\s+port={conn.port}', r.message) + for r in caplog.records) + assert any(re.match(rf'Connected to node\(address={conn.host},\s+port={conn.port}', r.message) + for r in caplog.records) + assert any(re.match(rf'Connection closed to node\(address={conn.host},\s+port={conn.port}', r.message) + for r in caplog.records) + + +def __assert_successful_connect_events(conn, listener): + event_classes = (HandshakeStartEvent, HandshakeSuccessEvent, ConnectionClosedEvent) + + for cls in event_classes: + any(isinstance(ev, cls) for ev in listener.events) + + for ev in listener.events: + if isinstance(ev, event_classes): + assert ev.host == conn.host + assert ev.port == conn.port + if isinstance(ev, (HandshakeSuccessEvent, ConnectionClosedEvent)): + if ev.node_uuid: + assert ev.node_uuid == str(conn.uuid) + if isinstance(ev, 
HandshakeSuccessEvent): + assert ev.protocol_context + + +auth_failed_params = [ + [DEFAULT_IGNITE_USERNAME, None], + ['invalid_user', 'invalid_password'], + [None, None] +] + + +@pytest.mark.parametrize( + 'username, password', + auth_failed_params +) +def test_auth_failed(username, password, with_ssl, ssl_params, caplog): + ssl_params['use_ssl'] = with_ssl + listener = AccumulatingConnectionListener() + with pytest.raises(AuthenticationError): + client = Client(username=username, password=password, + event_listeners=[listener], **ssl_params) + with client.connect("127.0.0.1", 10801): + pass + + __assert_auth_failed_log(caplog) + __assert_auth_failed_listener(listener) + + +@pytest.mark.parametrize( + 'username, password', + auth_failed_params +) +@pytest.mark.asyncio +async def test_auth_failed_async(username, password, with_ssl, ssl_params, caplog): + ssl_params['use_ssl'] = with_ssl + listener = AccumulatingConnectionListener() + with pytest.raises(AuthenticationError): + client = AioClient(username=username, password=password, + event_listeners=[listener], **ssl_params) + async with client.connect("127.0.0.1", 10801): + pass + + __assert_auth_failed_log(caplog) + __assert_auth_failed_listener(listener) + + +def __assert_auth_failed_log(caplog): + pattern = r'Authentication failed while connecting to node\(address=127.0.0.1,\s+port=10801' + assert any(re.match(pattern, r.message) and r.levelname == logging.getLevelName(logging.ERROR) + for r in caplog.records) + + +def __assert_auth_failed_listener(listener): + found = False + for ev in listener.events: + if isinstance(ev, AuthenticationFailedEvent): + found = True + assert ev.host == '127.0.0.1' + assert ev.port == 10801 + assert ev.protocol_context + assert 'AuthenticationError' in ev.error_msg + assert found diff --git a/tests/security/test_ssl.py b/tests/security/test_ssl.py new file mode 100644 index 0000000..ed0808b --- /dev/null +++ b/tests/security/test_ssl.py @@ -0,0 +1,117 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import logging +import re + +import pytest + +from pyignite import Client, AioClient, monitoring +from pyignite.exceptions import ReconnectError +from tests.security.conftest import AccumulatingConnectionListener +from tests.util import start_ignite_gen, get_or_create_cache, get_or_create_cache_async + + +@pytest.fixture(scope='module', autouse=True) +def server(): + yield from start_ignite_gen(use_ssl=True, use_auth=False) + + +def test_connect_ssl_keystore_with_password(ssl_params_with_password): + __test_connect_ssl(**ssl_params_with_password) + + +def test_connect_ssl(ssl_params): + __test_connect_ssl(**ssl_params) + + +@pytest.mark.asyncio +async def test_connect_ssl_keystore_with_password_async(ssl_params_with_password): + await __test_connect_ssl(is_async=True, **ssl_params_with_password) + + +@pytest.mark.asyncio +async def test_connect_ssl_async(ssl_params): + await __test_connect_ssl(is_async=True, **ssl_params) + + +def __test_connect_ssl(is_async=False, **kwargs): + kwargs['use_ssl'] = True + + def inner(): + client = Client(**kwargs) + with client.connect("127.0.0.1", 10801): + with get_or_create_cache(client, 'test-cache') as cache: + cache.put(1, 1) + + assert cache.get(1) == 1 + + async def inner_async(): + client = AioClient(**kwargs) + async with client.connect("127.0.0.1", 10801): + async with get_or_create_cache_async(client, 'test-cache') as cache: + await cache.put(1, 1) + + assert (await cache.get(1)) == 1 + + return inner_async() if is_async else inner() + + +invalid_params = [ + {'use_ssl': False}, + {'use_ssl': True}, + {'use_ssl': True, 'ssl_keyfile': 'invalid.pem', 'ssl_certfile': 'invalid.pem'} +] + + +@pytest.mark.parametrize('invalid_ssl_params', invalid_params) +def test_connection_error_with_incorrect_config(invalid_ssl_params, caplog): + listener = AccumulatingConnectionListener() + with pytest.raises(ReconnectError): + client = Client(event_listeners=[listener], **invalid_ssl_params) + with client.connect([("127.0.0.1", 10801)]): + pass + + __assert_handshake_failed_log(caplog) + __assert_handshake_failed_listener(listener) + + +@pytest.mark.parametrize('invalid_ssl_params', invalid_params) +@pytest.mark.asyncio +async def test_connection_error_with_incorrect_config_async(invalid_ssl_params, caplog): + listener = AccumulatingConnectionListener() + with pytest.raises(ReconnectError): + client = AioClient(event_listeners=[listener], **invalid_ssl_params) + async with client.connect([("127.0.0.1", 10801)]): + pass + + __assert_handshake_failed_log(caplog) + __assert_handshake_failed_listener(listener) + + +def __assert_handshake_failed_log(caplog): + pattern = r'Failed to perform handshake, connection to node\(address=127.0.0.1,\s+port=10801.*failed:' + assert any(re.match(pattern, r.message) and r.levelname == logging.getLevelName(logging.ERROR) + for r in caplog.records) + + +def __assert_handshake_failed_listener(listener): + found = False + for ev in listener.events: + if isinstance(ev, monitoring.HandshakeFailedEvent): + found = True + assert ev.host == '127.0.0.1' + assert ev.port == 10801 + assert ev.error_msg + assert found diff --git a/tests/test_binary.py b/tests/test_binary.py deleted file mode 100644 index 29ccf68..0000000 --- a/tests/test_binary.py +++ /dev/null @@ -1,280 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from collections import OrderedDict -from decimal import Decimal - -from pyignite import GenericObjectMeta -from pyignite.datatypes import ( - BinaryObject, BoolObject, IntObject, DecimalObject, LongObject, String, -) -from pyignite.datatypes.prop_codes import * - - -insert_data = [ - [1, True, 'asdf', 42, Decimal('2.4')], - [2, False, 'zxcvb', 43, Decimal('2.5')], - [3, True, 'qwerty', 44, Decimal('2.6')], -] - -page_size = 100 - -scheme_name = 'PUBLIC' - -table_sql_name = 'AllDataType' -table_cache_name = 'SQL_{}_{}'.format( - scheme_name, - table_sql_name.upper(), -) - -create_query = ''' -CREATE TABLE {} ( - test_pk INTEGER(11) PRIMARY KEY, - test_bool BOOLEAN, - test_str VARCHAR(24), - test_int INTEGER(11), - test_decimal DECIMAL(11, 5), -) -'''.format(table_sql_name) - -insert_query = ''' -INSERT INTO {} ( - test_pk, test_bool, test_str, test_int, test_decimal, -) VALUES (?, ?, ?, ?, ?)'''.format(table_sql_name) - -select_query = '''SELECT * FROM {}'''.format(table_sql_name) - -drop_query = 'DROP TABLE {} IF EXISTS'.format(table_sql_name) - - -def test_sql_read_as_binary(client): - - client.sql(drop_query) - - # create table - client.sql(create_query) - - # insert some rows - for line in insert_data: - client.sql(insert_query, query_args=line) - - table_cache = client.get_cache(table_cache_name) - result = table_cache.scan() - - # convert Binary object fields' values to a tuple - # to compare it with the initial data - for key, value in result: - assert key in {x[0] for x in insert_data} - assert ( - value.TEST_BOOL, - value.TEST_STR, - value.TEST_INT, - value.TEST_DECIMAL - ) in {tuple(x[1:]) for x in insert_data} - - client.sql(drop_query) - - -def test_sql_write_as_binary(client): - - client.get_or_create_cache(scheme_name) - - # configure cache as an SQL table - type_name = table_cache_name - - # register binary type - class AllDataType( - metaclass=GenericObjectMeta, - type_name=type_name, - schema=OrderedDict([ - ('TEST_BOOL', BoolObject), - ('TEST_STR', String), - ('TEST_INT', IntObject), - ('TEST_DECIMAL', DecimalObject), - ]), - ): - pass - - table_cache = client.get_or_create_cache({ - PROP_NAME: table_cache_name, - PROP_SQL_SCHEMA: scheme_name, - PROP_QUERY_ENTITIES: [ - { - 'table_name': table_sql_name.upper(), - 'key_field_name': 'TEST_PK', - 'key_type_name': 'java.lang.Integer', - 'field_name_aliases': [], - 'query_fields': [ - { - 'name': 'TEST_PK', - 'type_name': 'java.lang.Integer', - 'is_notnull_constraint_field': True, - }, - { - 'name': 'TEST_BOOL', - 'type_name': 'java.lang.Boolean', - }, - { - 'name': 'TEST_STR', - 'type_name': 'java.lang.String', - }, - { - 'name': 'TEST_INT', - 'type_name': 'java.lang.Integer', - }, - { - 'name': 'TEST_DECIMAL', - 'type_name': 'java.math.BigDecimal', - 'default_value': Decimal('0.00'), - 'precision': 11, - 'scale': 2, - }, - ], - 'query_indexes': [], - 'value_type_name': type_name, - 'value_field_name': None, - }, - ], - }) - table_settings = 
table_cache.settings - assert table_settings, 'SQL table cache settings are empty' - - # insert rows as k-v - for row in insert_data: - value = AllDataType() - ( - value.TEST_BOOL, - value.TEST_STR, - value.TEST_INT, - value.TEST_DECIMAL, - ) = row[1:] - table_cache.put(row[0], value, key_hint=IntObject) - - data = table_cache.scan() - assert len(list(data)) == len(insert_data), ( - 'Not all data was read as key-value' - ) - - # read rows as SQL - data = client.sql(select_query, include_field_names=True) - - header_row = next(data) - for field_name in AllDataType.schema.keys(): - assert field_name in header_row, 'Not all field names in header row' - - data = list(data) - assert len(data) == len(insert_data), 'Not all data was read as SQL rows' - - # cleanup - table_cache.destroy() - - -def test_nested_binary_objects(client): - - nested_cache = client.get_or_create_cache('nested_binary') - - class InnerType( - metaclass=GenericObjectMeta, - schema=OrderedDict([ - ('inner_int', LongObject), - ('inner_str', String), - ]), - ): - pass - - class OuterType( - metaclass=GenericObjectMeta, - schema=OrderedDict([ - ('outer_int', LongObject), - ('nested_binary', BinaryObject), - ('outer_str', String), - ]), - ): - pass - - inner = InnerType(inner_int=42, inner_str='This is a test string') - - outer = OuterType( - outer_int=43, - nested_binary=inner, - outer_str='This is another test string' - ) - - nested_cache.put(1, outer) - - result = nested_cache.get(1) - assert result.outer_int == 43 - assert result.outer_str == 'This is another test string' - assert result.nested_binary.inner_int == 42 - assert result.nested_binary.inner_str == 'This is a test string' - - nested_cache.destroy() - - -def test_add_schema_to_binary_object(client): - - migrate_cache = client.create_cache('migrate_binary') - - class MyBinaryType( - metaclass=GenericObjectMeta, - schema=OrderedDict([ - ('test_str', String), - ('test_int', LongObject), - ('test_bool', BoolObject), - ]), - ): - pass - - binary_object = MyBinaryType( - test_str='Test string', - test_int=42, - test_bool=True, - ) - migrate_cache.put(1, binary_object) - - result = migrate_cache.get(1) - assert result.test_str == 'Test string' - assert result.test_int == 42 - assert result.test_bool is True - - modified_schema = MyBinaryType.schema.copy() - modified_schema['test_decimal'] = DecimalObject - del modified_schema['test_bool'] - - class MyBinaryTypeV2( - metaclass=GenericObjectMeta, - type_name='MyBinaryType', - schema=modified_schema, - ): - pass - - assert MyBinaryType.type_id == MyBinaryTypeV2.type_id - assert MyBinaryType.schema_id != MyBinaryTypeV2.schema_id - - binary_object_v2 = MyBinaryTypeV2( - test_str='Another test', - test_int=43, - test_decimal=Decimal('2.34') - ) - - migrate_cache.put(2, binary_object_v2) - - result = migrate_cache.get(2) - assert result.test_str == 'Another test' - assert result.test_int == 43 - assert result.test_decimal == Decimal('2.34') - assert not hasattr(result, 'test_bool') - - migrate_cache.destroy() diff --git a/tests/test_cache_class.py b/tests/test_cache_class.py deleted file mode 100644 index 22865be..0000000 --- a/tests/test_cache_class.py +++ /dev/null @@ -1,221 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from collections import OrderedDict -from decimal import Decimal - -import pytest - -from pyignite import GenericObjectMeta -from pyignite.datatypes import ( - BoolObject, DecimalObject, FloatObject, IntObject, String, -) -from pyignite.datatypes.prop_codes import * -from pyignite.exceptions import CacheError - - -def test_cache_create(client): - cache = client.get_or_create_cache('my_oop_cache') - assert cache.name == cache.settings[PROP_NAME] == 'my_oop_cache' - cache.destroy() - - -def test_cache_remove(client): - cache = client.get_or_create_cache('my_cache') - cache.clear() - assert cache.get_size() == 0 - - cache.put_all({ - 'key_1': 1, - 'key_2': 2, - 'key_3': 3, - 'key_4': 4, - 'key_5': 5, - }) - assert cache.get_size() == 5 - - result = cache.remove_if_equals('key_1', 42) - assert result is False - assert cache.get_size() == 5 - - result = cache.remove_if_equals('key_1', 1) - assert result is True - assert cache.get_size() == 4 - - cache.remove_keys(['key_1', 'key_3', 'key_5', 'key_7']) - assert cache.get_size() == 2 - - cache.remove_all() - assert cache.get_size() == 0 - - -def test_cache_get(client): - client.get_or_create_cache('my_cache') - - my_cache = client.get_cache('my_cache') - assert my_cache.settings[PROP_NAME] == 'my_cache' - my_cache.destroy() - - error = None - - my_cache = client.get_cache('my_cache') - try: - _ = my_cache.settings[PROP_NAME] - except CacheError as e: - error = e - - assert type(error) is CacheError - - -def test_cache_config(client): - cache_config = { - PROP_NAME: 'my_oop_cache', - PROP_CACHE_KEY_CONFIGURATION: [ - { - 'type_name': 'blah', - 'affinity_key_field_name': 'abc1234', - }, - ], - } - client.create_cache(cache_config) - - cache = client.get_or_create_cache('my_oop_cache') - assert cache.name == cache_config[PROP_NAME] - assert ( - cache.settings[PROP_CACHE_KEY_CONFIGURATION] - == cache_config[PROP_CACHE_KEY_CONFIGURATION] - ) - - cache.destroy() - - -def test_cache_get_put(client): - cache = client.get_or_create_cache('my_oop_cache') - cache.put('my_key', 42) - result = cache.get('my_key') - assert result, 42 - cache.destroy() - - -def test_cache_binary_get_put(client): - - class TestBinaryType( - metaclass=GenericObjectMeta, - schema=OrderedDict([ - ('test_bool', BoolObject), - ('test_str', String), - ('test_int', IntObject), - ('test_decimal', DecimalObject), - ]), - ): - pass - - cache = client.create_cache('my_oop_cache') - - my_value = TestBinaryType( - test_bool=True, - test_str='This is a test', - test_int=42, - test_decimal=Decimal('34.56'), - ) - cache.put('my_key', my_value) - - value = cache.get('my_key') - assert value.test_bool is True - assert value.test_str == 'This is a test' - assert value.test_int == 42 - assert value.test_decimal == Decimal('34.56') - - cache.destroy() - - -def test_get_binary_type(client): - client.put_binary_type( - 'TestBinaryType', - schema=OrderedDict([ - ('TEST_BOOL', BoolObject), - ('TEST_STR', String), - ('TEST_INT', 
IntObject), - ]) - ) - client.put_binary_type( - 'TestBinaryType', - schema=OrderedDict([ - ('TEST_BOOL', BoolObject), - ('TEST_STR', String), - ('TEST_INT', IntObject), - ('TEST_FLOAT', FloatObject), - ]) - ) - client.put_binary_type( - 'TestBinaryType', - schema=OrderedDict([ - ('TEST_BOOL', BoolObject), - ('TEST_STR', String), - ('TEST_INT', IntObject), - ('TEST_DECIMAL', DecimalObject), - ]) - ) - - binary_type_info = client.get_binary_type('TestBinaryType') - assert len(binary_type_info['schemas']) == 3 - - binary_type_info = client.get_binary_type('NonExistentType') - assert binary_type_info['type_exists'] is False - assert len(binary_type_info) == 1 - - -@pytest.mark.parametrize('page_size', range(1, 17, 5)) -def test_cache_scan(client, page_size): - test_data = { - 1: 'This is a test', - 2: 'One more test', - 3: 'Foo', - 4: 'Buzz', - 5: 'Bar', - 6: 'Lorem ipsum', - 7: 'dolor sit amet', - 8: 'consectetur adipiscing elit', - 9: 'Nullam aliquet', - 10: 'nisl at ante', - 11: 'suscipit', - 12: 'ut cursus', - 13: 'metus interdum', - 14: 'Nulla tincidunt', - 15: 'sollicitudin iaculis', - } - - cache = client.get_or_create_cache('my_oop_cache') - cache.put_all(test_data) - - gen = cache.scan(page_size=page_size) - received_data = [] - for k, v in gen: - assert k in test_data.keys() - assert v in test_data.values() - received_data.append((k, v)) - assert len(received_data) == len(test_data) - - cache.destroy() - - -def test_get_and_put_if_absent(client): - cache = client.get_or_create_cache('my_oop_cache') - - value = cache.get_and_put_if_absent('my_key', 42) - assert value is None - cache.put('my_key', 43) - value = cache.get_and_put_if_absent('my_key', 42) - assert value is 43 diff --git a/tests/test_cache_class_sql.py b/tests/test_cache_class_sql.py deleted file mode 100644 index 5f72b39..0000000 --- a/tests/test_cache_class_sql.py +++ /dev/null @@ -1,103 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import pytest - - -initial_data = [ - ('John', 'Doe', 5), - ('Jane', 'Roe', 4), - ('Joe', 'Bloggs', 4), - ('Richard', 'Public', 3), - ('Negidius', 'Numerius', 3), - ] - -create_query = '''CREATE TABLE Student ( - id INT(11) PRIMARY KEY, - first_name CHAR(24), - last_name CHAR(32), - grade INT(11))''' - -insert_query = '''INSERT INTO Student(id, first_name, last_name, grade) -VALUES (?, ?, ?, ?)''' - -select_query = 'SELECT id, first_name, last_name, grade FROM Student' - -drop_query = 'DROP TABLE Student IF EXISTS' - - -@pytest.mark.parametrize('page_size', range(1, 6, 2)) -def test_sql_fields(client, page_size): - - client.sql(drop_query, page_size) - - result = client.sql(create_query, page_size) - assert next(result)[0] == 0 - - for i, data_line in enumerate(initial_data, start=1): - fname, lname, grade = data_line - result = client.sql( - insert_query, - page_size, - query_args=[i, fname, lname, grade] - ) - assert next(result)[0] == 1 - - result = client.sql( - select_query, - page_size, - include_field_names=True, - ) - field_names = next(result) - assert set(field_names) == {'ID', 'FIRST_NAME', 'LAST_NAME', 'GRADE'} - - data = list(result) - assert len(data) == 5 - for row in data: - assert len(row) == 4 - - client.sql(drop_query, page_size) - - -@pytest.mark.parametrize('page_size', range(1, 6, 2)) -def test_sql(client, page_size): - - client.sql(drop_query, page_size) - - result = client.sql(create_query, page_size) - assert next(result)[0] == 0 - - for i, data_line in enumerate(initial_data, start=1): - fname, lname, grade = data_line - result = client.sql( - insert_query, - page_size, - query_args=[i, fname, lname, grade] - ) - assert next(result)[0] == 1 - - student = client.get_or_create_cache('SQL_PUBLIC_STUDENT') - result = student.select_row('TRUE', page_size) - for k, v in result: - assert k in range(1, 6) - assert v.FIRST_NAME in [ - 'John', - 'Jane', - 'Joe', - 'Richard', - 'Negidius', - ] - - client.sql(drop_query, page_size) diff --git a/tests/test_cache_config.py b/tests/test_cache_config.py deleted file mode 100644 index 2f01618..0000000 --- a/tests/test_cache_config.py +++ /dev/null @@ -1,75 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from pyignite.api import * -from pyignite.datatypes.prop_codes import * - - -def test_get_configuration(client): - - result = cache_get_or_create(client, 'my_unique_cache') - assert result.status == 0 - - result = cache_get_configuration(client, 'my_unique_cache') - assert result.status == 0 - assert result.value[PROP_NAME] == 'my_unique_cache' - - -def test_create_with_config(client): - - cache_name = 'my_very_unique_name' - - result = cache_create_with_config(client, { - PROP_NAME: cache_name, - PROP_CACHE_KEY_CONFIGURATION: [ - { - 'type_name': 'blah', - 'affinity_key_field_name': 'abc1234', - } - ], - }) - assert result.status == 0 - - result = cache_get_names(client) - assert cache_name in result.value - - result = cache_create_with_config(client, { - PROP_NAME: cache_name, - }) - assert result.status != 0 - - -def test_get_or_create_with_config(client): - - cache_name = 'my_very_unique_name' - - result = cache_get_or_create_with_config(client, { - PROP_NAME: cache_name, - PROP_CACHE_KEY_CONFIGURATION: [ - { - 'type_name': 'blah', - 'affinity_key_field_name': 'abc1234', - } - ], - }) - assert result.status == 0 - - result = cache_get_names(client) - assert cache_name in result.value - - result = cache_get_or_create_with_config(client, { - PROP_NAME: cache_name, - }) - assert result.status == 0 diff --git a/tests/test_cutils.py b/tests/test_cutils.py new file mode 100644 index 0000000..d66425f --- /dev/null +++ b/tests/test_cutils.py @@ -0,0 +1,136 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import random +from collections import OrderedDict + +import pytest + +import pyignite.utils as _putils +from pyignite.datatypes import IntObject + +try: + from pyignite import _cutils + + _cutils_hashcode = _cutils.hashcode + _cutils_schema_id = _cutils.schema_id +except ImportError: + _cutils_hashcode = lambda x: None # noqa: E731 + _cutils_schema_id = lambda x: None # noqa: E731 + pass + + +@pytest.mark.skip_if_no_cext +def test_bytes_hashcode(): + assert _cutils_hashcode(None) == 0 + assert _cutils_hashcode(b'') == 1 + assert _cutils_hashcode(bytearray()) == 1 + assert _cutils_hashcode(memoryview(b'')) == 1 + + for i in range(1000): + rnd_bytes = bytearray([random.randint(0, 255) for _ in range(random.randint(1, 1024))]) + + fallback_val = _putils.__hashcode_fallback(rnd_bytes) + assert _cutils_hashcode(rnd_bytes) == fallback_val + assert _cutils_hashcode(bytes(rnd_bytes)) == fallback_val + assert _cutils_hashcode(memoryview(rnd_bytes)) == fallback_val + + +@pytest.mark.skip_if_no_cext +@pytest.mark.parametrize( + 'value', + [ + '皮膚の色、', + 'Произвольный символ', + 'Random string', + '', + ] +) +def test_string_hashcode(value): + assert _cutils_hashcode(value) == _putils.__hashcode_fallback(value), f'failed on {value}' + + +@pytest.mark.skip_if_no_cext +def test_random_string_hashcode(): + assert _cutils_hashcode(None) == 0 + assert _cutils_hashcode('') == 0 + + for i in range(1000): + rnd_str = get_random_unicode(random.randint(1, 128)) + assert _cutils_hashcode(rnd_str) == _putils.__hashcode_fallback(rnd_str), f'failed on {rnd_str}' + + +@pytest.mark.skip_if_no_cext +def test_schema_id(): + rnd_id = random.randint(-100, 100) + assert _cutils_schema_id(rnd_id) == rnd_id + assert _cutils_schema_id(None) == 0 + assert _cutils_schema_id({}) == 0 + + for i in range(1000): + schema = OrderedDict({get_random_field_name(20): IntObject for _ in range(20)}) + assert _cutils_schema_id(schema) == _putils.__schema_id_fallback(schema), f'failed on {schema}' + + +@pytest.mark.skip_if_no_cext +@pytest.mark.parametrize( + 'func,args,kwargs,err_cls', + [ + [_cutils_hashcode, [123], {}, ValueError], + [_cutils_hashcode, [{'test': 'test'}], {}, ValueError], + [_cutils_hashcode, [], {}, TypeError], + [_cutils_hashcode, [123, 123], {}, TypeError], + [_cutils_hashcode, [], {'input': 'test'}, TypeError], + [_cutils_schema_id, ['test'], {}, ValueError], + [_cutils_schema_id, [], {}, TypeError], + [_cutils_schema_id, [], {}, TypeError], + [_cutils_schema_id, [123, 123], {}, TypeError], + [_cutils_schema_id, [], {'input': 'test'}, TypeError], + ] +) +def test_handling_errors(func, args, kwargs, err_cls): + with pytest.raises(err_cls): + func(*args, **kwargs) + + +def get_random_field_name(length): + first = get_random_unicode(length // 2, latin=True) + second = get_random_unicode(length - length // 2, latin=True) + + first = first.upper() if random.randint(0, 1) else first.lower() + second = second.upper() if random.randint(0, 1) else second.lower() + + return first + '_' + second + + +def get_random_unicode(length, latin=False): + include_ranges = [ + (0x0041, 0x005A), # Latin high + (0x0061, 0x007A), # Latin lower + (0x0410, 0x042F), # Russian high + (0x0430, 0x044F), # Russian lower + (0x05D0, 0x05EA) # Hebrew + ] + + alphabet = [] + + if latin: + include_ranges = include_ranges[0:2] + + for current_range in include_ranges: + for code_point in range(current_range[0], current_range[1] + 1): + alphabet.append(chr(code_point)) + + return ''.join(random.choice(alphabet) for _ in range(length)) diff --git 
a/tests/test_datatypes.py b/tests/test_datatypes.py deleted file mode 100644 index b68ba8c..0000000 --- a/tests/test_datatypes.py +++ /dev/null @@ -1,176 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from datetime import datetime, timedelta -import decimal -import pytest -import uuid - -from pyignite.api.key_value import cache_get, cache_put -from pyignite.datatypes import * - - -@pytest.mark.parametrize( - 'value, value_hint', - [ - # integers - (42, None), - (42, ByteObject), - (42, ShortObject), - (42, IntObject), - - # floats - (3.1415, None), # True for Double but not Float - (3.5, FloatObject), - - # char is never autodetected - ('ы', CharObject), - ('カ', CharObject), - - # bool - (True, None), - (False, None), - (True, BoolObject), - (False, BoolObject), - - # arrays of integers - ([1, 2, 3, 5], None), - ([1, 2, 3, 5], ByteArrayObject), - ([1, 2, 3, 5], ShortArrayObject), - ([1, 2, 3, 5], IntArrayObject), - - # arrays of floats - ([2.2, 4.4, 6.6], None), - ([2.5, 6.5], FloatArrayObject), - - # array of char - (['ы', 'カ'], CharArrayObject), - - # array of bool - ([True, False, True], None), - - # string - ('Little Mary had a lamb', None), - ('This is a test', String), - - # decimals - (decimal.Decimal('2.5'), None), - (decimal.Decimal('-1.3'), None), - - # uuid - (uuid.uuid4(), None), - - # date - (datetime(year=1998, month=4, day=6, hour=18, minute=30), None), - - # no autodetection for timestamp either - ( - (datetime(year=1998, month=4, day=6, hour=18, minute=30), 1000), - TimestampObject - ), - - # time - (timedelta(days=4, hours=4, minutes=24), None), - - # enum is useless in Python, except for interoperability with Java. 
- # Also no autodetection - ((5, 6), BinaryEnumObject), - - # arrays of standard types - (['String 1', 'String 2'], None), - (['Some of us are empty', None, 'But not the others'], None), - - ([decimal.Decimal('2.71828'), decimal.Decimal('100')], None), - ([decimal.Decimal('2.1'), None, decimal.Decimal('3.1415')], None), - - ([uuid.uuid4(), uuid.uuid4()], None), - ( - [ - datetime(year=2010, month=1, day=1), - datetime(year=2010, month=12, day=31), - ], - None, - ), - ([timedelta(minutes=30), timedelta(hours=2)], None), - ( - [ - (datetime(year=2010, month=1, day=1), 1000), - (datetime(year=2010, month=12, day=31), 200), - ], - TimestampArrayObject - ), - ((-1, [(6001, 1), (6002, 2), (6003, 3)]), BinaryEnumArrayObject), - - # object array - ((-1, [1, 2, decimal.Decimal('3')]), None), - - # collection - ((3, [1, 2, 3]), CollectionObject), - - # map - ((1, {'key': 4, 5: 6.0}), None), - ((2, {'key': 4, 5: 6.0}), None), - ] -) -def test_put_get_data(client, cache, value, value_hint): - - result = cache_put(client, cache, 'my_key', value, value_hint=value_hint) - assert result.status == 0 - - result = cache_get(client, cache, 'my_key') - assert result.status == 0 - assert result.value == value - - -@pytest.mark.parametrize( - 'uuid_string', - [ - 'd57babad-7bc1-4c82-9f9c-e72841b92a85', - '5946c0c0-2b76-479d-8694-a2e64a3968da', - 'a521723d-ad5d-46a6-94ad-300f850ef704', - ] -) -def test_uuid_representation(client, uuid_string): - """ Test if textual UUID representation is correct. """ - uuid_value = uuid.UUID(uuid_string) - - # initial cleanup - client.sql("DROP TABLE test_uuid_repr IF EXISTS") - # create table with UUID field - client.sql( - "CREATE TABLE test_uuid_repr (id INTEGER PRIMARY KEY, uuid_field UUID)" - ) - # use uuid.UUID class to insert data - client.sql( - "INSERT INTO test_uuid_repr(id, uuid_field) VALUES (?, ?)", - query_args=[1, uuid_value] - ) - # use hex string to retrieve data - result = client.sql( - "SELECT * FROM test_uuid_repr WHERE uuid_field='{}'".format( - uuid_string - ) - ) - - # finalize query - result = list(result) - - # final cleanup - client.sql("DROP TABLE test_uuid_repr IF EXISTS") - - # if a line was retrieved, our test was successful - assert len(result) == 1 - # doublecheck - assert result[0][1] == uuid_value diff --git a/tests/test_examples.py b/tests/test_examples.py index 4665d8c..f90ed17 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -12,46 +12,41 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import glob +import os import subprocess import sys import pytest +from tests.util import get_test_dir, start_ignite_gen SKIP_LIST = [ 'failover.py', # it hangs by design ] -def run_subprocess_34(script: str): - return subprocess.call([ - 'python', - '../examples/{}'.format(script), - ]) +def examples_scripts_gen(): + examples_dir = os.path.join(get_test_dir(), '..', 'examples') + for script in glob.glob1(examples_dir, '*.py'): + if script not in SKIP_LIST: + yield os.path.join(examples_dir, script) -def run_subprocess_35(script: str): - return subprocess.run([ - 'python', - '../examples/{}'.format(script), - ]).returncode +@pytest.fixture(autouse=True) +def server(): + yield from start_ignite_gen(idx=0) # idx=0, because 10800 port is needed for examples. -@pytest.mark.skipif( - condition=not pytest.config.option.examples, - reason=( - 'If you wish to test examples, invoke pytest with ' - '`--examples` option.' 
- ), +@pytest.mark.examples +@pytest.mark.parametrize( + 'example_script', + examples_scripts_gen() ) -def test_examples(): - for script in glob.glob1('../examples', '*.py'): - if script not in SKIP_LIST: - # `subprocess` module was refactored in Python 3.5 - if sys.version_info >= (3, 5): - return_code = run_subprocess_35(script) - else: - return_code = run_subprocess_34(script) - assert return_code == 0 +def test_examples(example_script): + proc = subprocess.run([ + sys.executable, + example_script + ]) + + assert proc.returncode == 0 diff --git a/tests/test_handshake.py b/tests/test_handshake.py deleted file mode 100644 index d655d94..0000000 --- a/tests/test_handshake.py +++ /dev/null @@ -1,64 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import socket - -from pyignite import Client -from pyignite.connection.handshake import HandshakeRequest, read_response - - -def test_handshake( - monkeypatch, - ignite_host, ignite_port, use_ssl, ssl_keyfile, ssl_keyfile_password, ssl_certfile, - ssl_ca_certfile, ssl_cert_reqs, ssl_ciphers, ssl_version, - username, password, -): - client = Client( - use_ssl=use_ssl, - ssl_keyfile=ssl_keyfile, - ssl_keyfile_password=ssl_keyfile_password, - ssl_certfile=ssl_certfile, - ssl_ca_certfile=ssl_ca_certfile, - ssl_cert_reqs=ssl_cert_reqs, - ssl_ciphers=ssl_ciphers, - ssl_version=ssl_version, - username=username, - password=password, - ) - client._socket = client._wrap( - socket.socket(socket.AF_INET, socket.SOCK_STREAM) - ) - client.socket.connect((ignite_host, ignite_port)) - hs_request = HandshakeRequest(username, password) - client.send(hs_request) - hs_response = read_response(client) - assert hs_response['op_code'] != 0 - - client.close() - - # intentionally pass wrong protocol version - from pyignite.connection import handshake - monkeypatch.setattr(handshake, 'PROTOCOL_VERSION_MAJOR', 10) - - client._socket = client._wrap( - socket.socket(socket.AF_INET, socket.SOCK_STREAM) - ) - client.socket.connect((ignite_host, ignite_port)) - hs_request = HandshakeRequest(username, password) - client.send(hs_request) - hs_response = read_response(client) - assert hs_response['op_code'] == 0 - - client.close() diff --git a/tests/test_key_value.py b/tests/test_key_value.py deleted file mode 100644 index 6b4fb0e..0000000 --- a/tests/test_key_value.py +++ /dev/null @@ -1,400 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from datetime import datetime - -from pyignite.api import * -from pyignite.datatypes import ( - CollectionObject, IntObject, MapObject, TimestampObject, -) - - -def test_put_get(client, cache): - - result = cache_put(client, cache, 'my_key', 5) - assert result.status == 0 - - result = cache_get(client, cache, 'my_key') - assert result.status == 0 - assert result.value == 5 - - -def test_get_all(client, cache): - - result = cache_get_all(client, cache, ['key_1', 2, (3, IntObject)]) - assert result.status == 0 - assert result.value == {} - - cache_put(client, cache, 'key_1', 4) - cache_put(client, cache, 3, 18, key_hint=IntObject) - - result = cache_get_all(client, cache, ['key_1', 2, (3, IntObject)]) - assert result.status == 0 - assert result.value == {'key_1': 4, 3: 18} - - -def test_put_all(client, cache): - - test_dict = { - 1: 2, - 'key_1': 4, - (3, IntObject): 18, - } - test_keys = ['key_1', 1, 3] - - result = cache_put_all(client, cache, test_dict) - assert result.status == 0 - - result = cache_get_all(client, cache, test_keys) - assert result.status == 0 - assert len(test_dict) == 3 - - for key in result.value: - assert key in test_keys - - -def test_contains_key(client, cache): - - cache_put(client, cache, 'test_key', 42) - - result = cache_contains_key(client, cache, 'test_key') - assert result.value is True - - result = cache_contains_key(client, cache, 'non-existant-key') - assert result.value is False - - -def test_contains_keys(client, cache): - - cache_put(client, cache, 5, 6) - cache_put(client, cache, 'test_key', 42) - - result = cache_contains_keys(client, cache, [5, 'test_key']) - assert result.value is True - - result = cache_contains_keys(client, cache, [5, 'non-existent-key']) - assert result.value is False - - -def test_get_and_put(client, cache): - - result = cache_get_and_put(client, cache, 'test_key', 42) - assert result.status == 0 - assert result.value is None - - result = cache_get(client, cache, 'test_key') - assert result.status == 0 - assert result.value is 42 - - result = cache_get_and_put(client, cache, 'test_key', 1234) - assert result.status == 0 - assert result.value == 42 - - -def test_get_and_replace(client, cache): - - result = cache_get_and_replace(client, cache, 'test_key', 42) - assert result.status == 0 - assert result.value is None - - result = cache_get(client, cache, 'test_key') - assert result.status == 0 - assert result.value is None - - cache_put(client, cache, 'test_key', 42) - - result = cache_get_and_replace(client, cache, 'test_key', 1234) - assert result.status == 0 - assert result.value == 42 - - -def test_get_and_remove(client, cache): - - result = cache_get_and_remove(client, cache, 'test_key') - assert result.status == 0 - assert result.value is None - - cache_put(client, cache, 'test_key', 42) - - result = cache_get_and_remove(client, cache, 'test_key') - assert result.status == 0 - assert result.value == 42 - - -def test_put_if_absent(client, cache): - - result = cache_put_if_absent(client, cache, 'test_key', 42) - assert result.status == 0 - assert result.value is True - - result = cache_put_if_absent(client, cache, 
'test_key', 1234) - assert result.status == 0 - assert result.value is False - - -def test_get_and_put_if_absent(client, cache): - - result = cache_get_and_put_if_absent(client, cache, 'test_key', 42) - assert result.status == 0 - assert result.value is None - - result = cache_get_and_put_if_absent(client, cache, 'test_key', 1234) - assert result.status == 0 - assert result.value == 42 - - result = cache_get_and_put_if_absent(client, cache, 'test_key', 5678) - assert result.status == 0 - assert result.value == 42 - - -def test_replace(client, cache): - - result = cache_replace(client, cache, 'test_key', 42) - assert result.status == 0 - assert result.value is False - - cache_put(client, cache, 'test_key', 1234) - - result = cache_replace(client, cache, 'test_key', 42) - assert result.status == 0 - assert result.value is True - - result = cache_get(client, cache, 'test_key') - assert result.status == 0 - assert result.value == 42 - - -def test_replace_if_equals(client, cache): - - result = cache_replace_if_equals(client, cache, 'my_test', 42, 1234) - assert result.status == 0 - assert result.value is False - - cache_put(client, cache, 'my_test', 42) - - result = cache_replace_if_equals(client, cache, 'my_test', 42, 1234) - assert result.status == 0 - assert result.value is True - - result = cache_get(client, cache, 'my_test') - assert result.status == 0 - assert result.value == 1234 - - -def test_clear(client, cache): - - result = cache_put(client, cache, 'my_test', 42) - assert result.status == 0 - - result = cache_clear(client, cache) - assert result.status == 0 - - result = cache_get(client, cache, 'my_test') - assert result.status == 0 - assert result.value is None - - -def test_clear_key(client, cache): - - result = cache_put(client, cache, 'my_test', 42) - assert result.status == 0 - - result = cache_put(client, cache, 'another_test', 24) - assert result.status == 0 - - result = cache_clear_key(client, cache, 'my_test') - assert result.status == 0 - - result = cache_get(client, cache, 'my_test') - assert result.status == 0 - assert result.value is None - - result = cache_get(client, cache, 'another_test') - assert result.status == 0 - assert result.value == 24 - - -def test_clear_keys(client, cache): - - result = cache_put(client, cache, 'my_test_key', 42) - assert result.status == 0 - - result = cache_put(client, cache, 'another_test', 24) - assert result.status == 0 - - result = cache_clear_keys(client, cache, [ - 'my_test_key', - 'nonexistent_key', - ]) - assert result.status == 0 - - result = cache_get(client, cache, 'my_test_key') - assert result.status == 0 - assert result.value is None - - result = cache_get(client, cache, 'another_test') - assert result.status == 0 - assert result.value == 24 - - -def test_remove_key(client, cache): - - result = cache_put(client, cache, 'my_test_key', 42) - assert result.status == 0 - - result = cache_remove_key(client, cache, 'my_test_key') - assert result.status == 0 - assert result.value is True - - result = cache_remove_key(client, cache, 'non_existent_key') - assert result.status == 0 - assert result.value is False - - -def test_remove_if_equals(client, cache): - - result = cache_put(client, cache, 'my_test', 42) - assert result.status == 0 - - result = cache_remove_if_equals(client, cache, 'my_test', 1234) - assert result.status == 0 - assert result.value is False - - result = cache_remove_if_equals(client, cache, 'my_test', 42) - assert result.status == 0 - assert result.value is True - - result = cache_get(client, cache, 'my_test') - 
assert result.status == 0 - assert result.value is None - - -def test_remove_keys(client, cache): - - result = cache_put(client, cache, 'my_test', 42) - assert result.status == 0 - - result = cache_put(client, cache, 'another_test', 24) - assert result.status == 0 - - result = cache_remove_keys(client, cache, ['my_test', 'non_existent']) - assert result.status == 0 - - result = cache_get(client, cache, 'my_test') - assert result.status == 0 - assert result.value is None - - result = cache_get(client, cache, 'another_test') - assert result.status == 0 - assert result.value == 24 - - -def test_remove_all(client, cache): - - result = cache_put(client, cache, 'my_test', 42) - assert result.status == 0 - - result = cache_put(client, cache, 'another_test', 24) - assert result.status == 0 - - result = cache_remove_all(client, cache) - assert result.status == 0 - - result = cache_get(client, cache, 'my_test') - assert result.status == 0 - assert result.value is None - - result = cache_get(client, cache, 'another_test') - assert result.status == 0 - assert result.value is None - - -def test_cache_get_size(client, cache): - - result = cache_put(client, cache, 'my_test', 42) - assert result.status == 0 - - result = cache_get_size(client, cache) - assert result.status == 0 - assert result.value == 1 - - -def test_put_get_collection(client): - - test_datetime = datetime(year=1996, month=3, day=1) - - cache = client.get_or_create_cache('test_coll_cache') - cache.put( - 'simple', - ( - 1, - [ - (123, IntObject), - 678, - None, - 55.2, - ((test_datetime, 0), TimestampObject), - ] - ), - value_hint=CollectionObject - ) - value = cache.get('simple') - assert value == (1, [123, 678, None, 55.2, (test_datetime, 0)]) - - cache.put( - 'nested', - ( - 1, - [ - 123, - ((1, [456, 'inner_test_string', 789]), CollectionObject), - 'outer_test_string', - ] - ), - value_hint=CollectionObject - ) - value = cache.get('nested') - assert value == ( - 1, - [ - 123, - (1, [456, 'inner_test_string', 789]), - 'outer_test_string' - ] - ) - - -def test_put_get_map(client): - - cache = client.get_or_create_cache('test_map_cache') - - cache.put( - 'test_map', - ( - MapObject.HASH_MAP, - { - (123, IntObject): 'test_data', - 456: ((1, [456, 'inner_test_string', 789]), CollectionObject), - 'test_key': 32.4, - } - ), - value_hint=MapObject - ) - value = cache.get('test_map') - assert value == (MapObject.HASH_MAP, { - 123: 'test_data', - 456: (1, [456, 'inner_test_string', 789]), - 'test_key': 32.4, - }) diff --git a/tests/test_scan.py b/tests/test_scan.py deleted file mode 100644 index 77e9613..0000000 --- a/tests/test_scan.py +++ /dev/null @@ -1,66 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from pyignite.api import ( - scan, scan_cursor_get_page, resource_close, cache_put_all, -) - - -def test_scan(client, cache): - - page_size = 10 - - result = cache_put_all(client, cache, { - 'key_{}'.format(v): v for v in range(page_size * 2) - }) - assert result.status == 0 - - result = scan(client, cache, page_size) - assert result.status == 0 - assert len(result.value['data']) == page_size - assert result.value['more'] is True - - cursor = result.value['cursor'] - - result = scan_cursor_get_page(client, cursor) - assert result.status == 0 - assert len(result.value['data']) == page_size - assert result.value['more'] is False - - result = scan_cursor_get_page(client, cursor) - assert result.status != 0 - - -def test_close_resource(client, cache): - - page_size = 10 - - result = cache_put_all(client, cache, { - 'key_{}'.format(v): v for v in range(page_size * 2) - }) - assert result.status == 0 - - result = scan(client, cache, page_size) - assert result.status == 0 - assert len(result.value['data']) == page_size - assert result.value['more'] is True - - cursor = result.value['cursor'] - - result = resource_close(client, cursor) - assert result.status == 0 - - result = scan_cursor_get_page(client, cursor) - assert result.status != 0 diff --git a/tests/test_sql.py b/tests/test_sql.py deleted file mode 100644 index d983a20..0000000 --- a/tests/test_sql.py +++ /dev/null @@ -1,184 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from pyignite.api import ( - sql_fields, sql_fields_cursor_get_page, - cache_get_or_create, sql, sql_cursor_get_page, - cache_get_configuration, -) -from pyignite.datatypes.prop_codes import * -from pyignite.utils import entity_id, unwrap_binary - -initial_data = [ - ('John', 'Doe', 5), - ('Jane', 'Roe', 4), - ('Joe', 'Bloggs', 4), - ('Richard', 'Public', 3), - ('Negidius', 'Numerius', 3), - ] - -create_query = '''CREATE TABLE Student ( - id INT(11) PRIMARY KEY, - first_name CHAR(24), - last_name CHAR(32), - grade INT(11))''' - -insert_query = '''INSERT INTO Student(id, first_name, last_name, grade) -VALUES (?, ?, ?, ?)''' - -select_query = 'SELECT id, first_name, last_name, grade FROM Student' - -drop_query = 'DROP TABLE Student IF EXISTS' - -page_size = 4 - - -def test_sql(client): - - # cleanup - client.sql(drop_query) - - result = sql_fields( - client, - 'PUBLIC', - create_query, - page_size, - include_field_names=True - ) - assert result.status == 0, result.message - - for i, data_line in enumerate(initial_data, start=1): - fname, lname, grade = data_line - result = sql_fields( - client, - 'PUBLIC', - insert_query, - page_size, - query_args=[i, fname, lname, grade], - include_field_names=True - ) - assert result.status == 0, result.message - - result = cache_get_configuration(client, 'SQL_PUBLIC_STUDENT') - assert result.status == 0, result.message - - binary_type_name = result.value[PROP_QUERY_ENTITIES][0]['value_type_name'] - result = sql( - client, - 'SQL_PUBLIC_STUDENT', - binary_type_name, - 'TRUE', - page_size - ) - assert result.status == 0, result.message - assert len(result.value['data']) == page_size - assert result.value['more'] is True - - for wrapped_object in result.value['data'].values(): - data = unwrap_binary(client, wrapped_object) - assert data.type_id == entity_id(binary_type_name) - - cursor = result.value['cursor'] - - while result.value['more']: - result = sql_cursor_get_page(client, cursor) - assert result.status == 0, result.message - - for wrapped_object in result.value['data'].values(): - data = unwrap_binary(client, wrapped_object) - assert data.type_id == entity_id(binary_type_name) - - # repeat cleanup - result = sql_fields(client, 'PUBLIC', drop_query, page_size) - assert result.status == 0 - - -def test_sql_fields(client): - - # cleanup - client.sql(drop_query) - - result = sql_fields( - client, - 'PUBLIC', - create_query, - page_size, - include_field_names=True - ) - assert result.status == 0, result.message - - for i, data_line in enumerate(initial_data, start=1): - fname, lname, grade = data_line - result = sql_fields( - client, - 'PUBLIC', - insert_query, - page_size, - query_args=[i, fname, lname, grade], - include_field_names=True - ) - assert result.status == 0, result.message - - result = sql_fields( - client, - 'PUBLIC', - select_query, - page_size, - include_field_names=True - ) - assert result.status == 0 - assert len(result.value['data']) == page_size - assert result.value['more'] is True - - cursor = result.value['cursor'] - - result = sql_fields_cursor_get_page(client, cursor, field_count=4) - assert result.status == 0 - assert len(result.value['data']) == len(initial_data) - page_size - assert result.value['more'] is False - - # repeat cleanup - result = sql_fields(client, 'PUBLIC', drop_query, page_size) - assert result.status == 0 - - -def test_long_multipage_query(client): - """ - The test creates a table with 13 columns (id and 12 enumerated columns) - and 20 records with id in range from 1 to 20. 
Values of enumerated columns - are = column number * id. - - The goal is to ensure that all the values are selected in a right order. - """ - - fields = ["id", "abc", "ghi", "def", "jkl", "prs", "mno", "tuw", "zyz", "abc1", "def1", "jkl1", "prs1"] - - client.sql('DROP TABLE LongMultipageQuery IF EXISTS') - - client.sql("CREATE TABLE LongMultiPageQuery (%s, %s)" % \ - (fields[0] + " INT(11) PRIMARY KEY", ",".join(map(lambda f: f + " INT(11)", fields[1:])))) - - for id in range(1, 21): - client.sql( - "INSERT INTO LongMultipageQuery (%s) VALUES (%s)" % (",".join(fields), ",".join("?" * len(fields))), - query_args=[id] + list(i * id for i in range(1, len(fields)))) - - result = client.sql('SELECT * FROM LongMultipageQuery', page_size=1) - for page in result: - assert len(page) == len(fields) - for field_number, value in enumerate(page[1:], start=1): - assert value == field_number * page[0] - - client.sql(drop_query) diff --git a/tests/util.py b/tests/util.py new file mode 100644 index 0000000..af3b70e --- /dev/null +++ b/tests/util.py @@ -0,0 +1,218 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import asyncio +import contextlib +import glob +import inspect +import os +import shutil + +import jinja2 as jinja2 +import psutil +import re +import signal +import subprocess +import time + + +try: + from contextlib import asynccontextmanager +except ImportError: + from async_generator import asynccontextmanager + + +@contextlib.contextmanager +def get_or_create_cache(client, settings): + cache = client.get_or_create_cache(settings) + try: + yield cache + finally: + cache.destroy() + + +@asynccontextmanager +async def get_or_create_cache_async(client, settings): + cache = await client.get_or_create_cache(settings) + try: + yield cache + finally: + await cache.destroy() + + +def wait_for_condition(condition, interval=0.1, timeout=10, error=None): + start = time.time() + res = condition() + + while not res and time.time() - start < timeout: + time.sleep(interval) + res = condition() + + if res: + return True + + if error is not None: + raise Exception(error) + + return False + + +async def wait_for_condition_async(condition, interval=0.1, timeout=10, error=None): + start = time.time() + res = await condition() if inspect.iscoroutinefunction(condition) else condition() + + while not res and time.time() - start < timeout: + await asyncio.sleep(interval) + res = await condition() if inspect.iscoroutinefunction(condition) else condition() + + if res: + return True + + if error is not None: + raise Exception(error) + + return False + + +def is_windows(): + return os.name == "nt" + + +def get_test_dir(): + return os.path.dirname(os.path.realpath(__file__)) + + +def get_ignite_dirs(): + ignite_home = os.getenv("IGNITE_HOME") + if ignite_home is not None: + yield ignite_home + + proj_dir = os.path.abspath(os.path.join(get_test_dir(), "..", "..")) + yield os.path.join(proj_dir, "ignite") + yield os.path.join(proj_dir, "incubator_ignite") + + +def get_ignite_runner(): + ext = ".bat" if is_windows() else ".sh" + for ignite_dir in get_ignite_dirs(): + runner = os.path.join(ignite_dir, "bin", "ignite" + ext) + print("Probing Ignite runner at '{0}'...".format(runner)) + if os.path.exists(runner): + return runner + + raise Exception(f"Ignite not found. 
IGNITE_HOME {os.getenv('IGNITE_HOME')}") + + +def get_ignite_config_path(use_ssl=False): + if use_ssl: + file_name = "ignite-config-ssl.xml" + else: + file_name = "ignite-config.xml.jinja2" + + return os.path.join(get_test_dir(), "config", file_name) + + +def check_server_started(idx=1): + pattern = re.compile('^Topology snapshot.*') + + for log_file in get_log_files(idx): + with open(log_file) as f: + for line in f.readlines(): + if pattern.match(line): + return True + + return False + + +def kill_process_tree(pid): + if is_windows(): + subprocess.call(['taskkill', '/F', '/T', '/PID', str(pid)]) + else: + children = psutil.Process(pid).children(recursive=True) + for child in children: + os.kill(child.pid, signal.SIGKILL) + os.kill(pid, signal.SIGKILL) + + +templateLoader = jinja2.FileSystemLoader(searchpath=os.path.join(get_test_dir(), "config")) +templateEnv = jinja2.Environment(loader=templateLoader) + + +def create_config_file(tpl_name, file_name, **kwargs): + template = templateEnv.get_template(tpl_name) + with open(os.path.join(get_test_dir(), "config", file_name), mode='w') as f: + f.write(template.render(**kwargs)) + + +def start_ignite(idx=1, debug=False, use_ssl=False, use_auth=False, use_persistence=False): + clear_logs(idx) + + runner = get_ignite_runner() + + env = os.environ.copy() + + if debug: + env["JVM_OPTS"] = "-Djava.net.preferIPv4Stack=true -Xdebug -Xnoagent -Djava.compiler=NONE " \ + "-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005 " + + if use_auth: + use_persistence = True + + params = { + 'ignite_instance_idx': str(idx), + 'ignite_client_port': 10800 + idx, + 'use_ssl': use_ssl, + 'use_auth': use_auth, + 'use_persistence': use_persistence, + } + + create_config_file('log4j.xml.jinja2', f'log4j-{idx}.xml', **params) + create_config_file('ignite-config.xml.jinja2', f'ignite-config-{idx}.xml', **params) + + ignite_cmd = [runner, os.path.join(get_test_dir(), "config", f'ignite-config-{idx}.xml')] + print("Starting Ignite server node:", ignite_cmd) + + srv = subprocess.Popen(ignite_cmd, env=env, cwd=get_test_dir()) + + started = wait_for_condition(lambda: check_server_started(idx), timeout=60) + if started: + return srv + + kill_process_tree(srv.pid) + raise Exception("Failed to start Ignite: timeout while trying to connect") + + +def start_ignite_gen(idx=1, use_ssl=False, use_auth=False, use_persistence=False): + srv = start_ignite(idx, use_ssl=use_ssl, use_auth=use_auth, use_persistence=use_persistence) + try: + yield srv + finally: + kill_process_tree(srv.pid) + + +def get_log_files(idx=1): + logs_pattern = os.path.join(get_test_dir(), "logs", "ignite-log-{0}*.txt".format(idx)) + return glob.glob(logs_pattern) + + +def clear_ignite_work_dir(): + for ignite_dir in get_ignite_dirs(): + work_dir = os.path.join(ignite_dir, 'work') + if os.path.exists(work_dir): + shutil.rmtree(work_dir, ignore_errors=True) + + +def clear_logs(idx=1): + for f in get_log_files(idx): + os.remove(f) diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..d68f02e --- /dev/null +++ b/tox.ini @@ -0,0 +1,48 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +[tox] +skipsdist = True +envlist = codestyle,py{37,38,39,310,311} + +[pytest] +log_format = %(asctime)s %(name)s %(levelname)s %(message)s +log_date_format = %Y-%m-%d %H:%M:%S +# Uncomment if you want verbose logging for all tests (for failed it will be printed anyway). +# log_cli = True +# log_cli_level = DEBUG + +[flake8] +max-line-length=120 +ignore = F401,F403,F405,F821 + +[testenv:codestyle] +basepython = python3.8 +commands = flake8 + +[testenv] +passenv = TEAMCITY_VERSION IGNITE_HOME +envdir = {homedir}/.virtualenvs/pyignite-{envname} +deps = + -r ./requirements/install.txt + -r ./requirements/tests.txt +recreate = True +usedevelop = True +commands = + pytest {env:PYTESTARGS:} {posargs} --force-cext --examples + +[testenv:py{37,38,39,310,311}-jenkins] +setenv: + PYTESTARGS = --junitxml=junit-{envname}.xml
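
As a quick orientation for the new test helpers, below is a minimal sketch of how `start_ignite_gen` and `get_or_create_cache` from `tests/util.py` are meant to be combined in a test module, following the same fixture pattern this patch introduces in `tests/test_examples.py`. It assumes a local Ignite distribution discoverable via `IGNITE_HOME`; the fixture name, cache name, key and value are illustrative only, not part of the patch.

```python
import pytest

from pyignite import Client
from tests.util import get_or_create_cache, start_ignite_gen


@pytest.fixture(autouse=True)
def server():
    # Starts an Ignite node (bin/ignite.sh found via IGNITE_HOME) listening on
    # port 10800 + idx, and kills the whole process tree after the module's tests.
    yield from start_ignite_gen(idx=0)


def test_put_get(server):
    client = Client()
    client.connect('127.0.0.1', 10800)
    try:
        # get_or_create_cache() destroys the cache when the with-block exits.
        with get_or_create_cache(client, 'illustrative_cache') as cache:
            cache.put('key', 42)
            assert cache.get('key') == 42
    finally:
        client.close()
```

Destroying the cache when the `with` block exits keeps repeated runs isolated, for the same reason the server generator kills the whole Ignite process tree in its `finally` clause.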