diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml
index 0ddd0e4d18..a3da1b0d4c 100644
--- a/.github/.OwlBot.lock.yaml
+++ b/.github/.OwlBot.lock.yaml
@@ -13,5 +13,5 @@
# limitations under the License.
docker:
image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest
- digest: sha256:6c1cbc75c74b8bdd71dada2fa1677e9d6d78a889e9a70ee75b93d1d0543f96e1
-# created: 2023-07-25T21:01:10.396410762Z
+ digest: sha256:3e3800bb100af5d7f9e810d48212b37812c1856d20ffeafb99ebe66461b61fc7
+# created: 2023-08-02T10:53:29.114535628Z
diff --git a/.github/blunderbuss.yml b/.github/blunderbuss.yml
index fc2092ed7f..68b2d1df54 100644
--- a/.github/blunderbuss.yml
+++ b/.github/blunderbuss.yml
@@ -1,2 +1,2 @@
assign_issues:
- - asthamohta
+ - harshachinta
diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt
index 76d9bba0f7..029bd342de 100644
--- a/.kokoro/requirements.txt
+++ b/.kokoro/requirements.txt
@@ -113,30 +113,30 @@ commonmark==0.9.1 \
--hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \
--hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9
# via rich
-cryptography==41.0.2 \
- --hash=sha256:01f1d9e537f9a15b037d5d9ee442b8c22e3ae11ce65ea1f3316a41c78756b711 \
- --hash=sha256:079347de771f9282fbfe0e0236c716686950c19dee1b76240ab09ce1624d76d7 \
- --hash=sha256:182be4171f9332b6741ee818ec27daff9fb00349f706629f5cbf417bd50e66fd \
- --hash=sha256:192255f539d7a89f2102d07d7375b1e0a81f7478925b3bc2e0549ebf739dae0e \
- --hash=sha256:2a034bf7d9ca894720f2ec1d8b7b5832d7e363571828037f9e0c4f18c1b58a58 \
- --hash=sha256:342f3767e25876751e14f8459ad85e77e660537ca0a066e10e75df9c9e9099f0 \
- --hash=sha256:439c3cc4c0d42fa999b83ded80a9a1fb54d53c58d6e59234cfe97f241e6c781d \
- --hash=sha256:49c3222bb8f8e800aead2e376cbef687bc9e3cb9b58b29a261210456a7783d83 \
- --hash=sha256:674b669d5daa64206c38e507808aae49904c988fa0a71c935e7006a3e1e83831 \
- --hash=sha256:7a9a3bced53b7f09da251685224d6a260c3cb291768f54954e28f03ef14e3766 \
- --hash=sha256:7af244b012711a26196450d34f483357e42aeddb04128885d95a69bd8b14b69b \
- --hash=sha256:7d230bf856164de164ecb615ccc14c7fc6de6906ddd5b491f3af90d3514c925c \
- --hash=sha256:84609ade00a6ec59a89729e87a503c6e36af98ddcd566d5f3be52e29ba993182 \
- --hash=sha256:9a6673c1828db6270b76b22cc696f40cde9043eb90373da5c2f8f2158957f42f \
- --hash=sha256:9b6d717393dbae53d4e52684ef4f022444fc1cce3c48c38cb74fca29e1f08eaa \
- --hash=sha256:9c3fe6534d59d071ee82081ca3d71eed3210f76ebd0361798c74abc2bcf347d4 \
- --hash=sha256:a719399b99377b218dac6cf547b6ec54e6ef20207b6165126a280b0ce97e0d2a \
- --hash=sha256:b332cba64d99a70c1e0836902720887fb4529ea49ea7f5462cf6640e095e11d2 \
- --hash=sha256:d124682c7a23c9764e54ca9ab5b308b14b18eba02722b8659fb238546de83a76 \
- --hash=sha256:d73f419a56d74fef257955f51b18d046f3506270a5fd2ac5febbfa259d6c0fa5 \
- --hash=sha256:f0dc40e6f7aa37af01aba07277d3d64d5a03dc66d682097541ec4da03cc140ee \
- --hash=sha256:f14ad275364c8b4e525d018f6716537ae7b6d369c094805cae45300847e0894f \
- --hash=sha256:f772610fe364372de33d76edcd313636a25684edb94cee53fd790195f5989d14
+cryptography==41.0.3 \
+ --hash=sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306 \
+ --hash=sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84 \
+ --hash=sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47 \
+ --hash=sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d \
+ --hash=sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116 \
+ --hash=sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207 \
+ --hash=sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81 \
+ --hash=sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087 \
+ --hash=sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd \
+ --hash=sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507 \
+ --hash=sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858 \
+ --hash=sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae \
+ --hash=sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34 \
+ --hash=sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906 \
+ --hash=sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd \
+ --hash=sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922 \
+ --hash=sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7 \
+ --hash=sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4 \
+ --hash=sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574 \
+ --hash=sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1 \
+ --hash=sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c \
+ --hash=sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e \
+ --hash=sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de
# via
# gcp-releasetool
# secretstorage
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 9e3898fd1c..19409cbd37 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -26,6 +26,6 @@ repos:
hooks:
- id: black
- repo: https://github.com/pycqa/flake8
- rev: 3.9.2
+ rev: 6.1.0
hooks:
- id: flake8
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 9f49ec500c..7ce5921b04 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "3.38.0"
+ ".": "3.40.1"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f8f39f053a..9fed5da30c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,32 @@
[1]: https://pypi.org/project/google-cloud-spanner/#history
+## [3.40.1](https://github.com/googleapis/python-spanner/compare/v3.40.0...v3.40.1) (2023-08-17)
+
+
+### Bug Fixes
+
+* Fix to reload table when checking if table exists ([#1002](https://github.com/googleapis/python-spanner/issues/1002)) ([53bda62](https://github.com/googleapis/python-spanner/commit/53bda62c4996d622b7a11e860841c16e4097bded))
+
+## [3.40.0](https://github.com/googleapis/python-spanner/compare/v3.39.0...v3.40.0) (2023-08-04)
+
+
+### Features
+
+* Enable leader aware routing by default. This update contains performance optimisations that will reduce the latency of read/write transactions that originate from a region other than the default leader region. ([e8dbfe7](https://github.com/googleapis/python-spanner/commit/e8dbfe709d72a04038e05166adbad275642f1f22))
+
+## [3.39.0](https://github.com/googleapis/python-spanner/compare/v3.38.0...v3.39.0) (2023-08-02)
+
+
+### Features
+
+* Foreign key on delete cascade action testing and samples ([#910](https://github.com/googleapis/python-spanner/issues/910)) ([681c8ee](https://github.com/googleapis/python-spanner/commit/681c8eead40582addf75e02c159ea1ff9d6de85e))
+
+
+### Documentation
+
+* Minor formatting ([#991](https://github.com/googleapis/python-spanner/issues/991)) ([60efc42](https://github.com/googleapis/python-spanner/commit/60efc426cf26c4863d81743a5545c5f296308815))
+
## [3.38.0](https://github.com/googleapis/python-spanner/compare/v3.37.0...v3.38.0) (2023-07-21)
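
The 3.40.0 entry above flips leader-aware routing on by default. As a minimal sketch of what opting back out looks like from the public client surface (the instance and database IDs below are placeholders, not taken from this diff):

```python
# Minimal sketch: opting out of leader-aware routing, which 3.40.0
# enables by default. "my-instance"/"my-database" are placeholder IDs.
from google.cloud import spanner

client = spanner.Client(route_to_leader_enabled=False)  # default is now True
database = client.instance("my-instance").database("my-database")

with database.snapshot() as snapshot:
    for row in snapshot.execute_sql("SELECT 1"):
        print(row)
```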
diff --git a/google/cloud/spanner_admin_database_v1/gapic_version.py b/google/cloud/spanner_admin_database_v1/gapic_version.py
index e0c31c2ce4..4f879f0e40 100644
--- a/google/cloud/spanner_admin_database_v1/gapic_version.py
+++ b/google/cloud/spanner_admin_database_v1/gapic_version.py
@@ -13,4 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-__version__ = "3.38.0" # {x-release-please-version}
+__version__ = "3.40.1" # {x-release-please-version}
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py
index fa0d9a059c..4cd1d4756a 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py
@@ -1293,42 +1293,11 @@ async def sample_set_iam_policy():
**JSON example:**
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
+ :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+ :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
@@ -1467,42 +1436,11 @@ async def sample_get_iam_policy():
**JSON example:**
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
+ :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+ :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py
index f41c0ec86a..b6f2d1f1e7 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py
@@ -1559,42 +1559,11 @@ def sample_set_iam_policy():
**JSON example:**
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
+ :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+ :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
@@ -1730,42 +1699,11 @@ def sample_get_iam_policy():
**JSON example:**
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
+ :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+ :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py
index b210297f8c..bd35307fcc 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py
@@ -1725,54 +1725,54 @@ def __call__(
::
- {
- "bindings": [
- {
- "role": "roles/resourcemanager.organizationAdmin",
- "members": [
- "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
- },
- {
- "role": "roles/resourcemanager.organizationViewer",
- "members": [
- "user:eve@example.com"
- ],
- "condition": {
- "title": "expirable access",
- "description": "Does not grant access after Sep 2020",
- "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')",
- }
- }
- ],
- "etag": "BwWWja0YfJA=",
- "version": 3
- }
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": [
+ "user:eve@example.com"
+ ],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ],
+ "etag": "BwWWja0YfJA=",
+ "version": 3
+ }
**YAML example:**
::
- bindings:
- - members:
- - user:mike@example.com
- - group:admins@example.com
- - domain:google.com
- - serviceAccount:my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin
- - members:
- - user:eve@example.com
- role: roles/resourcemanager.organizationViewer
- condition:
- title: expirable access
- description: Does not grant access after Sep 2020
- expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
- etag: BwWWja0YfJA=
- version: 3
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+ etag: BwWWja0YfJA=
+ version: 3
For a description of IAM and its features, see the `IAM
documentation <https://cloud.google.com/iam/docs/>`__.
@@ -2452,54 +2452,54 @@ def __call__(
::
- {
- "bindings": [
- {
- "role": "roles/resourcemanager.organizationAdmin",
- "members": [
- "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
- },
- {
- "role": "roles/resourcemanager.organizationViewer",
- "members": [
- "user:eve@example.com"
- ],
- "condition": {
- "title": "expirable access",
- "description": "Does not grant access after Sep 2020",
- "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')",
- }
- }
- ],
- "etag": "BwWWja0YfJA=",
- "version": 3
- }
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": [
+ "user:eve@example.com"
+ ],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ],
+ "etag": "BwWWja0YfJA=",
+ "version": 3
+ }
**YAML example:**
::
- bindings:
- - members:
- - user:mike@example.com
- - group:admins@example.com
- - domain:google.com
- - serviceAccount:my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin
- - members:
- - user:eve@example.com
- role: roles/resourcemanager.organizationViewer
- condition:
- title: expirable access
- description: Does not grant access after Sep 2020
- expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
- etag: BwWWja0YfJA=
- version: 3
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+ etag: BwWWja0YfJA=
+ version: 3
For a description of IAM and its features, see the `IAM
documentation <https://cloud.google.com/iam/docs/>`__.
diff --git a/google/cloud/spanner_admin_instance_v1/gapic_version.py b/google/cloud/spanner_admin_instance_v1/gapic_version.py
index e0c31c2ce4..4f879f0e40 100644
--- a/google/cloud/spanner_admin_instance_v1/gapic_version.py
+++ b/google/cloud/spanner_admin_instance_v1/gapic_version.py
@@ -13,4 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-__version__ = "3.38.0" # {x-release-please-version}
+__version__ = "3.40.1" # {x-release-please-version}
diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py
index b523f171dc..f6dbc4e73d 100644
--- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py
+++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py
@@ -1915,42 +1915,11 @@ async def sample_set_iam_policy():
**JSON example:**
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
+ :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+ :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
@@ -2085,42 +2054,11 @@ async def sample_get_iam_policy():
**JSON example:**
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
+ :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+ :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py
index 1245c2554e..dd94cacafb 100644
--- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py
+++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py
@@ -2109,42 +2109,11 @@ def sample_set_iam_policy():
**JSON example:**
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
+ :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+ :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
@@ -2276,42 +2245,11 @@ def sample_get_iam_policy():
**JSON example:**
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
+ :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+ :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/rest.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/rest.py
index 808a3bfd1d..c743fa011d 100644
--- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/rest.py
+++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/rest.py
@@ -1086,54 +1086,54 @@ def __call__(
::
- {
- "bindings": [
- {
- "role": "roles/resourcemanager.organizationAdmin",
- "members": [
- "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
- },
- {
- "role": "roles/resourcemanager.organizationViewer",
- "members": [
- "user:eve@example.com"
- ],
- "condition": {
- "title": "expirable access",
- "description": "Does not grant access after Sep 2020",
- "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')",
- }
- }
- ],
- "etag": "BwWWja0YfJA=",
- "version": 3
- }
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": [
+ "user:eve@example.com"
+ ],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ],
+ "etag": "BwWWja0YfJA=",
+ "version": 3
+ }
**YAML example:**
::
- bindings:
- - members:
- - user:mike@example.com
- - group:admins@example.com
- - domain:google.com
- - serviceAccount:my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin
- - members:
- - user:eve@example.com
- role: roles/resourcemanager.organizationViewer
- condition:
- title: expirable access
- description: Does not grant access after Sep 2020
- expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
- etag: BwWWja0YfJA=
- version: 3
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+ etag: BwWWja0YfJA=
+ version: 3
For a description of IAM and its features, see the `IAM
documentation <https://cloud.google.com/iam/docs/>`__.
@@ -1715,54 +1715,54 @@ def __call__(
::
- {
- "bindings": [
- {
- "role": "roles/resourcemanager.organizationAdmin",
- "members": [
- "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
- },
- {
- "role": "roles/resourcemanager.organizationViewer",
- "members": [
- "user:eve@example.com"
- ],
- "condition": {
- "title": "expirable access",
- "description": "Does not grant access after Sep 2020",
- "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')",
- }
- }
- ],
- "etag": "BwWWja0YfJA=",
- "version": 3
- }
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": [
+ "user:eve@example.com"
+ ],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ],
+ "etag": "BwWWja0YfJA=",
+ "version": 3
+ }
**YAML example:**
::
- bindings:
- - members:
- - user:mike@example.com
- - group:admins@example.com
- - domain:google.com
- - serviceAccount:my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin
- - members:
- - user:eve@example.com
- role: roles/resourcemanager.organizationViewer
- condition:
- title: expirable access
- description: Does not grant access after Sep 2020
- expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
- etag: BwWWja0YfJA=
- version: 3
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+ etag: BwWWja0YfJA=
+ version: 3
For a description of IAM and its features, see the `IAM
documentation <https://cloud.google.com/iam/docs/>`__.
diff --git a/google/cloud/spanner_dbapi/connection.py b/google/cloud/spanner_dbapi/connection.py
index 0e26db2b3a..3664a0c231 100644
--- a/google/cloud/spanner_dbapi/connection.py
+++ b/google/cloud/spanner_dbapi/connection.py
@@ -508,7 +508,7 @@ def connect(
pool=None,
user_agent=None,
client=None,
- route_to_leader_enabled=False,
+ route_to_leader_enabled=True,
):
"""Creates a connection to a Google Cloud Spanner database.
@@ -547,10 +547,9 @@ def connect(
:type route_to_leader_enabled: boolean
:param route_to_leader_enabled:
- (Optional) Default False. Set route_to_leader_enabled as True to
- enable leader aware routing. Enabling leader aware routing
- would route all requests in RW/PDML transactions to the
- leader region.
+ (Optional) Default True. Set route_to_leader_enabled to False to
+ disable leader aware routing. Disabling leader aware routing would
+ route all requests in RW/PDML transactions to the closest region.
:rtype: :class:`google.cloud.spanner_dbapi.connection.Connection`
@@ -568,14 +567,14 @@ def connect(
credentials,
project=project,
client_info=client_info,
- route_to_leader_enabled=False,
+ route_to_leader_enabled=True,
)
else:
client = spanner.Client(
project=project,
credentials=credentials,
client_info=client_info,
- route_to_leader_enabled=False,
+ route_to_leader_enabled=True,
)
else:
if project is not None and client.project != project:
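
The DB-API `connect()` above now defaults `route_to_leader_enabled` to True. A hedged usage sketch of how a caller would opt out (instance and database IDs are placeholders):

```python
# Sketch of the connect() surface changed above; pass False to disable
# leader-aware routing for RW/PDML transactions. IDs are placeholders.
from google.cloud.spanner_dbapi import connect

connection = connect("my-instance", "my-database", route_to_leader_enabled=False)
cursor = connection.cursor()
cursor.execute("SELECT 1")
print(cursor.fetchall())
connection.close()
```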
diff --git a/google/cloud/spanner_dbapi/parser.py b/google/cloud/spanner_dbapi/parser.py
index 1d84daa531..f5c1d0edf7 100644
--- a/google/cloud/spanner_dbapi/parser.py
+++ b/google/cloud/spanner_dbapi/parser.py
@@ -52,7 +52,7 @@ def __repr__(self):
return self.__str__()
def __eq__(self, other):
- if type(self) != type(other):
+ if type(self) is not type(other):
return False
if self.name != other.name:
return False
@@ -95,7 +95,7 @@ def __len__(self):
return len(self.argv)
def __eq__(self, other):
- if type(self) != type(other):
+ if type(self) is not type(other):
return False
if len(self) != len(other):
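
The parser change swaps `!=` for `is not` in type comparisons, which the flake8 6.x bump elsewhere in this PR (via pycodestyle's E721) now flags. A small illustration of why both forms agree here:

```python
# E721 flags equality comparisons on types; identity comparison is the
# idiomatic form and gives the same answer when comparing exact classes.
class Base: ...
class Child(Base): ...

print(type(Base()) == type(Child()))   # False
print(type(Base()) is type(Child()))   # False -- same result, lint-clean
```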
diff --git a/google/cloud/spanner_v1/_helpers.py b/google/cloud/spanner_v1/_helpers.py
index db0827c528..b267996d1f 100644
--- a/google/cloud/spanner_v1/_helpers.py
+++ b/google/cloud/spanner_v1/_helpers.py
@@ -87,7 +87,7 @@ def _merge_query_options(base, merge):
If the resultant object only has empty fields, returns None.
"""
combined = base or ExecuteSqlRequest.QueryOptions()
- if type(combined) == dict:
+ if type(combined) is dict:
combined = ExecuteSqlRequest.QueryOptions(
optimizer_version=combined.get("optimizer_version", ""),
optimizer_statistics_package=combined.get(
@@ -95,7 +95,7 @@ def _merge_query_options(base, merge):
),
)
merge = merge or ExecuteSqlRequest.QueryOptions()
- if type(merge) == dict:
+ if type(merge) is dict:
merge = ExecuteSqlRequest.QueryOptions(
optimizer_version=merge.get("optimizer_version", ""),
optimizer_statistics_package=merge.get("optimizer_statistics_package", ""),
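
The `_merge_query_options` hunks keep the dict-to-message coercion and only tighten the type check. A sketch of the coercion they perform, assuming just the public `ExecuteSqlRequest.QueryOptions` message:

```python
# Sketch of the coercion in _merge_query_options: a plain dict of query
# options is normalized into an ExecuteSqlRequest.QueryOptions message.
from google.cloud.spanner_v1 import ExecuteSqlRequest

options = {"optimizer_version": "latest"}
if type(options) is dict:
    options = ExecuteSqlRequest.QueryOptions(
        optimizer_version=options.get("optimizer_version", ""),
        optimizer_statistics_package=options.get("optimizer_statistics_package", ""),
    )
print(options.optimizer_version)
```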
diff --git a/google/cloud/spanner_v1/backup.py b/google/cloud/spanner_v1/backup.py
index 2f54cf2167..1fcffbe05a 100644
--- a/google/cloud/spanner_v1/backup.py
+++ b/google/cloud/spanner_v1/backup.py
@@ -95,7 +95,7 @@ def __init__(
self._max_expire_time = None
self._referencing_backups = None
self._database_dialect = None
- if type(encryption_config) == dict:
+ if type(encryption_config) is dict:
if source_backup:
self._encryption_config = CopyBackupEncryptionConfig(
**encryption_config
diff --git a/google/cloud/spanner_v1/batch.py b/google/cloud/spanner_v1/batch.py
index 8cc0b37e21..1d65a74ed2 100644
--- a/google/cloud/spanner_v1/batch.py
+++ b/google/cloud/spanner_v1/batch.py
@@ -178,7 +178,7 @@ def commit(self, return_commit_stats=False, request_options=None):
if request_options is None:
request_options = RequestOptions()
- elif type(request_options) == dict:
+ elif type(request_options) is dict:
request_options = RequestOptions(request_options)
request_options.transaction_tag = self.transaction_tag
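
`Batch.commit()` accepts `request_options` as either a message or a plain dict; the hunk above only changes how the dict case is detected. A sketch of the two accepted forms:

```python
# Sketch: both forms below are accepted by commit(); a dict is coerced
# to a RequestOptions message exactly as the hunk above does.
from google.cloud.spanner_v1 import RequestOptions

request_options = {"priority": RequestOptions.Priority.PRIORITY_MEDIUM}
if type(request_options) is dict:
    request_options = RequestOptions(request_options)
print(request_options.priority)
```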
diff --git a/google/cloud/spanner_v1/client.py b/google/cloud/spanner_v1/client.py
index 955fd94820..a0e848228b 100644
--- a/google/cloud/spanner_v1/client.py
+++ b/google/cloud/spanner_v1/client.py
@@ -116,10 +116,9 @@ class Client(ClientWithProject):
:type route_to_leader_enabled: boolean
:param route_to_leader_enabled:
- (Optional) Default False. Set route_to_leader_enabled as True to
- enable leader aware routing. Enabling leader aware routing
- would route all requests in RW/PDML transactions to the
- leader region.
+ (Optional) Default True. Set route_to_leader_enabled to False to
+ disable leader aware routing. Disabling leader aware routing would
+ route all requests in RW/PDML transactions to the closest region.
:raises: :class:`ValueError ` if both ``read_only``
and ``admin`` are :data:`True`
@@ -139,11 +138,11 @@ def __init__(
client_info=_CLIENT_INFO,
client_options=None,
query_options=None,
- route_to_leader_enabled=False,
+ route_to_leader_enabled=True,
):
self._emulator_host = _get_spanner_emulator_host()
- if client_options and type(client_options) == dict:
+ if client_options and type(client_options) is dict:
self._client_options = google.api_core.client_options.from_dict(
client_options
)
diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py
index f8bb09b678..d5babb48ad 100644
--- a/google/cloud/spanner_v1/database.py
+++ b/google/cloud/spanner_v1/database.py
@@ -451,7 +451,7 @@ def create(self):
db_name = f'"{db_name}"'
else:
db_name = f"`{db_name}`"
- if type(self._encryption_config) == dict:
+ if type(self._encryption_config) is dict:
self._encryption_config = EncryptionConfig(**self._encryption_config)
request = CreateDatabaseRequest(
@@ -636,7 +636,7 @@ def execute_partitioned_dml(
)
if request_options is None:
request_options = RequestOptions()
- elif type(request_options) == dict:
+ elif type(request_options) is dict:
request_options = RequestOptions(request_options)
request_options.transaction_tag = None
@@ -821,7 +821,7 @@ def restore(self, source):
"""
if source is None:
raise ValueError("Restore source not specified")
- if type(self._encryption_config) == dict:
+ if type(self._encryption_config) is dict:
self._encryption_config = RestoreDatabaseEncryptionConfig(
**self._encryption_config
)
@@ -1026,7 +1026,7 @@ def __init__(self, database, request_options=None):
self._session = self._batch = None
if request_options is None:
self._request_options = RequestOptions()
- elif type(request_options) == dict:
+ elif type(request_options) is dict:
self._request_options = RequestOptions(request_options)
else:
self._request_options = request_options
diff --git a/google/cloud/spanner_v1/gapic_version.py b/google/cloud/spanner_v1/gapic_version.py
index e0c31c2ce4..4f879f0e40 100644
--- a/google/cloud/spanner_v1/gapic_version.py
+++ b/google/cloud/spanner_v1/gapic_version.py
@@ -13,4 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-__version__ = "3.38.0" # {x-release-please-version}
+__version__ = "3.40.1" # {x-release-please-version}
diff --git a/google/cloud/spanner_v1/snapshot.py b/google/cloud/spanner_v1/snapshot.py
index 8ffb30bf58..7b06ec6da0 100644
--- a/google/cloud/spanner_v1/snapshot.py
+++ b/google/cloud/spanner_v1/snapshot.py
@@ -255,7 +255,7 @@ def read(
if request_options is None:
request_options = RequestOptions()
- elif type(request_options) == dict:
+ elif type(request_options) is dict:
request_options = RequestOptions(request_options)
if self._read_only:
@@ -423,7 +423,7 @@ def execute_sql(
if request_options is None:
request_options = RequestOptions()
- elif type(request_options) == dict:
+ elif type(request_options) is dict:
request_options = RequestOptions(request_options)
if self._read_only:
# Transaction tags are not supported for read only transactions.
diff --git a/google/cloud/spanner_v1/table.py b/google/cloud/spanner_v1/table.py
index 0f25c41756..38ca798db8 100644
--- a/google/cloud/spanner_v1/table.py
+++ b/google/cloud/spanner_v1/table.py
@@ -77,6 +77,11 @@ def _exists(self, snapshot):
:rtype: bool
:returns: True if the table exists, else false.
"""
+ if (
+ self._database.database_dialect
+ == DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED
+ ):
+ self._database.reload()
if self._database.database_dialect == DatabaseDialect.POSTGRESQL:
results = snapshot.execute_sql(
_EXISTS_TEMPLATE.format("WHERE TABLE_NAME = $1"),
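
The table fix above reloads the database when its dialect is still `DATABASE_DIALECT_UNSPECIFIED`, so `_exists` chooses the right INFORMATION_SCHEMA query form. A hedged sketch of the user-visible path it repairs (IDs are placeholders):

```python
# Sketch of the path fixed above: Table.exists() on a freshly
# constructed Database no longer mis-detects the dialect.
from google.cloud import spanner

client = spanner.Client()
database = client.instance("my-instance").database("my-database")
table = database.table("Singers")
print(table.exists())  # reloads the database if the dialect is unspecified
```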
diff --git a/google/cloud/spanner_v1/transaction.py b/google/cloud/spanner_v1/transaction.py
index d61fb58603..af25a62eac 100644
--- a/google/cloud/spanner_v1/transaction.py
+++ b/google/cloud/spanner_v1/transaction.py
@@ -224,7 +224,7 @@ def commit(self, return_commit_stats=False, request_options=None):
if request_options is None:
request_options = RequestOptions()
- elif type(request_options) == dict:
+ elif type(request_options) is dict:
request_options = RequestOptions(request_options)
if self.transaction_tag is not None:
request_options.transaction_tag = self.transaction_tag
@@ -362,7 +362,7 @@ def execute_update(
if request_options is None:
request_options = RequestOptions()
- elif type(request_options) == dict:
+ elif type(request_options) is dict:
request_options = RequestOptions(request_options)
request_options.transaction_tag = self.transaction_tag
@@ -474,7 +474,7 @@ def batch_update(self, statements, request_options=None):
if request_options is None:
request_options = RequestOptions()
- elif type(request_options) == dict:
+ elif type(request_options) is dict:
request_options = RequestOptions(request_options)
request_options.transaction_tag = self.transaction_tag
diff --git a/noxfile.py b/noxfile.py
index eaf653cd07..95fe0d2365 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -25,6 +25,7 @@
import nox
+FLAKE8_VERSION = "flake8==6.1.0"
BLACK_VERSION = "black==22.3.0"
ISORT_VERSION = "isort==5.10.1"
LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]
@@ -83,7 +84,7 @@ def lint(session):
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
- session.install("flake8", BLACK_VERSION)
+ session.install(FLAKE8_VERSION, BLACK_VERSION)
session.run(
"black",
"--check",
diff --git a/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json b/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json
index 111a3cfca1..0ede9fccff 100644
--- a/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json
+++ b/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json
@@ -8,7 +8,7 @@
],
"language": "PYTHON",
"name": "google-cloud-spanner-admin-database",
- "version": "3.38.0"
+ "version": "3.40.1"
},
"snippets": [
{
diff --git a/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json b/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json
index 6368c573e5..76f704e8fb 100644
--- a/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json
+++ b/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json
@@ -8,7 +8,7 @@
],
"language": "PYTHON",
"name": "google-cloud-spanner-admin-instance",
- "version": "3.38.0"
+ "version": "3.40.1"
},
"snippets": [
{
diff --git a/samples/generated_samples/snippet_metadata_google.spanner.v1.json b/samples/generated_samples/snippet_metadata_google.spanner.v1.json
index c71c768c3d..a645b19356 100644
--- a/samples/generated_samples/snippet_metadata_google.spanner.v1.json
+++ b/samples/generated_samples/snippet_metadata_google.spanner.v1.json
@@ -8,7 +8,7 @@
],
"language": "PYTHON",
"name": "google-cloud-spanner",
- "version": "3.38.0"
+ "version": "3.40.1"
},
"snippets": [
{
diff --git a/samples/samples/batch_sample.py b/samples/samples/batch_sample.py
index d11dd5f95a..69913ac4b3 100644
--- a/samples/samples/batch_sample.py
+++ b/samples/samples/batch_sample.py
@@ -50,7 +50,7 @@ def run_batch_query(instance_id, database_id):
# A Partition object is serializable and can be used from a different process.
# The Data Boost option is an optional parameter that can also be used for
# partitioned reads and queries, executing the request via Spanner-independent
# compute resources.
- data_boost_enabled=False,
+ data_boost_enabled=True,
)
# Create a pool of workers for the tasks
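
The sample now runs its partitioned query with Data Boost enabled. A condensed sketch of the surrounding call, assuming the sample's `generate_query_batches` usage and placeholder IDs:

```python
# Condensed sketch of the sample's partitioned query with Data Boost
# enabled; instance/database IDs and the query are placeholders.
from google.cloud import spanner

client = spanner.Client()
database = client.instance("my-instance").database("my-database")

batch_snapshot = database.batch_snapshot()
batches = batch_snapshot.generate_query_batches(
    "SELECT SingerId, FirstName, LastName FROM Singers",
    data_boost_enabled=True,  # serve partitions from independent compute
)
for batch in batches:
    for row in batch_snapshot.process(batch):
        print(row)
```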
diff --git a/samples/samples/conftest.py b/samples/samples/conftest.py
index c63548c460..5b1af63876 100644
--- a/samples/samples/conftest.py
+++ b/samples/samples/conftest.py
@@ -38,20 +38,19 @@
def sample_name():
"""Sample testcase modules must define this fixture.
- The name is used to label the instance created by the sample, to
- aid in debugging leaked instances.
- """
- raise NotImplementedError(
- "Define 'sample_name' fixture in sample test driver")
+ The name is used to label the instance created by the sample, to
+ aid in debugging leaked instances.
+ """
+ raise NotImplementedError("Define 'sample_name' fixture in sample test driver")
@pytest.fixture(scope="module")
def database_dialect():
"""Database dialect to be used for this sample.
- The dialect is used to initialize the dialect for the database.
- It can either be GoogleStandardSql or PostgreSql.
- """
+ The dialect is used to initialize the dialect for the database.
+ It can either be GoogleStandardSql or PostgreSql.
+ """
# By default, we consider GOOGLE_STANDARD_SQL dialect. Other specific tests
# can override this if required.
return DatabaseDialect.GOOGLE_STANDARD_SQL
@@ -105,7 +104,7 @@ def multi_region_instance_id():
@pytest.fixture(scope="module")
def instance_config(spanner_client):
return "{}/instanceConfigs/{}".format(
- spanner_client.project_name, "regional-us-central1"
+ spanner_client.project_name, "regional-us-central1"
)
@@ -116,20 +115,20 @@ def multi_region_instance_config(spanner_client):
@pytest.fixture(scope="module")
def sample_instance(
- spanner_client,
- cleanup_old_instances,
- instance_id,
- instance_config,
- sample_name,
+ spanner_client,
+ cleanup_old_instances,
+ instance_id,
+ instance_config,
+ sample_name,
):
sample_instance = spanner_client.instance(
- instance_id,
- instance_config,
- labels={
- "cloud_spanner_samples": "true",
- "sample_name": sample_name,
- "created": str(int(time.time())),
- },
+ instance_id,
+ instance_config,
+ labels={
+ "cloud_spanner_samples": "true",
+ "sample_name": sample_name,
+ "created": str(int(time.time())),
+ },
)
op = retry_429(sample_instance.create)()
op.result(INSTANCE_CREATION_TIMEOUT) # block until completion
@@ -151,20 +150,20 @@ def sample_instance(
@pytest.fixture(scope="module")
def multi_region_instance(
- spanner_client,
- cleanup_old_instances,
- multi_region_instance_id,
- multi_region_instance_config,
- sample_name,
+ spanner_client,
+ cleanup_old_instances,
+ multi_region_instance_id,
+ multi_region_instance_config,
+ sample_name,
):
multi_region_instance = spanner_client.instance(
- multi_region_instance_id,
- multi_region_instance_config,
- labels={
- "cloud_spanner_samples": "true",
- "sample_name": sample_name,
- "created": str(int(time.time())),
- },
+ multi_region_instance_id,
+ multi_region_instance_config,
+ labels={
+ "cloud_spanner_samples": "true",
+ "sample_name": sample_name,
+ "created": str(int(time.time())),
+ },
)
op = retry_429(multi_region_instance.create)()
op.result(INSTANCE_CREATION_TIMEOUT) # block until completion
@@ -188,31 +187,37 @@ def multi_region_instance(
def database_id():
"""Id for the database used in samples.
- Sample testcase modules can override as needed.
- """
+ Sample testcase modules can override as needed.
+ """
return "my-database-id"
+@pytest.fixture(scope="module")
+def bit_reverse_sequence_database_id():
+ """Id for the database used in bit reverse sequence samples.
+
+ Sample testcase modules can override as needed.
+ """
+ return "sequence-database-id"
+
+
@pytest.fixture(scope="module")
def database_ddl():
"""Sequence of DDL statements used to set up the database.
- Sample testcase modules can override as needed.
- """
+ Sample testcase modules can override as needed.
+ """
return []
@pytest.fixture(scope="module")
def sample_database(
- spanner_client,
- sample_instance,
- database_id,
- database_ddl,
- database_dialect):
+ spanner_client, sample_instance, database_id, database_ddl, database_dialect
+):
if database_dialect == DatabaseDialect.POSTGRESQL:
sample_database = sample_instance.database(
- database_id,
- database_dialect=DatabaseDialect.POSTGRESQL,
+ database_id,
+ database_dialect=DatabaseDialect.POSTGRESQL,
)
if not sample_database.exists():
@@ -220,12 +225,11 @@ def sample_database(
operation.result(OPERATION_TIMEOUT_SECONDS)
request = spanner_admin_database_v1.UpdateDatabaseDdlRequest(
- database=sample_database.name,
- statements=database_ddl,
+ database=sample_database.name,
+ statements=database_ddl,
)
- operation =\
- spanner_client.database_admin_api.update_database_ddl(request)
+ operation = spanner_client.database_admin_api.update_database_ddl(request)
operation.result(OPERATION_TIMEOUT_SECONDS)
yield sample_database
@@ -234,8 +238,8 @@ def sample_database(
return
sample_database = sample_instance.database(
- database_id,
- ddl_statements=database_ddl,
+ database_id,
+ ddl_statements=database_ddl,
)
if not sample_database.exists():
@@ -247,11 +251,43 @@ def sample_database(
sample_database.drop()
+@pytest.fixture(scope="module")
+def bit_reverse_sequence_database(
+ spanner_client, sample_instance, bit_reverse_sequence_database_id, database_dialect
+):
+ if database_dialect == DatabaseDialect.POSTGRESQL:
+ bit_reverse_sequence_database = sample_instance.database(
+ bit_reverse_sequence_database_id,
+ database_dialect=DatabaseDialect.POSTGRESQL,
+ )
+
+ if not bit_reverse_sequence_database.exists():
+ operation = bit_reverse_sequence_database.create()
+ operation.result(OPERATION_TIMEOUT_SECONDS)
+
+ yield bit_reverse_sequence_database
+
+ bit_reverse_sequence_database.drop()
+ return
+
+ bit_reverse_sequence_database = sample_instance.database(
+ bit_reverse_sequence_database_id
+ )
+
+ if not bit_reverse_sequence_database.exists():
+ operation = bit_reverse_sequence_database.create()
+ operation.result(OPERATION_TIMEOUT_SECONDS)
+
+ yield bit_reverse_sequence_database
+
+ bit_reverse_sequence_database.drop()
+
+
@pytest.fixture(scope="module")
def kms_key_name(spanner_client):
return "projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}".format(
- spanner_client.project,
- "us-central1",
- "spanner-test-keyring",
- "spanner-test-cmek",
+ spanner_client.project,
+ "us-central1",
+ "spanner-test-keyring",
+ "spanner-test-cmek",
)
diff --git a/samples/samples/pg_snippets.py b/samples/samples/pg_snippets.py
index f53fe1d4dd..51ddec6906 100644
--- a/samples/samples/pg_snippets.py
+++ b/samples/samples/pg_snippets.py
@@ -39,19 +39,19 @@ def create_instance(instance_id):
spanner_client = spanner.Client()
config_name = "{}/instanceConfigs/regional-us-central1".format(
- spanner_client.project_name
+ spanner_client.project_name
)
instance = spanner_client.instance(
- instance_id,
- configuration_name=config_name,
- display_name="This is a display name.",
- node_count=1,
- labels={
- "cloud_spanner_samples": "true",
- "sample_name": "snippets-create_instance-explicit",
- "created": str(int(time.time())),
- },
+ instance_id,
+ configuration_name=config_name,
+ display_name="This is a display name.",
+ node_count=1,
+ labels={
+ "cloud_spanner_samples": "true",
+ "sample_name": "snippets-create_instance-explicit",
+ "created": str(int(time.time())),
+ },
)
operation = instance.create()
@@ -72,8 +72,8 @@ def create_database(instance_id, database_id):
instance = spanner_client.instance(instance_id)
database = instance.database(
- database_id,
- database_dialect=DatabaseDialect.POSTGRESQL,
+ database_id,
+ database_dialect=DatabaseDialect.POSTGRESQL,
)
operation = database.create()
@@ -88,9 +88,9 @@ def create_database(instance_id, database_id):
def create_table_using_ddl(database_name):
spanner_client = spanner.Client()
request = spanner_admin_database_v1.UpdateDatabaseDdlRequest(
- database=database_name,
- statements=[
- """CREATE TABLE Singers (
+ database=database_name,
+ statements=[
+ """CREATE TABLE Singers (
SingerId bigint NOT NULL,
FirstName character varying(1024),
LastName character varying(1024),
@@ -99,13 +99,13 @@ def create_table_using_ddl(database_name):
GENERATED ALWAYS AS (FirstName || ' ' || LastName) STORED,
PRIMARY KEY (SingerId)
)""",
- """CREATE TABLE Albums (
+ """CREATE TABLE Albums (
SingerId bigint NOT NULL,
AlbumId bigint NOT NULL,
AlbumTitle character varying(1024),
PRIMARY KEY (SingerId, AlbumId)
) INTERLEAVE IN PARENT Singers ON DELETE CASCADE""",
- ],
+ ],
)
operation = spanner_client.database_admin_api.update_database_ddl(request)
operation.result(OPERATION_TIMEOUT_SECONDS)
@@ -127,27 +127,27 @@ def insert_data(instance_id, database_id):
with database.batch() as batch:
batch.insert(
- table="Singers",
- columns=("SingerId", "FirstName", "LastName"),
- values=[
- (1, "Marc", "Richards"),
- (2, "Catalina", "Smith"),
- (3, "Alice", "Trentor"),
- (4, "Lea", "Martin"),
- (5, "David", "Lomond"),
- ],
+ table="Singers",
+ columns=("SingerId", "FirstName", "LastName"),
+ values=[
+ (1, "Marc", "Richards"),
+ (2, "Catalina", "Smith"),
+ (3, "Alice", "Trentor"),
+ (4, "Lea", "Martin"),
+ (5, "David", "Lomond"),
+ ],
)
batch.insert(
- table="Albums",
- columns=("SingerId", "AlbumId", "AlbumTitle"),
- values=[
- (1, 1, "Total Junk"),
- (1, 2, "Go, Go, Go"),
- (2, 1, "Green"),
- (2, 2, "Forever Hold Your Peace"),
- (2, 3, "Terrified"),
- ],
+ table="Albums",
+ columns=("SingerId", "AlbumId", "AlbumTitle"),
+ values=[
+ (1, 1, "Total Junk"),
+ (1, 2, "Go, Go, Go"),
+ (2, 1, "Green"),
+ (2, 2, "Forever Hold Your Peace"),
+ (2, 3, "Terrified"),
+ ],
)
print("Inserted data.")
@@ -198,7 +198,7 @@ def query_data(instance_id, database_id):
with database.snapshot() as snapshot:
results = snapshot.execute_sql(
- "SELECT SingerId, AlbumId, AlbumTitle FROM Albums"
+ "SELECT SingerId, AlbumId, AlbumTitle FROM Albums"
)
for row in results:
@@ -218,8 +218,7 @@ def read_data(instance_id, database_id):
with database.snapshot() as snapshot:
keyset = spanner.KeySet(all_=True)
results = snapshot.read(
- table="Albums", columns=("SingerId", "AlbumId", "AlbumTitle"),
- keyset=keyset
+ table="Albums", columns=("SingerId", "AlbumId", "AlbumTitle"), keyset=keyset
)
for row in results:
@@ -237,7 +236,7 @@ def add_column(instance_id, database_id):
database = instance.database(database_id)
operation = database.update_ddl(
- ["ALTER TABLE Albums ADD COLUMN MarketingBudget BIGINT"]
+ ["ALTER TABLE Albums ADD COLUMN MarketingBudget BIGINT"]
)
print("Waiting for operation to complete...")
@@ -266,9 +265,9 @@ def update_data(instance_id, database_id):
with database.batch() as batch:
batch.update(
- table="Albums",
- columns=("SingerId", "AlbumId", "MarketingBudget"),
- values=[(1, 1, 100000), (2, 2, 500000)],
+ table="Albums",
+ columns=("SingerId", "AlbumId", "MarketingBudget"),
+ values=[(1, 1, 100000), (2, 2, 500000)],
)
print("Updated data.")
@@ -297,10 +296,10 @@ def update_albums(transaction):
# Read the second album budget.
second_album_keyset = spanner.KeySet(keys=[(2, 2)])
second_album_result = transaction.read(
- table="Albums",
- columns=("MarketingBudget",),
- keyset=second_album_keyset,
- limit=1,
+ table="Albums",
+ columns=("MarketingBudget",),
+ keyset=second_album_keyset,
+ limit=1,
)
second_album_row = list(second_album_result)[0]
second_album_budget = second_album_row[0]
@@ -310,16 +309,15 @@ def update_albums(transaction):
if second_album_budget < transfer_amount:
# Raising an exception will automatically roll back the
# transaction.
- raise ValueError(
- "The second album doesn't have enough funds to transfer")
+ raise ValueError("The second album doesn't have enough funds to transfer")
# Read the first album's budget.
first_album_keyset = spanner.KeySet(keys=[(1, 1)])
first_album_result = transaction.read(
- table="Albums",
- columns=("MarketingBudget",),
- keyset=first_album_keyset,
- limit=1,
+ table="Albums",
+ columns=("MarketingBudget",),
+ keyset=first_album_keyset,
+ limit=1,
)
first_album_row = list(first_album_result)[0]
first_album_budget = first_album_row[0]
@@ -328,15 +326,15 @@ def update_albums(transaction):
second_album_budget -= transfer_amount
first_album_budget += transfer_amount
print(
- "Setting first album's budget to {} and the second album's "
- "budget to {}.".format(first_album_budget, second_album_budget)
+ "Setting first album's budget to {} and the second album's "
+ "budget to {}.".format(first_album_budget, second_album_budget)
)
# Update the rows.
transaction.update(
- table="Albums",
- columns=("SingerId", "AlbumId", "MarketingBudget"),
- values=[(1, 1, first_album_budget), (2, 2, second_album_budget)],
+ table="Albums",
+ columns=("SingerId", "AlbumId", "MarketingBudget"),
+ values=[(1, 1, first_album_budget), (2, 2, second_album_budget)],
)
database.run_in_transaction(update_albums)
@@ -363,7 +361,7 @@ def query_data_with_new_column(instance_id, database_id):
with database.snapshot() as snapshot:
results = snapshot.execute_sql(
- "SELECT SingerId, AlbumId, MarketingBudget FROM Albums"
+ "SELECT SingerId, AlbumId, MarketingBudget FROM Albums"
)
for row in results:
@@ -381,7 +379,7 @@ def add_index(instance_id, database_id):
database = instance.database(database_id)
operation = database.update_ddl(
- ["CREATE INDEX AlbumsByAlbumTitle ON Albums(AlbumTitle)"]
+ ["CREATE INDEX AlbumsByAlbumTitle ON Albums(AlbumTitle)"]
)
print("Waiting for operation to complete...")
@@ -410,10 +408,10 @@ def read_data_with_index(instance_id, database_id):
with database.snapshot() as snapshot:
keyset = spanner.KeySet(all_=True)
results = snapshot.read(
- table="Albums",
- columns=("AlbumId", "AlbumTitle"),
- keyset=keyset,
- index="AlbumsByAlbumTitle",
+ table="Albums",
+ columns=("AlbumId", "AlbumTitle"),
+ keyset=keyset,
+ index="AlbumsByAlbumTitle",
)
for row in results:
@@ -431,10 +429,10 @@ def add_storing_index(instance_id, database_id):
database = instance.database(database_id)
operation = database.update_ddl(
- [
- "CREATE INDEX AlbumsByAlbumTitle2 ON Albums(AlbumTitle)"
- "INCLUDE (MarketingBudget)"
- ]
+ [
+ "CREATE INDEX AlbumsByAlbumTitle2 ON Albums(AlbumTitle)"
+ "INCLUDE (MarketingBudget)"
+ ]
)
print("Waiting for operation to complete...")
@@ -466,15 +464,14 @@ def read_data_with_storing_index(instance_id, database_id):
with database.snapshot() as snapshot:
keyset = spanner.KeySet(all_=True)
results = snapshot.read(
- table="Albums",
- columns=("AlbumId", "AlbumTitle", "MarketingBudget"),
- keyset=keyset,
- index="AlbumsByAlbumTitle2",
+ table="Albums",
+ columns=("AlbumId", "AlbumTitle", "MarketingBudget"),
+ keyset=keyset,
+ index="AlbumsByAlbumTitle2",
)
for row in results:
- print("AlbumId: {}, AlbumTitle: {}, " "MarketingBudget: {}".format(
- *row))
+ print("AlbumId: {}, AlbumTitle: {}, " "MarketingBudget: {}".format(*row))
# [END spanner_postgresql_read_data_with_storing_index]
@@ -494,7 +491,7 @@ def read_only_transaction(instance_id, database_id):
with database.snapshot(multi_use=True) as snapshot:
# Read using SQL.
results = snapshot.execute_sql(
- "SELECT SingerId, AlbumId, AlbumTitle FROM Albums"
+ "SELECT SingerId, AlbumId, AlbumTitle FROM Albums"
)
print("Results from first read:")
@@ -506,8 +503,7 @@ def read_only_transaction(instance_id, database_id):
# return the same data.
keyset = spanner.KeySet(all_=True)
results = snapshot.read(
- table="Albums", columns=("SingerId", "AlbumId", "AlbumTitle"),
- keyset=keyset
+ table="Albums", columns=("SingerId", "AlbumId", "AlbumTitle"), keyset=keyset
)
print("Results from second read:")
@@ -529,11 +525,11 @@ def insert_with_dml(instance_id, database_id):
def insert_singers(transaction):
row_ct = transaction.execute_update(
- "INSERT INTO Singers (SingerId, FirstName, LastName) VALUES "
- "(12, 'Melissa', 'Garcia'), "
- "(13, 'Russell', 'Morales'), "
- "(14, 'Jacqueline', 'Long'), "
- "(15, 'Dylan', 'Shaw')"
+ "INSERT INTO Singers (SingerId, FirstName, LastName) VALUES "
+ "(12, 'Melissa', 'Garcia'), "
+ "(13, 'Russell', 'Morales'), "
+ "(14, 'Jacqueline', 'Long'), "
+ "(15, 'Dylan', 'Shaw')"
)
print("{} record(s) inserted.".format(row_ct))
@@ -542,7 +538,7 @@ def insert_singers(transaction):
def insert_with_dml_returning(instance_id, database_id):
- """Inserts sample data into the given database using a DML statement having a RETURNING clause. """
+ """Inserts sample data into the given database using a DML statement having a RETURNING clause."""
# [START spanner_postgresql_dml_insert_returning]
# instance_id = "your-spanner-instance"
# database_id = "your-spanner-db-id"
@@ -584,9 +580,9 @@ def query_data_with_parameter(instance_id, database_id):
with database.snapshot() as snapshot:
results = snapshot.execute_sql(
- "SELECT SingerId, FirstName, LastName FROM Singers " "WHERE LastName = $1",
- params={"p1": "Garcia"},
- param_types={"p1": spanner.param_types.STRING},
+ "SELECT SingerId, FirstName, LastName FROM Singers " "WHERE LastName = $1",
+ params={"p1": "Garcia"},
+ param_types={"p1": spanner.param_types.STRING},
)
for row in results:
@@ -608,7 +604,7 @@ def transfer_budget(transaction):
# Transfer marketing budget from one album to another. Performed in a
# single transaction to ensure that the transfer is atomic.
second_album_result = transaction.execute_sql(
- "SELECT MarketingBudget from Albums " "WHERE SingerId = 2 and AlbumId = 2"
+ "SELECT MarketingBudget from Albums " "WHERE SingerId = 2 and AlbumId = 2"
)
second_album_row = list(second_album_result)[0]
second_album_budget = second_album_row[0]
@@ -620,8 +616,8 @@ def transfer_budget(transaction):
# will be rerun by the client library
if second_album_budget >= transfer_amount:
first_album_result = transaction.execute_sql(
- "SELECT MarketingBudget from Albums "
- "WHERE SingerId = 1 and AlbumId = 1"
+ "SELECT MarketingBudget from Albums "
+ "WHERE SingerId = 1 and AlbumId = 1"
)
first_album_row = list(first_album_result)[0]
first_album_budget = first_album_row[0]
@@ -631,26 +627,26 @@ def transfer_budget(transaction):
# Update first album
transaction.execute_update(
- "UPDATE Albums "
- "SET MarketingBudget = $1 "
- "WHERE SingerId = 1 and AlbumId = 1",
- params={"p1": first_album_budget},
- param_types={"p1": spanner.param_types.INT64},
+ "UPDATE Albums "
+ "SET MarketingBudget = $1 "
+ "WHERE SingerId = 1 and AlbumId = 1",
+ params={"p1": first_album_budget},
+ param_types={"p1": spanner.param_types.INT64},
)
# Update second album
transaction.execute_update(
- "UPDATE Albums "
- "SET MarketingBudget = $1 "
- "WHERE SingerId = 2 and AlbumId = 2",
- params={"p1": second_album_budget},
- param_types={"p1": spanner.param_types.INT64},
+ "UPDATE Albums "
+ "SET MarketingBudget = $1 "
+ "WHERE SingerId = 2 and AlbumId = 2",
+ params={"p1": second_album_budget},
+ param_types={"p1": spanner.param_types.INT64},
)
print(
- "Transferred {} from Album2's budget to Album1's".format(
- transfer_amount
- )
+ "Transferred {} from Album2's budget to Album1's".format(
+ transfer_amount
+ )
)
database.run_in_transaction(transfer_budget)
@@ -671,9 +667,9 @@ def read_stale_data(instance_id, database_id):
with database.snapshot(exact_staleness=staleness) as snapshot:
keyset = spanner.KeySet(all_=True)
results = snapshot.read(
- table="Albums",
- columns=("SingerId", "AlbumId", "MarketingBudget"),
- keyset=keyset,
+ table="Albums",
+ columns=("SingerId", "AlbumId", "MarketingBudget"),
+ keyset=keyset,
)
for row in results:
@@ -706,13 +702,12 @@ def update_data_with_timestamp(instance_id, database_id):
with database.batch() as batch:
batch.update(
- table="Albums",
- columns=(
- "SingerId", "AlbumId", "MarketingBudget", "LastUpdateTime"),
- values=[
- (1, 1, 1000000, spanner.COMMIT_TIMESTAMP),
- (2, 2, 750000, spanner.COMMIT_TIMESTAMP),
- ],
+ table="Albums",
+ columns=("SingerId", "AlbumId", "MarketingBudget", "LastUpdateTime"),
+ values=[
+ (1, 1, 1000000, spanner.COMMIT_TIMESTAMP),
+ (2, 2, 750000, spanner.COMMIT_TIMESTAMP),
+ ],
)
print("Updated data.")
@@ -730,17 +725,16 @@ def add_timestamp_column(instance_id, database_id):
database = instance.database(database_id)
operation = database.update_ddl(
- [
- "ALTER TABLE Albums ADD COLUMN LastUpdateTime SPANNER.COMMIT_TIMESTAMP"]
+ ["ALTER TABLE Albums ADD COLUMN LastUpdateTime SPANNER.COMMIT_TIMESTAMP"]
)
print("Waiting for operation to complete...")
operation.result(OPERATION_TIMEOUT_SECONDS)
print(
- 'Altered table "Albums" on database {} on instance {}.'.format(
- database_id, instance_id
- )
+ 'Altered table "Albums" on database {} on instance {}.'.format(
+ database_id, instance_id
+ )
)
@@ -767,8 +761,8 @@ def query_data_with_timestamp(instance_id, database_id):
with database.snapshot() as snapshot:
results = snapshot.execute_sql(
- "SELECT SingerId, AlbumId, MarketingBudget FROM Albums "
- "ORDER BY LastUpdateTime DESC"
+ "SELECT SingerId, AlbumId, MarketingBudget FROM Albums "
+ "ORDER BY LastUpdateTime DESC"
)
for row in results:
@@ -787,9 +781,9 @@ def create_table_with_timestamp(instance_id, database_id):
database = instance.database(database_id)
request = spanner_admin_database_v1.UpdateDatabaseDdlRequest(
- database=database.name,
- statements=[
- """CREATE TABLE Performances (
+ database=database.name,
+ statements=[
+ """CREATE TABLE Performances (
SingerId BIGINT NOT NULL,
VenueId BIGINT NOT NULL,
EventDate Date,
@@ -797,7 +791,7 @@ def create_table_with_timestamp(instance_id, database_id):
LastUpdateTime SPANNER.COMMIT_TIMESTAMP NOT NULL,
PRIMARY KEY (SingerId, VenueId, EventDate))
INTERLEAVE IN PARENT Singers ON DELETE CASCADE"""
- ],
+ ],
)
operation = spanner_client.database_admin_api.update_database_ddl(request)
@@ -805,9 +799,9 @@ def create_table_with_timestamp(instance_id, database_id):
operation.result(OPERATION_TIMEOUT_SECONDS)
print(
- "Created Performances table on database {} on instance {}".format(
- database_id, instance_id
- )
+ "Created Performances table on database {} on instance {}".format(
+ database_id, instance_id
+ )
)
@@ -825,14 +819,13 @@ def insert_data_with_timestamp(instance_id, database_id):
with database.batch() as batch:
batch.insert(
- table="Performances",
- columns=(
- "SingerId", "VenueId", "EventDate", "Revenue", "LastUpdateTime"),
- values=[
- (1, 4, "2017-10-05", 11000, spanner.COMMIT_TIMESTAMP),
- (1, 19, "2017-11-02", 15000, spanner.COMMIT_TIMESTAMP),
- (2, 42, "2017-12-23", 7000, spanner.COMMIT_TIMESTAMP),
- ],
+ table="Performances",
+ columns=("SingerId", "VenueId", "EventDate", "Revenue", "LastUpdateTime"),
+ values=[
+ (1, 4, "2017-10-05", 11000, spanner.COMMIT_TIMESTAMP),
+ (1, 19, "2017-11-02", 15000, spanner.COMMIT_TIMESTAMP),
+ (2, 42, "2017-12-23", 7000, spanner.COMMIT_TIMESTAMP),
+ ],
)
print("Inserted data.")
@@ -853,8 +846,8 @@ def insert_data_with_dml(instance_id, database_id):
def insert_singers(transaction):
row_ct = transaction.execute_update(
- "INSERT INTO Singers (SingerId, FirstName, LastName) "
- " VALUES (10, 'Virginia', 'Watson')"
+ "INSERT INTO Singers (SingerId, FirstName, LastName) "
+ " VALUES (10, 'Virginia', 'Watson')"
)
print("{} record(s) inserted.".format(row_ct))
@@ -875,9 +868,9 @@ def update_data_with_dml(instance_id, database_id):
def update_albums(transaction):
row_ct = transaction.execute_update(
- "UPDATE Albums "
- "SET MarketingBudget = MarketingBudget * 2 "
- "WHERE SingerId = 1 and AlbumId = 1"
+ "UPDATE Albums "
+ "SET MarketingBudget = MarketingBudget * 2 "
+ "WHERE SingerId = 1 and AlbumId = 1"
)
print("{} record(s) updated.".format(row_ct))
@@ -929,7 +922,7 @@ def delete_data_with_dml(instance_id, database_id):
def delete_singers(transaction):
row_ct = transaction.execute_update(
- "DELETE FROM Singers WHERE FirstName = 'Alice'"
+ "DELETE FROM Singers WHERE FirstName = 'Alice'"
)
print("{} record(s) deleted.".format(row_ct))
@@ -939,7 +932,7 @@ def delete_singers(transaction):
def delete_data_with_dml_returning(instance_id, database_id):
- """Deletes sample data from the database using a DML statement having a RETURNING clause. """
+ """Deletes sample data from the database using a DML statement having a RETURNING clause."""
# [START spanner_postgresql_dml_delete_returning]
# instance_id = "your-spanner-instance"
# database_id = "your-spanner-db-id"
@@ -980,14 +973,14 @@ def dml_write_read_transaction(instance_id, database_id):
def write_then_read(transaction):
# Insert record.
row_ct = transaction.execute_update(
- "INSERT INTO Singers (SingerId, FirstName, LastName) "
- " VALUES (11, 'Timothy', 'Campbell')"
+ "INSERT INTO Singers (SingerId, FirstName, LastName) "
+ " VALUES (11, 'Timothy', 'Campbell')"
)
print("{} record(s) inserted.".format(row_ct))
# Read newly inserted record.
results = transaction.execute_sql(
- "SELECT FirstName, LastName FROM Singers WHERE SingerId = 11"
+ "SELECT FirstName, LastName FROM Singers WHERE SingerId = 11"
)
for result in results:
print("FirstName: {}, LastName: {}".format(*result))
@@ -1007,7 +1000,7 @@ def update_data_with_partitioned_dml(instance_id, database_id):
database = instance.database(database_id)
row_ct = database.execute_partitioned_dml(
- "UPDATE Albums SET MarketingBudget = 100000 WHERE SingerId > 1"
+ "UPDATE Albums SET MarketingBudget = 100000 WHERE SingerId > 1"
)
print("{} records updated.".format(row_ct))
@@ -1023,8 +1016,7 @@ def delete_data_with_partitioned_dml(instance_id, database_id):
instance = spanner_client.instance(instance_id)
database = instance.database(database_id)
- row_ct = database.execute_partitioned_dml(
- "DELETE FROM Singers WHERE SingerId > 10")
+ row_ct = database.execute_partitioned_dml("DELETE FROM Singers WHERE SingerId > 10")
print("{} record(s) deleted.".format(row_ct))
# [END spanner_postgresql_dml_partitioned_delete]
@@ -1043,20 +1035,19 @@ def update_with_batch_dml(instance_id, database_id):
database = instance.database(database_id)
insert_statement = (
- "INSERT INTO Albums "
- "(SingerId, AlbumId, AlbumTitle, MarketingBudget) "
- "VALUES (1, 3, 'Test Album Title', 10000)"
+ "INSERT INTO Albums "
+ "(SingerId, AlbumId, AlbumTitle, MarketingBudget) "
+ "VALUES (1, 3, 'Test Album Title', 10000)"
)
update_statement = (
- "UPDATE Albums "
- "SET MarketingBudget = MarketingBudget * 2 "
- "WHERE SingerId = 1 and AlbumId = 3"
+ "UPDATE Albums "
+ "SET MarketingBudget = MarketingBudget * 2 "
+ "WHERE SingerId = 1 and AlbumId = 3"
)
def update_albums(transaction):
- status, row_cts = transaction.batch_update(
- [insert_statement, update_statement])
+ status, row_cts = transaction.batch_update([insert_statement, update_statement])
if status.code != OK:
# Do handling here.
@@ -1064,8 +1055,7 @@ def update_albums(transaction):
# `commit` is called by `run_in_transaction`.
return
- print(
- "Executed {} SQL statements using Batch DML.".format(len(row_cts)))
+ print("Executed {} SQL statements using Batch DML.".format(len(row_cts)))
database.run_in_transaction(update_albums)
# [END spanner_postgresql_dml_batch_update]
@@ -1081,9 +1071,9 @@ def create_table_with_datatypes(instance_id, database_id):
database = instance.database(database_id)
request = spanner_admin_database_v1.UpdateDatabaseDdlRequest(
- database=database.name,
- statements=[
- """CREATE TABLE Venues (
+ database=database.name,
+ statements=[
+ """CREATE TABLE Venues (
VenueId BIGINT NOT NULL,
VenueName character varying(100),
VenueInfo BYTEA,
@@ -1093,7 +1083,7 @@ def create_table_with_datatypes(instance_id, database_id):
Revenue NUMERIC,
LastUpdateTime SPANNER.COMMIT_TIMESTAMP NOT NULL,
PRIMARY KEY (VenueId))"""
- ],
+ ],
)
operation = spanner_client.database_admin_api.update_database_ddl(request)
@@ -1101,9 +1091,9 @@ def create_table_with_datatypes(instance_id, database_id):
operation.result(OPERATION_TIMEOUT_SECONDS)
print(
- "Created Venues table on database {} on instance {}".format(
- database_id, instance_id
- )
+ "Created Venues table on database {} on instance {}".format(
+ database_id, instance_id
+ )
)
# [END spanner_postgresql_create_table_with_datatypes]
@@ -1122,49 +1112,49 @@ def insert_datatypes_data(instance_id, database_id):
exampleBytes3 = base64.b64encode("Hello World 3".encode())
with database.batch() as batch:
batch.insert(
- table="Venues",
- columns=(
- "VenueId",
- "VenueName",
- "VenueInfo",
- "Capacity",
- "OutdoorVenue",
- "PopularityScore",
- "Revenue",
- "LastUpdateTime",
- ),
- values=[
- (
- 4,
- "Venue 4",
- exampleBytes1,
- 1800,
- False,
- 0.85543,
- decimal.Decimal("215100.10"),
- spanner.COMMIT_TIMESTAMP,
- ),
- (
- 19,
- "Venue 19",
- exampleBytes2,
- 6300,
- True,
- 0.98716,
- decimal.Decimal("1200100.00"),
- spanner.COMMIT_TIMESTAMP,
- ),
- (
- 42,
- "Venue 42",
- exampleBytes3,
- 3000,
- False,
- 0.72598,
- decimal.Decimal("390650.99"),
- spanner.COMMIT_TIMESTAMP,
+ table="Venues",
+ columns=(
+ "VenueId",
+ "VenueName",
+ "VenueInfo",
+ "Capacity",
+ "OutdoorVenue",
+ "PopularityScore",
+ "Revenue",
+ "LastUpdateTime",
),
- ],
+ values=[
+ (
+ 4,
+ "Venue 4",
+ exampleBytes1,
+ 1800,
+ False,
+ 0.85543,
+ decimal.Decimal("215100.10"),
+ spanner.COMMIT_TIMESTAMP,
+ ),
+ (
+ 19,
+ "Venue 19",
+ exampleBytes2,
+ 6300,
+ True,
+ 0.98716,
+ decimal.Decimal("1200100.00"),
+ spanner.COMMIT_TIMESTAMP,
+ ),
+ (
+ 42,
+ "Venue 42",
+ exampleBytes3,
+ 3000,
+ False,
+ 0.72598,
+ decimal.Decimal("390650.99"),
+ spanner.COMMIT_TIMESTAMP,
+ ),
+ ],
)
print("Inserted data.")
@@ -1186,10 +1176,10 @@ def query_data_with_bool(instance_id, database_id):
with database.snapshot() as snapshot:
results = snapshot.execute_sql(
- "SELECT VenueId, VenueName, OutdoorVenue FROM Venues "
- "WHERE OutdoorVenue = $1",
- params=param,
- param_types=param_type,
+ "SELECT VenueId, VenueName, OutdoorVenue FROM Venues "
+ "WHERE OutdoorVenue = $1",
+ params=param,
+ param_types=param_type,
)
for row in results:
@@ -1212,9 +1202,9 @@ def query_data_with_bytes(instance_id, database_id):
with database.snapshot() as snapshot:
results = snapshot.execute_sql(
- "SELECT VenueId, VenueName FROM Venues " "WHERE VenueInfo = $1",
- params=param,
- param_types=param_type,
+ "SELECT VenueId, VenueName FROM Venues " "WHERE VenueInfo = $1",
+ params=param,
+ param_types=param_type,
)
for row in results:
@@ -1237,15 +1227,14 @@ def query_data_with_float(instance_id, database_id):
with database.snapshot() as snapshot:
results = snapshot.execute_sql(
- "SELECT VenueId, VenueName, PopularityScore FROM Venues "
- "WHERE PopularityScore > $1",
- params=param,
- param_types=param_type,
+ "SELECT VenueId, VenueName, PopularityScore FROM Venues "
+ "WHERE PopularityScore > $1",
+ params=param,
+ param_types=param_type,
)
for row in results:
- print(
- "VenueId: {}, VenueName: {}, PopularityScore: {}".format(*row))
+ print("VenueId: {}, VenueName: {}, PopularityScore: {}".format(*row))
# [END spanner_postgresql_query_with_float_parameter]
@@ -1264,9 +1253,9 @@ def query_data_with_int(instance_id, database_id):
with database.snapshot() as snapshot:
results = snapshot.execute_sql(
- "SELECT VenueId, VenueName, Capacity FROM Venues " "WHERE Capacity >= $1",
- params=param,
- param_types=param_type,
+ "SELECT VenueId, VenueName, Capacity FROM Venues " "WHERE Capacity >= $1",
+ params=param,
+ param_types=param_type,
)
for row in results:
@@ -1289,9 +1278,9 @@ def query_data_with_string(instance_id, database_id):
with database.snapshot() as snapshot:
results = snapshot.execute_sql(
- "SELECT VenueId, VenueName FROM Venues " "WHERE VenueName = $1",
- params=param,
- param_types=param_type,
+ "SELECT VenueId, VenueName FROM Venues " "WHERE VenueName = $1",
+ params=param,
+ param_types=param_type,
)
for row in results:
@@ -1312,18 +1301,19 @@ def query_data_with_timestamp_parameter(instance_id, database_id):
# [END spanner_postgresql_query_with_timestamp_parameter]
# Avoid time drift on the local machine.
# https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4197.
- example_timestamp = (datetime.datetime.utcnow() + datetime.timedelta(days=1)
- ).isoformat() + "Z"
+ example_timestamp = (
+ datetime.datetime.utcnow() + datetime.timedelta(days=1)
+ ).isoformat() + "Z"
# [START spanner_postgresql_query_with_timestamp_parameter]
param = {"p1": example_timestamp}
param_type = {"p1": param_types.TIMESTAMP}
with database.snapshot() as snapshot:
results = snapshot.execute_sql(
- "SELECT VenueId, VenueName, LastUpdateTime FROM Venues "
- "WHERE LastUpdateTime < $1",
- params=param,
- param_types=param_type,
+ "SELECT VenueId, VenueName, LastUpdateTime FROM Venues "
+ "WHERE LastUpdateTime < $1",
+ params=param,
+ param_types=param_type,
)
for row in results:
@@ -1350,13 +1340,13 @@ def update_data_with_numeric(instance_id, database_id):
with database.batch() as batch:
batch.update(
- table="Venues",
- columns=("VenueId", "Revenue"),
- values=[
- (4, decimal.Decimal("35000")),
- (19, decimal.Decimal("104500")),
- (42, decimal.Decimal("99999999999999999999999999999.99")),
- ],
+ table="Venues",
+ columns=("VenueId", "Revenue"),
+ values=[
+ (4, decimal.Decimal("35000")),
+ (19, decimal.Decimal("104500")),
+ (42, decimal.Decimal("99999999999999999999999999999.99")),
+ ],
)
print("Updated data.")
@@ -1380,9 +1370,9 @@ def query_data_with_numeric_parameter(instance_id, database_id):
with database.snapshot() as snapshot:
results = snapshot.execute_sql(
- "SELECT VenueId, Revenue FROM Venues WHERE Revenue < $1",
- params=param,
- param_types=param_type,
+ "SELECT VenueId, Revenue FROM Venues WHERE Revenue < $1",
+ params=param,
+ param_types=param_type,
)
for row in results:
@@ -1396,17 +1386,17 @@ def create_client_with_query_options(instance_id, database_id):
# instance_id = "your-spanner-instance"
# database_id = "your-spanner-db-id"
spanner_client = spanner.Client(
- query_options={
- "optimizer_version": "1",
- "optimizer_statistics_package": "latest",
- }
+ query_options={
+ "optimizer_version": "1",
+ "optimizer_statistics_package": "latest",
+ }
)
instance = spanner_client.instance(instance_id)
database = instance.database(database_id)
with database.snapshot() as snapshot:
results = snapshot.execute_sql(
- "SELECT VenueId, VenueName, LastUpdateTime FROM Venues"
+ "SELECT VenueId, VenueName, LastUpdateTime FROM Venues"
)
for row in results:
@@ -1425,11 +1415,11 @@ def query_data_with_query_options(instance_id, database_id):
with database.snapshot() as snapshot:
results = snapshot.execute_sql(
- "SELECT VenueId, VenueName, LastUpdateTime FROM Venues",
- query_options={
- "optimizer_version": "1",
- "optimizer_statistics_package": "latest",
- },
+ "SELECT VenueId, VenueName, LastUpdateTime FROM Venues",
+ query_options={
+ "optimizer_version": "1",
+ "optimizer_statistics_package": "latest",
+ },
)
for row in results:
@@ -1511,9 +1501,7 @@ def update_data_with_jsonb(instance_id, database_id):
JsonObject(
[
JsonObject({"name": None, "open": True}),
- JsonObject(
- {"name": "room 2", "open": False}
- ),
+ JsonObject({"name": "room 2", "open": False}),
]
),
),
@@ -1564,15 +1552,127 @@ def query_data_with_jsonb_parameter(instance_id, database_id):
# [END spanner_postgresql_jsonb_query_parameter]
+# [START spanner_postgresql_create_sequence]
+def create_sequence(instance_id, database_id):
+ """Creates the Sequence and insert data"""
+ spanner_client = spanner.Client()
+ instance = spanner_client.instance(instance_id)
+ database = instance.database(database_id)
+
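+    # A bit-reversed positive sequence yields unique, uniformly distributed
+    # positive INT64 values, avoiding the write hotspots that monotonically
+    # increasing primary keys can create.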
+ request = spanner_admin_database_v1.UpdateDatabaseDdlRequest(
+ database=database.name,
+ statements=[
+ "CREATE SEQUENCE Seq BIT_REVERSED_POSITIVE",
+ """CREATE TABLE Customers (
+ CustomerId BIGINT DEFAULT nextval('Seq'),
+ CustomerName character varying(1024),
+ PRIMARY KEY (CustomerId)
+ )""",
+ ],
+ )
+ operation = spanner_client.database_admin_api.update_database_ddl(request)
+ print("Waiting for operation to complete...")
+ operation.result(OPERATION_TIMEOUT_SECONDS)
+
+ print(
+ "Created Seq sequence and Customers table, where the key column CustomerId uses the sequence as a default value on database {} on instance {}".format(
+ database_id, instance_id
+ )
+ )
+
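+    # RETURNING surfaces the sequence-generated CustomerId for each row
+    # inserted within the read-write transaction.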
+ def insert_customers(transaction):
+ results = transaction.execute_sql(
+ "INSERT INTO Customers (CustomerName) VALUES "
+ "('Alice'), "
+ "('David'), "
+ "('Marc') "
+ "RETURNING CustomerId"
+ )
+ for result in results:
+ print("Inserted customer record with Customer Id: {}".format(*result))
+ print(
+ "Number of customer records inserted is {}".format(
+ results.stats.row_count_exact
+ )
+ )
+
+ database.run_in_transaction(insert_customers)
+
+
+# [END spanner_postgresql_create_sequence]
+
+# [START spanner_postgresql_alter_sequence]
+def alter_sequence(instance_id, database_id):
+ """Alters the Sequence and insert data"""
+ spanner_client = spanner.Client()
+ instance = spanner_client.instance(instance_id)
+ database = instance.database(database_id)
+
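+    # SKIP RANGE excludes the inclusive range from future generated values,
+    # e.g. to avoid colliding with keys that already exist in the table.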
+ operation = database.update_ddl(["ALTER SEQUENCE Seq SKIP RANGE 1000 5000000"])
+
+ print("Waiting for operation to complete...")
+ operation.result(OPERATION_TIMEOUT_SECONDS)
+
+ print(
+ "Altered Seq sequence to skip an inclusive range between 1000 and 5000000 on database {} on instance {}".format(
+ database_id, instance_id
+ )
+ )
+
+ def insert_customers(transaction):
+ results = transaction.execute_sql(
+ "INSERT INTO Customers (CustomerName) VALUES "
+ "('Lea'), "
+ "('Cataline'), "
+ "('Smith') "
+ "RETURNING CustomerId"
+ )
+ for result in results:
+ print("Inserted customer record with Customer Id: {}".format(*result))
+ print(
+ "Number of customer records inserted is {}".format(
+ results.stats.row_count_exact
+ )
+ )
+
+ database.run_in_transaction(insert_customers)
+
+
+# [END spanner_postgresql_alter_sequence]
+
+# [START spanner_postgresql_drop_sequence]
+def drop_sequence(instance_id, database_id):
+ """Drops the Sequence"""
+ spanner_client = spanner.Client()
+ instance = spanner_client.instance(instance_id)
+ database = instance.database(database_id)
+
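+    # The column default must be dropped first; DROP SEQUENCE fails while a
+    # default expression still references the sequence.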
+ operation = database.update_ddl(
+ [
+ "ALTER TABLE Customers ALTER COLUMN CustomerId DROP DEFAULT",
+ "DROP SEQUENCE Seq",
+ ]
+ )
+
+ print("Waiting for operation to complete...")
+ operation.result(OPERATION_TIMEOUT_SECONDS)
+
+ print(
+ "Altered Customers table to drop DEFAULT from CustomerId column and dropped the Seq sequence on database {} on instance {}".format(
+ database_id, instance_id
+ )
+ )
+
+
+# [END spanner_postgresql_drop_sequence]
+
if __name__ == "__main__": # noqa: C901
parser = argparse.ArgumentParser(
- description=__doc__,
- formatter_class=argparse.RawDescriptionHelpFormatter
+ description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("instance_id", help="Your Cloud Spanner instance ID.")
parser.add_argument(
- "--database-id", help="Your Cloud Spanner database ID.",
- default="example_db"
+ "--database-id", help="Your Cloud Spanner database ID.", default="example_db"
)
subparsers = parser.add_subparsers(dest="command")
@@ -1586,98 +1686,91 @@ def query_data_with_jsonb_parameter(instance_id, database_id):
subparsers.add_parser("add_column", help=add_column.__doc__)
subparsers.add_parser("update_data", help=update_data.__doc__)
subparsers.add_parser(
- "query_data_with_new_column", help=query_data_with_new_column.__doc__
+ "query_data_with_new_column", help=query_data_with_new_column.__doc__
)
- subparsers.add_parser("read_write_transaction",
- help=read_write_transaction.__doc__)
- subparsers.add_parser("read_only_transaction",
- help=read_only_transaction.__doc__)
+ subparsers.add_parser("read_write_transaction", help=read_write_transaction.__doc__)
+ subparsers.add_parser("read_only_transaction", help=read_only_transaction.__doc__)
subparsers.add_parser("add_index", help=add_index.__doc__)
- subparsers.add_parser("read_data_with_index",
- help=read_data_with_index.__doc__)
+ subparsers.add_parser("read_data_with_index", help=read_data_with_index.__doc__)
subparsers.add_parser("add_storing_index", help=add_storing_index.__doc__)
- subparsers.add_parser("read_data_with_storing_index",
- help=read_data_with_storing_index.__doc__)
subparsers.add_parser(
- "create_table_with_timestamp", help=create_table_with_timestamp.__doc__
+ "read_data_with_storing_index", help=read_data_with_storing_index.__doc__
)
subparsers.add_parser(
- "insert_data_with_timestamp", help=insert_data_with_timestamp.__doc__
+ "create_table_with_timestamp", help=create_table_with_timestamp.__doc__
)
- subparsers.add_parser("add_timestamp_column",
- help=add_timestamp_column.__doc__)
subparsers.add_parser(
- "update_data_with_timestamp", help=update_data_with_timestamp.__doc__
+ "insert_data_with_timestamp", help=insert_data_with_timestamp.__doc__
)
+ subparsers.add_parser("add_timestamp_column", help=add_timestamp_column.__doc__)
subparsers.add_parser(
- "query_data_with_timestamp", help=query_data_with_timestamp.__doc__
+ "update_data_with_timestamp", help=update_data_with_timestamp.__doc__
)
- subparsers.add_parser("insert_data_with_dml",
- help=insert_data_with_dml.__doc__)
- subparsers.add_parser("update_data_with_dml",
- help=update_data_with_dml.__doc__)
- subparsers.add_parser("update_data_with_dml",
- help=update_data_with_dml_returning.__doc__)
- subparsers.add_parser("delete_data_with_dml",
- help=delete_data_with_dml.__doc__)
- subparsers.add_parser("delete_data_with_dml_returning",
- help=delete_data_with_dml_returning.__doc__)
subparsers.add_parser(
- "dml_write_read_transaction", help=dml_write_read_transaction.__doc__
+ "query_data_with_timestamp", help=query_data_with_timestamp.__doc__
+ )
+ subparsers.add_parser("insert_data_with_dml", help=insert_data_with_dml.__doc__)
+ subparsers.add_parser("update_data_with_dml", help=update_data_with_dml.__doc__)
+ subparsers.add_parser(
+ "update_data_with_dml", help=update_data_with_dml_returning.__doc__
+ )
+ subparsers.add_parser("delete_data_with_dml", help=delete_data_with_dml.__doc__)
+ subparsers.add_parser(
+ "delete_data_with_dml_returning", help=delete_data_with_dml_returning.__doc__
+ )
+ subparsers.add_parser(
+ "dml_write_read_transaction", help=dml_write_read_transaction.__doc__
)
subparsers.add_parser("insert_with_dml", help=insert_with_dml.__doc__)
- subparsers.add_parser("insert_with_dml_returning", help=insert_with_dml_returning.__doc__)
subparsers.add_parser(
- "query_data_with_parameter", help=query_data_with_parameter.__doc__
+ "insert_with_dml_returning", help=insert_with_dml_returning.__doc__
+ )
+ subparsers.add_parser(
+ "query_data_with_parameter", help=query_data_with_parameter.__doc__
)
subparsers.add_parser(
- "write_with_dml_transaction", help=write_with_dml_transaction.__doc__
+ "write_with_dml_transaction", help=write_with_dml_transaction.__doc__
)
subparsers.add_parser(
- "update_data_with_partitioned_dml",
- help=update_data_with_partitioned_dml.__doc__,
+ "update_data_with_partitioned_dml",
+ help=update_data_with_partitioned_dml.__doc__,
)
subparsers.add_parser(
- "delete_data_with_partitioned_dml",
- help=delete_data_with_partitioned_dml.__doc__,
+ "delete_data_with_partitioned_dml",
+ help=delete_data_with_partitioned_dml.__doc__,
)
- subparsers.add_parser("update_with_batch_dml",
- help=update_with_batch_dml.__doc__)
+ subparsers.add_parser("update_with_batch_dml", help=update_with_batch_dml.__doc__)
subparsers.add_parser(
- "create_table_with_datatypes", help=create_table_with_datatypes.__doc__
+ "create_table_with_datatypes", help=create_table_with_datatypes.__doc__
)
- subparsers.add_parser("insert_datatypes_data",
- help=insert_datatypes_data.__doc__)
- subparsers.add_parser("query_data_with_bool",
- help=query_data_with_bool.__doc__)
- subparsers.add_parser("query_data_with_bytes",
- help=query_data_with_bytes.__doc__)
- subparsers.add_parser("query_data_with_float",
- help=query_data_with_float.__doc__)
- subparsers.add_parser("query_data_with_int",
- help=query_data_with_int.__doc__)
- subparsers.add_parser("query_data_with_string",
- help=query_data_with_string.__doc__)
+ subparsers.add_parser("insert_datatypes_data", help=insert_datatypes_data.__doc__)
+ subparsers.add_parser("query_data_with_bool", help=query_data_with_bool.__doc__)
+ subparsers.add_parser("query_data_with_bytes", help=query_data_with_bytes.__doc__)
+ subparsers.add_parser("query_data_with_float", help=query_data_with_float.__doc__)
+ subparsers.add_parser("query_data_with_int", help=query_data_with_int.__doc__)
+ subparsers.add_parser("query_data_with_string", help=query_data_with_string.__doc__)
subparsers.add_parser(
- "query_data_with_timestamp_parameter",
- help=query_data_with_timestamp_parameter.__doc__,
+ "query_data_with_timestamp_parameter",
+ help=query_data_with_timestamp_parameter.__doc__,
)
subparsers.add_parser(
- "update_data_with_numeric",
- help=update_data_with_numeric.__doc__,
+ "update_data_with_numeric",
+ help=update_data_with_numeric.__doc__,
)
subparsers.add_parser(
- "query_data_with_numeric_parameter",
- help=query_data_with_numeric_parameter.__doc__,
+ "query_data_with_numeric_parameter",
+ help=query_data_with_numeric_parameter.__doc__,
)
subparsers.add_parser(
- "query_data_with_query_options",
- help=query_data_with_query_options.__doc__
+ "query_data_with_query_options", help=query_data_with_query_options.__doc__
)
subparsers.add_parser(
- "create_client_with_query_options",
- help=create_client_with_query_options.__doc__,
+ "create_client_with_query_options",
+ help=create_client_with_query_options.__doc__,
)
+ subparsers.add_parser("create_sequence", help=create_sequence.__doc__)
+ subparsers.add_parser("alter_sequence", help=alter_sequence.__doc__)
+ subparsers.add_parser("drop_sequence", help=drop_sequence.__doc__)
args = parser.parse_args()
diff --git a/samples/samples/pg_snippets_test.py b/samples/samples/pg_snippets_test.py
index 679b818ed1..d4f08499d2 100644
--- a/samples/samples/pg_snippets_test.py
+++ b/samples/samples/pg_snippets_test.py
@@ -190,8 +190,7 @@ def test_read_write_transaction(capsys, instance_id, sample_database):
@pytest.mark.dependency(depends=["add_column"])
def test_query_data_with_new_column(capsys, instance_id, sample_database):
- snippets.query_data_with_new_column(instance_id,
- sample_database.database_id)
+ snippets.query_data_with_new_column(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "SingerId: 1, AlbumId: 1, MarketingBudget: 300000" in out
assert "SingerId: 2, AlbumId: 2, MarketingBudget: 300000" in out
@@ -222,8 +221,7 @@ def test_add_storing_index(capsys, instance_id, sample_database):
@pytest.mark.dependency(depends=["add_storing_index"])
def test_read_data_with_storing_index(capsys, instance_id, sample_database):
- snippets.read_data_with_storing_index(instance_id,
- sample_database.database_id)
+ snippets.read_data_with_storing_index(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "300000" in out
@@ -245,8 +243,7 @@ def test_add_timestamp_column(capsys, instance_id, sample_database):
@pytest.mark.dependency(depends=["add_timestamp_column"])
def test_update_data_with_timestamp(capsys, instance_id, sample_database):
- snippets.update_data_with_timestamp(instance_id,
- sample_database.database_id)
+ snippets.update_data_with_timestamp(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "Updated data" in out
@@ -261,16 +258,14 @@ def test_query_data_with_timestamp(capsys, instance_id, sample_database):
@pytest.mark.dependency(name="create_table_with_timestamp")
def test_create_table_with_timestamp(capsys, instance_id, sample_database):
- snippets.create_table_with_timestamp(instance_id,
- sample_database.database_id)
+ snippets.create_table_with_timestamp(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "Created Performances table on database" in out
@pytest.mark.dependency(depends=["create_table_with_timestamp"])
def test_insert_data_with_timestamp(capsys, instance_id, sample_database):
- snippets.insert_data_with_timestamp(instance_id,
- sample_database.database_id)
+ snippets.insert_data_with_timestamp(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "Inserted data." in out
@@ -312,8 +307,7 @@ def test_delete_data_with_dml_returning(capsys, instance_id, sample_database):
@pytest.mark.dependency(name="dml_write_read_transaction")
def test_dml_write_read_transaction(capsys, instance_id, sample_database):
- snippets.dml_write_read_transaction(instance_id,
- sample_database.database_id)
+ snippets.dml_write_read_transaction(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "1 record(s) inserted." in out
assert "FirstName: Timothy, LastName: Campbell" in out
@@ -342,24 +336,21 @@ def test_query_data_with_parameter(capsys, instance_id, sample_database):
@pytest.mark.dependency(depends=["add_column"])
def test_write_with_dml_transaction(capsys, instance_id, sample_database):
- snippets.write_with_dml_transaction(instance_id,
- sample_database.database_id)
+ snippets.write_with_dml_transaction(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "Transferred 200000 from Album2's budget to Album1's" in out
@pytest.mark.dependency(depends=["add_column"])
def update_data_with_partitioned_dml(capsys, instance_id, sample_database):
- snippets.update_data_with_partitioned_dml(instance_id,
- sample_database.database_id)
+ snippets.update_data_with_partitioned_dml(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "3 record(s) updated" in out
@pytest.mark.dependency(depends=["insert_with_dml", "insert_with_dml_returning"])
def test_delete_data_with_partitioned_dml(capsys, instance_id, sample_database):
- snippets.delete_data_with_partitioned_dml(instance_id,
- sample_database.database_id)
+ snippets.delete_data_with_partitioned_dml(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "9 record(s) deleted" in out
@@ -373,15 +364,14 @@ def test_update_with_batch_dml(capsys, instance_id, sample_database):
@pytest.mark.dependency(name="create_table_with_datatypes")
def test_create_table_with_datatypes(capsys, instance_id, sample_database):
- snippets.create_table_with_datatypes(instance_id,
- sample_database.database_id)
+ snippets.create_table_with_datatypes(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "Created Venues table on database" in out
@pytest.mark.dependency(
- name="insert_datatypes_data",
- depends=["create_table_with_datatypes"],
+ name="insert_datatypes_data",
+ depends=["create_table_with_datatypes"],
)
def test_insert_datatypes_data(capsys, instance_id, sample_database):
snippets.insert_datatypes_data(instance_id, sample_database.database_id)
@@ -434,19 +424,16 @@ def test_update_data_with_numeric(capsys, instance_id, sample_database):
@pytest.mark.dependency(depends=["insert_datatypes_data"])
-def test_query_data_with_numeric_parameter(capsys, instance_id,
- sample_database):
- snippets.query_data_with_numeric_parameter(instance_id,
- sample_database.database_id)
+def test_query_data_with_numeric_parameter(capsys, instance_id, sample_database):
+ snippets.query_data_with_numeric_parameter(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "VenueId: 4, Revenue: 35000" in out
@pytest.mark.dependency(depends=["insert_datatypes_data"])
-def test_query_data_with_timestamp_parameter(capsys, instance_id,
- sample_database):
+def test_query_data_with_timestamp_parameter(capsys, instance_id, sample_database):
snippets.query_data_with_timestamp_parameter(
- instance_id, sample_database.database_id
+ instance_id, sample_database.database_id
)
out, _ = capsys.readouterr()
assert "VenueId: 4, VenueName: Venue 4, LastUpdateTime:" in out
@@ -456,8 +443,7 @@ def test_query_data_with_timestamp_parameter(capsys, instance_id,
@pytest.mark.dependency(depends=["insert_datatypes_data"])
def test_query_data_with_query_options(capsys, instance_id, sample_database):
- snippets.query_data_with_query_options(instance_id,
- sample_database.database_id)
+ snippets.query_data_with_query_options(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "VenueId: 4, VenueName: Venue 4, LastUpdateTime:" in out
assert "VenueId: 19, VenueName: Venue 19, LastUpdateTime:" in out
@@ -466,8 +452,7 @@ def test_query_data_with_query_options(capsys, instance_id, sample_database):
@pytest.mark.dependency(depends=["insert_datatypes_data"])
def test_create_client_with_query_options(capsys, instance_id, sample_database):
- snippets.create_client_with_query_options(instance_id,
- sample_database.database_id)
+ snippets.create_client_with_query_options(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "VenueId: 4, VenueName: Venue 4, LastUpdateTime:" in out
assert "VenueId: 19, VenueName: Venue 19, LastUpdateTime:" in out
@@ -494,3 +479,36 @@ def test_query_data_with_jsonb_parameter(capsys, instance_id, sample_database):
snippets.query_data_with_jsonb_parameter(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "VenueId: 19, VenueDetails: {'open': True, 'rating': 9}" in out
+
+
+@pytest.mark.dependency(name="create_sequence")
+def test_create_sequence(capsys, instance_id, bit_reverse_sequence_database):
+ snippets.create_sequence(instance_id, bit_reverse_sequence_database.database_id)
+ out, _ = capsys.readouterr()
+ assert (
+ "Created Seq sequence and Customers table, where the key column CustomerId uses the sequence as a default value on database"
+ in out
+ )
+ assert "Number of customer records inserted is 3" in out
+ assert "Inserted customer record with Customer Id:" in out
+
+
+@pytest.mark.dependency(depends=["create_sequence"])
+def test_alter_sequence(capsys, instance_id, bit_reverse_sequence_database):
+ snippets.alter_sequence(instance_id, bit_reverse_sequence_database.database_id)
+ out, _ = capsys.readouterr()
+ assert (
+ "Altered Seq sequence to skip an inclusive range between 1000 and 5000000 on database"
+ in out
+ )
+ assert "Number of customer records inserted is 3" in out
+ assert "Inserted customer record with Customer Id:" in out
+
+
+@pytest.mark.dependency(depends=["alter_sequence"])
+def test_drop_sequence(capsys, instance_id, bit_reverse_sequence_database):
+ snippets.drop_sequence(instance_id, bit_reverse_sequence_database.database_id)
+ out, _ = capsys.readouterr()
+ assert (
+ "Altered Customers table to drop DEFAULT from CustomerId column and dropped the Seq sequence on database"
+ in out
+ )
diff --git a/samples/samples/requirements.txt b/samples/samples/requirements.txt
index ea28854fbb..4ca3a436c6 100644
--- a/samples/samples/requirements.txt
+++ b/samples/samples/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-spanner==3.33.0
+google-cloud-spanner==3.35.1
futures==3.4.0; python_version < "3"
diff --git a/samples/samples/snippets.py b/samples/samples/snippets.py
index cbcb6b9bdc..82fb95a0dd 100644
--- a/samples/samples/snippets.py
+++ b/samples/samples/snippets.py
@@ -35,6 +35,7 @@
from google.iam.v1 import policy_pb2
from google.cloud.spanner_v1.data_types import JsonObject
from google.protobuf import field_mask_pb2 # type: ignore
+
OPERATION_TIMEOUT_SECONDS = 240
@@ -207,8 +208,7 @@ def update_database(instance_id, database_id):
operation = db.update(["enable_drop_protection"])
- print("Waiting for update operation for {} to complete...".format(
- db.name))
+ print("Waiting for update operation for {} to complete...".format(db.name))
operation.result(OPERATION_TIMEOUT_SECONDS)
print("Updated database {}.".format(db.name))
@@ -1423,7 +1423,7 @@ def delete_singers(transaction):
def delete_data_with_dml_returning(instance_id, database_id):
- """Deletes sample data from the database using a DML statement having a THEN RETURN clause. """
+ """Deletes sample data from the database using a DML statement having a THEN RETURN clause."""
# [START spanner_dml_delete_returning]
# instance_id = "your-spanner-instance"
# database_id = "your-spanner-db-id"
@@ -1559,7 +1559,7 @@ def insert_singers(transaction):
def insert_with_dml_returning(instance_id, database_id):
- """Inserts sample data into the given database using a DML statement having a THEN RETURN clause. """
+ """Inserts sample data into the given database using a DML statement having a THEN RETURN clause."""
# [START spanner_dml_insert_returning]
# instance_id = "your-spanner-instance"
# database_id = "your-spanner-db-id"
@@ -1748,7 +1748,7 @@ def update_albums(transaction):
def create_table_with_datatypes(instance_id, database_id):
- """Creates a table with supported datatypes. """
+ """Creates a table with supported datatypes."""
# [START spanner_create_table_with_datatypes]
# instance_id = "your-spanner-instance"
# database_id = "your-spanner-db-id"
@@ -2471,7 +2471,7 @@ def create_table_with_foreign_key_delete_cascade(instance_id, database_id):
CONSTRAINT FKShoppingCartsCustomerId FOREIGN KEY (CustomerId)
REFERENCES Customers (CustomerId) ON DELETE CASCADE
) PRIMARY KEY (CartId)
- """
+ """,
]
)
@@ -2481,7 +2481,7 @@ def create_table_with_foreign_key_delete_cascade(instance_id, database_id):
print(
"""Created Customers and ShoppingCarts table with FKShoppingCartsCustomerId
foreign key constraint on database {} on instance {}""".format(
- database_id, instance_id
+ database_id, instance_id
)
)
@@ -2512,7 +2512,7 @@ def alter_table_with_foreign_key_delete_cascade(instance_id, database_id):
print(
"""Altered ShoppingCarts table with FKShoppingCartsCustomerName
foreign key constraint on database {} on instance {}""".format(
- database_id, instance_id
+ database_id, instance_id
)
)
@@ -2540,7 +2540,7 @@ def drop_foreign_key_constraint_delete_cascade(instance_id, database_id):
print(
"""Altered ShoppingCarts table to drop FKShoppingCartsCustomerName
foreign key constraint on database {} on instance {}""".format(
- database_id, instance_id
+ database_id, instance_id
)
)
@@ -2548,6 +2548,122 @@ def drop_foreign_key_constraint_delete_cascade(instance_id, database_id):
# [END spanner_drop_foreign_key_constraint_delete_cascade]
+# [START spanner_create_sequence]
+def create_sequence(instance_id, database_id):
+ """Creates the Sequence and insert data"""
+ spanner_client = spanner.Client()
+ instance = spanner_client.instance(instance_id)
+ database = instance.database(database_id)
+
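+    # In GoogleSQL the sequence kind is set via the sequence_kind option,
+    # and the key column default draws values with GET_NEXT_SEQUENCE_VALUE.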
+ operation = database.update_ddl(
+ [
+ "CREATE SEQUENCE Seq OPTIONS (sequence_kind = 'bit_reversed_positive')",
+ """CREATE TABLE Customers (
+ CustomerId INT64 DEFAULT (GET_NEXT_SEQUENCE_VALUE(Sequence Seq)),
+ CustomerName STRING(1024)
+ ) PRIMARY KEY (CustomerId)""",
+ ]
+ )
+
+ print("Waiting for operation to complete...")
+ operation.result(OPERATION_TIMEOUT_SECONDS)
+
+ print(
+ "Created Seq sequence and Customers table, where the key column CustomerId uses the sequence as a default value on database {} on instance {}".format(
+ database_id, instance_id
+ )
+ )
+
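+    # THEN RETURN surfaces the sequence-generated CustomerId for each row
+    # inserted within the read-write transaction.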
+ def insert_customers(transaction):
+ results = transaction.execute_sql(
+ "INSERT INTO Customers (CustomerName) VALUES "
+ "('Alice'), "
+ "('David'), "
+ "('Marc') "
+ "THEN RETURN CustomerId"
+ )
+ for result in results:
+ print("Inserted customer record with Customer Id: {}".format(*result))
+ print(
+ "Number of customer records inserted is {}".format(
+ results.stats.row_count_exact
+ )
+ )
+
+ database.run_in_transaction(insert_customers)
+
+
+# [END spanner_create_sequence]
+
+# [START spanner_alter_sequence]
+def alter_sequence(instance_id, database_id):
+ """Alters the Sequence and insert data"""
+ spanner_client = spanner.Client()
+ instance = spanner_client.instance(instance_id)
+ database = instance.database(database_id)
+
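+    # skip_range_min and skip_range_max exclude an inclusive range from the
+    # generated values, e.g. to avoid keys that already exist in the table.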
+ operation = database.update_ddl(
+ [
+ "ALTER SEQUENCE Seq SET OPTIONS (skip_range_min = 1000, skip_range_max = 5000000)"
+ ]
+ )
+
+ print("Waiting for operation to complete...")
+ operation.result(OPERATION_TIMEOUT_SECONDS)
+
+ print(
+ "Altered Seq sequence to skip an inclusive range between 1000 and 5000000 on database {} on instance {}".format(
+ database_id, instance_id
+ )
+ )
+
+ def insert_customers(transaction):
+ results = transaction.execute_sql(
+ "INSERT INTO Customers (CustomerName) VALUES "
+ "('Lea'), "
+ "('Cataline'), "
+ "('Smith') "
+ "THEN RETURN CustomerId"
+ )
+ for result in results:
+ print("Inserted customer record with Customer Id: {}".format(*result))
+ print(
+ "Number of customer records inserted is {}".format(
+ results.stats.row_count_exact
+ )
+ )
+
+ database.run_in_transaction(insert_customers)
+
+
+# [END spanner_alter_sequence]
+
+# [START spanner_drop_sequence]
+def drop_sequence(instance_id, database_id):
+ """Drops the Sequence"""
+ spanner_client = spanner.Client()
+ instance = spanner_client.instance(instance_id)
+ database = instance.database(database_id)
+
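+    # The column default must be dropped first; DROP SEQUENCE fails while a
+    # default expression still references the sequence.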
+ operation = database.update_ddl(
+ [
+ "ALTER TABLE Customers ALTER COLUMN CustomerId DROP DEFAULT",
+ "DROP SEQUENCE Seq",
+ ]
+ )
+
+ print("Waiting for operation to complete...")
+ operation.result(OPERATION_TIMEOUT_SECONDS)
+
+ print(
+ "Altered Customers table to drop DEFAULT from CustomerId column and dropped the Seq sequence on database {} on instance {}".format(
+ database_id, instance_id
+ )
+ )
+
+
+# [END spanner_drop_sequence]
+
if __name__ == "__main__": # noqa: C901
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
@@ -2580,7 +2696,9 @@ def drop_foreign_key_constraint_delete_cascade(instance_id, database_id):
query_data_with_index_parser.add_argument("--end_title", default="Goo")
subparsers.add_parser("read_data_with_index", help=read_data_with_index.__doc__)
subparsers.add_parser("add_storing_index", help=add_storing_index.__doc__)
- subparsers.add_parser("read_data_with_storing_index", help=read_data_with_storing_index.__doc__)
+ subparsers.add_parser(
+ "read_data_with_storing_index", help=read_data_with_storing_index.__doc__
+ )
subparsers.add_parser(
"create_table_with_timestamp", help=create_table_with_timestamp.__doc__
)
@@ -2606,9 +2724,13 @@ def drop_foreign_key_constraint_delete_cascade(instance_id, database_id):
subparsers.add_parser("insert_data_with_dml", help=insert_data_with_dml.__doc__)
subparsers.add_parser("log_commit_stats", help=log_commit_stats.__doc__)
subparsers.add_parser("update_data_with_dml", help=update_data_with_dml.__doc__)
- subparsers.add_parser("update_data_with_dml_returning", help=update_data_with_dml_returning.__doc__)
+ subparsers.add_parser(
+ "update_data_with_dml_returning", help=update_data_with_dml_returning.__doc__
+ )
subparsers.add_parser("delete_data_with_dml", help=delete_data_with_dml.__doc__)
- subparsers.add_parser("delete_data_with_dml_returning", help=delete_data_with_dml_returning.__doc__)
+ subparsers.add_parser(
+ "delete_data_with_dml_returning", help=delete_data_with_dml_returning.__doc__
+ )
subparsers.add_parser(
"update_data_with_dml_timestamp", help=update_data_with_dml_timestamp.__doc__
)
@@ -2619,7 +2741,9 @@ def drop_foreign_key_constraint_delete_cascade(instance_id, database_id):
"update_data_with_dml_struct", help=update_data_with_dml_struct.__doc__
)
subparsers.add_parser("insert_with_dml", help=insert_with_dml.__doc__)
- subparsers.add_parser("insert_with_dml_returning", help=insert_with_dml_returning.__doc__)
+ subparsers.add_parser(
+ "insert_with_dml_returning", help=insert_with_dml_returning.__doc__
+ )
subparsers.add_parser(
"query_data_with_parameter", help=query_data_with_parameter.__doc__
)
@@ -2664,6 +2788,10 @@ def drop_foreign_key_constraint_delete_cascade(instance_id, database_id):
"read_data_with_database_role", help=read_data_with_database_role.__doc__
)
subparsers.add_parser("list_database_roles", help=list_database_roles.__doc__)
+ subparsers.add_parser("create_sequence", help=create_sequence.__doc__)
+ subparsers.add_parser("alter_sequence", help=alter_sequence.__doc__)
+ subparsers.add_parser("drop_sequence", help=drop_sequence.__doc__)
+
enable_fine_grained_access_parser = subparsers.add_parser(
"enable_fine_grained_access", help=enable_fine_grained_access.__doc__
)
diff --git a/samples/samples/snippets_test.py b/samples/samples/snippets_test.py
index f0824348c0..22b5b6f944 100644
--- a/samples/samples/snippets_test.py
+++ b/samples/samples/snippets_test.py
@@ -114,7 +114,7 @@ def user_managed_instance_config_name(spanner_client):
name = f"custom-python-samples-config-{uuid.uuid4().hex[:10]}"
yield name
snippets.delete_instance_config(
- "{}/instanceConfigs/{}".format(spanner_client.project_name, name)
+ "{}/instanceConfigs/{}".format(spanner_client.project_name, name)
)
return
@@ -143,8 +143,8 @@ def test_create_database_explicit(sample_instance, create_database_id):
def test_create_instance_with_processing_units(capsys, lci_instance_id):
processing_units = 500
retry_429(snippets.create_instance_with_processing_units)(
- lci_instance_id,
- processing_units,
+ lci_instance_id,
+ processing_units,
)
out, _ = capsys.readouterr()
assert lci_instance_id in out
@@ -155,9 +155,7 @@ def test_create_instance_with_processing_units(capsys, lci_instance_id):
def test_update_database(capsys, instance_id, sample_database):
- snippets.update_database(
- instance_id, sample_database.database_id
- )
+ snippets.update_database(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "Updated database {}.".format(sample_database.name) in out
@@ -168,10 +166,10 @@ def test_update_database(capsys, instance_id, sample_database):
def test_create_database_with_encryption_config(
- capsys, instance_id, cmek_database_id, kms_key_name
+ capsys, instance_id, cmek_database_id, kms_key_name
):
snippets.create_database_with_encryption_key(
- instance_id, cmek_database_id, kms_key_name
+ instance_id, cmek_database_id, kms_key_name
)
out, _ = capsys.readouterr()
assert cmek_database_id in out
@@ -193,10 +191,10 @@ def test_list_instance_config(capsys):
@pytest.mark.dependency(name="create_instance_config")
def test_create_instance_config(
- capsys, user_managed_instance_config_name, base_instance_config_id
+ capsys, user_managed_instance_config_name, base_instance_config_id
):
snippets.create_instance_config(
- user_managed_instance_config_name, base_instance_config_id
+ user_managed_instance_config_name, base_instance_config_id
)
out, _ = capsys.readouterr()
assert "Created instance configuration" in out
@@ -213,9 +211,9 @@ def test_update_instance_config(capsys, user_managed_instance_config_name):
def test_delete_instance_config(capsys, user_managed_instance_config_name):
spanner_client = spanner.Client()
snippets.delete_instance_config(
- "{}/instanceConfigs/{}".format(
- spanner_client.project_name, user_managed_instance_config_name
- )
+ "{}/instanceConfigs/{}".format(
+ spanner_client.project_name, user_managed_instance_config_name
+ )
)
out, _ = capsys.readouterr()
assert "successfully deleted" in out
@@ -234,15 +232,15 @@ def test_list_databases(capsys, instance_id):
def test_create_database_with_default_leader(
- capsys,
- multi_region_instance,
- multi_region_instance_id,
- default_leader_database_id,
- default_leader,
+ capsys,
+ multi_region_instance,
+ multi_region_instance_id,
+ default_leader_database_id,
+ default_leader,
):
retry_429 = RetryErrors(exceptions.ResourceExhausted, delay=15)
retry_429(snippets.create_database_with_default_leader)(
- multi_region_instance_id, default_leader_database_id, default_leader
+ multi_region_instance_id, default_leader_database_id, default_leader
)
out, _ = capsys.readouterr()
assert default_leader_database_id in out
@@ -250,15 +248,15 @@ def test_create_database_with_default_leader(
def test_update_database_with_default_leader(
- capsys,
- multi_region_instance,
- multi_region_instance_id,
- default_leader_database_id,
- default_leader,
+ capsys,
+ multi_region_instance,
+ multi_region_instance_id,
+ default_leader_database_id,
+ default_leader,
):
retry_429 = RetryErrors(exceptions.ResourceExhausted, delay=15)
retry_429(snippets.update_database_with_default_leader)(
- multi_region_instance_id, default_leader_database_id, default_leader
+ multi_region_instance_id, default_leader_database_id, default_leader
)
out, _ = capsys.readouterr()
assert default_leader_database_id in out
@@ -272,14 +270,14 @@ def test_get_database_ddl(capsys, instance_id, sample_database):
def test_query_information_schema_database_options(
- capsys,
- multi_region_instance,
- multi_region_instance_id,
- default_leader_database_id,
- default_leader,
+ capsys,
+ multi_region_instance,
+ multi_region_instance_id,
+ default_leader_database_id,
+ default_leader,
):
snippets.query_information_schema_database_options(
- multi_region_instance_id, default_leader_database_id
+ multi_region_instance_id, default_leader_database_id
)
out, _ = capsys.readouterr()
assert default_leader in out
@@ -351,8 +349,7 @@ def test_read_write_transaction(capsys, instance_id, sample_database):
@pytest.mark.dependency(depends=["add_column"])
def test_query_data_with_new_column(capsys, instance_id, sample_database):
- snippets.query_data_with_new_column(instance_id,
- sample_database.database_id)
+ snippets.query_data_with_new_column(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "SingerId: 1, AlbumId: 1, MarketingBudget: 300000" in out
assert "SingerId: 2, AlbumId: 2, MarketingBudget: 300000" in out
@@ -392,8 +389,7 @@ def test_add_storing_index(capsys, instance_id, sample_database):
@pytest.mark.dependency(depends=["add_storing_index"])
def test_read_data_with_storing_index(capsys, instance_id, sample_database):
- snippets.read_data_with_storing_index(instance_id,
- sample_database.database_id)
+ snippets.read_data_with_storing_index(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "300000" in out
@@ -415,8 +411,7 @@ def test_add_timestamp_column(capsys, instance_id, sample_database):
@pytest.mark.dependency(depends=["add_timestamp_column"])
def test_update_data_with_timestamp(capsys, instance_id, sample_database):
- snippets.update_data_with_timestamp(instance_id,
- sample_database.database_id)
+ snippets.update_data_with_timestamp(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "Updated data" in out
@@ -431,16 +426,14 @@ def test_query_data_with_timestamp(capsys, instance_id, sample_database):
@pytest.mark.dependency(name="create_table_with_timestamp")
def test_create_table_with_timestamp(capsys, instance_id, sample_database):
- snippets.create_table_with_timestamp(instance_id,
- sample_database.database_id)
+ snippets.create_table_with_timestamp(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "Created Performances table on database" in out
@pytest.mark.dependency(depends=["create_table_with_timestamp"])
def test_insert_data_with_timestamp(capsys, instance_id, sample_database):
- snippets.insert_data_with_timestamp(instance_id,
- sample_database.database_id)
+ snippets.insert_data_with_timestamp(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "Inserted data." in out
@@ -461,8 +454,7 @@ def test_query_with_struct(capsys, instance_id, sample_database):
@pytest.mark.dependency(depends=["write_struct_data"])
def test_query_with_array_of_struct(capsys, instance_id, sample_database):
- snippets.query_with_array_of_struct(instance_id,
- sample_database.database_id)
+ snippets.query_with_array_of_struct(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "SingerId: 8" in out
assert "SingerId: 7" in out
@@ -530,16 +522,14 @@ def test_delete_data_with_dml_returning(capsys, instance_id, sample_database):
@pytest.mark.dependency(depends=["add_timestamp_column"])
def test_update_data_with_dml_timestamp(capsys, instance_id, sample_database):
- snippets.update_data_with_dml_timestamp(instance_id,
- sample_database.database_id)
+ snippets.update_data_with_dml_timestamp(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "2 record(s) updated." in out
@pytest.mark.dependency(name="dml_write_read_transaction")
def test_dml_write_read_transaction(capsys, instance_id, sample_database):
- snippets.dml_write_read_transaction(instance_id,
- sample_database.database_id)
+ snippets.dml_write_read_transaction(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "1 record(s) inserted." in out
assert "FirstName: Timothy, LastName: Campbell" in out
@@ -547,8 +537,7 @@ def test_dml_write_read_transaction(capsys, instance_id, sample_database):
@pytest.mark.dependency(depends=["dml_write_read_transaction"])
def test_update_data_with_dml_struct(capsys, instance_id, sample_database):
- snippets.update_data_with_dml_struct(instance_id,
- sample_database.database_id)
+ snippets.update_data_with_dml_struct(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "1 record(s) updated" in out
@@ -576,24 +565,21 @@ def test_query_data_with_parameter(capsys, instance_id, sample_database):
@pytest.mark.dependency(depends=["add_column"])
def test_write_with_dml_transaction(capsys, instance_id, sample_database):
- snippets.write_with_dml_transaction(instance_id,
- sample_database.database_id)
+ snippets.write_with_dml_transaction(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "Transferred 200000 from Album2's budget to Album1's" in out
@pytest.mark.dependency(depends=["add_column"])
def update_data_with_partitioned_dml(capsys, instance_id, sample_database):
- snippets.update_data_with_partitioned_dml(instance_id,
- sample_database.database_id)
+ snippets.update_data_with_partitioned_dml(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "3 record(s) updated" in out
@pytest.mark.dependency(depends=["insert_with_dml"])
def test_delete_data_with_partitioned_dml(capsys, instance_id, sample_database):
- snippets.delete_data_with_partitioned_dml(instance_id,
- sample_database.database_id)
+ snippets.delete_data_with_partitioned_dml(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "6 record(s) deleted" in out
@@ -607,15 +593,14 @@ def test_update_with_batch_dml(capsys, instance_id, sample_database):
@pytest.mark.dependency(name="create_table_with_datatypes")
def test_create_table_with_datatypes(capsys, instance_id, sample_database):
- snippets.create_table_with_datatypes(instance_id,
- sample_database.database_id)
+ snippets.create_table_with_datatypes(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "Created Venues table on database" in out
@pytest.mark.dependency(
- name="insert_datatypes_data",
- depends=["create_table_with_datatypes"],
+ name="insert_datatypes_data",
+ depends=["create_table_with_datatypes"],
)
def test_insert_datatypes_data(capsys, instance_id, sample_database):
snippets.insert_datatypes_data(instance_id, sample_database.database_id)
@@ -677,8 +662,8 @@ def test_query_data_with_string(capsys, instance_id, sample_database):
@pytest.mark.dependency(
- name="add_numeric_column",
- depends=["create_table_with_datatypes"],
+ name="add_numeric_column",
+ depends=["create_table_with_datatypes"],
)
def test_add_numeric_column(capsys, instance_id, sample_database):
snippets.add_numeric_column(instance_id, sample_database.database_id)
@@ -694,17 +679,15 @@ def test_update_data_with_numeric(capsys, instance_id, sample_database):
@pytest.mark.dependency(depends=["add_numeric_column"])
-def test_query_data_with_numeric_parameter(capsys, instance_id,
- sample_database):
- snippets.query_data_with_numeric_parameter(instance_id,
- sample_database.database_id)
+def test_query_data_with_numeric_parameter(capsys, instance_id, sample_database):
+ snippets.query_data_with_numeric_parameter(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "VenueId: 4, Revenue: 35000" in out
@pytest.mark.dependency(
- name="add_json_column",
- depends=["create_table_with_datatypes"],
+ name="add_json_column",
+ depends=["create_table_with_datatypes"],
)
def test_add_json_column(capsys, instance_id, sample_database):
snippets.add_json_column(instance_id, sample_database.database_id)
@@ -721,17 +704,15 @@ def test_update_data_with_json(capsys, instance_id, sample_database):
@pytest.mark.dependency(depends=["add_json_column"])
def test_query_data_with_json_parameter(capsys, instance_id, sample_database):
- snippets.query_data_with_json_parameter(instance_id,
- sample_database.database_id)
+ snippets.query_data_with_json_parameter(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "VenueId: 19, VenueDetails: {'open': True, 'rating': 9}" in out
@pytest.mark.dependency(depends=["insert_datatypes_data"])
-def test_query_data_with_timestamp_parameter(capsys, instance_id,
- sample_database):
+def test_query_data_with_timestamp_parameter(capsys, instance_id, sample_database):
snippets.query_data_with_timestamp_parameter(
- instance_id, sample_database.database_id
+ instance_id, sample_database.database_id
)
out, _ = capsys.readouterr()
assert "VenueId: 4, VenueName: Venue 4, LastUpdateTime:" in out
@@ -741,8 +722,7 @@ def test_query_data_with_timestamp_parameter(capsys, instance_id,
@pytest.mark.dependency(depends=["insert_datatypes_data"])
def test_query_data_with_query_options(capsys, instance_id, sample_database):
- snippets.query_data_with_query_options(instance_id,
- sample_database.database_id)
+ snippets.query_data_with_query_options(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "VenueId: 4, VenueName: Venue 4, LastUpdateTime:" in out
assert "VenueId: 19, VenueName: Venue 19, LastUpdateTime:" in out
@@ -751,8 +731,7 @@ def test_query_data_with_query_options(capsys, instance_id, sample_database):
@pytest.mark.dependency(depends=["insert_datatypes_data"])
def test_create_client_with_query_options(capsys, instance_id, sample_database):
- snippets.create_client_with_query_options(instance_id,
- sample_database.database_id)
+ snippets.create_client_with_query_options(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "VenueId: 4, VenueName: Venue 4, LastUpdateTime:" in out
assert "VenueId: 19, VenueName: Venue 19, LastUpdateTime:" in out
@@ -797,22 +776,73 @@ def test_list_database_roles(capsys, instance_id, sample_database):
@pytest.mark.dependency(name="create_table_with_foreign_key_delete_cascade")
-def test_create_table_with_foreign_key_delete_cascade(capsys, instance_id, sample_database):
- snippets.create_table_with_foreign_key_delete_cascade(instance_id, sample_database.database_id)
+def test_create_table_with_foreign_key_delete_cascade(
+ capsys, instance_id, sample_database
+):
+ snippets.create_table_with_foreign_key_delete_cascade(
+ instance_id, sample_database.database_id
+ )
out, _ = capsys.readouterr()
- assert "Created Customers and ShoppingCarts table with FKShoppingCartsCustomerId" in out
+ assert (
+ "Created Customers and ShoppingCarts table with FKShoppingCartsCustomerId"
+ in out
+ )
-@pytest.mark.dependency(name="alter_table_with_foreign_key_delete_cascade",
- depends=["create_table_with_foreign_key_delete_cascade"])
-def test_alter_table_with_foreign_key_delete_cascade(capsys, instance_id, sample_database):
- snippets.alter_table_with_foreign_key_delete_cascade(instance_id, sample_database.database_id)
+@pytest.mark.dependency(
+ name="alter_table_with_foreign_key_delete_cascade",
+ depends=["create_table_with_foreign_key_delete_cascade"],
+)
+def test_alter_table_with_foreign_key_delete_cascade(
+ capsys, instance_id, sample_database
+):
+ snippets.alter_table_with_foreign_key_delete_cascade(
+ instance_id, sample_database.database_id
+ )
out, _ = capsys.readouterr()
assert "Altered ShoppingCarts table with FKShoppingCartsCustomerName" in out
@pytest.mark.dependency(depends=["alter_table_with_foreign_key_delete_cascade"])
-def test_drop_foreign_key_contraint_delete_cascade(capsys, instance_id, sample_database):
- snippets.drop_foreign_key_constraint_delete_cascade(instance_id, sample_database.database_id)
+def test_drop_foreign_key_constraint_delete_cascade(
+ capsys, instance_id, sample_database
+):
+ snippets.drop_foreign_key_constraint_delete_cascade(
+ instance_id, sample_database.database_id
+ )
out, _ = capsys.readouterr()
assert "Altered ShoppingCarts table to drop FKShoppingCartsCustomerName" in out
+
+
+@pytest.mark.dependency(name="create_sequence")
+def test_create_sequence(capsys, instance_id, bit_reverse_sequence_database):
+ snippets.create_sequence(instance_id, bit_reverse_sequence_database.database_id)
+ out, _ = capsys.readouterr()
+ assert (
+ "Created Seq sequence and Customers table, where the key column CustomerId uses the sequence as a default value on database"
+ in out
+ )
+ assert "Number of customer records inserted is 3" in out
+ assert "Inserted customer record with Customer Id:" in out
+
+
+@pytest.mark.dependency(name="alter_sequence", depends=["create_sequence"])
+def test_alter_sequence(capsys, instance_id, bit_reverse_sequence_database):
+ snippets.alter_sequence(instance_id, bit_reverse_sequence_database.database_id)
+ out, _ = capsys.readouterr()
+ assert (
+ "Altered Seq sequence to skip an inclusive range between 1000 and 5000000 on database"
+ in out
+ )
+ assert "Number of customer records inserted is 3" in out
+ assert "Inserted customer record with Customer Id:" in out
+
+
+@pytest.mark.dependency(depends=["alter_sequence"])
+def test_drop_sequence(capsys, instance_id, bit_reverse_sequence_database):
+ snippets.drop_sequence(instance_id, bit_reverse_sequence_database.database_id)
+ out, _ = capsys.readouterr()
+ assert (
+ "Altered Customers table to drop DEFAULT from CustomerId column and dropped the Seq sequence on database"
+ in out
+ )
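Note: the three sequence tests above all reduce to DDL batches against the database. The sketch below shows the shape of those statements, assuming the GoogleSQL bit-reversed sequence syntax; the instance and database ids are placeholders, and snippets.py remains the authoritative source.

    from google.cloud import spanner

    client = spanner.Client()
    database = client.instance("my-instance").database("my-database")  # placeholders

    # create_sequence: a bit-reversed-positive sequence, plus a Customers table
    # whose key column takes its default from the sequence.
    database.update_ddl(
        [
            "CREATE SEQUENCE Seq OPTIONS (sequence_kind = 'bit_reversed_positive')",
            """CREATE TABLE Customers (
                   CustomerId INT64 DEFAULT (GET_NEXT_SEQUENCE_VALUE(SEQUENCE Seq)),
                   CustomerName STRING(1024)
               ) PRIMARY KEY (CustomerId)""",
        ]
    ).result(240)

    # alter_sequence: skip the inclusive range [1000, 5000000], matching the
    # message asserted in test_alter_sequence.
    database.update_ddl(
        ["ALTER SEQUENCE Seq SET OPTIONS (skip_range_min = 1000, skip_range_max = 5000000)"]
    ).result(240)

    # drop_sequence: the column default must be dropped before the sequence.
    database.update_ddl(
        ["ALTER TABLE Customers ALTER COLUMN CustomerId DROP DEFAULT", "DROP SEQUENCE Seq"]
    ).result(240)
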
diff --git a/tests/system/test_session_api.py b/tests/system/test_session_api.py
index d90b2180f7..ae22d4d897 100644
--- a/tests/system/test_session_api.py
+++ b/tests/system/test_session_api.py
@@ -1890,7 +1890,7 @@ def test_partition_read_w_index(sessions_database, not_emulator):
columns,
spanner_v1.KeySet(all_=True),
index="name",
- data_boost_enabled=False,
+ data_boost_enabled=True,
)
for batch in batches:
p_results_iter = batch_txn.process(batch)
@@ -2507,7 +2507,7 @@ def test_partition_query(sessions_database, not_emulator):
all_data_rows = set(_row_data(row_count))
union = set()
batch_txn = sessions_database.batch_snapshot(read_timestamp=committed)
- for batch in batch_txn.generate_query_batches(sql, data_boost_enabled=False):
+ for batch in batch_txn.generate_query_batches(sql, data_boost_enabled=True):
p_results_iter = batch_txn.process(batch)
# Lists aren't hashable so the results need to be converted
rows = [tuple(result) for result in p_results_iter]
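Context for the two flips below in this file: they switch the partitioned read/query system tests to exercise Spanner Data Boost. A minimal sketch of the pattern under test, with placeholder ids:

    from google.cloud import spanner

    client = spanner.Client()
    database = client.instance("my-instance").database("my-database")  # placeholders

    batch_txn = database.batch_snapshot()
    batches = batch_txn.generate_query_batches(
        "SELECT SingerId, FirstName FROM Singers",
        data_boost_enabled=True,  # partitions are served by Data Boost compute
    )
    for batch in batches:
        for row in batch_txn.process(batch):
            print(row)
    batch_txn.close()
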
diff --git a/tests/system/test_table_api.py b/tests/system/test_table_api.py
index 1385fb953c..7d4da2b363 100644
--- a/tests/system/test_table_api.py
+++ b/tests/system/test_table_api.py
@@ -29,6 +29,16 @@ def test_table_exists_not_found(shared_database):
assert not table.exists()
+def test_table_exists_reload_database_dialect(
+ shared_instance, shared_database, not_emulator
+):
+ database = shared_instance.database(shared_database.database_id)
+ assert database.database_dialect == DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED
+ table = database.table("all_types")
+ assert table.exists()
+ assert database.database_dialect != DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED
+
+
def test_db_list_tables(shared_database):
tables = shared_database.list_tables()
table_ids = set(table.table_id for table in tables)
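The new test above documents lazy metadata loading: a Database handle obtained from Instance.database() starts with an unspecified dialect, and the first RPC that touches the database reloads it. A sketch with placeholder ids:

    from google.cloud import spanner
    from google.cloud.spanner_admin_database_v1 import DatabaseDialect

    client = spanner.Client()
    database = client.instance("my-instance").database("my-database")  # placeholders

    # No RPC has happened yet, so only constructor-supplied fields are set.
    assert database.database_dialect == DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED

    # Checking a table's existence reloads database metadata as a side effect,
    # which is what test_table_exists_reload_database_dialect pins down.
    if database.table("all_types").exists():
        assert database.database_dialect != DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED
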
diff --git a/tests/unit/spanner_dbapi/test_connect.py b/tests/unit/spanner_dbapi/test_connect.py
index f1c3b12ac8..17122352d4 100644
--- a/tests/unit/spanner_dbapi/test_connect.py
+++ b/tests/unit/spanner_dbapi/test_connect.py
@@ -90,7 +90,7 @@ def test_w_explicit(self, mock_client):
project=PROJECT,
credentials=credentials,
client_info=mock.ANY,
- route_to_leader_enabled=False,
+ route_to_leader_enabled=True,
)
client_info = mock_client.call_args_list[0][1]["client_info"]
self.assertEqual(client_info.user_agent, USER_AGENT)
@@ -128,7 +128,7 @@ def test_w_credential_file_path(self, mock_client):
credentials_path,
project=PROJECT,
client_info=mock.ANY,
- route_to_leader_enabled=False,
+ route_to_leader_enabled=True,
)
client_info = factory.call_args_list[0][1]["client_info"]
self.assertEqual(client_info.user_agent, USER_AGENT)
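These expectation changes track a default flip: route_to_leader_enabled is now True when dbapi connect() constructs the client, so read-write and partitioned-DML requests are routed to the leader region. Callers can still opt out explicitly; a sketch with a placeholder project id:

    from google.cloud import spanner

    # New default: leader-aware routing is on.
    client = spanner.Client(project="my-project")

    # The previous behaviour remains available as an explicit opt-out.
    client_no_rtl = spanner.Client(project="my-project", route_to_leader_enabled=False)
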
diff --git a/tests/unit/test_batch.py b/tests/unit/test_batch.py
index 532e0cda55..6d356565b7 100644
--- a/tests/unit/test_batch.py
+++ b/tests/unit/test_batch.py
@@ -284,7 +284,7 @@ def _test_commit_with_request_options(self, request_options=None):
self.assertEqual(committed, now)
self.assertEqual(batch.committed, committed)
- if type(request_options) == dict:
+ if type(request_options) is dict:
expected_request_options = RequestOptions(request_options)
else:
expected_request_options = request_options
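The `==` to `is` changes here and in the unit-test files below are the flake8 E721 fix: comparing types with `==` dispatches to `__eq__`, which a metaclass can override, while `is` compares identity, which is the intent of a strict type check. For example:

    request_options = {"priority": 1}
    type(request_options) is dict       # True; exact-type check, no __eq__ involved
    isinstance(request_options, dict)   # broader: also True for dict subclasses
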
diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py
index f8bcb709cb..ed79271a96 100644
--- a/tests/unit/test_client.py
+++ b/tests/unit/test_client.py
@@ -59,7 +59,7 @@ def _constructor_test_helper(
client_options=None,
query_options=None,
expected_query_options=None,
- route_to_leader_enabled=None,
+ route_to_leader_enabled=True,
):
import google.api_core.client_options
from google.cloud.spanner_v1 import client as MUT
@@ -72,7 +72,7 @@ def _constructor_test_helper(
expected_client_info = MUT._CLIENT_INFO
kwargs["client_options"] = client_options
- if type(client_options) == dict:
+ if type(client_options) is dict:
expected_client_options = google.api_core.client_options.from_dict(
client_options
)
diff --git a/tests/unit/test_instance.py b/tests/unit/test_instance.py
index f9d1fec6b8..0a7dbccb81 100644
--- a/tests/unit/test_instance.py
+++ b/tests/unit/test_instance.py
@@ -818,7 +818,7 @@ def test_list_backup_operations_defaults(self):
retry=mock.ANY,
timeout=mock.ANY,
)
- self.assertTrue(all([type(op) == Operation for op in ops]))
+ self.assertTrue(all([type(op) is Operation for op in ops]))
def test_list_backup_operations_w_options(self):
from google.api_core.operation import Operation
@@ -865,7 +865,7 @@ def test_list_backup_operations_w_options(self):
retry=mock.ANY,
timeout=mock.ANY,
)
- self.assertTrue(all([type(op) == Operation for op in ops]))
+ self.assertTrue(all([type(op) is Operation for op in ops]))
def test_list_database_operations_defaults(self):
from google.api_core.operation import Operation
@@ -923,7 +923,7 @@ def test_list_database_operations_defaults(self):
retry=mock.ANY,
timeout=mock.ANY,
)
- self.assertTrue(all([type(op) == Operation for op in ops]))
+ self.assertTrue(all([type(op) is Operation for op in ops]))
def test_list_database_operations_w_options(self):
from google.api_core.operation import Operation
@@ -988,7 +988,7 @@ def test_list_database_operations_w_options(self):
retry=mock.ANY,
timeout=mock.ANY,
)
- self.assertTrue(all([type(op) == Operation for op in ops]))
+ self.assertTrue(all([type(op) is Operation for op in ops]))
def test_type_string_to_type_pb_hit(self):
from google.cloud.spanner_admin_database_v1 import (
diff --git a/tests/unit/test_snapshot.py b/tests/unit/test_snapshot.py
index d03d396039..f09720d639 100644
--- a/tests/unit/test_snapshot.py
+++ b/tests/unit/test_snapshot.py
@@ -654,7 +654,7 @@ def _read_helper(
if request_options is None:
request_options = RequestOptions()
- elif type(request_options) == dict:
+ elif type(request_options) is dict:
request_options = RequestOptions(request_options)
if partition is not None: # 'limit' and 'partition' incompatible
@@ -902,7 +902,7 @@ def _execute_sql_helper(
if request_options is None:
request_options = RequestOptions()
- elif type(request_options) == dict:
+ elif type(request_options) is dict:
request_options = RequestOptions(request_options)
result_set = derived.execute_sql(
diff --git a/tests/unit/test_transaction.py b/tests/unit/test_transaction.py
index 487344e2b5..2cbdf8ca17 100644
--- a/tests/unit/test_transaction.py
+++ b/tests/unit/test_transaction.py
@@ -394,7 +394,7 @@ def _commit_helper(
expected_request_options = RequestOptions(
transaction_tag=self.TRANSACTION_TAG
)
- elif type(request_options) == dict:
+ elif type(request_options) is dict:
expected_request_options = RequestOptions(request_options)
expected_request_options.transaction_tag = self.TRANSACTION_TAG
expected_request_options.request_tag = None
@@ -545,7 +545,7 @@ def _execute_update_helper(
if request_options is None:
request_options = RequestOptions()
- elif type(request_options) == dict:
+ elif type(request_options) is dict:
request_options = RequestOptions(request_options)
row_count = transaction.execute_update(
@@ -739,7 +739,7 @@ def _batch_update_helper(self, error_after=None, count=0, request_options=None):
if request_options is None:
request_options = RequestOptions()
- elif type(request_options) == dict:
+ elif type(request_options) is dict:
request_options = RequestOptions(request_options)
status, row_counts = transaction.batch_update(