From 48db0c2d6c0a8b05405d0074f9b0298659233be8 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 20 Jan 2025 17:14:06 +0000
Subject: [PATCH] Drop indices concurrently on background updates (#18091)

Otherwise these can race with other long running queries and lock out all
other queries. This caused problems in v1.22.0 as we added an index to the
`events` table in #17948, but that got interrupted and so next time we ran
the background update we needed to delete the half-finished index. However,
that got blocked behind some long running queries and then locked other
queries out (stopping workers from even starting).
---
 changelog.d/18091.bugfix              | 1 +
 synapse/storage/background_updates.py | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/18091.bugfix

diff --git a/changelog.d/18091.bugfix b/changelog.d/18091.bugfix
new file mode 100644
index 00000000000..c163b4f9a2e
--- /dev/null
+++ b/changelog.d/18091.bugfix
@@ -0,0 +1 @@
+Fix rare race where on upgrade to v1.22.0 a long running database upgrade could lock out new events from being received or sent.

diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index 34139f580d4..a02b4cc9ce9 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -789,7 +789,7 @@ def create_index_psql(conn: "LoggingDatabaseConnection") -> None:
                 # we may already have a half-built index. Let's just drop it
                 # before trying to create it again.
-                sql = "DROP INDEX IF EXISTS %s" % (index_name,)
+                sql = "DROP INDEX CONCURRENTLY IF EXISTS %s" % (index_name,)
                 logger.debug("[SQL] %s", sql)
                 c.execute(sql)
@@ -814,7 +814,7 @@ def create_index_psql(conn: "LoggingDatabaseConnection") -> None:
                 if replaces_index is not None:
                     # We drop the old index as the new index has now been created.
-                    sql = f"DROP INDEX IF EXISTS {replaces_index}"
+                    sql = f"DROP INDEX CONCURRENTLY IF EXISTS {replaces_index}"
                     logger.debug("[SQL] %s", sql)
                     c.execute(sql)
             finally:
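
For context on why the change matters: a plain `DROP INDEX` takes an `ACCESS EXCLUSIVE` lock on the table, so if it queues behind a long running query, every later query on that table queues behind it as well; the `CONCURRENTLY` variant only takes a `SHARE UPDATE EXCLUSIVE` lock and avoids that pile-up. The catch is that PostgreSQL refuses to run `DROP INDEX CONCURRENTLY` (or `CREATE INDEX CONCURRENTLY`) inside a transaction block, so the statement has to be issued on a connection in autocommit mode, which the surrounding `create_index_psql` helper is responsible for arranging (hence the `finally:` cleanup visible at the end of the second hunk). Below is a minimal standalone sketch of the same pattern, not Synapse's actual code: it uses psycopg2 directly and hypothetical index/table/column names purely for illustration.

```python
# Minimal sketch (not Synapse code): drop and rebuild an index without
# blocking other queries. Index/table/column names are hypothetical.
import psycopg2

conn = psycopg2.connect("dbname=example")
try:
    # CONCURRENTLY statements cannot run inside a transaction block, so the
    # connection must be in autocommit mode.
    conn.autocommit = True
    with conn.cursor() as cur:
        # Drop a possibly half-built index; CONCURRENTLY takes only a
        # SHARE UPDATE EXCLUSIVE lock, so reads and writes keep flowing.
        cur.execute("DROP INDEX CONCURRENTLY IF EXISTS example_idx")
        # Rebuild it, again without blocking concurrent queries.
        cur.execute(
            "CREATE INDEX CONCURRENTLY example_idx ON example_table (example_col)"
        )
finally:
    conn.close()
```

The trade-off is that concurrent index operations are slower and, if interrupted, can leave an invalid half-built index behind, which is exactly why the background updater drops any half-finished index before retrying the build.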