Skip SQL cache in cache recompilation (#8338)
A complete SQL compilation requires a backend connection to amend the
in/out tids. We were recompiling the SQL cache incorrectly with
out_type_tid=000...000, so this commit temporarily discards the SQL
cache after DDL to avoid that issue.
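
As an illustration, here is a simplified, hypothetical sketch of the
recompilation filter; the real logic lives in DatabaseConnectionView in
edb/server/dbview/dbview.pyx, and the request/enum types below are
stand-ins, not the server's actual classes:

    # Simplified sketch only -- not the actual dbview internals.
    import dataclasses
    import enum


    class InputLanguage(enum.Enum):
        EDGEQL = enum.auto()
        SQL = enum.auto()


    @dataclasses.dataclass
    class CompilationRequest:
        schema_version: int
        input_language: InputLanguage


    def requests_to_recompile(cached_requests, current_schema_version):
        for req in cached_requests:
            # Only recompile queries from the *latest* schema version,
            # to avoid quadratic slowdown problems (as in the diff below).
            if req.schema_version != current_schema_version:
                continue
            # SQL recompilation would need a backend connection to amend
            # the in/out type ids; none is available during background
            # recompilation, so SQL cache entries are dropped and simply
            # recompiled on their next execution.
            if req.input_language is InputLanguage.SQL:
                continue
            yield req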

Also fixed SQL compilation over the binary protocol to bump the
`sql_compilations_total` metric instead of
`edgeql_query_compilations_total`.
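
A rough illustration of the intended counter semantics, using
prometheus_client as a stand-in for the server's own metrics module (the
label names here are assumptions inferred from the diff below, not the
server's actual definitions):

    # Illustrative sketch only; the server defines these counters in its
    # own metrics module, and the exact label names are assumed here.
    from prometheus_client import Counter

    edgeql_query_compilations = Counter(
        'edgeql_query_compilations',  # exported as ..._total
        'Number of EdgeQL query compilations.',
        ['instance', 'source'],
    )
    sql_compilations = Counter(
        'sql_compilations',  # exported as sql_compilations_total
        'Number of SQL compilations.',
        ['instance'],
    )


    def record_compilation(input_language: str, instance_name: str) -> None:
        # Mirror the dispatch added in dbview.pyx: an EdgeQL request bumps
        # the EdgeQL counter with a 'compiler' source label; anything else
        # (i.e. SQL) bumps the SQL counter, which carries no source label.
        if input_language == 'EDGEQL':
            edgeql_query_compilations.labels(instance_name, 'compiler').inc()
        else:
            sql_compilations.labels(instance_name).inc()
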
fantix authored Feb 13, 2025
1 parent 7ef63b2 commit d70df12
Showing 2 changed files with 55 additions and 8 deletions.
14 changes: 11 additions & 3 deletions edb/server/dbview/dbview.pyx
@@ -1262,6 +1262,9 @@ cdef class DatabaseConnectionView:
                     # Only recompile queries from the *latest* version,
                     # to avoid quadratic slowdown problems.
                     and req.schema_version == self.schema_version
+                    # SQL queries require _amend_typedesc_in_sql() with a
+                    # backend connection, which is not available here.
+                    and req.input_language != enums.InputLanguage.SQL
                 ):
                     g.create_task(recompile_request(req))

@@ -1447,9 +1450,14 @@ cdef class DatabaseConnectionView:
             )

             if use_metrics:
-                metrics.edgeql_query_compilations.inc(
-                    1.0, self.tenant.get_instance_name(), 'compiler'
-                )
+                if query_req.input_language is enums.InputLanguage.EDGEQL:
+                    metrics.edgeql_query_compilations.inc(
+                        1.0, self.tenant.get_instance_name(), 'compiler'
+                    )
+                else:
+                    metrics.sql_compilations.inc(
+                        1.0, self.tenant.get_instance_name()
+                    )

             source = query_req.source
             if query_unit_group.force_non_normalized:
49 changes: 44 additions & 5 deletions tests/test_server_ops.py
@@ -745,12 +745,34 @@ def measure_sql_compilations(
         con = await sd.connect()
         try:
             qry = 'select schema::Object { name }'
+            sql = '''
+                SELECT
+                    n.nspname::text AS table_schema,
+                    c.relname::text AS table_name,
+                    CASE
+                        WHEN c.relkind = 'r' THEN 'table'
+                        WHEN c.relkind = 'v' THEN 'view'
+                        WHEN c.relkind = 'm' THEN 'materialized_view'
+                    END AS type,
+                    c.relrowsecurity AS rls_enabled
+                FROM
+                    pg_catalog.pg_class c
+                JOIN
+                    pg_catalog.pg_namespace n
+                    ON n.oid::text = c.relnamespace::text
+                WHERE
+                    c.relkind IN ('r', 'v', 'm')
+                    AND n.nspname = 'public';
+            '''

             await con.query(qry)
+            await con.query_sql(sql)

             # Querying a second time should hit the cache
             with self.assertChange(measure_compilations(sd), 0):
                 await con.query(qry)
+            with self.assertChange(measure_sql_compilations(sd), 0):
+                await con.query_sql(sql)

             await con.query('''
                 create type X
Expand All @@ -764,6 +786,14 @@ def measure_sql_compilations(
             with self.assertChange(measure_compilations(sd), 0):
                 await con.query(qry)

+            # The SQL cache is reset after DDL, because recompiling SQL
+            # requires a backend connection for amending in/out tids, and
+            # we don't have the infra for that yet.
+            with self.assertChange(measure_sql_compilations(sd), 1):
+                await con.query_sql(sql)
+            with self.assertChange(measure_sql_compilations(sd), 0):
+                await con.query_sql(sql)
+
             # TODO: this does not behave the way I think it should
             # with self.assertChange(measure_compilations(sd), 1):
             #     con_c = con.with_config(apply_access_policies=False)
@@ -794,6 +824,12 @@ def measure_sql_compilations(

             with self.assertChange(measure_compilations(sd), 1):
                 await con.query(qry)
+            with self.assertChange(measure_compilations(sd), 0):
+                await con.query(qry)
+            with self.assertChange(measure_sql_compilations(sd), 1):
+                await con.query_sql(sql)
+            with self.assertChange(measure_sql_compilations(sd), 0):
+                await con.query_sql(sql)

             await con.execute(
                 "configure current database "
@@ -802,24 +838,24 @@

             # Now, a similar thing for SQL queries

-            with self.assertChange(measure_compilations(sd), 1):
+            with self.assertChange(measure_sql_compilations(sd), 1):
                 await con.query_sql('select 1')

             # cache hit
-            with self.assertChange(measure_compilations(sd), 0):
+            with self.assertChange(measure_sql_compilations(sd), 0):
                 await con.query_sql('select 1')

             # changing globals: cache hit
-            with self.assertChange(measure_compilations(sd), 0):
+            with self.assertChange(measure_sql_compilations(sd), 0):
                 con_g = con.with_globals({'g': 'hello'})
                 await con_g.query_sql('select 1')

             # normalization: pg_query_normalize is underwhelming
-            with self.assertChange(measure_compilations(sd), 1):
+            with self.assertChange(measure_sql_compilations(sd), 1):
                 await con.query_sql('sElEct 1')

             # constant extraction: cache hit
-            with self.assertChange(measure_compilations(sd), 0):
+            with self.assertChange(measure_sql_compilations(sd), 0):
                 await con.query_sql('select 2')

             # TODO: this does not behave the way I thought it should
@@ -882,6 +918,9 @@ def measure_sql_compilations(
             # It should hit the cache no problem.
             with self.assertChange(measure_compilations(sd), 0):
                 await con.query(qry)
+            # TODO(fantix): persistent SQL cache not working?
+            # with self.assertChange(measure_sql_compilations(sd), 0):
+            #     await con.query_sql(sql)

finally:
await con.aclose()
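
The `measure_sql_compilations` helper named in the hunk headers above is
defined outside the excerpted hunks and is not shown here. A hypothetical
reconstruction, assuming the test-server handle exposes the Prometheus
text exposition via a `fetch_metrics()` method (an assumption, not
confirmed by this diff), might look like:

    import re


    def measure_sql_compilations(sd):
        # Hypothetical reconstruction; assumes sd.fetch_metrics() returns
        # the Prometheus text exposition of the server under test.
        def measure() -> float:
            total = 0.0
            for line in sd.fetch_metrics().splitlines():
                # Sum the counter across all label combinations, e.g.
                #   sql_compilations_total{tenant="main"} 3.0
                m = re.match(
                    r'^sql_compilations_total(\{[^}]*\})?\s+(\S+)$', line)
                if m:
                    total += float(m.group(2))
            return total
        return measure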
