Skip to content

Commit

Permalink
wip
Browse files Browse the repository at this point in the history
  • Loading branch information
1st1 committed Dec 12, 2024
1 parent c7576cd commit c7a2ddb
Show file tree
Hide file tree
Showing 5 changed files with 61 additions and 24 deletions.
11 changes: 7 additions & 4 deletions edb/server/compiler/compiler.py
Original file line number Diff line number Diff line change
Expand Up @@ -789,7 +789,7 @@ def compile_sql_descriptors(
param_required = False # SQL arguments can always be NULL

if isinstance(param_type, s_types.Array):
array_type_id = param_type.get_element_type(schema)
array_type_id = param_type.get_element_type(schema).id
else:
array_type_id = None

Expand Down Expand Up @@ -825,11 +825,14 @@ def compile_sql_descriptors(
)

result.append((
(input_desc, input_desc_id, params, len(params)),
(output_desc, output_desc_id)
(input_desc, input_desc_id.bytes, params, len(params)),
(output_desc, output_desc_id.bytes)
))

return result
return (
result,
None, # state
)

def interpret_backend_error(
self,
Expand Down
2 changes: 0 additions & 2 deletions edb/server/compiler/sertypes.py
Original file line number Diff line number Diff line change
Expand Up @@ -1054,9 +1054,7 @@ def describe_sql_result(
element_names = []

for rel_name, rel_t in row.items():
print(ctx.uuid_to_pos)
rel_type_id = _describe_type(rel_t, ctx=ctx)
print(ctx.uuid_to_pos)
# SQLRecordElement.name
params_buf.append(_string_packer(rel_name))
element_names.append(rel_name)
Expand Down
27 changes: 16 additions & 11 deletions edb/server/compiler_pool/pool.py
Original file line number Diff line number Diff line change
Expand Up @@ -381,7 +381,11 @@ async def _run_method(
sync_state=sync_state
)

return result
worker._last_pickled_state = result[1]
if len(result) == 2:
return *result, 0
else:
return result

finally:
self._release_worker(worker)
Expand Down Expand Up @@ -431,6 +435,7 @@ async def _run_method_in_tx(
pickled_state = state.REUSE_LAST_STATE_MARKER
dbname = user_schema_pickle = None
else:
worker._last_pickled_state = None
worker_db = worker._dbs.get(dbname)
if worker_db is None:
dbname = None
Expand All @@ -448,7 +453,10 @@ async def _run_method_in_tx(
txid,
*compile_args
)
worker._last_pickled_state = new_pickled_state
if new_pickled_state is not None:
# `new_pickled_state is None` is a signal to keep things
# as is.
worker._last_pickled_state = new_pickled_state
return units, new_pickled_state, 0

finally:
Expand All @@ -469,7 +477,7 @@ async def compile(
*compile_args,
**compiler_args,
):
result = await self._run_method(
return await self._run_method(
'compile',
dbname,
user_schema_pickle,
Expand All @@ -481,13 +489,6 @@ async def compile(
**compiler_args,
)

worker._last_pickled_state = result[-1]
ret
if len(result) == 2:
return *result, 0
else:
return result

async def compile_in_tx(
self,
dbname,
Expand Down Expand Up @@ -1665,6 +1666,7 @@ def weighter(w: MultiTenantWorker):
pickled_state = state.REUSE_LAST_STATE_MARKER
dbname = client_id = user_schema_pickle = None
else:
worker._last_pickled_state = None
assert isinstance(worker, MultiTenantWorker)
assert client_id is not None
tenant_schema = worker.get_tenant_schema(client_id)
Expand Down Expand Up @@ -1703,7 +1705,10 @@ def weighter(w: MultiTenantWorker):
txid,
*compile_args
)
worker._last_pickled_state = new_pickled_state
if new_pickled_state is not None:
# `new_pickled_state is None` is a signal to keep things
# as is.
worker._last_pickled_state = new_pickled_state
return units, new_pickled_state, 0

finally:
Expand Down
24 changes: 24 additions & 0 deletions edb/server/compiler_pool/worker.py
Original file line number Diff line number Diff line change
Expand Up @@ -279,6 +279,30 @@ def compile_sql_descriptors(
return descriptors


def compile_sql_descriptors_in_tx(
    dbname: Optional[str],
    user_schema: Optional[bytes],
    cstate,
    *args,
    **kwargs
):
    """Compile SQL type descriptors against an in-progress transaction state.

    Args:
        dbname: name of a database registered in the worker-local ``DBS``
            mapping, or ``None`` when the caller supplies the user schema
            directly via *user_schema*.
        user_schema: pickled user schema; required iff *dbname* is ``None``.
        cstate: either ``state.REUSE_LAST_STATE_MARKER`` (reuse the state
            cached in ``LAST_STATE`` from a previous call in this worker
            process) or a pickled compiler transaction state.
        *args, **kwargs: forwarded verbatim to
            ``COMPILER.compile_serialized_request_in_tx``.

    Returns:
        A ``(units, None)`` tuple — the second element is always ``None``
        because descriptor compilation must not modify the transaction state
        (enforced by the assert below).
    """
    # NOTE(review): `global` is declared, but LAST_STATE is never assigned in
    # this function (unlike sibling *_in_tx helpers, presumably) — the
    # declaration is only needed for writes, so it is currently inert.
    global LAST_STATE
    if cstate == state.REUSE_LAST_STATE_MARKER:
        # Reuse the already-unpickled state from a prior request; a prior
        # call in this worker must have populated it.
        assert LAST_STATE is not None
        cstate = LAST_STATE
    else:
        # Fresh state arrives pickled from the pool.
        cstate = pickle.loads(cstate)
    if dbname is None:
        # No registered DB: the caller must ship the schema inline.
        assert user_schema is not None
        cstate.set_root_user_schema(pickle.loads(user_schema))
    else:
        # Use the schema of the worker-local database entry.
        cstate.set_root_user_schema(DBS[dbname].user_schema)
    units, new_state = COMPILER.compile_serialized_request_in_tx(
        cstate, *args, **kwargs)
    assert new_state is None # we don't modify state when we compile desc
    return units, None


def compile_graphql(
dbname: str,
user_schema: Optional[bytes],
Expand Down
21 changes: 14 additions & 7 deletions edb/server/dbview/dbview.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -1499,12 +1499,18 @@ cdef class DatabaseConnectionView:
qug[qu_i].in_type_args = desc_qu[0][2]
qug[qu_i].in_type_args_real_count = desc_qu[0][3]

qug.out_type_data = desc_qug.out_type_data
qug.out_type_id = desc_qug.out_type_id
qug.in_type_data = desc_qug.in_type_data
qug.in_type_id = desc_qug.in_type_id
qug.in_type_args = desc_qug.in_type_args
qug.in_type_args_real_count = desc_qug.in_type_args_real_count
# XXX We don't support SQL scripts just yet, so for now
# we can just copy the last QU's descriptors and
# apply them to the whole group (IOW a group is really
# a group of ONE now.)
# In near future we'll need to properly implement arg
# remap.
qug.out_type_data = desc_qug[-1][1][0]
qug.out_type_id = desc_qug[-1][1][1]
qug.in_type_data = desc_qug[-1][0][0]
qug.in_type_id = desc_qug[-1][0][1]
qug.in_type_args = desc_qug[-1][0][2]
qug.in_type_args_real_count = desc_qug[-1][0][3]

cdef inline _check_in_tx_error(self, query_unit_group):
if self.in_tx_error():
Expand Down Expand Up @@ -1624,7 +1630,8 @@ cdef class DatabaseConnectionView:
client_id=self.tenant.client_id,
)

return result
type_descs, self._last_comp_state, self._last_comp_state_id = result
return type_descs

cdef check_capabilities(
self,
Expand Down

0 comments on commit c7a2ddb

Please sign in to comment.