diff --git a/lumen/ai/coordinator.py b/lumen/ai/coordinator.py
index 409242b85..b98a84f6b 100644
--- a/lumen/ai/coordinator.py
+++ b/lumen/ai/coordinator.py
@@ -521,14 +521,10 @@ async def _lookup_schemas(
         cache: dict[str, dict] | None = None
     ) -> str:
         cache = cache or {}
-        to_query, queries = [], []
         for table in requested:
             if table in provided or table in cache:
                 continue
-            to_query.append(table)
-            queries.append(get_schema(tables[table], table, limit=3))
-        for table, schema in zip(to_query, await asyncio.gather(*queries)):
-            cache[table] = schema
+            cache[table] = await get_schema(tables[table], table, limit=3)
         schema_info = ''
         for table in requested:
             if table in provided:
diff --git a/lumen/ai/models.py b/lumen/ai/models.py
index 06f307d95..6a7686a2e 100644
--- a/lumen/ai/models.py
+++ b/lumen/ai/models.py
@@ -116,7 +116,7 @@ def make_plan_models(agent_names: list[str], tables: list[str]):
     extras['tables'] = (
         list[Literal[tuple(tables)]],
         FieldInfo(
-            description="A list of tables you want to inspect before coming up with a plan."
+            description="A list of tables to load into memory before coming up with a plan. NOTE: Simple queries asking to list the tables/datasets do not require loading the tables."
        )
    )
    reasoning = create_model(