diff --git a/nano_graphrag/_op.py b/nano_graphrag/_op.py
index 32b254e..cfa48d6 100644
--- a/nano_graphrag/_op.py
+++ b/nano_graphrag/_op.py
@@ -837,6 +837,8 @@ async def local_query(
         text_chunks_db,
         query_param,
     )
+    if query_param.only_need_context:
+        return context
     if context is None:
         return PROMPTS["fail_response"]
     sys_prompt_temp = PROMPTS["local_rag_response"]
@@ -971,6 +973,8 @@
 """
         )
     points_context = "\n".join(points_context)
+    if query_param.only_need_context:
+        return points_context
     sys_prompt_temp = PROMPTS["global_reduce_rag_response"]
     response = await use_model_func(
         query,
diff --git a/nano_graphrag/base.py b/nano_graphrag/base.py
index dae48ba..b6fed83 100644
--- a/nano_graphrag/base.py
+++ b/nano_graphrag/base.py
@@ -9,6 +9,7 @@
 @dataclass
 class QueryParam:
     mode: Literal["local", "global"] = "global"
+    only_need_context: bool = False
     response_type: str = "Multiple Paragraphs"
     level: int = 2
     top_k: int = 20
diff --git a/nano_graphrag/graphrag.py b/nano_graphrag/graphrag.py
index 3e5c43e..7ca0167 100644
--- a/nano_graphrag/graphrag.py
+++ b/nano_graphrag/graphrag.py
@@ -88,7 +88,6 @@ class GraphRAG:
     cheap_model_func: callable = gpt_4o_mini_complete
     cheap_model_max_token_size: int = 32768
     cheap_model_max_async: int = 16
-    chat_model_func: callable = gpt_4o_complete
 
     # storage
     key_string_value_json_storage_cls: Type[BaseKVStorage] = JsonKVStorage
@@ -160,9 +159,6 @@ def query(self, query: str, param: QueryParam = QueryParam()):
         loop = asyncio.get_event_loop()
         return loop.run_until_complete(self.aquery(query, param))
 
-    def chat(self, messages: list[str], param: QueryParam = QueryParam()):
-        pass
-
     async def aquery(self, query: str, param: QueryParam = QueryParam()):
        if param.mode == "local" and not self.enable_local:
            raise ValueError("enable_local is False, cannot query in local mode")