From 2f07a314ce8ee22d6389ae9ad0757c792e04c4db Mon Sep 17 00:00:00 2001
From: Vincent <46945065+yimao@users.noreply.github.com>
Date: Mon, 4 Nov 2024 19:58:00 +0800
Subject: [PATCH] Fix abstract method signature error (#16809)

Co-authored-by: yimao
---
 .../llama_index/llms/dashscope/base.py             | 12 +++++++++---
 .../llms/llama-index-llms-dashscope/pyproject.toml |  2 +-
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/llama-index-integrations/llms/llama-index-llms-dashscope/llama_index/llms/dashscope/base.py b/llama-index-integrations/llms/llama-index-llms-dashscope/llama_index/llms/dashscope/base.py
index 426e133ba8430..e3d7e35770f2b 100644
--- a/llama-index-integrations/llms/llama-index-llms-dashscope/llama_index/llms/dashscope/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-dashscope/llama_index/llms/dashscope/base.py
@@ -251,7 +251,9 @@ def _get_input_parameters(
         return message, parameters
 
     @llm_completion_callback()
-    def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
+    def complete(
+        self, prompt: str, formatted: bool = False, **kwargs: Any
+    ) -> CompletionResponse:
         message, parameters = self._get_input_parameters(prompt=prompt, **kwargs)
         parameters.pop("incremental_output", None)
         parameters.pop("stream", None)
@@ -265,7 +267,9 @@ def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
         return dashscope_response_to_completion_response(response)
 
     @llm_completion_callback()
-    async def acomplete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
+    async def acomplete(
+        self, prompt: str, formatted: bool = False, **kwargs: Any
+    ) -> CompletionResponse:
         message, parameters = self._get_input_parameters(prompt=prompt, **kwargs)
         parameters.pop("incremental_output", None)
         parameters.pop("stream", None)
@@ -279,7 +283,9 @@ async def acomplete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
         return dashscope_response_to_completion_response(response)
 
     @llm_completion_callback()
-    def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
+    def stream_complete(
+        self, prompt: str, formatted: bool = False, **kwargs: Any
+    ) -> CompletionResponseGen:
         message, parameters = self._get_input_parameters(prompt=prompt, kwargs=kwargs)
         parameters["incremental_output"] = True
         parameters["stream"] = True
diff --git a/llama-index-integrations/llms/llama-index-llms-dashscope/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-dashscope/pyproject.toml
index f477f0d99cff9..238643c8ee188 100644
--- a/llama-index-integrations/llms/llama-index-llms-dashscope/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-dashscope/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-dashscope"
 readme = "README.md"
-version = "0.2.3"
+version = "0.2.4"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
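
Note for reviewers: the patch aligns these overrides with the abstract
complete/acomplete/stream_complete declarations on the llama_index base LLM
class, which place `formatted: bool = False` before `**kwargs`. Below is a
minimal, self-contained Python sketch of the failure mode being fixed; the
Base/Broken/Fixed class names are illustrative stand-ins, not the real
llama_index classes.

    from abc import ABC, abstractmethod
    from typing import Any


    class Base(ABC):
        # Stand-in for the abstract method this patch aligns with:
        # `formatted` is declared before **kwargs, so callers may pass
        # it positionally.
        @abstractmethod
        def complete(self, prompt: str, formatted: bool = False, **kwargs: Any) -> str:
            ...


    class Broken(Base):
        # Pre-patch shape: no `formatted` parameter. A positional call
        # such as obj.complete("hi", True) raises TypeError, because a
        # positional True cannot bind to **kwargs.
        def complete(self, prompt: str, **kwargs: Any) -> str:
            return prompt


    class Fixed(Base):
        # Post-patch shape: matches the abstract signature, so both
        # keyword and positional `formatted` arguments bind correctly.
        def complete(self, prompt: str, formatted: bool = False, **kwargs: Any) -> str:
            return prompt


    print(Fixed().complete("hello", True))  # OK
    try:
        Broken().complete("hello", True)  # `formatted` passed positionally
    except TypeError as exc:
        print(f"pre-patch failure: {exc}")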