From 3dbb3965f3673461f901473188a6c66950c8b2b9 Mon Sep 17 00:00:00 2001
From: emrgnt-cmplxty <68796651+emrgnt-cmplxty@users.noreply.github.com>
Date: Mon, 30 Oct 2023 21:18:05 -0400
Subject: [PATCH] Feature/fix flake8 error merged (#110)

* check in latest changes

* Implement HF - take 1

* fix hugging face

* up

* fix hugging face

* simplify

* flake8 fixes
---
 README.md                                   |  1 -
 .../interface/llm/hugging_face_interface.py |  8 ++++++++
 sciphi/interface/llm/vllm_interface.py      | 12 ++++++++++++
 sciphi/llm/models/sciphi_llm.py             |  2 +-
 sciphi/scripts/data_augmenter.py            |  1 -
 sciphi/scripts/rag_harness.py               |  1 -
 sciphi/scripts/sciphi_gen_completion.py     | 19 ++++---------------
 7 files changed, 25 insertions(+), 19 deletions(-)

diff --git a/README.md b/README.md
index 89ea101..d2df15a 100644
--- a/README.md
+++ b/README.md
@@ -133,7 +133,6 @@ This is an effort to democratize access to top-tier textbooks. This can readily
 
 _Important:_ To make the most out of grounding your data with Wikipedia, ensure your system matches our detailed specifications. An example RAG provider can be seen [here](https://github.com/SciPhi-AI/sciphi/blob/main/sciphi/interface/rag/sciphi_wiki.py). More high quality textbooks are available [here](https://github.com/SciPhi-AI/library-of-phi).
 
-
 ### RAG Eval Harness
 
 Measure the efficacy of your RAG pipeline with our unique evaluation harness.
diff --git a/sciphi/interface/llm/hugging_face_interface.py b/sciphi/interface/llm/hugging_face_interface.py
index a95a554..db3d619 100644
--- a/sciphi/interface/llm/hugging_face_interface.py
+++ b/sciphi/interface/llm/hugging_face_interface.py
@@ -35,6 +35,14 @@ def get_chat_completion(
             "Chat completion not yet implemented for HuggingFace."
         )
 
+    def get_chat_completion(
+        self, conversation: list[dict], generation_config: GenerationConfig
+    ) -> str:
+        """Get a chat completion from the local HuggingFace provider."""
+        raise NotImplementedError(
+            "Chat completion not yet implemented for HuggingFace."
+        )
+
     @property
     def model(self) -> HuggingFaceLLM:
         return self._model
diff --git a/sciphi/interface/llm/vllm_interface.py b/sciphi/interface/llm/vllm_interface.py
index 344d5b6..088b99e 100644
--- a/sciphi/interface/llm/vllm_interface.py
+++ b/sciphi/interface/llm/vllm_interface.py
@@ -49,6 +49,18 @@ def get_chat_completion(
             "Chat completion not yet implemented for vLLM."
         )
 
+    def get_chat_completion(
+        self, conversation: List[dict], generation_config: GenerationConfig
+    ) -> str:
+        """Get a conversation completion from the local vLLM provider."""
+
+        logger.debug(
+            f"Requesting chat completion from local vLLM with conversation={conversation}"
+        )
+        return self.model.get_instruct_completion(
+            conversation, generation_config
+        )
+
     @property
     def model(self) -> vLLM:
         return self._model
diff --git a/sciphi/llm/models/sciphi_llm.py b/sciphi/llm/models/sciphi_llm.py
index 46b6c7a..997adf9 100644
--- a/sciphi/llm/models/sciphi_llm.py
+++ b/sciphi/llm/models/sciphi_llm.py
@@ -65,7 +65,7 @@ def __init__(
                 ),
             )
         elif self.config.mode == SciPhiProviderMode.LOCAL_HF:
-            from sciphi.llm.models import hugging_face_llm
+            from sciphi.llm.models import hugging_face_llm  # noqa: F401
         else:
             raise ValueError(f"Invalid mode: {self.config.mode}")
 
diff --git a/sciphi/scripts/data_augmenter.py b/sciphi/scripts/data_augmenter.py
index a934822..57187d8 100644
--- a/sciphi/scripts/data_augmenter.py
+++ b/sciphi/scripts/data_augmenter.py
@@ -18,7 +18,6 @@
 )
 from sciphi.core.utils import get_config_dir
 from sciphi.interface import (
-    LLMInterface,
     LLMInterfaceManager,
     RAGInterfaceManager,
 )
diff --git a/sciphi/scripts/rag_harness.py b/sciphi/scripts/rag_harness.py
index 24e9309..4662701 100644
--- a/sciphi/scripts/rag_harness.py
+++ b/sciphi/scripts/rag_harness.py
@@ -1,5 +1,4 @@
 import logging
-import os
 from typing import Optional
 
 import dotenv
diff --git a/sciphi/scripts/sciphi_gen_completion.py b/sciphi/scripts/sciphi_gen_completion.py
index 76cab28..8e5bbab 100644
--- a/sciphi/scripts/sciphi_gen_completion.py
+++ b/sciphi/scripts/sciphi_gen_completion.py
@@ -92,23 +92,12 @@ def main(
     conversation = [
         {
             "role": "user",
             "content": query,
-            # "content": "Return Schrodinger's equation for a particle in a box.",
         },
-        # {"role": "assistant", "content": msg_1},
-        # {
-        #     "role": "user",
-        #     "content": "Excellent. Now, what about Dirac's equation for a free particle?",
-        # },
-        # {"role": "assistant", "content": msg_2},
-        # {
-        #     "role": "user",
-        #     "content": query,
-        # },
     ]
-    completion = llm_interface.get_completion(query, generation_config)
-    # completion = llm_interface.get_chat_completion(
-    #     conversation, generation_config
-    # )
+
+    completion = llm_interface.get_chat_completion(
+        conversation, generation_config
+    )
     print(f"Output Completion = {completion}")
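---

A minimal usage sketch of the new chat-completion path, mirroring the updated
sciphi/scripts/sciphi_gen_completion.py. Only the
get_chat_completion(conversation, generation_config) call comes from this
patch; the construction of `llm_interface` and `generation_config` is assumed
to happen elsewhere (e.g. via the LLMInterfaceManager imported in
sciphi/scripts/data_augmenter.py), so it is not shown here.

    # Sketch only: `llm_interface` and `generation_config` are assumed to be
    # constructed elsewhere; this patch does not include their setup.
    conversation = [
        {
            "role": "user",
            # Example prompt reused from the commented-out block the patch removes.
            "content": "Return Schrodinger's equation for a particle in a box.",
        },
    ]

    # With the vLLM backend, get_chat_completion now delegates to
    # model.get_instruct_completion(conversation, generation_config);
    # the HuggingFace backend still raises NotImplementedError.
    completion = llm_interface.get_chat_completion(conversation, generation_config)
    print(f"Output Completion = {completion}")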