This repository has been archived by the owner on Feb 12, 2024. It is now read-only.

Commit

Feature/fix flake8 error merged (#110)
* check in latest changes

* Implement HF - take 1

* fix hugging face

* up

* fix hugging face

* simplify

* flake8 fixes
emrgnt-cmplxty authored Oct 31, 2023
1 parent 5f918b7 commit 3dbb396
Showing 7 changed files with 25 additions and 19 deletions.
1 change: 0 additions & 1 deletion README.md
@@ -133,7 +133,6 @@ This is an effort to democratize access to top-tier textbooks. This can readily
 
 _Important:_ To make the most out of grounding your data with Wikipedia, ensure your system matches our detailed specifications. An example RAG provider can be seen [here](https://github.com/SciPhi-AI/sciphi/blob/main/sciphi/interface/rag/sciphi_wiki.py). More high-quality textbooks are available [here](https://github.com/SciPhi-AI/library-of-phi).
 
-
 ### RAG Eval Harness
 
 Measure the efficacy of your RAG pipeline with our unique evaluation harness.
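For orientation, a minimal sketch of how a RAG provider might be wired up through `RAGInterfaceManager`, which is imported in `sciphi/scripts/data_augmenter.py` below. Every argument and method name in this sketch is an assumption, not the repository's confirmed API:

# Hypothetical sketch -- names below are assumptions, not the confirmed API.
from sciphi.interface import RAGInterfaceManager

rag_interface = RAGInterfaceManager.get_interface_from_args(  # assumed helper
    rag_provider_name="sciphi-wiki",       # assumed provider name
    rag_api_base="https://api.sciphi.ai",  # assumed endpoint
)
# Assumed retrieval call: fetch grounding passages for a query.
contexts = rag_interface.get_contexts(["What is a Hilbert space?"])
print(contexts)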
8 changes: 8 additions & 0 deletions sciphi/interface/llm/hugging_face_interface.py
@@ -35,6 +35,14 @@ def get_chat_completion(
"Chat completion not yet implemented for HuggingFace."
)

def get_chat_completion(
self, conversation: list[dict], generation_config: GenerationConfig
) -> str:
"""Get a chat completion from the local HuggingFace provider."""
raise NotImplementedError(
"Chat completion not yet implemented for HuggingFace."
)

@property
def model(self) -> HuggingFaceLLM:
return self._model
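The new HuggingFace method is a deliberate stub. A hedged sketch of what a later implementation might look like, flattening the conversation with the standard transformers chat-template API and reusing the plain-completion path; the `tokenizer` attribute and `get_instruct_completion` method assumed on `HuggingFaceLLM` here are guesses, not the repository's confirmed interface:

# Hypothetical follow-up sketch, not part of this commit.
def get_chat_completion(
    self, conversation: list[dict], generation_config: GenerationConfig
) -> str:
    """Sketch: render the message list with the tokenizer's chat template."""
    # apply_chat_template is the standard transformers API for message lists;
    # self.model.tokenizer is an assumed attribute of HuggingFaceLLM.
    prompt = self.model.tokenizer.apply_chat_template(
        conversation, tokenize=False, add_generation_prompt=True
    )
    # Assumed: reuse the existing single-prompt completion path.
    return self.model.get_instruct_completion(prompt, generation_config)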
12 changes: 12 additions & 0 deletions sciphi/interface/llm/vllm_interface.py
@@ -49,6 +49,18 @@ def get_chat_completion(
"Chat completion not yet implemented for vLLM."
)

def get_chat_completion(
self, conversation: List[dict], generation_config: GenerationConfig
) -> str:
"""Get a conversation completion from the local vLLM provider."""

logger.debug(
f"Requesting chat completion from local vLLM with conversation={conversation}"
)
return self.model.get_instruct_completion(
conversation, generation_config
)

@property
def model(self) -> vLLM:
return self._model
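With this addition, callers can hand an OpenAI-style message list straight to the vLLM interface. A minimal usage sketch, assuming an `llm_interface` and `generation_config` already constructed elsewhere (their construction is not part of this commit); it mirrors the call `sciphi_gen_completion.py` switches to below:

# Hypothetical usage; llm_interface and generation_config are assumed pre-built.
conversation = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Return Schrodinger's equation for a particle in a box."},
]
completion = llm_interface.get_chat_completion(conversation, generation_config)
print(completion)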
2 changes: 1 addition & 1 deletion sciphi/llm/models/sciphi_llm.py
@@ -65,7 +65,7 @@ def __init__(
                 ),
             )
         elif self.config.mode == SciPhiProviderMode.LOCAL_HF:
-            from sciphi.llm.models import hugging_face_llm
+            from sciphi.llm.models import hugging_face_llm  # noqa F401
 
         else:
             raise ValueError(f"Invalid mode: {self.config.mode}")
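The one-line change above is the kind of fix the commit title refers to: the import exists only for its side effect of registering the local HuggingFace model, so flake8 flags it as an unused import (F401) unless suppressed. One general flake8 detail, noted as background rather than anything this commit changes: the documented form takes a colon, and without one the comment is parsed as a bare `# noqa`, which silences every check on the line rather than only F401:

# General flake8 behavior, not specific to this repository:
import os   # noqa: F401  -- suppresses only the unused-import check
import sys  # noqa        -- bare form: suppresses every check on this line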
1 change: 0 additions & 1 deletion sciphi/scripts/data_augmenter.py
@@ -18,7 +18,6 @@
 )
 from sciphi.core.utils import get_config_dir
 from sciphi.interface import (
-    LLMInterface,
     LLMInterfaceManager,
     RAGInterfaceManager,
 )
1 change: 0 additions & 1 deletion sciphi/scripts/rag_harness.py
@@ -1,5 +1,4 @@
 import logging
-import os
 from typing import Optional
 
 import dotenv
19 changes: 4 additions & 15 deletions sciphi/scripts/sciphi_gen_completion.py
@@ -92,23 +92,12 @@ def main(
         {
             "role": "user",
             "content": query,
-            # "content": "Return Schrodinger's equation for a particle in a box.",
         },
-        # {"role": "assistant", "content": msg_1},
-        # {
-        #     "role": "user",
-        #     "content": "Excellent. Now, what about Dirac's equation for a free particle?",
-        # },
-        # {"role": "assistant", "content": msg_2},
-        # {
-        #     "role": "user",
-        #     "content": query,
-        # },
     ]
-    completion = llm_interface.get_completion(query, generation_config)
-    # completion = llm_interface.get_chat_completion(
-    #     conversation, generation_config
-    # )
+
+    completion = llm_interface.get_chat_completion(
+        conversation, generation_config
+    )
     print(f"Output Completion = {completion}")


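After this hunk, the script's demo flow reduces to building a single-turn conversation and calling the chat path directly. A condensed view of the resulting logic, with the argument parsing and interface construction in `main()` elided:

# Condensed view of the resulting flow in main(); surrounding plumbing elided.
conversation = [{"role": "user", "content": query}]
completion = llm_interface.get_chat_completion(conversation, generation_config)
print(f"Output Completion = {completion}")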
