Skip to content

Commit

Permalink
main into redocs (#66)
Browse files Browse the repository at this point in the history
* Ignore vLLM test (#57)

* add: vllm support

* fix: change model in test

* formatted with black

* add: ignore tests for vllm on github

* ignore pytest for vllm

* docs(contributor): contrib-readme-action has updated readme

* pip3->pip; nit

* Add multimodal tasks

* docs(contributor): contrib-readme-action has updated readme

* Update mathvista.py

Added prompts for answer extraction.

* Update mathvista.py

Added the error handling when importing packages.

* raise exception if cannot parse answer, do not return an arbitrary answer

* remove unused comments

* feat: Added Ollama engine via OpenAI api (#51)

* feat: Added Ollama engine via OpenAI api

* fix: Added PR remarks and test

---------

Co-authored-by: Nihal Nayak <[email protected]>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Mert Yuksekgonul <[email protected]>
Co-authored-by: Pan Lu <[email protected]>
Co-authored-by: Atakan Tekparmak <[email protected]>
  • Loading branch information
6 people authored Jul 13, 2024
1 parent 2980131 commit cd0394e
Show file tree
Hide file tree
Showing 8 changed files with 657 additions and 8 deletions.
3 changes: 1 addition & 2 deletions .github/workflows/build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@ on:
push:
paths-ignore:
- '**.rst'
- 'tests/test_vllm.py' # requires vllm to be installed

jobs:
build:
Expand All @@ -26,7 +25,7 @@ jobs:
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- name: Test with pytest
run: |
pytest
pytest --ignore=tests/test_vllm.py
contrib-readme-job:
runs-on: ubuntu-latest
Expand Down
25 changes: 24 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ pip install git+https://github.com/zou-group/textgrad.git
**Installing textgrad with vllm**:

```sh
pip3 install textgrad[vllm]
pip install textgrad[vllm]
```

See [here](https://pip.pypa.io/en/stable/cli/pip_install/) for more details on various methods of pip installation.
Expand Down Expand Up @@ -327,6 +327,13 @@ We are grateful for all the help we got from our contributors!
<sub><b>Mert Yuksekgonul</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/nihalnayak">
<img src="https://avatars.githubusercontent.com/u/5679782?v=4" width="100;" alt="nihalnayak"/>
<br />
<sub><b>Nihal Nayak</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/sugatoray">
<img src="https://avatars.githubusercontent.com/u/10201242?v=4" width="100;" alt="sugatoray"/>
Expand All @@ -348,6 +355,8 @@ We are grateful for all the help we got from our contributors!
<sub><b>David Ruan</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center">
<a href="https://github.com/nihalnayak">
<img src="https://avatars.githubusercontent.com/u/5679782?v=4" width="100;" alt="nihalnayak"/>
Expand Down Expand Up @@ -384,6 +393,20 @@ We are grateful for all the help we got from our contributors!
<br />
<sub><b>tboen1</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/nihalnayak">
<img src="https://avatars.githubusercontent.com/u/5679782?v=4" width="100;" alt="nihalnayak"/>
<br />
<sub><b>Nihal Nayak</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/AtakanTekparmak">
<img src="https://avatars.githubusercontent.com/u/59488384?v=4" width="100;" alt="AtakanTekparmak"/>
<br />
<sub><b>Atakan Tekparmak</b></sub>
</a>
</td>
</tr>
<tbody>
Expand Down
15 changes: 15 additions & 0 deletions tests/test_engines.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
import pytest

from textgrad.engine import get_engine

def test_ollama_engine():
    """Smoke-test the Ollama engine factory path.

    An engine name prefixed with ``ollama-`` should yield an engine whose
    model string is the suffix and whose base URL is the default local
    Ollama OpenAI-compatible endpoint.
    """
    expected_base_url = 'http://localhost:11434/v1'
    expected_model = "test-model-string"

    # The "ollama-" prefix routes get_engine to the OpenAI-compatible client.
    engine = get_engine("ollama-" + expected_model)

    assert engine
    assert engine.model_string == expected_model
    assert engine.base_url == expected_base_url
8 changes: 8 additions & 0 deletions textgrad/engine/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,14 @@ def get_engine(engine_name: str, **kwargs) -> EngineLM:
elif engine_name in ["command-r-plus", "command-r", "command", "command-light"]:
from .cohere import ChatCohere
return ChatCohere(model_string=engine_name, **kwargs)
elif engine_name.startswith("ollama"):
from .openai import ChatOpenAI, OLLAMA_BASE_URL
model_string = engine_name.replace("ollama-", "")
return ChatOpenAI(
model_string=model_string,
base_url=OLLAMA_BASE_URL,
**kwargs
)
elif "vllm" in engine_name:
from .vllm import ChatVLLM
engine_name = engine_name.replace("vllm-", "")
Expand Down
29 changes: 24 additions & 5 deletions textgrad/engine/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,12 @@
from .base import EngineLM, CachedEngine
from .engine_utils import get_image_type_from_bytes

# Default base URL for OLLAMA
OLLAMA_BASE_URL = 'http://localhost:11434/v1'

# Check if the user set the OLLAMA_BASE_URL environment variable
if os.getenv("OLLAMA_BASE_URL"):
OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL")

class ChatOpenAI(EngineLM, CachedEngine):
DEFAULT_SYSTEM_PROMPT = "You are a helpful, creative, and smart assistant."
Expand All @@ -26,23 +32,36 @@ def __init__(
model_string: str="gpt-3.5-turbo-0613",
system_prompt: str=DEFAULT_SYSTEM_PROMPT,
is_multimodal: bool=False,
base_url: str=None,
**kwargs):
"""
:param model_string:
:param system_prompt:
:param base_url: Used to support Ollama
"""
root = platformdirs.user_cache_dir("textgrad")
cache_path = os.path.join(root, f"cache_openai_{model_string}.db")

super().__init__(cache_path=cache_path)

self.system_prompt = system_prompt
if os.getenv("OPENAI_API_KEY") is None:
raise ValueError("Please set the OPENAI_API_KEY environment variable if you'd like to use OpenAI models.")
self.base_url = base_url

self.client = OpenAI(
api_key=os.getenv("OPENAI_API_KEY"),
)
if not base_url:
if os.getenv("OPENAI_API_KEY") is None:
raise ValueError("Please set the OPENAI_API_KEY environment variable if you'd like to use OpenAI models.")

self.client = OpenAI(
api_key=os.getenv("OPENAI_API_KEY")
)
elif base_url and base_url == OLLAMA_BASE_URL:
self.client = OpenAI(
base_url=base_url,
api_key="ollama"
)
else:
raise ValueError("Invalid base URL provided. Please use the default OLLAMA base URL or None.")

self.model_string = model_string
self.is_multimodal = is_multimodal

Expand Down
16 changes: 16 additions & 0 deletions textgrad/tasks/multimodal/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
from textgrad.engine import EngineLM


def load_multimodal_instance_task(task_name: str, evaluation_api: EngineLM, *args, **kwargs):
    """Instantiate the evaluation split of a multimodal instance-optimization task.

    :param task_name: one of ``"mathvista"`` or ``"scienceqa"``.
    :param evaluation_api: engine used by the dataset for evaluation.
    :raises ValueError: if ``task_name`` is not a known task.
    """
    if task_name == "mathvista":
        # Imported lazily so the dataset dependency is only pulled in when used.
        from textgrad.tasks.multimodal.mathvista import MathVistaDataset
        return MathVistaDataset(*args, evaluation_api=evaluation_api, split="testmini", **kwargs)

    if task_name == "scienceqa":
        from textgrad.tasks.multimodal.scienceqa import ScienceQADataset
        return ScienceQADataset(*args, evaluation_api=evaluation_api, split="test", **kwargs)

    raise ValueError(f"Instance task {task_name} not found.")
Loading

0 comments on commit cd0394e

Please sign in to comment.