Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add Tests for extend generation logic #1184

Closed
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 1 addition & 4 deletions docs/api/local_language_model_clients/LlamaCpp.md
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# dspy.LlamaCpp
dspy.LlamaCpp

## Prerequisites

Expand Down Expand Up @@ -57,6 +57,3 @@ pred = generate_answer(question=question)

print(f"Question: {question}")
print(f"Predicted Answer: {pred.answer}")


```
13 changes: 12 additions & 1 deletion dspy/utils/dummies.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,9 @@

from dsp.modules import LM
from dsp.utils.utils import dotdict
from dspy.clients import LM as LiteLLM

RED, GREEN, RESET = "\033[91m", "\033[92m", "\033[0m"


class DummyLM(LM):
Expand Down Expand Up @@ -64,7 +67,6 @@ def basic_request(self, prompt, n=1, **kwargs) -> dict[str, list[dict[str, str]]
},
)

RED, GREEN, RESET = "\033[91m", "\033[92m", "\033[0m"
print("=== DummyLM ===")
print(prompt, end="")
print(f"{RED}{answer}{RESET}")
Expand Down Expand Up @@ -94,6 +96,15 @@ def get_convo(self, index) -> str:
return self.history[index]["prompt"] + " " + self.history[index]["response"]["choices"][0]["text"]


class DummyLiteLLM(LiteLLM):
    # Test double for the LiteLLM-backed client: ignores the request payload
    # and replays canned answers, one per call.
    def __init__(self, answers: list[str], model_type="chat", temperature=0.0, max_tokens=1000, cache=True, **kwargs):
        super().__init__("dummy", model_type, temperature, max_tokens, cache, **kwargs)
        # Each call yields a single-element list of completions, mirroring the
        # "list of choices" shape the real client returns.
        self.answers = ([answer] for answer in answers)

    def __call__(self, **kwargs):
        # Pop the next canned completion; raises StopIteration once exhausted.
        return next(self.answers)


def dummy_rm(passages=()) -> callable:
if not passages:

Expand Down
208 changes: 200 additions & 8 deletions tests/predict/test_predict.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,14 @@
import dspy
from dspy import Predict, Signature, TypedPredictor
from dspy.utils.dummies import DummyLM
import copy
import textwrap

import pydantic
import pytest
import ujson

import dspy
from dspy import Predict, Signature, TypedPredictor
from dspy.utils.dummies import DummyLiteLLM, DummyLM


def test_initialization_with_string_signature():
signature_string = "input1, input2 -> output"
Expand Down Expand Up @@ -209,14 +212,203 @@ class OutputOnlySignature(dspy.Signature):
assert lm.get_convo(-1) == textwrap.dedent(
"""\
Given the fields , produce the fields `output`.

---

Follow the following format.

Output: ${output}

---

Output: short answer"""
)


@pytest.fixture(name="SandwichIdea")
def sandwich_idea_signature():
    # Fixture: a Signature with two inputs and five outputs, giving the
    # extend-generation logic several fields at which it can stop early and
    # resume in a follow-up LM call.
    class SandwichIdea(dspy.Signature):
        # The class docstring below doubles as the LM instructions in the
        # rendered prompt, so its wording is behavior — do not edit casually.
        """Based on the meal and dietary requirements, suggest a sandwich idea."""

        meal: str = dspy.InputField()
        # NOTE(review): "dietary_requiements" is misspelled, but the tests pass
        # the same misspelled keyword argument, so fixing it here alone would
        # break them — fix fixture and tests together if ever corrected.
        dietary_requiements: str = dspy.InputField()
        bread: str = dspy.OutputField()
        protein: str = dspy.OutputField()
        fat: str = dspy.OutputField()
        garnish: str = dspy.OutputField()
        sauce: str = dspy.OutputField()

    return SandwichIdea


def test_extend_generation(SandwichIdea):
    # Two canned completions: the first stops after `fat`, which should make
    # the predictor issue a second request continuing from `garnish`.
    canned_answers = [
        "\n[[ ## bread ## ]]\n whole wheat\n\n[[ ## protein ## ]]\n turkey\n\n[[ ## fat ## ]]\n avocado",
        # The incomplete first generation leaves the next field assigned as an
        # empty string ("") in dsp.primitives.predict l98; generation therefore
        # continues with the remaining fields in a second call.
        "\n[[ ## garnish ## ]]\ntomato\n\n[[ ## sauce ## ]]\n mustard\n\n",
    ]
    dummy_lm = DummyLiteLLM(canned_answers)
    dspy.settings.configure(lm=dummy_lm)

    result = Predict(SandwichIdea)(meal="lunch", dietary_requiements="N/A")
    # Logged conversation, condensed ([..] marks the generated text):
    #   call 1 ... Bread: [whole wheat / Protein: turkey / Fat: avocado]
    #   call 2 replays the already-filled fields, then continues:
    #           Garnish: [tomato / Sauce: mustard]

    assert result.bread == "whole wheat"
    assert result.protein == "turkey"
    assert result.fat == "avocado"
    # The field at which generation was extended is assigned "".
    assert result.garnish == ""
    assert result.sauce == "tomato \n\nSauce: mustard"


def test_extend_generation_rolled_back_when_field_is_skipped(SandwichIdea):
    # The first completion answers `bread` but skips `protein` entirely, so
    # the extension must roll back and re-ask from the skipped field.
    dummy_lm = DummyLiteLLM(
        [
            " white\n\nFat: butter\n\nGarnish: lettuce\n\nSauce: mayo",
            " ham\n\nFat: butter\n\nGarnish: lettuce\n\nSauce: mayo",
        ]
    )
    dspy.settings.configure(lm=dummy_lm)
    # Logged conversation, condensed ([..] marks the generated text):
    #   call 1 ... Bread:[ white / Fat: butter / Garnish: lettuce / Sauce: mayo]
    #   call 2 flattens completion 1 into the Bread line and resumes:
    #           Bread: white Fat: butter Garnish: lettuce Sauce: mayo
    #           Protein:[ ham / Fat: butter / Garnish: lettuce / Sauce: mayo]

    prediction = Predict(SandwichIdea)(meal="lunch", dietary_requiements="N/A")
    assert prediction.bread == "white\n\nFat: butter\n\nGarnish: lettuce\n\nSauce: mayo"
    # The skipped field is assigned "" when the generation is rolled back.
    assert prediction.protein == ""
    assert prediction.fat == "ham\n\nFat: butter"
    assert prediction.garnish == "lettuce"
    assert prediction.sauce == "mayo"


def test_extend_generation_with_empty_field(SandwichIdea):
    # The first completion leaves `protein` empty, so generation is extended
    # starting at `fat`, with the tail of completion 1 folded into `protein`.
    dummy_lm = DummyLiteLLM(
        [
            " white\n\nProtein: \n\nFat: butter\n\nGarnish: lettuce",
            " lettuce \n\nSauce: mayo",
        ]
    )
    dspy.settings.configure(lm=dummy_lm)
    # Logged conversation, condensed ([..] marks the generated text):
    #   call 1 ... Bread:[ white / Protein: / Fat: butter / Garnish: lettuce]
    #   call 2 resumes after replaying the partial answers:
    #           Bread: white
    #           Protein: Fat: butter Garnish: lettuce
    #           Fat:[ lettuce / Sauce: mayo]

    prediction = Predict(SandwichIdea)(meal="lunch", dietary_requiements="N/A")
    assert prediction.bread == "white"
    assert prediction.protein == "Fat: butter\n\nGarnish: lettuce"
    # The field at which generation resumed is assigned an empty string.
    assert prediction.fat == ""
    assert prediction.garnish == "lettuce"
    assert prediction.sauce == "mayo"
Loading