Clean 2.0
jlowin committed Jan 11, 2024
1 parent 57e48c7 commit 44fe2e2
Showing 46 changed files with 232 additions and 2,864 deletions.
28 changes: 18 additions & 10 deletions src/marvin/__init__.py
@@ -1,26 +1,34 @@
 from .settings import settings

-from .components import fn, image, speech, model, cast, extract, classify
-from .components.prompt.fn import prompt_fn
+from .apis.text import fn, cast, extract, classify, classifier, generate, model, Model
+from .apis.images import paint, image
+from .apis.audio import speak, speech


 try:
     from ._version import version as __version__
 except ImportError:
     __version__ = "unknown"


 __all__ = [
-    "fn",
-    "image",
-    "model",
+    # --- text ---
+    "Model",
     "cast",
-    "extract",
     "classify",
+    "classifier",
+    "extract",
+    "fn",
+    "generate",
+    "model",
+    # --- images ---
+    "image",
+    "paint",
+    # --- audio ---
+    "speak",
+    "speech",
-    "prompt_fn",
     "settings",
 ]


 # compatibility with Marvin v1
-from .components import fn as ai_fn, model as ai_model
-from .components.classifier import classifier as ai_classifier
+from marvin.apis._v1_compat import ai_fn, ai_model, ai_classifier
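Taken together, the new `__init__.py` flattens Marvin's public surface into text, image, and audio helpers re-exported from `marvin.apis`. A minimal usage sketch (hypothetical: outputs are illustrative, a configured OpenAI key is assumed, and `speak`'s exact parameters are not shown in this diff):

```python
import marvin

# text helpers (from marvin.apis.text)
price = marvin.cast("three dollars fifty cents", float)                  # e.g. 3.5
mood = marvin.classify("I love this!", labels=["positive", "negative"])  # e.g. "positive"

# image helper (from marvin.apis.images)
image_response = marvin.paint("a watercolor lighthouse at dawn")

# audio helper (from marvin.apis.audio); argument shape assumed
audio = marvin.speak("Hello from Marvin 2.0")
```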
6 changes: 0 additions & 6 deletions src/marvin/ai.py

This file was deleted.

1 change: 1 addition & 0 deletions src/marvin/apis/__init__.py
@@ -0,0 +1 @@
+from . import text, images, audio
src/marvin/apis/_v1_compat.py
@@ -1,8 +1,8 @@
-from .llm import fn, model
+from .text import classifier, fn, model

 ai_fn = fn
 ai_model = model
-ai_classifier = fn
+ai_classifier = classifier

 __all__ = [
     "ai_fn",
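The shim keeps v1 spellings importable (and fixes `ai_classifier`, which previously aliased `fn` instead of `classifier`). A hypothetical before/after, assuming both names are used as decorators as in Marvin v1:

```python
import marvin

# Marvin v1 spelling, still available via marvin.apis._v1_compat:
@marvin.ai_fn
def sentiment(text: str) -> float:
    """Score the sentiment of `text` on a scale from -1 to 1."""

# Equivalent Marvin v2 spelling:
@marvin.fn
def sentiment_v2(text: str) -> float:
    """Score the sentiment of `text` on a scale from -1 to 1."""
```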
2 changes: 1 addition & 1 deletion src/marvin/v2/ai/audio.py → src/marvin/apis/audio.py
@@ -4,11 +4,11 @@
 from openai._base_client import HttpxBinaryResponseContent

 import marvin
+from marvin.client.openai import MarvinClient
 from marvin.requests import SpeechRequest
 from marvin.utilities.jinja import Environment
 from marvin.utilities.logging import get_logger
 from marvin.utilities.python import PythonFunction
-from marvin.v2.client import MarvinClient

T = TypeVar("T")

8 changes: 4 additions & 4 deletions src/marvin/v2/ai/images.py → src/marvin/apis/images.py
@@ -4,12 +4,12 @@
 from openai.types.images_response import ImagesResponse

 import marvin
+from marvin.apis.prompts.image_templates import IMAGE_PROMPT
+from marvin.client.openai import MarvinClient
 from marvin.requests import ImageRequest
 from marvin.utilities.jinja import Environment
 from marvin.utilities.logging import get_logger
 from marvin.utilities.python import PythonFunction
-from marvin.v2.ai.prompt_templates import IMAGE_PROMPT
-from marvin.v2.client import MarvinClient

T = TypeVar("T")

@@ -34,7 +34,7 @@ def generate_image(
     return response


-def imagine(instructions: str = None, context: dict = None, model_kwargs: dict = None):
+def paint(instructions: str = None, context: dict = None, model_kwargs: dict = None):
     response = generate_image(
         prompt_template=IMAGE_PROMPT,
         prompt_kwargs=dict(
@@ -50,7 +50,7 @@ def image(fn: Callable):
     @wraps(fn)
     def wrapper(*args, **kwargs):
         model = PythonFunction.from_function_call(fn, *args, **kwargs)
-        return imagine(
+        return paint(
             instructions=model.docstring,
             context=dict(
                 prompt_source="function call",
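With the rename, `imagine` becomes `paint`, and the `@image` decorator forwards the decorated function's docstring as the image instructions (per the wrapper above). A hypothetical sketch, assuming a configured OpenAI key:

```python
import marvin

# direct call
response = marvin.paint("a watercolor lighthouse at dawn")

# decorator form: the docstring supplies the instructions and the call
# details are passed along as context
@marvin.image
def lighthouse(style: str):
    """A lighthouse at dawn, rendered in the requested style."""

response = lighthouse("watercolor")
```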
File renamed without changes.
10 changes: 10 additions & 0 deletions src/marvin/apis/prompts/image_templates.py
@@ -0,0 +1,10 @@
+import inspect
+
+IMAGE_PROMPT = inspect.cleandoc(
+    """
+    {{ instructions }}
+
+    Additional context:
+    {{ context }}
+    """
+)
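For reference, a hypothetical render of this template. Marvin renders prompts through its own `marvin.utilities.jinja` Environment; plain `jinja2` is used here only to show the output shape:

```python
from jinja2 import Template

from marvin.apis.prompts.image_templates import IMAGE_PROMPT

prompt = Template(IMAGE_PROMPT).render(
    instructions="a watercolor lighthouse at dawn",
    context={"prompt_source": "function call"},
)
print(prompt)
```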
src/marvin/apis/prompts/text_templates.py
@@ -25,14 +25,17 @@
     ## Response format
-    Call the `FormatResponse` tool to validate your response, and use the following schema:
-    {{ response_format }}
+    Call the `FormatResponse` tool to validate your response, and use the
+    following schema: {{ response_format }}
-    {% if is_str_response %}
-    The user expects a string response, but do not return JSON or a quoted
-    string unless they provided instructions requiring it. If you do return
-    JSON, it must be valid and parseable including double quotes.
-    {% endif %}
     - When providing integers, do not write out any decimals at all
+    - Use deduction where appropriate e.g. "3 dollars fifty cents" is a single
+      value [3.5] not two values [3, 50] unless the user specifically asks for
+      each part.
+    - When providing a string response, do not return JSON or a quoted string
+      unless they provided instructions requiring it. If you do return JSON, it
+      must be valid and parseable including double quotes.
     """
 )
@@ -66,6 +69,11 @@
     Call the `FormatResponse` tool to validate your response, and use the
     following schema: {{ response_format }}
     - When providing integers, do not write out any decimals at all
+    - Use deduction where appropriate e.g. "3 dollars fifty cents" is a single
+      value [3.5] not two values [3, 50] unless the user specifically asks for
+      each part.
     """
 )
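The new deduction rule is easiest to see through `cast`. A hypothetical example (output illustrative, OpenAI key assumed):

```python
import marvin

# per the new prompt rule, "3 dollars fifty cents" should be deduced as a
# single value...
total = marvin.cast("3 dollars fifty cents", float)  # e.g. 3.5
# ...unless the caller explicitly asks for each part via `instructions`.
```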

59 changes: 34 additions & 25 deletions src/marvin/v2/ai/llm.py → src/marvin/apis/text.py
@@ -20,18 +20,18 @@
     cast_labels_to_grammar,
     cast_type_to_labels,
 )
-from marvin.requests import ChatRequest, ChatResponse
-from marvin.utilities.jinja import Transcript
-from marvin.utilities.logging import get_logger
-from marvin.utilities.python import PythonFunction
-from marvin.v2.ai.prompt_templates import (
+from marvin.apis.prompts.text_templates import (
     CAST_PROMPT,
     CLASSIFY_PROMPT,
     EXTRACT_PROMPT,
     FUNCTION_PROMPT,
     GENERATE_PROMPT,
 )
-from marvin.v2.client import ChatCompletion, MarvinClient
+from marvin.client.openai import ChatCompletion, MarvinClient
+from marvin.requests import ChatRequest, ChatResponse
+from marvin.utilities.jinja import Transcript
+from marvin.utilities.logging import get_logger
+from marvin.utilities.python import PythonFunction

T = TypeVar("T")
M = TypeVar("M", bound=BaseModel)
@@ -142,7 +142,7 @@ def _generate_typed_llm_response_with_logit_bias(

 def cast(
     data: str,
-    to: type[T],
+    target: type[T],
     instructions: str = None,
     model_kwargs: dict = None,
 ) -> T:
@@ -154,28 +154,29 @@ def cast(
     # if the user provided a `to` type that represents a list of labels, we use
     # `classify()` for performance.
     if (
-        get_origin(to) == Literal
-        or (isinstance(to, type) and issubclass(to, Enum))
-        or isinstance(to, list)
-        or to is bool
+        get_origin(target) == Literal
+        or (isinstance(target, type) and issubclass(target, Enum))
+        or isinstance(target, list)
+        or target is bool
     ):
         return classify(
-            data=data, labels=to, instructions=instructions, model_kwargs=model_kwargs
+            data=data,
+            labels=target,
+            instructions=instructions,
+            model_kwargs=model_kwargs,
         )

     return _generate_typed_llm_response_with_tool(
         prompt_template=CAST_PROMPT,
-        prompt_kwargs=dict(
-            data=data, instructions=instructions, is_str_response=to is str
-        ),
-        type_=to,
+        prompt_kwargs=dict(data=data, instructions=instructions),
+        type_=target,
         model_kwargs=model_kwargs | dict(temperature=0),
     )


 def extract(
     data: str,
-    type_: type[T],
+    target: type[T],
     instructions: str = None,
     model_kwargs: dict = None,
 ) -> list[T]:
@@ -186,7 +187,7 @@ def extract(
     return _generate_typed_llm_response_with_tool(
         prompt_template=EXTRACT_PROMPT,
         prompt_kwargs=dict(data=data, instructions=instructions),
-        type_=list[type_],
+        type_=list[target],
         model_kwargs=model_kwargs | dict(temperature=0),
     )
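`extract` gets the same `type_` → `target` rename and wraps the target in `list[...]` internally, so it always returns a list. A hypothetical call (output illustrative):

```python
import marvin

# pull every age out of the sentence as integers
ages = marvin.extract("Alice is 30 and Bob is 25.", target=int)  # e.g. [30, 25]
```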

@@ -214,16 +215,24 @@


 def generate(
-    type_: type[T],
-    n: int = 1,
+    type_: type[T] = None,
     instructions: str = None,
+    n: int = 1,
     temperature: float = 1,
     model_kwargs: dict = None,
 ) -> list[T]:
     """
     Generate a list of n items of the provided type or instructions.
+
+    Either a type or instructions must be provided. If instructions are
+    provided without a type, the type is assumed to be a string.
     """
+
+    if type_ is None and instructions is None:
+        raise ValueError("Must provide either a type or instructions.")
+    elif type_ is None:
+        type_ = str
+
     # make sure we generate at least n items
     result = [0] * (n + 1)
     while len(result) != n:
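`generate` now treats the type as optional: instructions alone imply `str`, and omitting both raises `ValueError`. A hypothetical pair of calls (outputs illustrative):

```python
import marvin

# instructions only: type_ defaults to str
names = marvin.generate(instructions="plausible coffee shop names", n=3)

# a type and instructions together still work
primes = marvin.generate(type_=int, instructions="small prime numbers", n=5)
```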
@@ -295,14 +304,14 @@ def wrapper(*args, **kwargs):
     return wrapper


-class AIModel(BaseModel):
+class Model(BaseModel):
"""
A Pydantic model that can be instantiated from a natural language string, in
addition to keyword arguments.
"""

     @classmethod
-    def from_text(cls, text: str, model_kwargs: dict = None, **kwargs) -> "AIModel":
+    def from_text(cls, text: str, model_kwargs: dict = None, **kwargs) -> "Model":
         """Async text constructor"""
         ai_kwargs = cast(text, cls, model_kwargs=model_kwargs, **kwargs)
         ai_kwargs.update(kwargs)
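`AIModel` is now simply `Model`, keeping the natural-language constructor. A hypothetical subclass (field names are illustrative; a configured OpenAI key is assumed):

```python
from marvin import Model

class Location(Model):
    city: str
    state: str

# from_text routes through cast() to fill the fields
loc = Location.from_text("the windy city")  # e.g. Location(city="Chicago", state="IL")
```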
@@ -368,12 +377,12 @@ def model(
 ) -> Union[Type[M], Callable[[Type[M]], Type[M]]]:
     """
     Class decorator for instantiating a Pydantic model from a string. Equivalent
-    to subclassing AIModel.
+    to subclassing Model.
     """
     model_kwargs = model_kwargs or {}

     def decorator(cls: Type[M]) -> Type[M]:
-        class WrappedModel(AIModel, cls):
+        class WrappedModel(Model, cls):
             @wraps(cls.__init__)
             def __init__(self, *args, **kwargs):
                 super().__init__(*args, model_kwargs=model_kwargs, **kwargs)
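And the decorator form, which the docstring describes as equivalent to subclassing `Model`. Fields are hypothetical, and passing the text positionally relies on the `*args` pass-through above (exact behavior assumed):

```python
from pydantic import BaseModel

import marvin

@marvin.model
class Location(BaseModel):
    city: str
    state: str

loc = Location("the windy city")
```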
13 changes: 2 additions & 11 deletions src/marvin/beta/ai_flow/ai_task.py
@@ -9,11 +9,10 @@

 from marvin.beta.assistants import Assistant, Run, Thread
 from marvin.beta.assistants.runs import CancelRun
-from marvin.serializers import create_tool_from_type
 from marvin.tools.assistants import AssistantTool
 from marvin.utilities.context import ScopedContext
 from marvin.utilities.jinja import Environment as JinjaEnvironment
-from marvin.utilities.tools import tool_from_function
+from marvin.utilities.tools import tool_from_function, tool_from_type

T = TypeVar("T", bound=BaseModel)

@@ -245,15 +244,7 @@ def task_completed():

     # otherwise we need to create a tool with the correct parameter signature

-    tool = create_tool_from_type(
-        _type=self.fn.__annotations__["return"],
-        model_name="task_completed",
-        model_description=(
-            "Indicate that the task completed and produced the provided `result`."
-        ),
-        field_name="result",
-        field_description="The task result",
-    )
+    tool = tool_from_type(type_=self.fn.__annotations__["return"])

     def task_completed_with_result(result: T):
         self.status = Status.COMPLETED
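`tool_from_type` collapses the old name/description/field bundle into a single type argument. A hypothetical minimal call matching the keyword used above (the helper's other parameters, if any, are not shown in this diff):

```python
from marvin.utilities.tools import tool_from_type

# build a completion tool whose argument validates as the task's return
# annotation; `int` stands in for self.fn.__annotations__["return"]
tool = tool_from_type(type_=int)
```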