v0.4.1
ju-bezdek committed Dec 18, 2023
1 parent 6bc7bef commit 5fef779
Showing 6 changed files with 37 additions and 15 deletions.
7 changes: 6 additions & 1 deletion CHANGELOG.md
@@ -86,4 +86,9 @@ New parameters in llm decorator
## Version 0.4.0 (2023-11-25)
- Input kwargs augmentations by implementing the llm_prompt function (check out the example: [code_examples/augmenting_llm_prompt_inputs.py](https://github.com/ju-bezdek/langchain-decorators/blob/main/code_examples/augmenting_llm_prompt_inputs.py))
- support for automatic JSON fixing if `json_repair` is installed
(*even OpenAI's JSON format is not yet perfect*)
(*even OpenAI's JSON format is not yet perfect*)

## Version 0.4.1 (2023-12-18)
- support for `func_description` passed as an argument to the `llm_function` decorator (see the sketch below)
- the function description is now optional (no error is raised when neither a docstring description nor `func_description` is provided)
- minor fixes
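
A minimal usage sketch of the `llm_function` changes above, assuming the decorator accepts a `func_description` keyword argument and can also be applied bare; the exact signature may differ:

```python
from langchain_decorators import llm_function

# description supplied via the decorator argument instead of the docstring
@llm_function(func_description="Look up the current weather for a given city")
def get_weather(city: str) -> str:
    ...

# the description can now be omitted entirely; the generated function schema
# simply leaves out the "description" field instead of raising an error
@llm_function
def ping(host: str) -> bool:
    ...
```
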
2 changes: 1 addition & 1 deletion src/langchain_decorators/__init__.py
@@ -6,6 +6,6 @@
from .function_decorator import llm_function, get_function_schema
from .chains import FunctionsProvider, FollowupHandle

__version__="0.4.0"
__version__="0.4.1"


12 changes: 8 additions & 4 deletions src/langchain_decorators/common.py
@@ -13,6 +13,7 @@
from langchain.chat_models import ChatOpenAI
from langchain.schema import BaseMessage
from langchain.prompts.chat import ChatMessagePromptTemplate
from .schema import OutputWithFunctionCall
from typing_inspect import is_generic_type, is_union_type

import pydantic
@@ -186,17 +187,17 @@ def define_settings(cls,
"""
if llm_selector is None and default_llm is None and default_streaming_llm is None:
            # only use llm_selector if no default_llm and default_streaming_llm is defined, because then we don't know what rules to set up
default_llm = ChatOpenAI(temperature=0.0, model="gpt-3.5-turbo-1106" if USE_PREVIEW_MODELS else "gpt-3.5-turbo", request_timeout=90) # '-0613' - has function calling
default_llm = ChatOpenAI(temperature=0.0, model="gpt-3.5-turbo-1106" if USE_PREVIEW_MODELS else "gpt-3.5-turbo", request_timeout=30) # '-0613' - has function calling
default_streaming_llm = make_llm_streamable(default_llm)
llm_selector = LlmSelector()\
.with_llm(default_llm, llm_selector_rule_key="chatGPT")\
.with_llm(ChatOpenAI(temperature=0.0, model="gpt-4-1106-preview" if USE_PREVIEW_MODELS else "gpt-3.5-turbo-16k", request_timeout=120), llm_selector_rule_key="GPT4")\
.with_llm(ChatOpenAI(temperature=0.0, model="gpt-4-1106-preview" if USE_PREVIEW_MODELS else "gpt-3.5-turbo-16k", request_timeout=60), llm_selector_rule_key="GPT4")\
#.with_llm(ChatOpenAI(temperature=0.0, model="gpt-3.5-turbo-1106"), llm_selector_rule_key="chatGPT")\
#.with_llm(ChatOpenAI(temperature=0.0, model="gpt-4-32k"), llm_selector_rule_key="GPT4")

else:
if default_llm is None:
default_llm = ChatOpenAI(temperature=0.0, model="gpt-3.5-turbo-1106" if USE_PREVIEW_MODELS else "gpt-3.5-turbo", request_timeout=90) # '-0613' - has function calling
default_llm = ChatOpenAI(temperature=0.0, model="gpt-3.5-turbo-1106" if USE_PREVIEW_MODELS else "gpt-3.5-turbo", request_timeout=60) # '-0613' - has function calling
if default_streaming_llm is None:
default_streaming_llm = make_llm_streamable(default_llm)

@@ -352,7 +353,10 @@ def get_func_return_type(func: callable, with_args:bool=False)->Union[Type, Tupl
raise Exception(f"Invalid Union annotation {return_type}. Expected Union[ <return_type>, None] or just <return_type>")
elif is_generic_type(return_type):
# this should cover list and dict
return get_origin(return_type) if not with_args else (get_origin(return_type), get_args(return_type))
if get_origin(return_type) !=OutputWithFunctionCall and return_type!=OutputWithFunctionCall:
return get_origin(return_type) if not with_args else (get_origin(return_type), get_args(return_type))
else:
return get_args(return_type)[0]
else:
return return_type if not with_args else (return_type, None)

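The `common.py` hunk above changes how `get_func_return_type` treats an `OutputWithFunctionCall[...]` annotation: instead of returning the generic origin, it returns the wrapped type argument. A minimal sketch of the assumed behavior (internal import paths taken from the diff; this helper is not part of the public API):

```python
from langchain_decorators.common import get_func_return_type
from langchain_decorators.schema import OutputWithFunctionCall

def summarize(text: str) -> OutputWithFunctionCall[str]:
    """Example prompt function with a generic return annotation."""

# Other generics still resolve to their origin (e.g. List[str] -> list), but an
# OutputWithFunctionCall[str] annotation is now expected to resolve to the
# wrapped type argument (str) rather than the generic origin.
print(get_func_return_type(summarize))
```
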
8 changes: 4 additions & 4 deletions src/langchain_decorators/function_decorator.py
@@ -333,13 +333,13 @@ def pop_prop_title(schema):

description = parse_function_description_from_docstrings(func_docs) if func_docs else func_description

if not description:
raise ValueError(f"LLM Function {get_function_full_name(func)} has no description in docstrings")
return {
result_schema = {
"name":func_name,
"description":description,
"parameters":args_schema
}
if description:
result_schema["description"]=description
return result_schema



21 changes: 17 additions & 4 deletions src/langchain_decorators/prompt_decorator.py
@@ -17,9 +17,11 @@
from .common import *
from .prompt_template import PromptDecoratorTemplate
from .output_parsers import *
from .schema import OutputWithFunctionCall
from .streaming_context import StreamingContext
from .function_decorator import is_dynamic_llm_func, get_dynamic_function_template_args


SPECIAL_KWARGS=["callbacks","followup_handle","llm_selector_rule_key","memory","functions","function_call","capture_stream","llm_selector_rule_key", "stop", "output_parser", "llm_kwargs"]

def llm_prompt(
@@ -115,8 +117,10 @@ def llm_prompt(

def decorator(func):
name=func.__name__

full_name=f"{func.__module__}.{name}" if func.__module__!="__main__" else name
is_async = inspect.iscoroutinefunction(func)

_llm_selector_rule_key=llm_selector_rule_key


@@ -134,10 +138,6 @@ def build_chain(*args, **kwargs)->LLMDecoratorChain:

capture_stream=_capture_stream





if "capture_stream" in kwargs:
if not isinstance(capture_stream,bool):
raise ValueError("capture_stream is a reserved kwarg and must be of type bool")
@@ -401,6 +401,14 @@ def wrapper(*args, **kwargs):
llmChain = build_chain(*args, **kwargs)
return llmChain.execute()
wrapper.build_chain=build_chain

if inspect.signature(func).parameters.get("functions"):
if not func.__annotations__.get('return') or func.__annotations__.get('return') == OutputWithFunctionCall:
wrapper.__annotations__['return']= OutputWithFunctionCall
else:
wrapper.__annotations__['return']= OutputWithFunctionCall[func.__annotations__.get('return') ]


return wrapper

else:
@@ -419,6 +427,11 @@ async def async_wrapper(*args, **kwargs):
return await llmChain.aexecute()

async_wrapper.build_chain=build_chain
if inspect.signature(func).parameters.get("functions"):
if not func.__annotations__.get('return') or func.__annotations__.get('return') == OutputWithFunctionCall or func.__annotations__.get('return') == Coroutine[Any,Any,OutputWithFunctionCall]:
async_wrapper.__annotations__['return'] = Coroutine[Any,Any,OutputWithFunctionCall]
else:
async_wrapper.__annotations__['return'] = Coroutine[Any,Any,OutputWithFunctionCall[func.__annotations__.get('return') ]]
return async_wrapper
if func:
return decorator(func)
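The `prompt_decorator.py` additions make the decorated wrapper advertise `OutputWithFunctionCall` as its return annotation whenever the prompt function declares a `functions` parameter. A rough, illustrative sketch under that assumption (the prompt template, the function bodies, and the `func_description` keyword are examples, not code from this commit):

```python
from typing import Callable, List

from langchain_decorators import llm_function, llm_prompt


@llm_function(func_description="Send an email with the given body to the recipient")
def send_email(recipient: str, body: str) -> str:
    return f"email sent to {recipient}"


@llm_prompt
def assistant(request: str, functions: List[Callable]) -> str:
    """
    Handle this user request: {request}
    """


# Because `assistant` declares a `functions` parameter, the sync wrapper's return
# annotation is rewritten to OutputWithFunctionCall[str]; an async prompt would
# instead get Coroutine[Any, Any, OutputWithFunctionCall[str]].
print(assistant.__annotations__["return"])
```
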
2 changes: 1 addition & 1 deletion src/langchain_decorators/prompt_template.py
@@ -288,7 +288,7 @@ def from_func(cls,
elif return_type==bool:
output_parser = "boolean"
elif issubclass(return_type, OutputWithFunctionCall):
return_type = "str"
output_parser = "str"
elif issubclass(return_type,BaseModel):
output_parser = PydanticOutputParser(model=return_type)
else:
