From 8b83a41c9904b774af62cbc4e93f0a585751b66a Mon Sep 17 00:00:00 2001
From: Daniel McKnight
Date: Thu, 19 Dec 2024 16:19:24 -0800
Subject: [PATCH] Define models for tokenizer request/response

---
 neon_data_models/models/api/mq/brainforge.py | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/neon_data_models/models/api/mq/brainforge.py b/neon_data_models/models/api/mq/brainforge.py
index 0133c76..9e566fe 100644
--- a/neon_data_models/models/api/mq/brainforge.py
+++ b/neon_data_models/models/api/mq/brainforge.py
@@ -24,7 +24,7 @@
 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-from typing import List, Optional, Any, Dict
+from typing import List, Optional, Any, Dict, Literal
 from pydantic import Field
 
 from neon_data_models.models.base.contexts import MQContext
@@ -85,6 +85,18 @@ class LLMGetCompletionResponse(MQContext):
         description="Raw completion response from an OpenAI endpoint")
 
 
+class LLMGetTokenizerChatTemplate(LLMGetModels):
+    model: str = Field(description="Model to request (@)")
+    messages: List[Dict[Literal["role", "content"], str]] = Field(
+        description="List of dict messages in OpenAI format")
+    tokenize: bool = Field(False)
+    add_generation_prompt: bool = Field(True)
+
+
+class LLMGetTokenizerChatTemplateResponse(MQContext):
+    prompt: str = Field(description="Prompt generated by the tokenizer")
+
+
 class LLMGetInferenceResponse(LLMResponse, MQContext):
     pass
 
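For readers unfamiliar with the new fields: `tokenize` and `add_generation_prompt` mirror the parameters of HuggingFace's `apply_chat_template` tokenizer method, which suggests how a consumer might fulfill this request. The sketch below is illustrative only and not part of the patch: the handler name, the name@revision parsing of `model` (the exact format is elided in the patch description), and the assumption that `MQContext` carries a `message_id` to echo back are all hypothetical.

    from transformers import AutoTokenizer

    from neon_data_models.models.api.mq.brainforge import (
        LLMGetTokenizerChatTemplate, LLMGetTokenizerChatTemplateResponse)


    def handle_get_chat_template(request: LLMGetTokenizerChatTemplate
                                 ) -> LLMGetTokenizerChatTemplateResponse:
        # Hypothetical parsing: a name@revision convention is assumed here
        name, _, revision = request.model.partition("@")
        tokenizer = AutoTokenizer.from_pretrained(name,
                                                  revision=revision or None)
        # `tokenize` and `add_generation_prompt` pass straight through to
        # `apply_chat_template`; with tokenize=False this returns a string
        prompt = tokenizer.apply_chat_template(
            request.messages,
            tokenize=request.tokenize,
            add_generation_prompt=request.add_generation_prompt)
        # Assumption: MQContext exposes a `message_id` used to correlate
        # the response with its request
        return LLMGetTokenizerChatTemplateResponse(
            prompt=prompt, message_id=request.message_id)

One caveat with this reading: `apply_chat_template` returns a list of token IDs rather than a string when `tokenize=True`, which would not validate against the response model's `prompt: str` field, so the sketch only holds for the default `tokenize=False`.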