diff --git a/README.md b/README.md
index 353a84c..ce20d97 100644
--- a/README.md
+++ b/README.md
@@ -25,15 +25,16 @@ MQ:
   port:
   server:
   users:
-    mq-chatgpt-api:
+    neon_llm_chat_gpt:
       password:
       user: neon_chatgpt
-ChatGPT:
+LLM_CHAT_GPT:
   key: ""
   model: "gpt-3.5-turbo"
   role: "You are trying to give a short answer in less than 40 words."
   context_depth: 3
   max_tokens: 100
+  num_parallel_processes: 2
 ```
 
 For example, if your configuration resides in `~/.config`:
diff --git a/docker_overlay/etc/neon/diana.yaml b/docker_overlay/etc/neon/diana.yaml
index 78e5933..3c0fa01 100644
--- a/docker_overlay/etc/neon/diana.yaml
+++ b/docker_overlay/etc/neon/diana.yaml
@@ -14,8 +14,9 @@ MQ:
     mq_handler:
       user: neon_api_utils
       password: Klatchat2021
-ChatGPT:
+LLM_CHAT_GPT:
   model: "gpt-3.5-turbo"
   role: "You are trying to give a short answer in less than 40 words."
   context_depth: 3
-  max_tokens: 100
\ No newline at end of file
+  max_tokens: 100
+  num_parallel_processes: 2
\ No newline at end of file
diff --git a/neon_llm_chatgpt/chatgpt.py b/neon_llm_chatgpt/chatgpt.py
index 5751cfa..0f356c1 100644
--- a/neon_llm_chatgpt/chatgpt.py
+++ b/neon_llm_chatgpt/chatgpt.py
@@ -25,39 +25,132 @@
 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 import openai
+from openai.embeddings_utils import get_embeddings, distances_from_embeddings
+from typing import List, Dict
+from neon_llm_core.llm import NeonLLM
+
+
+class ChatGPT(NeonLLM):
+
+    mq_to_llm_role = {
+        "user": "user",
+        "llm": "assistant"
+    }
 
-class ChatGPT:
     def __init__(self, config):
-        self.model = config["model"]
+        super().__init__(config)
+        self.model_name = config["model"]
         self.role = config["role"]
         self.context_depth = config["context_depth"]
         self.max_tokens = config["max_tokens"]
-        openai.api_key = config["key"]
+        self.api_key = config["key"]
+        self.num_parallel_processes = config["num_parallel_processes"]
+        self.warmup()
+
+    @property
+    def tokenizer(self) -> None:
+        return self._tokenizer
+
+    @property
+    def tokenizer_model_name(self) -> str:
+        return ""
+
+    @property
+    def model(self) -> openai:
+        if self._model is None:
+            openai.api_key = self.api_key
+            self._model = openai
+        return self._model
+
+    @property
+    def llm_model_name(self) -> str:
+        return self.model_name
 
-    @staticmethod
-    def convert_role(role):
-        if role == "user":
-            role_chatgpt = "user"
-        elif role == "llm":
-            role_chatgpt = "assistant"
-        return role_chatgpt
+    @property
+    def _system_prompt(self) -> str:
+        return self.role
 
-    def ask(self, message, chat_history):
+    def warmup(self):
+        self.model
+
+    def get_sorted_answer_indexes(self, question: str, answers: List[str]) -> List[int]:
+        """
+        Creates a sorted list of answer indexes with respect to the order provided in :param answers, ranked by embedding distance
+        Answers are sorted from best to worst
+        :param question: incoming question
+        :param answers: list of answers to rank
+        :returns: list of indexes
+        """
+        if not answers:
+            return []
+        scores = self._score(prompt=question, targets=answers)
+        sorted_items = sorted(zip(range(len(answers)), scores), key=lambda x: x[1])
+        sorted_items_indexes = [x[0] for x in sorted_items]
+        return sorted_items_indexes
+
+    def _call_model(self, prompt: List[Dict[str, str]]) -> str:
+        """
+        Wrapper for ChatGPT Model generation logic
+        :param prompt: Input messages sequence
+        :returns: Output text sequence generated by model
+        """
+
+        response = openai.ChatCompletion.create(
+            model=self.llm_model_name,
+            messages=prompt,
+            temperature=0,
+            max_tokens=self.max_tokens,
+        )
+        text = response.choices[0].message['content']
+
+        return text
+
+    def _assemble_prompt(self, message: str, chat_history: List[List[str]]) -> List[Dict[str, str]]:
+        """
+        Assembles prompt engineering logic
+        Setup Guidance:
+        https://platform.openai.com/docs/guides/gpt/chat-completions-api
+
+        :param message: Incoming prompt
+        :param chat_history: History of preceding conversation
+        :returns: assembled prompt
+        """
         messages = [
-            {"role": "system", "content": self.role},
+            {"role": "system", "content": self._system_prompt},
         ]
         # Context N messages
         for role, content in chat_history[-self.context_depth:]:
             role_chatgpt = self.convert_role(role)
             messages.append({"role": role_chatgpt, "content": content})
         messages.append({"role": "user", "content": message})
-
-        response = openai.ChatCompletion.create(
-            model=self.model,
-            messages=messages,
-            temperature=0,
-            max_tokens=self.max_tokens,
-        )
-        bot_message = response.choices[0].message['content']
-        return bot_message
+        return messages
+
+    def _score(self, prompt: str, targets: List[str]) -> List[float]:
+        """
+        Calculates embedding distances between the model's response to the prompt and each provided text sequence
+        :param prompt: Input text sequence
+        :param targets: Output text sequences
+        :returns: List of distance scores per output text sequence (lower is better)
+        """
+
+        question_embeddings, answers_embeddings = self._embeddings(question=prompt, answers=targets)
+        scores_list = distances_from_embeddings(question_embeddings, answers_embeddings)
+        return scores_list
+
+    def _tokenize(self, prompt: str) -> None:
+        pass
+
+    def _embeddings(self, question: str, answers: List[str]) -> (List[float], List[List[float]]):
+        """
+        Computes embeddings for the model's own response to the question and for each provided answer
+        :param question: Question for the LLM to respond to
+        :param answers: List of provided answers
+        :returns: embeddings of the model's response and embeddings of each answer
+        """
+        response = self.ask(question, [])
+        texts = [response] + answers
+        embeddings = get_embeddings(texts, engine="text-embedding-ada-002")
+        question_embeddings = embeddings[0]
+        answers_embeddings = embeddings[1:]
+        return question_embeddings, answers_embeddings
\ No newline at end of file
diff --git a/neon_llm_chatgpt/config.py b/neon_llm_chatgpt/config.py
deleted file mode 100644
index 2787463..0000000
--- a/neon_llm_chatgpt/config.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# NEON AI (TM) SOFTWARE, Software Development Kit & Application Development System
-# All trademark and other rights reserved by their respective owners
-# Copyright 2008-2021 Neongecko.com Inc.
-# BSD-3
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-# 1. Redistributions of source code must retain the above copyright notice,
-#    this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright notice,
-#    this list of conditions and the following disclaimer in the documentation
-#    and/or other materials provided with the distribution.
-# 3. Neither the name of the copyright holder nor the names of its
-#    contributors may be used to endorse or promote products derived from this
-#    software without specific prior written permission.
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
-# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import json
-
-from os.path import join, dirname, isfile
-from ovos_utils.log import LOG
-from ovos_config.config import Configuration
-
-
-def load_config() -> dict:
-    """
-    Load and return a configuration object,
-    """
-    legacy_config_path = "/app/app/config.json"
-    if isfile(legacy_config_path):
-        LOG.warning(f"Deprecated configuration found at {legacy_config_path}")
-        with open(legacy_config_path) as f:
-            config = json.load(f)
-        return config
-    config = Configuration()
-    if not config:
-        LOG.warning(f"No configuration found! falling back to defaults")
-        default_config_path = join(dirname(__file__), "default_config.json")
-        with open(default_config_path) as f:
-            config = json.load(f)
-    return config
diff --git a/neon_llm_chatgpt/rmq.py b/neon_llm_chatgpt/rmq.py
index 52fcceb..3409a95 100644
--- a/neon_llm_chatgpt/rmq.py
+++ b/neon_llm_chatgpt/rmq.py
@@ -23,71 +23,33 @@
 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import pika
-
-from neon_mq_connector.connector import MQConnector
-from neon_mq_connector.utils.network_utils import dict_to_b64
-from neon_mq_connector.utils.rabbit_utils import create_mq_callback
-from ovos_utils.log import LOG
+from neon_llm_core.rmq import NeonLLMMQConnector
 
 from neon_llm_chatgpt.chatgpt import ChatGPT
-from neon_llm_chatgpt.config import load_config
 
 
-class ChatgptMQ(MQConnector):
+class ChatgptMQ(NeonLLMMQConnector):
+    """
+    Module for processing MQ requests to ChatGPT
     """
-    Module for processing MQ requests from PyKlatchat to LibreTranslate"""
 
     def __init__(self):
-        config = load_config()
-        chatgpt_config = config.get("ChatGPT", None)
-        self.chatGPT = ChatGPT(chatgpt_config)
-
-        self.service_name = 'neon_llm_chatgpt'
-
-        mq_config = config.get("MQ", None)
-        super().__init__(config=mq_config, service_name=self.service_name)
-
-        self.vhost = "/llm"
-        self.queue = "chat_gpt_input"
-        self.register_consumer(name=self.service_name,
-                               vhost=self.vhost,
-                               queue=self.queue,
-                               callback=self.handle_request,
-                               on_error=self.default_error_handler,
-                               auto_ack=False)
-
-    @create_mq_callback(include_callback_props=('channel', 'method', 'body'))
-    def handle_request(self,
-                       channel: pika.channel.Channel,
-                       method: pika.spec.Basic.Return,
-                       body: dict):
-        """
-        Handles requests from MQ to ChatGPT received on queue
-        "request_chatgpt"
-
-        :param channel: MQ channel object (pika.channel.Channel)
-        :param method: MQ return method (pika.spec.Basic.Return)
-        :param body: request body (dict)
-        """
-        message_id = body["message_id"]
-        routing_key = body["routing_key"]
+        super().__init__()
+        self.warmup()
 
-        query = body["query"]
-        history = body["history"]
+    @property
+    def name(self):
+        return "chat_gpt"
 
-        response = self.chatGPT.ask(message=query, chat_history=history)
+    @property
+    def model(self):
+        if self._model is None:
+            self._model = ChatGPT(self.model_config)
+        return self._model
 
-        api_response = {
-            "message_id": message_id,
-            "response": response
-        }
+    def warmup(self):
+        self.model
 
-        channel.basic_publish(exchange='',
-                              routing_key=routing_key,
-                              body=dict_to_b64(api_response),
-                              properties=pika.BasicProperties(
-                                  expiration=str(1000)))
-        channel.basic_ack(method.delivery_tag)
-        LOG.info(f"Handled request: {message_id}")
+    @staticmethod
+    def compose_opinion_prompt(respondent_nick: str, question: str, answer: str) -> str:
+        return f'Why is the answer "{answer}" to the question "{question}" generated by the bot named "{respondent_nick}" good?'
diff --git a/requirements/requirements.txt b/requirements/requirements.txt
index a769c8b..3cb78a9 100644
--- a/requirements/requirements.txt
+++ b/requirements/requirements.txt
@@ -1,4 +1,4 @@
+# model
 openai~=0.27
-neon-mq-connector~=0.7
-ovos-utils~=0.0.32
-ovos-config~=0.0.10
\ No newline at end of file
+# networking
+neon_llm_core==0.0.6
\ No newline at end of file
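
The sketch below is not part of the diff; it is an illustrative example of how the new `LLM_CHAT_GPT` configuration section maps onto the `ChatGPT` class added above. The config keys mirror the YAML examples in this change; the `ask(message, chat_history)` call is assumed to be inherited from the `NeonLLM` base class in `neon_llm_core`, as it is already used positionally in `_embeddings`. The question and history strings are made-up placeholders.

```python
# Hypothetical usage sketch; assumes `ask(message, chat_history)` is provided
# by the NeonLLM base class, as used by `_embeddings` in the diff above.
from neon_llm_chatgpt.chatgpt import ChatGPT

config = {
    "key": "",                    # OpenAI API key (LLM_CHAT_GPT.key)
    "model": "gpt-3.5-turbo",     # LLM_CHAT_GPT.model
    "role": "You are trying to give a short answer in less than 40 words.",
    "context_depth": 3,           # number of history messages included in the prompt
    "max_tokens": 100,            # completion length limit
    "num_parallel_processes": 2,  # new key introduced in this change
}

chatgpt = ChatGPT(config)

# History entries are [role, content] pairs; roles are "user" or "llm",
# mapped to OpenAI's "user"/"assistant" via mq_to_llm_role.
history = [["user", "What is the capital of France?"],
           ["llm", "The capital of France is Paris."]]
print(chatgpt.ask("And what about Germany?", history))
```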