Add newest GPT-4 models (#153)
ianarawjo authored Nov 15, 2023
1 parent c508852 commit 0b32792
Showing 8 changed files with 53 additions and 51 deletions.
6 changes: 3 additions & 3 deletions chainforge/react-server/build/asset-manifest.json
@@ -1,15 +1,15 @@
 {
   "files": {
     "main.css": "/static/css/main.8665fcca.css",
-    "main.js": "/static/js/main.ae4e3642.js",
+    "main.js": "/static/js/main.38f29425.js",
     "static/js/787.4c72bb55.chunk.js": "/static/js/787.4c72bb55.chunk.js",
     "index.html": "/index.html",
     "main.8665fcca.css.map": "/static/css/main.8665fcca.css.map",
-    "main.ae4e3642.js.map": "/static/js/main.ae4e3642.js.map",
+    "main.38f29425.js.map": "/static/js/main.38f29425.js.map",
     "787.4c72bb55.chunk.js.map": "/static/js/787.4c72bb55.chunk.js.map"
   },
   "entrypoints": [
     "static/css/main.8665fcca.css",
-    "static/js/main.ae4e3642.js"
+    "static/js/main.38f29425.js"
   ]
 }
2 changes: 1 addition & 1 deletion chainforge/react-server/build/index.html
@@ -1 +1 @@
-<!doctype html><html lang="en"><head><meta charset="utf-8"/><script async src="https://www.googletagmanager.com/gtag/js?id=G-RN3FDBLMCR"></script><script>function gtag(){dataLayer.push(arguments)}window.dataLayer=window.dataLayer||[],gtag("js",new Date),gtag("config","G-RN3FDBLMCR")</script><link rel="icon" href="/favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="A visual programming environment for prompt engineering"/><link rel="apple-touch-icon" href="/logo192.png"/><link rel="manifest" href="/manifest.json"/><title>ChainForge</title><script defer="defer" src="/static/js/main.ae4e3642.js"></script><link href="/static/css/main.8665fcca.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
+<!doctype html><html lang="en"><head><meta charset="utf-8"/><script async src="https://www.googletagmanager.com/gtag/js?id=G-RN3FDBLMCR"></script><script>function gtag(){dataLayer.push(arguments)}window.dataLayer=window.dataLayer||[],gtag("js",new Date),gtag("config","G-RN3FDBLMCR")</script><link rel="icon" href="/favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="A visual programming environment for prompt engineering"/><link rel="apple-touch-icon" href="/logo192.png"/><link rel="manifest" href="/manifest.json"/><title>ChainForge</title><script defer="defer" src="/static/js/main.38f29425.js"></script><link href="/static/css/main.8665fcca.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

Large diffs are not rendered by default.

Large diffs are not rendered by default.

84 changes: 42 additions & 42 deletions chainforge/react-server/src/ModelSettingSchemas.js
@@ -41,8 +41,8 @@ const ChatGPTSettings = {
       "type": "string",
       "title": "Model Version",
       "description": "Select an OpenAI model to query. For more details on the differences, see the OpenAI API documentation.",
-      "enum": ["gpt-3.5-turbo", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-4", "gpt-4-0613", "gpt-4-0314", "gpt-4-32k", "gpt-4-32k-0613", "gpt-4-32k-0314", "text-davinci-003", "text-davinci-002", "code-davinci-002"],
-      "default": "gpt-3.5-turbo"
+      "enum": ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-4", "gpt-4-1106-preview", "gpt-4-32k", "gpt-4-0613", "gpt-4-0314", "gpt-4-32k-0613", "gpt-4-32k-0314", "gpt-3.5-turbo-instruct", "text-davinci-003", "text-davinci-002", "code-davinci-002"],
+      "default": "gpt-3.5-turbo",
     },
     "system_msg": {
       "type": "string",
@@ -60,6 +60,13 @@ const ChatGPTSettings = {
       "maximum": 2,
       "multipleOf": 0.01
     },
+    "response_format": {
+      "type": "string",
+      "title": "response_format",
+      "enum": ["text", "json_object"],
+      "description": "An object specifying the format that the model must output. Currently, this can only be text or JSON, and it only works with the newest GPT models. IMPORTANT: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly 'stuck' request.",
+      "default": "text",
+    },
     "functions": {
       "type": "string",
       "title": "functions",
@@ -87,6 +94,12 @@ const ChatGPTSettings = {
       "description": "Up to 4 sequences where the API will stop generating further tokens. Enclose stop sequences in double-quotes \"\" and use whitespace to separate them.",
       "default": ""
     },
+    "seed": {
+      "type": "integer",
+      "title": "seed",
+      "description": "If specified, the OpenAI API will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.",
+      "allow_empty_str": true,
+    },
     "max_tokens": {
       "type": "integer",
       "title": "max_tokens",
@@ -110,7 +123,7 @@ const ChatGPTSettings = {
       "maximum": 2,
       "multipleOf": 0.005
     },
-    "logit_bias": {
+    "logit_bias": {
       "type": "string",
       "title": "logit_bias",
       "description": "Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token."
@@ -134,6 +147,9 @@ const ChatGPTSettings = {
       "ui:help": "Defaults to 1.0. Leave at default if you prefer to set top_p.",
       "ui:widget": "range"
     },
+    "response_format": {
+      "ui:help": "Defaults to text.",
+    },
     "functions": {
       "ui:help": "Leave blank to not specify any functions. NOTE: JSON schema MUST NOT have trailing commas.",
       "ui:widget": "textarea",
@@ -160,6 +176,9 @@ const ChatGPTSettings = {
     "max_tokens": {
       "ui:help": "Defaults to infinity."
     },
+    "seed": {
+      "ui:help": "Defaults to blank (no seed)."
+    },
     "logit_bias": {
       "ui:widget": "textarea",
       "ui:help": "Defaults to none."
@@ -180,8 +199,11 @@ const ChatGPTSettings = {
     'stop': (str) => {
       if (str.trim().length === 0) return [];
       return str.match(/"((?:[^"\\]|\\.)*)"/g).map(s => s.substring(1, s.length-1)); // split on double-quotes but exclude escaped double-quotes inside the group
-    }
-  }
+    },
+    'response_format': (str) => {
+      return { type: str };
+    },
+  }
 }
 };
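The two postprocessors above convert raw form strings into what the backend expects: `stop` is parsed out of whitespace-separated, double-quoted sequences, and the new `response_format` is wrapped into the `{ type: ... }` object shape the OpenAI API takes. A quick check of their behavior (an illustrative sketch mirroring the code above, not code from the commit):

```ts
const parseStop = (str: string): string[] => {
  if (str.trim().length === 0) return [];
  // Match double-quoted groups, keeping escaped quotes inside a group,
  // then strip the surrounding quotes from each match.
  return str.match(/"((?:[^"\\]|\\.)*)"/g)!.map((s) => s.substring(1, s.length - 1));
};

parseStop('"STOP" "\\n\\n"'); // => ['STOP', '\\n\\n'] — quotes stripped, two sequences

const toResponseFormat = (str: string) => ({ type: str });
toResponseFormat("json_object"); // => { type: "json_object" }
```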

const GPT4Settings = {
@@ -273,13 +295,13 @@ const ClaudeSettings = {
       "default": -1
     },
     "top_p": {
-      "type": "number",
-      "title": "top_p",
-      "description": "Does nucleus sampling, in which we compute the cumulative distribution over all the options for each subsequent token in decreasing probability order and cut it off once it reaches a particular probability specified by top_p. Defaults to -1, which disables it. Note that you should either alter temperature or top_p, but not both.",
-      "default": -1,
-      "minimum": -1,
-      "maximum": 1,
-      "multipleOf": 0.001,
+      "type": "number",
+      "title": "top_p",
+      "description": "Does nucleus sampling, in which we compute the cumulative distribution over all the options for each subsequent token in decreasing probability order and cut it off once it reaches a particular probability specified by top_p. Defaults to -1, which disables it. Note that you should either alter temperature or top_p, but not both.",
+      "default": -1,
+      "minimum": -1,
+      "maximum": 1,
+      "multipleOf": 0.001,
     },
   }
 },
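The (unchanged) `top_p` description above is a prose statement of nucleus sampling. For clarity, a small sketch of the cutoff it describes — not taken from ChainForge or the Anthropic API:

```ts
// Keep the smallest set of highest-probability tokens whose cumulative
// probability reaches topP; sampling then happens only among these.
function nucleusCutoff(probs: Map<string, number>, topP: number): Map<string, number> {
  const sorted = [...probs.entries()].sort((a, b) => b[1] - a[1]); // decreasing probability
  const kept = new Map<string, number>();
  let cumulative = 0;
  for (const [token, p] of sorted) {
    kept.set(token, p);
    cumulative += p;
    if (cumulative >= topP) break; // cut off once we pass the top_p mass
  }
  return kept; // renormalize these probabilities before sampling
}
```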
@@ -342,11 +364,7 @@ const PaLM2Settings = {
       "title": "Model",
       "description": "Select a PaLM model to query. For more details on the differences, see the Google PaLM API documentation.",
       "enum": ["text-bison-001", "chat-bison-001"],
-      "default": "chat-bison-001",
-      "shortname_map": {
-        "text-bison-001": "PaLM2-text",
-        "chat-bison-001": "PaLM2-chat",
-      }
+      "default": "chat-bison-001"
     },
     "temperature": {
       "type": "number",
@@ -618,19 +636,9 @@ const HuggingFaceTextInferenceSettings = {
     "model": {
       "type": "string",
       "title": "Model",
-      "description": "Select a suggested HuggingFace-hosted model to query using the Inference API. For more details, check out https://huggingface.co/inference-api",
-      "enum": ["mistralai/Mistral-7B-Instruct-v0.1", "HuggingFaceH4/zephyr-7b-beta", "tiiuae/falcon-7b-instruct", "microsoft/DialoGPT-large", "bigscience/bloom-560m", "gpt2", "bigcode/santacoder", "bigcode/starcoder", "Other (HuggingFace)"],
-      "default": "tiiuae/falcon-7b-instruct",
-      "shortname_map": {
-        "mistralai/Mistral-7B-Instruct-v0.1": "Mistral-7B",
-        "HuggingFaceH4/zephyr-7b-beta": "Zephyr-7B",
-        "tiiuae/falcon-7b-instruct": "Falcon-7B",
-        "microsoft/DialoGPT-large": "DialoGPT",
-        "bigscience/bloom-560m": "Bloom560M",
-        "gpt2": "GPT-2",
-        "bigcode/santacoder": "santacoder",
-        "bigcode/starcoder": "starcoder"
-      }
+      "description": "Select a suggested HuggingFace-hosted model to query using the Inference API. For more details, check out https://huggingface.co/inference-api",
+      "enum": ["tiiuae/falcon-7b-instruct", "microsoft/DialoGPT-large", "bigscience/bloom-560m", "gpt2", "bigcode/santacoder", "bigcode/starcoder", "Other (HuggingFace)"],
+      "default": "tiiuae/falcon-7b-instruct",
     },
     "custom_model": {
       "type": "string",
@@ -673,9 +681,9 @@ const HuggingFaceTextInferenceSettings = {
       "type": "number",
       "title": "top_p",
       "description": "Sets the maximum cumulative probability of tokens to sample from (from 0 to 1.0). Set to -1 to remain unspecified.",
-      "default": -1,
-      "minimum": -1,
-      "maximum": 1,
+      "default": -1,
+      "minimum": -1,
+      "maximum": 1,
       "multipleOf": 0.001,
     },
     "repetition_penalty": {
@@ -783,14 +791,6 @@ const AlephAlphaLuminousSettings = {
         "luminous-supreme-control",
       ],
       default: "luminous-base",
-      shortname_map: {
-        "luminous-extended": "luminous-ext",
-        "luminous-extended-control": "luminous-ext-ctrl",
-        "luminous-base-control": "luminous-base-ctrl",
-        "luminous-base": "luminous-base",
-        "luminous-supreme": "luminous-supr",
-        "luminous-supreme-control": "luminous-supr-ctrl",
-      }
     },
     temperature: {
       type: "number",
@@ -1097,7 +1097,7 @@ export const getTemperatureSpecForModel = (modelName) => {
 };
 
 export const postProcessFormData = (settingsSpec, formData) => {
-  // Strip all 'model' and 'shortname' props in the submitted form, as these are passed elsewhere or unnecessary for the backend
+  // Strip all 'model' and 'shortname' props in the submitted form, as these are passed elsewhere or unecessary for the backend
   const skip_keys = {'model': true, 'shortname': true};
 
   let new_data = {};
2 changes: 2 additions & 0 deletions chainforge/react-server/src/backend/utils.ts
@@ -166,6 +166,8 @@ export async function call_chatgpt(prompt: string, model: LLM, n: number = 1, te
   let modelname: string = model.toString();
   if (params?.stop !== undefined && (!Array.isArray(params.stop) || params.stop.length === 0))
     delete params.stop;
+  if (params?.seed !== undefined && (params.seed.toString().length === 0))
+    delete params?.seed;
   if (params?.functions !== undefined && (!Array.isArray(params.functions) || params.functions.length === 0))
     delete params?.functions;
   if (params?.function_call !== undefined && ((!(typeof params.function_call === 'string')) || params.function_call.trim().length === 0))
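The two added lines strip `seed` from the parameters whenever the settings form leaves it blank, so the request body never carries an empty seed. In effect (an illustrative fragment, not code from the commit):

```ts
const params: { seed?: number | string } = { seed: "" }; // empty form field
if (params?.seed !== undefined && params.seed.toString().length === 0)
  delete params?.seed;
console.log(params); // => {} — no empty `seed` is sent to the OpenAI API
```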
2 changes: 1 addition & 1 deletion setup.py
@@ -6,7 +6,7 @@ def readme():
 
 setup(
     name='chainforge',
-    version='0.2.6.8',
+    version='0.2.6.9',
     packages=find_packages(),
     author="Ian Arawjo",
     description="A Visual Programming Environment for Prompt Engineering",
