Skip to content

Commit

Permalink
Merge remote-tracking branch 'upstream/master'
Browse files Browse the repository at this point in the history
  • Loading branch information
haiiliin committed Oct 28, 2023
2 parents fba1f9d + 5a530df commit d8a7904
Show file tree
Hide file tree
Showing 15 changed files with 256 additions and 62 deletions.
23 changes: 18 additions & 5 deletions config.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,10 +83,12 @@

# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo",
"gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
"api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
"gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4",
"chatglm", "moss", "newbing", "claude-2"]
# P.S. 其他可用的模型还包括 ["qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random"
# "spark", "sparkv2", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
# "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]


# 百度千帆(LLM_MODEL="qianfan")
Expand Down Expand Up @@ -139,12 +141,16 @@
SLACK_CLAUDE_USER_TOKEN = ''


# 如果需要使用AZURE 详情请见额外文档 docs\use_azure.md
# 如果需要使用AZURE(方法一:单个azure模型部署)详情请见额外文档 docs\use_azure.md
AZURE_ENDPOINT = "https://你亲手写的api名称.openai.azure.com/"
AZURE_API_KEY = "填入azure openai api的密钥" # 建议直接在API_KEY处填写,该选项即将被弃用
AZURE_ENGINE = "填入你亲手写的部署名" # 读 docs\use_azure.md


# 如果需要使用AZURE(方法二:多个azure模型部署+动态切换)详情请见额外文档 docs\use_azure.md
AZURE_CFG_ARRAY = {}


# 使用Newbing (不推荐使用,未来将删除)
NEWBING_STYLE = "creative" # ["creative", "balanced", "precise"]
NEWBING_COOKIES = """
Expand Down Expand Up @@ -191,6 +197,10 @@
ALLOW_RESET_CONFIG = False


# 在使用AutoGen插件时,是否使用Docker容器运行代码
AUTOGEN_USE_DOCKER = True


# 临时的上传文件夹位置,请勿修改
PATH_PRIVATE_UPLOAD = "private_upload"

Expand All @@ -216,13 +226,16 @@
│ ├── API_ORG(不常用)
│ └── API_URL_REDIRECT(不常用)
├── "azure-gpt-3.5" 等azure模型
├── "azure-gpt-3.5" 等azure模型(单个azure模型,不需要动态切换)
│ ├── API_KEY
│ ├── AZURE_ENDPOINT
│ ├── AZURE_API_KEY
│ ├── AZURE_ENGINE
│ └── API_URL_REDIRECT
├── "azure-gpt-3.5" 等azure模型(多个azure模型,需要动态切换,高优先级)
│ └── AZURE_CFG_ARRAY
├── "spark" 星火认知大模型 spark & sparkv2
│ ├── XFYUN_APPID
│ ├── XFYUN_API_SECRET
Expand Down
2 changes: 1 addition & 1 deletion crazy_functions/agent_fns/auto_agent.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate
from toolbox import report_execption, get_log_folder, update_ui_lastest_msg, Singleton
from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom
from crazy_functions.agent_fns.autogen_general import AutoGenGeneral
from crazy_functions.agent_fns.general import AutoGenGeneral
import time


Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,12 +36,8 @@ def do_audogen(self, input):
# ⭐⭐ 子进程执行
input = input.content
with ProxyNetworkActivate("AutoGen"):
from autogen import AssistantAgent, UserProxyAgent
config_list = [{
'model': self.llm_kwargs['llm_model'],
'api_key': self.llm_kwargs['api_key'],
},]
code_execution_config={"work_dir": self.autogen_work_dir, "use_docker":True}
config_list = self.get_config_list()
code_execution_config={"work_dir": self.autogen_work_dir, "use_docker":self.use_docker}
agents = self.define_agents()
user_proxy = None
assistant = None
Expand All @@ -67,6 +63,20 @@ def do_audogen(self, input):
tb_str = '```\n' + trimmed_format_exc() + '```'
self.child_conn.send(PipeCom("done", "AutoGen 执行失败: \n\n" + tb_str))

def get_config_list(self):
    """Build the OpenAI-style config list consumed by autogen.

    If the configured model name carries the ``api2d-`` routing prefix,
    strip the prefix and point the entry at the api2d relay endpoint;
    otherwise the model name is used as-is with the default endpoint.

    Returns:
        list[dict]: a single-element config list with ``model`` and
        ``api_key`` keys, plus ``api_base`` for api2d models.
    """
    llm_model = self.llm_kwargs['llm_model']
    entry = {
        'model': llm_model,
        'api_key': self.llm_kwargs['api_key'],
    }
    if llm_model.startswith('api2d-'):
        # strip the routing prefix and redirect to the api2d relay
        entry['model'] = llm_model[len('api2d-'):]
        entry['api_base'] = "https://openai.api2d.net/v1"
    return [entry]

def subprocess_worker(self, child_conn):
# ⭐⭐ 子进程执行
self.child_conn = child_conn
Expand Down
32 changes: 29 additions & 3 deletions crazy_functions/agent_fns/pipe.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
from toolbox import get_log_folder, update_ui, gen_time_str, trimmed_format_exc, promote_file_to_downloadzone
from toolbox import get_log_folder, update_ui, gen_time_str, get_conf, promote_file_to_downloadzone
from crazy_functions.agent_fns.watchdog import WatchDog
import time, os

class PipeCom():
Expand All @@ -19,6 +20,16 @@ def __init__(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, w
self.system_prompt = system_prompt
self.web_port = web_port
self.alive = True
self.use_docker, = get_conf('AUTOGEN_USE_DOCKER')

# create a thread to monitor self.heartbeat, terminate the instance if no heartbeat for a long time
timeout_seconds = 5*60
self.heartbeat_watchdog = WatchDog(timeout=timeout_seconds, bark_fn=self.terminate, interval=5)
self.heartbeat_watchdog.begin_watch()

def feed_heartbeat_watchdog(self):
# feed this `dog`, so the dog will not `bark` (bark_fn will terminate the instance)
self.heartbeat_watchdog.feed()

def is_alive(self):
return self.alive
Expand Down Expand Up @@ -50,7 +61,7 @@ def immediate_showoff_when_possible(self, fp):
# 获取fp的拓展名
file_type = fp.split('.')[-1]
# 如果是文本文件, 则直接显示文本内容
if file_type in ['png', 'jpg']:
if file_type.lower() in ['png', 'jpg']:
image_path = os.path.abspath(fp)
self.chatbot.append(['检测到新生图像:', f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'])
yield from update_ui(chatbot=self.chatbot, history=self.history)
Expand Down Expand Up @@ -79,7 +90,7 @@ def overwatch_workdir_file_change(self):
for f in change_list:
res = promote_file_to_downloadzone(f)
file_links += f'<br/><a href="file={res}" target="_blank">{res}</a>'
yield from self.immediate_showoff_when_possible(file_path)
yield from self.immediate_showoff_when_possible(f)

self.chatbot.append(['检测到新生文档.', f'文档清单如下: {file_links}'])
yield from update_ui(chatbot=self.chatbot, history=self.history)
Expand All @@ -98,9 +109,17 @@ def main_process_ui_control(self, txt, create_or_resume) -> str:
self.terminate()
return "terminate"

# patience = 10

while True:
time.sleep(0.5)
if not self.alive:
# the heartbeat watchdog might have it killed
self.terminate()
return "terminate"

if self.parent_conn.poll():
self.feed_heartbeat_watchdog()
if '[GPT-Academic] 等待中' in self.chatbot[-1][-1]:
self.chatbot.pop(-1) # remove the last line
msg = self.parent_conn.recv() # PipeCom
Expand All @@ -124,10 +143,17 @@ def main_process_ui_control(self, txt, create_or_resume) -> str:
# do not terminate here, leave the subprocess_worker instance alive
return "wait_feedback"
else:
self.feed_heartbeat_watchdog()
if '[GPT-Academic] 等待中' not in self.chatbot[-1][-1]:
# begin_waiting_time = time.time()
self.chatbot.append(["[GPT-Academic] 等待AutoGen执行结果 ...", "[GPT-Academic] 等待中"])
self.chatbot[-1] = [self.chatbot[-1][0], self.chatbot[-1][1].replace("[GPT-Academic] 等待中", "[GPT-Academic] 等待中.")]
yield from update_ui(chatbot=self.chatbot, history=self.history)
# if time.time() - begin_waiting_time > patience:
# self.chatbot.append([f"结束", "等待超时, 终止AutoGen程序。"])
# yield from update_ui(chatbot=self.chatbot, history=self.history)
# self.terminate()
# return "terminate"

self.terminate()
return "terminate"
Expand Down
28 changes: 28 additions & 0 deletions crazy_functions/agent_fns/watchdog.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
import threading, time

class WatchDog():
    """Watchdog timer: a daemon thread invokes `bark_fn` once if `feed()`
    has not been called within `timeout` seconds.

    Parameters:
        timeout:  seconds of silence tolerated before barking.
        bark_fn:  zero-argument callable invoked on timeout
                  (e.g. a terminate hook).
        interval: polling period in seconds of the watcher thread.
        msg:      optional message printed just before barking.
    """
    def __init__(self, timeout, bark_fn, interval=3, msg="") -> None:
        # Fix: initialize last_feed right away so watch() never subtracts
        # None (the original left it None until begin_watch(), which raised
        # TypeError if watch() ran first).
        self.last_feed = time.time()
        self.timeout = timeout
        self.bark_fn = bark_fn
        self.interval = interval
        self.msg = msg
        self.kill_dog = False  # set True to stop the watcher without barking

    def watch(self):
        """Poll loop: bark once when the feed goes stale, or exit when killed."""
        while True:
            if self.kill_dog:
                break  # external shutdown request — no bark
            if time.time() - self.last_feed > self.timeout:
                if len(self.msg) > 0:
                    print(self.msg)
                self.bark_fn()
                break  # bark at most once, then retire
            time.sleep(self.interval)

    def begin_watch(self):
        """Reset the feed timestamp and start the watcher daemon thread."""
        self.last_feed = time.time()
        th = threading.Thread(target=self.watch)
        th.daemon = True  # must not block interpreter exit
        th.start()

    def feed(self):
        """Record activity; postpones the bark by another `timeout` seconds."""
        self.last_feed = time.time()
8 changes: 3 additions & 5 deletions crazy_functions/多智能体.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,7 @@
# 本源代码中, ⭐ = 关键步骤
"""
测试:
- 裁剪图像,保留下半部分
- 交换图像的蓝色通道和红色通道
- 将图像转为灰度图像
- 将csv文件转excel表格
- show me the solution of $x^2=cos(x)$, solve this problem with figure, and plot and save image to t.jpg
Testing:
- Crop the image, keeping the bottom half.
Expand Down Expand Up @@ -35,7 +32,8 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
web_port 当前软件运行的端口号
"""
# 检查当前的模型是否符合要求
supported_llms = ['gpt-3.5-turbo-16k', 'gpt-4', 'gpt-4-32k']
supported_llms = ['gpt-3.5-turbo-16k', 'gpt-4', 'gpt-4-32k',
'api2d-gpt-3.5-turbo-16k', 'api2d-gpt-4']
llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
if llm_kwargs['llm_model'] not in supported_llms:
chatbot.append([f"处理任务: {txt}", f"当前插件只支持{str(supported_llms)}, 当前模型{llm_kwargs['llm_model']}."])
Expand Down
27 changes: 1 addition & 26 deletions crazy_functions/语音助手.py
Original file line number Diff line number Diff line change
@@ -1,39 +1,14 @@
from toolbox import update_ui
from toolbox import CatchException, get_conf, markdown_convertion
from crazy_functions.crazy_utils import input_clipping
from crazy_functions.agent_fns.watchdog import WatchDog
from request_llm.bridge_all import predict_no_ui_long_connection
import threading, time
import numpy as np
from .live_audio.aliyunASR import AliyunASR
import json
import re

class WatchDog():
    """Watchdog timer: a daemon thread invokes `bark_fn` once if `feed()`
    has not been called within `timeout` seconds.

    NOTE(review): this is a local duplicate of the WatchDog added in
    crazy_functions/agent_fns/watchdog.py — prefer importing that one.
    """
    def __init__(self, timeout, bark_fn, interval=3, msg="") -> None:
        # last_feed: timestamp of the most recent feed(); remains None until
        # begin_watch() is called (watch() must not run before then, or the
        # time subtraction below would fail on None).
        self.last_feed = None
        self.timeout = timeout
        self.bark_fn = bark_fn
        self.interval = interval
        self.msg = msg
        # kill_dog: set True to stop the watcher thread without barking
        self.kill_dog = False

    def watch(self):
        """Poll loop: call bark_fn once when the feed goes stale, or exit when killed."""
        while True:
            if self.kill_dog: break
            if time.time() - self.last_feed > self.timeout:
                # optional message before the (single) bark
                if len(self.msg) > 0: print(self.msg)
                self.bark_fn()
                break
            time.sleep(self.interval)

    def begin_watch(self):
        """Reset the feed timestamp and start the watcher as a daemon thread."""
        self.last_feed = time.time()
        th = threading.Thread(target=self.watch)
        th.daemon = True
        th.start()

    def feed(self):
        """Record activity; postpones the bark by another `timeout` seconds."""
        self.last_feed = time.time()

def chatbot2history(chatbot):
history = []
Expand Down
39 changes: 39 additions & 0 deletions docs/use_azure.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,42 @@
# 微软Azure云接入指南

## 方法一(旧方法,只能接入一个Azure模型)

- 通过以下教程,获取AZURE_ENDPOINT,AZURE_API_KEY,AZURE_ENGINE,直接修改 config 配置即可。配置的修改方法见本项目wiki。

## 方法二(新方法,接入多个Azure模型,并支持动态切换)

- 在方法一的基础上,注册并获取多组 AZURE_ENDPOINT,AZURE_API_KEY,AZURE_ENGINE
- 修改config中的AZURE_CFG_ARRAY配置项,按照格式填入多个Azure模型的配置,如下所示:

```
AZURE_CFG_ARRAY = {
"azure-gpt-3.5": # 第一个模型,azure模型必须以"azure-"开头
{
"AZURE_ENDPOINT": "https://你亲手写的api名称.openai.azure.com/",
"AZURE_API_KEY": "cccccccccccccccccccccccccccccccc",
"AZURE_ENGINE": "填入你亲手写的部署名1",
"AZURE_MODEL_MAX_TOKEN": 4096,
},
"azure-gpt-4": # 第二个模型,azure模型必须以"azure-"开头
{
"AZURE_ENDPOINT": "https://你亲手写的api名称.openai.azure.com/",
"AZURE_API_KEY": "dddddddddddddddddddddddddddddddd",
"AZURE_ENGINE": "填入你亲手写的部署名2",
"AZURE_MODEL_MAX_TOKEN": 8192,
},
"azure-gpt-3.5-16k": # 第三个模型,azure模型必须以"azure-"开头
{
"AZURE_ENDPOINT": "https://你亲手写的api名称.openai.azure.com/",
"AZURE_API_KEY": "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee",
"AZURE_ENGINE": "填入你亲手写的部署名3",
"AZURE_MODEL_MAX_TOKEN": 16384,
},
}
```



# 通过微软Azure云服务申请 Openai API

由于Openai和微软的关系,现在是可以通过微软的Azure云计算服务直接访问openai的api,免去了注册和网络的问题。
Expand Down
14 changes: 11 additions & 3 deletions multi_language.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
4. Run `python multi_language.py`.
Note: You need to run it multiple times to increase translation coverage because GPT makes mistakes sometimes.
(You can also run `CACHE_ONLY=True python multi_language.py` to use cached translation mapping)
5. Find the translated program in `multi-language\English\*`
Expand All @@ -35,6 +36,8 @@
import time
from toolbox import get_conf

CACHE_ONLY = os.environ.get('CACHE_ONLY', False)

CACHE_FOLDER, = get_conf('PATH_LOGGING')

blacklist = ['multi-language', CACHE_FOLDER, '.git', 'private_upload', 'multi_language.py', 'build', '.github', '.vscode', '__pycache__', 'venv']
Expand Down Expand Up @@ -336,7 +339,10 @@ def extract_chinese_characters_from_directory(directory_path):
if d not in cached_translation_keys:
need_translate.append(d)

need_translate_mapping = trans(need_translate, language=LANG_STD, special=True)
if CACHE_ONLY:
need_translate_mapping = {}
else:
need_translate_mapping = trans(need_translate, language=LANG_STD, special=True)
map_to_json(need_translate_mapping, language=LANG_STD)
cached_translation = read_map_from_json(language=LANG_STD)
cached_translation = dict(sorted(cached_translation.items(), key=lambda x: -len(x[0])))
Expand Down Expand Up @@ -476,8 +482,10 @@ def get_strings(node):
if d not in cached_translation_keys:
need_translate.append(d)


up = trans_json(need_translate, language=LANG, special=False)
if CACHE_ONLY:
up = {}
else:
up = trans_json(need_translate, language=LANG, special=False)
map_to_json(up, language=LANG)
cached_translation = read_map_from_json(language=LANG)
LANG_STD = 'std'
Expand Down
Loading

0 comments on commit d8a7904

Please sign in to comment.