From a711db0b5b357b0ac886bd0531d1ffc06bc23403 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Wed, 25 Oct 2023 11:32:32 +0800 Subject: [PATCH 001/117] stashed commit --- config.py | 5 ++++ crazy_functions/crazy_utils.py | 9 +++--- crazy_functions/latex_fns/latex_actions.py | 22 ++++++++++++-- crazy_functions/latex_fns/latex_toolbox.py | 34 ++++++++++++++++++++++ tests/test_plugins.py | 4 ++- 5 files changed, 66 insertions(+), 8 deletions(-) diff --git a/config.py b/config.py index 387fab6152..b76997fccf 100644 --- a/config.py +++ b/config.py @@ -212,6 +212,11 @@ # 自定义按钮的最大数量限制 NUM_CUSTOM_BASIC_BTN = 4 + +# LATEX实验性功能 +LATEX_EXPERIMENTAL = False + + """ 在线大模型配置关联关系示意图 │ diff --git a/crazy_functions/crazy_utils.py b/crazy_functions/crazy_utils.py index 8533d088e7..8cc4042f17 100644 --- a/crazy_functions/crazy_utils.py +++ b/crazy_functions/crazy_utils.py @@ -205,13 +205,12 @@ def _req_gpt(index, inputs, history, sys_prompt): retry_op = retry_times_at_unknown_error exceeded_cnt = 0 mutable[index][2] = "执行中" + detect_timeout = lambda: len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > watch_dog_patience while True: # watchdog error - if len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > watch_dog_patience: - raise RuntimeError("检测到程序终止。") + if detect_timeout(): raise RuntimeError("检测到程序终止。") try: # 【第一种情况】:顺利完成 - # time.sleep(10); raise RuntimeError("测试") gpt_say = predict_no_ui_long_connection( inputs=inputs, llm_kwargs=llm_kwargs, history=history, sys_prompt=sys_prompt, observe_window=mutable[index], console_slience=True @@ -219,7 +218,7 @@ def _req_gpt(index, inputs, history, sys_prompt): mutable[index][2] = "已成功" return gpt_say except ConnectionAbortedError as token_exceeded_error: - # 【第二种情况】:Token溢出, + # 【第二种情况】:Token溢出 if handle_token_exceed: exceeded_cnt += 1 # 【选择处理】 尝试计算比例,尽可能多地保留文本 @@ -240,6 +239,7 @@ def _req_gpt(index, inputs, history, sys_prompt): return gpt_say # 放弃 except: # 【第三种情况】:其他错误 + if detect_timeout(): raise RuntimeError("检测到程序终止。") tb_str = '```\n' + trimmed_format_exc() + '```' print(tb_str) gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n" @@ -256,6 +256,7 @@ def _req_gpt(index, inputs, history, sys_prompt): for i in range(wait): mutable[index][2] = f"{fail_info}等待重试 {wait-i}"; time.sleep(1) # 开始重试 + if detect_timeout(): raise RuntimeError("检测到程序终止。") mutable[index][2] = f"重试中 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}" continue # 返回重试 else: diff --git a/crazy_functions/latex_fns/latex_actions.py b/crazy_functions/latex_fns/latex_actions.py index 7e561df2a5..6232b19785 100644 --- a/crazy_functions/latex_fns/latex_actions.py +++ b/crazy_functions/latex_fns/latex_actions.py @@ -1,9 +1,10 @@ from toolbox import update_ui, update_ui_lastest_msg, get_log_folder -from toolbox import zip_folder, objdump, objload, promote_file_to_downloadzone +from toolbox import get_conf, objdump, objload, promote_file_to_downloadzone from .latex_toolbox import PRESERVE, TRANSFORM from .latex_toolbox import set_forbidden_text, set_forbidden_text_begin_end, set_forbidden_text_careful_brace from .latex_toolbox import reverse_forbidden_text_careful_brace, reverse_forbidden_text, convert_to_linklist, post_process from .latex_toolbox import fix_content, find_main_tex_file, merge_tex_files, compile_latex_with_timeout +from .latex_toolbox import find_title_and_abs import os, shutil import re @@ -90,7 +91,15 @@ def __init__(self) -> None: "项目Github地址 
\\url{https://github.com/binary-husky/gpt_academic/}。"
         # 请您不要删除或修改这行警告,除非您是论文的原作者(如果您是论文原作者,欢迎加README中的QQ联系开发者)
         self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\"
+        self.title = "unknown"
+        self.abstract = "unknown"
 
+    def read_title_and_abstract(self, txt):
+        title, abstract = find_title_and_abs(txt)
+        if title is not None:
+            self.title = title.replace('\n', ' ').replace('\\\\', ' ').replace('  ', ' ').replace('  ', ' ')
+        if abstract is not None:
+            self.abstract = abstract.replace('\n', ' ').replace('\\\\', ' ').replace('  ', ' ').replace('  ', ' ')
 
     def merge_result(self, arr, mode, msg, buggy_lines=[], buggy_line_surgery_n_lines=10):
         """
@@ -234,8 +243,8 @@ def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin
     chatbot.append((f"Latex文件融合完成", f'[Local Message] 正在精细切分latex文件,这需要一段时间计算,文档越长耗时越长,请耐心等待。'))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
     lps = LatexPaperSplit()
+    lps.read_title_and_abstract(merged_content)
     res = lps.split(merged_content, project_folder, opts) # 消耗时间的函数
-
     # <-------- 拆分过长的latex片段 ---------->
     pfg = LatexPaperFileGroup()
     for index, r in enumerate(res):
@@ -256,12 +265,19 @@ def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin
 
     else:
         # <-------- gpt 多线程请求 ---------->
+        LATEX_EXPERIMENTAL, = get_conf('LATEX_EXPERIMENTAL')
+        history_array = [[""] for _ in range(n_split)]
+        if LATEX_EXPERIMENTAL:
+            paper_meta = f"The paper you are processing is `{lps.title}`, part of the abstract is `{lps.abstract}`"
+            paper_meta_max_len = 888
+            history_array = [[ paper_meta[:paper_meta_max_len] + '...', "Understand, what should I do?"] for _ in range(n_split)]
+
         gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
             inputs_array=inputs_array,
             inputs_show_user_array=inputs_show_user_array,
             llm_kwargs=llm_kwargs,
             chatbot=chatbot,
-            history_array=[[""] for _ in range(n_split)],
+            history_array=history_array,
             sys_prompt_array=sys_prompt_array,
             # max_workers=5,  # 并行任务数量限制, 最多同时执行5个, 其他的排队等待
             scroller_max_len = 40
diff --git a/crazy_functions/latex_fns/latex_toolbox.py b/crazy_functions/latex_fns/latex_toolbox.py
index 330cb6570b..b56825aa65 100644
--- a/crazy_functions/latex_fns/latex_toolbox.py
+++ b/crazy_functions/latex_fns/latex_toolbox.py
@@ -318,6 +318,40 @@ def merge_tex_files_(project_foler, main_file, mode):
             main_file = main_file[:s.span()[0]] + c + main_file[s.span()[1]:]
     return main_file
 
+
+def find_title_and_abs(main_file):
+
+    def extract_abstract_1(text):
+        pattern = r"\\abstract\{(.*?)\}"
+        match = re.search(pattern, text, re.DOTALL)
+        if match:
+            return match.group(1)
+        else:
+            return None
+
+    def extract_abstract_2(text):
+        pattern = r"\\begin\{abstract\}(.*?)\\end\{abstract\}"
+        match = re.search(pattern, text, re.DOTALL)
+        if match:
+            return match.group(1)
+        else:
+            return None
+
+    def extract_title(string):
+        pattern = r"\\title\{(.*?)\}"
+        match = re.search(pattern, string, re.DOTALL)
+
+        if match:
+            return match.group(1)
+        else:
+            return None
+
+    abstract = extract_abstract_1(main_file)
+    if abstract is None:
+        abstract = extract_abstract_2(main_file)
+    title = extract_title(main_file)
+    return title, abstract
+
 def merge_tex_files(project_foler, main_file, mode):
     """
     Merge Tex project recursively
diff --git a/tests/test_plugins.py b/tests/test_plugins.py
index 5998bc461d..8470895d91 100644
--- a/tests/test_plugins.py
+++ b/tests/test_plugins.py
@@ -11,7 +11,9 @@ def validate_path(): dir_name = os.path.dirname(__file__); root_dir_assume = os.
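Taken together, the pieces added in this commit work as follows: `find_title_and_abs` pulls the title and abstract out of the merged TeX source with `re.DOTALL` searches, and, when the `LATEX_EXPERIMENTAL` config flag is on, that metadata is injected as a fabricated first dialogue turn so every translation thread starts out knowing which paper it is working on. Below is a minimal, self-contained sketch of that flow; the toy `main_file` source and the `n_split` fragment count are placeholders, not part of the patch.

```python
import re

def find_title_and_abs(main_file):
    # Compressed restatement of the patch's three extractors:
    # \abstract{...} first, the abstract environment as a fallback,
    # and \title{...} for the title, all searched with re.DOTALL.
    def first_group(pattern, text):
        match = re.search(pattern, text, re.DOTALL)
        return match.group(1) if match else None

    abstract = first_group(r"\\abstract\{(.*?)\}", main_file)
    if abstract is None:
        abstract = first_group(r"\\begin\{abstract\}(.*?)\\end\{abstract\}", main_file)
    title = first_group(r"\\title\{(.*?)\}", main_file)
    return title, abstract

# Toy inputs -- placeholders, not part of the patch.
main_file = r"""
\title{A Toy Paper}
\begin{abstract}
We study nothing in particular.
\end{abstract}
"""
n_split = 3  # number of LaTeX fragments after splitting

title, abstract = find_title_and_abs(main_file)
# read_title_and_abstract() additionally flattens newlines and '\\' breaks.
abstract = abstract.replace('\n', ' ').strip()

paper_meta = f"The paper you are processing is `{title}`, part of the abstract is `{abstract}`"
paper_meta_max_len = 888  # same truncation limit as the patch
# One fabricated two-turn history per worker thread, mirroring history_array.
history_array = [[paper_meta[:paper_meta_max_len] + '...', "Understand, what should I do?"]
                 for _ in range(n_split)]
print(history_array[0][0])
```

Priming `history_array` rather than extending `sys_prompt_array` leaves the per-fragment system prompts untouched while still giving each worker paper-level context.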
from tests.test_utils import plugin_test # plugin_test(plugin='crazy_functions.函数动态生成->函数动态生成', main_input='交换图像的蓝色通道和红色通道', advanced_arg={"file_path_arg": "./build/ants.jpg"}) - plugin_test(plugin='crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF', main_input="2307.07522") + # plugin_test(plugin='crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF', main_input="2307.07522") + + plugin_test(plugin='crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF', main_input="G:/SEAFILE_LOCAL/50503047/我的资料库/学位/paperlatex/aaai/Fu_8368_with_appendix") # plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='修改api-key为sk-jhoejriotherjep') From cf085565a7c6ad11687eddb0e1541674e2534fac Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sat, 28 Oct 2023 17:44:17 +0800 Subject: [PATCH 002/117] rename folder --- .gitignore | 6 +- README.md | 6 +- check_proxy.py | 2 +- config.py | 2 +- ...50\346\226\207\346\266\246\350\211\262.py" | 2 +- ...50\346\226\207\347\277\273\350\257\221.py" | 2 +- crazy_functions/crazy_utils.py | 6 +- crazy_functions/latex_fns/latex_actions.py | 2 +- crazy_functions/pdf_fns/parse_pdf.py | 2 +- crazy_functions/vt_fns/vt_call_plugin.py | 2 +- crazy_functions/vt_fns/vt_modify_config.py | 2 +- ...76\347\211\207\347\224\237\346\210\220.py" | 2 +- ...47\273\223word\346\226\207\346\241\243.py" | 2 +- ...23\351\237\263\350\247\206\351\242\221.py" | 2 +- ...07\217Markdown\347\277\273\350\257\221.py" | 2 +- ...347\273\223PDF\346\226\207\346\241\243.py" | 2 +- ...3_\345\244\232\347\272\277\347\250\213.py" | 2 +- ...07\346\241\243\345\206\205\345\256\271.py" | 2 +- ...201\224\347\275\221\347\232\204ChatGPT.py" | 2 +- ...21\347\232\204ChatGPT_bing\347\211\210.py" | 2 +- ...32\347\251\272\347\273\210\347\253\257.py" | 2 +- ...350\247\243\346\236\220JupyterNotebook.py" | 2 +- ...55\351\237\263\345\212\251\346\211\213.py" | 2 +- docker-compose.yml | 2 +- docs/GithubAction+AllCapacity | 10 +- docs/GithubAction+ChatGLM+Moss | 10 +- docs/GithubAction+JittorLLMs | 8 +- docs/README.md.German.md | 8 +- docs/README.md.Italian.md | 8 +- docs/README.md.Korean.md | 6 +- docs/README.md.Portuguese.md | 8 +- docs/README_EN.md | 8 +- docs/README_FR.md | 8 +- docs/README_JP.md | 8 +- docs/README_RS.md | 8 +- docs/self_analysis.md | 96 +++++++++---------- docs/translate_english.json | 4 +- docs/translate_japanese.json | 4 +- docs/translate_traditionalchinese.json | 4 +- main.py | 2 +- {request_llm => request_llms}/README.md | 2 +- {request_llm => request_llms}/bridge_all.py | 0 .../bridge_chatglm.py | 6 +- .../bridge_chatglmft.py | 8 +- .../bridge_chatglmonnx.py | 10 +- .../bridge_chatgpt.py | 0 .../bridge_chatgpt_website.py | 0 .../bridge_claude.py | 0 .../bridge_internlm.py | 2 +- .../bridge_jittorllms_llama.py | 12 +-- .../bridge_jittorllms_pangualpha.py | 12 +-- .../bridge_jittorllms_rwkv.py | 12 +-- .../bridge_llama2.py | 2 +- {request_llm => request_llms}/bridge_moss.py | 12 +-- .../bridge_newbingfree.py | 8 +- .../bridge_qianfan.py | 4 +- {request_llm => request_llms}/bridge_qwen.py | 2 +- {request_llm => request_llms}/bridge_spark.py | 4 +- .../bridge_stackclaude.py | 6 +- {request_llm => request_llms}/bridge_tgui.py | 0 {request_llm => request_llms}/chatglmoonx.py | 0 {request_llm => request_llms}/com_sparkapi.py | 0 .../edge_gpt_free.py | 0 .../local_llm_class.py | 4 +- .../requirements_chatglm.txt | 0 .../requirements_chatglm_onnx.txt | 0 .../requirements_jittorllms.txt | 0 .../requirements_moss.txt | 0 .../requirements_newbing.txt | 0 .../requirements_qwen.txt | 0 .../requirements_slackclaude.txt | 0 
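The second commit is almost entirely mechanical: the `request_llm` package becomes `request_llms`, and every import, requirements path, Dockerfile line, and documentation reference follows. The history does not record how the rename was produced; the sketch below shows how such a tree-wide rewrite can be scripted, under the assumptions that the package directory is moved first with `git mv` and that the affected files are UTF-8 text with known extensions.

```python
import os
import re

# \b keeps "request_llm" from matching inside "request_llms" on a re-run;
# it still matches before ".", "/", and "\", covering Python imports,
# POSIX paths, and the backslash-style paths in docs/self_analysis.md.
PATTERN = re.compile(r"\brequest_llm\b")

def rewrite_tree(root="."):
    for dirpath, dirnames, filenames in os.walk(root):
        dirnames[:] = [d for d in dirnames if d != ".git"]  # skip git metadata
        for name in filenames:
            # The extension filter is an assumption; extensionless files
            # (e.g. docs/GithubAction+* Dockerfiles) need separate handling.
            if not name.endswith((".py", ".md", ".txt", ".yml", ".json")):
                continue
            path = os.path.join(dirpath, name)
            with open(path, encoding="utf-8") as f:
                text = f.read()
            new_text = PATTERN.sub("request_llms", text)
            if new_text != text:
                with open(path, "w", encoding="utf-8") as f:
                    f.write(new_text)

if __name__ == "__main__":
    # The package directory itself is moved first, e.g.:
    #   git mv request_llm request_llms
    rewrite_tree()
```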
tests/test_llms.py | 16 ++-- toolbox.py | 4 +- 73 files changed, 193 insertions(+), 193 deletions(-) rename {request_llm => request_llms}/README.md (96%) rename {request_llm => request_llms}/bridge_all.py (100%) rename {request_llm => request_llms}/bridge_chatglm.py (97%) rename {request_llm => request_llms}/bridge_chatglmft.py (97%) rename {request_llm => request_llms}/bridge_chatglmonnx.py (83%) rename {request_llm => request_llms}/bridge_chatgpt.py (100%) rename {request_llm => request_llms}/bridge_chatgpt_website.py (100%) rename {request_llm => request_llms}/bridge_claude.py (100%) rename {request_llm => request_llms}/bridge_internlm.py (99%) rename {request_llm => request_llms}/bridge_jittorllms_llama.py (93%) rename {request_llm => request_llms}/bridge_jittorllms_pangualpha.py (93%) rename {request_llm => request_llms}/bridge_jittorllms_rwkv.py (93%) rename {request_llm => request_llms}/bridge_llama2.py (98%) rename {request_llm => request_llms}/bridge_moss.py (96%) rename {request_llm => request_llms}/bridge_newbingfree.py (97%) rename {request_llm => request_llms}/bridge_qianfan.py (98%) rename {request_llm => request_llms}/bridge_qwen.py (97%) rename {request_llm => request_llms}/bridge_spark.py (95%) rename {request_llm => request_llms}/bridge_stackclaude.py (98%) rename {request_llm => request_llms}/bridge_tgui.py (100%) rename {request_llm => request_llms}/chatglmoonx.py (100%) rename {request_llm => request_llms}/com_sparkapi.py (100%) rename {request_llm => request_llms}/edge_gpt_free.py (100%) rename {request_llm => request_llms}/local_llm_class.py (98%) rename {request_llm => request_llms}/requirements_chatglm.txt (100%) rename {request_llm => request_llms}/requirements_chatglm_onnx.txt (100%) rename {request_llm => request_llms}/requirements_jittorllms.txt (100%) rename {request_llm => request_llms}/requirements_moss.txt (100%) rename {request_llm => request_llms}/requirements_newbing.txt (100%) rename {request_llm => request_llms}/requirements_qwen.txt (100%) rename {request_llm => request_llms}/requirements_slackclaude.txt (100%) diff --git a/.gitignore b/.gitignore index c4df28740f..286a67d8a0 100644 --- a/.gitignore +++ b/.gitignore @@ -146,9 +146,9 @@ debug* private* crazy_functions/test_project/pdf_and_word crazy_functions/test_samples -request_llm/jittorllms +request_llms/jittorllms multi-language -request_llm/moss +request_llms/moss media flagged -request_llm/ChatGLM-6b-onnx-u8s8 +request_llms/ChatGLM-6b-onnx-u8s8 diff --git a/README.md b/README.md index 77ff15e91d..667636c3db 100644 --- a/README.md +++ b/README.md @@ -126,11 +126,11 @@ python -m pip install -r requirements.txt # 这个步骤和pip安装一样的步 【可选步骤】如果需要支持清华ChatGLM2/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强): ```sh # 【可选步骤I】支持清华ChatGLM2。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llm/requirements_chatglm.txt +python -m pip install -r request_llms/requirements_chatglm.txt # 【可选步骤II】支持复旦MOSS -python -m pip install -r request_llm/requirements_moss.txt -git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llm/moss # 注意执行此行代码时,必须处于项目根路径 +python -m pip install -r request_llms/requirements_moss.txt +git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # 
注意执行此行代码时,必须处于项目根路径 # 【可选步骤III】支持RWKV Runner 参考wiki:https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner diff --git a/check_proxy.py b/check_proxy.py index 740eed230e..75de7ab48d 100644 --- a/check_proxy.py +++ b/check_proxy.py @@ -156,7 +156,7 @@ def auto_update(raise_error=False): def warm_up_modules(): print('正在执行一些模块的预热...') from toolbox import ProxyNetworkActivate - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info with ProxyNetworkActivate("Warmup_Modules"): enc = model_info["gpt-3.5-turbo"]['tokenizer'] enc.encode("模块预热", disallowed_special=()) diff --git a/config.py b/config.py index 56c8ea356c..a18bc4ad2e 100644 --- a/config.py +++ b/config.py @@ -136,7 +136,7 @@ API_ORG = "" -# 如果需要使用Slack Claude,使用教程详情见 request_llm/README.md +# 如果需要使用Slack Claude,使用教程详情见 request_llms/README.md SLACK_CLAUDE_BOT_ID = '' SLACK_CLAUDE_USER_TOKEN = '' diff --git "a/crazy_functions/Latex\345\205\250\346\226\207\346\266\246\350\211\262.py" "b/crazy_functions/Latex\345\205\250\346\226\207\346\266\246\350\211\262.py" index 462f965758..268a344644 100644 --- "a/crazy_functions/Latex\345\205\250\346\226\207\346\266\246\350\211\262.py" +++ "b/crazy_functions/Latex\345\205\250\346\226\207\346\266\246\350\211\262.py" @@ -11,7 +11,7 @@ def __init__(self): self.sp_file_tag = [] # count_token - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info enc = model_info["gpt-3.5-turbo"]['tokenizer'] def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) self.get_token_num = get_token_num diff --git "a/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py" "b/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py" index b5aad71bf9..697f5ac8aa 100644 --- "a/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py" +++ "b/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py" @@ -11,7 +11,7 @@ def __init__(self): self.sp_file_tag = [] # count_token - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info enc = model_info["gpt-3.5-turbo"]['tokenizer'] def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) self.get_token_num = get_token_num diff --git a/crazy_functions/crazy_utils.py b/crazy_functions/crazy_utils.py index 8533d088e7..04a4e67d27 100644 --- a/crazy_functions/crazy_utils.py +++ b/crazy_functions/crazy_utils.py @@ -5,7 +5,7 @@ def input_clipping(inputs, history, max_token_limit): import numpy as np - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info enc = model_info["gpt-3.5-turbo"]['tokenizer'] def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) @@ -63,7 +63,7 @@ def request_gpt_model_in_new_thread_with_ui_alive( """ import time from concurrent.futures import ThreadPoolExecutor - from request_llm.bridge_all import predict_no_ui_long_connection + from request_llms.bridge_all import predict_no_ui_long_connection # 用户反馈 chatbot.append([inputs_show_user, ""]) yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面 @@ -177,7 +177,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( """ import time, random from concurrent.futures import ThreadPoolExecutor - from request_llm.bridge_all import predict_no_ui_long_connection + from request_llms.bridge_all import predict_no_ui_long_connection assert len(inputs_array) == len(history_array) assert len(inputs_array) 
== len(sys_prompt_array) if max_workers == -1: # 读取配置文件 diff --git a/crazy_functions/latex_fns/latex_actions.py b/crazy_functions/latex_fns/latex_actions.py index 7e561df2a5..ead3bc4c81 100644 --- a/crazy_functions/latex_fns/latex_actions.py +++ b/crazy_functions/latex_fns/latex_actions.py @@ -165,7 +165,7 @@ def __init__(self): self.sp_file_tag = [] # count_token - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info enc = model_info["gpt-3.5-turbo"]['tokenizer'] def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) self.get_token_num = get_token_num diff --git a/crazy_functions/pdf_fns/parse_pdf.py b/crazy_functions/pdf_fns/parse_pdf.py index 9853fd54bf..396b608ab3 100644 --- a/crazy_functions/pdf_fns/parse_pdf.py +++ b/crazy_functions/pdf_fns/parse_pdf.py @@ -103,7 +103,7 @@ def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_fi inputs_show_user_array = [] # get_token_num - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info enc = model_info[llm_kwargs['llm_model']]['tokenizer'] def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) diff --git a/crazy_functions/vt_fns/vt_call_plugin.py b/crazy_functions/vt_fns/vt_call_plugin.py index 455ac88bc0..f33644d9ad 100644 --- a/crazy_functions/vt_fns/vt_call_plugin.py +++ b/crazy_functions/vt_fns/vt_call_plugin.py @@ -1,7 +1,7 @@ from pydantic import BaseModel, Field from typing import List from toolbox import update_ui_lastest_msg, disable_auto_promotion -from request_llm.bridge_all import predict_no_ui_long_connection +from request_llms.bridge_all import predict_no_ui_long_connection from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError import copy, json, pickle, os, sys, time diff --git a/crazy_functions/vt_fns/vt_modify_config.py b/crazy_functions/vt_fns/vt_modify_config.py index e7fd745c3d..0e2b3146fd 100644 --- a/crazy_functions/vt_fns/vt_modify_config.py +++ b/crazy_functions/vt_fns/vt_modify_config.py @@ -1,7 +1,7 @@ from pydantic import BaseModel, Field from typing import List from toolbox import update_ui_lastest_msg, get_conf -from request_llm.bridge_all import predict_no_ui_long_connection +from request_llms.bridge_all import predict_no_ui_long_connection from crazy_functions.json_fns.pydantic_io import GptJsonIO import copy, json, pickle, os, sys diff --git "a/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" "b/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" index 51a1baff54..09bd9beb7d 100644 --- "a/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" +++ "b/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" @@ -5,7 +5,7 @@ def gen_image(llm_kwargs, prompt, resolution="256x256"): import requests, json, time, os - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info proxies, = get_conf('proxies') # Set up OpenAI API key and model diff --git "a/crazy_functions/\346\200\273\347\273\223word\346\226\207\346\241\243.py" "b/crazy_functions/\346\200\273\347\273\223word\346\226\207\346\241\243.py" index 4ea753cbdf..7c822e9f88 100644 --- "a/crazy_functions/\346\200\273\347\273\223word\346\226\207\346\241\243.py" +++ "b/crazy_functions/\346\200\273\347\273\223word\346\226\207\346\241\243.py" @@ -32,7 +32,7 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot print(file_content) # 
private_upload里面的文件名在解压zip后容易出现乱码(rar和7z格式正常),故可以只分析文章内容,不输入文件名 from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info max_token = model_info[llm_kwargs['llm_model']]['max_token'] TOKEN_LIMIT_PER_FRAGMENT = max_token * 3 // 4 paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf( diff --git "a/crazy_functions/\346\200\273\347\273\223\351\237\263\350\247\206\351\242\221.py" "b/crazy_functions/\346\200\273\347\273\223\351\237\263\350\247\206\351\242\221.py" index 7c113f476a..b946d37d49 100644 --- "a/crazy_functions/\346\200\273\347\273\223\351\237\263\350\247\206\351\242\221.py" +++ "b/crazy_functions/\346\200\273\347\273\223\351\237\263\350\247\206\351\242\221.py" @@ -41,7 +41,7 @@ def split_audio_file(filename, split_duration=1000): def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history): import os, requests from moviepy.editor import AudioFileClip - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info # 设置OpenAI密钥和模型 api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) diff --git "a/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" "b/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" index 9485b1ec18..e245b72d5e 100644 --- "a/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" +++ "b/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" @@ -13,7 +13,7 @@ def __init__(self): self.sp_file_tag = [] # count_token - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info enc = model_info["gpt-3.5-turbo"]['tokenizer'] def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) self.get_token_num = get_token_num diff --git "a/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243.py" "b/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243.py" index b87d482585..57a6cdf135 100644 --- "a/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243.py" +++ "b/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243.py" @@ -21,7 +21,7 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, TOKEN_LIMIT_PER_FRAGMENT = 2500 from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info enc = model_info["gpt-3.5-turbo"]['tokenizer'] def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf( diff --git "a/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_\345\244\232\347\272\277\347\250\213.py" "b/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_\345\244\232\347\272\277\347\250\213.py" index 79c4a26242..f2e5cf99d3 100644 --- "a/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_\345\244\232\347\272\277\347\250\213.py" +++ "b/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_\345\244\232\347\272\277\347\250\213.py" @@ -95,7 +95,7 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, # 递归地切割PDF文件 from .crazy_utils import 
breakdown_txt_to_satisfy_token_limit_for_pdf - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info enc = model_info["gpt-3.5-turbo"]['tokenizer'] def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf( diff --git "a/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" "b/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" index f1a89a7ec9..afc12234e5 100644 --- "a/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" +++ "b/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" @@ -19,7 +19,7 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro TOKEN_LIMIT_PER_FRAGMENT = 2500 from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info enc = model_info["gpt-3.5-turbo"]['tokenizer'] def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf( diff --git "a/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" "b/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" index 4ed9aebf97..be286bcd5e 100644 --- "a/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" +++ "b/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" @@ -2,7 +2,7 @@ from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping import requests from bs4 import BeautifulSoup -from request_llm.bridge_all import model_info +from request_llms.bridge_all import model_info def google(query, proxies): query = query # 在此处替换您要搜索的关键词 diff --git "a/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT_bing\347\211\210.py" "b/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT_bing\347\211\210.py" index db5adb7992..666fcb8b29 100644 --- "a/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT_bing\347\211\210.py" +++ "b/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT_bing\347\211\210.py" @@ -2,7 +2,7 @@ from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping import requests from bs4 import BeautifulSoup -from request_llm.bridge_all import model_info +from request_llms.bridge_all import model_info def bing_search(query, proxies=None): diff --git "a/crazy_functions/\350\231\232\347\251\272\347\273\210\347\253\257.py" "b/crazy_functions/\350\231\232\347\251\272\347\273\210\347\253\257.py" index 5f33249e3e..439e71ca3d 100644 --- "a/crazy_functions/\350\231\232\347\251\272\347\273\210\347\253\257.py" +++ "b/crazy_functions/\350\231\232\347\251\272\347\273\210\347\253\257.py" @@ -48,7 +48,7 @@ from typing import List from toolbox import CatchException, update_ui, is_the_upload_folder from toolbox import update_ui_lastest_msg, disable_auto_promotion -from request_llm.bridge_all import predict_no_ui_long_connection +from request_llms.bridge_all import predict_no_ui_long_connection from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from crazy_functions.crazy_utils import input_clipping from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError diff --git "a/crazy_functions/\350\247\243\346\236\220JupyterNotebook.py" 
"b/crazy_functions/\350\247\243\346\236\220JupyterNotebook.py" index d4a3b49e69..709b7e1cdc 100644 --- "a/crazy_functions/\350\247\243\346\236\220JupyterNotebook.py" +++ "b/crazy_functions/\350\247\243\346\236\220JupyterNotebook.py" @@ -13,7 +13,7 @@ def __init__(self): self.sp_file_tag = [] # count_token - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info enc = model_info["gpt-3.5-turbo"]['tokenizer'] def get_token_num(txt): return len( enc.encode(txt, disallowed_special=())) diff --git "a/crazy_functions/\350\257\255\351\237\263\345\212\251\346\211\213.py" "b/crazy_functions/\350\257\255\351\237\263\345\212\251\346\211\213.py" index f48286dfae..3e93ceaed5 100644 --- "a/crazy_functions/\350\257\255\351\237\263\345\212\251\346\211\213.py" +++ "b/crazy_functions/\350\257\255\351\237\263\345\212\251\346\211\213.py" @@ -2,7 +2,7 @@ from toolbox import CatchException, get_conf, markdown_convertion from crazy_functions.crazy_utils import input_clipping from crazy_functions.agent_fns.watchdog import WatchDog -from request_llm.bridge_all import predict_no_ui_long_connection +from request_llms.bridge_all import predict_no_ui_long_connection import threading, time import numpy as np from .live_audio.aliyunASR import AliyunASR diff --git a/docker-compose.yml b/docker-compose.yml index dd40dd12bf..9472a0f941 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -137,7 +137,7 @@ services: # P.S. 通过对 command 进行微调,可以便捷地安装额外的依赖 # command: > - # bash -c "pip install -r request_llm/requirements_qwen.txt && python3 -u main.py" + # bash -c "pip install -r request_llms/requirements_qwen.txt && python3 -u main.py" ### =================================================== ### 【方案三】 如果需要运行ChatGPT + LLAMA + 盘古 + RWKV本地模型 diff --git a/docs/GithubAction+AllCapacity b/docs/GithubAction+AllCapacity index bf9482d387..4ba0e31a9e 100644 --- a/docs/GithubAction+AllCapacity +++ b/docs/GithubAction+AllCapacity @@ -19,13 +19,13 @@ RUN python3 -m pip install aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad sc WORKDIR /gpt RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git WORKDIR /gpt/gpt_academic -RUN git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llm/moss +RUN git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss RUN python3 -m pip install -r requirements.txt -RUN python3 -m pip install -r request_llm/requirements_moss.txt -RUN python3 -m pip install -r request_llm/requirements_qwen.txt -RUN python3 -m pip install -r request_llm/requirements_chatglm.txt -RUN python3 -m pip install -r request_llm/requirements_newbing.txt +RUN python3 -m pip install -r request_llms/requirements_moss.txt +RUN python3 -m pip install -r request_llms/requirements_qwen.txt +RUN python3 -m pip install -r request_llms/requirements_chatglm.txt +RUN python3 -m pip install -r request_llms/requirements_newbing.txt RUN python3 -m pip install nougat-ocr diff --git a/docs/GithubAction+ChatGLM+Moss b/docs/GithubAction+ChatGLM+Moss index 3087d5513c..3212dc2f4d 100644 --- a/docs/GithubAction+ChatGLM+Moss +++ b/docs/GithubAction+ChatGLM+Moss @@ -14,12 +14,12 @@ RUN python3 -m pip install torch --extra-index-url https://download.pytorch.org/ WORKDIR /gpt RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git WORKDIR /gpt/gpt_academic -RUN git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss +RUN git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss RUN python3 -m pip install -r requirements.txt -RUN python3 
-m pip install -r request_llm/requirements_moss.txt -RUN python3 -m pip install -r request_llm/requirements_qwen.txt -RUN python3 -m pip install -r request_llm/requirements_chatglm.txt -RUN python3 -m pip install -r request_llm/requirements_newbing.txt +RUN python3 -m pip install -r request_llms/requirements_moss.txt +RUN python3 -m pip install -r request_llms/requirements_qwen.txt +RUN python3 -m pip install -r request_llms/requirements_chatglm.txt +RUN python3 -m pip install -r request_llms/requirements_newbing.txt diff --git a/docs/GithubAction+JittorLLMs b/docs/GithubAction+JittorLLMs index dc883bcfcc..189eb24431 100644 --- a/docs/GithubAction+JittorLLMs +++ b/docs/GithubAction+JittorLLMs @@ -16,12 +16,12 @@ WORKDIR /gpt RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git WORKDIR /gpt/gpt_academic RUN python3 -m pip install -r requirements.txt -RUN python3 -m pip install -r request_llm/requirements_chatglm.txt -RUN python3 -m pip install -r request_llm/requirements_newbing.txt -RUN python3 -m pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I +RUN python3 -m pip install -r request_llms/requirements_chatglm.txt +RUN python3 -m pip install -r request_llms/requirements_newbing.txt +RUN python3 -m pip install -r request_llms/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I # 下载JittorLLMs -RUN git clone https://github.com/binary-husky/JittorLLMs.git --depth 1 request_llm/jittorllms +RUN git clone https://github.com/binary-husky/JittorLLMs.git --depth 1 request_llms/jittorllms # 禁用缓存,确保更新代码 ADD "https://www.random.org/cgi-bin/randbyte?nbytes=10&format=h" skipcache diff --git a/docs/README.md.German.md b/docs/README.md.German.md index d514de30f5..b7a53f1f9d 100644 --- a/docs/README.md.German.md +++ b/docs/README.md.German.md @@ -103,12 +103,12 @@ python -m pip install -r requirements.txt # Same step as pip installation [Optional Step] If supporting Tsinghua ChatGLM/Fudan MOSS as backend, additional dependencies need to be installed (Prerequisites: Familiar with Python + Used Pytorch + Sufficient computer configuration): ```sh -# [Optional Step I] Support Tsinghua ChatGLM. Remark: If encountering "Call ChatGLM fail Cannot load ChatGLM parameters", please refer to the following: 1: The above default installation is torch+cpu version. To use cuda, uninstall torch and reinstall torch+cuda; 2: If the model cannot be loaded due to insufficient machine configuration, you can modify the model precision in `request_llm/bridge_chatglm.py`, and modify all AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llm/requirements_chatglm.txt +# [Optional Step I] Support Tsinghua ChatGLM. Remark: If encountering "Call ChatGLM fail Cannot load ChatGLM parameters", please refer to the following: 1: The above default installation is torch+cpu version. 
To use cuda, uninstall torch and reinstall torch+cuda; 2: If the model cannot be loaded due to insufficient machine configuration, you can modify the model precision in `request_llms/bridge_chatglm.py`, and modify all AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) +python -m pip install -r request_llms/requirements_chatglm.txt # [Optional Step II] Support Fudan MOSS -python -m pip install -r request_llm/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # When executing this line of code, you must be in the project root path +python -m pip install -r request_llms/requirements_moss.txt +git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # When executing this line of code, you must be in the project root path # [Optional Step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. Currently supported models are as follows (jittorllms series currently only supports docker solutions): AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] diff --git a/docs/README.md.Italian.md b/docs/README.md.Italian.md index 76efe1857b..1e24a535b5 100644 --- a/docs/README.md.Italian.md +++ b/docs/README.md.Italian.md @@ -109,12 +109,12 @@ python -m pip install -r requirements.txt # questo passaggio funziona allo stess 【Passaggio facoltativo】 Se si desidera supportare ChatGLM di Tsinghua/MOSS di Fudan come backend, è necessario installare ulteriori dipendenze (prerequisiti: conoscenza di Python, esperienza con Pytorch e computer sufficientemente potente): ```sh -# 【Passaggio facoltativo I】 Supporto a ChatGLM di Tsinghua. Note su ChatGLM di Tsinghua: in caso di errore "Call ChatGLM fail 不能正常加载ChatGLM的参数" , fare quanto segue: 1. Per impostazione predefinita, viene installata la versione di torch + cpu; per usare CUDA, è necessario disinstallare torch e installare nuovamente torch + cuda; 2. Se non è possibile caricare il modello a causa di una configurazione insufficiente del computer, è possibile modificare la precisione del modello in request_llm/bridge_chatglm.py, cambiando AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) in AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llm/requirements_chatglm.txt +# 【Passaggio facoltativo I】 Supporto a ChatGLM di Tsinghua. Note su ChatGLM di Tsinghua: in caso di errore "Call ChatGLM fail 不能正常加载ChatGLM的参数" , fare quanto segue: 1. Per impostazione predefinita, viene installata la versione di torch + cpu; per usare CUDA, è necessario disinstallare torch e installare nuovamente torch + cuda; 2. 
Se non è possibile caricare il modello a causa di una configurazione insufficiente del computer, è possibile modificare la precisione del modello in request_llms/bridge_chatglm.py, cambiando AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) in AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) +python -m pip install -r request_llms/requirements_chatglm.txt # 【Passaggio facoltativo II】 Supporto a MOSS di Fudan -python -m pip install -r request_llm/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Si prega di notare che quando si esegue questa riga di codice, si deve essere nella directory radice del progetto +python -m pip install -r request_llms/requirements_moss.txt +git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # Si prega di notare che quando si esegue questa riga di codice, si deve essere nella directory radice del progetto # 【Passaggio facoltativo III】 Assicurati che il file di configurazione config.py includa tutti i modelli desiderati, al momento tutti i modelli supportati sono i seguenti (i modelli della serie jittorllms attualmente supportano solo la soluzione docker): AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] diff --git a/docs/README.md.Korean.md b/docs/README.md.Korean.md index 61b8e4a051..db4b2d8fa5 100644 --- a/docs/README.md.Korean.md +++ b/docs/README.md.Korean.md @@ -104,11 +104,11 @@ python -m pip install -r requirements.txt # 이 단계도 pip install의 단계 # 1 : 기본 설치된 것들은 torch + cpu 버전입니다. cuda를 사용하려면 torch를 제거한 다음 torch + cuda를 다시 설치해야합니다. # 2 : 모델을 로드할 수 없는 기계 구성 때문에, AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)를 # AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)로 변경합니다. -python -m pip install -r request_llm/requirements_chatglm.txt +python -m pip install -r request_llms/requirements_chatglm.txt # [선택 사항 II] Fudan MOSS 지원 -python -m pip install -r request_llm/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # 다음 코드 줄을 실행할 때 프로젝트 루트 경로에 있어야합니다. +python -m pip install -r request_llms/requirements_moss.txt +git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # 다음 코드 줄을 실행할 때 프로젝트 루트 경로에 있어야합니다. # [선택 사항III] AVAIL_LLM_MODELS config.py 구성 파일에 기대하는 모델이 포함되어 있는지 확인하십시오. 
# 현재 지원되는 전체 모델 : diff --git a/docs/README.md.Portuguese.md b/docs/README.md.Portuguese.md index 2347d5a74f..4a3aba0862 100644 --- a/docs/README.md.Portuguese.md +++ b/docs/README.md.Portuguese.md @@ -119,12 +119,12 @@ python -m pip install -r requirements.txt # This step is the same as the pip ins [Optional Step] If you need to support Tsinghua ChatGLM / Fudan MOSS as the backend, you need to install more dependencies (prerequisite: familiar with Python + used Pytorch + computer configuration is strong): ```sh -# 【Optional Step I】support Tsinghua ChatGLM。Tsinghua ChatGLM Note: If you encounter a "Call ChatGLM fails cannot load ChatGLM parameters normally" error, refer to the following: 1: The default installed is torch+cpu version, and using cuda requires uninstalling torch and reinstalling torch+cuda; 2: If the model cannot be loaded due to insufficient computer configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llm/requirements_chatglm.txt +# 【Optional Step I】support Tsinghua ChatGLM。Tsinghua ChatGLM Note: If you encounter a "Call ChatGLM fails cannot load ChatGLM parameters normally" error, refer to the following: 1: The default installed is torch+cpu version, and using cuda requires uninstalling torch and reinstalling torch+cuda; 2: If the model cannot be loaded due to insufficient computer configuration, you can modify the model accuracy in request_llms/bridge_chatglm.py and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) +python -m pip install -r request_llms/requirements_chatglm.txt # 【Optional Step II】support Fudan MOSS -python -m pip install -r request_llm/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note: When executing this line of code, you must be in the project root path +python -m pip install -r request_llms/requirements_moss.txt +git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # Note: When executing this line of code, you must be in the project root path # 【Optional Step III】Make sure that the AVAIL_LLM_MODELS in the config.py configuration file contains the expected model. Currently, all supported models are as follows (jittorllms series currently only supports docker solutions): AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] diff --git a/docs/README_EN.md b/docs/README_EN.md index 02b8588c38..029186c718 100644 --- a/docs/README_EN.md +++ b/docs/README_EN.md @@ -106,12 +106,12 @@ python -m pip install -r requirements.txt # this step is the same as pip install [Optional step] If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, you need to install more dependencies (prerequisites: familiar with Python + used Pytorch + computer configuration is strong enough): ```sh -# [Optional Step I] Support Tsinghua ChatGLM. 
Tsinghua ChatGLM remarks: if you encounter the "Call ChatGLM fail cannot load ChatGLM parameters" error, refer to this: 1: The default installation above is torch + cpu version, to use cuda, you need to uninstall torch and reinstall torch + cuda; 2: If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py, and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code = True) -python -m pip install -r request_llm/requirements_chatglm.txt +# [Optional Step I] Support Tsinghua ChatGLM. Tsinghua ChatGLM remarks: if you encounter the "Call ChatGLM fail cannot load ChatGLM parameters" error, refer to this: 1: The default installation above is torch + cpu version, to use cuda, you need to uninstall torch and reinstall torch + cuda; 2: If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llms/bridge_chatglm.py, and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code = True) +python -m pip install -r request_llms/requirements_chatglm.txt # [Optional Step II] Support Fudan MOSS -python -m pip install -r request_llm/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # When executing this line of code, you must be in the root directory of the project +python -m pip install -r request_llms/requirements_moss.txt +git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # When executing this line of code, you must be in the root directory of the project # [Optional Step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file includes the expected models. Currently supported models are as follows (the jittorllms series only supports the docker solution for the time being): AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] diff --git a/docs/README_FR.md b/docs/README_FR.md index af3bb42c79..62d81ebfc6 100644 --- a/docs/README_FR.md +++ b/docs/README_FR.md @@ -111,12 +111,12 @@ python -m pip install -r requirements.txt # Same step as pip instalation 【Optional】 Si vous souhaitez prendre en charge THU ChatGLM/FDU MOSS en tant que backend, des dépendances supplémentaires doivent être installées (prérequis: compétent en Python + utilisez Pytorch + configuration suffisante de l'ordinateur): ```sh -# 【Optional Step I】 Support THU ChatGLM. Remarque sur THU ChatGLM: Si vous rencontrez l'erreur "Appel à ChatGLM échoué, les paramètres ChatGLM ne peuvent pas être chargés normalement", reportez-vous à ce qui suit: 1: La version par défaut installée est torch+cpu, si vous souhaitez utiliser cuda, vous devez désinstaller torch et réinstaller torch+cuda; 2: Si le modèle ne peut pas être chargé en raison d'une configuration insuffisante de l'ordinateur local, vous pouvez modifier la précision du modèle dans request_llm/bridge_chatglm.py, modifier AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) par AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llm/requirements_chatglm.txt +# 【Optional Step I】 Support THU ChatGLM. 
Remarque sur THU ChatGLM: Si vous rencontrez l'erreur "Appel à ChatGLM échoué, les paramètres ChatGLM ne peuvent pas être chargés normalement", reportez-vous à ce qui suit: 1: La version par défaut installée est torch+cpu, si vous souhaitez utiliser cuda, vous devez désinstaller torch et réinstaller torch+cuda; 2: Si le modèle ne peut pas être chargé en raison d'une configuration insuffisante de l'ordinateur local, vous pouvez modifier la précision du modèle dans request_llms/bridge_chatglm.py, modifier AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) par AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) +python -m pip install -r request_llms/requirements_chatglm.txt # 【Optional Step II】 Support FDU MOSS -python -m pip install -r request_llm/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note: When running this line of code, you must be in the project root path. +python -m pip install -r request_llms/requirements_moss.txt +git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # Note: When running this line of code, you must be in the project root path. # 【Optional Step III】Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the desired model. Currently, all models supported are as follows (the jittorllms series currently only supports the docker scheme): AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] diff --git a/docs/README_JP.md b/docs/README_JP.md index 46145e1f9c..8ade71b4b8 100644 --- a/docs/README_JP.md +++ b/docs/README_JP.md @@ -120,12 +120,12 @@ python -m pip install -r requirements.txt # This step is the same as the pip ins [Optional Steps] If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, you need to install more dependencies (precondition: familiar with Python + used Pytorch + computer configuration). Strong enough): ```sh -# Optional step I: support Tsinghua ChatGLM. Tsinghua ChatGLM remarks: If you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters normally", refer to the following: 1: The version installed above is torch+cpu version, using cuda requires uninstalling torch and reinstalling torch+cuda; 2: If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py, and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True). -python -m pip install -r request_llm/requirements_chatglm.txt +# Optional step I: support Tsinghua ChatGLM. Tsinghua ChatGLM remarks: If you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters normally", refer to the following: 1: The version installed above is torch+cpu version, using cuda requires uninstalling torch and reinstalling torch+cuda; 2: If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llms/bridge_chatglm.py, and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True). +python -m pip install -r request_llms/requirements_chatglm.txt # Optional Step II: Support Fudan MOSS. 
-python -m pip install -r request_llm/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note that when executing this line of code, it must be in the project root. +python -m pip install -r request_llms/requirements_moss.txt +git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # Note that when executing this line of code, it must be in the project root. # 【Optional Step III】Ensure that the AVAIL_LLM_MODELS in the config.py configuration file contains the expected model. Currently, all supported models are as follows (jittorllms series currently only supports the docker solution): AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] diff --git a/docs/README_RS.md b/docs/README_RS.md index d4888a0522..52d18dfcf6 100644 --- a/docs/README_RS.md +++ b/docs/README_RS.md @@ -108,12 +108,12 @@ python -m pip install -r requirements.txt # This step is the same as the pip ins [Optional step] If you need to support Tsinghua ChatGLM/Fudan MOSS as backend, you need to install more dependencies (prerequisites: familiar with Python + have used Pytorch + computer configuration is strong): ```sh -# [Optional step I] Support Tsinghua ChatGLM. Tsinghua ChatGLM note: If you encounter the "Call ChatGLM fail cannot load ChatGLM parameters normally" error, refer to the following: 1: The default installation above is torch+cpu version, and cuda is used Need to uninstall torch and reinstall torch+cuda; 2: If you cannot load the model due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py, AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) Modify to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llm/requirements_chatglm.txt +# [Optional step I] Support Tsinghua ChatGLM. Tsinghua ChatGLM note: If you encounter the "Call ChatGLM fail cannot load ChatGLM parameters normally" error, refer to the following: 1: The default installation above is torch+cpu version, and cuda is used Need to uninstall torch and reinstall torch+cuda; 2: If you cannot load the model due to insufficient local configuration, you can modify the model accuracy in request_llms/bridge_chatglm.py, AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) Modify to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) +python -m pip install -r request_llms/requirements_chatglm.txt # [Optional step II] Support Fudan MOSS -python -m pip install -r request_llm/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note that when executing this line of code, you must be in the project root path +python -m pip install -r request_llms/requirements_moss.txt +git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # Note that when executing this line of code, you must be in the project root path # [Optional step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. 
Currently, all supported models are as follows (the jittorllms series currently only supports the docker solution): AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] diff --git a/docs/self_analysis.md b/docs/self_analysis.md index ebc2337194..c3736193bc 100644 --- a/docs/self_analysis.md +++ b/docs/self_analysis.md @@ -38,20 +38,20 @@ | crazy_functions\读文章写摘要.py | 对论文进行解析和全文摘要生成 | | crazy_functions\谷歌检索小助手.py | 提供谷歌学术搜索页面中相关文章的元数据信息。 | | crazy_functions\高级功能函数模板.py | 使用Unsplash API发送相关图片以回复用户的输入。 | -| request_llm\bridge_all.py | 基于不同LLM模型进行对话。 | -| request_llm\bridge_chatglm.py | 使用ChatGLM模型生成回复,支持单线程和多线程方式。 | -| request_llm\bridge_chatgpt.py | 基于GPT模型完成对话。 | -| request_llm\bridge_jittorllms_llama.py | 使用JittorLLMs模型完成对话,支持单线程和多线程方式。 | -| request_llm\bridge_jittorllms_pangualpha.py | 使用JittorLLMs模型完成对话,基于多进程和多线程方式。 | -| request_llm\bridge_jittorllms_rwkv.py | 使用JittorLLMs模型完成聊天功能,提供包括历史信息、参数调节等在内的多个功能选项。 | -| request_llm\bridge_moss.py | 加载Moss模型完成对话功能。 | -| request_llm\bridge_newbing.py | 使用Newbing聊天机器人进行对话,支持单线程和多线程方式。 | -| request_llm\bridge_newbingfree.py | 基于Bing chatbot API实现聊天机器人的文本生成功能。 | -| request_llm\bridge_stackclaude.py | 基于Slack API实现Claude与用户的交互。 | -| request_llm\bridge_tgui.py | 通过websocket实现聊天机器人与UI界面交互。 | -| request_llm\edge_gpt.py | 调用Bing chatbot API提供聊天机器人服务。 | -| request_llm\edge_gpt_free.py | 实现聊天机器人API,采用aiohttp和httpx工具库。 | -| request_llm\test_llms.py | 对llm模型进行单元测试。 | +| request_llms\bridge_all.py | 基于不同LLM模型进行对话。 | +| request_llms\bridge_chatglm.py | 使用ChatGLM模型生成回复,支持单线程和多线程方式。 | +| request_llms\bridge_chatgpt.py | 基于GPT模型完成对话。 | +| request_llms\bridge_jittorllms_llama.py | 使用JittorLLMs模型完成对话,支持单线程和多线程方式。 | +| request_llms\bridge_jittorllms_pangualpha.py | 使用JittorLLMs模型完成对话,基于多进程和多线程方式。 | +| request_llms\bridge_jittorllms_rwkv.py | 使用JittorLLMs模型完成聊天功能,提供包括历史信息、参数调节等在内的多个功能选项。 | +| request_llms\bridge_moss.py | 加载Moss模型完成对话功能。 | +| request_llms\bridge_newbing.py | 使用Newbing聊天机器人进行对话,支持单线程和多线程方式。 | +| request_llms\bridge_newbingfree.py | 基于Bing chatbot API实现聊天机器人的文本生成功能。 | +| request_llms\bridge_stackclaude.py | 基于Slack API实现Claude与用户的交互。 | +| request_llms\bridge_tgui.py | 通过websocket实现聊天机器人与UI界面交互。 | +| request_llms\edge_gpt.py | 调用Bing chatbot API提供聊天机器人服务。 | +| request_llms\edge_gpt_free.py | 实现聊天机器人API,采用aiohttp和httpx工具库。 | +| request_llms\test_llms.py | 对llm模型进行单元测试。 | ## 接下来请你逐文件分析下面的工程[0/48] 请对下面的程序文件做一个概述: check_proxy.py @@ -129,7 +129,7 @@ toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和 1. `input_clipping`: 该函数用于裁剪输入文本长度,使其不超过一定的限制。 2. 
`request_gpt_model_in_new_thread_with_ui_alive`: 该函数用于请求 GPT 模型并保持用户界面的响应,支持多线程和实时更新用户界面。 -这两个函数都依赖于从 `toolbox` 和 `request_llm` 中导入的一些工具函数。函数的输入和输出有详细的描述文档。 +这两个函数都依赖于从 `toolbox` 和 `request_llms` 中导入的一些工具函数。函数的输入和输出有详细的描述文档。 ## [12/48] 请对下面的程序文件做一个概述: crazy_functions\Latex全文润色.py @@ -137,7 +137,7 @@ toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和 ## [13/48] 请对下面的程序文件做一个概述: crazy_functions\Latex全文翻译.py -这个文件包含两个函数 `Latex英译中` 和 `Latex中译英`,它们都会对整个Latex项目进行翻译。这个文件还包含一个类 `PaperFileGroup`,它拥有一个方法 `run_file_split`,用于把长文本文件分成多个短文件。其中使用了工具库 `toolbox` 中的一些函数和从 `request_llm` 中导入了 `model_info`。接下来的函数把文件读取进来,把它们的注释删除,进行分割,并进行翻译。这个文件还包括了一些异常处理和界面更新的操作。 +这个文件包含两个函数 `Latex英译中` 和 `Latex中译英`,它们都会对整个Latex项目进行翻译。这个文件还包含一个类 `PaperFileGroup`,它拥有一个方法 `run_file_split`,用于把长文本文件分成多个短文件。其中使用了工具库 `toolbox` 中的一些函数和从 `request_llms` 中导入了 `model_info`。接下来的函数把文件读取进来,把它们的注释删除,进行分割,并进行翻译。这个文件还包括了一些异常处理和界面更新的操作。 ## [14/48] 请对下面的程序文件做一个概述: crazy_functions\__init__.py @@ -227,19 +227,19 @@ toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和 该程序文件定义了一个名为高阶功能模板函数的函数,该函数接受多个参数,包括输入的文本、gpt模型参数、插件模型参数、聊天显示框的句柄、聊天历史等,并利用送出请求,使用 Unsplash API 发送相关图片。其中,为了避免输入溢出,函数会在开始时清空历史。函数也有一些 UI 更新的语句。该程序文件还依赖于其他两个模块:CatchException 和 update_ui,以及一个名为 request_gpt_model_in_new_thread_with_ui_alive 的来自 crazy_utils 模块(应该是自定义的工具包)的函数。 -## [34/48] 请对下面的程序文件做一个概述: request_llm\bridge_all.py +## [34/48] 请对下面的程序文件做一个概述: request_llms\bridge_all.py 该文件包含两个函数:predict和predict_no_ui_long_connection,用于基于不同的LLM模型进行对话。该文件还包含一个lazyloadTiktoken类和一个LLM_CATCH_EXCEPTION修饰器函数。其中lazyloadTiktoken类用于懒加载模型的tokenizer,LLM_CATCH_EXCEPTION用于错误处理。整个文件还定义了一些全局变量和模型信息字典,用于引用和配置LLM模型。 -## [35/48] 请对下面的程序文件做一个概述: request_llm\bridge_chatglm.py +## [35/48] 请对下面的程序文件做一个概述: request_llms\bridge_chatglm.py 这是一个Python程序文件,名为`bridge_chatglm.py`,其中定义了一个名为`GetGLMHandle`的类和三个方法:`predict_no_ui_long_connection`、 `predict`和 `stream_chat`。该文件依赖于多个Python库,如`transformers`和`sentencepiece`。该文件实现了一个聊天机器人,使用ChatGLM模型来生成回复,支持单线程和多线程方式。程序启动时需要加载ChatGLM的模型和tokenizer,需要一段时间。在配置文件`config.py`中设置参数会影响模型的内存和显存使用,因此程序可能会导致低配计算机卡死。 -## [36/48] 请对下面的程序文件做一个概述: request_llm\bridge_chatgpt.py +## [36/48] 请对下面的程序文件做一个概述: request_llms\bridge_chatgpt.py -该文件为 Python 代码文件,文件名为 request_llm\bridge_chatgpt.py。该代码文件主要提供三个函数:predict、predict_no_ui和 predict_no_ui_long_connection,用于发送至 chatGPT 并等待回复,获取输出。该代码文件还包含一些辅助函数,用于处理连接异常、生成 HTTP 请求等。该文件的代码架构清晰,使用了多个自定义函数和模块。 +该文件为 Python 代码文件,文件名为 request_llms\bridge_chatgpt.py。该代码文件主要提供三个函数:predict、predict_no_ui和 predict_no_ui_long_connection,用于发送至 chatGPT 并等待回复,获取输出。该代码文件还包含一些辅助函数,用于处理连接异常、生成 HTTP 请求等。该文件的代码架构清晰,使用了多个自定义函数和模块。 -## [37/48] 请对下面的程序文件做一个概述: request_llm\bridge_jittorllms_llama.py +## [37/48] 请对下面的程序文件做一个概述: request_llms\bridge_jittorllms_llama.py 该代码文件实现了一个聊天机器人,其中使用了 JittorLLMs 模型。主要包括以下几个部分: 1. 
GetGLMHandle 类:一个进程类,用于加载 JittorLLMs 模型并接收并处理请求。 @@ -248,17 +248,17 @@ toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和 这个文件中还有一些辅助函数和全局变量,例如 importlib、time、threading 等。 -## [38/48] 请对下面的程序文件做一个概述: request_llm\bridge_jittorllms_pangualpha.py +## [38/48] 请对下面的程序文件做一个概述: request_llms\bridge_jittorllms_pangualpha.py 这个文件是为了实现使用jittorllms(一种机器学习模型)来进行聊天功能的代码。其中包括了模型加载、模型的参数加载、消息的收发等相关操作。其中使用了多进程和多线程来提高性能和效率。代码中还包括了处理依赖关系的函数和预处理函数等。 -## [39/48] 请对下面的程序文件做一个概述: request_llm\bridge_jittorllms_rwkv.py +## [39/48] 请对下面的程序文件做一个概述: request_llms\bridge_jittorllms_rwkv.py 这个文件是一个Python程序,文件名为request_llm\bridge_jittorllms_rwkv.py。它依赖transformers、time、threading、importlib、multiprocessing等库。在文件中,通过定义GetGLMHandle类加载jittorllms模型参数和定义stream_chat方法来实现与jittorllms模型的交互。同时,该文件还定义了predict_no_ui_long_connection和predict方法来处理历史信息、调用jittorllms模型、接收回复信息并输出结果。 -## [40/48] 请对下面的程序文件做一个概述: request_llm\bridge_moss.py +## [40/48] 请对下面的程序文件做一个概述: request_llms\bridge_moss.py -该文件为一个Python源代码文件,文件名为 request_llm\bridge_moss.py。代码定义了一个 GetGLMHandle 类和两个函数 predict_no_ui_long_connection 和 predict。 +该文件为一个Python源代码文件,文件名为 request_llms\bridge_moss.py。代码定义了一个 GetGLMHandle 类和两个函数 predict_no_ui_long_connection 和 predict。 GetGLMHandle 类继承自Process类(多进程),主要功能是启动一个子进程并加载 MOSS 模型参数,通过 Pipe 进行主子进程的通信。该类还定义了 check_dependency、moss_init、run 和 stream_chat 等方法,其中 check_dependency 和 moss_init 是子进程的初始化方法,run 是子进程运行方法,stream_chat 实现了主进程和子进程的交互过程。 @@ -266,7 +266,7 @@ GetGLMHandle 类继承自Process类(多进程),主要功能是启动一个 函数 predict 是单线程方法,通过调用 update_ui 将交互过程中 MOSS 的回复实时更新到UI(User Interface)中,并执行一个 named function(additional_fn)指定的函数对输入进行预处理。 -## [41/48] 请对下面的程序文件做一个概述: request_llm\bridge_newbing.py +## [41/48] 请对下面的程序文件做一个概述: request_llms\bridge_newbing.py 这是一个名为`bridge_newbing.py`的程序文件,包含三个部分: @@ -276,11 +276,11 @@ GetGLMHandle 类继承自Process类(多进程),主要功能是启动一个 第三部分定义了一个名为`newbing_handle`的全局变量,并导出了`predict_no_ui_long_connection`和`predict`这两个方法,以供其他程序可以调用。 -## [42/48] 请对下面的程序文件做一个概述: request_llm\bridge_newbingfree.py +## [42/48] 请对下面的程序文件做一个概述: request_llms\bridge_newbingfree.py 这个Python文件包含了三部分内容。第一部分是来自edge_gpt_free.py文件的聊天机器人程序。第二部分是子进程Worker,用于调用主体。第三部分提供了两个函数:predict_no_ui_long_connection和predict用于调用NewBing聊天机器人和返回响应。其中predict函数还提供了一些参数用于控制聊天机器人的回复和更新UI界面。 -## [43/48] 请对下面的程序文件做一个概述: request_llm\bridge_stackclaude.py +## [43/48] 请对下面的程序文件做一个概述: request_llms\bridge_stackclaude.py 这是一个Python源代码文件,文件名为request_llm\bridge_stackclaude.py。代码分为三个主要部分: @@ -290,21 +290,21 @@ GetGLMHandle 类继承自Process类(多进程),主要功能是启动一个 第三部分定义了predict_no_ui_long_connection和predict两个函数,主要用于通过调用ClaudeHandle对象的stream_chat方法来获取Claude的回复,并更新ui以显示相关信息。其中predict函数采用单线程方法,而predict_no_ui_long_connection函数使用多线程方法。 -## [44/48] 请对下面的程序文件做一个概述: request_llm\bridge_tgui.py +## [44/48] 请对下面的程序文件做一个概述: request_llms\bridge_tgui.py 该文件是一个Python代码文件,名为request_llm\bridge_tgui.py。它包含了一些函数用于与chatbot UI交互,并通过WebSocket协议与远程LLM模型通信完成文本生成任务,其中最重要的函数是predict()和predict_no_ui_long_connection()。这个程序还有其他的辅助函数,如random_hash()。整个代码文件在协作的基础上完成了一次修改。 -## [45/48] 请对下面的程序文件做一个概述: request_llm\edge_gpt.py +## [45/48] 请对下面的程序文件做一个概述: request_llms\edge_gpt.py 该文件是一个用于调用Bing chatbot API的Python程序,它由多个类和辅助函数构成,可以根据给定的对话连接在对话中提出问题,使用websocket与远程服务通信。程序实现了一个聊天机器人,可以为用户提供人工智能聊天。 -## [46/48] 请对下面的程序文件做一个概述: request_llm\edge_gpt_free.py +## [46/48] 请对下面的程序文件做一个概述: request_llms\edge_gpt_free.py 该代码文件为一个会话API,可通过Chathub发送消息以返回响应。其中使用了 aiohttp 和 httpx 库进行网络请求并发送。代码中包含了一些函数和常量,多数用于生成请求数据或是请求头信息等。同时该代码文件还包含了一个 Conversation 类,调用该类可实现对话交互。 -## [47/48] 请对下面的程序文件做一个概述: request_llm\test_llms.py +## [47/48] 请对下面的程序文件做一个概述: request_llms\test_llms.py 
-这个文件是用于对llm模型进行单元测试的Python程序。程序导入一个名为"request_llm.bridge_newbingfree"的模块,然后三次使用该模块中的predict_no_ui_long_connection()函数进行预测,并输出结果。此外,还有一些注释掉的代码段,这些代码段也是关于模型预测的。 +这个文件是用于对llm模型进行单元测试的Python程序。程序导入一个名为"request_llms.bridge_newbingfree"的模块,然后三次使用该模块中的predict_no_ui_long_connection()函数进行预测,并输出结果。此外,还有一些注释掉的代码段,这些代码段也是关于模型预测的。 ## 用一张Markdown表格简要描述以下文件的功能: check_proxy.py, colorful.py, config.py, config_private.py, core_functional.py, crazy_functional.py, main.py, multi_language.py, theme.py, toolbox.py, crazy_functions\crazy_functions_test.py, crazy_functions\crazy_utils.py, crazy_functions\Latex全文润色.py, crazy_functions\Latex全文翻译.py, crazy_functions\__init__.py, crazy_functions\下载arxiv论文翻译摘要.py。根据以上分析,用一句话概括程序的整体功能。 @@ -355,24 +355,24 @@ crazy_functions\代码重写为全英文_多线程.py, crazy_functions\图片生 概括程序的整体功能:提供了一系列处理文本、文件和代码的功能,使用了各类语言模型、多线程、网络请求和数据解析技术来提高效率和精度。 ## 用一张Markdown表格简要描述以下文件的功能: -crazy_functions\谷歌检索小助手.py, crazy_functions\高级功能函数模板.py, request_llm\bridge_all.py, request_llm\bridge_chatglm.py, request_llm\bridge_chatgpt.py, request_llm\bridge_jittorllms_llama.py, request_llm\bridge_jittorllms_pangualpha.py, request_llm\bridge_jittorllms_rwkv.py, request_llm\bridge_moss.py, request_llm\bridge_newbing.py, request_llm\bridge_newbingfree.py, request_llm\bridge_stackclaude.py, request_llm\bridge_tgui.py, request_llm\edge_gpt.py, request_llm\edge_gpt_free.py, request_llm\test_llms.py。根据以上分析,用一句话概括程序的整体功能。 +crazy_functions\谷歌检索小助手.py, crazy_functions\高级功能函数模板.py, request_llms\bridge_all.py, request_llms\bridge_chatglm.py, request_llms\bridge_chatgpt.py, request_llms\bridge_jittorllms_llama.py, request_llms\bridge_jittorllms_pangualpha.py, request_llms\bridge_jittorllms_rwkv.py, request_llms\bridge_moss.py, request_llms\bridge_newbing.py, request_llms\bridge_newbingfree.py, request_llms\bridge_stackclaude.py, request_llms\bridge_tgui.py, request_llms\edge_gpt.py, request_llms\edge_gpt_free.py, request_llms\test_llms.py。根据以上分析,用一句话概括程序的整体功能。 | 文件名 | 功能描述 | | --- | --- | | crazy_functions\谷歌检索小助手.py | 提供谷歌学术搜索页面中相关文章的元数据信息。 | | crazy_functions\高级功能函数模板.py | 使用Unsplash API发送相关图片以回复用户的输入。 | -| request_llm\bridge_all.py | 基于不同LLM模型进行对话。 | -| request_llm\bridge_chatglm.py | 使用ChatGLM模型生成回复,支持单线程和多线程方式。 | -| request_llm\bridge_chatgpt.py | 基于GPT模型完成对话。 | -| request_llm\bridge_jittorllms_llama.py | 使用JittorLLMs模型完成对话,支持单线程和多线程方式。 | -| request_llm\bridge_jittorllms_pangualpha.py | 使用JittorLLMs模型完成对话,基于多进程和多线程方式。 | -| request_llm\bridge_jittorllms_rwkv.py | 使用JittorLLMs模型完成聊天功能,提供包括历史信息、参数调节等在内的多个功能选项。 | -| request_llm\bridge_moss.py | 加载Moss模型完成对话功能。 | -| request_llm\bridge_newbing.py | 使用Newbing聊天机器人进行对话,支持单线程和多线程方式。 | -| request_llm\bridge_newbingfree.py | 基于Bing chatbot API实现聊天机器人的文本生成功能。 | -| request_llm\bridge_stackclaude.py | 基于Slack API实现Claude与用户的交互。 | -| request_llm\bridge_tgui.py | 通过websocket实现聊天机器人与UI界面交互。 | -| request_llm\edge_gpt.py | 调用Bing chatbot API提供聊天机器人服务。 | -| request_llm\edge_gpt_free.py | 实现聊天机器人API,采用aiohttp和httpx工具库。 | -| request_llm\test_llms.py | 对llm模型进行单元测试。 | +| request_llms\bridge_all.py | 基于不同LLM模型进行对话。 | +| request_llms\bridge_chatglm.py | 使用ChatGLM模型生成回复,支持单线程和多线程方式。 | +| request_llms\bridge_chatgpt.py | 基于GPT模型完成对话。 | +| request_llms\bridge_jittorllms_llama.py | 使用JittorLLMs模型完成对话,支持单线程和多线程方式。 | +| request_llms\bridge_jittorllms_pangualpha.py | 使用JittorLLMs模型完成对话,基于多进程和多线程方式。 | +| request_llms\bridge_jittorllms_rwkv.py | 使用JittorLLMs模型完成聊天功能,提供包括历史信息、参数调节等在内的多个功能选项。 | +| request_llms\bridge_moss.py | 加载Moss模型完成对话功能。 | +| request_llms\bridge_newbing.py | 
使用Newbing聊天机器人进行对话,支持单线程和多线程方式。 | +| request_llms\bridge_newbingfree.py | 基于Bing chatbot API实现聊天机器人的文本生成功能。 | +| request_llms\bridge_stackclaude.py | 基于Slack API实现Claude与用户的交互。 | +| request_llms\bridge_tgui.py | 通过websocket实现聊天机器人与UI界面交互。 | +| request_llms\edge_gpt.py | 调用Bing chatbot API提供聊天机器人服务。 | +| request_llms\edge_gpt_free.py | 实现聊天机器人API,采用aiohttp和httpx工具库。 | +| request_llms\test_llms.py | 对llm模型进行单元测试。 | | 程序整体功能 | 实现不同种类的聊天机器人,可以根据输入进行文本生成。 | diff --git a/docs/translate_english.json b/docs/translate_english.json index c13ac81a09..850cae5471 100644 --- a/docs/translate_english.json +++ b/docs/translate_english.json @@ -1184,7 +1184,7 @@ "Call ChatGLM fail 不能正常加载ChatGLM的参数": "Call ChatGLM fail, unable to load parameters for ChatGLM", "不能正常加载ChatGLM的参数!": "Unable to load parameters for ChatGLM!", "多线程方法": "Multithreading method", - "函数的说明请见 request_llm/bridge_all.py": "For function details, please see request_llm/bridge_all.py", + "函数的说明请见 request_llms/bridge_all.py": "For function details, please see request_llms/bridge_all.py", "程序终止": "Program terminated", "单线程方法": "Single-threaded method", "等待ChatGLM响应中": "Waiting for response from ChatGLM", @@ -1543,7 +1543,7 @@ "str类型": "str type", "所有音频都总结完成了吗": "Are all audio summaries completed?", "SummaryAudioVideo内容": "SummaryAudioVideo content", - "使用教程详情见 request_llm/README.md": "See request_llm/README.md for detailed usage instructions", + "使用教程详情见 request_llms/README.md": "See request_llms/README.md for detailed usage instructions", "删除中间文件夹": "Delete intermediate folder", "Claude组件初始化成功": "Claude component initialized successfully", "$c$ 是光速": "$c$ is the speed of light", diff --git a/docs/translate_japanese.json b/docs/translate_japanese.json index fa3af4e0ee..ae86dc06ad 100644 --- a/docs/translate_japanese.json +++ b/docs/translate_japanese.json @@ -782,7 +782,7 @@ "主进程统一调用函数接口": "メインプロセスが関数インターフェースを統一的に呼び出します", "再例如一个包含了待处理文件的路径": "処理待ちのファイルを含むパスの例", "负责把学术论文准确翻译成中文": "学術論文を正確に中国語に翻訳する責任があります", - "函数的说明请见 request_llm/bridge_all.py": "関数の説明については、request_llm/bridge_all.pyを参照してください", + "函数的说明请见 request_llms/bridge_all.py": "関数の説明については、request_llms/bridge_all.pyを参照してください", "然后回车提交": "そしてEnterを押して提出してください", "防止爆token": "トークンの爆発を防止する", "Latex项目全文中译英": "LaTeXプロジェクト全文の中国語から英語への翻訳", @@ -1616,7 +1616,7 @@ "正在重试": "再試行中", "从而更全面地理解项目的整体功能": "プロジェクトの全体的な機能をより理解するために", "正在等您说完问题": "質問が完了するのをお待ちしています", - "使用教程详情见 request_llm/README.md": "使用方法の詳細については、request_llm/README.mdを参照してください", + "使用教程详情见 request_llms/README.md": "使用方法の詳細については、request_llms/README.mdを参照してください", "6.25 加入判定latex模板的代码": "6.25 テンプレートの判定コードを追加", "找不到任何音频或视频文件": "音声またはビデオファイルが見つかりません", "请求GPT模型的": "GPTモデルのリクエスト", diff --git a/docs/translate_traditionalchinese.json b/docs/translate_traditionalchinese.json index 53570aead2..a677f10815 100644 --- a/docs/translate_traditionalchinese.json +++ b/docs/translate_traditionalchinese.json @@ -123,7 +123,7 @@ "的第": "的第", "减少重复": "減少重複", "如果超过期限没有喂狗": "如果超過期限沒有餵狗", - "函数的说明请见 request_llm/bridge_all.py": "函數的說明請見 request_llm/bridge_all.py", + "函数的说明请见 request_llms/bridge_all.py": "函數的說明請見 request_llms/bridge_all.py", "第7步": "第7步", "说": "說", "中途接收可能的终止指令": "中途接收可能的終止指令", @@ -1887,7 +1887,7 @@ "请继续分析其他源代码": "請繼續分析其他源代碼", "质能方程式": "質能方程式", "功能尚不稳定": "功能尚不穩定", - "使用教程详情见 request_llm/README.md": "使用教程詳情見 request_llm/README.md", + "使用教程详情见 request_llms/README.md": "使用教程詳情見 request_llms/README.md", "从以上搜索结果中抽取信息": "從以上搜索結果中抽取信息", "虽然PDF生成失败了": "雖然PDF生成失敗了", "找图片": "尋找圖片", diff --git a/main.py b/main.py index 9f3899515c..991dd473eb 100644 --- 
a/main.py +++ b/main.py @@ -7,7 +7,7 @@ def main(): import gradio as gr if gr.__version__ not in ['3.32.6']: raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.") - from request_llm.bridge_all import predict + from request_llms.bridge_all import predict from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到 proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION') diff --git a/request_llm/README.md b/request_llms/README.md similarity index 96% rename from request_llm/README.md rename to request_llms/README.md index 545bc1ffba..92b856e30b 100644 --- a/request_llm/README.md +++ b/request_llms/README.md @@ -2,7 +2,7 @@ ## ChatGLM -- 安装依赖 `pip install -r request_llm/requirements_chatglm.txt` +- 安装依赖 `pip install -r request_llms/requirements_chatglm.txt` - 修改配置,在config.py中将LLM_MODEL的值改为"chatglm" ``` sh diff --git a/request_llm/bridge_all.py b/request_llms/bridge_all.py similarity index 100% rename from request_llm/bridge_all.py rename to request_llms/bridge_all.py diff --git a/request_llm/bridge_chatglm.py b/request_llms/bridge_chatglm.py similarity index 97% rename from request_llm/bridge_chatglm.py rename to request_llms/bridge_chatglm.py index 387b3e21e2..194cd1a208 100644 --- a/request_llm/bridge_chatglm.py +++ b/request_llms/bridge_chatglm.py @@ -27,7 +27,7 @@ def check_dependency(self): self.info = "依赖检测通过" self.success = True except: - self.info = "缺少ChatGLM的依赖,如果要使用ChatGLM,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_chatglm.txt`安装ChatGLM的依赖。" + self.info = "缺少ChatGLM的依赖,如果要使用ChatGLM,除了基础的pip依赖以外,您还需要运行`pip install -r request_llms/requirements_chatglm.txt`安装ChatGLM的依赖。" self.success = False def ready(self): @@ -100,7 +100,7 @@ def stream_chat(self, **kwargs): def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ 多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ global glm_handle if glm_handle is None: @@ -131,7 +131,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ 单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "")) diff --git a/request_llm/bridge_chatglmft.py b/request_llms/bridge_chatglmft.py similarity index 97% rename from request_llm/bridge_chatglmft.py rename to request_llms/bridge_chatglmft.py index 4416382a0f..8755bc1137 100644 --- a/request_llm/bridge_chatglmft.py +++ b/request_llms/bridge_chatglmft.py @@ -44,7 +44,7 @@ def check_dependency(self): self.info = "依赖检测通过" self.success = True except: - self.info = "缺少ChatGLMFT的依赖,如果要使用ChatGLMFT,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_chatglm.txt`安装ChatGLM的依赖。" + self.info = "缺少ChatGLMFT的依赖,如果要使用ChatGLMFT,除了基础的pip依赖以外,您还需要运行`pip install -r request_llms/requirements_chatglm.txt`安装ChatGLM的依赖。" self.success = False def ready(self): @@ -59,7 +59,7 @@ def run(self): if self.chatglmft_model is None: from transformers import AutoConfig import torch - # conf = 'request_llm/current_ptune_model.json' + # conf = 'request_llms/current_ptune_model.json' # if not 
os.path.exists(conf): raise RuntimeError('找不到微调模型信息') # with open(conf, 'r', encoding='utf8') as f: # model_args = json.loads(f.read()) @@ -140,7 +140,7 @@ def stream_chat(self, **kwargs): def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ 多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ global glmft_handle if glmft_handle is None: @@ -171,7 +171,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ 单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "")) diff --git a/request_llm/bridge_chatglmonnx.py b/request_llms/bridge_chatglmonnx.py similarity index 83% rename from request_llm/bridge_chatglmonnx.py rename to request_llms/bridge_chatglmonnx.py index 594bcca15f..312c6846ff 100644 --- a/request_llm/bridge_chatglmonnx.py +++ b/request_llms/bridge_chatglmonnx.py @@ -1,5 +1,5 @@ model_name = "ChatGLM-ONNX" -cmd_to_install = "`pip install -r request_llm/requirements_chatglm_onnx.txt`" +cmd_to_install = "`pip install -r request_llms/requirements_chatglm_onnx.txt`" from transformers import AutoModel, AutoTokenizer @@ -28,13 +28,13 @@ def load_model_info(self): def load_model_and_tokenizer(self): # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 import os, glob - if not len(glob.glob("./request_llm/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/*.bin")) >= 7: # 该模型有七个 bin 文件 + if not len(glob.glob("./request_llms/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/*.bin")) >= 7: # 该模型有七个 bin 文件 from huggingface_hub import snapshot_download - snapshot_download(repo_id="K024/ChatGLM-6b-onnx-u8s8", local_dir="./request_llm/ChatGLM-6b-onnx-u8s8") + snapshot_download(repo_id="K024/ChatGLM-6b-onnx-u8s8", local_dir="./request_llms/ChatGLM-6b-onnx-u8s8") def create_model(): return ChatGLMModel( - tokenizer_path = "./request_llm/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/sentencepiece.model", - onnx_model_path = "./request_llm/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/chatglm-6b-int8.onnx" + tokenizer_path = "./request_llms/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/sentencepiece.model", + onnx_model_path = "./request_llms/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/chatglm-6b-int8.onnx" ) self._model = create_model() return self._model, None diff --git a/request_llm/bridge_chatgpt.py b/request_llms/bridge_chatgpt.py similarity index 100% rename from request_llm/bridge_chatgpt.py rename to request_llms/bridge_chatgpt.py diff --git a/request_llm/bridge_chatgpt_website.py b/request_llms/bridge_chatgpt_website.py similarity index 100% rename from request_llm/bridge_chatgpt_website.py rename to request_llms/bridge_chatgpt_website.py diff --git a/request_llm/bridge_claude.py b/request_llms/bridge_claude.py similarity index 100% rename from request_llm/bridge_claude.py rename to request_llms/bridge_claude.py diff --git a/request_llm/bridge_internlm.py b/request_llms/bridge_internlm.py similarity index 99% rename from request_llm/bridge_internlm.py rename to request_llms/bridge_internlm.py index 0ec65b641d..3304fe234f 100644 --- a/request_llm/bridge_internlm.py +++ b/request_llms/bridge_internlm.py @@ -1,5 +1,5 @@ model_name = "InternLM" -cmd_to_install = "`pip install -r request_llm/requirements_chatglm.txt`" +cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`" from 
transformers import AutoModel, AutoTokenizer import time diff --git a/request_llm/bridge_jittorllms_llama.py b/request_llms/bridge_jittorllms_llama.py similarity index 93% rename from request_llm/bridge_jittorllms_llama.py rename to request_llms/bridge_jittorllms_llama.py index d4853578fa..6099cd675d 100644 --- a/request_llm/bridge_jittorllms_llama.py +++ b/request_llms/bridge_jittorllms_llama.py @@ -28,8 +28,8 @@ def check_dependency(self): self.success = True except: from toolbox import trimmed_format_exc - self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\ - r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\ + self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r request_llms/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\ + r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llms/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\ r"警告:安装jittorllms依赖后将完全破坏现有的pytorch环境,建议使用docker环境!" + trimmed_format_exc() self.success = False @@ -45,8 +45,8 @@ def validate_path(): env = os.environ.get("PATH", "") os.environ["PATH"] = env.replace('/cuda/bin', '/x/bin') root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..') - os.chdir(root_dir_assume + '/request_llm/jittorllms') - sys.path.append(root_dir_assume + '/request_llm/jittorllms') + os.chdir(root_dir_assume + '/request_llms/jittorllms') + sys.path.append(root_dir_assume + '/request_llms/jittorllms') validate_path() # validate path so you can run from base directory def load_model(): @@ -109,7 +109,7 @@ def stream_chat(self, **kwargs): def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ 多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ global llama_glm_handle if llama_glm_handle is None: @@ -140,7 +140,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ 单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "")) diff --git a/request_llm/bridge_jittorllms_pangualpha.py b/request_llms/bridge_jittorllms_pangualpha.py similarity index 93% rename from request_llm/bridge_jittorllms_pangualpha.py rename to request_llms/bridge_jittorllms_pangualpha.py index 20a3021303..eebefcc2bd 100644 --- a/request_llm/bridge_jittorllms_pangualpha.py +++ b/request_llms/bridge_jittorllms_pangualpha.py @@ -28,8 +28,8 @@ def check_dependency(self): self.success = True except: from toolbox import trimmed_format_exc - self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\ - r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\ + self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r request_llms/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\ + r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llms/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\ r"警告:安装jittorllms依赖后将完全破坏现有的pytorch环境,建议使用docker环境!" 
+ trimmed_format_exc() self.success = False @@ -45,8 +45,8 @@ def validate_path(): env = os.environ.get("PATH", "") os.environ["PATH"] = env.replace('/cuda/bin', '/x/bin') root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..') - os.chdir(root_dir_assume + '/request_llm/jittorllms') - sys.path.append(root_dir_assume + '/request_llm/jittorllms') + os.chdir(root_dir_assume + '/request_llms/jittorllms') + sys.path.append(root_dir_assume + '/request_llms/jittorllms') validate_path() # validate path so you can run from base directory def load_model(): @@ -109,7 +109,7 @@ def stream_chat(self, **kwargs): def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ 多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ global pangu_glm_handle if pangu_glm_handle is None: @@ -140,7 +140,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ 单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "")) diff --git a/request_llm/bridge_jittorllms_rwkv.py b/request_llms/bridge_jittorllms_rwkv.py similarity index 93% rename from request_llm/bridge_jittorllms_rwkv.py rename to request_llms/bridge_jittorllms_rwkv.py index ee4f592f5a..32ba3b8d4c 100644 --- a/request_llm/bridge_jittorllms_rwkv.py +++ b/request_llms/bridge_jittorllms_rwkv.py @@ -28,8 +28,8 @@ def check_dependency(self): self.success = True except: from toolbox import trimmed_format_exc - self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\ - r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\ + self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r request_llms/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\ + r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llms/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\ r"警告:安装jittorllms依赖后将完全破坏现有的pytorch环境,建议使用docker环境!" 
+ trimmed_format_exc() self.success = False @@ -45,8 +45,8 @@ def validate_path(): env = os.environ.get("PATH", "") os.environ["PATH"] = env.replace('/cuda/bin', '/x/bin') root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..') - os.chdir(root_dir_assume + '/request_llm/jittorllms') - sys.path.append(root_dir_assume + '/request_llm/jittorllms') + os.chdir(root_dir_assume + '/request_llms/jittorllms') + sys.path.append(root_dir_assume + '/request_llms/jittorllms') validate_path() # validate path so you can run from base directory def load_model(): @@ -109,7 +109,7 @@ def stream_chat(self, **kwargs): def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ 多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ global rwkv_glm_handle if rwkv_glm_handle is None: @@ -140,7 +140,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ 单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "")) diff --git a/request_llm/bridge_llama2.py b/request_llms/bridge_llama2.py similarity index 98% rename from request_llm/bridge_llama2.py rename to request_llms/bridge_llama2.py index d1be446352..bc8ef7ebb0 100644 --- a/request_llm/bridge_llama2.py +++ b/request_llms/bridge_llama2.py @@ -1,5 +1,5 @@ model_name = "LLaMA" -cmd_to_install = "`pip install -r request_llm/requirements_chatglm.txt`" +cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`" from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer diff --git a/request_llm/bridge_moss.py b/request_llms/bridge_moss.py similarity index 96% rename from request_llm/bridge_moss.py rename to request_llms/bridge_moss.py index 3c6217d2b2..5061fcf9c5 100644 --- a/request_llm/bridge_moss.py +++ b/request_llms/bridge_moss.py @@ -24,12 +24,12 @@ def __init__(self): # 主进程执行 def check_dependency(self): # 主进程执行 try: import datasets, os - assert os.path.exists('request_llm/moss/models') + assert os.path.exists('request_llms/moss/models') self.info = "依赖检测通过" self.success = True except: self.info = """ - 缺少MOSS的依赖,如果要使用MOSS,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_moss.txt`和`git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss`安装MOSS的依赖。 + 缺少MOSS的依赖,如果要使用MOSS,除了基础的pip依赖以外,您还需要运行`pip install -r request_llms/requirements_moss.txt`和`git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss`安装MOSS的依赖。 """ self.success = False return self.success @@ -110,8 +110,8 @@ def run(self): # 子进程执行 def validate_path(): import os, sys root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..') - os.chdir(root_dir_assume + '/request_llm/moss') - sys.path.append(root_dir_assume + '/request_llm/moss') + os.chdir(root_dir_assume + '/request_llms/moss') + sys.path.append(root_dir_assume + '/request_llms/moss') validate_path() # validate path so you can run from base directory try: @@ -176,7 +176,7 @@ def stream_chat(self, **kwargs): # 主进程执行 def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ 多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ global moss_handle if moss_handle is None: @@ -206,7 +206,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], 
sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ 单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "")) diff --git a/request_llm/bridge_newbingfree.py b/request_llms/bridge_newbingfree.py similarity index 97% rename from request_llm/bridge_newbingfree.py rename to request_llms/bridge_newbingfree.py index c606645498..b5bfb30660 100644 --- a/request_llm/bridge_newbingfree.py +++ b/request_llms/bridge_newbingfree.py @@ -54,7 +54,7 @@ def check_dependency(self): self.info = "依赖检测通过,等待NewBing响应。注意目前不能多人同时调用NewBing接口(有线程锁),否则将导致每个人的NewBing问询历史互相渗透。调用NewBing时,会自动使用已配置的代理。" self.success = True except: - self.info = "缺少的依赖,如果要使用Newbing,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_newbing.txt`安装Newbing的依赖。" + self.info = "缺少的依赖,如果要使用Newbing,除了基础的pip依赖以外,您还需要运行`pip install -r request_llms/requirements_newbing.txt`安装Newbing的依赖。" self.success = False def ready(self): @@ -63,7 +63,7 @@ def ready(self): async def async_run(self): # 读取配置 NEWBING_STYLE, = get_conf('NEWBING_STYLE') - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info endpoint = model_info['newbing']['endpoint'] while True: # 等待 @@ -181,7 +181,7 @@ def stream_chat(self, **kwargs): def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ 多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ global newbingfree_handle if (newbingfree_handle is None) or (not newbingfree_handle.success): @@ -210,7 +210,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ 单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "[Local Message]: 等待NewBing响应中 ...")) diff --git a/request_llm/bridge_qianfan.py b/request_llms/bridge_qianfan.py similarity index 98% rename from request_llm/bridge_qianfan.py rename to request_llms/bridge_qianfan.py index be7397607a..bf78a34457 100644 --- a/request_llm/bridge_qianfan.py +++ b/request_llms/bridge_qianfan.py @@ -119,7 +119,7 @@ def generate_from_baidu_qianfan(inputs, llm_kwargs, history, system_prompt): def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ ⭐多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ watch_dog_patience = 5 response = "" @@ -134,7 +134,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ ⭐单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "")) diff --git a/request_llm/bridge_qwen.py b/request_llms/bridge_qwen.py similarity index 97% rename from request_llm/bridge_qwen.py rename to request_llms/bridge_qwen.py index 07ed243feb..62682cfa31 100644 --- a/request_llm/bridge_qwen.py +++ b/request_llms/bridge_qwen.py @@ -1,5 +1,5 @@ model_name = "Qwen" -cmd_to_install = "`pip install -r request_llm/requirements_qwen.txt`" +cmd_to_install = "`pip install -r request_llms/requirements_qwen.txt`" from transformers import AutoModel, AutoTokenizer diff --git a/request_llm/bridge_spark.py 
b/request_llms/bridge_spark.py similarity index 95% rename from request_llm/bridge_spark.py rename to request_llms/bridge_spark.py index 0fe925f7a0..8c7bf59b41 100644 --- a/request_llm/bridge_spark.py +++ b/request_llms/bridge_spark.py @@ -16,7 +16,7 @@ def validate_key(): def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ ⭐多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ watch_dog_patience = 5 response = "" @@ -36,7 +36,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ ⭐单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "")) yield from update_ui(chatbot=chatbot, history=history) diff --git a/request_llm/bridge_stackclaude.py b/request_llms/bridge_stackclaude.py similarity index 98% rename from request_llm/bridge_stackclaude.py rename to request_llms/bridge_stackclaude.py index 3f2ee67428..48612b3cd2 100644 --- a/request_llm/bridge_stackclaude.py +++ b/request_llms/bridge_stackclaude.py @@ -99,7 +99,7 @@ def check_dependency(self): self.info = "依赖检测通过,等待Claude响应。注意目前不能多人同时调用Claude接口(有线程锁),否则将导致每个人的Claude问询历史互相渗透。调用Claude时,会自动使用已配置的代理。" self.success = True except: - self.info = "缺少的依赖,如果要使用Claude,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_slackclaude.txt`安装Claude的依赖,然后重启程序。" + self.info = "缺少的依赖,如果要使用Claude,除了基础的pip依赖以外,您还需要运行`pip install -r request_llms/requirements_slackclaude.txt`安装Claude的依赖,然后重启程序。" self.success = False def ready(self): @@ -204,7 +204,7 @@ def stream_chat(self, **kwargs): def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False): """ 多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ global claude_handle if (claude_handle is None) or (not claude_handle.success): @@ -234,7 +234,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None): """ 单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "[Local Message]: 等待Claude响应中 ...")) diff --git a/request_llm/bridge_tgui.py b/request_llms/bridge_tgui.py similarity index 100% rename from request_llm/bridge_tgui.py rename to request_llms/bridge_tgui.py diff --git a/request_llm/chatglmoonx.py b/request_llms/chatglmoonx.py similarity index 100% rename from request_llm/chatglmoonx.py rename to request_llms/chatglmoonx.py diff --git a/request_llm/com_sparkapi.py b/request_llms/com_sparkapi.py similarity index 100% rename from request_llm/com_sparkapi.py rename to request_llms/com_sparkapi.py diff --git a/request_llm/edge_gpt_free.py b/request_llms/edge_gpt_free.py similarity index 100% rename from request_llm/edge_gpt_free.py rename to request_llms/edge_gpt_free.py diff --git a/request_llm/local_llm_class.py b/request_llms/local_llm_class.py similarity index 98% rename from request_llm/local_llm_class.py rename to request_llms/local_llm_class.py index c9c7253440..e742d519eb 100644 --- a/request_llm/local_llm_class.py +++ b/request_llms/local_llm_class.py @@ -120,7 +120,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name): def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], 
sys_prompt="", observe_window=[], console_slience=False): """ ⭐多线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ _llm_handle = LLMSingletonClass() if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + _llm_handle.info @@ -146,7 +146,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): """ ⭐单线程方法 - 函数的说明请见 request_llm/bridge_all.py + 函数的说明请见 request_llms/bridge_all.py """ chatbot.append((inputs, "")) diff --git a/request_llm/requirements_chatglm.txt b/request_llms/requirements_chatglm.txt similarity index 100% rename from request_llm/requirements_chatglm.txt rename to request_llms/requirements_chatglm.txt diff --git a/request_llm/requirements_chatglm_onnx.txt b/request_llms/requirements_chatglm_onnx.txt similarity index 100% rename from request_llm/requirements_chatglm_onnx.txt rename to request_llms/requirements_chatglm_onnx.txt diff --git a/request_llm/requirements_jittorllms.txt b/request_llms/requirements_jittorllms.txt similarity index 100% rename from request_llm/requirements_jittorllms.txt rename to request_llms/requirements_jittorllms.txt diff --git a/request_llm/requirements_moss.txt b/request_llms/requirements_moss.txt similarity index 100% rename from request_llm/requirements_moss.txt rename to request_llms/requirements_moss.txt diff --git a/request_llm/requirements_newbing.txt b/request_llms/requirements_newbing.txt similarity index 100% rename from request_llm/requirements_newbing.txt rename to request_llms/requirements_newbing.txt diff --git a/request_llm/requirements_qwen.txt b/request_llms/requirements_qwen.txt similarity index 100% rename from request_llm/requirements_qwen.txt rename to request_llms/requirements_qwen.txt diff --git a/request_llm/requirements_slackclaude.txt b/request_llms/requirements_slackclaude.txt similarity index 100% rename from request_llm/requirements_slackclaude.txt rename to request_llms/requirements_slackclaude.txt diff --git a/tests/test_llms.py b/tests/test_llms.py index 75e230327e..6b7019d16c 100644 --- a/tests/test_llms.py +++ b/tests/test_llms.py @@ -10,14 +10,14 @@ def validate_path(): validate_path() # validate path so you can run from base directory if __name__ == "__main__": - # from request_llm.bridge_newbingfree import predict_no_ui_long_connection - # from request_llm.bridge_moss import predict_no_ui_long_connection - # from request_llm.bridge_jittorllms_pangualpha import predict_no_ui_long_connection - # from request_llm.bridge_jittorllms_llama import predict_no_ui_long_connection - # from request_llm.bridge_claude import predict_no_ui_long_connection - # from request_llm.bridge_internlm import predict_no_ui_long_connection - # from request_llm.bridge_qwen import predict_no_ui_long_connection - from request_llm.bridge_spark import predict_no_ui_long_connection + # from request_llms.bridge_newbingfree import predict_no_ui_long_connection + # from request_llms.bridge_moss import predict_no_ui_long_connection + # from request_llms.bridge_jittorllms_pangualpha import predict_no_ui_long_connection + # from request_llms.bridge_jittorllms_llama import predict_no_ui_long_connection + # from request_llms.bridge_claude import predict_no_ui_long_connection + # from request_llms.bridge_internlm import predict_no_ui_long_connection + # from request_llms.bridge_qwen import predict_no_ui_long_connection + from request_llms.bridge_spark import 
predict_no_ui_long_connection llm_kwargs = { 'max_length': 4096, diff --git a/toolbox.py b/toolbox.py index 07a9fda0b3..4a783a3743 100644 --- a/toolbox.py +++ b/toolbox.py @@ -878,7 +878,7 @@ def clip_history(inputs, history, tokenizer, max_token_limit): 直到历史记录的标记数量降低到阈值以下。 """ import numpy as np - from request_llm.bridge_all import model_info + from request_llms.bridge_all import model_info def get_token_num(txt): return len(tokenizer.encode(txt, disallowed_special=())) input_token_num = get_token_num(inputs) @@ -1069,7 +1069,7 @@ def get_plugin_handle(plugin_name): def get_chat_handle(): """ """ - from request_llm.bridge_all import predict_no_ui_long_connection + from request_llms.bridge_all import predict_no_ui_long_connection return predict_no_ui_long_connection def get_plugin_default_kwargs(): From 127385b8469dc2faa6debab949bd3506ad1c08e5 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sat, 28 Oct 2023 19:23:43 +0800 Subject: [PATCH 003/117] =?UTF-8?q?=E6=8E=A5=E5=85=A5=E6=96=B0=E6=A8=A1?= =?UTF-8?q?=E5=9E=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- config.py | 7 +- request_llms/bridge_all.py | 16 +++++ request_llms/bridge_chatglm.py | 6 +- request_llms/bridge_chatglmft.py | 6 +- request_llms/bridge_jittorllms_llama.py | 6 +- request_llms/bridge_jittorllms_pangualpha.py | 6 +- request_llms/bridge_jittorllms_rwkv.py | 6 +- request_llms/bridge_moss.py | 6 +- request_llms/bridge_newbingfree.py | 10 +-- request_llms/bridge_qianfan.py | 6 +- request_llms/bridge_spark.py | 6 +- request_llms/bridge_stackclaude.py | 12 ++-- request_llms/bridge_zhipu.py | 59 +++++++++++++++++ request_llms/com_zhipuapi.py | 67 ++++++++++++++++++++ request_llms/local_llm_class.py | 6 +- tests/test_llms.py | 3 +- tests/test_markdown.py | 44 +++++++++++++ toolbox.py | 21 ++++++ 18 files changed, 253 insertions(+), 40 deletions(-) create mode 100644 request_llms/bridge_zhipu.py create mode 100644 request_llms/com_zhipuapi.py create mode 100644 tests/test_markdown.py diff --git a/config.py b/config.py index a18bc4ad2e..6d62a67607 100644 --- a/config.py +++ b/config.py @@ -87,7 +87,7 @@ "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"] -# P.S. 其他可用的模型还包括 ["qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random" +# P.S. 
其他可用的模型还包括 ["zhipuai", "qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random" # "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"] @@ -172,6 +172,11 @@ XFYUN_API_KEY = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +# 接入智谱大模型 +ZHIPUAI_API_KEY = "" +ZHIPUAI_MODEL = "chatglm_turbo" + + # Claude API KEY ANTHROPIC_API_KEY = "" diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index 70e2c43769..37357ed9f7 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -483,6 +483,22 @@ def decode(self, *args, **kwargs): }) except: print(trimmed_format_exc()) +if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai + try: + from .bridge_zhipu import predict_no_ui_long_connection as zhipu_noui + from .bridge_zhipu import predict as zhipu_ui + model_info.update({ + "zhipuai": { + "fn_with_ui": zhipu_ui, + "fn_without_ui": zhipu_noui, + "endpoint": None, + "max_token": 4096, + "tokenizer": tokenizer_gpt35, + "token_cnt": get_token_num_gpt35, + } + }) + except: + print(trimmed_format_exc()) # <-- 用于定义和切换多个azure模型 --> AZURE_CFG_ARRAY, = get_conf("AZURE_CFG_ARRAY") diff --git a/request_llms/bridge_chatglm.py b/request_llms/bridge_chatglm.py index 194cd1a208..3a7cc72399 100644 --- a/request_llms/bridge_chatglm.py +++ b/request_llms/bridge_chatglm.py @@ -155,13 +155,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp history_feedin.append([history[2*i], history[2*i+1]] ) # 开始接收chatglm的回复 - response = "[Local Message]: 等待ChatGLM响应中 ..." + response = "[Local Message] 等待ChatGLM响应中 ..." for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): chatbot[-1] = (inputs, response) yield from update_ui(chatbot=chatbot, history=history) # 总结输出 - if response == "[Local Message]: 等待ChatGLM响应中 ...": - response = "[Local Message]: ChatGLM响应异常 ..." + if response == "[Local Message] 等待ChatGLM响应中 ...": + response = "[Local Message] ChatGLM响应异常 ..." history.extend([inputs, response]) yield from update_ui(chatbot=chatbot, history=history) diff --git a/request_llms/bridge_chatglmft.py b/request_llms/bridge_chatglmft.py index 8755bc1137..63f36049ac 100644 --- a/request_llms/bridge_chatglmft.py +++ b/request_llms/bridge_chatglmft.py @@ -195,13 +195,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp history_feedin.append([history[2*i], history[2*i+1]] ) # 开始接收chatglmft的回复 - response = "[Local Message]: 等待ChatGLMFT响应中 ..." + response = "[Local Message] 等待ChatGLMFT响应中 ..." for response in glmft_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): chatbot[-1] = (inputs, response) yield from update_ui(chatbot=chatbot, history=history) # 总结输出 - if response == "[Local Message]: 等待ChatGLMFT响应中 ...": - response = "[Local Message]: ChatGLMFT响应异常 ..." + if response == "[Local Message] 等待ChatGLMFT响应中 ...": + response = "[Local Message] ChatGLMFT响应异常 ..." 
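A pattern worth noting across the bridge hunks above and below: every bridge inlines its waiting/failure sentinel strings twice, once as an assignment and once as a comparison, which is why this prefix rename has to touch matched pairs of literals in every file. It is also why the tail of the bridge_qianfan.py hunk further down can keep an unconditional failure assignment ahead of its guard, leaving the guard dead and recording the failure message regardless of what was streamed (a pre-existing quirk this rename preserves). A sketch of the tail factored around shared sentinels — the helper names here are illustrative, not part of this patch:

```python
# Hypothetical shared helpers (not in this patch); each bridge currently
# re-creates these strings inline in both predict() variants.
LOCAL_MSG_PREFIX = "[Local Message]"

def waiting_msg(model_name):
    return f"{LOCAL_MSG_PREFIX} 等待{model_name}响应中 ..."

def failure_msg(model_name):
    return f"{LOCAL_MSG_PREFIX} {model_name}响应异常 ..."

def summarize_output(response, inputs, history, model_name):
    # Only substitute the failure message when the stream never advanced
    # past the waiting placeholder; assigning failure_msg() first would
    # make this comparison dead and clobber the real reply.
    if response == waiting_msg(model_name):
        response = failure_msg(model_name)
    history.extend([inputs, response])
    return response
```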
history.extend([inputs, response]) yield from update_ui(chatbot=chatbot, history=history) diff --git a/request_llms/bridge_jittorllms_llama.py b/request_llms/bridge_jittorllms_llama.py index 6099cd675d..af2d9fbdc3 100644 --- a/request_llms/bridge_jittorllms_llama.py +++ b/request_llms/bridge_jittorllms_llama.py @@ -163,13 +163,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp history_feedin.append([history[2*i], history[2*i+1]] ) # 开始接收jittorllms的回复 - response = "[Local Message]: 等待jittorllms响应中 ..." + response = "[Local Message] 等待jittorllms响应中 ..." for response in llama_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): chatbot[-1] = (inputs, response) yield from update_ui(chatbot=chatbot, history=history) # 总结输出 - if response == "[Local Message]: 等待jittorllms响应中 ...": - response = "[Local Message]: jittorllms响应异常 ..." + if response == "[Local Message] 等待jittorllms响应中 ...": + response = "[Local Message] jittorllms响应异常 ..." history.extend([inputs, response]) yield from update_ui(chatbot=chatbot, history=history) diff --git a/request_llms/bridge_jittorllms_pangualpha.py b/request_llms/bridge_jittorllms_pangualpha.py index eebefcc2bd..df0523b488 100644 --- a/request_llms/bridge_jittorllms_pangualpha.py +++ b/request_llms/bridge_jittorllms_pangualpha.py @@ -163,13 +163,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp history_feedin.append([history[2*i], history[2*i+1]] ) # 开始接收jittorllms的回复 - response = "[Local Message]: 等待jittorllms响应中 ..." + response = "[Local Message] 等待jittorllms响应中 ..." for response in pangu_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): chatbot[-1] = (inputs, response) yield from update_ui(chatbot=chatbot, history=history) # 总结输出 - if response == "[Local Message]: 等待jittorllms响应中 ...": - response = "[Local Message]: jittorllms响应异常 ..." + if response == "[Local Message] 等待jittorllms响应中 ...": + response = "[Local Message] jittorllms响应异常 ..." history.extend([inputs, response]) yield from update_ui(chatbot=chatbot, history=history) diff --git a/request_llms/bridge_jittorllms_rwkv.py b/request_llms/bridge_jittorllms_rwkv.py index 32ba3b8d4c..875673aac7 100644 --- a/request_llms/bridge_jittorllms_rwkv.py +++ b/request_llms/bridge_jittorllms_rwkv.py @@ -163,13 +163,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp history_feedin.append([history[2*i], history[2*i+1]] ) # 开始接收jittorllms的回复 - response = "[Local Message]: 等待jittorllms响应中 ..." + response = "[Local Message] 等待jittorllms响应中 ..." for response in rwkv_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): chatbot[-1] = (inputs, response) yield from update_ui(chatbot=chatbot, history=history) # 总结输出 - if response == "[Local Message]: 等待jittorllms响应中 ...": - response = "[Local Message]: jittorllms响应异常 ..." + if response == "[Local Message] 等待jittorllms响应中 ...": + response = "[Local Message] jittorllms响应异常 ..." 
history.extend([inputs, response]) yield from update_ui(chatbot=chatbot, history=history) diff --git a/request_llms/bridge_moss.py b/request_llms/bridge_moss.py index 5061fcf9c5..d7399f52b6 100644 --- a/request_llms/bridge_moss.py +++ b/request_llms/bridge_moss.py @@ -219,7 +219,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp moss_handle = None return else: - response = "[Local Message]: 等待MOSS响应中 ..." + response = "[Local Message] 等待MOSS响应中 ..." chatbot[-1] = (inputs, response) yield from update_ui(chatbot=chatbot, history=history) @@ -238,7 +238,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp yield from update_ui(chatbot=chatbot, history=history) # 总结输出 - if response == "[Local Message]: 等待MOSS响应中 ...": - response = "[Local Message]: MOSS响应异常 ..." + if response == "[Local Message] 等待MOSS响应中 ...": + response = "[Local Message] MOSS响应异常 ..." history.extend([inputs, response.strip('<|MOSS|>: ')]) yield from update_ui(chatbot=chatbot, history=history) diff --git a/request_llms/bridge_newbingfree.py b/request_llms/bridge_newbingfree.py index b5bfb30660..5dddb6105c 100644 --- a/request_llms/bridge_newbingfree.py +++ b/request_llms/bridge_newbingfree.py @@ -199,7 +199,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可 response = "" - if len(observe_window) >= 1: observe_window[0] = "[Local Message]: 等待NewBing响应中 ..." + if len(observe_window) >= 1: observe_window[0] = "[Local Message] 等待NewBing响应中 ..." for response in newbingfree_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): if len(observe_window) >= 1: observe_window[0] = preprocess_newbing_out_simple(response) if len(observe_window) >= 2: @@ -212,7 +212,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp 单线程方法 函数的说明请见 request_llms/bridge_all.py """ - chatbot.append((inputs, "[Local Message]: 等待NewBing响应中 ...")) + chatbot.append((inputs, "[Local Message] 等待NewBing响应中 ...")) global newbingfree_handle if (newbingfree_handle is None) or (not newbingfree_handle.success): @@ -231,13 +231,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp for i in range(len(history)//2): history_feedin.append([history[2*i], history[2*i+1]] ) - chatbot[-1] = (inputs, "[Local Message]: 等待NewBing响应中 ...") - response = "[Local Message]: 等待NewBing响应中 ..." + chatbot[-1] = (inputs, "[Local Message] 等待NewBing响应中 ...") + response = "[Local Message] 等待NewBing响应中 ..." yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。") for response in newbingfree_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): chatbot[-1] = (inputs, preprocess_newbing_out(response)) yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。") - if response == "[Local Message]: 等待NewBing响应中 ...": response = "[Local Message]: NewBing响应异常,请刷新界面重试 ..." + if response == "[Local Message] 等待NewBing响应中 ...": response = "[Local Message] NewBing响应异常,请刷新界面重试 ..." 
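The `observe_window` handling in the newbingfree hunk above — spelled out again in the new bridge_zhipu.py later in this commit — follows an implicit contract: slot 0 publishes the latest partial reply for the UI thread to read, slot 1 holds a "last alive" timestamp that the caller keeps refreshing, and the worker aborts once that timestamp stays stale longer than `watch_dog_patience`. A minimal sketch of that contract, with illustrative names:

```python
import time

WATCH_DOG_PATIENCE = 5  # seconds, matching the bridges in this patch

def stream_with_watchdog(chunks, observe_window):
    # chunks: any iterator of cumulative partial replies, standing in for
    # a bridge's stream_chat() generator.
    response = ""
    for response in chunks:
        if len(observe_window) >= 1:
            observe_window[0] = response  # publish partial output to the UI
        if len(observe_window) >= 2 and (time.time() - observe_window[1]) > WATCH_DOG_PATIENCE:
            # The caller stopped refreshing the timestamp: assume the user
            # terminated the task and stop this worker too.
            raise RuntimeError("程序终止。")
    return response
```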
history.extend([inputs, response]) logging.info(f'[raw_input] {inputs}') logging.info(f'[response] {response}') diff --git a/request_llms/bridge_qianfan.py b/request_llms/bridge_qianfan.py index bf78a34457..99f0623f38 100644 --- a/request_llms/bridge_qianfan.py +++ b/request_llms/bridge_qianfan.py @@ -158,8 +158,8 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp return # 总结输出 - response = f"[Local Message]: {model_name}响应异常 ..." - if response == f"[Local Message]: 等待{model_name}响应中 ...": - response = f"[Local Message]: {model_name}响应异常 ..." + response = f"[Local Message] {model_name}响应异常 ..." + if response == f"[Local Message] 等待{model_name}响应中 ...": + response = f"[Local Message] {model_name}响应异常 ..." history.extend([inputs, response]) yield from update_ui(chatbot=chatbot, history=history) \ No newline at end of file diff --git a/request_llms/bridge_spark.py b/request_llms/bridge_spark.py index 8c7bf59b41..d6ff42faed 100644 --- a/request_llms/bridge_spark.py +++ b/request_llms/bridge_spark.py @@ -42,7 +42,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp yield from update_ui(chatbot=chatbot, history=history) if validate_key() is False: - yield from update_ui_lastest_msg(lastmsg="[Local Message]: 请配置讯飞星火大模型的XFYUN_APPID, XFYUN_API_KEY, XFYUN_API_SECRET", chatbot=chatbot, history=history, delay=0) + yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置讯飞星火大模型的XFYUN_APPID, XFYUN_API_KEY, XFYUN_API_SECRET", chatbot=chatbot, history=history, delay=0) return if additional_fn is not None: @@ -57,7 +57,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp yield from update_ui(chatbot=chatbot, history=history) # 总结输出 - if response == f"[Local Message]: 等待{model_name}响应中 ...": - response = f"[Local Message]: {model_name}响应异常 ..." + if response == f"[Local Message] 等待{model_name}响应中 ...": + response = f"[Local Message] {model_name}响应异常 ..." history.extend([inputs, response]) yield from update_ui(chatbot=chatbot, history=history) \ No newline at end of file diff --git a/request_llms/bridge_stackclaude.py b/request_llms/bridge_stackclaude.py index 48612b3cd2..3b57615671 100644 --- a/request_llms/bridge_stackclaude.py +++ b/request_llms/bridge_stackclaude.py @@ -222,7 +222,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可 response = "" - observe_window[0] = "[Local Message]: 等待Claude响应中 ..." + observe_window[0] = "[Local Message] 等待Claude响应中 ..." for response in claude_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): observe_window[0] = preprocess_newbing_out_simple(response) if len(observe_window) >= 2: @@ -236,7 +236,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp 单线程方法 函数的说明请见 request_llms/bridge_all.py """ - chatbot.append((inputs, "[Local Message]: 等待Claude响应中 ...")) + chatbot.append((inputs, "[Local Message] 等待Claude响应中 ...")) global claude_handle if (claude_handle is None) or (not claude_handle.success): @@ -255,14 +255,14 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp for i in range(len(history)//2): history_feedin.append([history[2*i], history[2*i+1]]) - chatbot[-1] = (inputs, "[Local Message]: 等待Claude响应中 ...") - response = "[Local Message]: 等待Claude响应中 ..." 
+ chatbot[-1] = (inputs, "[Local Message] 等待Claude响应中 ...") + response = "[Local Message] 等待Claude响应中 ..." yield from update_ui(chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。") for response in claude_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt): chatbot[-1] = (inputs, preprocess_newbing_out(response)) yield from update_ui(chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。") - if response == "[Local Message]: 等待Claude响应中 ...": - response = "[Local Message]: Claude响应异常,请刷新界面重试 ..." + if response == "[Local Message] 等待Claude响应中 ...": + response = "[Local Message] Claude响应异常,请刷新界面重试 ..." history.extend([inputs, response]) logging.info(f'[raw_input] {inputs}') logging.info(f'[response] {response}') diff --git a/request_llms/bridge_zhipu.py b/request_llms/bridge_zhipu.py new file mode 100644 index 0000000000..2fd19ad508 --- /dev/null +++ b/request_llms/bridge_zhipu.py @@ -0,0 +1,59 @@ + +import time +from toolbox import update_ui, get_conf, update_ui_lastest_msg + +model_name = '智谱AI大模型' + +def validate_key(): + ZHIPUAI_API_KEY, = get_conf("ZHIPUAI_API_KEY") + if ZHIPUAI_API_KEY == '': return False + return True + +def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): + """ + ⭐多线程方法 + 函数的说明请见 request_llms/bridge_all.py + """ + watch_dog_patience = 5 + response = "" + + if validate_key() is False: + raise RuntimeError('请配置ZHIPUAI_API_KEY') + + from .com_zhipuapi import ZhipuRequestInstance + sri = ZhipuRequestInstance() + for response in sri.generate(inputs, llm_kwargs, history, sys_prompt): + if len(observe_window) >= 1: + observe_window[0] = response + if len(observe_window) >= 2: + if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。") + return response + +def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): + """ + ⭐单线程方法 + 函数的说明请见 request_llms/bridge_all.py + """ + chatbot.append((inputs, "")) + yield from update_ui(chatbot=chatbot, history=history) + + if validate_key() is False: + yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置ZHIPUAI_API_KEY", chatbot=chatbot, history=history, delay=0) + return + + if additional_fn is not None: + from core_functional import handle_core_functionality + inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot) + + # 开始接收回复 + from .com_zhipuapi import ZhipuRequestInstance + sri = ZhipuRequestInstance() + for response in sri.generate(inputs, llm_kwargs, history, system_prompt): + chatbot[-1] = (inputs, response) + yield from update_ui(chatbot=chatbot, history=history) + + # 总结输出 + if response == f"[Local Message] 等待{model_name}响应中 ...": + response = f"[Local Message] {model_name}响应异常 ..." + history.extend([inputs, response]) + yield from update_ui(chatbot=chatbot, history=history) \ No newline at end of file diff --git a/request_llms/com_zhipuapi.py b/request_llms/com_zhipuapi.py new file mode 100644 index 0000000000..445720d881 --- /dev/null +++ b/request_llms/com_zhipuapi.py @@ -0,0 +1,67 @@ +from toolbox import get_conf +import threading +import logging + +timeout_bot_msg = '[Local Message] Request timeout. Network error.' 
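The new bridge_zhipu.py above exposes the same two entry points as every other bridge, so it can be exercised the same way tests/test_llms.py (further down) drives the others. A minimal driver sketch — the key values are illustrative, and it assumes ZHIPUAI_API_KEY has been filled in, since validate_key() raises otherwise; top_p and temperature are the only sampling keys com_zhipuapi.py below actually reads:

```python
# Illustrative driver, not part of the patch.
from request_llms.bridge_zhipu import predict_no_ui_long_connection

llm_kwargs = {'max_length': 4096, 'top_p': 1, 'temperature': 1}
result = predict_no_ui_long_connection(
    inputs="你好",
    llm_kwargs=llm_kwargs,
    history=[],
    sys_prompt="",
)
print(result)  # full reply, accumulated from the SSE "add" events
```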
+ +class ZhipuRequestInstance(): + def __init__(self): + + self.time_to_yield_event = threading.Event() + self.time_to_exit_event = threading.Event() + + self.result_buf = "" + + def generate(self, inputs, llm_kwargs, history, system_prompt): + # import _thread as thread + import zhipuai + ZHIPUAI_API_KEY, ZHIPUAI_MODEL = get_conf("ZHIPUAI_API_KEY", "ZHIPUAI_MODEL") + zhipuai.api_key = ZHIPUAI_API_KEY + self.result_buf = "" + response = zhipuai.model_api.sse_invoke( + model=ZHIPUAI_MODEL, + prompt=generate_message_payload(inputs, llm_kwargs, history, system_prompt), + top_p=llm_kwargs['top_p'], + temperature=llm_kwargs['temperature'], + ) + for event in response.events(): + if event.event == "add": + self.result_buf += event.data + yield self.result_buf + elif event.event == "error" or event.event == "interrupted": + raise RuntimeError("Unknown error:" + event.data) + elif event.event == "finish": + yield self.result_buf + break + else: + raise RuntimeError("Unknown error:" + str(event)) + + logging.info(f'[raw_input] {inputs}') + logging.info(f'[response] {self.result_buf}') + return self.result_buf + +def generate_message_payload(inputs, llm_kwargs, history, system_prompt): + conversation_cnt = len(history) // 2 + messages = [{"role": "user", "content": system_prompt}, {"role": "assistant", "content": "Certainly!"}] + if conversation_cnt: + for index in range(0, 2*conversation_cnt, 2): + what_i_have_asked = {} + what_i_have_asked["role"] = "user" + what_i_have_asked["content"] = history[index] + what_gpt_answer = {} + what_gpt_answer["role"] = "assistant" + what_gpt_answer["content"] = history[index+1] + if what_i_have_asked["content"] != "": + if what_gpt_answer["content"] == "": + continue + if what_gpt_answer["content"] == timeout_bot_msg: + continue + messages.append(what_i_have_asked) + messages.append(what_gpt_answer) + else: + messages[-1]['content'] = what_gpt_answer['content'] + what_i_ask_now = {} + what_i_ask_now["role"] = "user" + what_i_ask_now["content"] = inputs + messages.append(what_i_ask_now) + return messages diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py index e742d519eb..a421ddf366 100644 --- a/request_llms/local_llm_class.py +++ b/request_llms/local_llm_class.py @@ -166,14 +166,14 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp history_feedin.append([history[2*i], history[2*i+1]] ) # 开始接收回复 - response = f"[Local Message]: 等待{model_name}响应中 ..." + response = f"[Local Message] 等待{model_name}响应中 ..." for response in _llm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): chatbot[-1] = (inputs, response) yield from update_ui(chatbot=chatbot, history=history) # 总结输出 - if response == f"[Local Message]: 等待{model_name}响应中 ...": - response = f"[Local Message]: {model_name}响应异常 ..." + if response == f"[Local Message] 等待{model_name}响应中 ...": + response = f"[Local Message] {model_name}响应异常 ..." 
history.extend([inputs, response]) yield from update_ui(chatbot=chatbot, history=history) diff --git a/tests/test_llms.py b/tests/test_llms.py index 6b7019d16c..f43f3680b1 100644 --- a/tests/test_llms.py +++ b/tests/test_llms.py @@ -17,7 +17,8 @@ def validate_path(): # from request_llms.bridge_claude import predict_no_ui_long_connection # from request_llms.bridge_internlm import predict_no_ui_long_connection # from request_llms.bridge_qwen import predict_no_ui_long_connection - from request_llms.bridge_spark import predict_no_ui_long_connection + # from request_llms.bridge_spark import predict_no_ui_long_connection + from request_llms.bridge_zhipu import predict_no_ui_long_connection llm_kwargs = { 'max_length': 4096, diff --git a/tests/test_markdown.py b/tests/test_markdown.py new file mode 100644 index 0000000000..c92b4c4d0a --- /dev/null +++ b/tests/test_markdown.py @@ -0,0 +1,44 @@ +md = """ +作为您的写作和编程助手,我可以为您提供以下服务: + +1. 写作: + - 帮助您撰写文章、报告、散文、故事等。 + - 提供写作建议和技巧。 + - 协助您进行文案策划和内容创作。 + +2. 编程: + - 帮助您解决编程问题,提供编程思路和建议。 + - 协助您编写代码,包括但不限于 Python、Java、C++ 等。 + - 为您解释复杂的技术概念,让您更容易理解。 + +3. 项目支持: + - 协助您规划项目进度和任务分配。 + - 提供项目管理和协作建议。 + - 在项目实施过程中提供支持,确保项目顺利进行。 + +4. 学习辅导: + - 帮助您巩固编程基础,提高编程能力。 + - 提供计算机科学、数据科学、人工智能等相关领域的学习资源和建议。 + - 解答您在学习过程中遇到的问题,让您更好地掌握知识。 + +5. 行业动态和趋势分析: + - 为您提供业界最新的新闻和技术趋势。 + - 分析行业动态,帮助您了解市场发展和竞争态势。 + - 为您制定技术战略提供参考和建议。 + +请随时告诉我您的需求,我会尽力提供帮助。如果您有任何问题或需要解答的议题,请随时提问。 +""" + +def validate_path(): + import os, sys + dir_name = os.path.dirname(__file__) + root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..') + os.chdir(root_dir_assume) + sys.path.append(root_dir_assume) +validate_path() # validate path so you can run from base directory +from toolbox import markdown_convertion + +html = markdown_convertion(md) +print(html) +with open('test.html', 'w', encoding='utf-8') as f: + f.write(html) \ No newline at end of file diff --git a/toolbox.py b/toolbox.py index 4a783a3743..137313efa7 100644 --- a/toolbox.py +++ b/toolbox.py @@ -7,6 +7,7 @@ import gradio import shutil import glob +import math from latex2mathml.converter import convert as tex2mathml from functools import wraps, lru_cache pj = os.path.join @@ -372,6 +373,26 @@ def is_equation(txt): contain_any_eq = True return contain_any_eq + def fix_markdown_indent(txt): + # fix markdown indent + if (' - ' not in txt) or ('. ' not in txt): + return txt # do not need to fix, fast escape + # walk through the lines and fix non-standard indentation + lines = txt.split("\n") + pattern = re.compile(r'^\s+-') + activated = False + for i, line in enumerate(lines): + if line.startswith('- ') or line.startswith('1. 
'): + activated = True + if activated and pattern.match(line): + stripped_string = line.lstrip() + num_spaces = len(line) - len(stripped_string) + if (num_spaces % 4) == 3: + num_spaces_should_be = math.ceil(num_spaces/4) * 4 + lines[i] = ' ' * num_spaces_should_be + stripped_string + return '\n'.join(lines) + + txt = fix_markdown_indent(txt) if is_equation(txt): # 有$标识的公式符号,且没有代码段```的标识 # convert everything to html format split = markdown.markdown(text='---') From 0b1e599b0139b8bed489bb7ab7b87fb1fffb6bcd Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sat, 28 Oct 2023 19:43:48 +0800 Subject: [PATCH 004/117] =?UTF-8?q?=E7=B4=A7=E6=80=A5=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?=E6=8A=A5=E9=94=99=E5=BC=82=E5=B8=B8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llms/bridge_chatgpt.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/request_llms/bridge_chatgpt.py b/request_llms/bridge_chatgpt.py index f91eea5094..b8b4d3a185 100644 --- a/request_llms/bridge_chatgpt.py +++ b/request_llms/bridge_chatgpt.py @@ -45,16 +45,18 @@ def decode_chunk(chunk): chunk_decoded = chunk.decode() chunkjson = None has_choices = False + choice_valid = False has_content = False has_role = False try: chunkjson = json.loads(chunk_decoded[6:]) - has_choices = ('choices' in chunkjson) and (len(chunkjson['choices']) > 0) - if has_choices: has_content = "content" in chunkjson['choices'][0]["delta"] - if has_choices: has_role = "role" in chunkjson['choices'][0]["delta"] + has_choices = 'choices' in chunkjson + if has_choices: choice_valid = (len(chunkjson['choices']) > 0) + if has_choices and choice_valid: has_content = "content" in chunkjson['choices'][0]["delta"] + if has_choices and choice_valid: has_role = "role" in chunkjson['choices'][0]["delta"] except: pass - return chunk_decoded, chunkjson, has_choices, has_content, has_role + return chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role from functools import lru_cache @lru_cache(maxsize=32) @@ -64,7 +66,6 @@ def verify_endpoint(endpoint): """ if "你亲手写的api名称" in endpoint: raise ValueError("Endpoint不正确, 请检查AZURE_ENDPOINT的配置! 
当前的Endpoint为:" + endpoint) - print(endpoint) return endpoint def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False): @@ -97,7 +98,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", if retry > MAX_RETRY: raise TimeoutError if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……') - stream_response = response.iter_lines() + stream_response = response.iter_lines() result = '' json_data = None while True: @@ -213,6 +214,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp while True: try: chunk = next(stream_response) + print(chunk) except StopIteration: # 非OpenAI官方接口的出现这样的报错,OpenAI和API2D不会走这里 chunk_decoded = chunk.decode() @@ -227,7 +229,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp return # 提前读取一些信息 (用于判断异常) - chunk_decoded, chunkjson, has_choices, has_content, has_role = decode_chunk(chunk) + chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role = decode_chunk(chunk) if is_head_of_the_stream and (r'"object":"error"' not in chunk_decoded) and (r"content" not in chunk_decoded): # 数据流的第一帧不携带content @@ -235,7 +237,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp if chunk: try: - if not has_choices: + if has_choices and not choice_valid: # 一些垃圾第三方接口的出现这样的错误 continue # 前者是API2D的结束条件,后者是OPENAI的结束条件 @@ -287,6 +289,8 @@ def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg) chatbot[-1] = (chatbot[-1][0], "[Local Message] Your account is not active. OpenAI以账户失效为由, 拒绝服务." + openai_website) elif "associated with a deactivated account" in error_msg: chatbot[-1] = (chatbot[-1][0], "[Local Message] You are associated with a deactivated account. OpenAI以账户失效为由, 拒绝服务." + openai_website) + elif "API key has been deactivated" in error_msg: + chatbot[-1] = (chatbot[-1][0], "[Local Message] API key has been deactivated. OpenAI以账户失效为由, 拒绝服务." + openai_website) elif "bad forward key" in error_msg: chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. 
API2D账户额度不足.") elif "Not enough point" in error_msg: From e64c26e6176ae5d09b7c37495a29224769e8c780 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sat, 28 Oct 2023 19:53:05 +0800 Subject: [PATCH 005/117] =?UTF-8?q?=E7=B4=A7=E6=80=A5=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?=E6=8A=A5=E9=94=99=E5=BC=82=E5=B8=B8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llm/bridge_all.py | 2 +- request_llm/bridge_chatgpt.py | 19 +++++++++++-------- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/request_llm/bridge_all.py b/request_llm/bridge_all.py index 70e2c43769..f85d1b6b34 100644 --- a/request_llm/bridge_all.py +++ b/request_llm/bridge_all.py @@ -505,7 +505,7 @@ def decode(self, *args, **kwargs): } }) if azure_model_name not in AVAIL_LLM_MODELS: - azure_model_name += [azure_model_name] + AVAIL_LLM_MODELS += [azure_model_name] diff --git a/request_llm/bridge_chatgpt.py b/request_llm/bridge_chatgpt.py index f91eea5094..9903da9d86 100644 --- a/request_llm/bridge_chatgpt.py +++ b/request_llm/bridge_chatgpt.py @@ -45,16 +45,18 @@ def decode_chunk(chunk): chunk_decoded = chunk.decode() chunkjson = None has_choices = False + choice_valid = False has_content = False has_role = False try: chunkjson = json.loads(chunk_decoded[6:]) - has_choices = ('choices' in chunkjson) and (len(chunkjson['choices']) > 0) - if has_choices: has_content = "content" in chunkjson['choices'][0]["delta"] - if has_choices: has_role = "role" in chunkjson['choices'][0]["delta"] + has_choices = 'choices' in chunkjson + if has_choices: choice_valid = (len(chunkjson['choices']) > 0) + if has_choices and choice_valid: has_content = "content" in chunkjson['choices'][0]["delta"] + if has_choices and choice_valid: has_role = "role" in chunkjson['choices'][0]["delta"] except: pass - return chunk_decoded, chunkjson, has_choices, has_content, has_role + return chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role from functools import lru_cache @lru_cache(maxsize=32) @@ -64,7 +66,6 @@ def verify_endpoint(endpoint): """ if "你亲手写的api名称" in endpoint: raise ValueError("Endpoint不正确, 请检查AZURE_ENDPOINT的配置! 当前的Endpoint为:" + endpoint) - print(endpoint) return endpoint def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False): @@ -97,7 +98,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", if retry > MAX_RETRY: raise TimeoutError if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……') - stream_response = response.iter_lines() + stream_response = response.iter_lines() result = '' json_data = None while True: @@ -227,7 +228,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp return # 提前读取一些信息 (用于判断异常) - chunk_decoded, chunkjson, has_choices, has_content, has_role = decode_chunk(chunk) + chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role = decode_chunk(chunk) if is_head_of_the_stream and (r'"object":"error"' not in chunk_decoded) and (r"content" not in chunk_decoded): # 数据流的第一帧不携带content @@ -235,7 +236,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp if chunk: try: - if not has_choices: + if has_choices and not choice_valid: # 一些垃圾第三方接口的出现这样的错误 continue # 前者是API2D的结束条件,后者是OPENAI的结束条件 @@ -287,6 +288,8 @@ def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg) chatbot[-1] = (chatbot[-1][0], "[Local Message] Your account is not active. 
OpenAI以账户失效为由, 拒绝服务." + openai_website) elif "associated with a deactivated account" in error_msg: chatbot[-1] = (chatbot[-1][0], "[Local Message] You are associated with a deactivated account. OpenAI以账户失效为由, 拒绝服务." + openai_website) + elif "API key has been deactivated" in error_msg: + chatbot[-1] = (chatbot[-1][0], "[Local Message] API key has been deactivated. OpenAI以账户失效为由, 拒绝服务." + openai_website) elif "bad forward key" in error_msg: chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.") elif "Not enough point" in error_msg: From b14d4de0b1d6e653069a57c31c720980d49729e0 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sat, 28 Oct 2023 20:08:50 +0800 Subject: [PATCH 006/117] =?UTF-8?q?=E5=B0=86=E9=BB=98=E8=AE=A4=E7=B3=BB?= =?UTF-8?q?=E7=BB=9F=E6=8F=90=E7=A4=BA=E8=AF=8D=E8=BD=AC=E7=A7=BB=E5=88=B0?= =?UTF-8?q?Config=E4=B8=AD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 12 ++++++++++-- config.py | 4 ++++ docs/use_azure.md | 8 ++++---- main.py | 4 ++-- 4 files changed, 20 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 77ff15e91d..c93b57a1fc 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ > **Note** > -> 2023.10.8: Gradio, Pydantic依赖调整,已修改 `requirements.txt`。请及时**更新代码**,安装依赖时,请严格选择`requirements.txt`中**指定的版本**。 +> 2023.10.28: 紧急修复了若干问题,安装依赖时,请选择`requirements.txt`中**指定的版本**。 > > `pip install -r requirements.txt` @@ -310,6 +310,7 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h ### II:版本: - version 3.60(todo): 优化虚空终端,引入code interpreter和更多插件 +- version 3.56: 支持动态追加基础功能按钮,新汇报PDF汇总页面 - version 3.55: 重构前端界面,引入悬浮窗口与菜单栏 - version 3.54: 新增动态代码解释器(Code Interpreter)(待完善) - version 3.53: 支持动态选择不同界面主题,提高稳定性&解决多用户冲突问题 @@ -344,7 +345,14 @@ GPT Academic开发者QQ群:`610599535` 1. `Chuanhu-Small-and-Beautiful` [网址](https://github.com/GaiZhenbiao/ChuanhuChatGPT/) -### IV:参考与学习 +### IV:本项目的开发分支 + +1. `master` 分支: 主分支,稳定版 + +2. `frontier` 分支: 开发分支,测试版 + + +### V:参考与学习 ``` 代码中参考了很多其他优秀项目中的设计,顺序不分先后: diff --git a/config.py b/config.py index 56c8ea356c..abea7ea30b 100644 --- a/config.py +++ b/config.py @@ -49,6 +49,10 @@ AVAIL_THEMES = ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast", "Gstaff/Xkcd", "NoCrypt/Miku"] +# 默认的系统提示词(system prompt) +INIT_SYS_PROMPT = "Serve me as a writing and programming assistant." 
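The `INIT_SYS_PROMPT` entry added above is read back in `main.py` later in this patch with a trailing-comma unpack, because at this point in the series `get_conf` still returns a tuple even for a single key ([PATCH 008/117] and [PATCH 010/117] below change that and drop the comma). A minimal sketch of the consumption side; only the config name is real, the textbox line is paraphrased from `main.py`:

```python
from toolbox import get_conf

INIT_SYS_PROMPT, = get_conf('INIT_SYS_PROMPT')   # the comma unpacks the 1-tuple

# used as the default value of the "System prompt" textbox, roughly:
# system_prompt = gr.Textbox(..., label="System prompt", value=INIT_SYS_PROMPT)
```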
+ + # 对话窗的高度 (仅在LAYOUT="TOP-DOWN"时生效) CHATBOT_HEIGHT = 1115 diff --git a/docs/use_azure.md b/docs/use_azure.md index 041bc5254a..0e192ba6a2 100644 --- a/docs/use_azure.md +++ b/docs/use_azure.md @@ -7,25 +7,25 @@ ## 方法二(新方法,接入多个Azure模型,并支持动态切换) - 在方法一的基础上,注册并获取多组 AZURE_ENDPOINT,AZURE_API_KEY,AZURE_ENGINE -- 修改config中的AZURE_CFG_ARRAY配置项,按照格式填入多个Azure模型的配置,如下所示: +- 修改config中的AZURE_CFG_ARRAY和AVAIL_LLM_MODELS配置项,按照格式填入多个Azure模型的配置,如下所示: ``` AZURE_CFG_ARRAY = { - "azure-gpt-3.5": # 第一个模型,azure模型必须以"azure-"开头 + "azure-gpt-3.5": # 第一个模型,azure模型必须以"azure-"开头,注意您还需要将"azure-gpt-3.5"加入AVAIL_LLM_MODELS(模型下拉菜单) { "AZURE_ENDPOINT": "https://你亲手写的api名称.openai.azure.com/", "AZURE_API_KEY": "cccccccccccccccccccccccccccccccc", "AZURE_ENGINE": "填入你亲手写的部署名1", "AZURE_MODEL_MAX_TOKEN": 4096, }, - "azure-gpt-4": # 第二个模型,azure模型必须以"azure-"开头 + "azure-gpt-4": # 第二个模型,azure模型必须以"azure-"开头,注意您还需要将"azure-gpt-4"加入AVAIL_LLM_MODELS(模型下拉菜单) { "AZURE_ENDPOINT": "https://你亲手写的api名称.openai.azure.com/", "AZURE_API_KEY": "dddddddddddddddddddddddddddddddd", "AZURE_ENGINE": "填入你亲手写的部署名2", "AZURE_MODEL_MAX_TOKEN": 8192, }, - "azure-gpt-3.5-16k": # 第三个模型,azure模型必须以"azure-"开头 + "azure-gpt-3.5-16k": # 第三个模型,azure模型必须以"azure-"开头,注意您还需要将"azure-gpt-3.5-16k"加入AVAIL_LLM_MODELS(模型下拉菜单) { "AZURE_ENDPOINT": "https://你亲手写的api名称.openai.azure.com/", "AZURE_API_KEY": "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", diff --git a/main.py b/main.py index 9f3899515c..f0914756eb 100644 --- a/main.py +++ b/main.py @@ -14,13 +14,13 @@ def main(): CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT') ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME') DARK_MODE, NUM_CUSTOM_BASIC_BTN, SSL_KEYFILE, SSL_CERTFILE = get_conf('DARK_MODE', 'NUM_CUSTOM_BASIC_BTN', 'SSL_KEYFILE', 'SSL_CERTFILE') + INIT_SYS_PROMPT, = get_conf('INIT_SYS_PROMPT') # 如果WEB_PORT是-1, 则随机选取WEB端口 PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT from check_proxy import get_current_version from themes.theme import adjust_theme, advanced_css, theme_declaration, load_dynamic_theme - initial_prompt = "Serve me as a writing and programming assistant." title_html = f"

<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>
{theme_declaration}" description = "Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic), " description += "感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors)." @@ -153,7 +153,7 @@ def main(): top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",) temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",) max_length_sl = gr.Slider(minimum=256, maximum=1024*32, value=4096, step=128, interactive=True, label="Local LLM MaxLength",) - system_prompt = gr.Textbox(show_label=True, lines=2, placeholder=f"System Prompt", label="System prompt", value=initial_prompt) + system_prompt = gr.Textbox(show_label=True, lines=2, placeholder=f"System Prompt", label="System prompt", value=INIT_SYS_PROMPT) with gr.Tab("界面外观", elem_id="interact-panel"): theme_dropdown = gr.Dropdown(AVAIL_THEMES, value=THEME, label="更换UI主题").style(container=False) From 12b2a229b627744bdb3ba31001be8ed83944e7e9 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sat, 28 Oct 2023 20:15:59 +0800 Subject: [PATCH 007/117] =?UTF-8?q?=E7=A7=BB=E9=99=A4=E8=B0=83=E8=AF=95?= =?UTF-8?q?=E6=89=93=E5=8D=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llms/bridge_chatgpt.py | 1 - 1 file changed, 1 deletion(-) diff --git a/request_llms/bridge_chatgpt.py b/request_llms/bridge_chatgpt.py index b8b4d3a185..9903da9d86 100644 --- a/request_llms/bridge_chatgpt.py +++ b/request_llms/bridge_chatgpt.py @@ -214,7 +214,6 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp while True: try: chunk = next(stream_response) - print(chunk) except StopIteration: # 非OpenAI官方接口的出现这样的报错,OpenAI和API2D不会走这里 chunk_decoded = chunk.decode() From 527f9d28ad12d7ed7a9476bb9ed0ef31a8548aec Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sun, 29 Oct 2023 00:34:40 +0800 Subject: [PATCH 008/117] change get_conf --- check_proxy.py | 6 ++-- config.py | 4 +++ crazy_functional.py | 2 +- ...345\207\272PDF\347\273\223\346\236\234.py" | 2 +- crazy_functions/agent_fns/pipe.py | 2 +- crazy_functions/crazy_utils.py | 4 +-- crazy_functions/pdf_fns/parse_pdf.py | 2 +- crazy_functions/vt_fns/vt_modify_config.py | 4 +-- ...73\350\257\221\346\221\230\350\246\201.py" | 4 +-- ...76\347\211\207\347\224\237\346\210\220.py" | 2 +- ...32\346\231\272\350\203\275\344\275\223.py" | 2 +- ...23\351\237\263\350\247\206\351\242\221.py" | 2 +- ...07\217Markdown\347\277\273\350\257\221.py" | 2 +- ...201\224\347\275\221\347\232\204ChatGPT.py" | 2 +- ...21\347\232\204ChatGPT_bing\347\211\210.py" | 2 +- ...42\345\260\217\345\212\251\346\211\213.py" | 2 +- docs/WithFastapi.md | 4 +-- main.py | 6 ++-- multi_language.py | 2 +- request_llms/bridge_all.py | 4 +-- request_llms/bridge_chatglmft.py | 2 +- request_llms/bridge_internlm.py | 2 +- request_llms/bridge_jittorllms_llama.py | 2 +- request_llms/bridge_jittorllms_pangualpha.py | 2 +- request_llms/bridge_jittorllms_rwkv.py | 2 +- request_llms/bridge_newbingfree.py | 2 +- request_llms/bridge_stackclaude.py | 4 +-- request_llms/bridge_zhipu.py | 2 +- request_llms/key_manager.py | 29 +++++++++++++++++++ themes/gradios.py | 2 +- themes/theme.py | 2 +- toolbox.py | 24 ++++++++------- 32 files changed, 85 insertions(+), 50 deletions(-) create mode 100644 request_llms/key_manager.py diff --git a/check_proxy.py b/check_proxy.py index 75de7ab48d..977ac276a9 100644 --- a/check_proxy.py +++ b/check_proxy.py @@ -46,7 +46,7 @@ def 
backup_and_download(current_version, remote_version): return new_version_dir os.makedirs(new_version_dir) shutil.copytree('./', backup_dir, ignore=lambda x, y: ['history']) - proxies, = get_conf('proxies') + proxies = get_conf('proxies') r = requests.get( 'https://github.com/binary-husky/chatgpt_academic/archive/refs/heads/master.zip', proxies=proxies, stream=True) zip_file_path = backup_dir+'/master.zip' @@ -113,7 +113,7 @@ def auto_update(raise_error=False): import requests import time import json - proxies, = get_conf('proxies') + proxies = get_conf('proxies') response = requests.get( "https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version", proxies=proxies, timeout=5) remote_json_data = json.loads(response.text) @@ -167,5 +167,5 @@ def warm_up_modules(): import os os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染 from toolbox import get_conf - proxies, = get_conf('proxies') + proxies = get_conf('proxies') check_proxy(proxies) diff --git a/config.py b/config.py index 3175867123..ec047ae805 100644 --- a/config.py +++ b/config.py @@ -223,6 +223,10 @@ "Warmup_Modules", "Nougat_Download", "AutoGen"] +# *实验性功能*: 自动检测并屏蔽失效的KEY,请勿使用 +BLOCK_INVALID_APIKEY = False + + # 自定义按钮的最大数量限制 NUM_CUSTOM_BASIC_BTN = 4 diff --git a/crazy_functional.py b/crazy_functional.py index 795bd5f7b7..2d7fa74b1e 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -498,7 +498,7 @@ def get_crazy_functions(): try: from toolbox import get_conf - ENABLE_AUDIO, = get_conf('ENABLE_AUDIO') + ENABLE_AUDIO = get_conf('ENABLE_AUDIO') if ENABLE_AUDIO: from crazy_functions.语音助手 import 语音助手 function_plugins.update({ diff --git "a/crazy_functions/Latex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" "b/crazy_functions/Latex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" index 7ec5a4b2da..9edfea6862 100644 --- "a/crazy_functions/Latex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" +++ "b/crazy_functions/Latex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" @@ -129,7 +129,7 @@ def is_float(s): yield from update_ui_lastest_msg("调用缓存", chatbot=chatbot, history=history) # 刷新界面 else: yield from update_ui_lastest_msg("开始下载", chatbot=chatbot, history=history) # 刷新界面 - proxies, = get_conf('proxies') + proxies = get_conf('proxies') r = requests.get(url_tar, proxies=proxies) with open(dst, 'wb+') as f: f.write(r.content) diff --git a/crazy_functions/agent_fns/pipe.py b/crazy_functions/agent_fns/pipe.py index 0e430b4075..5ebe3fc620 100644 --- a/crazy_functions/agent_fns/pipe.py +++ b/crazy_functions/agent_fns/pipe.py @@ -20,7 +20,7 @@ def __init__(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, w self.system_prompt = system_prompt self.web_port = web_port self.alive = True - self.use_docker, = get_conf('AUTOGEN_USE_DOCKER') + self.use_docker = get_conf('AUTOGEN_USE_DOCKER') # create a thread to monitor self.heartbeat, terminate the instance if no heartbeat for a long time timeout_seconds = 5*60 diff --git a/crazy_functions/crazy_utils.py b/crazy_functions/crazy_utils.py index 04a4e67d27..a23c732b58 100644 --- a/crazy_functions/crazy_utils.py +++ b/crazy_functions/crazy_utils.py @@ -181,7 +181,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( assert len(inputs_array) == len(history_array) assert len(inputs_array) == len(sys_prompt_array) if max_workers == -1: # 读取配置文件 - try: max_workers, = get_conf('DEFAULT_WORKER_NUM') + try: max_workers = get_conf('DEFAULT_WORKER_NUM') except: max_workers = 8 if max_workers <= 0: max_workers = 3 # 屏蔽掉 
chatglm的多线程,可能会导致严重卡顿 @@ -602,7 +602,7 @@ def get_files_from_everything(txt, type): # type='.md' import requests from toolbox import get_conf from toolbox import get_log_folder, gen_time_str - proxies, = get_conf('proxies') + proxies = get_conf('proxies') try: r = requests.get(txt, proxies=proxies) except: diff --git a/crazy_functions/pdf_fns/parse_pdf.py b/crazy_functions/pdf_fns/parse_pdf.py index 396b608ab3..6646c5bfec 100644 --- a/crazy_functions/pdf_fns/parse_pdf.py +++ b/crazy_functions/pdf_fns/parse_pdf.py @@ -14,7 +14,7 @@ class GROBID_OFFLINE_EXCEPTION(Exception): pass def get_avail_grobid_url(): - GROBID_URLS, = get_conf('GROBID_URLS') + GROBID_URLS = get_conf('GROBID_URLS') if len(GROBID_URLS) == 0: return None try: _grobid_url = random.choice(GROBID_URLS) # 随机负载均衡 diff --git a/crazy_functions/vt_fns/vt_modify_config.py b/crazy_functions/vt_fns/vt_modify_config.py index 0e2b3146fd..58a8531e8e 100644 --- a/crazy_functions/vt_fns/vt_modify_config.py +++ b/crazy_functions/vt_fns/vt_modify_config.py @@ -7,7 +7,7 @@ def modify_configuration_hot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention): - ALLOW_RESET_CONFIG, = get_conf('ALLOW_RESET_CONFIG') + ALLOW_RESET_CONFIG = get_conf('ALLOW_RESET_CONFIG') if not ALLOW_RESET_CONFIG: yield from update_ui_lastest_msg( lastmsg=f"当前配置不允许被修改!如需激活本功能,请在config.py中设置ALLOW_RESET_CONFIG=True后重启软件。", @@ -66,7 +66,7 @@ class ModifyConfigurationIntention(BaseModel): ) def modify_configuration_reboot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention): - ALLOW_RESET_CONFIG, = get_conf('ALLOW_RESET_CONFIG') + ALLOW_RESET_CONFIG = get_conf('ALLOW_RESET_CONFIG') if not ALLOW_RESET_CONFIG: yield from update_ui_lastest_msg( lastmsg=f"当前配置不允许被修改!如需激活本功能,请在config.py中设置ALLOW_RESET_CONFIG=True后重启软件。", diff --git "a/crazy_functions/\344\270\213\350\275\275arxiv\350\256\272\346\226\207\347\277\273\350\257\221\346\221\230\350\246\201.py" "b/crazy_functions/\344\270\213\350\275\275arxiv\350\256\272\346\226\207\347\277\273\350\257\221\346\221\230\350\246\201.py" index 8b4a5037a2..c711cf4531 100644 --- "a/crazy_functions/\344\270\213\350\275\275arxiv\350\256\272\346\226\207\347\277\273\350\257\221\346\221\230\350\246\201.py" +++ "b/crazy_functions/\344\270\213\350\275\275arxiv\350\256\272\346\226\207\347\277\273\350\257\221\346\221\230\350\246\201.py" @@ -43,7 +43,7 @@ def download_arxiv_(url_pdf): file_path = download_dir+title_str print('下载中') - proxies, = get_conf('proxies') + proxies = get_conf('proxies') r = requests.get(requests_pdf_url, proxies=proxies) with open(file_path, 'wb+') as f: f.write(r.content) @@ -77,7 +77,7 @@ def get_name(_url_): # print('在缓存中') # return arxiv_recall[_url_] - proxies, = get_conf('proxies') + proxies = get_conf('proxies') res = requests.get(_url_, proxies=proxies) bs = BeautifulSoup(res.text, 'html.parser') diff --git "a/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" "b/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" index 09bd9beb7d..1b7dff5d9d 100644 --- "a/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" +++ "b/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" @@ -7,7 +7,7 @@ def gen_image(llm_kwargs, prompt, resolution="256x256"): import requests, json, time, os from request_llms.bridge_all import model_info - proxies, = get_conf('proxies') + proxies = get_conf('proxies') # Set up OpenAI API key and model api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) 
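Most of this commit is a mechanical sweep: every `x, = get_conf('x')` tuple unpack becomes a plain assignment. The enabling change sits in `toolbox.py` further below (`if len(res) == 1: return res[0]`). A runnable sketch of the new semantics, with a stub dictionary standing in for the real `read_single_conf_with_lru_cache`:

```python
_FAKE_CFG = {'proxies': None, 'CHATBOT_HEIGHT': 1115, 'LAYOUT': 'LEFT-RIGHT'}  # stand-in values

def read_single_conf(arg):
    return _FAKE_CFG[arg]

def get_conf(*args):
    res = [read_single_conf(arg) for arg in args]
    if len(res) == 1:
        return res[0]   # single key now yields the bare value: no unpacking comma needed
    return res          # multiple keys still yield a sequence, so multi-assignment is unchanged

proxies = get_conf('proxies')                           # new call style used throughout this commit
height, layout = get_conf('CHATBOT_HEIGHT', 'LAYOUT')   # multi-key unpacking keeps working
```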
chat_endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] diff --git "a/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" "b/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" index 5a4c4a58ae..99b3e86b60 100644 --- "a/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" +++ "b/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" @@ -41,7 +41,7 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ return # 检查当前的模型是否符合要求 - API_URL_REDIRECT, = get_conf('API_URL_REDIRECT') + API_URL_REDIRECT = get_conf('API_URL_REDIRECT') if len(API_URL_REDIRECT) > 0: chatbot.append([f"处理任务: {txt}", f"暂不支持中转."]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 diff --git "a/crazy_functions/\346\200\273\347\273\223\351\237\263\350\247\206\351\242\221.py" "b/crazy_functions/\346\200\273\347\273\223\351\237\263\350\247\206\351\242\221.py" index b946d37d49..b88775b415 100644 --- "a/crazy_functions/\346\200\273\347\273\223\351\237\263\350\247\206\351\242\221.py" +++ "b/crazy_functions/\346\200\273\347\273\223\351\237\263\350\247\206\351\242\221.py" @@ -79,7 +79,7 @@ def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history): chatbot.append([f"将 {i} 发送到openai音频解析终端 (whisper),当前参数:{parse_prompt}", "正在处理 ..."]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - proxies, = get_conf('proxies') + proxies = get_conf('proxies') response = requests.post(url, headers=headers, files=files, data=data, proxies=proxies).text chatbot.append(["音频解析结果", response]) diff --git "a/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" "b/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" index e245b72d5e..2bdffc8662 100644 --- "a/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" +++ "b/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" @@ -118,7 +118,7 @@ def get_files_from_everything(txt, preference=''): if txt.startswith('http'): import requests from toolbox import get_conf - proxies, = get_conf('proxies') + proxies = get_conf('proxies') # 网络的远程文件 if preference == 'Github': logging.info('正在从github下载资源 ...') diff --git "a/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" "b/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" index be286bcd5e..6fa50ec2e3 100644 --- "a/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" +++ "b/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" @@ -72,7 +72,7 @@ def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, s # ------------- < 第1步:爬取搜索引擎的结果 > ------------- from toolbox import get_conf - proxies, = get_conf('proxies') + proxies = get_conf('proxies') urls = google(txt, proxies) history = [] if len(urls) == 0: diff --git "a/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT_bing\347\211\210.py" "b/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT_bing\347\211\210.py" index 666fcb8b29..009ebdce52 100644 --- "a/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT_bing\347\211\210.py" +++ "b/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT_bing\347\211\210.py" @@ -72,7 +72,7 @@ def 连接bing搜索回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, histor # ------------- < 第1步:爬取搜索引擎的结果 > ------------- from toolbox import get_conf - proxies, = get_conf('proxies') + proxies = get_conf('proxies') urls = bing_search(txt, proxies) history = [] if len(urls) == 0: diff --git 
"a/crazy_functions/\350\260\267\346\255\214\346\243\200\347\264\242\345\260\217\345\212\251\346\211\213.py" "b/crazy_functions/\350\260\267\346\255\214\346\243\200\347\264\242\345\260\217\345\212\251\346\211\213.py" index dae8a2bb3d..5924a2860d 100644 --- "a/crazy_functions/\350\260\267\346\255\214\346\243\200\347\264\242\345\260\217\345\212\251\346\211\213.py" +++ "b/crazy_functions/\350\260\267\346\255\214\346\243\200\347\264\242\345\260\217\345\212\251\346\211\213.py" @@ -17,7 +17,7 @@ def get_meta_information(url, chatbot, history): from urllib.parse import urlparse session = requests.session() - proxies, = get_conf('proxies') + proxies = get_conf('proxies') headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36', 'Accept-Encoding': 'gzip, deflate, br', diff --git a/docs/WithFastapi.md b/docs/WithFastapi.md index 188b527164..bbbb386e8a 100644 --- a/docs/WithFastapi.md +++ b/docs/WithFastapi.md @@ -16,7 +16,7 @@ nano config.py + demo.queue(concurrency_count=CONCURRENT_COUNT) - # 如果需要在二级路径下运行 - - # CUSTOM_PATH, = get_conf('CUSTOM_PATH') + - # CUSTOM_PATH = get_conf('CUSTOM_PATH') - # if CUSTOM_PATH != "/": - # from toolbox import run_gradio_in_subpath - # run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH) @@ -24,7 +24,7 @@ nano config.py - # demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png") + 如果需要在二级路径下运行 - + CUSTOM_PATH, = get_conf('CUSTOM_PATH') + + CUSTOM_PATH = get_conf('CUSTOM_PATH') + if CUSTOM_PATH != "/": + from toolbox import run_gradio_in_subpath + run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH) diff --git a/main.py b/main.py index fff1a3e3c5..ee8f5cf78c 100644 --- a/main.py +++ b/main.py @@ -14,7 +14,7 @@ def main(): CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT') ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME') DARK_MODE, NUM_CUSTOM_BASIC_BTN, SSL_KEYFILE, SSL_CERTFILE = get_conf('DARK_MODE', 'NUM_CUSTOM_BASIC_BTN', 'SSL_KEYFILE', 'SSL_CERTFILE') - INIT_SYS_PROMPT, = get_conf('INIT_SYS_PROMPT') + INIT_SYS_PROMPT = get_conf('INIT_SYS_PROMPT') # 如果WEB_PORT是-1, 则随机选取WEB端口 PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT @@ -48,7 +48,7 @@ def main(): # 高级函数插件 from crazy_functional import get_crazy_functions - DEFAULT_FN_GROUPS, = get_conf('DEFAULT_FN_GROUPS') + DEFAULT_FN_GROUPS = get_conf('DEFAULT_FN_GROUPS') plugins = get_crazy_functions() all_plugin_groups = list(set([g for _, plugin in plugins.items() for g in plugin['Group'].split('|')])) match_group = lambda tags, groups: any([g in groups for g in tags.split('|')]) @@ -436,7 +436,7 @@ def warm_up_mods(): time.sleep(4); warm_up_modules() blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"]) # 如果需要在二级路径下运行 - # CUSTOM_PATH, = get_conf('CUSTOM_PATH') + # CUSTOM_PATH = get_conf('CUSTOM_PATH') # if CUSTOM_PATH != "/": # from toolbox import run_gradio_in_subpath # run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH) diff --git a/multi_language.py b/multi_language.py index f78cd997ca..a20fb5af86 100644 --- a/multi_language.py +++ b/multi_language.py @@ -38,7 +38,7 @@ CACHE_ONLY = os.environ.get('CACHE_ONLY', False) -CACHE_FOLDER, = get_conf('PATH_LOGGING') +CACHE_FOLDER = 
get_conf('PATH_LOGGING') blacklist = ['multi-language', CACHE_FOLDER, '.git', 'private_upload', 'multi_language.py', 'build', '.github', '.vscode', '__pycache__', 'venv'] diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index 83c3661236..b484d1f81e 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -56,7 +56,7 @@ def decode(self, *args, **kwargs): azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15' # 兼容旧版的配置 try: - API_URL, = get_conf("API_URL") + API_URL = get_conf("API_URL") if API_URL != "https://api.openai.com/v1/chat/completions": openai_endpoint = API_URL print("警告!API_URL配置选项将被弃用,请更换为API_URL_REDIRECT配置") @@ -501,7 +501,7 @@ def decode(self, *args, **kwargs): print(trimmed_format_exc()) # <-- 用于定义和切换多个azure模型 --> -AZURE_CFG_ARRAY, = get_conf("AZURE_CFG_ARRAY") +AZURE_CFG_ARRAY = get_conf("AZURE_CFG_ARRAY") if len(AZURE_CFG_ARRAY) > 0: for azure_model_name, azure_cfg_dict in AZURE_CFG_ARRAY.items(): # 可能会覆盖之前的配置,但这是意料之中的 diff --git a/request_llms/bridge_chatglmft.py b/request_llms/bridge_chatglmft.py index 63f36049ac..d812bae3c3 100644 --- a/request_llms/bridge_chatglmft.py +++ b/request_llms/bridge_chatglmft.py @@ -63,7 +63,7 @@ def run(self): # if not os.path.exists(conf): raise RuntimeError('找不到微调模型信息') # with open(conf, 'r', encoding='utf8') as f: # model_args = json.loads(f.read()) - CHATGLM_PTUNING_CHECKPOINT, = get_conf('CHATGLM_PTUNING_CHECKPOINT') + CHATGLM_PTUNING_CHECKPOINT = get_conf('CHATGLM_PTUNING_CHECKPOINT') assert os.path.exists(CHATGLM_PTUNING_CHECKPOINT), "找不到微调模型检查点" conf = os.path.join(CHATGLM_PTUNING_CHECKPOINT, "config.json") with open(conf, 'r', encoding='utf8') as f: diff --git a/request_llms/bridge_internlm.py b/request_llms/bridge_internlm.py index 3304fe234f..073c193ad4 100644 --- a/request_llms/bridge_internlm.py +++ b/request_llms/bridge_internlm.py @@ -52,7 +52,7 @@ def load_model_and_tokenizer(self): # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 import torch from transformers import AutoModelForCausalLM, AutoTokenizer - device, = get_conf('LOCAL_MODEL_DEVICE') + device = get_conf('LOCAL_MODEL_DEVICE') if self._model is None: tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True) if device=='cpu': diff --git a/request_llms/bridge_jittorllms_llama.py b/request_llms/bridge_jittorllms_llama.py index af2d9fbdc3..2d3005e52d 100644 --- a/request_llms/bridge_jittorllms_llama.py +++ b/request_llms/bridge_jittorllms_llama.py @@ -53,7 +53,7 @@ def load_model(): import types try: if self.jittorllms_model is None: - device, = get_conf('LOCAL_MODEL_DEVICE') + device = get_conf('LOCAL_MODEL_DEVICE') from .jittorllms.models import get_model # availabel_models = ["chatglm", "pangualpha", "llama", "chatrwkv"] args_dict = {'model': 'llama'} diff --git a/request_llms/bridge_jittorllms_pangualpha.py b/request_llms/bridge_jittorllms_pangualpha.py index df0523b488..26401764c9 100644 --- a/request_llms/bridge_jittorllms_pangualpha.py +++ b/request_llms/bridge_jittorllms_pangualpha.py @@ -53,7 +53,7 @@ def load_model(): import types try: if self.jittorllms_model is None: - device, = get_conf('LOCAL_MODEL_DEVICE') + device = get_conf('LOCAL_MODEL_DEVICE') from .jittorllms.models import get_model # availabel_models = ["chatglm", "pangualpha", "llama", "chatrwkv"] args_dict = {'model': 'pangualpha'} diff --git a/request_llms/bridge_jittorllms_rwkv.py b/request_llms/bridge_jittorllms_rwkv.py index 875673aac7..0021a50d0a 100644 --- 
a/request_llms/bridge_jittorllms_rwkv.py +++ b/request_llms/bridge_jittorllms_rwkv.py @@ -53,7 +53,7 @@ def load_model(): import types try: if self.jittorllms_model is None: - device, = get_conf('LOCAL_MODEL_DEVICE') + device = get_conf('LOCAL_MODEL_DEVICE') from .jittorllms.models import get_model # availabel_models = ["chatglm", "pangualpha", "llama", "chatrwkv"] args_dict = {'model': 'chatrwkv'} diff --git a/request_llms/bridge_newbingfree.py b/request_llms/bridge_newbingfree.py index 5dddb6105c..cb83a0fb0c 100644 --- a/request_llms/bridge_newbingfree.py +++ b/request_llms/bridge_newbingfree.py @@ -62,7 +62,7 @@ def ready(self): async def async_run(self): # 读取配置 - NEWBING_STYLE, = get_conf('NEWBING_STYLE') + NEWBING_STYLE = get_conf('NEWBING_STYLE') from request_llms.bridge_all import model_info endpoint = model_info['newbing']['endpoint'] while True: diff --git a/request_llms/bridge_stackclaude.py b/request_llms/bridge_stackclaude.py index 3b57615671..a39398444d 100644 --- a/request_llms/bridge_stackclaude.py +++ b/request_llms/bridge_stackclaude.py @@ -146,14 +146,14 @@ def run(self): self.local_history = [] if (self.claude_model is None) or (not self.success): # 代理设置 - proxies, = get_conf('proxies') + proxies = get_conf('proxies') if proxies is None: self.proxies_https = None else: self.proxies_https = proxies['https'] try: - SLACK_CLAUDE_USER_TOKEN, = get_conf('SLACK_CLAUDE_USER_TOKEN') + SLACK_CLAUDE_USER_TOKEN = get_conf('SLACK_CLAUDE_USER_TOKEN') self.claude_model = SlackClient(token=SLACK_CLAUDE_USER_TOKEN, proxy=self.proxies_https) print('Claude组件初始化成功。') except: diff --git a/request_llms/bridge_zhipu.py b/request_llms/bridge_zhipu.py index 2fd19ad508..a1e0de5918 100644 --- a/request_llms/bridge_zhipu.py +++ b/request_llms/bridge_zhipu.py @@ -5,7 +5,7 @@ model_name = '智谱AI大模型' def validate_key(): - ZHIPUAI_API_KEY, = get_conf("ZHIPUAI_API_KEY") + ZHIPUAI_API_KEY = get_conf("ZHIPUAI_API_KEY") if ZHIPUAI_API_KEY == '': return False return True diff --git a/request_llms/key_manager.py b/request_llms/key_manager.py new file mode 100644 index 0000000000..8563d2ef82 --- /dev/null +++ b/request_llms/key_manager.py @@ -0,0 +1,29 @@ +import random + +def Singleton(cls): + _instance = {} + + def _singleton(*args, **kargs): + if cls not in _instance: + _instance[cls] = cls(*args, **kargs) + return _instance[cls] + + return _singleton + + +@Singleton +class OpenAI_ApiKeyManager(): + def __init__(self, mode='blacklist') -> None: + # self.key_avail_list = [] + self.key_black_list = [] + + def add_key_to_blacklist(self, key): + self.key_black_list.append(key) + + def select_avail_key(self, key_list): + # select key from key_list, but avoid keys also in self.key_black_list, raise error if no key can be found + available_keys = [key for key in key_list if key not in self.key_black_list] + if not available_keys: + raise KeyError("No available key found.") + selected_key = random.choice(available_keys) + return selected_key \ No newline at end of file diff --git a/themes/gradios.py b/themes/gradios.py index 7693a23808..96a9c54e60 100644 --- a/themes/gradios.py +++ b/themes/gradios.py @@ -18,7 +18,7 @@ def adjust_theme(): set_theme = gr.themes.ThemeClass() with ProxyNetworkActivate('Download_Gradio_Theme'): logging.info('正在下载Gradio主题,请稍等。') - THEME, = get_conf('THEME') + THEME = get_conf('THEME') if THEME.startswith('Huggingface-'): THEME = THEME.lstrip('Huggingface-') if THEME.startswith('huggingface-'): THEME = THEME.lstrip('huggingface-') set_theme = set_theme.from_hub(THEME.lower()) diff --git 
a/themes/theme.py b/themes/theme.py index 42ee75000d..f59db9f832 100644 --- a/themes/theme.py +++ b/themes/theme.py @@ -1,6 +1,6 @@ import gradio as gr from toolbox import get_conf -THEME, = get_conf('THEME') +THEME = get_conf('THEME') def load_dynamic_theme(THEME): adjust_dynamic_theme = None diff --git a/toolbox.py b/toolbox.py index 137313efa7..5b7a751921 100644 --- a/toolbox.py +++ b/toolbox.py @@ -152,7 +152,7 @@ def decorated(main_input, llm_kwargs, plugin_kwargs, chatbot_with_cookie, histor except Exception as e: from check_proxy import check_proxy from toolbox import get_conf - proxies, = get_conf('proxies') + proxies = get_conf('proxies') tb_str = '```\n' + trimmed_format_exc() + '```' if len(chatbot_with_cookie) == 0: chatbot_with_cookie.clear() @@ -555,14 +555,14 @@ def disable_auto_promotion(chatbot): return def is_the_upload_folder(string): - PATH_PRIVATE_UPLOAD, = get_conf('PATH_PRIVATE_UPLOAD') + PATH_PRIVATE_UPLOAD = get_conf('PATH_PRIVATE_UPLOAD') pattern = r'^PATH_PRIVATE_UPLOAD/[A-Za-z0-9_-]+/\d{4}-\d{2}-\d{2}-\d{2}-\d{2}-\d{2}$' pattern = pattern.replace('PATH_PRIVATE_UPLOAD', PATH_PRIVATE_UPLOAD) if re.match(pattern, string): return True else: return False def del_outdated_uploads(outdate_time_seconds): - PATH_PRIVATE_UPLOAD, = get_conf('PATH_PRIVATE_UPLOAD') + PATH_PRIVATE_UPLOAD = get_conf('PATH_PRIVATE_UPLOAD') current_time = time.time() one_hour_ago = current_time - outdate_time_seconds # Get a list of all subdirectories in the PATH_PRIVATE_UPLOAD folder @@ -588,7 +588,7 @@ def on_file_uploaded(request: gradio.Request, files, chatbot, txt, txt2, checkbo # 创建工作路径 user_name = "default" if not request.username else request.username time_tag = gen_time_str() - PATH_PRIVATE_UPLOAD, = get_conf('PATH_PRIVATE_UPLOAD') + PATH_PRIVATE_UPLOAD = get_conf('PATH_PRIVATE_UPLOAD') target_path_base = pj(PATH_PRIVATE_UPLOAD, user_name, time_tag) os.makedirs(target_path_base, exist_ok=True) @@ -626,7 +626,7 @@ def on_file_uploaded(request: gradio.Request, files, chatbot, txt, txt2, checkbo def on_report_generated(cookies, files, chatbot): from toolbox import find_recent_files - PATH_LOGGING, = get_conf('PATH_LOGGING') + PATH_LOGGING = get_conf('PATH_LOGGING') if 'files_to_promote' in cookies: report_files = cookies['files_to_promote'] cookies.pop('files_to_promote') @@ -669,7 +669,7 @@ def load_chat_cookies(): return {'api_key': API_KEY, 'llm_model': LLM_MODEL, 'customize_fn_overwrite': customize_fn_overwrite_} def is_openai_api_key(key): - CUSTOM_API_KEY_PATTERN, = get_conf('CUSTOM_API_KEY_PATTERN') + CUSTOM_API_KEY_PATTERN = get_conf('CUSTOM_API_KEY_PATTERN') if len(CUSTOM_API_KEY_PATTERN) != 0: API_MATCH_ORIGINAL = re.match(CUSTOM_API_KEY_PATTERN, key) else: @@ -732,6 +732,7 @@ def select_api_key(keys, llm_model): raise RuntimeError(f"您提供的api-key不满足要求,不包含任何可用于{llm_model}的api-key。您可能选择了错误的模型或请求源(右下角更换模型菜单中可切换openai,azure,claude,api2d等请求源)。") api_key = random.choice(avail_key_list) # 随机负载均衡 + if ENABLE return api_key def read_env_variable(arg, default_value): @@ -828,6 +829,7 @@ def get_conf(*args): for arg in args: r = read_single_conf_with_lru_cache(arg) res.append(r) + if len(res) == 1: return res[0] return res @@ -989,7 +991,7 @@ def gen_time_str(): return time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) def get_log_folder(user='default', plugin_name='shared'): - PATH_LOGGING, = get_conf('PATH_LOGGING') + PATH_LOGGING = get_conf('PATH_LOGGING') _dir = pj(PATH_LOGGING, user, plugin_name) if not os.path.exists(_dir): os.makedirs(_dir) return _dir @@ -1006,13 +1008,13 @@ def 
__init__(self, task=None) -> None: else: # 给定了task, 我们检查一下 from toolbox import get_conf - WHEN_TO_USE_PROXY, = get_conf('WHEN_TO_USE_PROXY') + WHEN_TO_USE_PROXY = get_conf('WHEN_TO_USE_PROXY') self.valid = (task in WHEN_TO_USE_PROXY) def __enter__(self): if not self.valid: return self from toolbox import get_conf - proxies, = get_conf('proxies') + proxies = get_conf('proxies') if 'no_proxy' in os.environ: os.environ.pop('no_proxy') if proxies is not None: if 'http' in proxies: os.environ['HTTP_PROXY'] = proxies['http'] @@ -1054,7 +1056,7 @@ def _singleton(*args, **kargs): """ ======================================================================== 第四部分 -接驳虚空终端: +接驳void-terminal: - set_conf: 在运行过程中动态地修改配置 - set_multi_conf: 在运行过程中动态地修改多个配置 - get_plugin_handle: 获取插件的句柄 @@ -1069,7 +1071,7 @@ def set_conf(key, value): read_single_conf_with_lru_cache.cache_clear() get_conf.cache_clear() os.environ[key] = str(value) - altered, = get_conf(key) + altered = get_conf(key) return altered def set_multi_conf(dic): From f3f90f7b90979d56489d65297342b401b15ec31e Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Mon, 30 Oct 2023 01:10:45 +0800 Subject: [PATCH 009/117] Update README.md --- README.md | 81 +++++++++++++++++++------------------------------------ 1 file changed, 28 insertions(+), 53 deletions(-) diff --git a/README.md b/README.md index c93b57a1fc..83f385c9f2 100644 --- a/README.md +++ b/README.md @@ -7,18 +7,18 @@ #
GPT 学术优化 (GPT Academic)
-**如果喜欢这个项目,请给它一个Star;如果您发明了好用的快捷键或函数插件,欢迎发pull requests!** +**如果喜欢这个项目,请给它一个Star;如果您发明了好用的快捷键或插件,欢迎发pull requests!** -If you like this project, please give it a Star. If you've come up with more useful academic shortcuts or functional plugins, feel free to open an issue or pull request. We also have a README in [English|](docs/README_EN.md)[日本語|](docs/README_JP.md)[한국어|](https://github.com/mldljyh/ko_gpt_academic)[Русский|](docs/README_RS.md)[Français](docs/README_FR.md) translated by this project itself. +If you like this project, please give it a Star. We also have a README in [English|](docs/README_EN.md)[日本語|](docs/README_JP.md)[한국어|](https://github.com/mldljyh/ko_gpt_academic)[Русский|](docs/README_RS.md)[Français](docs/README_FR.md) translated by this project itself. To translate this project to arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental). > **Note** > -> 1.请注意只有 **高亮** 标识的函数插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR。 +> 1.请注意只有 **高亮** 标识的插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR。 > -> 2.本项目中每个文件的功能都在[自译解报告`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题[`wiki`](https://github.com/binary-husky/gpt_academic/wiki)。[常规安装方法](#installation) | [一键安装脚本](https://github.com/binary-husky/gpt_academic/releases) | [配置说明](https://github.com/binary-husky/gpt_academic/wiki/%E9%A1%B9%E7%9B%AE%E9%85%8D%E7%BD%AE%E8%AF%B4%E6%98%8E)。 +> 2.本项目中每个文件的功能都在[自译解报告`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题[`wiki`](https://github.com/binary-husky/gpt_academic/wiki)。[常规安装方法](#installation) | [一键安装脚本](https://github.com/binary-husky/gpt_academic/releases) | [配置说明](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。 > -> 3.本项目兼容并鼓励尝试国产大语言模型ChatGLM和Moss等等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交后即可生效。 +> 3.本项目兼容并鼓励尝试国产大语言模型ChatGLM等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交后即可生效。 @@ -101,16 +101,16 @@ cd gpt_academic 2. 配置API_KEY -在`config.py`中,配置API KEY等设置,[点击查看特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1) 。[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/%E9%A1%B9%E7%9B%AE%E9%85%8D%E7%BD%AE%E8%AF%B4%E6%98%8E)。 +在`config.py`中,配置API KEY等设置,[点击查看特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1) 。[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。 「 程序会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。如您能理解该读取逻辑,我们强烈建议您在`config.py`旁边创建一个名为`config_private.py`的新配置文件,并把`config.py`中的配置转移(复制)到`config_private.py`中(仅复制您修改过的配置条目即可)。 」 -「 支持通过`环境变量`配置项目,环境变量的书写格式参考`docker-compose.yml`文件或者我们的[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/%E9%A1%B9%E7%9B%AE%E9%85%8D%E7%BD%AE%E8%AF%B4%E6%98%8E)。配置读取优先级: `环境变量` > `config_private.py` > `config.py`。 」 +「 支持通过`环境变量`配置项目,环境变量的书写格式参考`docker-compose.yml`文件或者我们的[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。配置读取优先级: `环境变量` > `config_private.py` > `config.py`。 」 3. 
安装依赖 ```sh -# (选择I: 如熟悉python)(python版本3.9以上,越新越好),备注:使用官方pip源或者阿里pip源,临时换源方法:python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ +# (选择I: 如熟悉python, python>=3.9)备注:使用官方pip源或者阿里pip源, 临时换源方法:python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ python -m pip install -r requirements.txt # (选择II: 使用Anaconda)步骤也是类似的 (https://www.bilibili.com/video/BV1rc411W7Dr): @@ -151,11 +151,11 @@ python main.py ### 安装方法II:使用Docker -0. 部署项目的全部能力(这个是包含cuda和latex的大型镜像。如果您网速慢、硬盘小或没有显卡,则不推荐使用这个,建议使用方案1)(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行时) -[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml) +0. 部署项目的全部能力(这个是包含cuda和latex的大型镜像。但如果您网速慢、硬盘小,则不推荐使用这个) +[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml) ``` sh -# 修改docker-compose.yml,保留方案0并删除其他方案。修改docker-compose.yml中方案0的配置,参考其中注释即可 +# 修改docker-compose.yml,保留方案0并删除其他方案。然后运行: docker-compose up ``` @@ -165,7 +165,7 @@ docker-compose up [![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml) ``` sh -# 修改docker-compose.yml,保留方案1并删除其他方案。修改docker-compose.yml中方案1的配置,参考其中注释即可 +# 修改docker-compose.yml,保留方案1并删除其他方案。然后运行: docker-compose up ``` @@ -175,48 +175,30 @@ P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以 [![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml) ``` sh -# 修改docker-compose.yml,保留方案2并删除其他方案。修改docker-compose.yml中方案2的配置,参考其中注释即可 -docker-compose up -``` - -3. ChatGPT + LLAMA + 盘古 + RWKV(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行时) -[![jittorllms](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-jittorllms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-jittorllms.yml) - -``` sh -# 修改docker-compose.yml,保留方案3并删除其他方案。修改docker-compose.yml中方案3的配置,参考其中注释即可 +# 修改docker-compose.yml,保留方案2并删除其他方案。然后运行: docker-compose up ``` ### 安装方法III:其他部署姿势 -1. 一键运行脚本。 +1. **Windows一键运行脚本**。 完全不熟悉python环境的Windows用户可以下载[Release](https://github.com/binary-husky/gpt_academic/releases)中发布的一键运行脚本安装无本地模型的版本。 脚本的贡献来源是[oobabooga](https://github.com/oobabooga/one-click-installers)。 -2. 使用docker-compose运行。 -请阅读docker-compose.yml后,按照其中的提示操作即可 - -3. 如何使用反代URL -按照`config.py`中的说明配置API_URL_REDIRECT即可。 - -4. 微软云AzureAPI -按照`config.py`中的说明配置即可(AZURE_ENDPOINT等四个配置) +2. 使用第三方API、Azure等、文心一言、星火等,见[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) -5. 远程云服务器部署(需要云服务器知识与经验)。 -请访问[部署wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97) +3. 
云服务器远程部署避坑指南。 +请访问[云服务器远程部署wiki](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97) -6. 使用Sealos[一键部署](https://github.com/binary-husky/gpt_academic/issues/993)。 - -7. 使用WSL2(Windows Subsystem for Linux 子系统)。 -请访问[部署wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2) - -8. 如何在二级网址(如`http://localhost/subpath`)下运行。 -请访问[FastAPI运行说明](docs/WithFastapi.md) +4. 一些新型的部署平台或方法 + - 使用Sealos[一键部署](https://github.com/binary-husky/gpt_academic/issues/993)。 + - 使用WSL2(Windows Subsystem for Linux 子系统)。请访问[部署wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2) + - 如何在二级网址(如`http://localhost/subpath`)下运行。请访问[FastAPI运行说明](docs/WithFastapi.md) # Advanced Usage ### I:自定义新的便捷按钮(学术快捷键) -任意文本编辑器打开`core_functional.py`,添加条目如下,然后重启程序即可。(如果按钮已经添加成功并可见,那么前缀、后缀都支持热修改,无需重启程序即可生效。) +任意文本编辑器打开`core_functional.py`,添加条目如下,然后重启程序。(如按钮已存在,那么前缀、后缀都支持热修改,无需重启程序即可生效。) 例如 ``` "超级英译中": { @@ -232,14 +214,13 @@ docker-compose up ### II:自定义函数插件 - 编写强大的函数插件来执行任何你想得到的和想不到的任务。 本项目的插件编写、调试难度很低,只要您具备一定的python基础知识,就可以仿照我们提供的模板实现自己的插件功能。 详情请参考[函数插件指南](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)。 -# Latest Update -### I:新功能动态 +# Updates +### I:动态 1. 对话保存功能。在函数插件区调用 `保存当前的对话` 即可将当前对话保存为可读+可复原的html文件, 另外在函数插件区(下拉菜单)调用 `载入对话历史存档` ,即可还原之前的会话。 @@ -280,28 +261,23 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h -7. 新增MOSS大语言模型支持 -
- -8. OpenAI图像生成 +7. OpenAI图像生成
-9. OpenAI音频解析与总结 +8. OpenAI音频解析与总结
-10. Latex全文校对纠错 +9. Latex全文校对纠错
===>
-11. 语言、主题切换 +10. 语言、主题切换
@@ -348,7 +324,6 @@ GPT Academic开发者QQ群:`610599535` ### IV:本项目的开发分支 1. `master` 分支: 主分支,稳定版 - 2. `frontier` 分支: 开发分支,测试版 From 9a1aff5bb6446b2d9b5c6a5d9c6880799774cc5c Mon Sep 17 00:00:00 2001 From: binary-husky Date: Mon, 30 Oct 2023 11:10:05 +0800 Subject: [PATCH 010/117] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dget=5Fconf=E6=8E=A5?= =?UTF-8?q?=E5=8F=A3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- main.py | 2 +- request_llms/bridge_qianfan.py | 2 +- request_llms/bridge_spark.py | 2 +- request_llms/bridge_stackclaude.py | 4 ++-- toolbox.py | 1 - 5 files changed, 5 insertions(+), 6 deletions(-) diff --git a/main.py b/main.py index ee8f5cf78c..bf84382578 100644 --- a/main.py +++ b/main.py @@ -433,7 +433,7 @@ def warm_up_mods(): time.sleep(4); warm_up_modules() server_port=PORT, favicon_path=os.path.join(os.path.dirname(__file__), "docs/logo.png"), auth=AUTHENTICATION if len(AUTHENTICATION) != 0 else None, - blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"]) + blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile","gpt_log/admin"]) # 如果需要在二级路径下运行 # CUSTOM_PATH = get_conf('CUSTOM_PATH') diff --git a/request_llms/bridge_qianfan.py b/request_llms/bridge_qianfan.py index 99f0623f38..81e7a9c1c7 100644 --- a/request_llms/bridge_qianfan.py +++ b/request_llms/bridge_qianfan.py @@ -75,7 +75,7 @@ def generate_message_payload(inputs, llm_kwargs, history, system_prompt): def generate_from_baidu_qianfan(inputs, llm_kwargs, history, system_prompt): - BAIDU_CLOUD_QIANFAN_MODEL, = get_conf('BAIDU_CLOUD_QIANFAN_MODEL') + BAIDU_CLOUD_QIANFAN_MODEL = get_conf('BAIDU_CLOUD_QIANFAN_MODEL') url_lib = { "ERNIE-Bot": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions" , diff --git a/request_llms/bridge_spark.py b/request_llms/bridge_spark.py index d6ff42faed..6ba39ee75d 100644 --- a/request_llms/bridge_spark.py +++ b/request_llms/bridge_spark.py @@ -8,7 +8,7 @@ model_name = '星火认知大模型' def validate_key(): - XFYUN_APPID, = get_conf('XFYUN_APPID', ) + XFYUN_APPID = get_conf('XFYUN_APPID') if XFYUN_APPID == '00000000' or XFYUN_APPID == '': return False return True diff --git a/request_llms/bridge_stackclaude.py b/request_llms/bridge_stackclaude.py index a39398444d..0b42a17c0c 100644 --- a/request_llms/bridge_stackclaude.py +++ b/request_llms/bridge_stackclaude.py @@ -36,7 +36,7 @@ class SlackClient(AsyncWebClient): CHANNEL_ID = None async def open_channel(self): - response = await self.conversations_open(users=get_conf('SLACK_CLAUDE_BOT_ID')[0]) + response = await self.conversations_open(users=get_conf('SLACK_CLAUDE_BOT_ID')) self.CHANNEL_ID = response["channel"]["id"] async def chat(self, text): @@ -51,7 +51,7 @@ async def get_slack_messages(self): # TODO:暂时不支持历史消息,因为在同一个频道里存在多人使用时历史消息渗透问题 resp = await self.conversations_history(channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=1) msg = [msg for msg in resp["messages"] - if msg.get("user") == get_conf('SLACK_CLAUDE_BOT_ID')[0]] + if msg.get("user") == get_conf('SLACK_CLAUDE_BOT_ID')] return msg except (SlackApiError, KeyError) as e: raise RuntimeError(f"获取Slack消息失败。") diff --git a/toolbox.py b/toolbox.py index 5b7a751921..8c6e7fae9a 100644 --- a/toolbox.py +++ b/toolbox.py @@ -732,7 +732,6 @@ def select_api_key(keys, llm_model): raise RuntimeError(f"您提供的api-key不满足要求,不包含任何可用于{llm_model}的api-key。您可能选择了错误的模型或请求源(右下角更换模型菜单中可切换openai,azure,claude,api2d等请求源)。") api_key = random.choice(avail_key_list) # 随机负载均衡 - if ENABLE return api_key def 
read_env_variable(arg, default_value): From 9fb29f249bd146e70343d51c29795eff895896bb Mon Sep 17 00:00:00 2001 From: Zorn Wang Date: Mon, 30 Oct 2023 19:20:05 +0800 Subject: [PATCH 011/117] =?UTF-8?q?Feature:=20=E6=B7=BB=E5=8A=A0=E7=99=BE?= =?UTF-8?q?=E5=BA=A6=E5=8D=83=E5=B8=86=E6=96=87=E5=BF=834.0=E5=A4=A7?= =?UTF-8?q?=E6=A8=A1=E5=9E=8B=E6=94=AF=E6=8C=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- config.py | 2 +- request_llm/bridge_qianfan.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/config.py b/config.py index abea7ea30b..b4f00a6360 100644 --- a/config.py +++ b/config.py @@ -98,7 +98,7 @@ # 百度千帆(LLM_MODEL="qianfan") BAIDU_CLOUD_API_KEY = '' BAIDU_CLOUD_SECRET_KEY = '' -BAIDU_CLOUD_QIANFAN_MODEL = 'ERNIE-Bot' # 可选 "ERNIE-Bot"(文心一言), "ERNIE-Bot-turbo", "BLOOMZ-7B", "Llama-2-70B-Chat", "Llama-2-13B-Chat", "Llama-2-7B-Chat" +BAIDU_CLOUD_QIANFAN_MODEL = 'ERNIE-Bot' # 可选 "ERNIE-Bot-4"(文心大模型4.0), "ERNIE-Bot"(文心一言), "ERNIE-Bot-turbo", "BLOOMZ-7B", "Llama-2-70B-Chat", "Llama-2-13B-Chat", "Llama-2-7B-Chat" # 如果使用ChatGLM2微调模型,请把 LLM_MODEL="chatglmft",并在此处指定模型路径 diff --git a/request_llm/bridge_qianfan.py b/request_llm/bridge_qianfan.py index be7397607a..bbae5630f0 100644 --- a/request_llm/bridge_qianfan.py +++ b/request_llm/bridge_qianfan.py @@ -78,8 +78,9 @@ def generate_from_baidu_qianfan(inputs, llm_kwargs, history, system_prompt): BAIDU_CLOUD_QIANFAN_MODEL, = get_conf('BAIDU_CLOUD_QIANFAN_MODEL') url_lib = { - "ERNIE-Bot": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions" , - "ERNIE-Bot-turbo": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/eb-instant" , + "ERNIE-Bot-4": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro", + "ERNIE-Bot": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions", + "ERNIE-Bot-turbo": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/eb-instant", "BLOOMZ-7B": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/bloomz_7b1", "Llama-2-70B-Chat": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/llama_2_70b", From 08f036aafd7dc48b22ebf951bfc5012d30f98fb8 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Tue, 31 Oct 2023 03:08:50 +0800 Subject: [PATCH 012/117] =?UTF-8?q?=E6=94=AF=E6=8C=81chatglm3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llms/bridge_all.py | 12 ++++- request_llms/bridge_chatglm3.py | 78 +++++++++++++++++++++++++++++++++ request_llms/local_llm_class.py | 59 ++++++++++++++++++++----- tests/test_llms.py | 3 +- 4 files changed, 139 insertions(+), 13 deletions(-) create mode 100644 request_llms/bridge_chatglm3.py diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index b484d1f81e..27b91c267a 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -19,8 +19,8 @@ from .bridge_chatglm import predict_no_ui_long_connection as chatglm_noui from .bridge_chatglm import predict as chatglm_ui -from .bridge_chatglm import predict_no_ui_long_connection as chatglm_noui -from .bridge_chatglm import predict as chatglm_ui +from .bridge_chatglm3 import predict_no_ui_long_connection as chatglm3_noui +from .bridge_chatglm3 import predict as chatglm3_ui from .bridge_qianfan import predict_no_ui_long_connection as qianfan_noui from .bridge_qianfan import predict as qianfan_ui @@ -208,6 +208,14 @@ def decode(self, *args, **kwargs): "tokenizer": 
tokenizer_gpt35, "token_cnt": get_token_num_gpt35, }, + "chatglm3": { + "fn_with_ui": chatglm3_ui, + "fn_without_ui": chatglm3_noui, + "endpoint": None, + "max_token": 8192, + "tokenizer": tokenizer_gpt35, + "token_cnt": get_token_num_gpt35, + }, "qianfan": { "fn_with_ui": qianfan_ui, "fn_without_ui": qianfan_noui, diff --git a/request_llms/bridge_chatglm3.py b/request_llms/bridge_chatglm3.py new file mode 100644 index 0000000000..5f1ec54344 --- /dev/null +++ b/request_llms/bridge_chatglm3.py @@ -0,0 +1,78 @@ +model_name = "ChatGLM3" +cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`" + + +from transformers import AutoModel, AutoTokenizer +from toolbox import get_conf, ProxyNetworkActivate +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM + + + +# ------------------------------------------------------------------------------------------------------------------------ +# 🔌💻 Local Model +# ------------------------------------------------------------------------------------------------------------------------ +@SingletonLocalLLM +class GetONNXGLMHandle(LocalLLMHandle): + + def load_model_info(self): + # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 + self.model_name = model_name + self.cmd_to_install = cmd_to_install + + def load_model_and_tokenizer(self): + # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 + import os, glob + import os + import platform + LOCAL_MODEL_QUANT, device = get_conf('LOCAL_MODEL_QUANT', 'LOCAL_MODEL_DEVICE') + + if LOCAL_MODEL_QUANT == "INT4": # INT4 + _model_name_ = "THUDM/chatglm3-6b-int4" + elif LOCAL_MODEL_QUANT == "INT8": # INT8 + _model_name_ = "THUDM/chatglm3-6b-int8" + else: + _model_name_ = "THUDM/chatglm3-6b" # FP16 + with ProxyNetworkActivate('Download_LLM'): + chatglm_tokenizer = AutoTokenizer.from_pretrained(_model_name_, trust_remote_code=True) + if device=='cpu': + chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True, device='cpu').float() + else: + chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True, device='cuda') + chatglm_model = chatglm_model.eval() + + self._model = chatglm_model + self._tokenizer = chatglm_tokenizer + return self._model, self._tokenizer + + def llm_stream_generator(self, **kwargs): + # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 + def adaptor(kwargs): + query = kwargs['query'] + max_length = kwargs['max_length'] + top_p = kwargs['top_p'] + temperature = kwargs['temperature'] + history = kwargs['history'] + return query, max_length, top_p, temperature, history + + query, max_length, top_p, temperature, history = adaptor(kwargs) + + for response, history in self._model.stream_chat(self._tokenizer, + query, + history, + max_length=max_length, + top_p=top_p, + temperature=temperature, + ): + yield response + + def try_to_import_special_deps(self, **kwargs): + # import something that will raise error if the user does not install requirement_*.txt + # 🏃‍♂️🏃‍♂️🏃‍♂️ 主进程执行 + import importlib + importlib.import_module('modelscope') + + +# ------------------------------------------------------------------------------------------------------------------------ +# 🔌💻 GPT-Academic Interface +# ------------------------------------------------------------------------------------------------------------------------ +predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetONNXGLMHandle, model_name, history_format='chatglm3') \ No newline at end of file diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py index a421ddf366..626db918f6 100644 --- 
a/request_llms/local_llm_class.py +++ b/request_llms/local_llm_class.py @@ -114,7 +114,7 @@ def stream_chat(self, **kwargs): -def get_local_llm_predict_fns(LLMSingletonClass, model_name): +def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='classic'): load_message = f"{model_name}尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,{model_name}消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……" def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): @@ -126,11 +126,30 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + _llm_handle.info if not _llm_handle.running: raise RuntimeError(_llm_handle.info) - # chatglm 没有 sys_prompt 接口,因此把prompt加入 history - history_feedin = [] - history_feedin.append([sys_prompt, "Certainly!"]) - for i in range(len(history)//2): - history_feedin.append([history[2*i], history[2*i+1]] ) + if history_format == 'classic': + # 没有 sys_prompt 接口,因此把prompt加入 history + history_feedin = [] + history_feedin.append([sys_prompt, "Certainly!"]) + for i in range(len(history)//2): + history_feedin.append([history[2*i], history[2*i+1]] ) + elif history_format == 'chatglm3': + # 有 sys_prompt 接口 + conversation_cnt = len(history) // 2 + history_feedin = [{"role": "system", "content": sys_prompt}] + if conversation_cnt: + for index in range(0, 2*conversation_cnt, 2): + what_i_have_asked = {} + what_i_have_asked["role"] = "user" + what_i_have_asked["content"] = history[index] + what_gpt_answer = {} + what_gpt_answer["role"] = "assistant" + what_gpt_answer["content"] = history[index+1] + if what_i_have_asked["content"] != "": + if what_gpt_answer["content"] == "": continue + history_feedin.append(what_i_have_asked) + history_feedin.append(what_gpt_answer) + else: + history_feedin[-1]['content'] = what_gpt_answer['content'] watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可 response = "" @@ -160,10 +179,30 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot) # 处理历史信息 - history_feedin = [] - history_feedin.append([system_prompt, "Certainly!"]) - for i in range(len(history)//2): - history_feedin.append([history[2*i], history[2*i+1]] ) + if history_format == 'classic': + # 没有 sys_prompt 接口,因此把prompt加入 history + history_feedin = [] + history_feedin.append([system_prompt, "Certainly!"]) + for i in range(len(history)//2): + history_feedin.append([history[2*i], history[2*i+1]] ) + elif history_format == 'chatglm3': + # 有 sys_prompt 接口 + conversation_cnt = len(history) // 2 + history_feedin = [{"role": "system", "content": system_prompt}] + if conversation_cnt: + for index in range(0, 2*conversation_cnt, 2): + what_i_have_asked = {} + what_i_have_asked["role"] = "user" + what_i_have_asked["content"] = history[index] + what_gpt_answer = {} + what_gpt_answer["role"] = "assistant" + what_gpt_answer["content"] = history[index+1] + if what_i_have_asked["content"] != "": + if what_gpt_answer["content"] == "": continue + history_feedin.append(what_i_have_asked) + history_feedin.append(what_gpt_answer) + else: + history_feedin[-1]['content'] = what_gpt_answer['content'] # 开始接收回复 response = f"[Local Message] 等待{model_name}响应中 ..." 
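The `history_format` switch introduced in the hunk above is the crux of the ChatGLM3 support: the classic path smuggles the system prompt into a pair-list because the model exposes no system-prompt interface, while the chatglm3 path builds OpenAI-style role dictionaries with a genuine system role. Below is a minimal standalone sketch of that conversion — the function and variable names are illustrative rather than taken from the patch, and the empty-turn handling is simplified (the patch folds an empty user turn into the previous assistant message instead of skipping the pair):

```python
# Sketch: convert the flat history list used by gpt_academic into the two
# formats handled above. `history` alternates [user, assistant, user, ...].

def build_history_feedin(history, sys_prompt, history_format='classic'):
    if history_format == 'classic':
        # No system-prompt interface: inject the prompt as a fake first exchange.
        feedin = [[sys_prompt, "Certainly!"]]
        for i in range(len(history) // 2):
            feedin.append([history[2 * i], history[2 * i + 1]])
        return feedin
    elif history_format == 'chatglm3':
        # ChatGLM3 accepts role-tagged dicts, including a genuine system role.
        feedin = [{"role": "system", "content": sys_prompt}]
        for i in range(0, 2 * (len(history) // 2), 2):
            user, assistant = history[i], history[i + 1]
            if user == "" or assistant == "":
                continue  # simplified; see the patch for the exact merge rule
            feedin.append({"role": "user", "content": user})
            feedin.append({"role": "assistant", "content": assistant})
        return feedin

print(build_history_feedin(["hi", "hello!"], "Be concise.", history_format='chatglm3'))
# -> [{'role': 'system', 'content': 'Be concise.'},
#     {'role': 'user', 'content': 'hi'},
#     {'role': 'assistant', 'content': 'hello!'}]
```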
diff --git a/tests/test_llms.py b/tests/test_llms.py index f43f3680b1..5c5d2f6cba 100644 --- a/tests/test_llms.py +++ b/tests/test_llms.py @@ -18,7 +18,8 @@ def validate_path(): # from request_llms.bridge_internlm import predict_no_ui_long_connection # from request_llms.bridge_qwen import predict_no_ui_long_connection # from request_llms.bridge_spark import predict_no_ui_long_connection - from request_llms.bridge_zhipu import predict_no_ui_long_connection + # from request_llms.bridge_zhipu import predict_no_ui_long_connection + from request_llms.bridge_chatglm3 import predict_no_ui_long_connection llm_kwargs = { 'max_length': 4096, From 136162ec0d5b52e4040d3e981e3bec0709c0248f Mon Sep 17 00:00:00 2001 From: binary-husky Date: Tue, 31 Oct 2023 16:17:52 +0800 Subject: [PATCH 013/117] better local model interaction --- request_llms/bridge_chatglm.py | 210 ++++++++++---------------------- request_llms/bridge_chatglm3.py | 6 +- request_llms/local_llm_class.py | 121 ++++++++++++------ 3 files changed, 148 insertions(+), 189 deletions(-) diff --git a/request_llms/bridge_chatglm.py b/request_llms/bridge_chatglm.py index 3a7cc72399..16e1d8fc78 100644 --- a/request_llms/bridge_chatglm.py +++ b/request_llms/bridge_chatglm.py @@ -1,42 +1,29 @@ +model_name = "ChatGLM" +cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`" + from transformers import AutoModel, AutoTokenizer -import time -import threading -import importlib -from toolbox import update_ui, get_conf, ProxyNetworkActivate -from multiprocessing import Process, Pipe +from toolbox import get_conf, ProxyNetworkActivate +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM -load_message = "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……" -################################################################################# -class GetGLMHandle(Process): - def __init__(self): - super().__init__(daemon=True) - self.parent, self.child = Pipe() - self.chatglm_model = None - self.chatglm_tokenizer = None - self.info = "" - self.success = True - self.check_dependency() - self.start() - self.threadLock = threading.Lock() - - def check_dependency(self): - try: - import sentencepiece - self.info = "依赖检测通过" - self.success = True - except: - self.info = "缺少ChatGLM的依赖,如果要使用ChatGLM,除了基础的pip依赖以外,您还需要运行`pip install -r request_llms/requirements_chatglm.txt`安装ChatGLM的依赖。" - self.success = False - def ready(self): - return self.chatglm_model is not None +# ------------------------------------------------------------------------------------------------------------------------ +# 🔌💻 Local Model +# ------------------------------------------------------------------------------------------------------------------------ +@SingletonLocalLLM +class GetGLM2Handle(LocalLLMHandle): - def run(self): - # 子进程执行 - # 第一次运行,加载参数 - retry = 0 + def load_model_info(self): + # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 + self.model_name = model_name + self.cmd_to_install = cmd_to_install + + def load_model_and_tokenizer(self): + # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 + import os, glob + import os + import platform LOCAL_MODEL_QUANT, device = get_conf('LOCAL_MODEL_QUANT', 'LOCAL_MODEL_DEVICE') if LOCAL_MODEL_QUANT == "INT4": # INT4 @@ -46,122 +33,47 @@ def run(self): else: _model_name_ = "THUDM/chatglm2-6b" # FP16 - while True: - try: - with ProxyNetworkActivate('Download_LLM'): - if self.chatglm_model is None: - self.chatglm_tokenizer = AutoTokenizer.from_pretrained(_model_name_, trust_remote_code=True) - if device=='cpu': - 
self.chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True).float() - else: - self.chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True).half().cuda() - self.chatglm_model = self.chatglm_model.eval() - break - else: - break - except: - retry += 1 - if retry > 3: - self.child.send('[Local Message] Call ChatGLM fail 不能正常加载ChatGLM的参数。') - raise RuntimeError("不能正常加载ChatGLM的参数!") - - while True: - # 进入任务等待状态 - kwargs = self.child.recv() - # 收到消息,开始请求 - try: - for response, history in self.chatglm_model.stream_chat(self.chatglm_tokenizer, **kwargs): - self.child.send(response) - # # 中途接收可能的终止指令(如果有的话) - # if self.child.poll(): - # command = self.child.recv() - # if command == '[Terminate]': break - except: - from toolbox import trimmed_format_exc - self.child.send('[Local Message] Call ChatGLM fail.' + '\n```\n' + trimmed_format_exc() + '\n```\n') - # 请求处理结束,开始下一个循环 - self.child.send('[Finish]') - - def stream_chat(self, **kwargs): - # 主进程执行 - self.threadLock.acquire() - self.parent.send(kwargs) - while True: - res = self.parent.recv() - if res != '[Finish]': - yield res + with ProxyNetworkActivate('Download_LLM'): + chatglm_tokenizer = AutoTokenizer.from_pretrained(_model_name_, trust_remote_code=True) + if device=='cpu': + chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True).float() else: - break - self.threadLock.release() - -global glm_handle -glm_handle = None -################################################################################# -def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): - """ - 多线程方法 - 函数的说明请见 request_llms/bridge_all.py - """ - global glm_handle - if glm_handle is None: - glm_handle = GetGLMHandle() - if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + glm_handle.info - if not glm_handle.success: - error = glm_handle.info - glm_handle = None - raise RuntimeError(error) - - # chatglm 没有 sys_prompt 接口,因此把prompt加入 history - history_feedin = [] - history_feedin.append(["What can I do?", sys_prompt]) - for i in range(len(history)//2): - history_feedin.append([history[2*i], history[2*i+1]] ) - - watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可 - response = "" - for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): - if len(observe_window) >= 1: observe_window[0] = response - if len(observe_window) >= 2: - if (time.time()-observe_window[1]) > watch_dog_patience: - raise RuntimeError("程序终止。") - return response - - - -def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): - """ - 单线程方法 - 函数的说明请见 request_llms/bridge_all.py - """ - chatbot.append((inputs, "")) - - global glm_handle - if glm_handle is None: - glm_handle = GetGLMHandle() - chatbot[-1] = (inputs, load_message + "\n\n" + glm_handle.info) - yield from update_ui(chatbot=chatbot, history=[]) - if not glm_handle.success: - glm_handle = None - return - - if additional_fn is not None: - from core_functional import handle_core_functionality - inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot) - - # 处理历史信息 - history_feedin = [] - history_feedin.append(["What can I do?", system_prompt] ) - for i in range(len(history)//2): - history_feedin.append([history[2*i], history[2*i+1]] ) + chatglm_model = 
AutoModel.from_pretrained(_model_name_, trust_remote_code=True).half().cuda() + chatglm_model = chatglm_model.eval() + + self._model = chatglm_model + self._tokenizer = chatglm_tokenizer + return self._model, self._tokenizer + + def llm_stream_generator(self, **kwargs): + # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 + def adaptor(kwargs): + query = kwargs['query'] + max_length = kwargs['max_length'] + top_p = kwargs['top_p'] + temperature = kwargs['temperature'] + history = kwargs['history'] + return query, max_length, top_p, temperature, history + + query, max_length, top_p, temperature, history = adaptor(kwargs) + + for response, history in self._model.stream_chat(self._tokenizer, + query, + history, + max_length=max_length, + top_p=top_p, + temperature=temperature, + ): + yield response + + def try_to_import_special_deps(self, **kwargs): + # import something that will raise error if the user does not install requirement_*.txt + # 🏃‍♂️🏃‍♂️🏃‍♂️ 主进程执行 + import importlib + # importlib.import_module('modelscope') - # 开始接收chatglm的回复 - response = "[Local Message] 等待ChatGLM响应中 ..." - for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): - chatbot[-1] = (inputs, response) - yield from update_ui(chatbot=chatbot, history=history) - # 总结输出 - if response == "[Local Message] 等待ChatGLM响应中 ...": - response = "[Local Message] ChatGLM响应异常 ..." - history.extend([inputs, response]) - yield from update_ui(chatbot=chatbot, history=history) +# ------------------------------------------------------------------------------------------------------------------------ +# 🔌💻 GPT-Academic Interface +# ------------------------------------------------------------------------------------------------------------------------ +predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetGLM2Handle, model_name) \ No newline at end of file diff --git a/request_llms/bridge_chatglm3.py b/request_llms/bridge_chatglm3.py index 5f1ec54344..461c3064a3 100644 --- a/request_llms/bridge_chatglm3.py +++ b/request_llms/bridge_chatglm3.py @@ -12,7 +12,7 @@ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ @SingletonLocalLLM -class GetONNXGLMHandle(LocalLLMHandle): +class GetGLM3Handle(LocalLLMHandle): def load_model_info(self): # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 @@ -69,10 +69,10 @@ def try_to_import_special_deps(self, **kwargs): # import something that will raise error if the user does not install requirement_*.txt # 🏃‍♂️🏃‍♂️🏃‍♂️ 主进程执行 import importlib - importlib.import_module('modelscope') + # importlib.import_module('modelscope') # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 GPT-Academic Interface # ------------------------------------------------------------------------------------------------------------------------ -predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetONNXGLMHandle, model_name, history_format='chatglm3') \ No newline at end of file +predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetGLM3Handle, model_name, history_format='chatglm3') \ No newline at end of file diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py index 626db918f6..096b52120b 100644 --- a/request_llms/local_llm_class.py +++ b/request_llms/local_llm_class.py @@ -1,15 +1,16 @@ -from transformers import AutoModel, 
AutoTokenizer import time import threading -import importlib -from toolbox import update_ui, get_conf, Singleton +from toolbox import update_ui from multiprocessing import Process, Pipe +from contextlib import redirect_stdout + def SingletonLocalLLM(cls): """ 一个单实例装饰器 """ _instance = {} + def _singleton(*args, **kargs): if cls not in _instance: _instance[cls] = cls(*args, **kargs) @@ -21,6 +22,28 @@ def _singleton(*args, **kargs): return _instance[cls] return _singleton + +def reset_tqdm_output(): + import sys, tqdm + def status_printer(self, file): + fp = file + if fp in (sys.stderr, sys.stdout): + getattr(sys.stderr, 'flush', lambda: None)() + getattr(sys.stdout, 'flush', lambda: None)() + + def fp_write(s): + print(s) + last_len = [0] + + def print_status(s): + from tqdm.utils import disp_len + len_s = disp_len(s) + fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0))) + last_len[0] = len_s + return print_status + tqdm.tqdm.status_printer = status_printer + + class LocalLLMHandle(Process): def __init__(self): # ⭐主进程执行 @@ -28,6 +51,9 @@ def __init__(self): self.corrupted = False self.load_model_info() self.parent, self.child = Pipe() + # allow redirect_stdout + self.std_tag = "[Subprocess Message] " + self.child.write = lambda x: self.child.send(self.std_tag + x) self.running = True self._model = None self._tokenizer = None @@ -52,7 +78,7 @@ def load_model_and_tokenizer(self): def llm_stream_generator(self, **kwargs): # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 raise NotImplementedError("Method not implemented yet") - + def try_to_import_special_deps(self, **kwargs): """ import something that will raise error if the user does not install requirement_*.txt @@ -64,7 +90,7 @@ def check_dependency(self): # ⭐主进程执行 try: self.try_to_import_special_deps() - self.info = "依赖检测通过" + self.info = "`依赖检测通过`" self.running = True except: self.info = f"缺少{self.model_name}的依赖,如果要使用{self.model_name},除了基础的pip依赖以外,您还需要运行{self.cmd_to_install}安装{self.model_name}的依赖。" @@ -73,15 +99,21 @@ def check_dependency(self): def run(self): # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 # 第一次运行,加载参数 + reset_tqdm_output() + self.info = "`尝试加载模型`" try: - self._model, self._tokenizer = self.load_model_and_tokenizer() + with redirect_stdout(self.child): + self._model, self._tokenizer = self.load_model_and_tokenizer() except: + self.info = "`加载模型失败`" self.running = False from toolbox import trimmed_format_exc - self.child.send(f'[Local Message] 不能正常加载{self.model_name}的参数.' + '\n```\n' + trimmed_format_exc() + '\n```\n') + self.child.send( + f'[Local Message] 不能正常加载{self.model_name}的参数.' + '\n```\n' + trimmed_format_exc() + '\n```\n') self.child.send('[FinishBad]') raise RuntimeError(f"不能正常加载{self.model_name}的参数!") + self.info = "`准备就绪`" while True: # 进入任务等待状态 kwargs = self.child.recv() @@ -93,25 +125,35 @@ def run(self): # 请求处理结束,开始下一个循环 except: from toolbox import trimmed_format_exc - self.child.send(f'[Local Message] 调用{self.model_name}失败.' + '\n```\n' + trimmed_format_exc() + '\n```\n') + self.child.send( + f'[Local Message] 调用{self.model_name}失败.' 
+ '\n```\n' + trimmed_format_exc() + '\n```\n') self.child.send('[Finish]') def stream_chat(self, **kwargs): # ⭐主进程执行 - self.threadLock.acquire() - self.parent.send(kwargs) - while True: - res = self.parent.recv() - if res == '[Finish]': - break - if res == '[FinishBad]': - self.running = False - self.corrupted = True - break - else: - yield res - self.threadLock.release() - + if self.info == "`准备就绪`": + yield "`正在等待线程锁,排队中请稍后 ...`" + with self.threadLock: + self.parent.send(kwargs) + std_out = "" + std_out_clip_len = 4096 + while True: + res = self.parent.recv() + if res.startswith(self.std_tag): + new_output = res[len(self.std_tag):] + std_out = std_out[:std_out_clip_len] + print(new_output, end='') + std_out = new_output + std_out + yield self.std_tag + '\n```\n' + std_out + '\n```\n' + elif res == '[Finish]': + break + elif res == '[FinishBad]': + self.running = False + self.corrupted = True + break + else: + std_out = "" + yield res def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='classic'): @@ -123,15 +165,17 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", 函数的说明请见 request_llms/bridge_all.py """ _llm_handle = LLMSingletonClass() - if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + _llm_handle.info - if not _llm_handle.running: raise RuntimeError(_llm_handle.info) + if len(observe_window) >= 1: + observe_window[0] = load_message + "\n\n" + _llm_handle.info + if not _llm_handle.running: + raise RuntimeError(_llm_handle.info) if history_format == 'classic': # 没有 sys_prompt 接口,因此把prompt加入 history history_feedin = [] history_feedin.append([sys_prompt, "Certainly!"]) for i in range(len(history)//2): - history_feedin.append([history[2*i], history[2*i+1]] ) + history_feedin.append([history[2*i], history[2*i+1]]) elif history_format == 'chatglm3': # 有 sys_prompt 接口 conversation_cnt = len(history) // 2 @@ -145,24 +189,24 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", what_gpt_answer["role"] = "assistant" what_gpt_answer["content"] = history[index+1] if what_i_have_asked["content"] != "": - if what_gpt_answer["content"] == "": continue + if what_gpt_answer["content"] == "": + continue history_feedin.append(what_i_have_asked) history_feedin.append(what_gpt_answer) else: history_feedin[-1]['content'] = what_gpt_answer['content'] - watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可 + watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可 response = "" for response in _llm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): if len(observe_window) >= 1: observe_window[0] = response - if len(observe_window) >= 2: - if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。") + if len(observe_window) >= 2: + if (time.time()-observe_window[1]) > watch_dog_patience: + raise RuntimeError("程序终止。") return response - - - def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): + def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None): """ ⭐单线程方法 函数的说明请见 request_llms/bridge_all.py @@ -172,11 +216,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp _llm_handle = LLMSingletonClass() chatbot[-1] = (inputs, load_message + "\n\n" + _llm_handle.info) yield from update_ui(chatbot=chatbot, history=[]) - if 
not _llm_handle.running: raise RuntimeError(_llm_handle.info) + if not _llm_handle.running: + raise RuntimeError(_llm_handle.info) if additional_fn is not None: from core_functional import handle_core_functionality - inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot) + inputs, history = handle_core_functionality( + additional_fn, inputs, history, chatbot) # 处理历史信息 if history_format == 'classic': @@ -184,7 +230,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp history_feedin = [] history_feedin.append([system_prompt, "Certainly!"]) for i in range(len(history)//2): - history_feedin.append([history[2*i], history[2*i+1]] ) + history_feedin.append([history[2*i], history[2*i+1]]) elif history_format == 'chatglm3': # 有 sys_prompt 接口 conversation_cnt = len(history) // 2 @@ -198,7 +244,8 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp what_gpt_answer["role"] = "assistant" what_gpt_answer["content"] = history[index+1] if what_i_have_asked["content"] != "": - if what_gpt_answer["content"] == "": continue + if what_gpt_answer["content"] == "": + continue history_feedin.append(what_i_have_asked) history_feedin.append(what_gpt_answer) else: @@ -216,4 +263,4 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp history.extend([inputs, response]) yield from update_ui(chatbot=chatbot, history=history) - return predict_no_ui_long_connection, predict \ No newline at end of file + return predict_no_ui_long_connection, predict From 17cf47dcd661121e193a5d08331131fef950f143 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Tue, 31 Oct 2023 18:02:14 +0800 Subject: [PATCH 014/117] =?UTF-8?q?=E9=98=B2=E6=AD=A2=E5=A4=9A=E7=BA=BF?= =?UTF-8?q?=E7=A8=8B=E6=95=B0=E6=8D=AE=E4=BA=A4=E5=8F=89?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llms/local_llm_class.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py index 096b52120b..97170c9f45 100644 --- a/request_llms/local_llm_class.py +++ b/request_llms/local_llm_class.py @@ -134,6 +134,8 @@ def stream_chat(self, **kwargs): if self.info == "`准备就绪`": yield "`正在等待线程锁,排队中请稍后 ...`" with self.threadLock: + if self.parent.poll(): + while self.parent.poll(): self.parent.recv() self.parent.send(kwargs) std_out = "" std_out_clip_len = 4096 From 09857ea4556bd595951b17abbe1434e3db808d28 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Tue, 31 Oct 2023 20:37:07 +0800 Subject: [PATCH 015/117] =?UTF-8?q?=E8=A7=A3=E9=99=A4=E6=9C=AC=E5=9C=B0?= =?UTF-8?q?=E6=A8=A1=E5=9E=8B=E7=9A=84=E8=8B=A5=E5=B9=B2=E5=B9=B6=E5=8F=91?= =?UTF-8?q?=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llms/local_llm_class.py | 109 ++++++++++++++++++++++++-------- request_llms/queued_pipe.py | 24 +++++++ 2 files changed, 105 insertions(+), 28 deletions(-) create mode 100644 request_llms/queued_pipe.py diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py index 97170c9f45..b6f49ba4ff 100644 --- a/request_llms/local_llm_class.py +++ b/request_llms/local_llm_class.py @@ -3,11 +3,32 @@ from toolbox import update_ui from multiprocessing import Process, Pipe from contextlib import redirect_stdout +from request_llms.queued_pipe import create_queue_pipe +class DebugLock(object): + def __init__(self): + self._lock = threading.Lock() + + def acquire(self): + 
print("acquiring", self) + #traceback.print_tb + self._lock.acquire() + print("acquired", self) + + def release(self): + print("released", self) + #traceback.print_tb + self._lock.release() + + def __enter__(self): + self.acquire() + + def __exit__(self, type, value, traceback): + self.release() def SingletonLocalLLM(cls): """ - 一个单实例装饰器 + Singleton Decroator for LocalLLMHandle """ _instance = {} @@ -46,24 +67,41 @@ def print_status(s): class LocalLLMHandle(Process): def __init__(self): - # ⭐主进程执行 + # ⭐run in main process super().__init__(daemon=True) + self.is_main_process = True # init self.corrupted = False self.load_model_info() - self.parent, self.child = Pipe() + self.parent, self.child = create_queue_pipe() + self.parent_state, self.child_state = create_queue_pipe() # allow redirect_stdout self.std_tag = "[Subprocess Message] " self.child.write = lambda x: self.child.send(self.std_tag + x) self.running = True self._model = None self._tokenizer = None - self.info = "" + self.state = "" self.check_dependency() + self.is_main_process = False # state wrap for child process self.start() - self.threadLock = threading.Lock() + self.is_main_process = True # state wrap for child process + self.threadLock = DebugLock() + + def get_state(self): + # ⭐run in main process + while self.parent_state.poll(): + self.state = self.parent_state.recv() + return self.state + + def set_state(self, new_state): + # ⭐run in main process or 🏃‍♂️🏃‍♂️🏃‍♂️ run in child process + if self.is_main_process: + self.state = new_state + else: + self.child_state.send(new_state) def load_model_info(self): - # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 + # 🏃‍♂️🏃‍♂️🏃‍♂️ run in child process raise NotImplementedError("Method not implemented yet") self.model_name = "" self.cmd_to_install = "" @@ -72,40 +110,40 @@ def load_model_and_tokenizer(self): """ This function should return the model and the tokenizer """ - # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 + # 🏃‍♂️🏃‍♂️🏃‍♂️ run in child process raise NotImplementedError("Method not implemented yet") def llm_stream_generator(self, **kwargs): - # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 + # 🏃‍♂️🏃‍♂️🏃‍♂️ run in child process raise NotImplementedError("Method not implemented yet") def try_to_import_special_deps(self, **kwargs): """ import something that will raise error if the user does not install requirement_*.txt """ - # ⭐主进程执行 + # ⭐run in main process raise NotImplementedError("Method not implemented yet") def check_dependency(self): - # ⭐主进程执行 + # ⭐run in main process try: self.try_to_import_special_deps() - self.info = "`依赖检测通过`" + self.set_state("`依赖检测通过`") self.running = True except: - self.info = f"缺少{self.model_name}的依赖,如果要使用{self.model_name},除了基础的pip依赖以外,您还需要运行{self.cmd_to_install}安装{self.model_name}的依赖。" + self.set_state(f"缺少{self.model_name}的依赖,如果要使用{self.model_name},除了基础的pip依赖以外,您还需要运行{self.cmd_to_install}安装{self.model_name}的依赖。") self.running = False def run(self): - # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 + # 🏃‍♂️🏃‍♂️🏃‍♂️ run in child process # 第一次运行,加载参数 reset_tqdm_output() - self.info = "`尝试加载模型`" + self.set_state("`尝试加载模型`") try: with redirect_stdout(self.child): self._model, self._tokenizer = self.load_model_and_tokenizer() except: - self.info = "`加载模型失败`" + self.set_state("`加载模型失败`") self.running = False from toolbox import trimmed_format_exc self.child.send( @@ -113,7 +151,7 @@ def run(self): self.child.send('[FinishBad]') raise RuntimeError(f"不能正常加载{self.model_name}的参数!") - self.info = "`准备就绪`" + self.set_state("`准备就绪`") while True: # 进入任务等待状态 kwargs = self.child.recv() @@ -121,6 +159,7 @@ def run(self): try: for response_full in 
self.llm_stream_generator(**kwargs): self.child.send(response_full) + print('debug' + response_full) self.child.send('[Finish]') # 请求处理结束,开始下一个循环 except: @@ -129,18 +168,35 @@ def run(self): f'[Local Message] 调用{self.model_name}失败.' + '\n```\n' + trimmed_format_exc() + '\n```\n') self.child.send('[Finish]') + def clear_pending_messages(self): + # ⭐run in main process + while True: + if self.parent.poll(): + self.parent.recv() + continue + for _ in range(5): + time.sleep(0.5) + if self.parent.poll(): + r = self.parent.recv() + continue + break + return + def stream_chat(self, **kwargs): - # ⭐主进程执行 - if self.info == "`准备就绪`": + # ⭐run in main process + if self.get_state() == "`准备就绪`": yield "`正在等待线程锁,排队中请稍后 ...`" + with self.threadLock: if self.parent.poll(): - while self.parent.poll(): self.parent.recv() + yield "`排队中请稍后 ...`" + self.clear_pending_messages() self.parent.send(kwargs) std_out = "" std_out_clip_len = 4096 while True: res = self.parent.recv() + # pipe_watch_dog.feed() if res.startswith(self.std_tag): new_output = res[len(self.std_tag):] std_out = std_out[:std_out_clip_len] @@ -157,20 +213,18 @@ def stream_chat(self, **kwargs): std_out = "" yield res - def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='classic'): load_message = f"{model_name}尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,{model_name}消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……" def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ - ⭐多线程方法 - 函数的说明请见 request_llms/bridge_all.py + refer to request_llms/bridge_all.py """ _llm_handle = LLMSingletonClass() if len(observe_window) >= 1: - observe_window[0] = load_message + "\n\n" + _llm_handle.info + observe_window[0] = load_message + "\n\n" + _llm_handle.get_state() if not _llm_handle.running: - raise RuntimeError(_llm_handle.info) + raise RuntimeError(_llm_handle.get_state()) if history_format == 'classic': # 没有 sys_prompt 接口,因此把prompt加入 history @@ -210,16 +264,15 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None): """ - ⭐单线程方法 - 函数的说明请见 request_llms/bridge_all.py + refer to request_llms/bridge_all.py """ chatbot.append((inputs, "")) _llm_handle = LLMSingletonClass() - chatbot[-1] = (inputs, load_message + "\n\n" + _llm_handle.info) + chatbot[-1] = (inputs, load_message + "\n\n" + _llm_handle.get_state()) yield from update_ui(chatbot=chatbot, history=[]) if not _llm_handle.running: - raise RuntimeError(_llm_handle.info) + raise RuntimeError(_llm_handle.get_state()) if additional_fn is not None: from core_functional import handle_core_functionality diff --git a/request_llms/queued_pipe.py b/request_llms/queued_pipe.py new file mode 100644 index 0000000000..1fc2e5bd53 --- /dev/null +++ b/request_llms/queued_pipe.py @@ -0,0 +1,24 @@ +from multiprocessing import Pipe, Queue +import time +import threading + +class PipeSide(object): + def __init__(self, q_2remote, q_2local) -> None: + self.q_2remote = q_2remote + self.q_2local = q_2local + + def recv(self): + return self.q_2local.get() + + def send(self, buf): + self.q_2remote.put(buf) + + def poll(self): + return not self.q_2local.empty() + +def create_queue_pipe(): + q_p2c = Queue() + q_c2p = Queue() + pipe_c = PipeSide(q_2local=q_p2c, q_2remote=q_c2p) + pipe_p = PipeSide(q_2local=q_c2p, q_2remote=q_p2c) + return pipe_c, pipe_p From ca7ff47fcb3105ba3134196161c3f557be693cf2 Mon Sep 17 00:00:00 
2001 From: ji-jinlong <61379293+ji-jinlong@users.noreply.github.com> Date: Wed, 1 Nov 2023 16:05:57 +0800 Subject: [PATCH 016/117] =?UTF-8?q?Update=20=E7=90=86=E8=A7=A3PDF=E6=96=87?= =?UTF-8?q?=E6=A1=A3=E5=86=85=E5=AE=B9.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...7\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git "a/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" "b/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" index f1a89a7ec9..4674b935ed 100644 --- "a/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" +++ "b/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" @@ -49,7 +49,7 @@ def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问 llm_kwargs, chatbot, history=["The main idea of the previous section is?", last_iteration_result], # 迭代上一次的结果 - sys_prompt="Extract the main idea of this section." # 提示 + sys_prompt="Extract the main idea of this section.用中文回答我" # 提示 ) iteration_results.append(gpt_say) last_iteration_result = gpt_say From cd9f2ec402f952b0a112f1f71c19f00c90697648 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Wed, 1 Nov 2023 22:25:27 +0800 Subject: [PATCH 017/117] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 83f385c9f2..60e7be15d8 100644 --- a/README.md +++ b/README.md @@ -61,7 +61,7 @@ Latex论文一键校对 | [函数插件] 仿Grammarly对Latex文章进行语法 - 新界面(修改`config.py`中的LAYOUT选项即可实现“左右布局”和“上下布局”的切换)
- +
From f016323b8aafe2c4c3d4dd4561fdfc8d7cbf234d Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Wed, 1 Nov 2023 22:26:46 +0800 Subject: [PATCH 018/117] =?UTF-8?q?Update=20=E7=90=86=E8=A7=A3PDF=E6=96=87?= =?UTF-8?q?=E6=A1=A3=E5=86=85=E5=AE=B9.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...7\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git "a/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" "b/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" index 4674b935ed..3b2db998c6 100644 --- "a/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" +++ "b/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" @@ -49,7 +49,7 @@ def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问 llm_kwargs, chatbot, history=["The main idea of the previous section is?", last_iteration_result], # 迭代上一次的结果 - sys_prompt="Extract the main idea of this section.用中文回答我" # 提示 + sys_prompt="Extract the main idea of this section, answer me with Chinese." # 提示 ) iteration_results.append(gpt_say) last_iteration_result = gpt_say From 4824905592d608e133eb9e4962c803ace0a369b0 Mon Sep 17 00:00:00 2001 From: Yao Xiao Date: Tue, 7 Nov 2023 09:48:01 +0800 Subject: [PATCH 019/117] Add new API support --- config.py | 2 +- request_llm/bridge_all.py | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/config.py b/config.py index b4f00a6360..903ee20f98 100644 --- a/config.py +++ b/config.py @@ -87,7 +87,7 @@ # 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 ) LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓ -AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", +AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview","gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"] diff --git a/request_llm/bridge_all.py b/request_llm/bridge_all.py index f85d1b6b34..3d6e4bd78b 100644 --- a/request_llm/bridge_all.py +++ b/request_llm/bridge_all.py @@ -117,6 +117,15 @@ def decode(self, *args, **kwargs): "token_cnt": get_token_num_gpt35, }, + "gpt-3.5-turbo-1106": {#16k + "fn_with_ui": chatgpt_ui, + "fn_without_ui": chatgpt_noui, + "endpoint": openai_endpoint, + "max_token": 16385, + "tokenizer": tokenizer_gpt35, + "token_cnt": get_token_num_gpt35, + }, + "gpt-4": { "fn_with_ui": chatgpt_ui, "fn_without_ui": chatgpt_noui, @@ -135,6 +144,15 @@ def decode(self, *args, **kwargs): "token_cnt": get_token_num_gpt4, }, + "gpt-4-1106-preview": { + "fn_with_ui": chatgpt_ui, + "fn_without_ui": chatgpt_noui, + "endpoint": openai_endpoint, + "max_token": 128000, + "tokenizer": tokenizer_gpt4, + "token_cnt": get_token_num_gpt4, + }, + "gpt-3.5-random": { "fn_with_ui": chatgpt_ui, "fn_without_ui": chatgpt_noui, From 245585be81b3670207f4ada9d35c0c898ea37c34 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Tue, 7 Nov 2023 10:39:35 +0800 Subject: [PATCH 020/117] Update README.md --- README.md | 7 +++++-- 1 file changed, 5 
insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 60e7be15d8..0378eaa1eb 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,11 @@ > **Note** -> +> > 2023.10.28: 紧急修复了若干问题,安装依赖时,请选择`requirements.txt`中**指定的版本**。 -> +> > `pip install -r requirements.txt` +> +> 2023.11.7: 本项目开源免费,**近期发现有人蔑视开源协议,利用本项目违法圈钱**,请各位提高警惕,谨防上当受骗。 + #
GPT 学术优化 (GPT Academic)
From 8e375b0ed2c00b065608df9c3e90dcafffa3145c Mon Sep 17 00:00:00 2001 From: binary-husky Date: Tue, 7 Nov 2023 14:07:30 +0800 Subject: [PATCH 021/117] support chatglm3 --- config.py | 6 +++++- ...257\255\350\250\200\346\250\241\345\236\213.py" | 7 ++++--- main.py | 4 ++-- request_llms/local_llm_class.py | 14 +++++++------- 4 files changed, 18 insertions(+), 13 deletions(-) diff --git a/config.py b/config.py index 06840dd8f0..f578aa853e 100644 --- a/config.py +++ b/config.py @@ -90,11 +90,15 @@ AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", - "chatglm", "moss", "newbing", "claude-2"] + "chatglm3", "moss", "newbing", "claude-2"] # P.S. 其他可用的模型还包括 ["zhipuai", "qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random" # "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"] +# 定义界面上“询问多个GPT模型”插件应该使用哪些模型,请从AVAIL_LLM_MODELS中选择,并在不同模型之间用`&`间隔,例如"gpt-3.5-turbo&chatglm3&azure-gpt-4" +MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3" + + # 百度千帆(LLM_MODEL="qianfan") BAIDU_CLOUD_API_KEY = '' BAIDU_CLOUD_SECRET_KEY = '' diff --git "a/crazy_functions/\350\257\242\351\227\256\345\244\232\344\270\252\345\244\247\350\257\255\350\250\200\346\250\241\345\236\213.py" "b/crazy_functions/\350\257\242\351\227\256\345\244\232\344\270\252\345\244\247\350\257\255\350\250\200\346\250\241\345\236\213.py" index 80e09fcdf5..4210fb2136 100644 --- "a/crazy_functions/\350\257\242\351\227\256\345\244\232\344\270\252\345\244\247\350\257\255\350\250\200\346\250\241\345\236\213.py" +++ "b/crazy_functions/\350\257\242\351\227\256\345\244\232\344\270\252\345\244\247\350\257\255\350\250\200\346\250\241\345\236\213.py" @@ -1,4 +1,4 @@ -from toolbox import CatchException, update_ui +from toolbox import CatchException, update_ui, get_conf from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive import datetime @CatchException @@ -13,11 +13,12 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt web_port 当前软件运行的端口号 """ history = [] # 清空历史,以免输入溢出 - chatbot.append((txt, "正在同时咨询ChatGPT和ChatGLM……")) + MULTI_QUERY_LLM_MODELS = get_conf('MULTI_QUERY_LLM_MODELS') + chatbot.append((txt, "正在同时咨询" + MULTI_QUERY_LLM_MODELS)) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔 - llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔 + llm_kwargs['llm_model'] = MULTI_QUERY_LLM_MODELS # 支持任意数量的llm接口,用&符号分隔 gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( inputs=txt, inputs_show_user=txt, llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, diff --git a/main.py b/main.py index bf84382578..a621deb1a1 100644 --- a/main.py +++ b/main.py @@ -433,7 +433,7 @@ def warm_up_mods(): time.sleep(4); warm_up_modules() server_port=PORT, favicon_path=os.path.join(os.path.dirname(__file__), "docs/logo.png"), auth=AUTHENTICATION if len(AUTHENTICATION) != 0 else None, - blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile","gpt_log/admin"]) + blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"]) # 如果需要在二级路径下运行 # CUSTOM_PATH = get_conf('CUSTOM_PATH') @@ -442,7 +442,7 @@ def warm_up_mods(): time.sleep(4); warm_up_modules() # 
run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH) # else: # demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png", - # blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"]) + # blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"]) if __name__ == "__main__": main() diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py index b6f49ba4ff..b6ce801ef4 100644 --- a/request_llms/local_llm_class.py +++ b/request_llms/local_llm_class.py @@ -5,18 +5,18 @@ from contextlib import redirect_stdout from request_llms.queued_pipe import create_queue_pipe -class DebugLock(object): +class ThreadLock(object): def __init__(self): self._lock = threading.Lock() def acquire(self): - print("acquiring", self) + # print("acquiring", self) #traceback.print_tb self._lock.acquire() - print("acquired", self) + # print("acquired", self) def release(self): - print("released", self) + # print("released", self) #traceback.print_tb self._lock.release() @@ -85,7 +85,7 @@ def __init__(self): self.is_main_process = False # state wrap for child process self.start() self.is_main_process = True # state wrap for child process - self.threadLock = DebugLock() + self.threadLock = ThreadLock() def get_state(self): # ⭐run in main process @@ -159,7 +159,7 @@ def run(self): try: for response_full in self.llm_stream_generator(**kwargs): self.child.send(response_full) - print('debug' + response_full) + # print('debug' + response_full) self.child.send('[Finish]') # 请求处理结束,开始下一个循环 except: @@ -200,7 +200,7 @@ def stream_chat(self, **kwargs): if res.startswith(self.std_tag): new_output = res[len(self.std_tag):] std_out = std_out[:std_out_clip_len] - print(new_output, end='') + # print(new_output, end='') std_out = new_output + std_out yield self.std_tag + '\n```\n' + std_out + '\n```\n' elif res == '[Finish]': From 136e6aaa21102cc41ca87afa62be8ee510e07c41 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 7 Nov 2023 14:08:24 +0800 Subject: [PATCH 022/117] Update config.py --- config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config.py b/config.py index b4f00a6360..b74f1e6649 100644 --- a/config.py +++ b/config.py @@ -87,9 +87,9 @@ # 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 ) LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓ -AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", +AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', - "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", + "gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"] # P.S. 
其他可用的模型还包括 ["qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random" # "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"] From 0897057be1ab8ebdf3ffad9b3d4c50add68b6aa5 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 7 Nov 2023 14:11:52 +0800 Subject: [PATCH 023/117] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0378eaa1eb..eeb354a710 100644 --- a/README.md +++ b/README.md @@ -139,7 +139,7 @@ git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llm/moss # 参考wiki:https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner # 【可选步骤IV】确保config.py配置文件的AVAIL_LLM_MODELS包含了期望的模型,目前支持的全部模型如下(jittorllms系列目前仅支持docker方案): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] +AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"] ```

From 4d9256296d2ae17e49d523ce4dc557104c35ce1c Mon Sep 17 00:00:00 2001
From: Skyzayre <120616113+Skyzayre@users.noreply.github.com>
Date: Tue, 7 Nov 2023 14:13:37 +0800
Subject: [PATCH 024/117] =?UTF-8?q?Update=20=E5=A4=9A=E6=99=BA=E8=83=BD?=
 =?UTF-8?q?=E4=BD=93.py?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../\345\244\232\346\231\272\350\203\275\344\275\223.py" | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git "a/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" "b/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py"
index 5a4c4a58ae..a2d0ce7457 100644
--- "a/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py"
+++ "b/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py"
@@ -32,7 +32,7 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
     web_port        当前软件运行的端口号
     """
     # 检查当前的模型是否符合要求
-    supported_llms = ['gpt-3.5-turbo-16k', 'gpt-4', 'gpt-4-32k',
+    supported_llms = ['gpt-3.5-turbo-16k', 'gpt-3.5-turbo-1106', 'gpt-4', 'gpt-4-32k', 'gpt-4-1106-preview',
                       'api2d-gpt-3.5-turbo-16k', 'api2d-gpt-4']
     llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
     if llm_kwargs['llm_model'] not in supported_llms:

From 3ed0e8012d86be9f37d11be8db5dc2a1dfb1db51 Mon Sep 17 00:00:00 2001
From: Skyzayre <120616113+Skyzayre@users.noreply.github.com>
Date: Tue, 7 Nov 2023 14:17:01 +0800
Subject: [PATCH 025/117] Update bridge_all.py

---
 request_llm/bridge_all.py | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/request_llm/bridge_all.py b/request_llm/bridge_all.py
index f85d1b6b34..2c53537870 100644
--- a/request_llm/bridge_all.py
+++ b/request_llm/bridge_all.py
@@ -108,6 +108,15 @@ def decode(self, *args, **kwargs):
         "token_cnt": get_token_num_gpt35,
     },
 
+    "gpt-3.5-turbo-1106": {
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": openai_endpoint,
+        "max_token": 1024 *16,
+        "tokenizer": tokenizer_gpt35,
+        "token_cnt": get_token_num_gpt35,
+    },
+
     "gpt-3.5-turbo-16k-0613": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
@@ -135,6 +144,15 @@ def decode(self, *args, **kwargs):
         "token_cnt": get_token_num_gpt4,
     },
 
+    "gpt-4-1106-preview": {
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": openai_endpoint,
+        "max_token": 1024 * 128,
+        "tokenizer": tokenizer_gpt4,
+        "token_cnt": get_token_num_gpt4,
+    },
+
     "gpt-3.5-random": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,

From 61cf2b32eb979607b41f1240f478f70d9d447b5a Mon Sep 17 00:00:00 2001
From: Skyzayre <120616113+Skyzayre@users.noreply.github.com>
Date: Tue, 7 Nov 2023 14:21:08 +0800
Subject: [PATCH 026/117] Update README.md.German.md

---
 docs/README.md.German.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/README.md.German.md b/docs/README.md.German.md
index d514de30f5..fa15a8a73a 100644
--- a/docs/README.md.German.md
+++ b/docs/README.md.German.md
@@ -111,7 +111,7 @@ python -m pip install -r request_llm/requirements_moss.txt
 git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss  # When executing this line of code, you must be in the project root path
 
 # [Optional Step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the expected models.
 Currently supported models are as follows (jittorllms series currently only supports docker solutions):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"]
 ```
 
@@ -304,4 +304,4 @@ https://github.com/kaixindelele/ChatPaper
 # Mehr:
 https://github.com/gradio-app/gradio
 https://github.com/fghrsh/live2d_demo
-```
\ No newline at end of file
+```

From bba3419ace4031bb8a29695a53ffe0ea54abac98 Mon Sep 17 00:00:00 2001
From: Skyzayre <120616113+Skyzayre@users.noreply.github.com>
Date: Tue, 7 Nov 2023 14:21:32 +0800
Subject: [PATCH 027/117] Update README.md.Italian.md

---
 docs/README.md.Italian.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/README.md.Italian.md b/docs/README.md.Italian.md
index 76efe1857b..fc01a27e69 100644
--- a/docs/README.md.Italian.md
+++ b/docs/README.md.Italian.md
@@ -117,7 +117,7 @@ python -m pip install -r request_llm/requirements_moss.txt
 git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss  # Si prega di notare che quando si esegue questa riga di codice, si deve essere nella directory radice del progetto
 
 # 【Passaggio facoltativo III】 Assicurati che il file di configurazione config.py includa tutti i modelli desiderati, al momento tutti i modelli supportati sono i seguenti (i modelli della serie jittorllms attualmente supportano solo la soluzione docker):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"]
 ```
 
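[Annotation] Patches 024-025 plug the new `gpt-3.5-turbo-1106` and `gpt-4-1106-preview` endpoints into the model registry in `request_llm/bridge_all.py`; that dict is the single source of truth the request layer reads. Below is a minimal sketch of how such an entry is consumed — `model_info` and the field names come straight from the diff, but the `dispatch` helper itself is a hypothetical illustration, not code from this repository:

```python
# Hedged sketch: how a registry entry like the ones added in PATCH 025 is used.
from request_llm.bridge_all import model_info

def dispatch(llm_model: str, inputs: str):
    entry = model_info[llm_model]           # KeyError => model was never registered
    n_tokens = entry["token_cnt"](inputs)   # e.g. get_token_num_gpt35
    if n_tokens > entry["max_token"]:       # the registry's context budget
        raise ValueError(f"{llm_model}: input needs {n_tokens} tokens, cap is {entry['max_token']}")
    return entry["fn_without_ui"], entry["endpoint"]   # e.g. chatgpt_noui + the OpenAI URL
```

Note that patch 025 writes the 1106 budget as `1024 *16` (16384); patches 038-039 later correct it to the published 16385.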
From 6c3405ba550c9e846643ce78b51675310ecfbb2a Mon Sep 17 00:00:00 2001
From: Skyzayre <120616113+Skyzayre@users.noreply.github.com>
Date: Tue, 7 Nov 2023 14:21:52 +0800
Subject: [PATCH 028/117] Update README.md.Korean.md

---
 docs/README.md.Korean.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/README.md.Korean.md b/docs/README.md.Korean.md
index 61b8e4a051..7bdcb8b987 100644
--- a/docs/README.md.Korean.md
+++ b/docs/README.md.Korean.md
@@ -112,7 +112,7 @@ git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss  # 다음 코
 
 # [선택 사항III] AVAIL_LLM_MODELS config.py 구성 파일에 기대하는 모델이 포함되어 있는지 확인하십시오.
 # 현재 지원되는 전체 모델 :
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"]
 ```
 

From cd40bf9ae2e9dd047aebc42ab4e40a6ba95bb219 Mon Sep 17 00:00:00 2001
From: Skyzayre <120616113+Skyzayre@users.noreply.github.com>
Date: Tue, 7 Nov 2023 14:22:12 +0800
Subject: [PATCH 029/117] Update README.md.Portuguese.md

---
 docs/README.md.Portuguese.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/README.md.Portuguese.md b/docs/README.md.Portuguese.md
index 2347d5a74f..6c368f752e 100644
--- a/docs/README.md.Portuguese.md
+++ b/docs/README.md.Portuguese.md
@@ -127,7 +127,7 @@ python -m pip install -r request_llm/requirements_moss.txt
 git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss  # Note: When executing this line of code, you must be in the project root path
 
 # 【Optional Step III】Make sure that the AVAIL_LLM_MODELS in the config.py configuration file contains the expected model. Currently, all supported models are as follows (jittorllms series currently only supports docker solutions):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"]
 ```
 
From 77220002e0faad0627f09fa40320c9c70a68269a Mon Sep 17 00:00:00 2001
From: Skyzayre <120616113+Skyzayre@users.noreply.github.com>
Date: Tue, 7 Nov 2023 14:22:29 +0800
Subject: [PATCH 030/117] Update README_EN.md

---
 docs/README_EN.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/README_EN.md b/docs/README_EN.md
index 02b8588c38..1a68810a2f 100644
--- a/docs/README_EN.md
+++ b/docs/README_EN.md
@@ -114,7 +114,7 @@ python -m pip install -r request_llm/requirements_moss.txt
 git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss  # When executing this line of code, you must be in the root directory of the project
 
 # [Optional Step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file includes the expected models. Currently supported models are as follows (the jittorllms series only supports the docker solution for the time being):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"]
 ```
 
@@ -319,4 +319,4 @@ https://github.com/kaixindelele/ChatPaper
 # More:
 https://github.com/gradio-app/gradio
 https://github.com/fghrsh/live2d_demo
-```
\ No newline at end of file
+```

From 3141cd392a881edf37644acbd61b87d728fc1a16 Mon Sep 17 00:00:00 2001
From: Skyzayre <120616113+Skyzayre@users.noreply.github.com>
Date: Tue, 7 Nov 2023 14:22:46 +0800
Subject: [PATCH 031/117] Update README_FR.md

---
 docs/README_FR.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/README_FR.md b/docs/README_FR.md
index af3bb42c79..4df3c84039 100644
--- a/docs/README_FR.md
+++ b/docs/README_FR.md
@@ -119,7 +119,7 @@ python -m pip install -r request_llm/requirements_moss.txt
 git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss  # Note: When running this line of code, you must be in the project root path.
 
 # 【Optional Step III】Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the desired model. Currently, all models supported are as follows (the jittorllms series currently only supports the docker scheme):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"]
 ```
 
@@ -320,4 +320,4 @@ https://github.com/kaixindelele/ChatPaper
 # Plus :
 https://github.com/gradio-app/gradio
 https://github.com/fghrsh/live2d_demo
-```
\ No newline at end of file
+```

From 6c795809f76826fda9da5630133d816c46dee856 Mon Sep 17 00:00:00 2001
From: Skyzayre <120616113+Skyzayre@users.noreply.github.com>
Date: Tue, 7 Nov 2023 14:23:01 +0800
Subject: [PATCH 032/117] Update README_JP.md

---
 docs/README_JP.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/README_JP.md b/docs/README_JP.md
index 46145e1f9c..3b03545a05 100644
--- a/docs/README_JP.md
+++ b/docs/README_JP.md
@@ -128,7 +128,7 @@ python -m pip install -r request_llm/requirements_moss.txt
 git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss  # Note that when executing this line of code, it must be in the project root.
 
 # 【Optional Step III】Ensure that the AVAIL_LLM_MODELS in the config.py configuration file contains the expected model. Currently, all supported models are as follows (jittorllms series currently only supports the docker solution):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"]
 ```
 
@@ -326,4 +326,4 @@ https://github.com/kaixindelele/ChatPaper
 # その他:
 https://github.com/gradio-app/gradio
 https://github.com/fghrsh/live2d_demo
-```
\ No newline at end of file
+```

From 3a2466fe4ea6281f24affdccf22d3321b7ec7d52 Mon Sep 17 00:00:00 2001
From: Skyzayre <120616113+Skyzayre@users.noreply.github.com>
Date: Tue, 7 Nov 2023 14:23:16 +0800
Subject: [PATCH 033/117] Update README_RS.md

---
 docs/README_RS.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/README_RS.md b/docs/README_RS.md
index d4888a0522..e050d4421b 100644
--- a/docs/README_RS.md
+++ b/docs/README_RS.md
@@ -116,7 +116,7 @@ python -m pip install -r request_llm/requirements_moss.txt
 git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss  # Note that when executing this line of code, you must be in the project root path
 
 # [Optional step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. Currently, all supported models are as follows (the jittorllms series currently only supports the docker solution):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"]
 ```
 
@@ -275,4 +275,4 @@ https://github.com/kaixindelele/ChatPaper
 # Больше:
 https://github.com/gradio-app/gradio
 https://github.com/fghrsh/live2d_demo
-```
\ No newline at end of file
+```
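[Annotation] Patches 026-033 propagate one and the same configuration example across all eight translated READMEs. The invariant every one of them documents is small, and worth stating as runnable code; the `validate` helper below is a hypothetical check, not project code:

```python
# The constraint each README example above encodes, as an executable check.
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k",
                    "gpt-4", "gpt-4-32k", "gpt-4-1106-preview"]
LLM_MODEL = "gpt-3.5-turbo"   # the default selection

def validate():
    # LLM_MODEL must be a member of AVAIL_LLM_MODELS (config.py says so explicitly)
    assert LLM_MODEL in AVAIL_LLM_MODELS, f"{LLM_MODEL!r} missing from AVAIL_LLM_MODELS"
```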
From ffe6c1403e2d7f4a07434da3af07de5f21134973 Mon Sep 17 00:00:00 2001
From: Skyzayre <120616113+Skyzayre@users.noreply.github.com>
Date: Tue, 7 Nov 2023 14:25:36 +0800
Subject: [PATCH 034/117] Update bridge_chatgpt.py

---
 request_llm/bridge_chatgpt.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/request_llm/bridge_chatgpt.py b/request_llm/bridge_chatgpt.py
index 9903da9d86..292de0ad3c 100644
--- a/request_llm/bridge_chatgpt.py
+++ b/request_llm/bridge_chatgpt.py
@@ -351,6 +351,7 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
         model = random.choice([
             "gpt-3.5-turbo",
             "gpt-3.5-turbo-16k",
+            "gpt-3.5-turbo-1106",
            "gpt-3.5-turbo-0613",
             "gpt-3.5-turbo-16k-0613",
             "gpt-3.5-turbo-0301",
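[Annotation] Patch 034 adds the 1106 snapshot to the pool behind the pseudo-model `gpt-3.5-random`. The mechanism, condensed from the diff (the candidate list is exactly the one above; the wrapper function is a simplification of `generate_payload`):

```python
# When llm_model is "gpt-3.5-random", a concrete variant is drawn per request.
import random

GPT35_POOL = [
    "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-1106",
    "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-0301",
]

def resolve_model(llm_model: str) -> str:
    return random.choice(GPT35_POOL) if llm_model == "gpt-3.5-random" else llm_model
```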
From 804599bbc3c64a6c1a422af30a11ad05ef2e434e Mon Sep 17 00:00:00 2001
From: binary-husky
Date: Tue, 7 Nov 2023 15:36:05 +0800
Subject: [PATCH 035/117] autogen

---
 crazy_functional.py                           |  24 +-
 crazy_functions/agent_fns/auto_agent.py       |   2 +
 crazy_functions/agent_fns/bridge_autogen.py   | 584 ++++++++++++++++++
 crazy_functions/agent_fns/general.py          | 126 +++-
 crazy_functions/agent_fns/pipe.py             |  53 +-
 ...32\346\231\272\350\203\275\344\275\223.py" |  11 +-
 main.py                                       |   1 -
 7 files changed, 733 insertions(+), 68 deletions(-)
 create mode 100644 crazy_functions/agent_fns/bridge_autogen.py

diff --git a/crazy_functional.py b/crazy_functional.py
index 2d7fa74b1e..155fc76c5e 100644
--- a/crazy_functional.py
+++ b/crazy_functional.py
@@ -539,18 +539,18 @@
     except:
         print('Load function plugin failed')
 
-    # try:
-    #     from crazy_functions.多智能体 import 多智能体终端
-    #     function_plugins.update({
-    #         "多智能体终端(微软AutoGen)": {
-    #             "Group": "智能体",
-    #             "Color": "stop",
-    #             "AsButton": True,
-    #             "Function": HotReload(多智能体终端)
-    #         }
-    #     })
-    # except:
-    #     print('Load function plugin failed')
+    try:
+        from crazy_functions.多智能体 import 多智能体终端
+        function_plugins.update({
+            "多智能体终端(微软AutoGen)": {
+                "Group": "智能体",
+                "Color": "stop",
+                "AsButton": True,
+                "Function": HotReload(多智能体终端)
+            }
+        })
+    except:
+        print('Load function plugin failed')
 
     # try:
     #     from crazy_functions.chatglm微调工具 import 微调数据集生成
diff --git a/crazy_functions/agent_fns/auto_agent.py b/crazy_functions/agent_fns/auto_agent.py
index 16ca29598b..f6a2832c36 100644
--- a/crazy_functions/agent_fns/auto_agent.py
+++ b/crazy_functions/agent_fns/auto_agent.py
@@ -3,6 +3,8 @@
 from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom
 from crazy_functions.agent_fns.general import AutoGenGeneral
 import time
+from autogen import AssistantAgent, UserProxyAgent
+
 
 class AutoGenMath(AutoGenGeneral):
diff --git a/crazy_functions/agent_fns/bridge_autogen.py b/crazy_functions/agent_fns/bridge_autogen.py
new file mode 100644
index 0000000000..5bf4aacd51
--- /dev/null
+++ b/crazy_functions/agent_fns/bridge_autogen.py
@@ -0,0 +1,584 @@
+from time import sleep
+import logging
+import time
+from typing import List, Optional, Dict, Callable, Union
+import sys
+import shutil
+import numpy as np
+from flaml import tune, BlendSearch
+from flaml.tune.space import is_constant
+from flaml.automl.logger import logger_formatter
+from collections import defaultdict
+
+try:
+    import openai
+    from openai.error import (
+        ServiceUnavailableError,
+        RateLimitError,
+        APIError,
+        InvalidRequestError,
+        APIConnectionError,
+        Timeout,
+        AuthenticationError,
+    )
+    from openai import Completion as openai_Completion
+    import diskcache
+
+    ERROR = None
+except ImportError:
+    ERROR = ImportError("please install openai and diskcache to use the autogen.oai subpackage.")
+    openai_Completion = object
+logger = logging.getLogger(__name__)
+if not logger.handlers:
+    # Add the console handler.
+    _ch = logging.StreamHandler(stream=sys.stdout)
+    _ch.setFormatter(logger_formatter)
+    logger.addHandler(_ch)
+
+
+class Completion(openai_Completion):
+    """A class for OpenAI completion API.
+
+    It also supports: ChatCompletion, Azure OpenAI API.
+    """
+
+    # set of models that support chat completion
+    chat_models = {
+        "gpt-3.5-turbo",
+        "gpt-3.5-turbo-0301",  # deprecate in Sep
+        "gpt-3.5-turbo-0613",
+        "gpt-3.5-turbo-16k",
+        "gpt-3.5-turbo-16k-0613",
+        "gpt-35-turbo",
+        "gpt-35-turbo-16k",
+        "gpt-4",
+        "gpt-4-32k",
+        "gpt-4-32k-0314",  # deprecate in Sep
+        "gpt-4-0314",  # deprecate in Sep
+        "gpt-4-0613",
+        "gpt-4-32k-0613",
+    }
+
+    # price per 1k tokens
+    price1K = {
+        "text-ada-001": 0.0004,
+        "text-babbage-001": 0.0005,
+        "text-curie-001": 0.002,
+        "code-cushman-001": 0.024,
+        "code-davinci-002": 0.1,
+        "text-davinci-002": 0.02,
+        "text-davinci-003": 0.02,
+        "gpt-3.5-turbo": (0.0015, 0.002),
+        "gpt-3.5-turbo-instruct": (0.0015, 0.002),
+        "gpt-3.5-turbo-0301": (0.0015, 0.002),  # deprecate in Sep
+        "gpt-3.5-turbo-0613": (0.0015, 0.002),
+        "gpt-3.5-turbo-16k": (0.003, 0.004),
+        "gpt-3.5-turbo-16k-0613": (0.003, 0.004),
+        "gpt-35-turbo": (0.0015, 0.002),
+        "gpt-35-turbo-16k": (0.003, 0.004),
+        "gpt-35-turbo-instruct": (0.0015, 0.002),
+        "gpt-4": (0.03, 0.06),
+        "gpt-4-32k": (0.06, 0.12),
+        "gpt-4-0314": (0.03, 0.06),  # deprecate in Sep
+        "gpt-4-32k-0314": (0.06, 0.12),  # deprecate in Sep
+        "gpt-4-0613": (0.03, 0.06),
+        "gpt-4-32k-0613": (0.06, 0.12),
+    }
+
+    default_search_space = {
+        "model": tune.choice(
+            [
+                "text-ada-001",
+                "text-babbage-001",
+                "text-davinci-003",
+                "gpt-3.5-turbo",
+                "gpt-4",
+            ]
+        ),
+        "temperature_or_top_p": tune.choice(
+            [
+                {"temperature": tune.uniform(0, 2)},
+                {"top_p": tune.uniform(0, 1)},
+            ]
+        ),
+        "max_tokens": tune.lograndint(50, 1000),
+        "n": tune.randint(1, 100),
+        "prompt": "{prompt}",
+    }
+
+    seed = 41
+    cache_path = f".cache/{seed}"
+    # retry after this many seconds
+    retry_wait_time = 10
+    # fail a request after hitting RateLimitError for this many seconds
+    max_retry_period = 120
+    # time out for request to openai server
+    request_timeout = 60
+
+    openai_completion_class = not ERROR and openai.Completion
+    _total_cost = 0
+    optimization_budget = None
+
+    _history_dict = _count_create = None
+
+    @classmethod
+    def set_cache(cls, seed: Optional[int] = 41, cache_path_root: Optional[str] = ".cache"):
+        """Set cache path.
+
+        Args:
+            seed (int, Optional): The integer identifier for the pseudo seed.
+                Results corresponding to different seeds will be cached in different places.
+            cache_path_root (str, Optional): The root path for the cache.
+                The complete cache path will be {cache_path_root}/{seed}.
+        """
+        cls.seed = seed
+        cls.cache_path = f"{cache_path_root}/{seed}"
+
+    @classmethod
+    def clear_cache(cls, seed: Optional[int] = None, cache_path_root: Optional[str] = ".cache"):
+        """Clear cache.
+
+        Args:
+            seed (int, Optional): The integer identifier for the pseudo seed.
+                If omitted, all caches under cache_path_root will be cleared.
+            cache_path_root (str, Optional): The root path for the cache.
+                The complete cache path will be {cache_path_root}/{seed}.
+        """
+        if seed is None:
+            shutil.rmtree(cache_path_root, ignore_errors=True)
+            return
+        with diskcache.Cache(f"{cache_path_root}/{seed}") as cache:
+            cache.clear()
+
+    @classmethod
+    def _book_keeping(cls, config: Dict, response):
+        """Book keeping for the created completions."""
+        if response != -1 and "cost" not in response:
+            response["cost"] = cls.cost(response)
+        if cls._history_dict is None:
+            return
+        if cls._history_compact:
+            value = {
+                "created_at": [],
+                "cost": [],
+                "token_count": [],
+            }
+            if "messages" in config:
+                messages = config["messages"]
+                if len(messages) > 1 and messages[-1]["role"] != "assistant":
+                    existing_key = get_key(messages[:-1])
+                    value = cls._history_dict.pop(existing_key, value)
+                key = get_key(messages + [choice["message"] for choice in response["choices"]])
+            else:
+                key = get_key([config["prompt"]] + [choice.get("text") for choice in response["choices"]])
+            value["created_at"].append(cls._count_create)
+            value["cost"].append(response["cost"])
+            value["token_count"].append(
+                {
+                    "model": response["model"],
+                    "prompt_tokens": response["usage"]["prompt_tokens"],
+                    "completion_tokens": response["usage"].get("completion_tokens", 0),
+                    "total_tokens": response["usage"]["total_tokens"],
+                }
+            )
+            cls._history_dict[key] = value
+            cls._count_create += 1
+            return
+        cls._history_dict[cls._count_create] = {
+            "request": config,
+            "response": response.to_dict_recursive(),
+        }
+        cls._count_create += 1
+
+    @classmethod
+    def _get_response(cls, config: Dict, raise_on_ratelimit_or_timeout=False, use_cache=True):
+        """Get the response from the openai api call.
+
+        Try cache first. If not found, call the openai api. If the api call fails, retry after retry_wait_time.
+        """
+        config = config.copy()
+
+    @classmethod
+    def _get_max_valid_n(cls, key, max_tokens):
+        # find the max value in max_valid_n_per_max_tokens
+        # whose key is equal or larger than max_tokens
+        return max(
+            (value for k, value in cls._max_valid_n_per_max_tokens.get(key, {}).items() if k >= max_tokens),
+            default=1,
+        )
+
+    @classmethod
+    def _get_min_invalid_n(cls, key, max_tokens):
+        # find the min value in min_invalid_n_per_max_tokens
+        # whose key is equal or smaller than max_tokens
+        return min(
+            (value for k, value in cls._min_invalid_n_per_max_tokens.get(key, {}).items() if k <= max_tokens),
+            default=None,
+        )
+
+    @classmethod
+    def _get_region_key(cls, config):
+        # get a key for the valid/invalid region corresponding to the given config
+        config = cls._pop_subspace(config, always_copy=False)
+        return (
+            config["model"],
+            config.get("prompt", config.get("messages")),
+            config.get("stop"),
+        )
+
+    @classmethod
+    def _update_invalid_n(cls, prune, region_key, max_tokens, num_completions):
+        if prune:
+            # update invalid n and prune this config
+            cls._min_invalid_n_per_max_tokens[region_key] = invalid_n = cls._min_invalid_n_per_max_tokens.get(
+                region_key, {}
+            )
+            invalid_n[max_tokens] = min(num_completions, invalid_n.get(max_tokens, np.inf))
+
+    @classmethod
+    def _pop_subspace(cls, config, always_copy=True):
+        if "subspace" in config:
+            config = config.copy()
+            config.update(config.pop("subspace"))
+        return config.copy() if always_copy else config
+
+    @classmethod
+    def _get_params_for_create(cls, config: Dict) -> Dict:
+        """Get the params for the openai api call from a config in the search space."""
+        params = cls._pop_subspace(config)
+        if cls._prompts:
+            params["prompt"] = cls._prompts[config["prompt"]]
+        else:
+            params["messages"] = cls._messages[config["messages"]]
+        if "stop" in params:
+            params["stop"] = cls._stops and cls._stops[params["stop"]]
+        temperature_or_top_p = params.pop("temperature_or_top_p", None)
+        if temperature_or_top_p:
+            params.update(temperature_or_top_p)
+        if cls._config_list and "config_list" not in params:
+            params["config_list"] = cls._config_list
+        return params
+
+    @classmethod
+    def create(
+        cls,
+        context: Optional[Dict] = None,
+        use_cache: Optional[bool] = True,
+        config_list: Optional[List[Dict]] = None,
+        filter_func: Optional[Callable[[Dict, Dict, Dict], bool]] = None,
+        raise_on_ratelimit_or_timeout: Optional[bool] = True,
+        allow_format_str_template: Optional[bool] = False,
+        **config,
+    ):
+        """Make a completion for a given context.
+
+        Args:
+            context (Dict, Optional): The context to instantiate the prompt.
+                It needs to contain keys that are used by the prompt template or the filter function.
+                E.g., `prompt="Complete the following sentence: {prefix}, context={"prefix": "Today I feel"}`.
+                The actual prompt will be:
+                "Complete the following sentence: Today I feel".
+                More examples can be found at [templating](https://microsoft.github.io/autogen/docs/Use-Cases/enhanced_inference#templating).
+            use_cache (bool, Optional): Whether to use cached responses.
+            config_list (List, Optional): List of configurations for the completion to try.
+                The first one that does not raise an error will be used.
+                Only the differences from the default config need to be provided.
+                E.g.,
+
+        ```python
+        response = oai.Completion.create(
+            config_list=[
+                {
+                    "model": "gpt-4",
+                    "api_key": os.environ.get("AZURE_OPENAI_API_KEY"),
+                    "api_type": "azure",
+                    "api_base": os.environ.get("AZURE_OPENAI_API_BASE"),
+                    "api_version": "2023-03-15-preview",
+                },
+                {
+                    "model": "gpt-3.5-turbo",
+                    "api_key": os.environ.get("OPENAI_API_KEY"),
+                    "api_type": "open_ai",
+                    "api_base": "https://api.openai.com/v1",
+                },
+                {
+                    "model": "llama-7B",
+                    "api_base": "http://127.0.0.1:8080",
+                    "api_type": "open_ai",
+                }
+            ],
+            prompt="Hi",
+        )
+        ```
+
+            filter_func (Callable, Optional): A function that takes in the context, the config and the response and returns a boolean to indicate whether the response is valid. E.g.,
+
+        ```python
+        def yes_or_no_filter(context, config, response):
+            return context.get("yes_or_no_choice", False) is False or any(
+                text in ["Yes.", "No."] for text in oai.Completion.extract_text(response)
+            )
+        ```
+
+            raise_on_ratelimit_or_timeout (bool, Optional): Whether to raise RateLimitError or Timeout when all configs fail.
+                When set to False, -1 will be returned when all configs fail.
+            allow_format_str_template (bool, Optional): Whether to allow format string template in the config.
+            **config: Configuration for the openai API call. This is used as parameters for calling openai API.
+                The "prompt" or "messages" parameter can contain a template (str or Callable) which will be instantiated with the context.
+                Besides the parameters for the openai API call, it can also contain:
+                - `max_retry_period` (int): the total time (in seconds) allowed for retrying failed requests.
+                - `retry_wait_time` (int): the time interval to wait (in seconds) before retrying a failed request.
+                - `seed` (int) for the cache. This is useful when implementing "controlled randomness" for the completion.
+
+        Returns:
+            Responses from OpenAI API, with additional fields.
+            - `cost`: the total cost.
+            When `config_list` is provided, the response will contain a few more fields:
+            - `config_id`: the index of the config in the config_list that is used to generate the response.
+            - `pass_filter`: whether the response passes the filter function. None if no filter is provided.
+        """
+        if ERROR:
+            raise ERROR
+        config_list = [
+            {
+                "model": "llama-7B",
+                "api_base": "http://127.0.0.1:8080",
+                "api_type": "open_ai",
+            }
+        ]
+        last = len(config_list) - 1
+        cost = 0
+        for i, each_config in enumerate(config_list):
+            base_config = config.copy()
+            base_config["allow_format_str_template"] = allow_format_str_template
+            base_config.update(each_config)
+            if i < last and filter_func is None and "max_retry_period" not in base_config:
+                # max_retry_period = 0 to avoid retrying when no filter is given
+                base_config["max_retry_period"] = 0
+            try:
+                response = cls.create(
+                    context,
+                    use_cache,
+                    raise_on_ratelimit_or_timeout=i < last or raise_on_ratelimit_or_timeout,
+                    **base_config,
+                )
+                if response == -1:
+                    return response
+                pass_filter = filter_func is None or filter_func(
+                    context=context, base_config=config, response=response
+                )
+                if pass_filter or i == last:
+                    response["cost"] = cost + response["cost"]
+                    response["config_id"] = i
+                    response["pass_filter"] = pass_filter
+                    return response
+                cost += response["cost"]
+            except (AuthenticationError, RateLimitError, Timeout, InvalidRequestError):
+                logger.debug(f"failed with config {i}", exc_info=1)
+                if i == last:
+                    raise
+
+        params = cls._construct_params(context, config, allow_format_str_template=allow_format_str_template)
+        if not use_cache:
+            return cls._get_response(
+                params, raise_on_ratelimit_or_timeout=raise_on_ratelimit_or_timeout, use_cache=False
+            )
+        seed = cls.seed
+        if "seed" in params:
+            cls.set_cache(params.pop("seed"))
+        with diskcache.Cache(cls.cache_path) as cls._cache:
+            cls.set_cache(seed)
+            return cls._get_response(params, raise_on_ratelimit_or_timeout=raise_on_ratelimit_or_timeout)
+
+    @classmethod
+    def instantiate(
+        cls,
+        template: Union[str, None],
+        context: Optional[Dict] = None,
+        allow_format_str_template: Optional[bool] = False,
+    ):
+        if not context or template is None:
+            return template
+        if isinstance(template, str):
+            return template.format(**context) if allow_format_str_template else template
+        return template(context)
+
+    @classmethod
+    def _construct_params(cls, context, config, prompt=None, messages=None, allow_format_str_template=False):
+        params = config.copy()
+        model = config["model"]
+        prompt = config.get("prompt") if prompt is None else prompt
+        messages = config.get("messages") if messages is None else messages
+        # either "prompt" should be in config (for being compatible with non-chat models)
+        # or "messages" should be in config (for tuning chat models only)
+        if prompt is None and (model in cls.chat_models or issubclass(cls, ChatCompletion)):
+            if messages is None:
+                raise ValueError("Either prompt or messages should be in config for chat models.")
+        if prompt is None:
+            params["messages"] = (
+                [
+                    {
+                        **m,
+                        "content": cls.instantiate(m["content"], context, allow_format_str_template),
+                    }
+                    if m.get("content")
+                    else m
+                    for m in messages
+                ]
+                if context
+                else messages
+            )
+        elif model in cls.chat_models or issubclass(cls, ChatCompletion):
+            # convert prompt to messages
+            params["messages"] = [
+                {
+                    "role": "user",
+                    "content": cls.instantiate(prompt, context, allow_format_str_template),
+                },
+            ]
+            params.pop("prompt", None)
+        else:
+            params["prompt"] = cls.instantiate(prompt, context, allow_format_str_template)
+        return params
+
+    @classmethod
+    def extract_text(cls, response: dict) -> List[str]:
+        """Extract the text from a completion or chat response.
+
+        Args:
+            response (dict): The response from OpenAI API.
+
+        Returns:
+            A list of text in the responses.
+        """
+        choices = response["choices"]
+        if "text" in choices[0]:
+            return [choice["text"] for choice in choices]
+        return [choice["message"].get("content", "") for choice in choices]
+
+    @classmethod
+    def extract_text_or_function_call(cls, response: dict) -> List[str]:
+        """Extract the text or function calls from a completion or chat response.
+
+        Args:
+            response (dict): The response from OpenAI API.
+
+        Returns:
+            A list of text or function calls in the responses.
+        """
+        choices = response["choices"]
+        if "text" in choices[0]:
+            return [choice["text"] for choice in choices]
+        return [
+            choice["message"] if "function_call" in choice["message"] else choice["message"].get("content", "")
+            for choice in choices
+        ]
+
+    @classmethod
+    @property
+    def logged_history(cls) -> Dict:
+        """Return the book keeping dictionary."""
+        return cls._history_dict
+
+    @classmethod
+    def print_usage_summary(cls) -> Dict:
+        """Return the usage summary."""
+        if cls._history_dict is None:
+            print("No usage summary available.", flush=True)
+
+        token_count_summary = defaultdict(lambda: {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0})
+
+        if not cls._history_compact:
+            source = cls._history_dict.values()
+            total_cost = sum(msg_pair["response"]["cost"] for msg_pair in source)
+        else:
+            # source = cls._history_dict["token_count"]
+            # total_cost = sum(cls._history_dict['cost'])
+            total_cost = sum(sum(value_list["cost"]) for value_list in cls._history_dict.values())
+            source = (
+                token_data for value_list in cls._history_dict.values() for token_data in value_list["token_count"]
+            )
+
+        for entry in source:
+            if not cls._history_compact:
+                model = entry["response"]["model"]
+                token_data = entry["response"]["usage"]
+            else:
+                model = entry["model"]
+                token_data = entry
+
+            token_count_summary[model]["prompt_tokens"] += token_data["prompt_tokens"]
+            token_count_summary[model]["completion_tokens"] += token_data["completion_tokens"]
+            token_count_summary[model]["total_tokens"] += token_data["total_tokens"]
+
+        print(f"Total cost: {total_cost}", flush=True)
+        for model, counts in token_count_summary.items():
+            print(
+                f"Token count summary for model {model}: prompt_tokens: {counts['prompt_tokens']}, completion_tokens: {counts['completion_tokens']}, total_tokens: {counts['total_tokens']}",
+                flush=True,
+            )
+
+    @classmethod
+    def start_logging(
+        cls, history_dict: Optional[Dict] = None, compact: Optional[bool] = True, reset_counter: Optional[bool] = True
+    ):
+        """Start book keeping.
+
+        Args:
+            history_dict (Dict): A dictionary for book keeping.
+                If not provided, a new one will be created.
+            compact (bool): Whether to keep the history dictionary compact.
+                Compact history contains one key per conversation, and the value is a dictionary
+                like:
+        ```python
+        {
+            "create_at": [0, 1],
+            "cost": [0.1, 0.2],
+        }
+        ```
+                where "created_at" is the index of API calls indicating the order of all the calls,
+                and "cost" is the cost of each call. This example shows that the conversation is based
+                on two API calls. The compact format is useful for condensing the history of a conversation.
+                If compact is False, the history dictionary will contain all the API calls: the key
+                is the index of the API call, and the value is a dictionary like:
+        ```python
+        {
+            "request": request_dict,
+            "response": response_dict,
+        }
+        ```
+                where request_dict is the request sent to OpenAI API, and response_dict is the response.
+                For a conversation containing two API calls, the non-compact history dictionary will be like:
+        ```python
+        {
+            0: {
+                "request": request_dict_0,
+                "response": response_dict_0,
+            },
+            1: {
+                "request": request_dict_1,
+                "response": response_dict_1,
+            },
+        ```
+                The first request's messages plus the response is equal to the second request's messages.
+                For a conversation with many turns, the non-compact history dictionary has a quadratic size
+                while the compact history dict has a linear size.
+            reset_counter (bool): whether to reset the counter of the number of API calls.
+        """
+        cls._history_dict = {} if history_dict is None else history_dict
+        cls._history_compact = compact
+        cls._count_create = 0 if reset_counter or cls._count_create is None else cls._count_create
+
+    @classmethod
+    def stop_logging(cls):
+        """End book keeping."""
+        cls._history_dict = cls._count_create = None
+
+
+class ChatCompletion(Completion):
+    """A class for OpenAI API ChatCompletion. Share the same API as Completion."""
+
+    default_search_space = Completion.default_search_space.copy()
+    default_search_space["model"] = tune.choice(["gpt-3.5-turbo", "gpt-4"])
+    openai_completion_class = not ERROR and openai.ChatCompletion
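[Annotation] The vendored `Completion.create` above walks `config_list` in order and only re-raises on the last entry; note that this copy also pins `config_list` to a single local `llama-7B` endpoint at the top of the method body, so a caller-supplied list is ignored. A condensed sketch of the failover pattern — `request_fn` is a hypothetical stand-in for one API attempt, and the bare `except` stands in for the `openai<1.0` error classes:

```python
# Condensed restatement of the failover loop inside Completion.create above.
def create_with_failover(config_list, request_fn):
    last = len(config_list) - 1
    for i, cfg in enumerate(config_list):
        try:
            response = request_fn(**cfg)   # one attempt per config entry
            response["config_id"] = i      # record which endpoint answered
            return response
        except Exception:                  # AuthenticationError, RateLimitError, ...
            if i == last:
                raise                      # every configured endpoint failed
```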
diff --git a/crazy_functions/agent_fns/general.py b/crazy_functions/agent_fns/general.py
index beb6d7ebc6..f0b9ce8789 100644
--- a/crazy_functions/agent_fns/general.py
+++ b/crazy_functions/agent_fns/general.py
@@ -1,23 +1,39 @@
-from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate
-from toolbox import report_execption, get_log_folder, update_ui_lastest_msg, Singleton
+from toolbox import trimmed_format_exc, get_conf, ProxyNetworkActivate
 from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom
+from request_llms.bridge_all import predict_no_ui_long_connection
 import time
 
+def gpt_academic_generate_oai_reply(
+    self,
+    messages,
+    sender,
+    config,
+):
+    from .bridge_autogen import Completion
+    llm_config = self.llm_config if config is None else config
+    if llm_config is False:
+        return False, None
+    if messages is None:
+        messages = self._oai_messages[sender]
 
-class AutoGenGeneral(PluginMultiprocessManager):
+    response = Completion.create(
+        context=messages[-1].pop("context", None), messages=self._oai_system_message + messages, **llm_config
+    )
+    return True, Completion.extract_text_or_function_call(response)[0]
 
+
+class AutoGenGeneral(PluginMultiprocessManager):
     def gpt_academic_print_override(self, user_proxy, message, sender):
-        # ⭐⭐ 子进程执行
-        self.child_conn.send(PipeCom("show", sender.name + '\n\n---\n\n' + message['content']))
+        # ⭐⭐ run in subprocess
+        self.child_conn.send(PipeCom("show", sender.name + "\n\n---\n\n" + message["content"]))
 
     def gpt_academic_get_human_input(self, user_proxy, message):
-        # ⭐⭐ 子进程执行
+        # ⭐⭐ run in subprocess
        patience = 300
         begin_waiting_time = time.time()
         self.child_conn.send(PipeCom("interact", message))
         while True:
             time.sleep(0.5)
-            if self.child_conn.poll(): 
+            if self.child_conn.poll():
                 wait_success = True
                 break
             if time.time() - begin_waiting_time > patience:
@@ -29,29 +45,55 @@ def gpt_academic_get_human_input(self, user_proxy, message):
         else:
             raise TimeoutError("等待用户输入超时")
 
+    # def gpt_academic_generate_oai_reply(self, agent, messages, sender, config):
+    #     from .bridge_autogen import Completion
+    #     if messages is None:
+    #         messages = agent._oai_messages[sender]
+
+    #     def instantiate(
+    #         cls,
+    #         template: Union[str, None],
+    #         context: Optional[Dict] = None,
+    #         allow_format_str_template: Optional[bool] = False,
+    #     ):
+    #         if not context or template is None:
+    #             return template
+    #         if isinstance(template, str):
+    #             return template.format(**context) if allow_format_str_template else template
+    #         return template(context)
+
+    #     res = predict_no_ui_long_connection(
+    #         messages[-1].pop("context", None),
+    #         llm_kwargs=self.llm_kwargs,
+    #         history=messages,
+    #         sys_prompt=agent._oai_system_message,
+    #         observe_window=None,
+    #         console_slience=False)
+    #     return True, res
+
     def define_agents(self):
         raise NotImplementedError
 
-    def do_audogen(self, input):
-        # ⭐⭐ 子进程执行
+    def exe_autogen(self, input):
+        # ⭐⭐ run in subprocess
         input = input.content
         with ProxyNetworkActivate("AutoGen"):
-            config_list = self.get_config_list()
-            code_execution_config={"work_dir": self.autogen_work_dir, "use_docker":self.use_docker}
+            code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker}
             agents = self.define_agents()
             user_proxy = None
             assistant = None
             for agent_kwargs in agents:
                 agent_cls = agent_kwargs.pop('cls')
                 kwargs = {
-                    'llm_config':{
-                        "config_list": config_list,
-                    },
+                    'llm_config':{},
                     'code_execution_config':code_execution_config
                 }
                 kwargs.update(agent_kwargs)
                 agent_handle = agent_cls(**kwargs)
                 agent_handle._print_received_message = lambda a,b: self.gpt_academic_print_override(agent_kwargs, a, b)
+                for d in agent_handle._reply_func_list:
+                    if hasattr(d['reply_func'],'__name__') and d['reply_func'].__name__ == 'generate_oai_reply':
+                        d['reply_func'] = gpt_academic_generate_oai_reply
                 if agent_kwargs['name'] == 'user_proxy':
                     agent_handle.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a)
                     user_proxy = agent_handle
@@ -63,23 +105,45 @@ def do_audogen(self, input):
             tb_str = '```\n' + trimmed_format_exc() + '```'
             self.child_conn.send(PipeCom("done", "AutoGen 执行失败: \n\n" + tb_str))
 
-    def get_config_list(self):
-        model = self.llm_kwargs['llm_model']
-        api_base = None
-        if self.llm_kwargs['llm_model'].startswith('api2d-'):
-            model = self.llm_kwargs['llm_model'][len('api2d-'):]
-            api_base = "https://openai.api2d.net/v1"
-        config_list = [{
-            'model': model,
-            'api_key': self.llm_kwargs['api_key'],
-        },]
-        if api_base is not None:
-            config_list[0]['api_base'] = api_base
-        return config_list
-
     def subprocess_worker(self, child_conn):
-        # ⭐⭐ 子进程执行
+        # ⭐⭐ run in subprocess
         self.child_conn = child_conn
         while True:
-            msg = self.child_conn.recv() # PipeCom
-            self.do_audogen(msg)
+            msg = self.child_conn.recv()  # PipeCom
+            self.exe_autogen(msg)
+
+
+class AutoGenGroupChat(AutoGenGeneral):
+    def exe_autogen(self, input):
+        # ⭐⭐ run in subprocess
+        import autogen
+
+        input = input.content
+        with ProxyNetworkActivate("AutoGen"):
+            code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker}
+            agents = self.define_agents()
+            agents_instances = []
+            for agent_kwargs in agents:
+                agent_cls = agent_kwargs.pop("cls")
+                kwargs = {"code_execution_config": code_execution_config}
+                kwargs.update(agent_kwargs)
+                agent_handle = agent_cls(**kwargs)
+                agent_handle._print_received_message = lambda a, b: self.gpt_academic_print_override(agent_kwargs, a, b)
+                agents_instances.append(agent_handle)
+                if agent_kwargs["name"] == "user_proxy":
+                    user_proxy = agent_handle
+                    user_proxy.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a)
+            try:
+                groupchat = autogen.GroupChat(agents=agents_instances, messages=[], max_round=50)
+                manager = autogen.GroupChatManager(groupchat=groupchat, **self.define_group_chat_manager_config())
+                manager._print_received_message = lambda a, b: self.gpt_academic_print_override(agent_kwargs, a, b)
+                manager.get_human_input = lambda a: self.gpt_academic_get_human_input(manager, a)
+                if user_proxy is None:
+                    raise Exception("user_proxy is not defined")
+                user_proxy.initiate_chat(manager, message=input)
+            except Exception:
+                tb_str = "```\n" + trimmed_format_exc() + "```"
+                self.child_conn.send(PipeCom("done", "AutoGen exe failed: \n\n" + tb_str))
+
+    def define_group_chat_manager_config(self):
+        raise NotImplementedError
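[Annotation] The key move in the rewritten `general.py` is the monkey-patch inside `exe_autogen`: each autogen agent's registered `generate_oai_reply` hook is swapped for the module-level `gpt_academic_generate_oai_reply`, so agent completions route through the project's own bridge rather than autogen's `oai` client. The patching step, isolated for clarity (grounded directly in the loop in the diff):

```python
# The reply-function swap performed in exe_autogen above, extracted as a helper.
def reroute_agent_replies(agent_handle, replacement):
    for d in agent_handle._reply_func_list:          # autogen keeps its reply hooks here
        fn = d.get("reply_func")
        if getattr(fn, "__name__", "") == "generate_oai_reply":
            d["reply_func"] = replacement            # e.g. gpt_academic_generate_oai_reply
```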
diff --git a/crazy_functions/agent_fns/pipe.py b/crazy_functions/agent_fns/pipe.py
index 5ebe3fc620..680e91c944 100644
--- a/crazy_functions/agent_fns/pipe.py
+++ b/crazy_functions/agent_fns/pipe.py
@@ -2,28 +2,28 @@
 from crazy_functions.agent_fns.watchdog import WatchDog
 import time, os
 
-class PipeCom():
+class PipeCom:
     def __init__(self, cmd, content) -> None:
         self.cmd = cmd
         self.content = content
 
 
-class PluginMultiprocessManager():
+class PluginMultiprocessManager:
     def __init__(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-        # ⭐ 主进程
-        self.autogen_work_dir = os.path.join(get_log_folder('autogen'), gen_time_str())
+        # ⭐ run in main process
+        self.autogen_work_dir = os.path.join(get_log_folder("autogen"), gen_time_str())
         self.previous_work_dir_files = {}
         self.llm_kwargs = llm_kwargs
         self.plugin_kwargs = plugin_kwargs
         self.chatbot = chatbot
         self.history = history
         self.system_prompt = system_prompt
-        self.web_port = web_port
+        # self.web_port = web_port
         self.alive = True
-        self.use_docker = get_conf('AUTOGEN_USE_DOCKER')
+        self.use_docker = get_conf("AUTOGEN_USE_DOCKER")
 
         # create a thread to monitor self.heartbeat, terminate the instance if no heartbeat for a long time
-        timeout_seconds = 5*60
+        timeout_seconds = 5 * 60
         self.heartbeat_watchdog = WatchDog(timeout=timeout_seconds, bark_fn=self.terminate, interval=5)
         self.heartbeat_watchdog.begin_watch()
 
@@ -35,8 +35,9 @@ def is_alive(self):
         return self.alive
 
     def launch_subprocess_with_pipe(self):
-        # ⭐ 主进程
+        # ⭐ run in main process
         from multiprocessing import Process, Pipe
+
         parent_conn, child_conn = Pipe()
         self.p = Process(target=self.subprocess_worker, args=(child_conn,))
         self.p.daemon = True
@@ -46,14 +47,14 @@ def launch_subprocess_with_pipe(self):
     def terminate(self):
         self.p.terminate()
         self.alive = False
-        print('[debug] instance terminated')
+        print("[debug] instance terminated")
 
     def subprocess_worker(self, child_conn):
-        # ⭐⭐ 子进程
+        # ⭐⭐ run in subprocess
         raise NotImplementedError
 
     def send_command(self, cmd):
-        # ⭐ 主进程
+        # ⭐ run in main process
         self.parent_conn.send(PipeCom("user_input", cmd))
 
     def immediate_showoff_when_possible(self, fp):
@@ -63,7 +64,10 @@ def immediate_showoff_when_possible(self, fp):
         # 如果是文本文件, 则直接显示文本内容
         if file_type.lower() in ['png', 'jpg']:
             image_path = os.path.abspath(fp)
-            self.chatbot.append(['检测到新生图像:', f'本地文件预览: '])
+            self.chatbot.append([
+                '检测到新生图像:',
+                f'本地文件预览: '
+            ])
             yield from update_ui(chatbot=self.chatbot, history=self.history)
 
     def overwatch_workdir_file_change(self):
@@ -78,7 +82,7 @@ def overwatch_workdir_file_change(self):
                 file_path = os.path.join(root, file)
                 if file_path not in self.previous_work_dir_files.keys():
                     last_modified_time = os.stat(file_path).st_mtime
-                    self.previous_work_dir_files.update({file_path:last_modified_time})
+                    self.previous_work_dir_files.update({file_path: last_modified_time})
                     change_list.append(file_path)
                 else:
                     last_modified_time = os.stat(file_path).st_mtime
@@ -86,8 +90,8 @@ def overwatch_workdir_file_change(self):
                         self.previous_work_dir_files[file_path] = last_modified_time
                         change_list.append(file_path)
         if len(change_list) > 0:
-            file_links = ''
-            for f in change_list: 
+            file_links = ""
+            for f in change_list:
                 res = promote_file_to_downloadzone(f)
                 file_links += f' {res}'
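[Annotation] `pipe.py` keeps the Gradio process and the agent subprocess decoupled through `PipeCom` messages over a `multiprocessing.Pipe`. The command vocabulary visible in this diff is small; a hedged summary of the dispatch in `main_process_ui_control` (simplified, not verbatim project code):

```python
# Hedged summary of the PipeCom protocol handled in main_process_ui_control.
def handle(msg, chatbot):                  # msg: a PipeCom from parent_conn.recv()
    if msg.cmd == "show":                  # stream an intermediate agent message
        chatbot.append(["运行阶段", msg.content])
    elif msg.cmd == "interact":            # block until the user answers via send_command
        chatbot.append(["等待您的进一步指令", msg.content])
    elif msg.cmd == "done":                # final answer: display it, then terminate the child
        chatbot.append(["结束", msg.content])
```

The child side speaks the same vocabulary in reverse: `subprocess_worker` blocks on `recv()` for `user_input` commands and emits `show`/`interact`/`done`.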
                yield from self.immediate_showoff_when_possible(f)
@@ -117,19 +121,25 @@ def main_process_ui_control(self, txt, create_or_resume) -> str:
                 # the heartbeat watchdog might have it killed
                 self.terminate()
                 return "terminate"
-            if self.parent_conn.poll(): 
+            if self.parent_conn.poll():
                 self.feed_heartbeat_watchdog()
+                if "[GPT-Academic] 等待中" in self.chatbot[-1][-1]:
+                    self.chatbot.pop(-1)  # remove the last line
+                if "等待您的进一步指令" in self.chatbot[-1][-1]:
+                    self.chatbot.pop(-1)  # remove the last line
                 if '[GPT-Academic] 等待中' in self.chatbot[-1][-1]:
                     self.chatbot.pop(-1)  # remove the last line
                 msg = self.parent_conn.recv()  # PipeCom
                 if msg.cmd == "done":
-                    self.chatbot.append([f"结束", msg.content]); self.cnt += 1
+                    self.chatbot.append([f"结束", msg.content])
+                    self.cnt += 1
                     yield from update_ui(chatbot=self.chatbot, history=self.history)
-                    self.terminate(); break
+                    self.terminate()
+                    break
                 if msg.cmd == "show":
                     yield from self.overwatch_workdir_file_change()
-                    self.chatbot.append([f"运行阶段-{self.cnt}", msg.content]); self.cnt += 1
+                    self.chatbot.append([f"运行阶段-{self.cnt}", msg.content])
+                    self.cnt += 1
                     yield from update_ui(chatbot=self.chatbot, history=self.history)
                 if msg.cmd == "interact":
                     yield from self.overwatch_workdir_file_change()
@@ -159,13 +169,13 @@ def main_process_ui_control(self, txt, create_or_resume) -> str:
         return "terminate"
 
     def subprocess_worker_wait_user_feedback(self, wait_msg="wait user feedback"):
-        # ⭐⭐ 子进程
+        # ⭐⭐ run in subprocess
         patience = 5 * 60
         begin_waiting_time = time.time()
         self.child_conn.send(PipeCom("interact", wait_msg))
         while True:
             time.sleep(0.5)
-            if self.child_conn.poll(): 
+            if self.child_conn.poll():
                 wait_success = True
                 break
             if time.time() - begin_waiting_time > patience:
@@ -173,4 +183,3 @@ def subprocess_worker_wait_user_feedback(self, wait_msg="wait user feedback"):
                 wait_success = False
                 break
         return wait_success
-
diff --git "a/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" "b/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py"
index 99b3e86b60..8a530f175c 100644
--- "a/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py"
+++ "b/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py"
@@ -32,8 +32,15 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
     web_port        当前软件运行的端口号
     """
     # 检查当前的模型是否符合要求
-    supported_llms = ['gpt-3.5-turbo-16k', 'gpt-4', 'gpt-4-32k',
-                      'api2d-gpt-3.5-turbo-16k', 'api2d-gpt-4']
+    supported_llms = [
+        "gpt-3.5-turbo-16k",
+        "gpt-4",
+        "gpt-4-32k",
+        "azure-gpt-3.5-turbo-16k",
+        "azure-gpt-3.5-16k",
+        "azure-gpt-4",
+        "azure-gpt-4-32k",
+    ]
     llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
     if llm_kwargs['llm_model'] not in supported_llms:
         chatbot.append([f"处理任务: {txt}", f"当前插件只支持{str(supported_llms)}, 当前模型{llm_kwargs['llm_model']}."])
diff --git a/main.py b/main.py
index a621deb1a1..d754216ad6 100644
--- a/main.py
+++ b/main.py
@@ -1,6 +1,5 @@
 import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
 import pickle
-import codecs
 import base64
 
 def main():
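[Annotation] Closing out PATCH 035: `PluginMultiprocessManager.__init__` arms a five-minute heartbeat guard, `WatchDog(timeout=5 * 60, bark_fn=self.terminate, interval=5)` — if the front end stops feeding the watchdog, the agent subprocess is torn down. The patch does not show `crazy_functions/agent_fns/watchdog.py` itself, so the class below is only an assumed shape consistent with that call site:

```python
# Assumed shape of the WatchDog used above (the real implementation is not in this patch).
import threading, time

class WatchDog:
    def __init__(self, timeout, bark_fn, interval=5):
        self.last_feed, self.timeout = time.time(), timeout
        self.bark_fn, self.interval = bark_fn, interval

    def feed(self):                       # called via feed_heartbeat_watchdog
        self.last_feed = time.time()

    def begin_watch(self):
        def _watch():
            while time.time() - self.last_feed <= self.timeout:
                time.sleep(self.interval)
            self.bark_fn()                # no heartbeat for too long -> terminate
        threading.Thread(target=_watch, daemon=True).start()
```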
From 996057e588e54df709fcfbee9f5bacbc5358c049 Mon Sep 17 00:00:00 2001
From: binary-husky
Date: Tue, 7 Nov 2023 15:41:04 +0800
Subject: [PATCH 036/117] support chatglm3

---
 config.py                                          |  6 +++++-
 ...257\255\350\250\200\346\250\241\345\236\213.py" |  7 ++++---
 main.py                                            |  4 ++--
 request_llms/local_llm_class.py                    | 14 +++++++-------
 version                                            |  2 +-
 5 files changed, 19 insertions(+), 14 deletions(-)

diff --git a/config.py b/config.py
index 06840dd8f0..f578aa853e 100644
--- a/config.py
+++ b/config.py
@@ -90,11 +90,15 @@
 AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
                     "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4",
-                    "chatglm", "moss", "newbing", "claude-2"]
+                    "chatglm3", "moss", "newbing", "claude-2"]
 # P.S. 其他可用的模型还包括 ["zhipuai", "qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random"
 #                         "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
 
+# 定义界面上“询问多个GPT模型”插件应该使用哪些模型,请从AVAIL_LLM_MODELS中选择,并在不同模型之间用`&`间隔,例如"gpt-3.5-turbo&chatglm3&azure-gpt-4"
+MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"
+
+
 # 百度千帆(LLM_MODEL="qianfan")
 BAIDU_CLOUD_API_KEY = ''
 BAIDU_CLOUD_SECRET_KEY = ''
diff --git "a/crazy_functions/\350\257\242\351\227\256\345\244\232\344\270\252\345\244\247\350\257\255\350\250\200\346\250\241\345\236\213.py" "b/crazy_functions/\350\257\242\351\227\256\345\244\232\344\270\252\345\244\247\350\257\255\350\250\200\346\250\241\345\236\213.py"
index 80e09fcdf5..4210fb2136 100644
--- "a/crazy_functions/\350\257\242\351\227\256\345\244\232\344\270\252\345\244\247\350\257\255\350\250\200\346\250\241\345\236\213.py"
+++ "b/crazy_functions/\350\257\242\351\227\256\345\244\232\344\270\252\345\244\247\350\257\255\350\250\200\346\250\241\345\236\213.py"
@@ -1,4 +1,4 @@
-from toolbox import CatchException, update_ui
+from toolbox import CatchException, update_ui, get_conf
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 import datetime
 @CatchException
@@ -13,11 +13,12 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
     web_port        当前软件运行的端口号
     """
     history = []    # 清空历史,以免输入溢出
-    chatbot.append((txt, "正在同时咨询ChatGPT和ChatGLM……"))
+    MULTI_QUERY_LLM_MODELS = get_conf('MULTI_QUERY_LLM_MODELS')
+    chatbot.append((txt, "正在同时咨询" + MULTI_QUERY_LLM_MODELS))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
 
     # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔
-    llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔
+    llm_kwargs['llm_model'] = MULTI_QUERY_LLM_MODELS # 支持任意数量的llm接口,用&符号分隔
     gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
         inputs=txt, inputs_show_user=txt,
         llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
diff --git a/main.py b/main.py
index bf84382578..a621deb1a1 100644
--- a/main.py
+++ b/main.py
@@ -433,7 +433,7 @@ def warm_up_mods(): time.sleep(4); warm_up_modules()
             server_port=PORT,
             favicon_path=os.path.join(os.path.dirname(__file__), "docs/logo.png"),
             auth=AUTHENTICATION if len(AUTHENTICATION) != 0 else None,
-            blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile","gpt_log/admin"])
+            blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"])
 
     # 如果需要在二级路径下运行
     # CUSTOM_PATH = get_conf('CUSTOM_PATH')
@@ -442,7 +442,7 @@ def warm_up_mods(): time.sleep(4); warm_up_modules()
    #     run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
     # else:
     #     demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png",
-    #                 blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"])
+    #                 blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"])
 
 if __name__ == "__main__":
     main()
diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py
index b6f49ba4ff..b6ce801ef4 100644
--- a/request_llms/local_llm_class.py
+++ b/request_llms/local_llm_class.py
@@ -5,18 +5,18 @@
 from contextlib import redirect_stdout
 from request_llms.queued_pipe import create_queue_pipe
 
-class DebugLock(object):
+class ThreadLock(object):
     def __init__(self):
         self._lock = threading.Lock()
 
     def acquire(self):
-        print("acquiring", self)
+        # print("acquiring", self)
         #traceback.print_tb
         self._lock.acquire()
-        print("acquired", self)
+        # print("acquired", self)
 
     def release(self):
-        print("released", self)
+        # print("released", self)
         #traceback.print_tb
         self._lock.release()
 
@@ -85,7 +85,7 @@ def __init__(self):
         self.is_main_process = False # state wrap for child process
         self.start()
         self.is_main_process = True # state wrap for child process
-        self.threadLock = DebugLock()
+        self.threadLock = ThreadLock()
 
     def get_state(self):
         # ⭐run in main process
@@ -159,7 +159,7 @@ def run(self):
             try:
                 for response_full in self.llm_stream_generator(**kwargs):
                     self.child.send(response_full)
-                print('debug' + response_full)
+                # print('debug' + response_full)
                 self.child.send('[Finish]')
                 # 请求处理结束,开始下一个循环
             except:
@@ -200,7 +200,7 @@ def stream_chat(self, **kwargs):
             if res.startswith(self.std_tag):
                 new_output = res[len(self.std_tag):]
                 std_out = std_out[:std_out_clip_len]
-                print(new_output, end='')
+                # print(new_output, end='')
                 std_out = new_output + std_out
                 yield self.std_tag + '\n```\n' + std_out + '\n```\n'
             elif res == '[Finish]':
diff --git a/version b/version
index 1470eb4031..f9db97e50b 100644
--- a/version
+++ b/version
@@ -1,5 +1,5 @@
 {
   "version": 3.56,
   "show_feature": true,
-  "new_feature": "支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验 <-> 新增动态代码解释器(CodeInterpreter) <-> 增加文本回答复制按钮 <-> 细分代理场合 <-> 支持动态选择不同界面主题 <-> 提高稳定性&解决多用户冲突问题 <-> 支持插件分类和更多UI皮肤外观 <-> 支持用户使用自然语言调度各个插件(虚空终端) ! <-> 改进UI,设计新主题 <-> 支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率"
+  "new_feature": "支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验"
 }
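[Annotation] The new `MULTI_QUERY_LLM_MODELS` knob introduced above is just an `&`-joined model string; the 询问多个大语言模型 plugin assigns it verbatim to `llm_kwargs['llm_model']` and the request layer fans the query out to each listed model. A minimal sketch — `fan_out` is hypothetical, but the separator convention comes straight from the config comment:

```python
MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"   # any number of models, '&'-separated

def fan_out(llm_model_field: str):
    # what the multi-model request path must do before spawning one thread per model
    return [m.strip() for m in llm_model_field.split("&") if m.strip()]

assert fan_out(MULTI_QUERY_LLM_MODELS) == ["gpt-3.5-turbo", "chatglm3"]
```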
From e9cf3d3d1219b365d813835e985ce57cb6b4217c Mon Sep 17 00:00:00 2001
From: binary-husky
Date: Tue, 7 Nov 2023 15:52:08 +0800
Subject: [PATCH 037/117] version 3.57

---
 README.md | 5 +++--
 version   | 2 +-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 5e1f2d4f75..d8b4756c2f 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
 >
 > `pip install -r requirements.txt`
 >
-> 2023.11.7: 本项目开源免费,**近期发现有人蔑视开源协议,利用本项目违法圈钱**,请各位提高警惕,谨防上当受骗。
+> 2023.11.7: 本项目开源免费,近期发现有人蔑视开源协议并利用本项目违规圈钱,请提高警惕,谨防上当受骗。
 
@@ -288,7 +288,8 @@
 ### II:版本:
-- version 3.60(todo): 优化虚空终端,引入code interpreter和更多插件
+- version 3.60(todo): 优化虚空终端,并引入AutoGen作为新一代插件的基石
+- version 3.57: 支持GLM3,星火v3,文心一言v4,修复本地模型的并发BUG
 - version 3.56: 支持动态追加基础功能按钮,新汇报PDF汇总页面
 - version 3.55: 重构前端界面,引入悬浮窗口与菜单栏
 - version 3.54: 新增动态代码解释器(Code Interpreter)(待完善)
diff --git a/version b/version
index f9db97e50b..5e4fb7d00b 100644
--- a/version
+++ b/version
@@ -1,5 +1,5 @@
 {
-  "version": 3.56,
+  "version": 3.57,
   "show_feature": true,
   "new_feature": "支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验"
 }

From 736f1214ee156a80c461cf011156f973bbd6cb56 Mon Sep 17 00:00:00 2001
From: Skyzayre <120616113+Skyzayre@users.noreply.github.com>
Date: Tue, 7 Nov 2023 15:55:23 +0800
Subject: [PATCH 038/117] Update bridge_all.py

---
 request_llm/bridge_all.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/request_llm/bridge_all.py b/request_llm/bridge_all.py
index 2c53537870..a1dd7f6c36 100644
--- a/request_llm/bridge_all.py
+++ b/request_llm/bridge_all.py
@@ -112,7 +112,7 @@ def decode(self, *args, **kwargs):
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
         "endpoint": openai_endpoint,
-        "max_token": 1024 *16,
+        "max_token": 16385,
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
@@ -148,7 +148,7 @@ def decode(self, *args, **kwargs):
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
         "endpoint": openai_endpoint,
-        "max_token": 1024 * 128,
+        "max_token": 128000,
         "tokenizer": tokenizer_gpt4,
         "token_cnt": get_token_num_gpt4,
     },

From 8d94564e675ac492068ffb9884a2df217bf7bf37 Mon Sep 17 00:00:00 2001
From: awwaawwa <8493196+awwaawwa@users.noreply.github.com>
Date: Tue, 7 Nov 2023 15:59:07 +0800
Subject: [PATCH 039/117] =?UTF-8?q?=E4=BF=AE=E6=94=B9=20gpt-3.5-turbo-16k?=
 =?UTF-8?q?=20=E7=B3=BB=E5=88=97=E6=A8=A1=E5=9E=8B=20max=5Ftoken=20?=
 =?UTF-8?q?=E4=B8=BA=2016385?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

根据 https://platform.openai.com/docs/models/gpt-3-5 ,这个16k的3.5上下文窗口其实是16385
---
 request_llms/bridge_all.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py
index 27b91c267a..69a99e9b2a 100644
--- a/request_llms/bridge_all.py
+++ b/request_llms/bridge_all.py
@@ -94,7 +94,7 @@ def decode(self, *args, **kwargs):
     "fn_with_ui": chatgpt_ui,
     "fn_without_ui": chatgpt_noui,
     "endpoint": openai_endpoint,
-    "max_token": 1024*16,
+    "max_token": 16385,
     "tokenizer": tokenizer_gpt35,
     "token_cnt": get_token_num_gpt35,
 },
@@ -112,7 +112,7 @@ def decode(self, *args, **kwargs):
     "fn_with_ui": chatgpt_ui,
     "fn_without_ui": chatgpt_noui,
     "endpoint": openai_endpoint,
-    "max_token": 1024 * 16,
+    "max_token": 16385,
     "tokenizer": tokenizer_gpt35,
     "token_cnt": get_token_num_gpt35,
 },
@@ -186,7 +186,7 @@ def decode(self, *args, **kwargs):
     "fn_with_ui": chatgpt_ui,
     "fn_without_ui": chatgpt_noui,
     "endpoint": api2d_endpoint,
-    "max_token": 1024*16,
+    "max_token": 16385,
     "tokenizer": tokenizer_gpt35,
     "token_cnt": get_token_num_gpt35,
 },

From 12df41563a3446a8ca284b0837949d14a9025806 Mon Sep 17 00:00:00 2001
From: binary-husky
Date: Wed, 8 Nov 2023 18:40:36 +0800
Subject: [PATCH 040/117] hide audio btn border

---
 main.py            | 2 +-
 themes/default.css | 5 +++++
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/main.py b/main.py
index a621deb1a1..89ca7811f6 100644
--- a/main.py
+++ b/main.py
@@ -94,7 +94,7 @@ def main():
                     clearBtn = gr.Button("清除", elem_id="elem_clear", variant="secondary", visible=False); clearBtn.style(size="sm")
                 if ENABLE_AUDIO:
                     with gr.Row():
-                        audio_mic = gr.Audio(source="microphone", type="numpy", streaming=True, show_label=False).style(container=False)
+                        audio_mic = gr.Audio(source="microphone", type="numpy", elem_id="elem_audio", streaming=True, show_label=False).style(container=False)
                 with gr.Row():
                     status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}", elem_id="state-panel")
             with gr.Accordion("基础功能区", open=True, elem_id="basic-panel") as area_basic_fn:
diff --git a/themes/default.css b/themes/default.css
index 65d5940be0..7c1d400f2d 100644
--- a/themes/default.css
+++ b/themes/default.css
@@ -1,3 +1,8 @@
+/* 插件下拉菜单 */
+#elem_audio {
+    border-style: hidden !important;
+}
+
 .dark {
     --background-fill-primary: #050810;
     --body-background-fill: var(--background-fill-primary);

From 0ff750b60a645c739ab09173a86d0ab9b1482483 Mon Sep 17 00:00:00 2001
From: qingxu fu <505030475@qq.com>
Date: Fri, 10 Nov 2023 12:40:25 +0800
Subject: [PATCH 041/117] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E7=BC=A9=E8=BF=9B?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 config.py                  |  3 ++-
 request_llms/bridge_all.py | 12 ++++++------
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/config.py b/config.py
index f44c47dd40..38d0519808 100644
--- a/config.py
+++ b/config.py
@@ -87,7 +87,8 @@
 # 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
 LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview","gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview",
+                    "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
                     "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
                     "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4",
                     "chatglm3", "moss", "newbing", "claude-2"]
diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py
index 6d34d951b8..3a93234e25 100644
--- a/request_llms/bridge_all.py
+++ b/request_llms/bridge_all.py
@@ -145,12 +145,12 @@ def decode(self, *args, **kwargs):
     },
 
     "gpt-4-1106-preview": {
-            "fn_with_ui": chatgpt_ui,
-            "fn_without_ui": chatgpt_noui,
-            "endpoint": openai_endpoint,
-            "max_token": 128000,
-            "tokenizer": tokenizer_gpt4,
-            "token_cnt": get_token_num_gpt4,
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": openai_endpoint,
+        "max_token": 128000,
+        "tokenizer": tokenizer_gpt4,
+        "token_cnt": get_token_num_gpt4,
     },
 
     "gpt-3.5-random": {
From a1a91c25a5ac2e3928e7dd17b21f70e7694cfca6 Mon Sep 17 00:00:00 2001
From: qingxu fu <505030475@qq.com>
Date: Fri, 10 Nov 2023 12:53:03 +0800
Subject: [PATCH 042/117] =?UTF-8?q?=E7=A7=BB=E9=99=A4=E9=87=8D=E5=A4=8D?=
 =?UTF-8?q?=E9=A1=B9?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 request_llms/bridge_all.py | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py
index cb71843e6c..3a93234e25 100644
--- a/request_llms/bridge_all.py
+++ b/request_llms/bridge_all.py
@@ -108,15 +108,6 @@ def decode(self, *args, **kwargs):
         "token_cnt": get_token_num_gpt35,
     },
 
-    "gpt-3.5-turbo-1106": {
-        "fn_with_ui": chatgpt_ui,
-        "fn_without_ui": chatgpt_noui,
-        "endpoint": openai_endpoint,
-        "max_token": 16385,
-        "tokenizer": tokenizer_gpt35,
-        "token_cnt": get_token_num_gpt35,
-    },
-
     "gpt-3.5-turbo-16k-0613": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
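[Annotation] Patch 042 looks cosmetic but closes a real trap left behind by the 16385 context-window fixes of patches 038-039: `request_llms/bridge_all.py` briefly carried the `"gpt-3.5-turbo-1106"` key twice, and in a Python dict literal a duplicated key is not an error — the last occurrence silently wins, so whichever block sits lower masks the other's `max_token`. A self-contained demonstration:

```python
# Why removing the duplicate registry block matters: the later key silently wins.
registry = {
    "gpt-3.5-turbo-1106": {"max_token": 16385},      # the corrected budget...
    "gpt-3.5-turbo-1106": {"max_token": 1024 * 16},  # ...silently overridden by a stale copy
}
assert registry["gpt-3.5-turbo-1106"]["max_token"] == 16384  # not 16385!
```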
刷新界面 - fpp = yield from nougat_handle.NOUGAT_parse_pdf(fp, chatbot, history) - promote_file_to_downloadzone(fpp, rename_file=os.path.basename(fpp)+'.nougat.mmd', chatbot=chatbot) + if fp.endswith('pdf'): + chatbot.append(["当前进度:", f"正在解析论文,请稍候。(第一次运行时,需要花费较长时间下载NOUGAT参数)"]); yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + fpp = yield from nougat_handle.NOUGAT_parse_pdf(fp, chatbot, history) + promote_file_to_downloadzone(fpp, rename_file=os.path.basename(fpp)+'.nougat.mmd', chatbot=chatbot) + else: + chatbot.append(["当前论文无需解析:", fp]); yield from update_ui( chatbot=chatbot, history=history) + fpp = fp with open(fpp, 'r', encoding='utf8') as f: article_content = f.readlines() article_dict = markdown_to_dict(article_content) From 0299b0f95f6c264ca4dbf8809fb1e09c00702ec4 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Fri, 10 Nov 2023 20:59:08 +0800 Subject: [PATCH 044/117] =?UTF-8?q?=E6=94=AF=E6=8C=81DALLE3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functional.py | 17 ++++++++-- ...76\347\211\207\347\224\237\346\210\220.py" | 31 +++++++++++++++++-- docs/translate_english.json | 2 +- docs/translate_japanese.json | 2 +- docs/translate_traditionalchinese.json | 2 +- 5 files changed, 46 insertions(+), 8 deletions(-) diff --git a/crazy_functional.py b/crazy_functional.py index 2d7fa74b1e..60c85691cc 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -349,18 +349,29 @@ def get_crazy_functions(): print('Load function plugin failed') try: - from crazy_functions.图片生成 import 图片生成 + from crazy_functions.图片生成 import 图片生成, 图片生成_DALLE3 function_plugins.update({ "图片生成(先切换模型到openai或api2d)": { "Group": "对话", "Color": "stop", "AsButton": False, "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - "ArgsReminder": "在这里输入分辨率, 如256x256(默认)", # 高级参数输入区的显示提示 - "Info": "图片生成 | 输入参数字符串,提供图像的内容", + "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认)", # 高级参数输入区的显示提示 + "Info": "使用DALLE2生成图片 | 输入参数字符串,提供图像的内容", "Function": HotReload(图片生成) }, }) + function_plugins.update({ + "图片生成_DALLE3(先切换模型到openai或api2d)": { + "Group": "对话", + "Color": "stop", + "AsButton": False, + "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) + "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认)", # 高级参数输入区的显示提示 + "Info": "使用DALLE3生成图片 | 输入参数字符串,提供图像的内容", + "Function": HotReload(图片生成_DALLE3) + }, + }) except: print('Load function plugin failed') diff --git "a/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" "b/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" index 1b7dff5d9d..95b4481366 100644 --- "a/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" +++ "b/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" @@ -3,7 +3,7 @@ import datetime -def gen_image(llm_kwargs, prompt, resolution="256x256"): +def gen_image(llm_kwargs, prompt, resolution="1024x1024", model="dall-e-2"): import requests, json, time, os from request_llms.bridge_all import model_info @@ -23,6 +23,7 @@ def gen_image(llm_kwargs, prompt, resolution="256x256"): 'prompt': prompt, 'n': 1, 'size': resolution, + 'model': model, 'response_format': 'url' } response = requests.post(url, headers=headers, json=data, proxies=proxies) @@ -58,7 +59,7 @@ def 图片生成(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro chatbot.append(("这是什么功能?", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文效果不理想, 请尝试英文Prompt。正在处理中 .....")) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 if ("advanced_arg" in 
plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - resolution = plugin_kwargs.get("advanced_arg", '256x256') + resolution = plugin_kwargs.get("advanced_arg", '1024x1024') image_url, image_path = gen_image(llm_kwargs, prompt, resolution) chatbot.append([prompt, f'图像中转网址:
`{image_url}`
'+ @@ -67,3 +68,29 @@ def 图片生成(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro f'本地文件预览:
'
     ])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
+
+@CatchException
+def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+    """
+    txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
+    llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
+    plugin_kwargs   插件模型的参数,暂时没有用武之地
+    chatbot         聊天显示框的句柄,用于显示给用户
+    history         聊天历史,前情提要
+    system_prompt   给gpt的静默提醒
+    web_port        当前软件运行的端口号
+    """
+    history = []    # 清空历史,以免输入溢出
+    chatbot.append(("这是什么功能?", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文效果不理想, 请尝试英文Prompt。正在处理中 ....."))
+    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
+    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
+    resolution = plugin_kwargs.get("advanced_arg", '1024x1024')
+    image_url, image_path = gen_image(llm_kwargs, prompt, resolution, model="dall-e-3")  # 指定DALLE3模型,否则会落回默认的dall-e-2
+    chatbot.append([prompt, 
+        f'图像中转网址: 
`{image_url}`
'+ + f'中转网址预览:
' + f'本地文件地址:
`{image_path}`
'+ + f'本地文件预览:
' + ]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + diff --git a/docs/translate_english.json b/docs/translate_english.json index 850cae5471..44361f0263 100644 --- a/docs/translate_english.json +++ b/docs/translate_english.json @@ -265,7 +265,7 @@ "例如chatglm&gpt-3.5-turbo&api2d-gpt-4": "e.g. chatglm&gpt-3.5-turbo&api2d-gpt-4", "先切换模型到openai或api2d": "Switch the model to openai or api2d first", "在这里输入分辨率": "Enter the resolution here", - "如256x256": "e.g. 256x256", + "如1024x1024": "e.g. 1024x1024", "默认": "Default", "建议您复制一个config_private.py放自己的秘密": "We suggest you to copy a config_private.py file to keep your secrets, such as API and proxy URLs, from being accidentally uploaded to Github and seen by others.", "如API和代理网址": "Such as API and proxy URLs", diff --git a/docs/translate_japanese.json b/docs/translate_japanese.json index ae86dc06ad..29ebcc9609 100644 --- a/docs/translate_japanese.json +++ b/docs/translate_japanese.json @@ -854,7 +854,7 @@ "查询版本和用户意见": "バージョンとユーザーの意見を検索する", "提取摘要": "要約を抽出する", "在gpt输出代码的中途": "GPTがコードを出力する途中で", - "如256x256": "256x256のように", + "如1024x1024": "1024x1024のように", "概括其内容": "内容を要約する", "剩下的情况都开头除去": "残りの場合はすべて先頭を除去する", "至少一个线程任务意外失败": "少なくとも1つのスレッドタスクが予期しない失敗をした", diff --git a/docs/translate_traditionalchinese.json b/docs/translate_traditionalchinese.json index a677f10815..b75cbdb4b2 100644 --- a/docs/translate_traditionalchinese.json +++ b/docs/translate_traditionalchinese.json @@ -1147,7 +1147,7 @@ "Y+回车=确认": "Y+回車=確認", "正在同时咨询ChatGPT和ChatGLM……": "正在同時諮詢ChatGPT和ChatGLM……", "根据 heuristic 规则": "根據heuristic規則", - "如256x256": "如256x256", + "如1024x1024": "如1024x1024", "函数插件区": "函數插件區", "*** API_KEY 导入成功": "*** API_KEY 導入成功", "请对下面的程序文件做一个概述文件名是": "請對下面的程序文件做一個概述文件名是", From f9fc02948ac360e2906214e274a5ebd4770e8a28 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Fri, 10 Nov 2023 21:04:21 +0800 Subject: [PATCH 045/117] =?UTF-8?q?=E6=9B=B4=E6=96=B0=E5=88=86=E8=BE=A8?= =?UTF-8?q?=E7=8E=87=E6=8F=90=E7=A4=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functional.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crazy_functional.py b/crazy_functional.py index 60c85691cc..e82f399550 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -356,7 +356,7 @@ def get_crazy_functions(): "Color": "stop", "AsButton": False, "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认)", # 高级参数输入区的显示提示 + "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 256x256, 512x512, 1024x1024", # 高级参数输入区的显示提示 "Info": "使用DALLE2生成图片 | 输入参数字符串,提供图像的内容", "Function": HotReload(图片生成) }, @@ -367,7 +367,7 @@ def get_crazy_functions(): "Color": "stop", "AsButton": False, "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认)", # 高级参数输入区的显示提示 + "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 1024x1024, 1792x1024, 1024x1792", # 高级参数输入区的显示提示 "Info": "使用DALLE3生成图片 | 输入参数字符串,提供图像的内容", "Function": HotReload(图片生成_DALLE3) }, From 362b545a45352b011adef023e54f9c34a8110fdf Mon Sep 17 00:00:00 2001 From: xiangsam Date: Fri, 10 Nov 2023 14:25:37 +0000 Subject: [PATCH 046/117] =?UTF-8?q?=E6=9B=B4=E6=94=B9import=20nougat?= =?UTF-8?q?=E6=97=B6=E6=9C=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...\221PDF\346\226\207\346\241\243_NOUGAT.py" | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git 
"a/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_NOUGAT.py" "b/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_NOUGAT.py" index 50e34c4e05..16dfd6bf3d 100644 --- "a/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_NOUGAT.py" +++ "b/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_NOUGAT.py" @@ -57,22 +57,22 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst "批量翻译PDF文档。函数插件贡献者: Binary-Husky"]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import nougat - import tiktoken - except: - report_execption(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade nougat-ocr tiktoken```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - # 清空历史,以免输入溢出 history = [] from .crazy_utils import get_files_from_everything success, file_manifest, project_folder = get_files_from_everything(txt, type='.pdf') + if len(file_manifest) > 0: + # 尝试导入依赖,如果缺少依赖,则给出安装建议 + try: + import nougat + import tiktoken + except: + report_execption(chatbot, history, + a=f"解析项目: {txt}", + b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade nougat-ocr tiktoken```。") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return success_mmd, file_manifest_mmd, _ = get_files_from_everything(txt, type='.mmd') success = success or success_mmd file_manifest += file_manifest_mmd From da7c03e868b89f71b52444a0565ae4d08e50293a Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Fri, 10 Nov 2023 22:54:55 +0800 Subject: [PATCH 047/117] =?UTF-8?q?=E5=9B=BE=E5=83=8F=E4=BF=AE=E6=94=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functional.py | 17 ++- .../multi_stage/multi_stage_utils.py | 45 +++++++ ...76\347\211\207\347\224\237\346\210\220.py" | 125 ++++++++++++++++-- toolbox.py | 7 +- 4 files changed, 176 insertions(+), 18 deletions(-) create mode 100644 crazy_functions/multi_stage/multi_stage_utils.py diff --git a/crazy_functional.py b/crazy_functional.py index e82f399550..2e94570cbe 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -349,16 +349,16 @@ def get_crazy_functions(): print('Load function plugin failed') try: - from crazy_functions.图片生成 import 图片生成, 图片生成_DALLE3 + from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3, 图片修改_DALLE2 function_plugins.update({ - "图片生成(先切换模型到openai或api2d)": { + "图片生成_DALLE2(先切换模型到openai或api2d)": { "Group": "对话", "Color": "stop", "AsButton": False, "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 256x256, 512x512, 1024x1024", # 高级参数输入区的显示提示 "Info": "使用DALLE2生成图片 | 输入参数字符串,提供图像的内容", - "Function": HotReload(图片生成) + "Function": HotReload(图片生成_DALLE2) }, }) function_plugins.update({ @@ -372,6 +372,17 @@ def get_crazy_functions(): "Function": HotReload(图片生成_DALLE3) }, }) + # function_plugins.update({ + # "图片修改_DALLE2(启动DALLE2图像修改向导程序)": { + # "Group": "对话", + # "Color": "stop", + # "AsButton": False, + # "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) + # "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 1024x1024, 1792x1024, 1024x1792", # 高级参数输入区的显示提示 + # # "Info": "使用DALLE2修改图片 | 输入参数字符串,提供图像的内容", + # "Function": HotReload(图片修改_DALLE2) + # }, + # }) except: print('Load function plugin failed') diff --git a/crazy_functions/multi_stage/multi_stage_utils.py 
b/crazy_functions/multi_stage/multi_stage_utils.py new file mode 100644 index 0000000000..60f0778320 --- /dev/null +++ b/crazy_functions/multi_stage/multi_stage_utils.py @@ -0,0 +1,45 @@ +from pydantic import BaseModel, Field +from typing import List +from toolbox import update_ui_lastest_msg, disable_auto_promotion +from request_llms.bridge_all import predict_no_ui_long_connection +from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError +import time +import pickle + +def have_any_recent_upload_files(chatbot): + _5min = 5 * 60 + if not chatbot: return False # chatbot is None + most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None) + if not most_recent_uploaded: return False # most_recent_uploaded is None + if time.time() - most_recent_uploaded["time"] < _5min: return True # most_recent_uploaded is new + else: return False # most_recent_uploaded is too old + +class GptAcademicState(): + def __init__(self): + self.reset() + + def reset(self): + pass + + def lock_plugin(self, chatbot): + chatbot._cookies['plugin_state'] = pickle.dumps(self) + + def unlock_plugin(self, chatbot): + self.reset() + chatbot._cookies['plugin_state'] = pickle.dumps(self) + + def set_state(self, chatbot, key, value): + setattr(self, key, value) + chatbot._cookies['plugin_state'] = pickle.dumps(self) + + def get_state(chatbot, cls=None): + state = chatbot._cookies.get('plugin_state', None) + if state is not None: state = pickle.loads(state) + elif cls is not None: state = cls() + else: state = GptAcademicState() + state.chatbot = chatbot + return state + +class GatherMaterials(): + def __init__(self, materials) -> None: + materials = ['image', 'prompt'] \ No newline at end of file diff --git "a/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" "b/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" index 95b4481366..4968361a99 100644 --- "a/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" +++ "b/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" @@ -1,6 +1,5 @@ from toolbox import CatchException, update_ui, get_conf, select_api_key, get_log_folder -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -import datetime +from crazy_functions.multi_stage.multi_stage_utils import GptAcademicState def gen_image(llm_kwargs, prompt, resolution="1024x1024", model="dall-e-2"): @@ -43,9 +42,48 @@ def gen_image(llm_kwargs, prompt, resolution="1024x1024", model="dall-e-2"): return image_url, file_path+file_name +def edit_image(llm_kwargs, prompt, image_path, resolution="1024x1024", model="dall-e-2"): + import requests, json, time, os + from request_llms.bridge_all import model_info + + proxies = get_conf('proxies') + api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) + chat_endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] + # 'https://api.openai.com/v1/chat/completions' + img_endpoint = chat_endpoint.replace('chat/completions','images/edits') + # # Generate the image + url = img_endpoint + headers = { + 'Authorization': f"Bearer {api_key}", + 'Content-Type': 'application/json' + } + data = { + 'image': open(image_path, 'rb'), + 'prompt': prompt, + 'n': 1, + 'size': resolution, + 'model': model, + 'response_format': 'url' + } + response = requests.post(url, headers=headers, json=data, proxies=proxies) + print(response.content) + try: + image_url = json.loads(response.content.decode('utf8'))['data'][0]['url'] + except: + raise RuntimeError(response.content.decode()) + # 
文件保存到本地
+    r = requests.get(image_url, proxies=proxies)
+    file_path = f'{get_log_folder()}/image_gen/'
+    os.makedirs(file_path, exist_ok=True)
+    file_name = 'Image' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.png'
+    with open(file_path+file_name, 'wb+') as f: f.write(r.content)
+
+
+    return image_url, file_path+file_name
+
 @CatchException
-def 图片生成(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
     """
     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
     llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -69,17 +107,9 @@ def 图片生成(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
     ])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
 
+
 @CatchException
 def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-    """
-    txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
-    llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
-    plugin_kwargs   插件模型的参数,暂时没有用武之地
-    chatbot         聊天显示框的句柄,用于显示给用户
-    history         聊天历史,前情提要
-    system_prompt   给gpt的静默提醒
-    web_port        当前软件运行的端口号
-    """
     history = []    # 清空历史,以免输入溢出
     chatbot.append(("这是什么功能?", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文效果不理想, 请尝试英文Prompt。正在处理中 ....."))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
@@ -94,3 +124,74 @@ def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys
     ])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
 
+
+class ImageEditState(GptAcademicState):
+    def get_image_file(self, x):
+        import os, glob
+        if len(x) == 0: return False, None
+        if not os.path.exists(x): return False, None
+        if x.endswith('.png'): return True, x
+        file_manifest = [f for f in glob.glob(f'{x}/**/*.png', recursive=True)]
+        confirm = (len(file_manifest) >= 1 and file_manifest[0].endswith('.png') and os.path.exists(file_manifest[0]))
+        file = None if not confirm else file_manifest[0]
+        return confirm, file
+
+    def get_resolution(self, x):
+        return (x in ['256x256', '512x512', '1024x1024']), x
+
+    def get_prompt(self, x):
+        confirm = (len(x)>=5) and (not self.get_resolution(x)[0]) and (not self.get_image_file(x)[0])
+        return confirm, x
+
+    def reset(self):
+        self.req = [
+            {'value':None, 'description': '请先上传图像(必须是.png格式), 然后再次点击本插件', 'verify_fn': self.get_image_file},
+            {'value':None, 'description': '请输入分辨率,可选:256x256, 512x512 或 1024x1024', 'verify_fn': self.get_resolution},
+            {'value':None, 'description': '请输入修改需求,建议您使用英文提示词', 'verify_fn': self.get_prompt},
+        ]
+        self.info = ""
+
+    def feed(self, prompt, chatbot):
+        for r in self.req:
+            if r['value'] is None:
+                confirm, res = r['verify_fn'](prompt)
+                if confirm:
+                    r['value'] = res
+                    self.set_state(chatbot, 'dummy_key', 'dummy_value')
+                break
+        return self
+
+    def next_req(self):
+        for r in self.req:
+            if r['value'] is None:
+                return r['description']
+        return "已经收集到所有信息"
+
+    def already_obtained_all_materials(self):
+        return all([x['value'] is not None for x in self.req])
+
+@CatchException
+def 图片修改_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+    history = []    # 清空历史
+    state = ImageEditState.get_state(chatbot, ImageEditState)
+    state = state.feed(prompt, chatbot)
+    if not state.already_obtained_all_materials():
+        chatbot.append(["图片修改(先上传图片,再输入修改需求,最后输入分辨率)", state.next_req()])
+        yield from update_ui(chatbot=chatbot, history=history)
+        return
+
+    image_path = state.req[0]['value']
+    resolution = state.req[1]['value']
+    prompt = state.req[2]['value']
+    
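+    # 至此三项材料收集完毕:req[0/1/2] 的 value 字段分别存放图片路径、分辨率与修改需求
+    # (每个 req 条目都是 {'value', 'description', 'verify_fn'} 字典,由上面的 feed 逐项校验后填入)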
chatbot.append(["图片修改, 执行中", f"图片:`{image_path}`
分辨率:`{resolution}`
修改需求:`{prompt}`"])
+    yield from update_ui(chatbot=chatbot, history=history)
+
+    image_url, image_path = edit_image(llm_kwargs, prompt, image_path, resolution)
+    chatbot.append([prompt, 
+        f'图像中转网址: 
`{image_url}`
'+ + f'中转网址预览:
' + f'本地文件地址:
`{image_path}`
'+ + f'本地文件预览:
' + ]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + diff --git a/toolbox.py b/toolbox.py index 8c6e7fae9a..b1e1ce7b49 100644 --- a/toolbox.py +++ b/toolbox.py @@ -625,13 +625,14 @@ def on_file_uploaded(request: gradio.Request, files, chatbot, txt, txt2, checkbo def on_report_generated(cookies, files, chatbot): - from toolbox import find_recent_files - PATH_LOGGING = get_conf('PATH_LOGGING') + # from toolbox import find_recent_files + # PATH_LOGGING = get_conf('PATH_LOGGING') if 'files_to_promote' in cookies: report_files = cookies['files_to_promote'] cookies.pop('files_to_promote') else: - report_files = find_recent_files(PATH_LOGGING) + report_files = [] + # report_files = find_recent_files(PATH_LOGGING) if len(report_files) == 0: return cookies, None, chatbot # files.extend(report_files) From 107ea868e15eac1687fc18249f55f09b4fdff207 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Fri, 10 Nov 2023 23:08:56 +0800 Subject: [PATCH 048/117] =?UTF-8?q?API2D=E8=87=AA=E5=8A=A8=E5=AF=B9?= =?UTF-8?q?=E9=BD=90?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- config.py | 4 ++-- request_llms/bridge_all.py | 22 ++++++++++------------ 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/config.py b/config.py index 38d0519808..611b158992 100644 --- a/config.py +++ b/config.py @@ -89,8 +89,8 @@ LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓ AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", - "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', - "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", + "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', + "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "chatglm3", "moss", "newbing", "claude-2"] # P.S. 
其他可用的模型还包括 ["zhipuai", "qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random"
#             "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py
index 5da44b7920..139d3ae9cb 100644
--- a/request_llms/bridge_all.py
+++ b/request_llms/bridge_all.py
@@ -177,11 +177,11 @@ def decode(self, *args, **kwargs):
         "fn_without_ui": chatgpt_noui,
         "endpoint": azure_endpoint,
         "max_token": 8192,
-        "tokenizer": tokenizer_gpt35,
-        "token_cnt": get_token_num_gpt35,
+        "tokenizer": tokenizer_gpt4,
+        "token_cnt": get_token_num_gpt4,
     },
 
-    # api_2d
+    # api_2d (此后不需要在此处添加api2d的接口了,因为下面的代码会自动添加)
     "api2d-gpt-3.5-turbo": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
@@ -200,15 +200,6 @@ def decode(self, *args, **kwargs):
         "token_cnt": get_token_num_gpt4,
     },
 
-    "api2d-gpt-3.5-turbo-16k": {
-        "fn_with_ui": chatgpt_ui,
-        "fn_without_ui": chatgpt_noui,
-        "endpoint": api2d_endpoint,
-        "max_token": 16385,
-        "tokenizer": tokenizer_gpt35,
-        "token_cnt": get_token_num_gpt35,
-    },
-
     # 将 chatglm 直接对齐到 chatglm2
     "chatglm": {
         "fn_with_ui": chatglm_ui,
@@ -244,6 +235,13 @@ def decode(self, *args, **kwargs):
     },
 }
 
+# -=-=-=-=-=-=- api2d 对齐支持 -=-=-=-=-=-=-
+for model in AVAIL_LLM_MODELS:
+    if model.startswith('api2d-') and (model.replace('api2d-','') in model_info.keys()):
+        mi = model_info[model.replace('api2d-','')].copy()  # 先复制一份基础配置,避免直接改写原模型条目的endpoint
+        mi.update({"endpoint": api2d_endpoint})
+        model_info.update({model: mi})
+
 # -=-=-=-=-=-=- 以下部分是新加入的模型,可能附带额外依赖 -=-=-=-=-=-=-
 if "claude-1-100k" in AVAIL_LLM_MODELS or "claude-2" in AVAIL_LLM_MODELS:
     from .bridge_claude import predict_no_ui_long_connection as claude_noui

From 2b917edf26502b2e3c1e81794093f18839cbc42e Mon Sep 17 00:00:00 2001
From: qingxu fu <505030475@qq.com>
Date: Sat, 11 Nov 2023 17:58:17 +0800
Subject: [PATCH 049/117] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=9C=AC=E5=9C=B0?=
 =?UTF-8?q?=E6=A8=A1=E5=9E=8B=E5=9C=A8windows=E4=B8=8A=E7=9A=84=E5=85=BC?=
 =?UTF-8?q?=E5=AE=B9=E6=80=A7?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
 request_llms/bridge_chatglm.py     | 3 +--
 request_llms/bridge_chatglm3.py    | 3 +--
 request_llms/bridge_chatglmonnx.py | 3 +--
 request_llms/bridge_internlm.py    | 3 +--
 request_llms/bridge_llama2.py      | 3 +--
 request_llms/bridge_qwen.py        | 3 +--
 request_llms/local_llm_class.py    | 7 ++++---
 7 files changed, 10 insertions(+), 15 deletions(-)

diff --git a/request_llms/bridge_chatglm.py b/request_llms/bridge_chatglm.py
index 16e1d8fc78..83c50da133 100644
--- a/request_llms/bridge_chatglm.py
+++ b/request_llms/bridge_chatglm.py
@@ -4,14 +4,13 @@
 from transformers import 
AutoModel, AutoTokenizer from toolbox import get_conf, ProxyNetworkActivate -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetGLM3Handle(LocalLLMHandle): def load_model_info(self): diff --git a/request_llms/bridge_chatglmonnx.py b/request_llms/bridge_chatglmonnx.py index 312c6846ff..4b905718f6 100644 --- a/request_llms/bridge_chatglmonnx.py +++ b/request_llms/bridge_chatglmonnx.py @@ -8,7 +8,7 @@ import importlib from toolbox import update_ui, get_conf from multiprocessing import Process, Pipe -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns from .chatglmoonx import ChatGLMModel, chat_template @@ -17,7 +17,6 @@ # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetONNXGLMHandle(LocalLLMHandle): def load_model_info(self): diff --git a/request_llms/bridge_internlm.py b/request_llms/bridge_internlm.py index 073c193ad4..b831dc5908 100644 --- a/request_llms/bridge_internlm.py +++ b/request_llms/bridge_internlm.py @@ -7,7 +7,7 @@ import importlib from toolbox import update_ui, get_conf from multiprocessing import Process, Pipe -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns # ------------------------------------------------------------------------------------------------------------------------ @@ -34,7 +34,6 @@ def combine_history(prompt, hist): # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetInternlmHandle(LocalLLMHandle): def load_model_info(self): diff --git a/request_llms/bridge_llama2.py b/request_llms/bridge_llama2.py index bc8ef7ebb0..e6da4b755a 100644 --- a/request_llms/bridge_llama2.py +++ b/request_llms/bridge_llama2.py @@ -5,14 +5,13 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer from toolbox import update_ui, get_conf, ProxyNetworkActivate from multiprocessing import Process, Pipe -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns from threading import Thread # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetONNXGLMHandle(LocalLLMHandle): def load_model_info(self): diff --git a/request_llms/bridge_qwen.py b/request_llms/bridge_qwen.py index 62682cfa31..29168f6d7c 100644 --- a/request_llms/bridge_qwen.py 
+++ b/request_llms/bridge_qwen.py
@@ -30,10 +30,9 @@ def load_model_and_tokenizer(self):
         from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
         model_id = 'qwen/Qwen-7B-Chat'
-        revision = 'v1.0.1'
-        self._tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision, trust_remote_code=True)
+        self._tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, resume_download=True)
         # use fp16
-        model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", revision=revision, trust_remote_code=True, fp16=True).eval()
+        model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval()
         model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参
         self._model = model
 
From e4409b94d1c82bf8f9dabb1696f12fee64f348a9 Mon Sep 17 00:00:00 2001
From: qingxu fu <505030475@qq.com>
Date: Sat, 11 Nov 2023 18:30:57 +0800 
Subject: [PATCH 051/117] =?UTF-8?q?=E4=BF=AE=E6=AD=A3=E6=8B=BC=E5=86=99=20?= =?UTF-8?q?report=5Fexecption=20->=20report=5Fexception=20#1220?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...50\346\226\207\346\266\246\350\211\262.py" | 20 ++++---- ...50\346\226\207\347\277\273\350\257\221.py" | 14 +++--- ...345\207\272PDF\347\273\223\346\236\234.py" | 12 ++--- crazy_functions/agent_fns/auto_agent.py | 2 +- crazy_functions/agent_fns/general.py | 2 +- ...73\350\257\221\346\221\230\350\246\201.py" | 6 +-- ...47\273\223word\346\226\207\346\241\243.py" | 8 ++-- ...23\351\237\263\350\247\206\351\242\221.py" | 8 ++-- ...07\217Markdown\347\277\273\350\257\221.py" | 20 ++++---- ...347\273\223PDF\346\226\207\346\241\243.py" | 8 ++-- ...223PDF\346\226\207\346\241\243pdfminer.py" | 8 ++-- ...\221PDF\346\226\207\346\241\243_NOUGAT.py" | 6 +-- ...3_\345\244\232\347\272\277\347\250\213.py" | 6 +-- ...07\346\241\243\345\206\205\345\256\271.py" | 8 ++-- ...75\346\225\260\346\263\250\351\207\212.py" | 6 +-- ...350\247\243\346\236\220JupyterNotebook.py" | 6 +-- ...56\346\272\220\344\273\243\347\240\201.py" | 48 +++++++++---------- ...40\345\206\231\346\221\230\350\246\201.py" | 6 +-- ...42\345\260\217\345\212\251\346\211\213.py" | 6 +-- docs/self_analysis.md | 2 +- toolbox.py | 4 +- 21 files changed, 103 insertions(+), 103 deletions(-) diff --git "a/crazy_functions/Latex\345\205\250\346\226\207\346\266\246\350\211\262.py" "b/crazy_functions/Latex\345\205\250\346\226\207\346\266\246\350\211\262.py" index 268a344644..0bc7d4014f 100644 --- "a/crazy_functions/Latex\345\205\250\346\226\207\346\266\246\350\211\262.py" +++ "b/crazy_functions/Latex\345\205\250\346\226\207\346\266\246\350\211\262.py" @@ -1,5 +1,5 @@ from toolbox import update_ui, trimmed_format_exc, promote_file_to_downloadzone, get_log_folder -from toolbox import CatchException, report_execption, write_history_to_file, zip_folder +from toolbox import CatchException, report_exception, write_history_to_file, zip_folder class PaperFileGroup(): @@ -146,7 +146,7 @@ def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -157,12 +157,12 @@ def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en') @@ -184,7 +184,7 @@ def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade 
tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -195,12 +195,12 @@ def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh') @@ -220,7 +220,7 @@ def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -231,12 +231,12 @@ def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en', mode='proofread') diff --git "a/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py" "b/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py" index 697f5ac8aa..846bd80d0f 100644 --- "a/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py" +++ "b/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py" @@ -1,5 +1,5 @@ from toolbox import update_ui, promote_file_to_downloadzone -from toolbox import CatchException, report_execption, write_history_to_file +from toolbox import CatchException, report_exception, write_history_to_file fast_debug = False class PaperFileGroup(): @@ -117,7 +117,7 @@ def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -128,12 +128,12 @@ def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return 
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh') @@ -154,7 +154,7 @@ def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -165,12 +165,12 @@ def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en') \ No newline at end of file diff --git "a/crazy_functions/Latex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" "b/crazy_functions/Latex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" index 9edfea6862..a2545ddde8 100644 --- "a/crazy_functions/Latex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" +++ "b/crazy_functions/Latex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" @@ -1,5 +1,5 @@ from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone -from toolbox import CatchException, report_execption, update_ui_lastest_msg, zip_result, gen_time_str +from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str from functools import partial import glob, os, requests, time pj = os.path.join @@ -171,12 +171,12 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -249,7 +249,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history = [] txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache) if txt.endswith('.pdf'): - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档") + report_exception(chatbot, 
history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -258,13 +258,13 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无法处理: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无法处理: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/agent_fns/auto_agent.py b/crazy_functions/agent_fns/auto_agent.py index 16ca29598b..f04cbf8542 100644 --- a/crazy_functions/agent_fns/auto_agent.py +++ b/crazy_functions/agent_fns/auto_agent.py @@ -1,5 +1,5 @@ from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate -from toolbox import report_execption, get_log_folder, update_ui_lastest_msg, Singleton +from toolbox import report_exception, get_log_folder, update_ui_lastest_msg, Singleton from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom from crazy_functions.agent_fns.general import AutoGenGeneral import time diff --git a/crazy_functions/agent_fns/general.py b/crazy_functions/agent_fns/general.py index beb6d7ebc6..a37f27aeec 100644 --- a/crazy_functions/agent_fns/general.py +++ b/crazy_functions/agent_fns/general.py @@ -1,5 +1,5 @@ from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate -from toolbox import report_execption, get_log_folder, update_ui_lastest_msg, Singleton +from toolbox import report_exception, get_log_folder, update_ui_lastest_msg, Singleton from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom import time diff --git "a/crazy_functions/\344\270\213\350\275\275arxiv\350\256\272\346\226\207\347\277\273\350\257\221\346\221\230\350\246\201.py" "b/crazy_functions/\344\270\213\350\275\275arxiv\350\256\272\346\226\207\347\277\273\350\257\221\346\221\230\350\246\201.py" index c711cf4531..1e0fe63052 100644 --- "a/crazy_functions/\344\270\213\350\275\275arxiv\350\256\272\346\226\207\347\277\273\350\257\221\346\221\230\350\246\201.py" +++ "b/crazy_functions/\344\270\213\350\275\275arxiv\350\256\272\346\226\207\347\277\273\350\257\221\346\221\230\350\246\201.py" @@ -1,6 +1,6 @@ from toolbox import update_ui, get_log_folder from toolbox import write_history_to_file, promote_file_to_downloadzone -from toolbox import CatchException, report_execption, get_conf +from toolbox import CatchException, report_exception, get_conf import re, requests, unicodedata, os from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive def download_arxiv_(url_pdf): @@ -144,7 +144,7 @@ def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, hi try: import bs4 except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -157,7 +157,7 @@ def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, hi try: pdf_path, info = download_arxiv_(txt) except: - 
report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"下载pdf文件未成功") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 diff --git "a/crazy_functions/\346\200\273\347\273\223word\346\226\207\346\241\243.py" "b/crazy_functions/\346\200\273\347\273\223word\346\226\207\346\241\243.py" index 7c822e9f88..b392307154 100644 --- "a/crazy_functions/\346\200\273\347\273\223word\346\226\207\346\241\243.py" +++ "b/crazy_functions/\346\200\273\347\273\223word\346\226\207\346\241\243.py" @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive fast_debug = False @@ -97,7 +97,7 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr try: from docx import Document except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -111,7 +111,7 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -124,7 +124,7 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.docx或doc文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.docx或doc文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git "a/crazy_functions/\346\200\273\347\273\223\351\237\263\350\247\206\351\242\221.py" "b/crazy_functions/\346\200\273\347\273\223\351\237\263\350\247\206\351\242\221.py" index b88775b415..b27bcce06c 100644 --- "a/crazy_functions/\346\200\273\347\273\223\351\237\263\350\247\206\351\242\221.py" +++ "b/crazy_functions/\346\200\273\347\273\223\351\237\263\350\247\206\351\242\221.py" @@ -1,4 +1,4 @@ -from toolbox import CatchException, report_execption, select_api_key, update_ui, get_conf +from toolbox import CatchException, report_exception, select_api_key, update_ui, get_conf from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from toolbox import write_history_to_file, promote_file_to_downloadzone, get_log_folder @@ -144,7 +144,7 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro try: from moviepy.editor import AudioFileClip except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade moviepy```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -158,7 +158,7 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -174,7 +174,7 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro # 
如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何音频或视频文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何音频或视频文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git "a/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" "b/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" index 2bdffc8662..12b4ef0916 100644 --- "a/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" +++ "b/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" @@ -1,6 +1,6 @@ import glob, time, os, re, logging from toolbox import update_ui, trimmed_format_exc, gen_time_str, disable_auto_promotion -from toolbox import CatchException, report_execption, get_log_folder +from toolbox import CatchException, report_exception, get_log_folder from toolbox import write_history_to_file, promote_file_to_downloadzone fast_debug = False @@ -165,7 +165,7 @@ def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -177,12 +177,12 @@ def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p if not success: # 什么都没有 if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -205,7 +205,7 @@ def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -215,11 +215,11 @@ def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p if not success: # 什么都没有 if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en') @@ -238,7 +238,7 @@ def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -248,11 +248,11 @@ def Markdown翻译指定语言(txt, llm_kwargs, 
plugin_kwargs, chatbot, history, if not success: # 什么都没有 if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git "a/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243.py" "b/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243.py" index 57a6cdf135..7fc3e415c3 100644 --- "a/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243.py" +++ "b/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243.py" @@ -1,5 +1,5 @@ from toolbox import update_ui, promote_file_to_downloadzone, gen_time_str -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from .crazy_utils import read_and_clean_pdf_text @@ -119,7 +119,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst try: import fitz except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -133,7 +133,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -142,7 +142,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或.pdf文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或.pdf文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git "a/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243pdfminer.py" "b/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243pdfminer.py" index 213d8bb264..a729efaa40 100644 --- "a/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243pdfminer.py" +++ "b/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243pdfminer.py" @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from toolbox import write_history_to_file, promote_file_to_downloadzone @@ -138,7 +138,7 @@ def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, histo try: import pdfminer, bs4 except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = 
f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -147,7 +147,7 @@ def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, histo project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \ @@ -155,7 +155,7 @@ def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, histo # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git "a/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_NOUGAT.py" "b/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_NOUGAT.py" index 16dfd6bf3d..97170d0edd 100644 --- "a/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_NOUGAT.py" +++ "b/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_NOUGAT.py" @@ -1,4 +1,4 @@ -from toolbox import CatchException, report_execption, get_log_folder, gen_time_str +from toolbox import CatchException, report_exception, get_log_folder, gen_time_str from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive @@ -68,7 +68,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst import nougat import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade nougat-ocr tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -84,7 +84,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.pdf拓展名的文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git "a/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_\345\244\232\347\272\277\347\250\213.py" "b/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_\345\244\232\347\272\277\347\250\213.py" index f2e5cf99d3..333b529b7e 100644 --- "a/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_\345\244\232\347\272\277\347\250\213.py" +++ "b/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_\345\244\232\347\272\277\347\250\213.py" @@ -1,4 +1,4 @@ -from toolbox import CatchException, report_execption, get_log_folder, gen_time_str +from toolbox import 
CatchException, report_exception, get_log_folder, gen_time_str from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive @@ -26,7 +26,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst import tiktoken import scipdf except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf tiktoken scipdf_parser```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -43,7 +43,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.pdf拓展名的文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git "a/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" "b/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" index 4c0a1052c8..ef9678896f 100644 --- "a/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" +++ "b/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from .crazy_utils import read_and_clean_pdf_text from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive fast_debug = False @@ -81,7 +81,7 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat try: import fitz except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -96,7 +96,7 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -105,7 +105,7 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git "a/crazy_functions/\347\224\237\346\210\220\345\207\275\346\225\260\346\263\250\351\207\212.py" "b/crazy_functions/\347\224\237\346\210\220\345\207\275\346\225\260\346\263\250\351\207\212.py" index bf3da6a4b9..d71a568053 100644 --- "a/crazy_functions/\347\224\237\346\210\220\345\207\275\346\225\260\346\263\250\351\207\212.py" +++ "b/crazy_functions/\347\224\237\346\210\220\345\207\275\346\225\260\346\263\250\351\207\212.py" @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive fast_debug = False @@ -43,14 +43,14 @@ def 批量生成函数注释(txt, 
llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git "a/crazy_functions/\350\247\243\346\236\220JupyterNotebook.py" "b/crazy_functions/\350\247\243\346\236\220JupyterNotebook.py" index 709b7e1cdc..eeccadf7e6 100644 --- "a/crazy_functions/\350\247\243\346\236\220JupyterNotebook.py" +++ "b/crazy_functions/\350\247\243\346\236\220JupyterNotebook.py" @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from toolbox import write_history_to_file, promote_file_to_downloadzone fast_debug = True @@ -131,7 +131,7 @@ def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -141,7 +141,7 @@ def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p file_manifest = [f for f in glob.glob( f'{project_folder}/**/*.ipynb', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.ipynb文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git "a/crazy_functions/\350\247\243\346\236\220\351\241\271\347\233\256\346\272\220\344\273\243\347\240\201.py" "b/crazy_functions/\350\247\243\346\236\220\351\241\271\347\233\256\346\272\220\344\273\243\347\240\201.py" index f17a584d4e..e319d5a874 100644 --- "a/crazy_functions/\350\247\243\346\236\220\351\241\271\347\233\256\346\272\220\344\273\243\347\240\201.py" +++ "b/crazy_functions/\350\247\243\346\236\220\351\241\271\347\233\256\346\272\220\344\273\243\347\240\201.py" @@ -1,5 +1,5 @@ from toolbox import update_ui, promote_file_to_downloadzone, disable_auto_promotion -from toolbox import CatchException, report_execption, write_history_to_file +from toolbox import CatchException, report_exception, write_history_to_file from .crazy_utils import input_clipping def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): @@ -113,7 +113,7 @@ def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ [f for f in glob.glob('./*/*.py')] project_folder = './' if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ 
-126,12 +126,12 @@ def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -144,12 +144,12 @@ def 解析一个Matlab项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.m', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到任何`.m`源文件: {txt}") + report_exception(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到任何`.m`源文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -162,14 +162,14 @@ def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, his project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] #+ \ # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -182,7 +182,7 @@ def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \ @@ -190,7 +190,7 @@ def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") + report_exception(chatbot, history, a = 
f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -204,7 +204,7 @@ def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.java', recursive=True)] + \ @@ -212,7 +212,7 @@ def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.sh', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -226,7 +226,7 @@ def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.ts', recursive=True)] + \ @@ -241,7 +241,7 @@ def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s [f for f in glob.glob(f'{project_folder}/**/*.css', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.jsx', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何前端相关文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何前端相关文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -255,7 +255,7 @@ def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.go', recursive=True)] + \ @@ -263,7 +263,7 @@ def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s [f for f in glob.glob(f'{project_folder}/**/go.sum', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/go.work', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -276,14 +276,14 @@ def 解析一个Rust项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys project_folder = txt else: if txt == "": txt = 
'空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.rs', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.lock', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -296,7 +296,7 @@ def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.lua', recursive=True)] + \ @@ -304,7 +304,7 @@ def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何lua文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何lua文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -318,13 +318,13 @@ def 解析一个CSharp项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.cs', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.csproj', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何CSharp文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何CSharp文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -352,7 +352,7 @@ def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return # 若上传压缩文件, 先寻找到解压的文件夹路径, 从而避免解析压缩文件 @@ -365,7 +365,7 @@ def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys file_manifest = [f for pattern in pattern_include for f in glob.glob(f'{extract_folder_path}/**/{pattern}', recursive=True) if "" != extract_folder_path and \ os.path.isfile(f) and (not 
re.search(pattern_except, f) or pattern.endswith('.' + re.search(pattern_except, f).group().split('.')[-1]))] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) \ No newline at end of file diff --git "a/crazy_functions/\350\257\273\346\226\207\347\253\240\345\206\231\346\221\230\350\246\201.py" "b/crazy_functions/\350\257\273\346\226\207\347\253\240\345\206\231\346\221\230\350\246\201.py" index acdf632c3b..a43b6aa29a 100644 --- "a/crazy_functions/\350\257\273\346\226\207\347\253\240\345\206\231\346\221\230\350\246\201.py" +++ "b/crazy_functions/\350\257\273\346\226\207\347\253\240\345\206\231\346\221\230\350\246\201.py" @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive @@ -51,14 +51,14 @@ def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \ # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git "a/crazy_functions/\350\260\267\346\255\214\346\243\200\347\264\242\345\260\217\345\212\251\346\211\213.py" "b/crazy_functions/\350\260\267\346\255\214\346\243\200\347\264\242\345\260\217\345\212\251\346\211\213.py" index 5924a2860d..14b21bfcd0 100644 --- "a/crazy_functions/\350\260\267\346\255\214\346\243\200\347\264\242\345\260\217\345\212\251\346\211\213.py" +++ "b/crazy_functions/\350\260\267\346\255\214\346\243\200\347\264\242\345\260\217\345\212\251\346\211\213.py" @@ -1,5 +1,5 @@ from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from toolbox import CatchException, report_execption, promote_file_to_downloadzone +from toolbox import CatchException, report_exception, promote_file_to_downloadzone from toolbox import update_ui, update_ui_lastest_msg, disable_auto_promotion, write_history_to_file import logging import requests @@ -29,7 +29,7 @@ def get_meta_information(url, chatbot, history): try: session.proxies.update(proxies) except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"获取代理失败 无代理状态下很可能无法访问OpenAI家族的模型及谷歌学术 建议:检查USE_PROXY选项是否修改。", b=f"尝试直接连接") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -146,7 +146,7 @@ def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst import math from bs4 import 
BeautifulSoup except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4 arxiv```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 diff --git a/docs/self_analysis.md b/docs/self_analysis.md index c3736193bc..0b76c7bdd3 100644 --- a/docs/self_analysis.md +++ b/docs/self_analysis.md @@ -217,7 +217,7 @@ toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和 ## [31/48] 请对下面的程序文件做一个概述: crazy_functions\读文章写摘要.py -这个程序文件是一个Python模块,文件名为crazy_functions\读文章写摘要.py。该模块包含了两个函数,其中主要函数是"读文章写摘要"函数,其实现了解析给定文件夹中的tex文件,对其中每个文件的内容进行摘要生成,并根据各论文片段的摘要,最终生成全文摘要。第二个函数是"解析Paper"函数,用于解析单篇论文文件。其中用到了一些工具函数和库,如update_ui、CatchException、report_execption、write_results_to_file等。 +这个程序文件是一个Python模块,文件名为crazy_functions\读文章写摘要.py。该模块包含了两个函数,其中主要函数是"读文章写摘要"函数,其实现了解析给定文件夹中的tex文件,对其中每个文件的内容进行摘要生成,并根据各论文片段的摘要,最终生成全文摘要。第二个函数是"解析Paper"函数,用于解析单篇论文文件。其中用到了一些工具函数和库,如update_ui、CatchException、report_exception、write_results_to_file等。 ## [32/48] 请对下面的程序文件做一个概述: crazy_functions\谷歌检索小助手.py diff --git a/toolbox.py b/toolbox.py index b1e1ce7b49..a5425c083c 100644 --- a/toolbox.py +++ b/toolbox.py @@ -187,7 +187,7 @@ def decorated(*args, **kwargs): 其他小工具: - write_history_to_file: 将结果写入markdown文件中 - regular_txt_to_markdown: 将普通文本转换为Markdown格式的文本。 - - report_execption: 向chatbot中添加简单的意外错误信息 + - report_exception: 向chatbot中添加简单的意外错误信息 - text_divide_paragraph: 将文本按照段落分隔符分割开,生成带有段落标签的HTML代码。 - markdown_convertion: 用多种方式组合,将markdown转化为好看的html - format_io: 接管gradio默认的markdown处理方式 @@ -260,7 +260,7 @@ def regular_txt_to_markdown(text): -def report_execption(chatbot, history, a, b): +def report_exception(chatbot, history, a, b): """ 向chatbot中添加错误信息 """ From f75e39dc2734c62d7590e137c37c8504fa0eedbb Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sat, 11 Nov 2023 21:11:55 +0800 Subject: [PATCH 052/117] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=9C=AC=E5=9C=B0?= =?UTF-8?q?=E6=A8=A1=E5=9E=8B=E5=9C=A8Windows=E4=B8=8B=E7=9A=84=E5=8A=A0?= =?UTF-8?q?=E8=BD=BDBUG?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llms/bridge_chatgpt.py | 3 +-- request_llms/bridge_chatgpt_website.py | 3 +-- request_llms/bridge_claude.py | 2 +- request_llms/bridge_internlm.py | 19 ++++++++++--------- request_llms/bridge_qwen.py | 15 ++++++++------- request_llms/local_llm_class.py | 2 +- tests/test_llms.py | 4 ++-- version | 4 ++-- 8 files changed, 26 insertions(+), 26 deletions(-) diff --git a/request_llms/bridge_chatgpt.py b/request_llms/bridge_chatgpt.py index 292de0ad3c..e55ad37ae7 100644 --- a/request_llms/bridge_chatgpt.py +++ b/request_llms/bridge_chatgpt.py @@ -7,8 +7,7 @@ 1. predict: 正常对话时使用,具备完备的交互功能,不可多线程 具备多线程调用能力的函数 - 2. predict_no_ui:高级实验性功能模块调用,不会实时显示在界面上,参数简单,可以多线程并行,方便实现复杂的功能逻辑 - 3. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程 + 2. predict_no_ui_long_connection:支持多线程 """ import json diff --git a/request_llms/bridge_chatgpt_website.py b/request_llms/bridge_chatgpt_website.py index 7f3147b1d6..f2f0709099 100644 --- a/request_llms/bridge_chatgpt_website.py +++ b/request_llms/bridge_chatgpt_website.py @@ -7,8 +7,7 @@ 1. predict: 正常对话时使用,具备完备的交互功能,不可多线程 具备多线程调用能力的函数 - 2. predict_no_ui:高级实验性功能模块调用,不会实时显示在界面上,参数简单,可以多线程并行,方便实现复杂的功能逻辑 - 3. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程 + 2. 
predict_no_ui_long_connection:支持多线程 """ import json diff --git a/request_llms/bridge_claude.py b/request_llms/bridge_claude.py index 6084b1f15c..42b75052f8 100644 --- a/request_llms/bridge_claude.py +++ b/request_llms/bridge_claude.py @@ -7,7 +7,7 @@ 1. predict: 正常对话时使用,具备完备的交互功能,不可多线程 具备多线程调用能力的函数 - 2. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程 + 2. predict_no_ui_long_connection:支持多线程 """ import os diff --git a/request_llms/bridge_internlm.py b/request_llms/bridge_internlm.py index b831dc5908..20b53b442d 100644 --- a/request_llms/bridge_internlm.py +++ b/request_llms/bridge_internlm.py @@ -5,7 +5,7 @@ import time import threading import importlib -from toolbox import update_ui, get_conf +from toolbox import update_ui, get_conf, ProxyNetworkActivate from multiprocessing import Process, Pipe from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns @@ -52,14 +52,15 @@ def load_model_and_tokenizer(self): import torch from transformers import AutoModelForCausalLM, AutoTokenizer device = get_conf('LOCAL_MODEL_DEVICE') - if self._model is None: - tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True) - if device=='cpu': - model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16) - else: - model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16).cuda() - - model = model.eval() + with ProxyNetworkActivate('Download_LLM'): + if self._model is None: + tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True) + if device=='cpu': + model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16) + else: + model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16).cuda() + + model = model.eval() return model, tokenizer def llm_stream_generator(self, **kwargs): diff --git a/request_llms/bridge_qwen.py b/request_llms/bridge_qwen.py index 0b226df72a..afd886bf9d 100644 --- a/request_llms/bridge_qwen.py +++ b/request_llms/bridge_qwen.py @@ -6,7 +6,7 @@ import time import threading import importlib -from toolbox import update_ui, get_conf +from toolbox import update_ui, get_conf, ProxyNetworkActivate from multiprocessing import Process, Pipe from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns @@ -29,12 +29,13 @@ def load_model_and_tokenizer(self): import platform from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig - model_id = 'qwen/Qwen-7B-Chat' - self._tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-7B-Chat', trust_remote_code=True, resume_download=True) - # use fp16 - model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval() - model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参 - self._model = model + with ProxyNetworkActivate('Download_LLM'): + model_id = 'qwen/Qwen-7B-Chat' + self._tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-7B-Chat', trust_remote_code=True, resume_download=True) + # use fp16 + model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval() + model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参 + self._model = model 
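+        # the ProxyNetworkActivate context above covers only the weight download; later inference runs outside it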
return self._model, self._tokenizer diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py index fe6be9613a..38fcfc9134 100644 --- a/request_llms/local_llm_class.py +++ b/request_llms/local_llm_class.py @@ -201,7 +201,7 @@ def stream_chat(self, **kwargs): if res.startswith(self.std_tag): new_output = res[len(self.std_tag):] std_out = std_out[:std_out_clip_len] - # print(new_output, end='') + print(new_output, end='') std_out = new_output + std_out yield self.std_tag + '\n```\n' + std_out + '\n```\n' elif res == '[Finish]': diff --git a/tests/test_llms.py b/tests/test_llms.py index 5c5d2f6cba..6285f0309f 100644 --- a/tests/test_llms.py +++ b/tests/test_llms.py @@ -15,11 +15,11 @@ def validate_path(): # from request_llms.bridge_jittorllms_pangualpha import predict_no_ui_long_connection # from request_llms.bridge_jittorllms_llama import predict_no_ui_long_connection # from request_llms.bridge_claude import predict_no_ui_long_connection - # from request_llms.bridge_internlm import predict_no_ui_long_connection + from request_llms.bridge_internlm import predict_no_ui_long_connection # from request_llms.bridge_qwen import predict_no_ui_long_connection # from request_llms.bridge_spark import predict_no_ui_long_connection # from request_llms.bridge_zhipu import predict_no_ui_long_connection - from request_llms.bridge_chatglm3 import predict_no_ui_long_connection + # from request_llms.bridge_chatglm3 import predict_no_ui_long_connection llm_kwargs = { 'max_length': 4096, diff --git a/version b/version index 5e4fb7d00b..69a871e0fe 100644 --- a/version +++ b/version @@ -1,5 +1,5 @@ { - "version": 3.57, + "version": 3.58, "show_feature": true, - "new_feature": "支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验" + "new_feature": "修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验" } From 28119e343ce75b393019806b3600d512dddb8262 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sat, 11 Nov 2023 22:01:19 +0800 Subject: [PATCH 053/117] =?UTF-8?q?=E5=B0=86autogen=E5=A4=A7=E6=A8=A1?= =?UTF-8?q?=E5=9E=8B=E8=B0=83=E7=94=A8=E5=BA=95=E5=B1=82hook=E6=8E=89?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- config.py | 2 +- crazy_functional.py | 21 +- crazy_functions/agent_fns/auto_agent.py | 2 - crazy_functions/agent_fns/bridge_autogen.py | 584 ------------------ crazy_functions/agent_fns/general.py | 46 +- ...32\346\231\272\350\203\275\344\275\223.py" | 9 +- request_llms/bridge_all.py | 2 +- requirements.txt | 1 + 8 files changed, 33 insertions(+), 634 deletions(-) delete mode 100644 crazy_functions/agent_fns/bridge_autogen.py diff --git a/config.py b/config.py index f578aa853e..dfcd9cf137 100644 --- a/config.py +++ b/config.py @@ -211,7 +211,7 @@ # 在使用AutoGen插件时,是否使用Docker容器运行代码 -AUTOGEN_USE_DOCKER = True +AUTOGEN_USE_DOCKER = False # 临时的上传文件夹位置,请勿修改 diff --git a/crazy_functional.py b/crazy_functional.py index 155fc76c5e..a5c77ea0cd 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -539,18 +539,15 @@ def get_crazy_functions(): except: print('Load function plugin failed') - try: - from crazy_functions.多智能体 import 多智能体终端 - function_plugins.update({ - "多智能体终端(微软AutoGen)": { - "Group": "智能体", - "Color": "stop", - "AsButton": True, - "Function": HotReload(多智能体终端) - } - }) - except: - print('Load function plugin failed') + from crazy_functions.多智能体 import 多智能体终端 + function_plugins.update({ + 
"多智能体终端(微软AutoGen)": { + "Group": "智能体", + "Color": "stop", + "AsButton": True, + "Function": HotReload(多智能体终端) + } + }) # try: # from crazy_functions.chatglm微调工具 import 微调数据集生成 diff --git a/crazy_functions/agent_fns/auto_agent.py b/crazy_functions/agent_fns/auto_agent.py index f6a2832c36..6edf0e25f1 100644 --- a/crazy_functions/agent_fns/auto_agent.py +++ b/crazy_functions/agent_fns/auto_agent.py @@ -2,8 +2,6 @@ from toolbox import report_execption, get_log_folder, update_ui_lastest_msg, Singleton from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom from crazy_functions.agent_fns.general import AutoGenGeneral -import time -from autogen import AssistantAgent, UserProxyAgent diff --git a/crazy_functions/agent_fns/bridge_autogen.py b/crazy_functions/agent_fns/bridge_autogen.py deleted file mode 100644 index 5bf4aacd51..0000000000 --- a/crazy_functions/agent_fns/bridge_autogen.py +++ /dev/null @@ -1,584 +0,0 @@ -from time import sleep -import logging -import time -from typing import List, Optional, Dict, Callable, Union -import sys -import shutil -import numpy as np -from flaml import tune, BlendSearch -from flaml.tune.space import is_constant -from flaml.automl.logger import logger_formatter -from collections import defaultdict - -try: - import openai - from openai.error import ( - ServiceUnavailableError, - RateLimitError, - APIError, - InvalidRequestError, - APIConnectionError, - Timeout, - AuthenticationError, - ) - from openai import Completion as openai_Completion - import diskcache - - ERROR = None -except ImportError: - ERROR = ImportError("please install openai and diskcache to use the autogen.oai subpackage.") - openai_Completion = object -logger = logging.getLogger(__name__) -if not logger.handlers: - # Add the console handler. - _ch = logging.StreamHandler(stream=sys.stdout) - _ch.setFormatter(logger_formatter) - logger.addHandler(_ch) - - -class Completion(openai_Completion): - """A class for OpenAI completion API. - - It also supports: ChatCompletion, Azure OpenAI API. 
- """ - - # set of models that support chat completion - chat_models = { - "gpt-3.5-turbo", - "gpt-3.5-turbo-0301", # deprecate in Sep - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-16k-0613", - "gpt-35-turbo", - "gpt-35-turbo-16k", - "gpt-4", - "gpt-4-32k", - "gpt-4-32k-0314", # deprecate in Sep - "gpt-4-0314", # deprecate in Sep - "gpt-4-0613", - "gpt-4-32k-0613", - } - - # price per 1k tokens - price1K = { - "text-ada-001": 0.0004, - "text-babbage-001": 0.0005, - "text-curie-001": 0.002, - "code-cushman-001": 0.024, - "code-davinci-002": 0.1, - "text-davinci-002": 0.02, - "text-davinci-003": 0.02, - "gpt-3.5-turbo": (0.0015, 0.002), - "gpt-3.5-turbo-instruct": (0.0015, 0.002), - "gpt-3.5-turbo-0301": (0.0015, 0.002), # deprecate in Sep - "gpt-3.5-turbo-0613": (0.0015, 0.002), - "gpt-3.5-turbo-16k": (0.003, 0.004), - "gpt-3.5-turbo-16k-0613": (0.003, 0.004), - "gpt-35-turbo": (0.0015, 0.002), - "gpt-35-turbo-16k": (0.003, 0.004), - "gpt-35-turbo-instruct": (0.0015, 0.002), - "gpt-4": (0.03, 0.06), - "gpt-4-32k": (0.06, 0.12), - "gpt-4-0314": (0.03, 0.06), # deprecate in Sep - "gpt-4-32k-0314": (0.06, 0.12), # deprecate in Sep - "gpt-4-0613": (0.03, 0.06), - "gpt-4-32k-0613": (0.06, 0.12), - } - - default_search_space = { - "model": tune.choice( - [ - "text-ada-001", - "text-babbage-001", - "text-davinci-003", - "gpt-3.5-turbo", - "gpt-4", - ] - ), - "temperature_or_top_p": tune.choice( - [ - {"temperature": tune.uniform(0, 2)}, - {"top_p": tune.uniform(0, 1)}, - ] - ), - "max_tokens": tune.lograndint(50, 1000), - "n": tune.randint(1, 100), - "prompt": "{prompt}", - } - - seed = 41 - cache_path = f".cache/{seed}" - # retry after this many seconds - retry_wait_time = 10 - # fail a request after hitting RateLimitError for this many seconds - max_retry_period = 120 - # time out for request to openai server - request_timeout = 60 - - openai_completion_class = not ERROR and openai.Completion - _total_cost = 0 - optimization_budget = None - - _history_dict = _count_create = None - - @classmethod - def set_cache(cls, seed: Optional[int] = 41, cache_path_root: Optional[str] = ".cache"): - """Set cache path. - - Args: - seed (int, Optional): The integer identifier for the pseudo seed. - Results corresponding to different seeds will be cached in different places. - cache_path (str, Optional): The root path for the cache. - The complete cache path will be {cache_path}/{seed}. - """ - cls.seed = seed - cls.cache_path = f"{cache_path_root}/{seed}" - - @classmethod - def clear_cache(cls, seed: Optional[int] = None, cache_path_root: Optional[str] = ".cache"): - """Clear cache. - - Args: - seed (int, Optional): The integer identifier for the pseudo seed. - If omitted, all caches under cache_path_root will be cleared. - cache_path (str, Optional): The root path for the cache. - The complete cache path will be {cache_path}/{seed}. 
- """ - if seed is None: - shutil.rmtree(cache_path_root, ignore_errors=True) - return - with diskcache.Cache(f"{cache_path_root}/{seed}") as cache: - cache.clear() - - @classmethod - def _book_keeping(cls, config: Dict, response): - """Book keeping for the created completions.""" - if response != -1 and "cost" not in response: - response["cost"] = cls.cost(response) - if cls._history_dict is None: - return - if cls._history_compact: - value = { - "created_at": [], - "cost": [], - "token_count": [], - } - if "messages" in config: - messages = config["messages"] - if len(messages) > 1 and messages[-1]["role"] != "assistant": - existing_key = get_key(messages[:-1]) - value = cls._history_dict.pop(existing_key, value) - key = get_key(messages + [choice["message"] for choice in response["choices"]]) - else: - key = get_key([config["prompt"]] + [choice.get("text") for choice in response["choices"]]) - value["created_at"].append(cls._count_create) - value["cost"].append(response["cost"]) - value["token_count"].append( - { - "model": response["model"], - "prompt_tokens": response["usage"]["prompt_tokens"], - "completion_tokens": response["usage"].get("completion_tokens", 0), - "total_tokens": response["usage"]["total_tokens"], - } - ) - cls._history_dict[key] = value - cls._count_create += 1 - return - cls._history_dict[cls._count_create] = { - "request": config, - "response": response.to_dict_recursive(), - } - cls._count_create += 1 - - @classmethod - def _get_response(cls, config: Dict, raise_on_ratelimit_or_timeout=False, use_cache=True): - """Get the response from the openai api call. - - Try cache first. If not found, call the openai api. If the api call fails, retry after retry_wait_time. - """ - config = config.copy() - - - @classmethod - def _get_max_valid_n(cls, key, max_tokens): - # find the max value in max_valid_n_per_max_tokens - # whose key is equal or larger than max_tokens - return max( - (value for k, value in cls._max_valid_n_per_max_tokens.get(key, {}).items() if k >= max_tokens), - default=1, - ) - - @classmethod - def _get_min_invalid_n(cls, key, max_tokens): - # find the min value in min_invalid_n_per_max_tokens - # whose key is equal or smaller than max_tokens - return min( - (value for k, value in cls._min_invalid_n_per_max_tokens.get(key, {}).items() if k <= max_tokens), - default=None, - ) - - @classmethod - def _get_region_key(cls, config): - # get a key for the valid/invalid region corresponding to the given config - config = cls._pop_subspace(config, always_copy=False) - return ( - config["model"], - config.get("prompt", config.get("messages")), - config.get("stop"), - ) - - @classmethod - def _update_invalid_n(cls, prune, region_key, max_tokens, num_completions): - if prune: - # update invalid n and prune this config - cls._min_invalid_n_per_max_tokens[region_key] = invalid_n = cls._min_invalid_n_per_max_tokens.get( - region_key, {} - ) - invalid_n[max_tokens] = min(num_completions, invalid_n.get(max_tokens, np.inf)) - - @classmethod - def _pop_subspace(cls, config, always_copy=True): - if "subspace" in config: - config = config.copy() - config.update(config.pop("subspace")) - return config.copy() if always_copy else config - - @classmethod - def _get_params_for_create(cls, config: Dict) -> Dict: - """Get the params for the openai api call from a config in the search space.""" - params = cls._pop_subspace(config) - if cls._prompts: - params["prompt"] = cls._prompts[config["prompt"]] - else: - params["messages"] = cls._messages[config["messages"]] - if "stop" in 
params: - params["stop"] = cls._stops and cls._stops[params["stop"]] - temperature_or_top_p = params.pop("temperature_or_top_p", None) - if temperature_or_top_p: - params.update(temperature_or_top_p) - if cls._config_list and "config_list" not in params: - params["config_list"] = cls._config_list - return params - - @classmethod - def create( - cls, - context: Optional[Dict] = None, - use_cache: Optional[bool] = True, - config_list: Optional[List[Dict]] = None, - filter_func: Optional[Callable[[Dict, Dict, Dict], bool]] = None, - raise_on_ratelimit_or_timeout: Optional[bool] = True, - allow_format_str_template: Optional[bool] = False, - **config, - ): - """Make a completion for a given context. - - Args: - context (Dict, Optional): The context to instantiate the prompt. - It needs to contain keys that are used by the prompt template or the filter function. - E.g., `prompt="Complete the following sentence: {prefix}, context={"prefix": "Today I feel"}`. - The actual prompt will be: - "Complete the following sentence: Today I feel". - More examples can be found at [templating](https://microsoft.github.io/autogen/docs/Use-Cases/enhanced_inference#templating). - use_cache (bool, Optional): Whether to use cached responses. - config_list (List, Optional): List of configurations for the completion to try. - The first one that does not raise an error will be used. - Only the differences from the default config need to be provided. - E.g., - - ```python - response = oai.Completion.create( - config_list=[ - { - "model": "gpt-4", - "api_key": os.environ.get("AZURE_OPENAI_API_KEY"), - "api_type": "azure", - "api_base": os.environ.get("AZURE_OPENAI_API_BASE"), - "api_version": "2023-03-15-preview", - }, - { - "model": "gpt-3.5-turbo", - "api_key": os.environ.get("OPENAI_API_KEY"), - "api_type": "open_ai", - "api_base": "https://api.openai.com/v1", - }, - { - "model": "llama-7B", - "api_base": "http://127.0.0.1:8080", - "api_type": "open_ai", - } - ], - prompt="Hi", - ) - ``` - - filter_func (Callable, Optional): A function that takes in the context, the config and the response and returns a boolean to indicate whether the response is valid. E.g., - - ```python - def yes_or_no_filter(context, config, response): - return context.get("yes_or_no_choice", False) is False or any( - text in ["Yes.", "No."] for text in oai.Completion.extract_text(response) - ) - ``` - - raise_on_ratelimit_or_timeout (bool, Optional): Whether to raise RateLimitError or Timeout when all configs fail. - When set to False, -1 will be returned when all configs fail. - allow_format_str_template (bool, Optional): Whether to allow format string template in the config. - **config: Configuration for the openai API call. This is used as parameters for calling openai API. - The "prompt" or "messages" parameter can contain a template (str or Callable) which will be instantiated with the context. - Besides the parameters for the openai API call, it can also contain: - - `max_retry_period` (int): the total time (in seconds) allowed for retrying failed requests. - - `retry_wait_time` (int): the time interval to wait (in seconds) before retrying a failed request. - - `seed` (int) for the cache. This is useful when implementing "controlled randomness" for the completion. - - Returns: - Responses from OpenAI API, with additional fields. - - `cost`: the total cost. - When `config_list` is provided, the response will contain a few more fields: - - `config_id`: the index of the config in the config_list that is used to generate the response. 
- - `pass_filter`: whether the response passes the filter function. None if no filter is provided. - """ - if ERROR: - raise ERROR - config_list = [ - { - "model": "llama-7B", - "api_base": "http://127.0.0.1:8080", - "api_type": "open_ai", - } - ] - last = len(config_list) - 1 - cost = 0 - for i, each_config in enumerate(config_list): - base_config = config.copy() - base_config["allow_format_str_template"] = allow_format_str_template - base_config.update(each_config) - if i < last and filter_func is None and "max_retry_period" not in base_config: - # max_retry_period = 0 to avoid retrying when no filter is given - base_config["max_retry_period"] = 0 - try: - response = cls.create( - context, - use_cache, - raise_on_ratelimit_or_timeout=i < last or raise_on_ratelimit_or_timeout, - **base_config, - ) - if response == -1: - return response - pass_filter = filter_func is None or filter_func( - context=context, base_config=config, response=response - ) - if pass_filter or i == last: - response["cost"] = cost + response["cost"] - response["config_id"] = i - response["pass_filter"] = pass_filter - return response - cost += response["cost"] - except (AuthenticationError, RateLimitError, Timeout, InvalidRequestError): - logger.debug(f"failed with config {i}", exc_info=1) - if i == last: - raise - - params = cls._construct_params(context, config, allow_format_str_template=allow_format_str_template) - if not use_cache: - return cls._get_response( - params, raise_on_ratelimit_or_timeout=raise_on_ratelimit_or_timeout, use_cache=False - ) - seed = cls.seed - if "seed" in params: - cls.set_cache(params.pop("seed")) - with diskcache.Cache(cls.cache_path) as cls._cache: - cls.set_cache(seed) - return cls._get_response(params, raise_on_ratelimit_or_timeout=raise_on_ratelimit_or_timeout) - - @classmethod - def instantiate( - cls, - template: Union[str, None], - context: Optional[Dict] = None, - allow_format_str_template: Optional[bool] = False, - ): - if not context or template is None: - return template - if isinstance(template, str): - return template.format(**context) if allow_format_str_template else template - return template(context) - - @classmethod - def _construct_params(cls, context, config, prompt=None, messages=None, allow_format_str_template=False): - params = config.copy() - model = config["model"] - prompt = config.get("prompt") if prompt is None else prompt - messages = config.get("messages") if messages is None else messages - # either "prompt" should be in config (for being compatible with non-chat models) - # or "messages" should be in config (for tuning chat models only) - if prompt is None and (model in cls.chat_models or issubclass(cls, ChatCompletion)): - if messages is None: - raise ValueError("Either prompt or messages should be in config for chat models.") - if prompt is None: - params["messages"] = ( - [ - { - **m, - "content": cls.instantiate(m["content"], context, allow_format_str_template), - } - if m.get("content") - else m - for m in messages - ] - if context - else messages - ) - elif model in cls.chat_models or issubclass(cls, ChatCompletion): - # convert prompt to messages - params["messages"] = [ - { - "role": "user", - "content": cls.instantiate(prompt, context, allow_format_str_template), - }, - ] - params.pop("prompt", None) - else: - params["prompt"] = cls.instantiate(prompt, context, allow_format_str_template) - return params - - @classmethod - def extract_text(cls, response: dict) -> List[str]: - """Extract the text from a completion or chat response. 
- - Args: - response (dict): The response from OpenAI API. - - Returns: - A list of text in the responses. - """ - choices = response["choices"] - if "text" in choices[0]: - return [choice["text"] for choice in choices] - return [choice["message"].get("content", "") for choice in choices] - - @classmethod - def extract_text_or_function_call(cls, response: dict) -> List[str]: - """Extract the text or function calls from a completion or chat response. - - Args: - response (dict): The response from OpenAI API. - - Returns: - A list of text or function calls in the responses. - """ - choices = response["choices"] - if "text" in choices[0]: - return [choice["text"] for choice in choices] - return [ - choice["message"] if "function_call" in choice["message"] else choice["message"].get("content", "") - for choice in choices - ] - - @classmethod - @property - def logged_history(cls) -> Dict: - """Return the book keeping dictionary.""" - return cls._history_dict - - @classmethod - def print_usage_summary(cls) -> Dict: - """Return the usage summary.""" - if cls._history_dict is None: - print("No usage summary available.", flush=True) - - token_count_summary = defaultdict(lambda: {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}) - - if not cls._history_compact: - source = cls._history_dict.values() - total_cost = sum(msg_pair["response"]["cost"] for msg_pair in source) - else: - # source = cls._history_dict["token_count"] - # total_cost = sum(cls._history_dict['cost']) - total_cost = sum(sum(value_list["cost"]) for value_list in cls._history_dict.values()) - source = ( - token_data for value_list in cls._history_dict.values() for token_data in value_list["token_count"] - ) - - for entry in source: - if not cls._history_compact: - model = entry["response"]["model"] - token_data = entry["response"]["usage"] - else: - model = entry["model"] - token_data = entry - - token_count_summary[model]["prompt_tokens"] += token_data["prompt_tokens"] - token_count_summary[model]["completion_tokens"] += token_data["completion_tokens"] - token_count_summary[model]["total_tokens"] += token_data["total_tokens"] - - print(f"Total cost: {total_cost}", flush=True) - for model, counts in token_count_summary.items(): - print( - f"Token count summary for model {model}: prompt_tokens: {counts['prompt_tokens']}, completion_tokens: {counts['completion_tokens']}, total_tokens: {counts['total_tokens']}", - flush=True, - ) - - @classmethod - def start_logging( - cls, history_dict: Optional[Dict] = None, compact: Optional[bool] = True, reset_counter: Optional[bool] = True - ): - """Start book keeping. - - Args: - history_dict (Dict): A dictionary for book keeping. - If no provided, a new one will be created. - compact (bool): Whether to keep the history dictionary compact. - Compact history contains one key per conversation, and the value is a dictionary - like: - ```python - { - "create_at": [0, 1], - "cost": [0.1, 0.2], - } - ``` - where "created_at" is the index of API calls indicating the order of all the calls, - and "cost" is the cost of each call. This example shows that the conversation is based - on two API calls. The compact format is useful for condensing the history of a conversation. - If compact is False, the history dictionary will contain all the API calls: the key - is the index of the API call, and the value is a dictionary like: - ```python - { - "request": request_dict, - "response": response_dict, - } - ``` - where request_dict is the request sent to OpenAI API, and response_dict is the response. 
- For a conversation containing two API calls, the non-compact history dictionary will be like: - ```python - { - 0: { - "request": request_dict_0, - "response": response_dict_0, - }, - 1: { - "request": request_dict_1, - "response": response_dict_1, - }, - ``` - The first request's messages plus the response is equal to the second request's messages. - For a conversation with many turns, the non-compact history dictionary has a quadratic size - while the compact history dict has a linear size. - reset_counter (bool): whether to reset the counter of the number of API calls. - """ - cls._history_dict = {} if history_dict is None else history_dict - cls._history_compact = compact - cls._count_create = 0 if reset_counter or cls._count_create is None else cls._count_create - - @classmethod - def stop_logging(cls): - """End book keeping.""" - cls._history_dict = cls._count_create = None - - -class ChatCompletion(Completion): - """A class for OpenAI API ChatCompletion. Share the same API as Completion.""" - - default_search_space = Completion.default_search_space.copy() - default_search_space["model"] = tune.choice(["gpt-3.5-turbo", "gpt-4"]) - openai_completion_class = not ERROR and openai.ChatCompletion diff --git a/crazy_functions/agent_fns/general.py b/crazy_functions/agent_fns/general.py index f0b9ce8789..8b2884dec2 100644 --- a/crazy_functions/agent_fns/general.py +++ b/crazy_functions/agent_fns/general.py @@ -9,17 +9,27 @@ def gpt_academic_generate_oai_reply( sender, config, ): - from .bridge_autogen import Completion llm_config = self.llm_config if config is None else config if llm_config is False: return False, None if messages is None: messages = self._oai_messages[sender] - response = Completion.create( - context=messages[-1].pop("context", None), messages=self._oai_system_message + messages, **llm_config + inputs = messages[-1]['content'] + history = [] + for message in messages[:-1]: + history.append(message['content']) + context=messages[-1].pop("context", None) + assert context is None, "预留参数 context 未实现" + + reply = predict_no_ui_long_connection( + inputs=inputs, + llm_kwargs=llm_config, + history=history, + sys_prompt=self._oai_system_message[0]['content'], + console_slience=True ) - return True, Completion.extract_text_or_function_call(response)[0] + return True, reply class AutoGenGeneral(PluginMultiprocessManager): def gpt_academic_print_override(self, user_proxy, message, sender): @@ -45,32 +55,6 @@ def gpt_academic_get_human_input(self, user_proxy, message): else: raise TimeoutError("等待用户输入超时") - # def gpt_academic_generate_oai_reply(self, agent, messages, sender, config): - # from .bridge_autogen import Completion - # if messages is None: - # messages = agent._oai_messages[sender] - - # def instantiate( - # cls, - # template: Union[str, None], - # context: Optional[Dict] = None, - # allow_format_str_template: Optional[bool] = False, - # ): - # if not context or template is None: - # return template - # if isinstance(template, str): - # return template.format(**context) if allow_format_str_template else template - # return template(context) - - # res = predict_no_ui_long_connection( - # messages[-1].pop("context", None), - # llm_kwargs=self.llm_kwargs, - # history=messages, - # sys_prompt=agent._oai_system_message, - # observe_window=None, - # console_slience=False) - # return True, res - def define_agents(self): raise NotImplementedError @@ -85,7 +69,7 @@ def exe_autogen(self, input): for agent_kwargs in agents: agent_cls = agent_kwargs.pop('cls') kwargs = { - 
'llm_config':{}, + 'llm_config':self.llm_kwargs, 'code_execution_config':code_execution_config } kwargs.update(agent_kwargs) diff --git "a/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" "b/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" index 8a530f175c..22429f3d52 100644 --- "a/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" +++ "b/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" @@ -41,11 +41,11 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ "azure-gpt-4", "azure-gpt-4-32k", ] - llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) if llm_kwargs['llm_model'] not in supported_llms: chatbot.append([f"处理任务: {txt}", f"当前插件只支持{str(supported_llms)}, 当前模型{llm_kwargs['llm_model']}."]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return + llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) # 检查当前的模型是否符合要求 API_URL_REDIRECT = get_conf('API_URL_REDIRECT') @@ -56,7 +56,9 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ # 尝试导入依赖,如果缺少依赖,则给出安装建议 try: - import autogen, docker + import autogen + if get_conf("AUTOGEN_USE_DOCKER"): + import docker except: chatbot.append([ f"处理任务: {txt}", f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pyautogen docker```。"]) @@ -67,7 +69,8 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ try: import autogen import glob, os, time, subprocess - subprocess.Popen(['docker', '--version']) + if get_conf("AUTOGEN_USE_DOCKER"): + subprocess.Popen(["docker", "--version"]) except: chatbot.append([f"处理任务: {txt}", f"缺少docker运行环境!"]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index 27b91c267a..4c41f37491 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -548,7 +548,7 @@ def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_s return decorated -def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False): +def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window=[], console_slience=False): """ 发送至LLM,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。 inputs: diff --git a/requirements.txt b/requirements.txt index e832a28ccd..1f86d336e5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,6 +15,7 @@ Markdown pygments pymupdf openai +pyautogen numpy arxiv rich From f7f6db831beb891a9b5daa291db9136c1ef11dc2 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sat, 11 Nov 2023 22:35:06 +0800 Subject: [PATCH 054/117] =?UTF-8?q?=E5=A4=84=E7=90=86=E6=A8=A1=E5=9E=8B?= =?UTF-8?q?=E5=85=BC=E5=AE=B9=E7=9A=84=E4=B8=80=E4=BA=9B=E7=BB=86=E8=8A=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/agent_fns/general.py | 1 + crazy_functions/agent_fns/pipe.py | 7 ++++++- .../\345\244\232\346\231\272\350\203\275\344\275\223.py" | 8 +++++--- request_llms/bridge_all.py | 7 +++++++ 4 files changed, 19 insertions(+), 4 deletions(-) diff --git a/crazy_functions/agent_fns/general.py b/crazy_functions/agent_fns/general.py index 8b2884dec2..49bc4dc89e 100644 --- a/crazy_functions/agent_fns/general.py +++ b/crazy_functions/agent_fns/general.py @@ -29,6 +29,7 @@ def gpt_academic_generate_oai_reply( sys_prompt=self._oai_system_message[0]['content'], console_slience=True ) + assumed_done = 
reply.endswith('\nTERMINATE') return True, reply class AutoGenGeneral(PluginMultiprocessManager): diff --git a/crazy_functions/agent_fns/pipe.py b/crazy_functions/agent_fns/pipe.py index 680e91c944..6ce9961adc 100644 --- a/crazy_functions/agent_fns/pipe.py +++ b/crazy_functions/agent_fns/pipe.py @@ -21,7 +21,7 @@ def __init__(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, w # self.web_port = web_port self.alive = True self.use_docker = get_conf("AUTOGEN_USE_DOCKER") - + self.last_user_input = "" # create a thread to monitor self.heartbeat, terminate the instance if no heartbeat for a long time timeout_seconds = 5 * 60 self.heartbeat_watchdog = WatchDog(timeout=timeout_seconds, bark_fn=self.terminate, interval=5) @@ -55,6 +55,11 @@ def subprocess_worker(self, child_conn): def send_command(self, cmd): # ⭐ run in main process + if cmd == self.last_user_input: + print('repeated input detected, ignore') + cmd = "" + else: + self.last_user_input = cmd self.parent_conn.send(PipeCom("user_input", cmd)) def immediate_showoff_when_possible(self, fp): diff --git "a/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" "b/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" index 590c638fad..8b9a69cd26 100644 --- "a/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" +++ "b/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" @@ -48,11 +48,13 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ "azure-gpt-4", "azure-gpt-4-32k", ] - if llm_kwargs['llm_model'] not in supported_llms: - chatbot.append([f"处理任务: {txt}", f"当前插件只支持{str(supported_llms)}, 当前模型{llm_kwargs['llm_model']}."]) + from request_llms.bridge_all import model_info + if model_info[llm_kwargs['llm_model']]["max_token"] < 8000: # 至少是8k上下文的模型 + chatbot.append([f"处理任务: {txt}", f"当前插件只支持{str(supported_llms)}, 当前模型{llm_kwargs['llm_model']}的最大上下文长度太短, 不能支撑AutoGen运行。"]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return - llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) + if model_info[llm_kwargs['llm_model']]["endpoint"] is not None: # 如果不是本地模型,加载API_KEY + llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) # 检查当前的模型是否符合要求 API_URL_REDIRECT = get_conf('API_URL_REDIRECT') diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index 646e7a64ec..7d13bbddcf 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -242,6 +242,13 @@ def decode(self, *args, **kwargs): mi.update({"endpoint": api2d_endpoint}) model_info.update({model: mi}) +# -=-=-=-=-=-=- azure 对齐支持 -=-=-=-=-=-=- +for model in AVAIL_LLM_MODELS: + if model.startswith('azure-') and (model.replace('azure-','') in model_info.keys()): + mi = model_info[model.replace('azure-','')] + mi.update({"endpoint": azure_endpoint}) + model_info.update({model: mi}) + # -=-=-=-=-=-=- 以下部分是新加入的模型,可能附带额外依赖 -=-=-=-=-=-=- if "claude-1-100k" in AVAIL_LLM_MODELS or "claude-2" in AVAIL_LLM_MODELS: from .bridge_claude import predict_no_ui_long_connection as claude_noui From a55bc0c07cef0498e3e65c6b0605b6141c018f52 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sat, 11 Nov 2023 23:22:09 +0800 Subject: [PATCH 055/117] =?UTF-8?q?AutoGen=E8=87=AA=E5=8A=A8=E5=BF=BD?= =?UTF-8?q?=E7=95=A5=E9=87=8D=E5=A4=8D=E7=9A=84=E8=BE=93=E5=85=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/agent_fns/pipe.py | 10 +++++++--- 
...345\244\232\346\231\272\350\203\275\344\275\223.py" | 5 ----- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/crazy_functions/agent_fns/pipe.py b/crazy_functions/agent_fns/pipe.py index 6ce9961adc..bb3bc78520 100644 --- a/crazy_functions/agent_fns/pipe.py +++ b/crazy_functions/agent_fns/pipe.py @@ -55,12 +55,14 @@ def subprocess_worker(self, child_conn): def send_command(self, cmd): # ⭐ run in main process + repeated = False if cmd == self.last_user_input: - print('repeated input detected, ignore') + repeated = True cmd = "" else: self.last_user_input = cmd self.parent_conn.send(PipeCom("user_input", cmd)) + return repeated, cmd def immediate_showoff_when_possible(self, fp): # ⭐ 主进程 @@ -111,7 +113,7 @@ def main_process_ui_control(self, txt, create_or_resume) -> str: if create_or_resume == 'create': self.cnt = 1 self.parent_conn = self.launch_subprocess_with_pipe() # ⭐⭐⭐ - self.send_command(txt) + repeated, cmd_to_autogen = self.send_command(txt) if txt == 'exit': self.chatbot.append([f"结束", "结束信号已明确,终止AutoGen程序。"]) yield from update_ui(chatbot=self.chatbot, history=self.history) @@ -143,7 +145,9 @@ def main_process_ui_control(self, txt, create_or_resume) -> str: break if msg.cmd == "show": yield from self.overwatch_workdir_file_change() - self.chatbot.append([f"运行阶段-{self.cnt}", msg.content]) + notice = "" + if repeated: notice = "(自动忽略重复的输入)" + self.chatbot.append([f"运行阶段-{self.cnt}(上次用户反馈输入为: 「{cmd_to_autogen}」{notice}", msg.content]) self.cnt += 1 yield from update_ui(chatbot=self.chatbot, history=self.history) if msg.cmd == "interact": diff --git "a/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" "b/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" index 8b9a69cd26..d2adee0086 100644 --- "a/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" +++ "b/crazy_functions/\345\244\232\346\231\272\350\203\275\344\275\223.py" @@ -3,11 +3,6 @@ 测试: - show me the solution of $x^2=cos(x)$, solve this problem with figure, and plot and save image to t.jpg -Testing: - - Crop the image, keeping the bottom half. - - Swap the blue channel and red channel of the image. - - Convert the image to grayscale. - - Convert the CSV file to an Excel spreadsheet. 
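(To make the intent of the `send_command` change in PATCH 055 above concrete, here is a minimal, runnable sketch of the duplicate-input guard. The class and method names are simplified stand-ins; the real `PluginMultiprocessManager` also forwards the command through a multiprocessing pipe to the AutoGen subprocess.)

```python
class CommandGate:
    """Sketch of the repeated-input filter that PATCH 055 introduces."""

    def __init__(self):
        self.last_user_input = ""

    def send_command(self, cmd: str):
        # Return (repeated, cmd): a resubmitted command is replaced by an
        # empty string so the agent is not fed the same instruction twice.
        repeated = False
        if cmd == self.last_user_input:
            repeated = True
            cmd = ""
        else:
            self.last_user_input = cmd
        return repeated, cmd


gate = CommandGate()
assert gate.send_command("plot x^2") == (False, "plot x^2")
assert gate.send_command("plot x^2") == (True, "")  # the duplicate is swallowed
```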
""" From 2d91e438d658220c6b366aecf3aaa81e09eb75c4 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sat, 11 Nov 2023 23:22:50 +0800 Subject: [PATCH 056/117] =?UTF-8?q?=E4=BF=AE=E6=AD=A3internlm=E8=BE=93?= =?UTF-8?q?=E5=85=A5=E8=AE=BE=E5=A4=87bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llms/bridge_internlm.py | 3 ++- request_llms/local_llm_class.py | 35 +++++++++++++++------------------ 2 files changed, 18 insertions(+), 20 deletions(-) diff --git a/request_llms/bridge_internlm.py b/request_llms/bridge_internlm.py index 20b53b442d..b2be36a4e6 100644 --- a/request_llms/bridge_internlm.py +++ b/request_llms/bridge_internlm.py @@ -94,8 +94,9 @@ def adaptor(): inputs = tokenizer([prompt], padding=True, return_tensors="pt") input_length = len(inputs["input_ids"][0]) + device = get_conf('LOCAL_MODEL_DEVICE') for k, v in inputs.items(): - inputs[k] = v.cuda() + inputs[k] = v.to(device) input_ids = inputs["input_ids"] batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1] if generation_config is None: diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py index 38fcfc9134..413df03f22 100644 --- a/request_llms/local_llm_class.py +++ b/request_llms/local_llm_class.py @@ -1,6 +1,6 @@ import time import threading -from toolbox import update_ui +from toolbox import update_ui, Singleton from multiprocessing import Process, Pipe from contextlib import redirect_stdout from request_llms.queued_pipe import create_queue_pipe @@ -26,23 +26,20 @@ def __enter__(self): def __exit__(self, type, value, traceback): self.release() -def SingletonLocalLLM(cls): - """ - Singleton Decroator for LocalLLMHandle - """ - _instance = {} - - def _singleton(*args, **kargs): - if cls not in _instance: - _instance[cls] = cls(*args, **kargs) - return _instance[cls] - elif _instance[cls].corrupted: - _instance[cls] = cls(*args, **kargs) - return _instance[cls] +@Singleton +class GetSingletonHandle(): + def __init__(self): + self.llm_model_already_running = {} + + def get_llm_model_instance(self, cls, *args, **kargs): + if cls not in self.llm_model_already_running: + self.llm_model_already_running[cls] = cls(*args, **kargs) + return self.llm_model_already_running[cls] + elif self.llm_model_already_running[cls].corrupted: + self.llm_model_already_running[cls] = cls(*args, **kargs) + return self.llm_model_already_running[cls] else: - return _instance[cls] - return _singleton - + return self.llm_model_already_running[cls] def reset_tqdm_output(): import sys, tqdm @@ -221,7 +218,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", """ refer to request_llms/bridge_all.py """ - _llm_handle = SingletonLocalLLM(LLMSingletonClass)() + _llm_handle = GetSingletonHandle().get_llm_model_instance(LLMSingletonClass) if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + _llm_handle.get_state() if not _llm_handle.running: @@ -269,7 +266,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp """ chatbot.append((inputs, "")) - _llm_handle = SingletonLocalLLM(LLMSingletonClass)() + _llm_handle = GetSingletonHandle().get_llm_model_instance(LLMSingletonClass) chatbot[-1] = (inputs, load_message + "\n\n" + _llm_handle.get_state()) yield from update_ui(chatbot=chatbot, history=[]) if not _llm_handle.running: From eeb70e966ce7953b6caba910da6838bd044ae1d9 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sat, 11 Nov 2023 23:35:11 +0800 Subject: [PATCH 
057/117] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E6=8F=92=E4=BB=B6?= =?UTF-8?q?=E6=8C=89=E9=92=AE=E9=A1=BA=E5=BA=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functional.py | 18 +++++++++--------- version | 4 ++-- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/crazy_functional.py b/crazy_functional.py index c86aac1510..1e7ca58480 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -74,7 +74,7 @@ def get_crazy_functions(): "批量总结Word文档": { "Group": "学术", "Color": "stop", - "AsButton": True, + "AsButton": False, "Info": "批量总结word文档 | 输入参数为路径", "Function": HotReload(总结word文档) }, @@ -178,6 +178,13 @@ def get_crazy_functions(): "Info": "批量生成函数的注释 | 输入参数为路径", "Function": HotReload(批量生成函数注释) }, + "精准翻译PDF论文": { + "Group": "学术", + "Color": "stop", + "AsButton": True, + "Info": "精准翻译PDF论文为中文 | 输入参数为路径", + "Function": HotReload(批量翻译PDF文档) + }, "保存当前的对话": { "Group": "对话", "AsButton": True, @@ -196,13 +203,6 @@ def get_crazy_functions(): "Info": "查看历史上的今天事件 (这是一个面向开发者的插件Demo) | 不需要输入参数", "Function": HotReload(高阶功能模板函数) }, - "精准翻译PDF论文": { - "Group": "学术", - "Color": "stop", - "AsButton": True, - "Info": "精准翻译PDF论文为中文 | 输入参数为路径", - "Function": HotReload(批量翻译PDF文档) - }, "询问多个GPT模型": { "Group": "对话", "Color": "stop", @@ -563,7 +563,7 @@ def get_crazy_functions(): from crazy_functions.多智能体 import 多智能体终端 function_plugins.update({ - "多智能体终端(微软AutoGen)": { + "AutoGen多智能体终端": { "Group": "智能体", "Color": "stop", "AsButton": True, diff --git a/version b/version index 69a871e0fe..9b33c4f40e 100644 --- a/version +++ b/version @@ -1,5 +1,5 @@ { - "version": 3.58, + "version": 3.59, "show_feature": true, - "new_feature": "修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验" + "new_feature": "AutoGen多智能体插件测试版 <-> 修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验" } From 899bbe9229626d0f4e7bc41193f93c73a9ef167d Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sat, 11 Nov 2023 23:54:24 +0800 Subject: [PATCH 058/117] =?UTF-8?q?=E6=9B=B4=E6=96=B0=E6=8F=90=E7=A4=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/translate_english.json | 1 - docs/translate_japanese.json | 1 - docs/translate_traditionalchinese.json | 1 - toolbox.py | 2 +- 4 files changed, 1 insertion(+), 4 deletions(-) diff --git a/docs/translate_english.json b/docs/translate_english.json index 44361f0263..3cdaf209ac 100644 --- a/docs/translate_english.json +++ b/docs/translate_english.json @@ -430,7 +430,6 @@ "并显示到聊天当中": "And display it in the chat", "插件调度异常": "Plugin scheduling exception", "异常原因": "Exception reason", - "实验性函数调用出错": "Experimental function call error", "当前代理可用性": "Current proxy availability", "异常": "Exception", "将文本按照段落分隔符分割开": "Split the text into paragraphs according to the paragraph separator", diff --git a/docs/translate_japanese.json b/docs/translate_japanese.json index 29ebcc9609..2f80792c4e 100644 --- a/docs/translate_japanese.json +++ b/docs/translate_japanese.json @@ -352,7 +352,6 @@ "感谢热情的": "熱心な感謝", "是本次输出": "今回の出力です", "协议": "プロトコル", - "实验性函数调用出错": "実験的な関数呼び出しエラー", "例如需要翻译的一段话": "翻訳が必要な例文", "本地文件地址": "ローカルファイルアドレス", "更好的UI视觉效果": "より良いUI視覚効果", diff --git a/docs/translate_traditionalchinese.json b/docs/translate_traditionalchinese.json index b75cbdb4b2..9ca7cbaa2a 100644 --- a/docs/translate_traditionalchinese.json +++ 
b/docs/translate_traditionalchinese.json @@ -780,7 +780,6 @@ "检测到程序终止": "偵測到程式終止", "对整个Latex项目进行润色": "對整個Latex專案進行潤色", "方法则会被调用": "方法則會被調用", - "实验性函数调用出错": "實驗性函數調用出錯", "把完整输入-输出结果显示在聊天框": "把完整輸入-輸出結果顯示在聊天框", "本地文件预览": "本地檔案預覽", "接下来请你逐文件分析下面的论文文件": "接下來請你逐檔案分析下面的論文檔案", diff --git a/toolbox.py b/toolbox.py index a5425c083c..293419c3f5 100644 --- a/toolbox.py +++ b/toolbox.py @@ -158,7 +158,7 @@ def decorated(main_input, llm_kwargs, plugin_kwargs, chatbot_with_cookie, histor chatbot_with_cookie.clear() chatbot_with_cookie.append(["插件调度异常", "异常原因"]) chatbot_with_cookie[-1] = (chatbot_with_cookie[-1][0], - f"[Local Message] 实验性函数调用出错: \n\n{tb_str} \n\n当前代理可用性: \n\n{check_proxy(proxies)}") + f"[Local Message] 插件调用出错: \n\n{tb_str} \n\n当前代理可用性: \n\n{check_proxy(proxies)}") yield from update_ui(chatbot=chatbot_with_cookie, history=history, msg=f'异常 {e}') # 刷新界面 return decorated From f34f1091c35bea0e6ce404a1a1214ed9a9879db3 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sun, 12 Nov 2023 14:13:49 +0800 Subject: [PATCH 059/117] fix nougat --- crazy_functions/crazy_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crazy_functions/crazy_utils.py b/crazy_functions/crazy_utils.py index ce7a2e39d7..e674e4f9f6 100644 --- a/crazy_functions/crazy_utils.py +++ b/crazy_functions/crazy_utils.py @@ -748,7 +748,7 @@ def NOUGAT_parse_pdf(self, fp, chatbot, history): yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在加载NOUGAT... (提示:首次运行需要花费较长时间下载NOUGAT参数)", chatbot=chatbot, history=history, delay=0) - self.nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}" --recompute --no-skipping --markdown --batchsize 8', os.getcwd(), timeout=3600) + self.nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}" --batchsize 8', os.getcwd(), timeout=3600) res = glob.glob(os.path.join(dst,'*.mmd')) if len(res) == 0: self.threadLock.release() From c45336a3cdcb9251ac90d85ce62d0109c623f30c Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sun, 12 Nov 2023 15:57:18 +0800 Subject: [PATCH 060/117] change nougat batchsize --- crazy_functions/crazy_utils.py | 2 +- docs/translate_english.json | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/crazy_functions/crazy_utils.py b/crazy_functions/crazy_utils.py index e674e4f9f6..832775d18d 100644 --- a/crazy_functions/crazy_utils.py +++ b/crazy_functions/crazy_utils.py @@ -748,7 +748,7 @@ def NOUGAT_parse_pdf(self, fp, chatbot, history): yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在加载NOUGAT... 
(提示:首次运行需要花费较长时间下载NOUGAT参数)", chatbot=chatbot, history=history, delay=0) - self.nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}" --batchsize 8', os.getcwd(), timeout=3600) + self.nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}" --batchsize 4', os.getcwd(), timeout=3600) res = glob.glob(os.path.join(dst,'*.mmd')) if len(res) == 0: self.threadLock.release() diff --git a/docs/translate_english.json b/docs/translate_english.json index 3cdaf209ac..f7deee3845 100644 --- a/docs/translate_english.json +++ b/docs/translate_english.json @@ -501,7 +501,8 @@ "环境变量": "Environment variable", "不支持通过环境变量设置!": "Setting through environment variables is not supported!", "加载失败!": "Loading failed!", - "成功读取环境变量": "Successfully read environment variables", + "如": " e.g., ", + "成功读取环境变量": "Successfully read environment variable: ", "本项目现已支持OpenAI和API2D的api-key": "This project now supports api-keys for OpenAI and API2D", "也支持同时填写多个api-key": "It also supports filling in multiple api-keys at the same time", "您既可以在config.py中修改api-key": "You can modify the api-key in config.py", @@ -512,7 +513,7 @@ "请在config文件中修改API密钥之后再运行": "Please modify the API key in the config file before running", "网络代理状态": "Network proxy status", "未配置": "Not configured", - "无代理状态下很可能无法访问OpenAI家族的模型": "It is very likely that you cannot access OpenAI's models without a proxy", + "无代理状态下很可能无法访问OpenAI家族的模型": "", "建议": "Suggestion", "检查USE_PROXY选项是否修改": "Check if the USE_PROXY option has been modified", "已配置": "Configured", From 51c70e9e470fd1274ef5cf5a168769335e40b52a Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sun, 12 Nov 2023 16:04:55 +0800 Subject: [PATCH 061/117] update translation --- docs/translate_english.json | 117 +++++++++++++++++++++++++++++++++++- docs/translate_std.json | 5 +- 2 files changed, 120 insertions(+), 2 deletions(-) diff --git a/docs/translate_english.json b/docs/translate_english.json index f7deee3845..955dcaf9a2 100644 --- a/docs/translate_english.json +++ b/docs/translate_english.json @@ -2788,5 +2788,120 @@ "加载已保存": "Load saved", "打开浏览器页面": "Open browser page", "解锁插件": "Unlock plugin", - "如果话筒激活 / 如果处于回声收尾阶段": "If the microphone is active / If it is in the echo tail stage" + "如果话筒激活 / 如果处于回声收尾阶段": "If the microphone is active / If it is in the echo tail stage", + "分辨率": "Resolution", + "分析行业动态": "Analyze industry trends", + "在项目实施过程中提供支持": "Provide support during project implementation", + "azure 对齐支持 -=-=-=-=-=-=-": "Azure alignment support -=-=-=-=-=-=-", + "默认的系统提示词": "Default system prompts", + "为您解释复杂的技术概念": "Explain complex technical concepts to you", + "提供项目管理和协作建议": "Provide project management and collaboration advice", + "请从AVAIL_LLM_MODELS中选择": "Please select from AVAIL_LLM_MODELS", + "提高编程能力": "Improve programming skills", + "请注意Newbing组件已不再维护": "Please note that the Newbing component is no longer maintained", + "用于定义和切换多个azure模型 --": "Used to define and switch between multiple Azure models --", + "支持 256x256": "Supports 256x256", + "定义界面上“询问多个GPT模型”插件应该使用哪些模型": "Define which models the 'Ask multiple GPT models' plugin should use on the interface", + "必须是.png格式": "Must be in .png format", + "tokenizer只用于粗估token数量": "The tokenizer is only used to estimate the number of tokens", + "协助您进行文案策划和内容创作": "Assist you in copywriting and content creation", + "帮助您巩固编程基础": "Help you consolidate your programming foundation", + "修改需求": "Modify requirements", + "确保项目顺利进行": "Ensure the smooth progress of the project", + "帮助您了解市场发展和竞争态势": "Help you 
understand market development and competitive situation", + "不需要动态切换": "No need for dynamic switching", + "解答您在学习过程中遇到的问题": "Answer the questions you encounter during the learning process", + "Endpoint不正确": "Endpoint is incorrect", + "提供编程思路和建议": "Provide programming ideas and suggestions", + "先上传图片": "Upload the image first", + "提供计算机科学、数据科学、人工智能等相关领域的学习资源和建议": "Provide learning resources and advice in computer science, data science, artificial intelligence, and other related fields", + "提供写作建议和技巧": "Provide writing advice and tips", + "间隔": "Interval", + "此后不需要在此处添加api2d的接口了": "No need to add the api2d interface here anymore", + "4. 学习辅导": "4. Learning guidance", + "智谱AI大模型": "Zhipu AI large model", + "3. 项目支持": "3. Project support", + "但这是意料之中的": "But this is expected", + "检查endpoint是否可用": "Check if the endpoint is available", + "接入智谱大模型": "Access the intelligent spectrum model", + "如果您有任何问题或需要解答的议题": "If you have any questions or topics that need answers", + "api2d 对齐支持 -=-=-=-=-=-=-": "api2d alignment support -=-=-=-=-=-=-", + "支持多线程": "Support multi-threading", + "再输入修改需求": "Enter modification requirements again", + "Endpoint不满足要求": "Endpoint does not meet the requirements", + "检查endpoint是否合法": "Check if the endpoint is valid", + "为您制定技术战略提供参考和建议": "Provide reference and advice for developing your technical strategy", + "支持 1024x1024": "Support 1024x1024", + "因为下面的代码会自动添加": "Because the following code will be automatically added", + "尝试加载模型": "Try to load the model", + "使用DALLE3生成图片 | 输入参数字符串": "Use DALLE3 to generate images | Input parameter string", + "当前论文无需解析": "The current paper does not need to be parsed", + "单个azure模型部署": "Deploy a single Azure model", + "512x512 或 1024x1024": "512x512 or 1024x1024", + "至少是8k上下文的模型": "A model with at least 8k context", + "自动忽略重复的输入": "Automatically ignore duplicate inputs", + "让您更好地掌握知识": "Help you better grasp knowledge", + "文件列表": "File list", + "并在不同模型之间用": "And use it between different models", + "插件调用出错": "Plugin call error", + "帮助您撰写文章、报告、散文、故事等": "Help you write articles, reports, essays, stories, etc.", + "*实验性功能*": "*Experimental feature*", + "2. 编程": "2. Programming", + "让您更容易理解": "Make it easier for you to understand", + "的最大上下文长度太短": "The maximum context length is too short", + "方法二": "Method 2", + "多个azure模型部署+动态切换": "Deploy multiple Azure models + dynamic switching", + "详情请见额外文档 docs\\use_azure.md": "For details, please refer to the additional document docs\\use_azure.md", + "包括但不限于 Python、Java、C++ 等": "Including but not limited to Python, Java, C++, etc.", + "为您提供业界最新的新闻和技术趋势": "Providing you with the latest industry news and technology trends", + "自动检测并屏蔽失效的KEY": "Automatically detect and block invalid keys", + "请勿使用": "Please do not use", + "最后输入分辨率": "Enter the resolution at last", + "图片": "Image", + "请检查AZURE_ENDPOINT的配置! 当前的Endpoint为": "Please check the configuration of AZURE_ENDPOINT! 
The current Endpoint is", + "图片修改": "Image modification", + "已经收集到所有信息": "All information has been collected", + "加载API_KEY": "Loading API_KEY", + "协助您编写代码": "Assist you in writing code", + "我可以为您提供以下服务": "I can provide you with the following services", + "排队中请稍后 ...": "Please wait in line ...", + "建议您使用英文提示词": "It is recommended to use English prompts", + "不能支撑AutoGen运行": "Cannot support AutoGen operation", + "帮助您解决编程问题": "Help you solve programming problems", + "上次用户反馈输入为": "Last user feedback input is", + "请随时告诉我您的需求": "Please feel free to tell me your needs", + "有 sys_prompt 接口": "There is a sys_prompt interface", + "可能会覆盖之前的配置": "May overwrite previous configuration", + "5. 行业动态和趋势分析": "5. Industry dynamics and trend analysis", + "正在等待线程锁": "Waiting for thread lock", + "请输入分辨率": "Please enter the resolution", + "接驳void-terminal": "Connecting to void-terminal", + "启动DALLE2图像修改向导程序": "Launching DALLE2 image modification wizard program", + "加载模型失败": "Failed to load the model", + "是否使用Docker容器运行代码": "Whether to run the code using Docker container", + "请输入修改需求": "Please enter modification requirements", + "作为您的写作和编程助手": "As your writing and programming assistant", + "然后再次点击本插件": "Then click this plugin again", + "需要动态切换": "Dynamic switching is required", + "文心大模型4.0": "Wenxin Large Model 4.0", + "找不到任何.pdf拓展名的文件": "Cannot find any file with .pdf extension", + "在使用AutoGen插件时": "When using the AutoGen plugin", + "协助您规划项目进度和任务分配": "Assist you in planning project schedules and task assignments", + "1. 写作": "1. Writing", + "你亲手写的api名称": "The API name you wrote yourself", + "使用DALLE2生成图片 | 输入参数字符串": "Generate images using DALLE2 | Input parameter string", + "方法一": "Method 1", + "我会尽力提供帮助": "I will do my best to provide assistance", + "多个azure模型": "Multiple Azure models", + "准备就绪": "Ready", + "请随时提问": "Please feel free to ask", + "如果需要使用AZURE": "If you need to use AZURE", + "如果不是本地模型": "If it is not a local model", + "AZURE_CFG_ARRAY中配置的模型必须以azure开头": "The models configured in AZURE_CFG_ARRAY must start with 'azure'", + "API key has been deactivated. OpenAI以账户失效为由": "API key has been deactivated. 
OpenAI considers it as an account failure", + "请先上传图像": "Please upload the image first", + "高优先级": "High priority", + "请配置ZHIPUAI_API_KEY": "Please configure ZHIPUAI_API_KEY", + "单个azure模型": "Single Azure model", + "预留参数 context 未实现": "Reserved parameter 'context' not implemented" } \ No newline at end of file diff --git a/docs/translate_std.json b/docs/translate_std.json index 90eb68575f..ee8b2c69a3 100644 --- a/docs/translate_std.json +++ b/docs/translate_std.json @@ -94,5 +94,8 @@ "解析一个Matlab项目": "AnalyzeAMatlabProject", "函数动态生成": "DynamicFunctionGeneration", "多智能体终端": "MultiAgentTerminal", - "多智能体": "MultiAgent" + "多智能体": "MultiAgent", + "图片生成_DALLE2": "ImageGeneration_DALLE2", + "图片生成_DALLE3": "ImageGeneration_DALLE3", + "图片修改_DALLE2": "ImageModification_DALLE2" } \ No newline at end of file From 94ecbde198025912aec29e97ac88c57c128a3794 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sun, 12 Nov 2023 18:22:46 +0800 Subject: [PATCH 062/117] =?UTF-8?q?=E5=B0=86AutoGen=E6=94=BE=E5=9B=9E?= =?UTF-8?q?=E4=B8=8B=E6=8B=89=E8=8F=9C=E5=8D=95=E4=B8=AD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functional.py | 37 +++++++++++++------------------------ 1 file changed, 13 insertions(+), 24 deletions(-) diff --git a/crazy_functional.py b/crazy_functional.py index 1e7ca58480..be05efe8cf 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -74,7 +74,7 @@ def get_crazy_functions(): "批量总结Word文档": { "Group": "学术", "Color": "stop", - "AsButton": False, + "AsButton": True, "Info": "批量总结word文档 | 输入参数为路径", "Function": HotReload(总结word文档) }, @@ -178,13 +178,6 @@ def get_crazy_functions(): "Info": "批量生成函数的注释 | 输入参数为路径", "Function": HotReload(批量生成函数注释) }, - "精准翻译PDF论文": { - "Group": "学术", - "Color": "stop", - "AsButton": True, - "Info": "精准翻译PDF论文为中文 | 输入参数为路径", - "Function": HotReload(批量翻译PDF文档) - }, "保存当前的对话": { "Group": "对话", "AsButton": True, @@ -203,6 +196,13 @@ def get_crazy_functions(): "Info": "查看历史上的今天事件 (这是一个面向开发者的插件Demo) | 不需要输入参数", "Function": HotReload(高阶功能模板函数) }, + "精准翻译PDF论文": { + "Group": "学术", + "Color": "stop", + "AsButton": True, + "Info": "精准翻译PDF论文为中文 | 输入参数为路径", + "Function": HotReload(批量翻译PDF文档) + }, "询问多个GPT模型": { "Group": "对话", "Color": "stop", @@ -349,16 +349,16 @@ def get_crazy_functions(): print('Load function plugin failed') try: - from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3, 图片修改_DALLE2 + from crazy_functions.图片生成 import 图片生成, 图片生成_DALLE3 function_plugins.update({ - "图片生成_DALLE2(先切换模型到openai或api2d)": { + "图片生成(先切换模型到openai或api2d)": { "Group": "对话", "Color": "stop", "AsButton": False, "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 256x256, 512x512, 1024x1024", # 高级参数输入区的显示提示 "Info": "使用DALLE2生成图片 | 输入参数字符串,提供图像的内容", - "Function": HotReload(图片生成_DALLE2) + "Function": HotReload(图片生成) }, }) function_plugins.update({ @@ -372,17 +372,6 @@ def get_crazy_functions(): "Function": HotReload(图片生成_DALLE3) }, }) - # function_plugins.update({ - # "图片修改_DALLE2(启动DALLE2图像修改向导程序)": { - # "Group": "对话", - # "Color": "stop", - # "AsButton": False, - # "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - # "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 1024x1024, 1792x1024, 1024x1792", # 高级参数输入区的显示提示 - # # "Info": "使用DALLE2修改图片 | 输入参数字符串,提供图像的内容", - # "Function": HotReload(图片修改_DALLE2) - # }, - # }) except: print('Load function plugin failed') @@ -563,10 +552,10 @@ def get_crazy_functions(): from crazy_functions.多智能体 import 多智能体终端 function_plugins.update({ - "AutoGen多智能体终端": { + 
"AutoGen多智能体终端(仅供测试)": { "Group": "智能体", "Color": "stop", - "AsButton": True, + "AsButton": False, "Function": HotReload(多智能体终端) } }) From 69f37df356889f22c8fdfebdb580cf9815a6a79d Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sun, 12 Nov 2023 22:15:54 +0800 Subject: [PATCH 063/117] =?UTF-8?q?=E7=B4=A7=E6=80=A5=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?=E7=BB=88=E7=BB=93=E7=82=B9=E8=A6=86=E7=9B=96=E9=94=99=E8=AF=AF?= =?UTF-8?q?=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llms/bridge_all.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index 7d13bbddcf..89c9f76cee 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -8,7 +8,7 @@ 具备多线程调用能力的函数:在函数插件中被调用,灵活而简洁 2. predict_no_ui_long_connection(...) """ -import tiktoken +import tiktoken, copy from functools import lru_cache from concurrent.futures import ThreadPoolExecutor from toolbox import get_conf, trimmed_format_exc @@ -238,14 +238,14 @@ def decode(self, *args, **kwargs): # -=-=-=-=-=-=- api2d 对齐支持 -=-=-=-=-=-=- for model in AVAIL_LLM_MODELS: if model.startswith('api2d-') and (model.replace('api2d-','') in model_info.keys()): - mi = model_info[model.replace('api2d-','')] + mi = copy.deepcopy(model_info[model.replace('api2d-','')]) mi.update({"endpoint": api2d_endpoint}) model_info.update({model: mi}) # -=-=-=-=-=-=- azure 对齐支持 -=-=-=-=-=-=- for model in AVAIL_LLM_MODELS: if model.startswith('azure-') and (model.replace('azure-','') in model_info.keys()): - mi = model_info[model.replace('azure-','')] + mi = copy.deepcopy(model_info[model.replace('azure-','')]) mi.update({"endpoint": azure_endpoint}) model_info.update({model: mi}) From 4306f8fd3e43619aeafd23848190195d5506e562 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sun, 12 Nov 2023 22:26:00 +0800 Subject: [PATCH 064/117] =?UTF-8?q?version=203.60=20=E5=BC=80=E6=94=BEAuto?= =?UTF-8?q?Gen=E5=A4=9A=E6=99=BA=E8=83=BD=E4=BD=93=E6=8F=92=E4=BB=B6?= =?UTF-8?q?=E6=B5=8B=E8=AF=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version b/version index 9b33c4f40e..4e18fc8616 100644 --- a/version +++ b/version @@ -1,5 +1,5 @@ { - "version": 3.59, + "version": 3.60, "show_feature": true, "new_feature": "AutoGen多智能体插件测试版 <-> 修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验" } From 67a98de8415ddacbec9529775a1537bb24f4ee1a Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sun, 12 Nov 2023 22:27:29 +0800 Subject: [PATCH 065/117] Merge branch 'master' of github.com:binary-husky/chatgpt_academic --- README.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 7703208a9e..4d163f1c8c 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,8 @@ > **Note** > -> 2023.10.28: 紧急修复了若干问题,安装依赖时,请选择`requirements.txt`中**指定的版本**。 +> 2023.11.12: 紧急修复了endpoint异常的问题。 > -> `pip install -r requirements.txt` -> -> 2023.11.7: 本项目开源免费,近期发现有人蔑视开源协议并利用本项目违规圈钱,请提高警惕,谨防上当受骗。 +> 2023.11.7: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目开源免费,近期发现有人蔑视开源协议并利用本项目违规圈钱,请提高警惕,谨防上当受骗。 @@ -288,7 +286,8 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h ### II:版本: -- version 3.60(todo): 优化虚空终端,并引入AutoGen作为新一代插件的基石 +- version 3.70(todo): 优化AutoGen插件主题并设计一系列衍生插件 +- version 3.60: 引入AutoGen作为新一代插件的基石 - 
version 3.57: 支持GLM3,星火v3,文心一言v4,修复本地模型的并发BUG - version 3.56: 支持动态追加基础功能按钮,新汇报PDF汇总页面 - version 3.55: 重构前端界面,引入悬浮窗口与菜单栏 From 7e56ace2c0a9ac4e3b88befd11d8380ee96c7a26 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sun, 12 Nov 2023 23:31:50 +0800 Subject: [PATCH 066/117] =?UTF-8?q?=E6=9B=B4=E6=96=B0README?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 41 ++-- docs/README.Arabic.md | 344 +++++++++++++++++++++++++++++++++ docs/README.English.md | 357 ++++++++++++++++++++++++++++++++++ docs/README.French.md | 357 ++++++++++++++++++++++++++++++++++ docs/README.German.md | 364 +++++++++++++++++++++++++++++++++++ docs/README.Italian.md | 361 ++++++++++++++++++++++++++++++++++ docs/README.Japanese.md | 345 +++++++++++++++++++++++++++++++++ docs/README.Korean.md | 364 +++++++++++++++++++++++++++++++++++ docs/README.Portuguese.md | 358 ++++++++++++++++++++++++++++++++++ docs/README.Russian.md | 361 ++++++++++++++++++++++++++++++++++ docs/README.md.German.md | 307 ----------------------------- docs/README.md.Italian.md | 316 ------------------------------ docs/README.md.Korean.md | 270 -------------------------- docs/README.md.Portuguese.md | 324 ------------------------------- docs/README_EN.md | 322 ------------------------------- docs/README_FR.md | 323 ------------------------------- docs/README_JP.md | 329 ------------------------------- docs/README_RS.md | 278 -------------------------- toolbox.py | 26 ++- version | 2 +- 20 files changed, 3242 insertions(+), 2507 deletions(-) create mode 100644 docs/README.Arabic.md create mode 100644 docs/README.English.md create mode 100644 docs/README.French.md create mode 100644 docs/README.German.md create mode 100644 docs/README.Italian.md create mode 100644 docs/README.Japanese.md create mode 100644 docs/README.Korean.md create mode 100644 docs/README.Portuguese.md create mode 100644 docs/README.Russian.md delete mode 100644 docs/README.md.German.md delete mode 100644 docs/README.md.Italian.md delete mode 100644 docs/README.md.Korean.md delete mode 100644 docs/README.md.Portuguese.md delete mode 100644 docs/README_EN.md delete mode 100644 docs/README_FR.md delete mode 100644 docs/README_JP.md delete mode 100644 docs/README_RS.md diff --git a/README.md b/README.md index 4d163f1c8c..8e1e55b92d 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ **如果喜欢这个项目,请给它一个Star;如果您发明了好用的快捷键或插件,欢迎发pull requests!** -If you like this project, please give it a Star. We also have a README in [English|](docs/README_EN.md)[日本語|](docs/README_JP.md)[한국어|](https://github.com/mldljyh/ko_gpt_academic)[Русский|](docs/README_RS.md)[Français](docs/README_FR.md) translated by this project itself. +If you like this project, please give it a Star. We also have a README in [English|](docs/README.English.md)[日本語|](docs/README.Japanese.md)[한국어|](docs/README.Korean.md)[Русский|](docs/README.Russian.md)[Français](docs/README.French.md) translated by this project itself. To translate this project to arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental). > **Note** @@ -28,34 +28,31 @@ To translate this project to arbitrary language with GPT, read and run [`multi_l 功能(⭐= 近期新增功能) | 描述 --- | --- -⭐[接入新模型](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! 
| 百度[千帆](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)与文心一言, [通义千问](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary),上海AI-Lab[书生](https://github.com/InternLM/InternLM),讯飞[星火](https://xinghuo.xfyun.cn/),[LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) -一键润色 | 支持一键润色、一键查找论文语法错误 -一键中英互译 | 一键中英互译 -一键代码解释 | 显示代码、解释代码、生成代码、给代码加注释 +⭐[接入新模型](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | 百度[千帆](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)与文心一言, [通义千问](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary),上海AI-Lab[书生](https://github.com/InternLM/InternLM),讯飞[星火](https://xinghuo.xfyun.cn/),[LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf),智谱API,DALLE3 +润色、翻译、代码解释 | 一键润色、翻译、查找论文语法错误、解释代码 [自定义快捷键](https://www.bilibili.com/video/BV14s4y1E7jN) | 支持自定义快捷键 -模块化设计 | 支持自定义强大的[函数插件](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions),插件支持[热更新](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) -[自我程序剖析](https://www.bilibili.com/video/BV1cj411A7VW) | [函数插件] [一键读懂](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)本项目的源代码 -[程序剖析](https://www.bilibili.com/video/BV1cj411A7VW) | [函数插件] 一键可以剖析其他Python/C/C++/Java/Lua/...项目树 -读论文、[翻译](https://www.bilibili.com/video/BV1KT411x7Wn)论文 | [函数插件] 一键解读latex/pdf论文全文并生成摘要 -Latex全文[翻译](https://www.bilibili.com/video/BV1nk4y1Y7Js/)、[润色](https://www.bilibili.com/video/BV1FT411H7c5/) | [函数插件] 一键翻译或润色latex论文 -批量注释生成 | [函数插件] 一键批量生成函数注释 -Markdown[中英互译](https://www.bilibili.com/video/BV1yo4y157jV/) | [函数插件] 看到上面5种语言的[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)了吗? -chat分析报告生成 | [函数插件] 运行后自动生成总结汇报 -[PDF论文全文翻译功能](https://www.bilibili.com/video/BV1KT411x7Wn) | [函数插件] PDF论文提取题目&摘要+翻译全文(多线程) -[Arxiv小助手](https://www.bilibili.com/video/BV1LM4y1279X) | [函数插件] 输入arxiv文章url即可一键翻译摘要+下载PDF -Latex论文一键校对 | [函数插件] 仿Grammarly对Latex文章进行语法、拼写纠错+输出对照PDF -[谷歌学术统合小助手](https://www.bilibili.com/video/BV19L411U7ia) | [函数插件] 给定任意谷歌学术搜索页面URL,让gpt帮你[写relatedworks](https://www.bilibili.com/video/BV1GP411U7Az/) -互联网信息聚合+GPT | [函数插件] 一键[让GPT从互联网获取信息](https://www.bilibili.com/video/BV1om4y127ck)回答问题,让信息永不过时 -⭐Arxiv论文精细翻译 ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [函数插件] 一键[以超高质量翻译arxiv论文](https://www.bilibili.com/video/BV1dz4y1v77A/),目前最好的论文翻译工具 -⭐[实时语音对话输入](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [函数插件] 异步[监听音频](https://www.bilibili.com/video/BV1AV4y187Uy/),自动断句,自动寻找回答时机 +模块化设计 | 支持自定义强大的[插件](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions),插件支持[热更新](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) +[程序剖析](https://www.bilibili.com/video/BV1cj411A7VW) | [插件] 一键可以剖析Python/C/C++/Java/Lua/...项目树 或 [自我剖析](https://www.bilibili.com/video/BV1cj411A7VW) +读论文、[翻译](https://www.bilibili.com/video/BV1KT411x7Wn)论文 | [插件] 一键解读latex/pdf论文全文并生成摘要 +Latex全文[翻译](https://www.bilibili.com/video/BV1nk4y1Y7Js/)、[润色](https://www.bilibili.com/video/BV1FT411H7c5/) | [插件] 一键翻译或润色latex论文 +批量注释生成 | [插件] 一键批量生成函数注释 +Markdown[中英互译](https://www.bilibili.com/video/BV1yo4y157jV/) | [插件] 看到上面5种语言的[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)了吗? 
+chat分析报告生成 | [插件] 运行后自动生成总结汇报 +[PDF论文全文翻译功能](https://www.bilibili.com/video/BV1KT411x7Wn) | [插件] PDF论文提取题目&摘要+翻译全文(多线程) +[Arxiv小助手](https://www.bilibili.com/video/BV1LM4y1279X) | [插件] 输入arxiv文章url即可一键翻译摘要+下载PDF +Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼写纠错+输出对照PDF +[谷歌学术统合小助手](https://www.bilibili.com/video/BV19L411U7ia) | [插件] 给定任意谷歌学术搜索页面URL,让gpt帮你[写relatedworks](https://www.bilibili.com/video/BV1GP411U7Az/) +互联网信息聚合+GPT | [插件] 一键[让GPT从互联网获取信息](https://www.bilibili.com/video/BV1om4y127ck)回答问题,让信息永不过时 +⭐Arxiv论文精细翻译 ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [插件] 一键[以超高质量翻译arxiv论文](https://www.bilibili.com/video/BV1dz4y1v77A/),目前最好的论文翻译工具 +⭐[实时语音对话输入](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [插件] 异步[监听音频](https://www.bilibili.com/video/BV1AV4y187Uy/),自动断句,自动寻找回答时机 公式/图片/表格显示 | 可以同时显示公式的[tex形式和渲染形式](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png),支持公式、代码高亮 -多线程函数插件支持 | 支持多线调用chatgpt,一键处理[海量文本](https://www.bilibili.com/video/BV1FT411H7c5/)或程序 +⭐AutoGen多智能体插件 | [插件] 借助微软AutoGen,探索多Agent的智能涌现可能! 启动暗色[主题](https://github.com/binary-husky/gpt_academic/issues/173) | 在浏览器url后面添加```/?__theme=dark```可以切换dark主题 [多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持 | 同时被GPT3.5、GPT4、[清华ChatGLM2](https://github.com/THUDM/ChatGLM2-6B)、[复旦MOSS](https://github.com/OpenLMLab/MOSS)同时伺候的感觉一定会很不错吧? ⭐ChatGLM2微调模型 | 支持加载ChatGLM2微调模型,提供ChatGLM2微调辅助插件 更多LLM模型接入,支持[huggingface部署](https://huggingface.co/spaces/qingxu98/gpt-academic) | 加入Newbing接口(新必应),引入清华[Jittorllms](https://github.com/Jittor/JittorLLMs)支持[LLaMA](https://github.com/facebookresearch/llama)和[盘古α](https://openi.org.cn/pangu/) ⭐[void-terminal](https://github.com/binary-husky/void-terminal) pip包 | 脱离GUI,在Python中直接调用本项目的所有函数插件(开发中) -⭐虚空终端插件 | [函数插件] 用自然语言,直接调度本项目其他插件 +⭐虚空终端插件 | [插件] 用自然语言,直接调度本项目其他插件 更多新功能展示 (图像生成等) …… | 见本文档结尾处 …… diff --git a/docs/README.Arabic.md b/docs/README.Arabic.md new file mode 100644 index 0000000000..791fb69a03 --- /dev/null +++ b/docs/README.Arabic.md @@ -0,0 +1,344 @@ + + + +> **ملحوظة** +> +> تمت ترجمة هذا الملف README باستخدام GPT (بواسطة المكون الإضافي لهذا المشروع) وقد لا تكون الترجمة 100٪ موثوقة، يُرجى التمييز بعناية بنتائج الترجمة. +> +> 2023.11.7: عند تثبيت التبعيات، يُرجى اختيار الإصدار المُحدد في `requirements.txt`. الأمر للتثبيت: `pip install -r requirements.txt`. + +#
GPT الأكاديمي
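(An aside on PATCH 063 above, "紧急修复终结点覆盖错误的问题", i.e. "urgently fix the endpoint-override bug": the clobbering it fixes comes from aliasing one shared dict across the derived api2d-/azure- model entries. Below is a minimal, self-contained reproduction; the endpoint strings are illustrative abbreviations, not the project's full model table.)

```python
import copy

model_info = {"gpt-4": {"endpoint": "https://api.openai.com/v1/chat/completions"}}

# Buggy aliasing: `mi` is the *same* object as model_info["gpt-4"], so every
# .update() rewrites the endpoint for all aliases at once.
mi = model_info["gpt-4"]
mi.update({"endpoint": "https://openai.api2d.net/v1/chat/completions"})
model_info["api2d-gpt-4"] = mi
mi = model_info["gpt-4"]
mi.update({"endpoint": "https://example-azure-endpoint/v1"})
model_info["azure-gpt-4"] = mi
assert model_info["api2d-gpt-4"]["endpoint"] == "https://example-azure-endpoint/v1"  # clobbered!

# Fixed aliasing (what the patch does): deep-copy before customizing.
model_info = {"gpt-4": {"endpoint": "https://api.openai.com/v1/chat/completions"}}
mi = copy.deepcopy(model_info["gpt-4"])
mi.update({"endpoint": "https://openai.api2d.net/v1/chat/completions"})
model_info["api2d-gpt-4"] = mi
assert model_info["gpt-4"]["endpoint"] == "https://api.openai.com/v1/chat/completions"  # intact
```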
+ +**إذا كنت تحب هذا المشروع، فيُرجى إعطاؤه Star. لترجمة هذا المشروع إلى لغة عشوائية باستخدام GPT، قم بقراءة وتشغيل [`multi_language.py`](multi_language.py) (تجريبي). + +> **ملحوظة** +> +> 1. يُرجى ملاحظة أنها الإضافات (الأزرار) المميزة فقط التي تدعم قراءة الملفات، وبعض الإضافات توجد في قائمة منسدلة في منطقة الإضافات. بالإضافة إلى ذلك، نرحب بأي Pull Request جديد بأعلى أولوية لأي إضافة جديدة. +> +> 2. تُوضّح كل من الملفات في هذا المشروع وظيفتها بالتفصيل في [تقرير الفهم الذاتي `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). يمكنك في أي وقت أن تنقر على إضافة وظيفة ذات صلة لاستدعاء GPT وإعادة إنشاء تقرير الفهم الذاتي للمشروع. للأسئلة الشائعة [`الويكي`](https://github.com/binary-husky/gpt_academic/wiki). [طرق التثبيت العادية](#installation) | [نصب بنقرة واحدة](https://github.com/binary-husky/gpt_academic/releases) | [تعليمات التكوين](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). +> +> 3. يتم توافق هذا المشروع مع ودعم توصيات اللغة البيجائية الأكبر شمولًا وشجاعة لمثل ChatGLM. يمكنك توفير العديد من مفاتيح Api المشتركة في تكوين الملف، مثل `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. عند تبديل مؤقت لـ `API_KEY`، قم بإدخال `API_KEY` المؤقت في منطقة الإدخال ثم اضغط على زر "إدخال" لجعله ساري المفعول. + + + +
+ +الوظائف (⭐= وظائف مُضافة حديثًا) | الوصف +--- | --- +⭐[التوصل لنموذج جديد](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | بحث بيدو[تشيان فان](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu) ووينسين[جينرال](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary)، مختبرات شنغهاي للذكاء الصناعي[شو شينغ](https://github.com/InternLM/InternLM)، إكسنفلام[زينغهو]https://xinghuo.xfyun.cn/)، [LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)، واجهة بيانية ذكية و3 خدمات إضافية [DALLE3] +الجودة الفائقة، الترجمة، شرح الكود | الإصلاح الفوري للاخطاء النحوية في الأبحاث وترجمة وتحسين التصريف اللغوي للأكواد +[اختصارات مخصصة](https://www.bilibili.com/video/BV14s4y1E7jN) | دعم الاختصارات المخصصة +تصميم قابل للتوسيع | دعم الإضافات القوية المخصصة (الوظائف)، الإضافات قابلة للتحديث بشكل فوري +[تحليل البرنامج](https://www.bilibili.com/video/BV1cj411A7VW) | [وظائف] التحليل الشجري بناءً على البرنامج من Python/C/C++/Java/Lua/..., أو [التحليل الذاتي](https://www.bilibili.com/video/BV1cj411A7VW) +قراءة وترجمة الأبحاث | [وظائف] فك تشفير كامل لأوراق البحث بتنسيق LaTeX/PDF وإنشاء مستخلص +ترجمة وتحسين أوراق اللاتكس | [وظائف] ترجمة أو تحسين الأوراق المكتوبة بلاتكس +إنشاء تعليقات الدوال دفعة واحدة | [وظائف] إنشاء تعليقات الدوال بدفعة واحدة +ترجمة Markdown بين اللغتين العربية والإنجليزية | [وظائف] هل رأيت الـ 5 لغات المستخدمة في منشور [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) ؟ +إنشاء تقرير تحليل الدردشة | [وظائف] إنشاء تقرير ملخص بعد تشغيله +ترجمة كاملة لأوراق PDF | [وظائف] تحليل الأوراق بتنسيق PDF لتحديد العنوان وملخصها وترجمتها (متعدد الخيوط) +مساعدة Arxiv | [وظائف] قم بإدخال رابط مقال Arxiv لترجمة الملخص وتحميل ملف PDF +تصحيح لاتكس بضغطة زر واحدة | [وظائف] إكمال تصحيح لاتكس بناءً على التركيبة النحوية، إخراج همز المقابل للمقارنة PDF +مساعد بحث Google بنسخة محلية | [وظائف] قم بتقديم رابط لصفحة بحث Google Scholar العشوائي حتى يساعدك GPT في كتابة [الأبحاث المتعلقة](https://www.bilibili.com/video/BV1GP411U7Az/) +تجميع معلومات الويب + GPT | [وظائف] جمع المعلومات من الويب بشكل سهل للرد على الأسئلة لجعل المعلومات محدثة باستمرار +⭐ترجمة دقيقة لأوراق Arxiv ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [وظائف] ترجمة مقالات Arxiv عالية الجودة بنقرة واحدة، أفضل أداة حاليا للترجمة +⭐[إدخال الصوت الفوري](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [وظائف] (غير متزامن) استماع الصوت وقطعه تلقائيًا وتحديد وقت الإجابة تلقائيًا +عرض الصيغ/الصور/الجداول | يمكن عرض الصيغ بشكل [TEX](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png) وأيضًا بتنسيق رسومي، يدعم عرض الصيغ وإبراز الكود +⭐إضغط على وكيل "شارلوت الذكي" | [وظائف] استكمال الذكاء للكأس الأول للذكاء المكتسب من مايكروسوفت، اكتشاف وتطوير عالمي العميل +تبديل الواجهة المُظلمة | يمكنك التبديل إلى الواجهة المظلمة بإضافة ```/?__theme=dark``` إلى نهاية عنوان URL في المتصفح +دعم المزيد من نماذج LLM | دعم لجميع GPT3.5 وGPT4 و[ChatGLM2 في جامعة ثوه في لين](https://github.com/THUDM/ChatGLM2-6B) و[MOSS في جامعة فودان](https://github.com/OpenLMLab/MOSS) +⭐تحوي انطباعة "ChatGLM2" | يدعم استيراد "ChatGLM2" ويوفر إضافة المساعدة في تعديله +دعم المزيد من نماذج "LLM"، دعم [نشر الحديس](https://huggingface.co/spaces/qingxu98/gpt-academic) | انضم إلى واجهة "Newbing" (Bing الجديدة)،نقدم نماذج Jittorllms الجديدة تؤيدهم [LLaMA](https://github.com/facebookresearch/llama) و [盘古α](https://openi.org.cn/pangu/) +⭐حزمة "void-terminal" للشبكة (pip) | قم بطلب 
كافة وظائف إضافة هذا المشروع في python بدون واجهة رسومية (قيد التطوير) +⭐PCI-Express لإعلام (PCI) | [وظائف] باللغة الطبيعية، قم بتنفيذ المِهام الأخرى في المشروع +المزيد من العروض (إنشاء الصور وغيرها)……| شاهد أكثر في نهاية هذا المستند ... +
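(The layout switch mentioned in the first screenshot caption below is a single option in `config.py`. A hedged example follows; the two values match the README's description of side-by-side versus stacked layouts, but verify the exact strings against your own `config.py`.)

```python
# config.py -- chat layout, as described in the caption below.
# Assumed values: "LEFT-RIGHT" (two columns) or "TOP-DOWN" (stacked).
LAYOUT = "TOP-DOWN"
```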
+ + +- شكل جديد (عن طريق تعديل الخيار LAYOUT في `config.py` لقانون التوزيع "اليمين أو اليسار" أو "الأعلى أو الأسفل") +
+ +
+ + +- جميع الأزرار يتم إنشاؤها ديناميكيًا من خلال قراءة functional.py ويمكن إضافة وظائف مخصصة بحرية وتحرير الحافظة +
+ +
+ +- التجميل / التحوير +
+ +
+ + + +- إذا تضمّن الإخراج معادلات، فسيتم عرضها بشكلٍ يمكّن من النسخ والقراءة على النحوين: TEX ورسومية. +
+ +
+ +- هل تشعر بالكسل من قراءة كود المشروع؟ قم بمدها مباشرةً إلى ChatGPT +
+ +
+ +- دمج نماذج اللغات الكبيرة المختلفة (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) +
+ +
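(The configuration notes in the installation section below state a read priority of environment variables over `config_private.py` over `config.py`. Here is a simplified sketch of that resolution order; the project's real reader lives in `toolbox.get_conf`, and this illustration mirrors only the priority, not details such as type coercion of environment-variable strings.)

```python
import importlib
import os

def resolve_conf(name: str):
    # 1) Environment variables win outright.
    if name in os.environ:
        return os.environ[name]
    # 2) Then user overrides in config_private.py, if that file exists.
    try:
        private = importlib.import_module("config_private")
        if hasattr(private, name):
            return getattr(private, name)
    except ImportError:
        pass
    # 3) Finally the repository defaults in config.py.
    return getattr(importlib.import_module("config"), name)
```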
+ +# Installation +### طريقة التثبيت الأولى: التشغيل المباشر (Windows، Linux أو MacOS) + +1. قم بتنزيل المشروع +```sh +git clone --depth=1 https://github.com/binary-husky/gpt_academic.git +cd gpt_academic +``` + +2. قم بتكوين لغة البرمجة Python + +في ملف `config.py`، قم بتكوين مفتاح الواجهة API والإعدادات الأخرى، [انقر هنا للاطلاع على طريقة تكوين الإعدادات في بيئة شبكة خاصة](https://github.com/binary-husky/gpt_academic/issues/1). [انقر هنا لزيارة صفحة الويكي](https://github.com/binary-husky/gpt_academic/wiki/توضيحات-تكوين-المشروع). + +" ستقوم البرنامج بفحص وجود ملف تكوين خاص يسمى `config_private.py` بأولوية، وسيستخدم التكوينات الموجودة فيه لتجاوز التكوينات ذات الأسماء المطابقة في `config.py`. إذا كنت تفهم هذه الطريقة ونظام القراءة، فإننا نوصي بشدة بإنشاء ملف تكوين جديد يسمى `config_private.py` بجوار `config.py` ونقل (نسخ) التكوينات الموجودة في `config.py` إلى `config_private.py` (يجب نسخ العناصر التي قمت بتعديلها فقط). " + +" يدعم المشروع التكوين من خلال `المتغيرات المحيطية`، ويمكن تحديد تنسيق كتابة المتغيرات المحيطية من خلال ملف `docker-compose.yml` أو صفحة الويكي الخاصة بنا. تعتمد أولوية القراءة على التكوينات على التالي: `المتغيرات المحيطية` > `config_private.py` > `config.py`. " + +3. قم بتثبيت التبعيات +```sh +# (الخيار الأول: إذا كنت تعرف Python، python>=3.9) الملحوظة: استخدم مستودع pip الرسمي أو مستودع pip آلي بباي، يمكن تغيير المستودع المؤقت بواسطة الأمر: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ +python -m pip install -r requirements.txt + +# (الخيار الثاني: باستخدام Anaconda) الخطوات مشابهة (https://www.bilibili.com/video/BV1rc411W7Dr): +conda create -n gptac_venv python=3.11 # إنشاء بيئة Anaconda +conda activate gptac_venv # تنشيط بيئة Anaconda +python -m pip install -r requirements.txt # هذه الخطوة مطابقة لخطوة تثبيت pip +``` + + +
إذا كنت بحاجة إلى دعم ChatGLM2 من الجامعة الصينية للاقتصاد وإدارة الأعمال وموس من جامعة فودان كخادم وجودة عالية لطرح الأسئلة، انقر هنا للعرض +

+ +【خطوات اختيارية】إذا كنت بحاجة إلى دعم جودة عالية لتشات جامعة تسينهوا (ChatGLM2) الصينية وجامعة فودان (MOSS)، يتعين عليك تثبيت تبعيات إضافية (شرط مسبق: التعامل مع Python واستخدام Pytorch وتوفر الحاسوب الشخصي بمواصفات قوية): +```sh +# 【خطوات اختيارية 1】دعم جودة عالية لتشات جامعة تسينهوا (ChatGLM2) +python -m pip install -r request_llms/requirements_chatglm.txt + +# 【خطوات اختيارية 2】دعم جودة عالية لتشات جامعة فودان (MOSS) +python -m pip install -r request_llms/requirements_moss.txt +git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # عند تنفيذ هذا الأمر، يجب أن تكون في مسار المشروع الرئيسي + +# 【خطوات اختيارية 3】دعم RWKV Runner +راجع الويكي: https://github.com/binary-husky/gpt_academic/wiki/دليل-تكوين-RWKV + +# 【خطوات اختيارية 4】تأكد من أن ملف التكوين config.py يحتوي على النماذج المرجوة، وهناك النماذج المدعومة حاليًا التالية (توجد خطط لتشغيل "jittorllms" في docker فقط): +AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] +``` + +

+
+ + +4. تشغيل البرنامج +```sh +python main.py +``` + +### طريقة التثبيت الثانية: استخدام Docker + +0. نصب القدرات الكاملة للمشروع (هذا هو الصورة الكبيرة التي تحتوي على CUDA و LaTeX. ولكن إذا كانت سرعة الإنترنت بطيئة أو القرص الصلب صغير، فإننا لا نوصي باستخدام هذا الخيار) +[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml) + +``` sh +# قم بتعديل ملف docker-compose.yml للحفاظ على الخطة رقم 0 وحذف الخطط الأخرى. ثم أشغل: +docker-compose up +``` + +1. تشغيل نموذج ChatGPT فقط + 文心一言 (Wenxin YIYan) + Spark عبر الإنترنت (يُوصى بهذا الخيار للمعظم) + +[![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml) +[![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml) +[![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml) + +``` sh +# قم بتعديل ملف docker-compose.yml للحفاظ على الخطة رقم 1 وحذف الخطط الأخرى. ثم أشغل: +docker-compose up +``` + +P.S. للاستفادة من إمكانية اللافتكس الإضافية، يرجى الرجوع إلى الويكي. بالإضافة إلى ذلك، يمكنك استخدام الخطة 4 أو الخطة 0 مباشرة للحصول على إمكانية اللافتكس. + +2. تشغيل نموذج ChatGPT + نموذج ChatGLM2 + نموذج MOSS + نموذج LLAMA2 + تون يي تشين ون (QiChaYiWen) (يتطلب معرفة بتشغيل نيفيديا دوكر (Nvidia Docker)) + +[![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml) + +``` sh +# قم بتعديل ملف docker-compose.yml للحفاظ على الخطة رقم 2 وحذف الخطط الأخرى. ثم أشغل: +docker-compose up +``` + +### طريقة التثبيت الثالثة: طرائق نشر أخرى +1. **نصوص بنقرة واحدة لأنظمة Windows**. +يمكن لمستخدمي Windows الذين لا يعرفون بيئة Python تنزيل سكربت التشغيل بنقرة واحدة من [الإصدارات](https://github.com/binary-husky/gpt_academic/releases) المنشورة لتثبيت الإصدار الذي لا يحتوي على نماذج محلية. +المساهمة في السكربت تعود لـ[oobabooga](https://github.com/oobabooga/one-click-installers). + +2. استخدام واجهة برمجة تطبيقات (API) مطراف ثالثة، Microsoft Azure، ونشوة النص، وغيرها، يرجى الرجوع إلى [صفحة الويكي](https://github.com/binary-husky/gpt_academic/wiki/إعدادات-التكوين-للمشروع) الخاصة بنا + +3. دليل تجنب المشاكل عند نشر المشروع في خوادم السحابة. +يرجى زيارة صفحة [دليل نشر خوادم السحابة في المحيط](https://github.com/binary-husky/gpt_academic/wiki/دليل-نشر-خوادم-السحابة) + +4. طرائق نشر المشروع بأحدث الأساليب + - استخدام Sealos للنشر السريع [بنقرة واحدة](https://github.com/binary-husky/gpt_academic/issues/993). + - استخدم WSL2 (Windows Subsystem for Linux). يُرجى زيارة صفحة الويكي [لدليل التثبيت-2](https://github.com/binary-husky/gpt_academic/wiki/دليل-تشغيل-WSL2-(Windows-Subsystem-for-Linux) + - كيفية تشغيل البرنامج تحت عنوان فرعي (على سبيل المثال: `http://localhost/subpath`). 
يُرجى زيارة [إرشادات FastAPI](docs/WithFastapi.md) + + + +# الاستخدام المتقدم +### I: إنشاء أزرار مخصصة (اختصارات أكاديمية) +افتح أي محرر نصوص وافتح `core_functional.py` وأضف الإدخالات التالية ثم أعد تشغيل البرنامج. (إذا كانت الأزرار موجودة بالفعل، بإمكانك تعديل البادئة واللاحقة حراريًا دون الحاجة لإعادة تشغيل البرنامج) +على سبيل المثال: +``` +"ترجمة سوبر الإنجليزية إلى العربية": { + # البادئة، ستتم إضافتها قبل إدخالاتك. مثلاً، لوصف ما تريده مثل ترجمة أو شرح كود أو تلوين وهلم جرا + "بادئة": "يرجى ترجمة النص التالي إلى العربية ثم استخدم جدول Markdown لشرح المصطلحات المختصة المذكورة في النص:\n\n", + + # اللاحقة، سيتم إضافتها بعد إدخالاتك. يمكن استخدامها لوضع علامات اقتباس حول إدخالك. + "لاحقة": "", +}, +``` +
+ +
+ +### II: إنشاء مكونات وظيفية مخصصة +قم بكتابة مكونات وظيفية قوية لتنفيذ أي مهمة ترغب في الحصول عليها وحتى تلك التي لم تخطر لك على بال. +إن إنشاء وتصحيح المكونات في هذا المشروع سهل للغاية، فما عليك سوى أن تمتلك بعض المعرفة الأساسية في لغة البرمجة بايثون وتستند على القالب الذي نقدمه. +للمزيد من التفاصيل، يُرجى الاطلاع على [دليل المكونات الوظيفية](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97). + + +# التحديثات +### I: تحديثات + +1. ميزة حفظ الدردشة: يمكن حفظ الدردشة الحالية كملف HTML قابل للقراءة والاسترداد ببساطة عند استدعاء الوظيفة في منطقة المكونات `حفظ الدردشة الحالية` ، ويمكن استرجاع المحادثة السابقة ببساطة عند استدعاء الوظيفة في منطقة المكونات (القائمة المنسدلة) `تحميل سجل الدردشة` . +نصيحة: يمكنك النقر المباشر على `تحميل سجل الدردشة` بدون تحديد ملف لعرض ذاكرة التخزين المؤقت لسجلات HTML. +
+ +
+ +2. ميزة ترجمة المقالات العلمية بواسطة Latex/Arxiv +
+ ===> + +
+ +3. محطة فراغ (فهم نغمة المستخدم من داخل اللغة الطبيعية واستدعاء وظائف أخرى تلقائيًا) + +- الخطوة 1: اكتب "بالرجاء استدعاء وظيفة ترجمة المقالة الأكاديمية من PDF وعنوان المقال هو https://openreview.net/pdf?id=rJl0r3R9KX". +- الخطوة 2: انقر فوق "محطة الفراغ". + +
+ +
+ +4. تصميم الوظائف المتعددة القادرة على توفير وظائف قوية بواجهات بسيطة +
+ + +
+ +5. ترجمة وإلغاء ترجمة المشاريع الأخرى مفتوحة المصدر +
+ + +
+ +6. ميزة تزيين [live2d](https://github.com/fghrsh/live2d_demo) (مغلقة بشكل افتراضي، يتطلب تعديل `config.py`) +
+ +
+ +7. إنتاج الصور من OpenAI +
+ +
+ +8. تحليل وإجماع الصوت من OpenAI +
+ +
+ +9. إصلاح أخطاء اللغة الطبيعة في Latex +
+ ===> + +
+ +10. تغيير اللغة والموضوع +
+ +
+ + + +### II: الإصدارات: +- الإصدار 3.70 (قريبًا): تحسينات لوظائف AutoGen وتصميم سلسلة من المكونات المشتقة +- الإصدار 3.60: إدخال AutoGen كأساس لوظائف الجيل الجديد +- الإصدار 3.57: دعم GLM3، نار النجوم v3، وشجرة الكلمات v4، وإصلاح خطأ الازدحام في النماذج المحلية +- الإصدار 3.56: الدعم لإضافة مزامنة الأزرار الأساسية حسب الطلب، وصفحة تجميع تقارير البيانات في ملف PDF +- الإصدار 3.55: إعادة هيكلة واجهة المستخدم الأمامية، وإضافة نافذة عائمة وشريط قائمة +- الإصدار 3.54: إضافة مترجم الكود المباشر (Code Interpreter) (قيد الانجاز) +- الإصدار 3.53: دعم اختيار موضوعات واجهة مختلفة، وزيادة الاستقرار وحل مشاكل التعارض بين المستخدمين المتعدد +- الإصدار 3.50: استخدام اللغة الطبيعية لاستدعاء جميع وظائف المشروع هذا (محطة فراغ)، ودعم تصنيف الوظائف وتحسين واجهة المستخدم وتصميم مواضيع جديدة +- الإصدار 3.49: دعم المنصات البحثية في بيدو كونفان وشجرة الكلمات +- الإصدار 3.48: دعم علي بابا, بوكما رش حتكيا, إكسونامبلومانت النار +- الإصدار 3.46: دعم محادثة نصية في الوقت الحقيقي غير مراقبة +- الإصدار 3.45: دعم تخصيص LatexChatglm النموذج التعديل +- الإصدار 3.44: دعم Azure رسميًا، وتحسين سهولة الاستخدام للواجهات الأمامية +- الإصدار 3.4: +ترجمة النصوص الكاملة للمقالات من خلال ملف PDF، +اختيار موضع المنطقة النصية، +خيار التخطيط الرأسي، +تحسينات في وظائف التداخل العديدة +- الإصدار 3.3: +وظائف متكاملة للمعلومات عبر الإنترنت +- الإصدار 3.2: دعم وظائف المكونات التي تحتوي معلمات أكثر (حفظ النص، فهم أي لغة برمجة، طلب أي تركيبة LLM في وقت واحد) +- الإصدار 3.1: دعم السؤال نحو نماذج GPT المتعددة! دعم واجهة api2d، دعم توازن الأحمال بين المفاتيح الخاصة المتعددة +- الإصدار 3.0: دعم لنماذج جات، واحدة منها لشتلس الصغيرة +- الإصدار 2.6: إعادة تصميم بنية الوظائف، وتحسين التفاعل وإضافة مزيد من الوظائف +- الإصدار 2.5: التحديث التلقائي، وحل مشكلة النص الطويل عند ملخص المشاريع الضخمة وتجاوز النصوص. +- الإصدار 2.4: (١) إضافة ميزة ترجمة المقالات الدورية. (٢) إضافة ميزة لتحويل مكان منطقة الإدخال. (٣) إضافة خيار التخطيط العمودي (vertical layout). (٤) تحسين وظائف المكونات متعددة الخيوط. +- الإصدار 2.3: تحسين التفاعل مع مواضيع متعددة +- الإصدار 2.2: دعم إعادة تحميل الوظائف المكونة حراريًا +- الإصدار 2.1: تصميم قابل للطي +- الإصدار 2.0: إدخال وحدات الوظائف المكونة +- الإصدار 1.0: الوظائف الأساسية + +مجموعة المطورين GPT Academic QQ: `610599535` + +- مشكلات معروفة + - بعض ملحقات متصفح الترجمة تتداخل مع تشغيل الواجهة الأمامية لهذا البرنامج + - يحتوي Gradio الرسمي حاليًا على عدد كبير من مشاكل التوافق. يُرجى استخدام `requirement.txt` لتثبيت Gradio. + +### III: الأنساق +يمكن تغيير الأنساق بتعديل خيار `THEME` (config.py) +1. `Chuanhu-Small-and-Beautiful` [الرابط](https://github.com/GaiZhenbiao/ChuanhuChatGPT/) + + +### IV: فروع تطوير هذا المشروع + +1. الفرع `master`: الفرع الرئيسي، إصدار مستقر +2. الفرع `frontier`: الفرع التطويري، إصدار تجريبي + + +### V: المراجع والفروض التعليمية + +``` +استخدمت العديد من التصاميم الموجودة في مشاريع ممتازة أخرى في الأكواد التالية، للمراجع عشوائية: + +# ViewGradio: +https://github.com/THUD + + + +# مُثبّت بضغطة واحدة Oobabooga: +https://github.com/oobabooga/one-click-installers + +# المزيد: +https://github.com/gradio-app/gradio +https://github.com/fghrsh/live2d_demo + diff --git a/docs/README.English.md b/docs/README.English.md new file mode 100644 index 0000000000..d0f3e4a1bf --- /dev/null +++ b/docs/README.English.md @@ -0,0 +1,357 @@ + + + +> **Note** +> +> This README was translated by GPT (implemented by the plugin of this project) and may not be 100% reliable. Please carefully check the translation results. 
+> +> 2023.11.7: When installing dependencies, please select the **specified versions** in the `requirements.txt` file. Installation command: `pip install -r requirements.txt`. + + +#
GPT Academic Optimization
+
+**If you like this project, please give it a Star.**
+To translate this project into any language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
+
+> **Note**
+>
+> 1. Please note that only plugins (buttons) highlighted in **bold** support reading files, and some plugins are located in the **dropdown menu** in the plugin area. Additionally, we welcome and process PRs for new plugins with the **highest priority**.
+>
+> 2. The functionalities of each file in this project are described in detail in the [self-analysis report `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). As the version iterates, you can also click on the relevant function plugin at any time to call GPT to regenerate the project's self-analysis report. Common questions are in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki). [Regular installation method](#installation) | [One-click installation script](https://github.com/binary-husky/gpt_academic/releases) | [Configuration instructions](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
+>
+> 3. This project is compatible with and encourages the use of domestic large language models such as ChatGLM. Multiple api-keys can be used together: fill in the configuration file with `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. To switch the `API_KEY` temporarily, enter the temporary `API_KEY` in the input area and press Enter to apply it.
+
+
+
+
+ +Feature (⭐ = Recently Added) | Description +--- | --- +⭐[Integrate New Models](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B) | Baidu [Qianfan](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu) and Wenxin Yiyu, [Tongyi Qianwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary), Shanghai AI-Lab [Shusheng](https://github.com/InternLM/InternLM), Xunfei [Xinghuo](https://xinghuo.xfyun.cn/), [LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), Zhifu API, DALLE3 +Proofreading, Translation, Code Explanation | One-click proofreading, translation, searching for grammar errors in papers, explaining code +[Custom Shortcuts](https://www.bilibili.com/video/BV14s4y1E7jN) | Support for custom shortcuts +Modular Design | Support for powerful [plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions), plugins support [hot updates](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) +[Program Profiling](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin] One-click to profile Python/C/C++/Java/Lua/... project trees or [self-profiling](https://www.bilibili.com/video/BV1cj411A7VW) +Read Papers, [Translate](https://www.bilibili.com/video/BV1KT411x7Wn) Papers | [Plugin] One-click to interpret full-text latex/pdf papers and generate abstracts +Full-text Latex [Translation](https://www.bilibili.com/video/BV1nk4y1Y7Js/), [Proofreading](https://www.bilibili.com/video/BV1FT411H7c5/) | [Plugin] One-click translation or proofreading of latex papers +Batch Comment Generation | [Plugin] One-click batch generation of function comments +Markdown [Translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Plugin] Did you see the [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in the top five languages? 
+Chat Analysis Report Generation | [Plugin] Automatically generates summary reports after running +[PDF Paper Full-text Translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plugin] Extract title & abstract of PDF papers + translate full-text (multi-threaded) +[Arxiv Helper](https://www.bilibili.com/video/BV1LM4y1279X) | [Plugin] Enter the arxiv article URL to translate the abstract + download PDF with one click +One-click Proofreading of Latex Papers | [Plugin] Syntax and spelling correction of Latex papers similar to Grammarly + output side-by-side PDF +[Google Scholar Integration Helper](https://www.bilibili.com/video/BV19L411U7ia) | [Plugin] Given any Google Scholar search page URL, let GPT help you [write related works](https://www.bilibili.com/video/BV1GP411U7Az/) +Internet Information Aggregation + GPT | [Plugin] One-click to let GPT retrieve information from the Internet to answer questions and keep the information up to date +⭐Arxiv Paper Fine Translation ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [Plugin] One-click [high-quality translation of arxiv papers](https://www.bilibili.com/video/BV1dz4y1v77A/), the best paper translation tool at present +⭐[Real-time Speech Input](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [Plugin] Asynchronously [listen to audio](https://www.bilibili.com/video/BV1AV4y187Uy/), automatically segment sentences, and automatically find the best time to answer +Formula/Image/Table Display | Can simultaneously display formulas in [TeX form and rendered form](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png), support formula and code highlighting +⭐AutoGen Multi-Agent Plugin | [Plugin] Explore the emergence of multi-agent intelligence with Microsoft AutoGen! +Start Dark [Theme](https://github.com/binary-husky/gpt_academic/issues/173) | Add ```/?__theme=dark``` to the end of the browser URL to switch to the dark theme +[More LLM Model Support](https://www.bilibili.com/video/BV1wT411p7yf) | It must be great to be served by GPT3.5, GPT4, [THU ChatGLM2](https://github.com/THUDM/ChatGLM2-6B), and [Fudan MOSS](https://github.com/OpenLMLab/MOSS) at the same time, right? +⭐ChatGLM2 Fine-tuning Model | Support for loading ChatGLM2 fine-tuning models and providing ChatGLM2 fine-tuning assistant plugins +More LLM Model Access, support for [huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) | Join NewBing interface (New Bing), introduce Tsinghua [JittorLLMs](https://github.com/Jittor/JittorLLMs) to support [LLaMA](https://github.com/facebookresearch/llama) and [Pangu](https://openi.org.cn/pangu/) +⭐[void-terminal](https://github.com/binary-husky/void-terminal) pip package | Use this project's all function plugins directly in Python without GUI (under development) +⭐Void Terminal Plugin | [Plugin] Schedule other plugins of this project directly in natural language +More New Feature Demonstrations (Image Generation, etc.)...... | See the end of this document ........ +
+ + +- New interface (modify the LAYOUT option in `config.py` to switch between "left-right layout" and "top-bottom layout") +
+ +
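+
+As a minimal sketch, the layout switch above boils down to a single entry in `config.py` (the exact default value shown here is an assumption, not quoted from this commit):
+
+```python
+# config.py (sketch) -- interface layout
+LAYOUT = "LEFT-RIGHT"   # assumed values: "LEFT-RIGHT" (left-right) or "TOP-DOWN" (top-bottom)
+```
+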
+
+
+- All buttons are dynamically generated by reading `functional.py`; custom functions can be added freely, freeing up the clipboard
+
+ +
+ +- Proofreading/Correction +
+ +
+ + + +- If the output contains formulas, they will be displayed in both tex format and rendered format for easy copying and reading. +
+ +
+
+- Too lazy to read the project code? Just feed the whole project to ChatGPT and let it explain itself
+
+ +
+
+- Mixed calls to multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
+
+ +
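+
+As a rough sketch, mixed calling like the above is driven by the model list in `config.py` (option names follow the `AVAIL_LLM_MODELS` example shown later in this README; the default value here is an assumption):
+
+```python
+# config.py (sketch) -- which models the UI offers, mixing online and local backends
+LLM_MODEL = "gpt-3.5-turbo"                                      # assumed default model
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-4", "chatglm"]   # selectable alternatives
+```
+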
+ +# Installation +### Installation Method I: Run directly (Windows, Linux or MacOS) + +1. Download the project +```sh +git clone --depth=1 https://github.com/binary-husky/gpt_academic.git +cd gpt_academic +``` + +2. Configure API_KEY + +In `config.py`, configure API KEY and other settings, [click here to see special network environment configuration methods](https://github.com/binary-husky/gpt_academic/issues/1). [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。 + +「 The program will first check if a secret configuration file named `config_private.py` exists and use the configurations from that file to override the ones in `config.py` with the same names. If you understand this logic, we strongly recommend that you create a new configuration file named `config_private.py` next to `config.py` and move (copy) the configurations from `config.py` to `config_private.py` (only copy the configuration items you have modified). 」 + +「 Project configuration can be done via `environment variables`. The format of the environment variables can be found in the `docker-compose.yml` file or our [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). Configuration priority: `environment variables` > `config_private.py` > `config.py`. 」 + + +3. Install dependencies +```sh +# (Option I: If you are familiar with python, python>=3.9) Note: Use the official pip source or the Aliyun pip source. Temporary method for switching the source: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ +python -m pip install -r requirements.txt + +# (Option II: Using Anaconda) The steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr): +conda create -n gptac_venv python=3.11 # Create the anaconda environment +conda activate gptac_venv # Activate the anaconda environment +python -m pip install -r requirements.txt # This step is the same as the pip installation process +``` + + +
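+
+To make step 2 concrete, a minimal `config_private.py` might look like the sketch below (values are placeholders; `API_KEY` follows the instructions above, while `USE_PROXY` is an assumed option name). Remember the priority order: `environment variables` > `config_private.py` > `config.py`.
+
+```python
+# config_private.py (sketch) -- entries here override same-named entries in config.py
+API_KEY = "openai-key1,openai-key2"   # placeholder keys; comma-separate multiple keys
+USE_PROXY = False                     # assumed option: set True and define a `proxies` dict if needed
+```
+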
If you need to support THU ChatGLM2, Fudan MOSS, or RWKV Runner as backends, click here to expand +

+ +【Optional Step】If you need to support THU ChatGLM2 or Fudan MOSS as backends, you need to install additional dependencies (Prerequisites: Familiar with Python + Familiar with Pytorch + Sufficient computer configuration): +```sh +# 【Optional Step I】Support THU ChatGLM2. Note: If you encounter the "Call ChatGLM fail unable to load ChatGLM parameters" error, refer to the following: 1. The default installation above is for torch+cpu version. To use cuda, uninstall torch and reinstall torch+cuda; 2. If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py. Change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) +python -m pip install -r request_llms/requirements_chatglm.txt + +# 【Optional Step II】Support Fudan MOSS +python -m pip install -r request_llms/requirements_moss.txt +git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # When executing this line of code, make sure you are in the root directory of the project + +# 【Optional Step III】Support RWKV Runner +Refer to wiki: https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner + +# 【Optional Step IV】Make sure that the AVAIL_LLM_MODELS in the config.py configuration file includes the expected models. The currently supported models are as follows (jittorllms series currently only supports the docker solution): +AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] +``` + +

+
+ + + +4. Run +```sh +python main.py +``` + +### Installation Method II: Use Docker + +0. Deploy all capabilities of the project (this is a large image that includes cuda and latex. Not recommended if you have slow internet speed or small hard drive) +[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml) + +``` sh +# Modify docker-compose.yml, keep scheme 0 and delete other schemes. Then run: +docker-compose up +``` + +1. ChatGPT + Wenxin + Spark online models only (recommended for most people) +[![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml) +[![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml) +[![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml) + +``` sh +# Modify docker-compose.yml, keep scheme 1 and delete other schemes. Then run: +docker-compose up +``` + +P.S. If you need the latex plugin functionality, please see the Wiki. Also, you can directly use scheme 4 or scheme 0 to get the Latex functionality. + +2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + Intelligent Questions (requires familiarity with [Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian) runtime) +[![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml) + +``` sh +# Modify docker-compose.yml, keep scheme 2 and delete other schemes. Then run: +docker-compose up +``` + + +### Installation Method III: Other deployment methods +1. **Windows one-click running script**. +Windows users who are completely unfamiliar with the python environment can download the one-click running script from the [Release](https://github.com/binary-husky/gpt_academic/releases) to install the version without local models. +The script is contributed by [oobabooga](https://github.com/oobabooga/one-click-installers). + +2. Use third-party APIs, Azure, Wenxin, Xinghuo, etc., see [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) + +3. Pitfall guide for deploying on cloud servers. +Please visit [Cloud Server Remote Deployment Wiki](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97) + +4. Some new deployment platforms or methods + - Use Sealos [to deploy with one click](https://github.com/binary-husky/gpt_academic/issues/993). + - Use WSL2 (Windows Subsystem for Linux). Please refer to [Deployment Wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2) + - How to run under a subpath (such as `http://localhost/subpath`). 
Please visit [FastAPI Run Instructions](docs/WithFastapi.md) + + + +# Advanced Usage +### I: Customizing new convenient buttons (academic shortcuts) +Open `core_functional.py` with any text editor, add the following entry, and then restart the program. (If the button already exists, both the prefix and suffix can be modified on-the-fly without restarting the program.) +For example: +``` +"Super Translation": { + # Prefix: will be added before your input. For example, used to describe your request, such as translation, code explanation, proofreading, etc. + "Prefix": "Please translate the following paragraph into Chinese and then explain each proprietary term in the text using a markdown table:\n\n", + + # Suffix: will be added after your input. For example, used to wrap your input in quotation marks along with the prefix. + "Suffix": "", +}, +``` +
+ +
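+
+For orientation, here is a sketch of how the "Super Translation" entry above might sit inside `core_functional.py` (the function name and surrounding structure are inferred from the example, not quoted from this commit):
+
+```python
+# core_functional.py (sketch) -- buttons are read from a dictionary like this
+def get_core_functions():
+    return {
+        "Super Translation": {
+            # Prefix: prepended to your input, e.g. to describe the request
+            "Prefix": "Please translate the following paragraph into Chinese ...:\n\n",
+            # Suffix: appended to your input, e.g. to wrap it in quotes
+            "Suffix": "",
+        },
+    }
+```
+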
+ +### II: Custom function plugins +Write powerful function plugins to perform any task you desire and can't imagine. +The difficulty of writing and debugging plugins in this project is very low. As long as you have a certain knowledge of Python, you can implement your own plugin functionality by following the template we provide. +For more details, please refer to the [Function Plugin Guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97). + +# Updates +### I: Dynamics + +1. Conversation-saving feature. Call `Save the current conversation` in the function plugin area to save the current conversation as a readable and restorable HTML file. Additionally, call `Load conversation history archive` in the function plugin area (drop-down menu) to restore previous sessions. +Tip: Clicking `Load conversation history archive` without specifying a file allows you to view the cached historical HTML archive. +
+ +
+ +2. ⭐Latex/Arxiv paper translation feature⭐ +
+ ===> + +
+
+3. Void Terminal (understands user intent from natural language input and automatically calls other plugins)
+
+- Step 1: Enter "Please call the plugin to translate the PDF paper, the address is https://openreview.net/pdf?id=rJl0r3R9KX"
+- Step 2: Click "Void Terminal"
+
+
+ +
+ +4. Modular function design, simple interface supporting powerful functionality +
+ + +
+ +5. Translate and interpret other open-source projects +
+ + +
+ +6. Added small features that decorate [live2d](https://github.com/fghrsh/live2d_demo) (disabled by default, needs modification in `config.py`) +
+ +
+ +7. OpenAI image generation +
+ +
+ +8. OpenAI audio parsing and summarization +
+ +
+ +9. Latex full-text proofreading and correction +
+ ===> + +
+ +10. Language and theme switching +
+ +
+ + + +### II: Versions: +- version 3.70 (todo): Optimize the AutoGen plugin theme and design a series of derivative plugins +- version 3.60: Introduce AutoGen as the cornerstone of the new generation of plugins +- version 3.57: Support GLM3, Spark v3, Wenxin Quote v4, and fix concurrency bugs in local models +- version 3.56: Support dynamically adding basic functional buttons and a new summary PDF page +- version 3.55: Refactor the frontend interface and introduce floating windows and a menu bar +- version 3.54: Add a dynamic code interpreter (Code Interpreter) (to be improved) +- version 3.53: Support dynamically choosing different interface themes, improve stability, and resolve conflicts between multiple users +- version 3.50: Use natural language to call all function plugins of this project (Void Terminal), support plugin classification, improve UI, and design new themes +- version 3.49: Support Baidu Qianfan Platform and Wenxin Quote +- version 3.48: Support Ali Dharma Academy Tongyi Qianwen, Shanghai AI-Lab Shusheng, and Xunfei Spark +- version 3.46: Support fully hands-off real-time voice conversation +- version 3.45: Support customizing ChatGLM2 fine-tuned models +- version 3.44: Officially support Azure, optimize interface usability +- version 3.4: + Arxiv paper translation, latex paper correction functionality +- version 3.3: + Internet information integration functionality +- version 3.2: Function plugins support more parameter interfaces (conversation saving functionality, interpreting any code language + asking any combination of LLMs simultaneously) +- version 3.1: Support querying multiple GPT models simultaneously! Support API2D, support load balancing for multiple API keys +- version 3.0: Support chatglm and other small-scale LLMs +- version 2.6: Refactored plugin structure, improved interactivity, added more plugins +- version 2.5: Self-updating, fix the problem of text being too long and token overflowing when summarizing large code projects +- version 2.4: (1) Add PDF full-text translation functionality; (2) Add functionality to switch the position of the input area; (3) Add vertical layout option; (4) Optimize multi-threaded function plugins. +- version 2.3: Enhance multi-threaded interactivity +- version 2.2: Function plugin hot-reloading support +- version 2.1: Collapsible layout +- version 2.0: Introduce modular function plugins +- version 1.0: Basic functionality + +GPT Academic Developer QQ Group: `610599535` + +- Known Issues + - Some browser translation plugins interfere with the frontend operation of this software + - Official Gradio currently has many compatibility bugs, please make sure to install Gradio using `requirement.txt` + +### III: Themes +You can change the theme by modifying the `THEME` option (config.py). +1. `Chuanhu-Small-and-Beautiful` [Website](https://github.com/GaiZhenbiao/ChuanhuChatGPT/) + +### IV: Development Branches of This Project + +1. `master` branch: Main branch, stable version +2. 
`frontier` branch: Development branch, test version + +### V: References and Learning + +``` +The code references the designs of many other excellent projects, in no particular order: + +# THU ChatGLM2-6B: +https://github.com/THUDM/ChatGLM2-6B + +# THU JittorLLMs: +https://github.com/Jittor/JittorLLMs + +# ChatPaper: +https://github.com/kaixindelele/ChatPaper + +# Edge-GPT: +https://github.com/acheong08/EdgeGPT + +# ChuanhuChatGPT: +https://github.com/GaiZhenbiao/ChuanhuChatGPT + + + +# Oobabooga one-click installer: +https://github.com/oobabooga/one-click-installers + +# More: +https://github.com/gradio-app/gradio +https://github.com/fghrsh/live2d_demo + diff --git a/docs/README.French.md b/docs/README.French.md new file mode 100644 index 0000000000..dbd64caac4 --- /dev/null +++ b/docs/README.French.md @@ -0,0 +1,357 @@ + + + +> **Remarque** +> +> Ce README a été traduit par GPT (implémenté par le plugin de ce projet) et n'est pas fiable à 100 %. Veuillez examiner attentivement les résultats de la traduction. +> +> 7 novembre 2023 : Lors de l'installation des dépendances, veuillez choisir les versions **spécifiées** dans le fichier `requirements.txt`. Commande d'installation : `pip install -r requirements.txt`. + + +#
Optimisation académique GPT (GPT Academic)
+ +**Si vous aimez ce projet, merci de lui donner une étoile ; si vous avez inventé des raccourcis ou des plugins utiles, n'hésitez pas à envoyer des demandes d'extraction !** + +Si vous aimez ce projet, veuillez lui donner une étoile. +Pour traduire ce projet dans une langue arbitraire avec GPT, lisez et exécutez [`multi_language.py`](multi_language.py) (expérimental). + +> **Remarque** +> +> 1. Veuillez noter que seuls les plugins (boutons) marqués en **surbrillance** prennent en charge la lecture de fichiers, et certains plugins se trouvent dans le **menu déroulant** de la zone des plugins. De plus, nous accueillons avec la plus haute priorité les nouvelles demandes d'extraction de plugins. +> +> 2. Les fonctionnalités de chaque fichier de ce projet sont spécifiées en détail dans [le rapport d'auto-analyse `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic个项目自译解报告). Vous pouvez également cliquer à tout moment sur les plugins de fonctions correspondants pour appeler GPT et générer un rapport d'auto-analyse du projet. Questions fréquemment posées [wiki](https://github.com/binary-husky/gpt_academic/wiki). [Méthode d'installation standard](#installation) | [Script d'installation en un clic](https://github.com/binary-husky/gpt_academic/releases) | [Instructions de configuration](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).. +> +> 3. Ce projet est compatible avec et recommande l'expérimentation de grands modèles de langage chinois tels que ChatGLM, etc. Prend en charge plusieurs clés API, vous pouvez les remplir dans le fichier de configuration comme `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Pour changer temporairement la clé API, entrez la clé API temporaire dans la zone de saisie, puis appuyez sur Entrée pour soumettre et activer celle-ci. + + +
+ +Fonctionnalités (⭐ = fonctionnalité récemment ajoutée) | Description +--- | --- +⭐[Modèles acquis](https://github.com/binary-husky/gpt_academic/wiki/如何切换模型)! | Baidu [Qianfan](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu) et Wenxin Yiyuan, [Tongyi Qianwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary), Shanghai AI-Lab [Shusheng](https://github.com/InternLM/InternLM), Xunfei [Xinghuo](https://xinghuo.xfyun.cn/), [LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), Zhifu API, DALLE3 +Amélioration, traduction, explication du code | Correction, traduction, recherche d'erreurs de syntaxe dans les articles, explication du code +[Raccourcis personnalisés](https://www.bilibili.com/video/BV14s4y1E7jN) | Prise en charge de raccourcis personnalisés +Conception modulaire | Prise en charge de plugins puissants personnalisables, prise en charge de la [mise à jour à chaud](https://github.com/binary-husky/gpt_academic/wiki/函数插件指南) des plugins +[Analyse de programme](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin] Analyse en profondeur d'un arbre de projets Python/C/C++/Java/Lua/... d'un simple clic ou [auto-analyse](https://www.bilibili.com/video/BV1cj411A7VW) +Lecture d'articles, traduction d'articles | [Plugin] Lecture automatique des articles LaTeX/PDF et génération du résumé +Traduction complète de [LaTeX](https://www.bilibili.com/video/BV1nk4y1Y7Js/) ou amélioration de leur qualité | [Plugin] Traduction ou amélioration rapide des articles LaTeX +Génération de commentaires en masse | [Plugin] Génération facile de commentaires de fonctions +Traduction [chinois-anglais](https://www.bilibili.com/video/BV1yo4y157jV/) du Markdown | [Plugin] Avez-vous vu le [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) dans les cinq langues ci-dessus ? 
+Génération de rapports d'analyse du chat | [Plugin] Génération automatique d'un rapport récapitulatif après l'exécution du chat +[Fonction de traduction complète des articles PDF](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plugin] Extraction du titre et du résumé d'un article PDF, ainsi que traduction intégrale (multithreading) +Assistant Arxiv | [Plugin] Saisissez l'URL d'un article Arxiv pour traduire automatiquement le résumé et télécharger le PDF +Correction automatique d'articles LaTeX | [Plugin] Correction de la grammaire, de l'orthographe et comparaison avec le PDF correspondant, à la manière de Grammarly +Assistant Google Scholar | [Plugin] Donner l'URL d'une page de recherche Google Scholar pour obtenir de l'aide sur l'écriture des références +Agrégation d'informations sur Internet + GPT | [Plugin] Obtenez les informations de l'Internet pour répondre aux questions à l'aide de GPT, afin que les informations ne soient jamais obsolètes +⭐Traduction détaillée des articles Arxiv ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [Plugin] Traduction de haute qualité d'articles Arxiv en un clic, le meilleur outil de traduction d'articles à ce jour +⭐[Saisie orale en temps réel](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [Plugin] Écoute asynchrone de l'audio, découpage automatique et recherche automatique du meilleur moment pour répondre +Affichage des formules, images, tableaux | Affichage simultané de la forme [TeX et rendue](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png) des formules, prise en charge de la mise en évidence des formules et du code +⭐Plugin AutoGen multi-agents | [Plugin] Explorez les émergences intelligentes à plusieurs agents avec Microsoft AutoGen ! +Activation du [thème sombre](https://github.com/binary-husky/gpt_academic/issues/173) | Ajouter ```/?__theme=dark``` à l'URL du navigateur pour basculer vers le thème sombre +Prise en charge de plusieurs modèles LLM | Expérimentez avec GPT 3.5, GPT4, [ChatGLM2 de Tsinghua](https://github.com/THUDM/ChatGLM2-6B), [MOSS de Fudan](https://github.com/OpenLMLab/MOSS) simultanément ! +⭐Modèle ChatGLM2 fine-tuned | Chargez et utilisez un modèle fine-tuned de ChatGLM2, disponible avec un plugin d'assistance +Prise en charge de plus de modèles LLM, déploiement sur [Huggingface](https://huggingface.co/spaces/qingxu98/gpt-academic) | Ajout de l'interface de connaissance-API, support de [LLaMA](https://github.com/facebookresearch/llama) et [PanGuα](https://openi.org.cn/pangu/) +⭐Paquet pip [void-terminal](https://github.com/binary-husky/void-terminal) | Accédez à toutes les fonctions et plugins de ce projet directement depuis Python (en cours de développement) +⭐Plugin terminal du vide | [Plugin] Utilisez un langage naturel pour interagir avec les autres plugins du projet +Affichage de nouvelles fonctionnalités (génération d'images, etc.) …… | Voir à la fin de ce document …… +
+ + +- Nouvelle interface (modifiez l'option LAYOUT dans `config.py` pour basculer entre la disposition "gauche-droite" et "haut-bas") +
+ +
+ + +- Tous les boutons sont générés dynamiquement en lisant `functional.py`, vous pouvez donc ajouter de nouvelles fonctionnalités personnalisées et libérer le presse-papiers. +
+ +
+ +- Retouche/correction +
+ +
+ + + +- If the output contains formulas, they will be displayed in both tex and rendered forms for easy copying and reading. + +
+ +
+ +- Don't feel like looking at the project code? Just give it to ChatGPT to show off. + +
+ +
+ +- Multiple large language models are mixed and used together (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4). + +
+ +
+ +# Installation +### Method I: Run directly (Windows, Linux, or MacOS) + +1. Download the project +```sh +git clone --depth=1 https://github.com/binary-husky/gpt_academic.git +cd gpt_academic +``` + +2. Configure API_KEY + +In `config.py`, configure the API KEY and other settings. [Click here to see methods for special network environment configurations](https://github.com/binary-husky/gpt_academic/issues/1). [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). + +「 The program will first check if there is a confidential configuration file named `config_private.py`, and use the configurations in that file to override the corresponding configurations in `config.py`. If you understand this logic, we strongly recommend creating a new configuration file named `config_private.py` right next to `config.py`, and move (copy) the configurations from `config.py` to `config_private.py` (only copy the configurations that you have modified). 」 + +「 You can also configure the project using `environment variables`. The format of the environment variables can be found in the `docker-compose.yml` file or on our [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). The priority of configuration reading is: `environment variables` > `config_private.py` > `config.py`. 」 + +3. Install dependencies +```sh +# (Option I: If you are familiar with Python, python>=3.9) Note: Use the official pip source or the Ali pip source. Temporary change of source method: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ +python -m pip install -r requirements.txt + +# (Option II: Use Anaconda) The steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr): +conda create -n gptac_venv python=3.11 # Create an anaconda environment +conda activate gptac_venv # Activate the anaconda environment +python -m pip install -r requirements.txt # This step is the same as the pip installation step +``` + + +
If you need to support Tsinghua ChatGLM2/Fudan MOSS/RWKV as backends, click here to expand +

+ +[Optional Steps] If you need to support Tsinghua ChatGLM2/Fudan MOSS as backends, you need to install additional dependencies (Prerequisites: Familiar with Python + Have used PyTorch + Sufficient computer configuration): +```sh +# [Optional Step I] Support Tsinghua ChatGLM2. Comment on this note: If you encounter the error "Call ChatGLM generated an error and cannot load the parameters of ChatGLM", refer to the following: 1: The default installation is the torch+cpu version. To use cuda, you need to uninstall torch and reinstall torch+cuda; 2: If the model cannot be loaded due to insufficient computer configuration, you can modify the model precision in request_llm/bridge_chatglm.py. Change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True). +python -m pip install -r request_llms/requirements_chatglm.txt + +# [Optional Step II] Support Fudan MOSS +python -m pip install -r request_llms/requirements_moss.txt +git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # Note: You need to be at the root directory of the project when executing this line of code + +# [Optional Step III] Support RWKV Runner +Refer to the wiki: https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner + +# [Optional Step IV] Make sure that the AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. The currently supported models are as follows (jittorllms series currently only support the docker solution): +AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] +``` + +

+
+ + +4. Run +```sh +python main.py +``` + +### Method II: Use Docker + +0. Deploy all capabilities of the project (this is a large image that includes cuda and latex. But if you have a slow internet speed or a small hard drive, it is not recommended to use this) + +``` sh +# Modify the docker-compose.yml file, keep scheme 0 and delete the other schemes. Then run: +docker-compose up +``` + +1. ChatGPT + Wenxin Yiyu + Spark and other online models (recommended for most people) + +``` sh +# Modify the docker-compose.yml file, keep scheme 1 and delete the other schemes. Then run: +docker-compose up +``` + +NOTE: If you need Latex plugin functionality, please refer to the Wiki. Additionally, you can also use scheme 4 or scheme 0 directly to obtain Latex functionality. + +2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + Tongyi Qianwen (requires familiarity with [Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian) runtime) + +``` sh +# Modify the docker-compose.yml file, keep scheme 2 and delete the other schemes. Then run: +docker-compose up +``` + + +### Method III: Other deployment methods +1. **One-click run script for Windows**. +Windows users who are completely unfamiliar with the Python environment can download the one-click run script without local models from the [Release](https://github.com/binary-husky/gpt_academic/releases) section. +The script was contributed by [oobabooga](https://github.com/oobabooga/one-click-installers). + +2. Use third-party APIs, Azure, Wenxin Yiyu, Xinghuo, etc., see the [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). + +3. Pitfall guide for deploying on cloud servers. +Please visit the [cloud server remote deployment wiki](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97). + +4. Some new deployment platforms or methods + - Use Sealos [one-click deployment](https://github.com/binary-husky/gpt_academic/issues/993). + - Use WSL2 (Windows Subsystem for Linux). Please visit the [deployment wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2) + - How to run under a subpath (such as `http://localhost/subpath`). Please see [FastAPI running instructions](docs/WithFastapi.md) + + + +# Utilisation avancée +### I: Personnalisation des nouveaux boutons d'accès rapide (raccourcis académiques) +Ouvrez `core_functional.py` avec n'importe quel éditeur de texte, ajoutez les entrées suivantes, puis redémarrez le programme. (Si le bouton existe déjà, le préfixe et le suffixe peuvent être modifiés à chaud sans redémarrer le programme). +Par exemple: +``` +"Traduction avancée de l'anglais vers le français": { + # Préfixe, ajouté avant votre saisie. Par exemple, utilisez-le pour décrire votre demande, telle que la traduction, l'explication du code, l'amélioration, etc. + "Prefix": "Veuillez traduire le contenu suivant en français, puis expliquer chaque terme propre à la langue anglaise utilisé dans le texte à l'aide d'un tableau markdown : \n\n", + + # Suffixe, ajouté après votre saisie. Par exemple, en utilisant le préfixe, vous pouvez entourer votre contenu par des guillemets. + "Suffix": "", +}, +``` +
+ +
+ +### II: Personnalisation des plugins de fonction +Écrivez de puissants plugins de fonction pour accomplir toutes les tâches que vous souhaitez ou ne pouvez pas imaginer. +Le développement et le débogage de ces plugins dans ce projet sont très faciles. Tant que vous avez des connaissances de base en python, vous pouvez implémenter vos propres fonctionnalités grâce à notre modèle fourni. +Veuillez consulter le [Guide des plugins de fonction](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) pour plus de détails. + + +# Mises à jour +### I: Dynamique + +1. Fonction de sauvegarde de conversation. Appelez `Enregistrer la conversation en cours` dans la zone des plugins fonctionnels pour enregistrer la conversation en cours sous la forme d'un fichier HTML lisible et récupérable. En outre, appelez `Charger les archives de conversation` dans la zone des plugins fonctionnels (menu déroulant) pour restaurer les conversations précédentes. +Astuce: Si aucun fichier n'est spécifié, cliquez directement sur `Charger les archives de conversation` pour afficher le cache des archives HTML. +
+ +
+ +2. ⭐ Fonction de traduction des articles Latex/Arxiv ⭐ +
+ ===> + +
+ +3. Terminal du néant (comprendre l'intention de l'utilisateur à partir de la saisie en langage naturel et appeler automatiquement d'autres plugins) + +- Étape 1: Saisissez "Veuillez appeler le plugin de traduction pour le document PDF, l'URL est https://openreview.net/pdf?id=rJl0r3R9KX". +- Étape 2 : Cliquez sur "Terminal du néant". + +
+ +
+ +4. Conception de fonctionnalités modulaires, une interface simple peut prendre en charge des fonctionnalités puissantes +
+ + +
+ +5. Traduction et interprétation d'autres projets open-source +
+ + +
+ +6. Fonctionnalités supplémentaires intégrant [live2d](https://github.com/fghrsh/live2d_demo) (désactivé par défaut, nécessite des modifications dans `config.py`) +
+ +
+ +7. Génération d'images par OpenAI +
+ +
+ +8. Analyse et résumé audio par OpenAI +
+ +
+ +9. Vérification et correction orthographique complète du document en Latex +
+ ===> + +
+ +10. Changement de langue et de thème +
+ +
+ + + +### II: Versions: +- version 3.70(tâche à accomplir) : Optimisation de la fonction AutoGen et création d'une série de plugins dérivés +- version 3.60 : Introduction d'AutoGen comme base des nouveaux plugins +- version 3.57 : Prise en charge de GLM3, Starlight v3, Zen v4 et correction de l'incompatibilité des modèles locaux +- version 3.56 : Possibilité d'ajouter dynamiquement des boutons de fonction de base et nouvelle page de synthèse des PDF +- version 3.55: Refonte de l'interface utilisateur avec fenêtres flottantes et barre de menu +- version 3.54 : Nouvel interpréteur de code dynamique (Code Interpreter) (à améliorer) +- version 3.53 : Possibilité de choisir dynamiquement différents thèmes d'interface, amélioration de la stabilité et résolution des problèmes de conflit entre utilisateurs multiples +- version 3.50 : Utiliser le langage naturel pour appeler toutes les fonctions du projet (Terminal du néant), prise en charge de la classification des plugins, amélioration de l'interface utilisateur, conception de nouveaux thèmes +- version 3.49 : Prise en charge de Baidu Qianfan et Xiaomi-Wenyiyan +- version 3.48 : Prise en charge d'Ali-DA, Shanghai AI-Lab-Shusheng et Xunfei Xinghuo +- version 3.46 : Prise en charge de la conversation audio temps réel sans intervention +- version 3.45 : Prise en charge de la personnalisation du modèle ChatGLM2 +- version 3.44 : Prise en charge officielle d'Azure, amélioration de l'utilisabilité de l'interface +- version 3.4 : +traduction complète des articles Arxiv, +correction des articles Latex +- version 3.3 : +fonction d'intégration d'informations Internet +- version 3.2 : Les plugins de fonction prennent en charge plus de paramètres (fonction d'enregistrement de conversation, débogage de code de n'importe quel langage + demandes d'LLM arbitraires) +- version 3.1 : Prise en charge de l'interrogation simultanée de plusieurs modèles gpt ! Prise en charge de l'API2D, répartition de charge entre plusieurs clés API +- version 3.0 : Prise en charge de chatglm et d'autres petits llm +- version 2.6 : Refonte de la structure des plugins, amélioration de l'interactivité, ajout de nouveaux plugins +- version 2.5 : Auto-mise à jour, résolution des problèmes de dépassement de longueur de texte et de jeton pendant la consolidation de grands projets de codes sources +- version 2.4 : (1) Nouvelle fonctionnalité de traduction complète des documents PDF ; (2) Nouvelle fonctionnalité de changement de position de la zone de saisie ; (3) Nouvelle option de disposition verticale ; (4) Optimisation des plugins de fonction multithreads. +- version 2.3 : Amélioration de l'interactivité multi-threads +- version 2.2 : Prise en charge du rechargement à chaud des plugins de fonction +- version 2.1 : Mise en page pliable +- version 2.0 : Introduction de plugins de fonction modulaires +- version 1.0: Fonctionnalités de base + +Groupe QQ des développeurs de GPT Academic: `610599535` + +- Problèmes connus + - Certains plugins de traduction de navigateurs peuvent nuire au fonctionnement de l'interface utilisateur de ce logiciel. + - Gradio officiel a actuellement de nombreux bugs de compatibilité. Veuillez utiliser `requirement.txt` pour installer Gradio. + +### III: Thèmes +Vous pouvez modifier le thème en modifiant l'option `THEME` (config.py). + +1. `Chuanhu-Small-and-Beautiful` [Lien](https://github.com/GaiZhenbiao/ChuanhuChatGPT/) + + +### IV: Branches de développement de ce projet + +1. Branche `master` : Branche principale, version stable +2. 
Branche `frontier` : Branche de développement, version de test + + +### V: Références et apprentissage + +``` +De nombreux designs de codes de projets exceptionnels ont été référencés dans le développement de ce projet, sans ordre spécifique : + +# ChatGLM2-6B de l'Université Tsinghua: +https://github.com/THUDM/ChatGLM2-6B + +# JittorLLMs de l'Université Tsinghua: +https://github.com/Jittor/JittorLLMs + +# ChatPaper : +https://github.com/kaixindelele/ChatPaper + +# Edge-GPT : +https://github.com/acheong08/EdgeGPT + +# ChuanhuChatGPT : +https://github.com/GaiZhenbiao/ChuanhuChatGPT + + + +# Oobabooga installeur en un clic : +https://github.com/oobabooga/one-click-installers + +# Plus: +https://github.com/gradio-app/gradio +https://github.com/fghrsh/live2d_demo + diff --git a/docs/README.German.md b/docs/README.German.md new file mode 100644 index 0000000000..ffbcaacd17 --- /dev/null +++ b/docs/README.German.md @@ -0,0 +1,364 @@ + + + +> **Hinweis** +> +> Dieses README wurde mithilfe der GPT-Übersetzung (durch das Plugin dieses Projekts) erstellt und ist nicht zu 100 % zuverlässig. Bitte überprüfen Sie die Übersetzungsergebnisse sorgfältig. +> +> 7. November 2023: Beim Installieren der Abhängigkeiten bitte nur die in der `requirements.txt` **angegebenen Versionen** auswählen. Installationsbefehl: `pip install -r requirements.txt`. + + +#
GPT Academic (GPT Akademisch)
+ +**Wenn Ihnen dieses Projekt gefällt, geben Sie ihm bitte einen Star. Wenn Sie praktische Tastenkombinationen oder Plugins entwickelt haben, sind Pull-Anfragen willkommen!** + +Wenn Ihnen dieses Projekt gefällt, geben Sie ihm bitte einen Star. +Um dieses Projekt mit GPT in eine beliebige Sprache zu übersetzen, lesen Sie [`multi_language.py`](multi_language.py) (experimentell). + +> **Hinweis** +> +> 1. Beachten Sie bitte, dass nur die mit **hervorgehobenen** Plugins (Schaltflächen) Dateien lesen können. Einige Plugins befinden sich im **Drop-down-Menü** des Plugin-Bereichs. Außerdem freuen wir uns über jede neue Plugin-PR mit **höchster Priorität**. +> +> 2. Die Funktionen jeder Datei in diesem Projekt sind im [Selbstanalysebericht `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT-Academic-Selbstanalysebericht) ausführlich erläutert. Sie können jederzeit auf die relevanten Funktions-Plugins klicken und GPT aufrufen, um den Selbstanalysebericht des Projekts neu zu generieren. Häufig gestellte Fragen finden Sie im [`Wiki`](https://github.com/binary-husky/gpt_academic/wiki). [Standardinstallationsmethode](#installation) | [Ein-Klick-Installationsskript](https://github.com/binary-husky/gpt_academic/releases) | [Konfigurationsanleitung](https://github.com/binary-husky/gpt_academic/wiki/Projekt-Konfigurationsanleitung). +> +> 3. Dieses Projekt ist kompatibel mit und unterstützt auch die Verwendung von inländischen Sprachmodellen wie ChatGLM. Die gleichzeitige Verwendung mehrerer API-Schlüssel ist möglich, indem Sie sie in der Konfigurationsdatei wie folgt angeben: `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Wenn Sie den `API_KEY` vorübergehend ändern möchten, geben Sie vorübergehend den temporären `API_KEY` im Eingabebereich ein und drücken Sie die Eingabetaste, um die Änderung wirksam werden zu lassen. + + + + +
+ +Funktionen (⭐= Kürzlich hinzugefügte Funktion) | Beschreibung +--- | --- +⭐[Neues Modell integrieren](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | Baidu [Qianfan](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu) und Wenxin Yanyi, [Tongyi Qianwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary), Shanghai AI-Lab [Shusheng](https://github.com/InternLM/InternLM), Xunfei [Xinghuo](https://xinghuo.xfyun.cn/), [LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), Cognitive Graph API, DALLE3 +Verfeinern, Übersetzen, Codierung erläutern | Ein-Klick-Verfeinerung, Übersetzung, Suche nach grammatikalischen Fehlern in wissenschaftlichen Arbeiten, Erklärung von Code +[Eigene Tastenkombinationen](https://www.bilibili.com/video/BV14s4y1E7jN) definieren | Eigene Tastenkombinationen definieren +Modulare Gestaltung | Ermöglicht die Verwendung benutzerdefinierter leistungsstarker [Plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions), Plugins unterstützen [Hot-Reload](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) +[Programmanalyse](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin] Ermöglicht die Erstellung einer Projekthierarchie für Python/C/C++/Java/Lua/... mit nur einem Klick oder [Selbstanalyse](https://www.bilibili.com/video/BV1cj411A7VW) +Lesen von Forschungsarbeiten, Übersetzen von Forschungsarbeiten | [Plugin] Ermöglicht eine Umwandlung des gesamten Latex-/PDF-Forschungspapiers mit nur einem Klick und generiert eine Zusammenfassung +Latex-Übersetzung des vollständigen Textes, Ausbesserung | [Plugin] Ermöglicht eine Übersetzung oder Verbesserung der Latex-Forschungsarbeit mit nur einem Klick +Erzeugen von Batch-Anmerkungen | [Plugin] Erzeugt Funktionserläuterungen in Stapeln +Markdown- [En-De-Übersetzung](https://www.bilibili.com/video/BV1yo4y157jV/) | [Plugin] Haben Sie die [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in den oben genannten 5 Sprachen gesehen? 
+Erzeugen eines Chat-Analyseberichts | [Plugin] Generiert einen zusammenfassenden Bericht nach der Ausführung +PDF-Textübersetzungsmerkmal | [Plugin] Extrahiert Titel und Zusammenfassung des PDF-Dokuments und übersetzt den vollständigen Text (mehrfädig) +Arxiv-Assistent | [Plugin] Geben Sie die URL eines Arxiv-Artikels ein, um eine Zusammenfassung zu übersetzen und die PDF-Datei herunterzuladen +Automatische Überprüfung von Latex-Artikeln | [Plugin] Überprüft die Grammatik und Rechtschreibung von Latex-Artikeln nach dem Vorbild von Grammarly und generiert eine PDF-Vergleichsdatei +Google Scholar Integration Assistant | [Plugin] Geben Sie eine beliebige URL der Google Scholar-Suchseite ein und lassen Sie GPT Ihre [Verwandten Arbeiten](https://www.bilibili.com/video/BV1GP411U7Az/) schreiben +Internetinformationsaggregation + GPT | [Plugin] Ermöglicht es GPT, Fragen durch das Durchsuchen des Internets zu beantworten und Informationen immer auf dem neuesten Stand zu halten +⭐Feine Übersetzung von Arxiv-Artikeln ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [Plugin] Übersetzt Arxiv-Artikel [mit hoher Qualität](https://www.bilibili.com/video/BV1dz4y1v77A/) mit einem Klick - das beste Übersetzungstool für wissenschaftliche Artikel +⭐[Echtzeit-Spracheingabe](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [Plugin] [Asynchrones Lauschen auf Audio-Eingabe](https://www.bilibili.com/video/BV1AV4y187Uy/), automatisches Zerschneiden des Textes, automatische Suche nach dem richtigen Zeitpunkt zur Beantwortung +Darstellen von Formeln/Bildern/Tabellen | Zeigt Formeln sowohl in [TEX-](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png)- als auch in gerenderten Formen an, unterstützt Formeln und Code-Hervorhebung +⭐AutoGen Multi-Agent Plugin | [Plugin] Erforscht die Möglichkeiten des emergenten Verhaltens von Multi-Agent-Systemen mit Microsoft AutoGen! +Start im Dark-Theme | Um das Dark-Theme zu aktivieren, fügen Sie ```/?__theme=dark``` am Ende der URL im Browser hinzu +[Mehrsprachige LLM-Modelle](https://www.bilibili.com/video/BV1wT411p7yf) unterstützt | Es ist sicherlich beeindruckend, von GPT3.5, GPT4, [ChatGLM2 der Tsinghua University](https://github.com/THUDM/ChatGLM2-6B), [MOSS der Fudan University](https://github.com/OpenLMLab/MOSS) bedient zu werden, oder? +⭐ChatGLM2 Feinabstimmungsmodell | Unterstützt das Laden von ChatGLM2-Feinabstimmungsmodellen und bietet Unterstützung für ChatGLM2-Feinabstimmungsassistenten +Integration weiterer LLM-Modelle, Unterstützung von [Huggingface-Deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) | Hinzufügen der Newbing-Schnittstelle (neues Bing), Einführung der [Jittorllms der Tsinghua University](https://github.com/Jittor/JittorLLMs) zur Unterstützung von LLaMA und PanGu Alpha +⭐[void-terminal](https://github.com/binary-husky/void-terminal) Pip-Paket | Verwenden Sie das Projekt in Python direkt, indem Sie das gesamte Funktionsplugin verwenden (in Entwicklung) +⭐Void-Terminal-Plugin | [Plugin] Verwenden Sie natürliche Sprache, um andere Funktionen dieses Projekts direkt zu steuern +Weitere Funktionen anzeigen (z. B. Bildgenerierung) …… | Siehe das Ende dieses Dokuments …… +
+ + +- Neues Interface (Ändern Sie die LAYOUT-Option in der `config.py`, um zwischen "Links-Rechts-Layout" und "Oben-Unten-Layout" zu wechseln) +
+ +
+ + +- Alle Schaltflächen werden dynamisch aus der `functional.py` generiert und ermöglichen das beliebige Hinzufügen benutzerdefinierter Funktionen zur Befreiung der Zwischenablage. +
+ +
+ +- Überarbeiten/Korrigieren +
+ +
+ + + +- If the output contains formulas, they will be displayed in both tex format and rendering format for easy copying and reading. +
+ +
+ +- Don't want to look at the project code? Show off the whole project directly in chatgpt's mouth. +
+ +
+ +- Multiple large language models mixed calling (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) +
+ +
+ +# Installation +### Installation Method I: Run directly (Windows, Linux or MacOS) + +1. Download the project +```sh +git clone --depth=1 https://github.com/binary-husky/gpt_academic.git +cd gpt_academic +``` + +2. Configure API_KEY + +In `config.py`, configure API KEY and other settings, [click to view special network environment configuration methods](https://github.com/binary-husky/gpt_academic/issues/1). [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/Project-Configuration-Instructions). + +「 The program will first check if there is a confidential configuration file named `config_private.py` and use its configuration to override the configuration with the same name in `config.py`. If you understand this reading logic, we strongly recommend that you create a new configuration file named `config_private.py` next to `config.py` and move (copy) the configuration in `config.py` to `config_private.py` (only copy the configuration items that you have modified). 」 + +「 You can configure the project through `environment variables`. The format of environment variables can refer to the `docker-compose.yml` file or our [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/Project-Configuration-Instructions). The priority of configuration reading is: `environment variables` > `config_private.py` > `config.py`. 」 + + +3. Install dependencies +```sh +# (Option I: if you are familiar with python, python>=3.9) Note: Use the official pip source or Ali pip source, temporary method to change the source: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ +python -m pip install -r requirements.txt + +# (Option II: Using Anaconda) The steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr): +conda create -n gptac_venv python=3.11 # Create an anaconda environment +conda activate gptac_venv # Activate the anaconda environment +python -m pip install -r requirements.txt # This step is the same as installing with pip +``` + + +
If you need support for Tsinghua ChatGLM2/Fudan MOSS/RWKV as backend, please click to expand. +

+ +[Optional] If you need to support Tsinghua ChatGLM2/Fudan MOSS as the backend, you need to install additional dependencies (Prerequisites: Familiar with Python + Have used PyTorch + Strong computer configuration): +```sh +# [Optional Step I] Support Tsinghua ChatGLM2. Tsinghua ChatGLM note: If you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters normally", refer to the following: 1: The default installation above is torch+cpu version. To use cuda, you need to uninstall torch and reinstall torch+cuda; 2: If you cannot load the model due to insufficient computer configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py. Change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) +python -m pip install -r request_llms/requirements_chatglm.txt + +# [Optional Step II] Support Fudan MOSS +python -m pip install -r request_llms/requirements_moss.txt +git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # When executing this line of code, you must be in the root path of the project + +# [Optional Step III] Support RWKV Runner +Refer to the wiki: https://github.com/binary-husky/gpt_academic/wiki/Support-RWKV-Runner + +# [Optional Step IV] Make sure the AVAIL_LLM_MODELS in config.py includes the expected models. The currently supported models are as follows (the jittorllms series only supports the docker solution at present): +AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] +``` + +

+
+ + + +4. Run +```sh +python main.py +``` + +### Installation Method II: Use Docker + +0. Deploy all capabilities of the project (this is a large image that includes cuda and latex. But if you have a slow internet speed or a small hard drive, it is not recommended to use this) +[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml) + +``` sh +# Modify docker-compose.yml, keep solution 0 and delete other solutions. Then run: +docker-compose up +``` + +1. ChatGPT + Wenxin's words + spark and other online models (recommended for most people) +[![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml) +[![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml) +[![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml) + +``` sh +# Modify docker-compose.yml, keep solution 1 and delete other solutions. Then run: +docker-compose up +``` + +P.S. If you need the Latex plugin functionality, please refer to the Wiki. Also, you can directly use solution 4 or 0 to get the Latex functionality. + +2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + Thousand Questions (Requires familiarity with [Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian) runtime) +[![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml) + +``` sh +# Modify docker-compose.yml, keep solution 2 and delete other solutions. Then run: +docker-compose up +``` + + +### Installation Method III: Other Deployment Methods +1. **Windows One-Click Script**. +Windows users who are completely unfamiliar with the python environment can download the one-click script for installation without local models in the published [Release](https://github.com/binary-husky/gpt_academic/releases). +The script is contributed by [oobabooga](https://github.com/oobabooga/one-click-installers). + +2. Use third-party APIs, Azure, Wenxin's words, Spark, etc., see [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/Project-Configuration-Instructions) + +3. Pit avoidance guide for cloud server remote deployment. +Please visit the [Cloud Server Remote Deployment Wiki](https://github.com/binary-husky/gpt_academic/wiki/Cloud-Server-Remote-Deployment-Guide) + +4. Some new deployment platforms or methods + - Use Sealos [one-click deployment](https://github.com/binary-husky/gpt_academic/issues/993). + - Use WSL2 (Windows Subsystem for Linux). Please visit the [deployment wiki-2](https://github.com/binary-husky/gpt_academic/wiki/Deploy-on-Windows-Subsystem-for-Linux-WSL2) + - How to run under a subpath (such as `http://localhost/subpath`). 
Please visit the [FastAPI Running Instructions](docs/WithFastapi.md).
+
+
+
+# Fortgeschrittene Nutzung
+### I: Benutzerdefinierte Tasten hinzufügen (akademische Hotkeys)
+Öffnen Sie die Datei `core_functional.py` mit einem beliebigen Texteditor, fügen Sie einen Eintrag wie den folgenden hinzu und starten Sie dann das Programm neu. (Wenn die Schaltfläche bereits vorhanden ist, können Präfix und Suffix im laufenden Betrieb geändert werden, ohne dass das Programm neu gestartet werden muss.)
+
+Beispiel (Hinweis: Die Schlüssel "Prefix" und "Suffix" müssen auf Englisch bleiben, damit der Eintrag funktioniert; nur die Werte dürfen übersetzt werden):
+```
+"Übersetzung von Englisch nach Chinesisch": {
+    # Präfix, wird vor Ihrer Eingabe hinzugefügt. Zum Beispiel, um Ihre Anforderungen zu beschreiben, z.B. Übersetzen, Code erklären, verbessern usw.
+    "Prefix": "Bitte übersetzen Sie den folgenden Abschnitt ins Chinesische und erklären Sie dann jedes Fachwort in einer Markdown-Tabelle:\n\n",
+
+    # Suffix, wird nach Ihrer Eingabe hinzugefügt. Zum Beispiel, um Ihre Eingabe in Anführungszeichen zu setzen.
+    "Suffix": "",
+},
+```
+
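+
+To illustrate, an entry like the one above is consumed roughly as in the following sketch. The function name and surrounding plumbing are hypothetical; only the "Prefix"/"Suffix" keys come from `core_functional.py`.
+```python
+# Hypothetical sketch of how a custom button entry is applied.
+def apply_custom_button(entry: dict, user_input: str) -> str:
+    # The prompt sent to the model is simply prefix + input + suffix.
+    return entry.get("Prefix", "") + user_input + entry.get("Suffix", "")
+```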
+ +
+ +### II: Benutzerdefinierte Funktionsplugins +Schreiben Sie leistungsstarke Funktionsplugins, um beliebige Aufgaben zu erledigen, die Sie wünschen oder nicht erwartet haben. +Das Erstellen und Debuggen von Plugins in diesem Projekt ist einfach und erfordert nur Grundkenntnisse in Python. Sie können unser bereitgestelltes Template verwenden, um Ihre eigene Plugin-Funktion zu implementieren. +Weitere Informationen finden Sie in der [Plugin-Anleitung](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97). + + +# Aktualisierungen +### I: Neuigkeiten + +1. Dialogspeicherungsfunktion. Rufen Sie im Funktionspluginbereich "Aktuellen Dialog speichern" auf, um den aktuellen Dialog als lesbare und wiederherstellbare HTML-Datei zu speichern. +Darüber hinaus können Sie im Funktionspluginbereich (Dropdown-Menü) "Dialoghistorie laden" aufrufen, um frühere Sitzungen wiederherzustellen. +Tipp: Wenn kein Dateiname angegeben ist, können Sie direkt auf "Dialoghistorie laden" klicken, um den Verlauf des HTML-Archivs anzuzeigen. +
+ +
+ +2. ⭐ Latex/Arxiv-Papierübersetzungsfunktion ⭐ +
+ ===> + +
+ +3. Leere Terminaloberfläche (Verständnis der Benutzerabsicht und automatischer Aufruf anderer Plugins aus natürlicher Spracheingabe) + +- Schritt 1: Geben Sie "Bitte Plugin aufrufen, um das PDF-Papier zu übersetzen, dessen Adresse https://openreview.net/pdf?id=rJl0r3R9KX ist" ein. +- Schritt 2: Klicken Sie auf "Leere Terminaloberfläche". + +
+ +
+ +4. Modulare Funktionsgestaltung mit einfacher Schnittstelle für leistungsstarke Funktionen +
+ + +
+ +5. Übersetzung und Lösung anderer Open-Source-Projekte +
+ + +
+ +6. Funktionen zur Dekoration von [live2d](https://github.com/fghrsh/live2d_demo) (standardmäßig deaktiviert, config.py muss geändert werden) +
+ +
+ +7. OpenAI-Bildgenerierung +
+ +
+ +8. OpenAI-Audioanalyse und Zusammenfassung +
+ +
+ +9. Latex-Volltextkorrektur +
+ ===> + +
+ +10. Sprach- und Themenwechsel +
+ +
+ + + +### II: Versionen: +- Version 3.70 (ausstehend): Optimierung des AutoGen-Plugin-Themas und Entwicklung einer Reihe von abgeleiteten Plugins +- Version 3.60: Einführung von AutoGen als Grundlage für neue Plugin-Generation +- Version 3.57: Unterstützung von GLM3, SparkV3, WenxinYiyanV4, Behebung von Problemen bei gleichzeitiger Verwendung von lokalen Modellen +- Version 3.56: Dynamische Hinzufügung von Basisfunktionsbuttons, neue Übersichtsseite für PDFs +- Version 3.55: Überarbeitung der Benutzeroberfläche, Hinzufügung von Schwebefenstern und Menüleiste +- Version 3.54: Neuer dynamischer Code interpretier (Code Interpreter) (unfertig) +- Version 3.53: Unterstützung für dynamische Auswahl verschiedener Oberflächenthemen, Verbesserung der Stabilität und Behebung von Mehrbenutzerkonflikten +- Version 3.50: Verwenden Sie natürliche Sprache, um alle Funktionen dieses Projekts aufzurufen (leeres Terminal), Unterstützung für Plugin-Kategorien, verbesserte Benutzeroberfläche, neue Themen +- Version 3.49: Unterstützung für Baidu Qianfan Platform und WenxinYiyan +- Version 3.48: Unterstützung für Alibaba Damo Academy Tongyi Qianwen, Shanghai AI-Lab Shusheng, Xunfei Spark +- Version 3.46: Vollständig automatisierter Echtzeit-Sprachdialog +- Version 3.45: Anpassbare ChatGLM2-Feinjustierung +- Version 3.44: Offizielle Unterstützung für Azure, Verbesserung der Benutzerfreundlichkeit der Benutzeroberfläche +- Version 3.4: Hinzufügen von Arxiv-Papierübersetzung, LaTeX-Papierkorrektur +- Version 3.3: Hinzufügen von Internet-Informationen +- Version 3.2: Funktionsplugins unterstützen weitere Parameter (Dialog speichern, beliebigen Code analysieren und nach beliebigen LLM-Kombinationen fragen) +- Version 3.1: Unterstützung für die gleichzeitige Abfrage mehrerer GPT-Modelle! Unterstützung für API-Schlüssel-Lastenausgleich +- Version 3.0: Unterstützung von ChatGLM und anderen kleinen LLMs +- Version 2.6: Neugestaltung der Plugin-Struktur, Verbesserung der Interaktivität, Hinzufügen weiterer Plugins +- Version 2.5: Auto-Update zur Lösung von Problemen mit zu langem Text oder Tokenüberschuss beim Zusammenfassen von Code +- Version 2.4: (1) Hinzufügen der Funktion zur Übersetzung des vollständigen PDF-Texts; (2) Neues Feature zum Wechseln der Position des Eingabebereichs; (3) Hinzufügen der Option für eine vertikale Ausrichtung; (4) Verbesserung der Multithreading-Funktionen von Plugins. +- Version 2.3: Verbesserte Multithreading-Interaktivität +- Version 2.2: Funktionsplugins können heiß neu geladen werden +- Version 2.1: Faltbare Layouts +- Version 2.0: Einführung modularer Funktionsplugins +- Version 1.0: Grundfunktionen + +Entwickler-QQ-Gruppe von GPT Academic: `610599535` + +- Bekannte Probleme + - Einige Browserübersetzungsplugins beeinflussen die Frontend-Ausführung dieser Software + - Die offizielle Version von Gradio hat derzeit viele Kompatibilitätsprobleme. Installieren Sie Gradio daher unbedingt über `requirement.txt`. + +### III: Themen +Sie können das Theme ändern, indem Sie die Option `THEME` (config.py) ändern. +1. `Chuanhu-Small-and-Beautiful` [Link](https://github.com/GaiZhenbiao/ChuanhuChatGPT/) + + +### IV: Entwicklungszweige dieses Projekts + +1. `master` Branch: Hauptzweig, stabile Version +2. `frontier` Branch: Entwicklungsbranch, Testversion + + +### V: Referenzen und Lernen + +``` +Der Code basiert auf dem Design anderer herausragender Projekte. 
Die Reihenfolge ist beliebig: + +# ChatGLM2-6B von Tsinghua: +https://github.com/THUDM/ChatGLM2-6B + +# JittorLLMs von Tsinghua: +https://github.com/Jittor/JittorLLMs + +# ChatPaper: +https://github.com/kaixindelele/ChatPaper + +# Edge-GPT: +https://github.com/acheong08/EdgeGPT + +# ChuanhuChatGPT: +https://github.com/GaiZhenbiao/ChuanhuChatGPT + + + +# Oobabooga One-Click-Installations: +https://github.com/oobabooga/one-click-installers + +# Weitere: +https://github.com/gradio-app/gradio +https://github.com/fghrsh/live2d_demo + diff --git a/docs/README.Italian.md b/docs/README.Italian.md new file mode 100644 index 0000000000..e5179f2d06 --- /dev/null +++ b/docs/README.Italian.md @@ -0,0 +1,361 @@ + + + +> **Nota** +> +> Questo README è stato tradotto da GPT (implementato da un plugin di questo progetto) e non è al 100% affidabile, per favore valuta attentamente i risultati della traduzione. +> +> 2023.11.7: Quando installi le dipendenze, seleziona le versioni **specificate** nel file `requirements.txt`. Comando di installazione: `pip install -r requirements.txt`. + + +#
GPT Ottimizzazione Accademica (GPT Academic)
+ +**Se ti piace questo progetto, per favore dagli una stella; se hai idee o plugin utili, fai una pull request!** + +Se ti piace questo progetto, dagli una stella. +Per tradurre questo progetto in qualsiasi lingua con GPT, leggi ed esegui [`multi_language.py`](multi_language.py) (sperimentale). + +> **Nota** +> +> 1. Fai attenzione che solo i plugin (pulsanti) **evidenziati** supportano la lettura dei file, alcuni plugin si trovano nel **menu a tendina** nell'area dei plugin. Inoltre, accogliamo e gestiamo con **massima priorità** qualsiasi nuovo plugin attraverso pull request. +> +> 2. Le funzioni di ogni file in questo progetto sono descritte in dettaglio nel [rapporto di traduzione automatica del progetto `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). Con l'iterazione della versione, puoi anche fare clic sui plugin delle funzioni rilevanti in qualsiasi momento per richiamare GPT e rigenerare il rapporto di auto-analisi del progetto. Domande frequenti [`wiki`](https://github.com/binary-husky/gpt_academic/wiki) | [Metodo di installazione standard](#installazione) | [Script di installazione one-click](https://github.com/binary-husky/gpt_academic/releases) | [Configurazione](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。 +> +> 3. Questo progetto è compatibile e incoraggia l'uso di modelli di linguaggio di grandi dimensioni nazionali, come ChatGLM. Supporto per la coesistenza di più chiavi API, puoi compilare nel file di configurazione come `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Quando è necessario sostituire temporaneamente `API_KEY`, inserisci temporaneamente `API_KEY` nell'area di input e premi Invio per confermare. + + + + +
+ +Funzionalità (⭐ = Nuove funzionalità recenti) | Descrizione +--- | --- +⭐[Integrazione di nuovi modelli](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | Baidu [Qianfan](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu) e [Wenxin](https://cloud.baidu.com/doc/GUIDE/5268.9) Intelligence, [Tongyi Qianwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary), Shanghai AI-Lab [bookbrain](https://github.com/InternLM/InternLM), Xunfei [Xinghuo](https://xinghuo.xfyun.cn/), [LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), Zhipu API, DALLE3 +Revisione, traduzione, spiegazione del codice | Revisione, traduzione, ricerca errori grammaticali nei documenti e spiegazione del codice con un clic +[Tasti di scelta rapida personalizzati](https://www.bilibili.com/video/BV14s4y1E7jN) | Supporta tasti di scelta rapida personalizzati +Design modulare | Supporto per plugin personalizzati potenti, i plugin supportano l'[aggiornamento in tempo reale](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) +[Analisi del codice](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin] Un clic per analizzare alberi di progetti Python/C/C++/Java/Lua/... o [autoanalisi](https://www.bilibili.com/video/BV1cj411A7VW) +Lettura di documenti, traduzione di documenti | [Plugin] Un clic per interpretare documenti completi in latex/pdf e generare un riassunto +Traduzione completa di testi in Latex, revisione completa di testi in Latex | [Plugin] Un clic per tradurre o correggere documenti in latex +Generazione automatica di commenti in batch | [Plugin] Un clic per generare commenti di funzione in batch +Traduzione [cinese-inglese](https://www.bilibili.com/video/BV1yo4y157jV/) in Markdown | [Plugin] Hai visto sopra i README in 5 lingue diverse ([Inglese](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md))? 
+Generazione di rapporti di analisi chat | [Plugin] Genera automaticamente un rapporto di sintesi dopo l'esecuzione +Funzionalità di traduzione di testo completo in PDF | [Plugin] Estrai il titolo e il riassunto dei documenti PDF e traduci tutto il testo (multithreading) +Aiutante per Arxiv | [Plugin] Inserisci l'URL dell'articolo Arxiv per tradurre riassunto e scaricare PDF in un clic +Controllo completo dei documenti in Latex | [Plugin] Rileva errori grammaticali e ortografici nei documenti in Latex simile a Grammarly + Scarica un PDF per il confronto +Assistente per Google Scholar | [Plugin] Dato qualsiasi URL della pagina di ricerca di Google Scholar, fai scrivere da GPT gli *articoli correlati* per te +Concentrazione delle informazioni di Internet + GPT | [Plugin] [Recupera informazioni da Internet](https://www.bilibili.com/video/BV1om4y127ck) utilizzando GPT per rispondere alle domande e rendi le informazioni sempre aggiornate +⭐Traduzione accurata di articoli Arxiv ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [Plugin] [Traduci articoli Arxiv ad alta qualità](https://www.bilibili.com/video/BV1dz4y1v77A/) con un clic, lo strumento di traduzione degli articoli migliore al mondo al momento +⭐[Inserimento della conversazione vocale in tempo reale](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [Plugin] [Ascolta l'audio](https://www.bilibili.com/video/BV1AV4y187Uy/) in modo asincrono, taglia automaticamente le frasi e trova automaticamente il momento giusto per rispondere +Visualizzazione di formule, immagini, tabelle | Mostra contemporaneamente formule in formato tex e renderizzato, supporta formule e evidenziazione del codice +⭐Plugin multi-agente AutoGen | [Plugin] Esplora le possibilità dell'emergenza intelligence multi-agente con l'aiuto di Microsoft AutoGen! +Attiva il tema scuro [qui](https://github.com/binary-husky/gpt_academic/issues/173) | Aggiungi ```/?__theme=dark``` alla fine dell'URL del browser per passare al tema scuro +Supporto di più modelli LLM | Essere servito contemporaneamente da GPT3.5, GPT4, [ChatGLM2 di Tsinghua](https://github.com/THUDM/ChatGLM2-6B), [MOSS di Fudan](https://github.com/OpenLMLab/MOSS) +⭐Modello di fine-tuning ChatGLM2 | Supporto per l'importazione del modello di fine-tuning di ChatGLM2, fornendo plug-in di assistenza per il fine tuning di ChatGLM2 +Più supporto per modelli LLM, supporto del [deploy di Huggingface](https://huggingface.co/spaces/qingxu98/gpt-academic) | Aggiungi interfaccia Newbing (Bing Translator), introduce il supporto di [JittorLLMs](https://github.com/Jittor/JittorLLMs) di Tsinghua, supporto per [LLaMA](https://github.com/facebookresearch/llama) e [Panguα](https://openi.org.cn/pangu/) +⭐Pacchetto pip [void-terminal](https://github.com/binary-husky/void-terminal) | Fornisce funzionalità di tutti i plugin di questo progetto direttamente in Python senza GUI (in sviluppo) +⭐Plugin terminale virtuale | [Plugin] Richiama altri plugin di questo progetto utilizzando linguaggio naturale +Altre nuove funzionalità (come la generazione di immagini) ... | Vedi alla fine di questo documento ... + +
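+
+As an illustration of the comma-separated `API_KEY` format mentioned in the notes above, here is a minimal sketch of picking one key per request. The helper function is hypothetical; only the key format comes from this README, and the project's real key handling may differ.
+```python
+import random
+
+# Hypothetical sketch: split the comma-separated API_KEY string and
+# pick one key per request (a crude form of load balancing).
+API_KEY = "openai-key1,openai-key2,azure-key3,api2d-key4"  # placeholder keys
+
+def pick_api_key(keys: str = API_KEY) -> str:
+    return random.choice([k.strip() for k in keys.split(",") if k.strip()])
+```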
+ + +- Nuovo layout (modifica l'opzione LAYOUT in `config.py` per passare tra "layout sinistra / destra" e "layout sopra / sotto") +
+ +
+ + +- Tutti i pulsanti vengono generati dinamicamente leggendo `functional.py`, puoi aggiungere liberamente funzionalità personalizzate, liberando la clipboard +
+ +
+ +- Revisione / correzione +
+ +
+ + + +- Se l'output contiene formule, saranno visualizzate sia in formato tex che in formato renderizzato per facilitarne la copia e la lettura. +
+ +
+ +- Non hai voglia di guardare il codice del progetto? Mostralo direttamente al chatgpt in bocca. +
+ +
+ +- Chiamate miste di modelli di grandi dimensioni (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) +
+ +
+ +# Installazione +### Metodo di installazione I: Esegui direttamente (Windows, Linux o MacOS) + +1. Scarica il progetto +```sh +git clone --depth=1 https://github.com/binary-husky/gpt_academic.git +cd gpt_academic +``` + +2. Configura l'API_KEY + +Nel file `config.py`, configura l'API KEY e altre impostazioni, [clicca qui per vedere come configurare l'API in ambienti di rete speciali](https://github.com/binary-husky/gpt_academic/issues/1) . [Pagina Wiki](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). + +「 Il programma controllerà prima se esiste un file di configurazione privata chiamato `config_private.py` e utilizzerà le configurazioni in esso contenute per sovrascrivere le configurazioni con lo stesso nome in `config.py`. Se comprendi questa logica di lettura, ti consigliamo vivamente di creare un nuovo file di configurazione chiamato `config_private.py` accanto a `config.py` e spostare (copiare) le configurazioni da `config.py` a `config_private.py` (basta copiare le voci di configurazione che hai modificato). 」 + +「 Supporta la configurazione del progetto tramite `variabili d'ambiente`, il formato di scrittura delle variabili d'ambiente è descritto nel file `docker-compose.yml` o nella nostra [pagina Wiki](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) priorità di lettura della configurazione: `variabili d'ambiente` > `config_private.py` > `config.py`. 」 + +3. Installa le dipendenze +```sh +# (Scelta I: Se familiarizzato con python, python>=3.9) Nota: Usa il repository delle fonti ufficiale di pip o Ali pip per temporaneamente cambiare la fonte: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ +python -m pip install -r requirements.txt + +# (Scelta II: Usa Anaconda) Anche in questo caso, i passaggi sono simili (https://www.bilibili.com/video/BV1rc411W7Dr): +conda create -n gptac_venv python=3.11 # Crea l'ambiente anaconda +conda activate gptac_venv # Attiva l'ambiente anaconda +python -m pip install -r requirements.txt # Questo passaggio è identico alla procedura di installazione con pip +``` + + +
Se desideri utilizzare il backend di ChatGLM2 di Tsinghua/Fudan MOSS/RWKV, fai clic per espandere +

+
+[Optional] Se desideri utilizzare ChatGLM2 di Tsinghua / Fudan MOSS come backend, è necessario installare ulteriori dipendenze (requisiti: conoscenza di Python, esperienza con PyTorch e hardware sufficientemente potente):
+```sh
+# [Optional Step I] Supporto per ChatGLM2 di Tsinghua. Nota: se si verifica l'errore "Call ChatGLM fail, non è possibile caricare i parametri di ChatGLM", fare riferimento a quanto segue. 1: L'installazione predefinita è la versione torch+cpu; per usare CUDA è necessario disinstallare torch e reinstallare la versione torch+cuda. 2: Se il modello non può essere caricato a causa di una configurazione insufficiente, è possibile ridurre la precisione del modello in request_llms/bridge_chatglm.py, sostituendo AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) con AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
+python -m pip install -r request_llms/requirements_chatglm.txt
+
+# [Optional Step II] Supporto per Fudan MOSS
+python -m pip install -r request_llms/requirements_moss.txt
+git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # Attenzione: eseguire questo comando dalla directory principale del progetto
+
+# [Optional Step III] Supporto per RWKV Runner
+# Consulta il Wiki: https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner
+
+# [Optional Step IV] Assicurati che AVAIL_LLM_MODELS in config.py includa i modelli desiderati. Di seguito i modelli attualmente supportati (la serie jittorllms supporta al momento solo la soluzione Docker):
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
+```
+

+
+ + + +4. Esegui +```sh +python main.py +``` + +### Metodo di installazione II: Utilizzo di Docker + +0. Installa tutte le funzionalità del progetto (Questo è un'immagine di grandi dimensioni che include cuda e latex. Potrebbe non essere adatta se hai una connessione lenta o uno spazio su disco limitato) +[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml) + +``` sh +# Modifica il file docker-compose.yml: mantieni solo la configurazione 0 e rimuovi le altre configurazioni. Avvia il seguente comando: +docker-compose up +``` + +1. ChatGPT + Wenxin Yiyu (Poem) + Spark, solo modelli online (Consigliato per la maggior parte delle persone) +[![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml) +[![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml) +[![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml) + +``` sh +# Modifica il file docker-compose.yml: mantieni solo la configurazione 1 e rimuovi le altre configurazioni. Avvia il seguente comando: +docker-compose up +``` + +P.S. Se hai bisogno del plugin LaTeX, consulta la pagina Wiki. In alternativa, puoi utilizzare le configurazioni 4 o 0 direttamente per ottenere questa funzionalità. + +2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + Tongyi Q&W (Richiede conoscenze su Nvidia Docker) +[![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml) + +``` sh +# Modifica il file docker-compose.yml: mantieni solo la configurazione 2 e rimuovi le altre configurazioni. Avvia il seguente comando: +docker-compose up +``` + + +### Metodo di installazione III: Altre opzioni di distribuzione +1. **Script di esecuzione con un clic per Windows**. +Se non conosci affatto l'ambiente python in Windows, puoi scaricare uno script di esecuzione con un clic dalla sezione [Release](https://github.com/binary-husky/gpt_academic/releases) per installare la versione che non richiede modelli locali. +Lo script è stato fornito da [oobabooga](https://github.com/oobabooga/one-click-installers). + +2. Utilizzo di API di terze parti, Azure, Wenxin Yiyu (Poem), Xinghuo, ecc. vedi [pagina Wiki](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) + +3. Guida all'installazione del server cloud remoto. +Visita la [pagina Wiki sull'installazione del server cloud remoto](https://github.com/binary-husky/gpt_academic/wiki/云服务器远程部署指南). + +4. Altre nuove piattaforme o metodi di distribuzione: + - Uso di Sealos per il [deployment con un clic](https://github.com/binary-husky/gpt_academic/issues/993). + - Uso di WSL2 (Windows Subsystem for Linux). Vedi [Guida all'installazione](https://github.com/binary-husky/gpt_academic/wiki/使用WSL2(Windows-Subsystem-for-Linux-子系统)部署) per maggiori informazioni. + - Funzionamento su un sotto-percorso URL (`http://localhost/subpath`). 
Vedi le [istruzioni FastAPI](docs/WithFastapi.md) per maggiori dettagli.
+
+
+
+# Utilizzo avanzato
+### I: Personalizzare nuovi pulsanti rapidi (tasti di scelta rapida accademici)
+Apri `core_functional.py` con qualsiasi editor di testo, aggiungi una voce come la seguente e riavvia il programma. (Se il pulsante esiste già, sia il prefisso che il suffisso possono essere modificati a caldo, senza riavviare il programma.)
+Ad esempio (nota: le chiavi "Prefix" e "Suffix" devono restare in inglese perché la voce funzioni; solo i valori possono essere tradotti):
+```
+"Traduzione avanzata Inglese-Cinese": {
+    # Prefisso, sarà aggiunto prima del tuo input. Ad esempio, per descrivere la tua richiesta: traduzione, spiegazione del codice, rifinitura, ecc.
+    "Prefix": "Si prega di tradurre il seguente testo in cinese e di spiegare i termini tecnici utilizzati, uno per uno, in una tabella markdown:\n\n",
+
+    # Suffisso, sarà aggiunto dopo il tuo input. Ad esempio, in combinazione con il prefisso, puoi racchiudere il tuo input tra virgolette.
+    "Suffix": "",
+},
+```
+
+ +
+ +### II: Plugin di funzioni personalizzate +Scrivi potentissimi plugin di funzioni per eseguire qualsiasi compito che desideri, sia che tu lo pensi o meno. +La scrittura di plugin per questo progetto è facile e richiede solo conoscenze di base di Python. Puoi seguire il [Guida ai Plugin di Funzione](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) per maggiori dettagli. + + +# Aggiornamenti +### I: Aggiornamenti + +1. Funzionalità di salvataggio della conversazione. Chiamare `Salva la conversazione corrente` nell'area del plugin per salvare la conversazione corrente come un file html leggibile e ripristinabile. +Inoltre, nella stessa area del plugin (menu a tendina) chiamare `Carica la cronologia della conversazione` per ripristinare una conversazione precedente. +Suggerimento: fare clic su `Carica la cronologia della conversazione` senza specificare un file per visualizzare la tua cronologia di archiviazione HTML. +
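+
+To picture the "save current conversation" feature described above, here is a rough, hypothetical sketch of dumping (question, answer) pairs to a readable HTML file. The function and its signature are illustrative only; the plugin's real implementation is not shown in this README and may differ.
+```python
+import html
+
+# Hypothetical sketch of archiving a conversation as a readable HTML file.
+def save_chat_to_html(history: list[tuple[str, str]], path: str = "chat_archive.html") -> None:
+    rows = "".join(
+        f"<p><b>User:</b> {html.escape(q)}</p><p><b>GPT:</b> {html.escape(a)}</p>"
+        for q, a in history
+    )
+    with open(path, "w", encoding="utf-8") as f:
+        f.write(f"<html><body>{rows}</body></html>")
+```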
+ +
+ +2. ⭐ Funzionalità di traduzione articoli Latex/Arxiv ⭐ +
+ ===> + +
+ +3. Terminale vuoto (Comprensione dell'intento dell'utente dai testi liberi + Chiamata automatica di altri plugin) + +- Passaggio 1: Digitare "Chiamare il plugin per tradurre un documento PDF, l'indirizzo è https://openreview.net/pdf?id=rJl0r3R9KX" +- Passaggio 2: Fare clic su "Terminale vuoto" + +
+ +
+ +4. Design modulare, interfacce semplici che supportano funzionalità potenti +
+ + +
+ +5. Traduzione e interpretazione di altri progetti open source +
+ + +
+ +6. Funzionalità leggera per [live2d](https://github.com/fghrsh/live2d_demo) (disabilitata per impostazione predefinita, richiede modifica di `config.py`) +
+ +
+ +7. Generazione di immagini di OpenAI +
+ +
+ +8. Elaborazione e riepilogo audio di OpenAI +
+ +
+ +9. Correzione totale del testo di Latex +
+ ===> + +
+ +10. Cambio linguaggio e tema +
+ +
+
+
+### II: Versioni:
+- versione 3.70 (todo): Ottimizzazione della visualizzazione del tema AutoGen e sviluppo di una serie di plugin correlati.
+- versione 3.60: Introduzione di AutoGen come fondamento per i plugin della nuova generazione.
+- versione 3.57: Supporto per GLM3, StarFire v3 e Wenxin Yiyan v4; correzione di bug di concorrenza nell'uso dei modelli locali.
+- versione 3.56: Possibilità di aggiungere dinamicamente pulsanti per funzionalità di base e nuova pagina di riepilogo dei PDF.
+- versione 3.55: Ristrutturazione dell'interfaccia utente, introduzione di finestre fluttuanti e barre dei menu.
+- versione 3.54: Nuovo interprete di codice dinamico (Code Interpreter) (da perfezionare).
+- versione 3.53: Possibilità di selezionare dinamicamente diversi temi dell'interfaccia utente, miglioramento della stabilità e risoluzione dei conflitti tra utenti multipli.
+- versione 3.50: Utilizzo del linguaggio naturale per chiamare tutte le funzioni dei plugin di questo progetto (Terminale vuoto), supporto per la classificazione dei plugin, miglioramento dell'interfaccia utente e design di nuovi temi.
+- versione 3.49: Supporto per la piattaforma Baidu Qianfan e Wenxin Yiyan.
+- versione 3.48: Supporto per Tongyi Qianwen di Alibaba DAMO Academy, Shusheng di Shanghai AI-Lab e StarFire di Xunfei.
+- versione 3.46: Supporto per la chat vocale in tempo reale completamente automatica.
+- versione 3.45: Supporto per il fine-tuning personalizzato del modello ChatGLM2.
+- versione 3.44: Supporto ufficiale per Azure, miglioramento dell'usabilità dell'interfaccia.
+- versione 3.4: + Funzionalità di traduzione di documenti arXiv e correzione di documenti LaTeX.
+- versione 3.3: + Funzionalità di sintesi delle informazioni da Internet.
+- versione 3.2: Il plugin di funzione supporta più interfacce dei parametri (funzionalità di salvataggio della conversazione, interpretazione di codici in qualsiasi linguaggio, interrogazione di qualsiasi combinazione di LLM contemporaneamente).
+- versione 3.1: Supporto per l'interrogazione simultanea di più modelli GPT! Supporto per api2d, bilanciamento del carico con più apikey.
+- versione 3.0: Supporto per chatglm e altri piccoli llm.
+- versione 2.6: Rielaborazione della struttura dei plugin, miglioramento dell'interattività, aggiunta di ulteriori plugin.
+- versione 2.5: Aggiornamento automatico; risolto il problema del testo troppo lungo e dell'overflow di token durante il riepilogo di grandi blocchi di codice.
+- versione 2.4: (1) Nuova funzionalità di traduzione integrale di documenti PDF; (2) Nuova funzionalità di scambio della posizione dell'area di input; (3) Nuova opzione di layout verticale; (4) Ottimizzazione dei plugin multi-thread.
+- versione 2.3: Miglioramento dell'interattività multi-thread.
+- versione 2.2: Supporto per la ricarica a caldo dei plugin.
+- versione 2.1: Layout pieghevole.
+- versione 2.0: Introduzione di plugin modulari.
+- versione 1.0: Funzioni di base
+
+GPT Academic Developer QQ Group: `610599535`
+
+- Problemi noti
+    - Alcuni plugin di traduzione del browser possono interferire con il funzionamento del frontend di questo software
+    - Gradio ufficiale ha molti bug di compatibilità: installa Gradio tramite `requirements.txt`
+
+### III: Temi
+Il tema può essere cambiato modificando l'opzione `THEME` (config.py)
+1. `Chuanhu-Small-and-Beautiful` [Website](https://github.com/GaiZhenbiao/ChuanhuChatGPT/)
+
+
+### IV: Branch di Sviluppo di questo progetto
+
+1. 
`master` branch: branch principale, versione stabile +2. `frontier` branch: branch di sviluppo, versione di test + + +### V: Riferimenti e Risorse di Apprendimento + +``` +Nel codice sono state utilizzate diverse idee dagli altri progetti, senza un ordine specifico: + +# ChatGLM2-6B di Tsinghua: +https://github.com/THUDM/ChatGLM2-6B + +# JittorLLMs di Tsinghua: +https://github.com/Jittor/JittorLLMs + +# ChatPaper: +https://github.com/kaixindelele/ChatPaper + +# Edge-GPT: +https://github.com/acheong08/EdgeGPT + +# ChuanhuChatGPT: +https://github.com/GaiZhenbiao/ChuanhuChatGPT + + + +# Installazione con un solo clic di Oobabooga: +https://github.com/oobabooga/one-click-installers + +# Altre risorse: +https://github.com/gradio-app/gradio +https://github.com/fghrsh/live2d_demo + diff --git a/docs/README.Japanese.md b/docs/README.Japanese.md new file mode 100644 index 0000000000..983395b556 --- /dev/null +++ b/docs/README.Japanese.md @@ -0,0 +1,345 @@ + + + +> **注意** +> +> 此READMEはGPTによる翻訳で生成されました(このプロジェクトのプラグインによって実装されています)、翻訳結果は100%正確ではないため、注意してください。 +> +> 2023年11月7日: 依存関係をインストールする際は、`requirements.txt`で**指定されたバージョン**を選択してください。 インストールコマンド: `pip install -r requirements.txt`。 + + +#
GPT 学術最適化 (GPT Academic)
+ +**このプロジェクトが気に入った場合は、Starを付けてください。また、便利なショートカットキーまたはプラグインを作成した場合は、プルリクエストを歓迎します!** +GPTを使用してこのプロジェクトを任意の言語に翻訳するには、[`multi_language.py`](multi_language.py)を読み込んで実行します(実験的な機能)。 + +> **注意** +> +> 1. **強調された** プラグイン(ボタン)のみがファイルを読み込むことができることに注意してください。一部のプラグインは、プラグインエリアのドロップダウンメニューにあります。また、新しいプラグインのPRを歓迎し、最優先で対応します。 +> +> 2. このプロジェクトの各ファイルの機能は、[自己分析レポート`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E5%A0%82)で詳しく説明されています。バージョンが進化するにつれて、関連する関数プラグインをクリックして、プロジェクトの自己分析レポートをGPTで再生成することもできます。よくある質問については、[`wiki`](https://github.com/binary-husky/gpt_academic/wiki)をご覧ください。[標準的なインストール方法](#installation) | [ワンクリックインストールスクリプト](https://github.com/binary-husky/gpt_academic/releases) | [構成の説明](https://github.com/binary-husky/gpt_academic/wiki/Project-Configuration-Explain)。 +> +> 3. このプロジェクトは、[ChatGLM](https://www.chatglm.dev/)などの中国製の大規模言語モデルも互換性があり、試してみることを推奨しています。複数のAPIキーを共存させることができ、設定ファイルに`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`のように記入できます。`API_KEY`を一時的に変更する必要がある場合は、入力エリアに一時的な`API_KEY`を入力し、Enterキーを押して提出すると有効になります。 + + + + +
+ +機能(⭐= 最近追加された機能) | 説明 +--- | --- +⭐[新しいモデルの追加](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | Baidu [Qianfan](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)とWenxin Yiyu, [Tongyi Qianwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary), Shanghai AI-Lab [Shusheng](https://github.com/InternLM/InternLM), Xunfei [Xinghuo](https://xinghuo.xfyun.cn/), [LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), Zhantu API, DALLE3 +校正、翻訳、コード解説 | 一括校正、翻訳、論文の文法エラーの検索、コードの解説 +[カスタムショートカットキー](https://www.bilibili.com/video/BV14s4y1E7jN) | カスタムショートカットキーのサポート +モジュール化された設計 | カスタムでパワフルな[プラグイン](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions)のサポート、プラグインの[ホットリロード](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) +[プログラム解析](https://www.bilibili.com/video/BV1cj411A7VW) | [プラグイン] Python/C/C++/Java/Lua/...のプロジェクトツリーを簡単に解析するか、[自己解析](https://www.bilibili.com/video/BV1cj411A7VW) +論文の読み込み、[翻訳](https://www.bilibili.com/video/BV1KT411x7Wn) | [プラグイン] LaTeX/PDFの論文全文を翻訳して要約を作成する +LaTeX全文の[翻訳](https://www.bilibili.com/video/BV1nk4y1Y7Js/)、[校正](https://www.bilibili.com/video/BV1FT411H7c5/) | [プラグイン] LaTeX論文を翻訳や校正する +一括コメント生成 | [プラグイン] 関数コメントを一括生成する +Markdownの[日英翻訳](https://www.bilibili.com/video/BV1yo4y157jV/) | [プラグイン] 5つの言語([英語](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)など)のREADMEをご覧になりましたか? +チャット分析レポートの生成 | [プラグイン] 実行後にサマリーレポートを自動生成する +[PDF論文全文の翻訳機能](https://www.bilibili.com/video/BV1KT411x7Wn) | [プラグイン] PDF論文のタイトルと要約を抽出し、全文を翻訳する(マルチスレッド) +[Arxivアシスタント](https://www.bilibili.com/video/BV1LM4y1279X) | [プラグイン] arxiv論文のURLを入力すると、要約を翻訳してPDFをダウンロードできます +LaTeX論文の一括校正 | [プラグイン] Grammarlyのように、LaTeX論文の文法とスペルを修正して対照PDFを出力する +[Google Scholar統合アシスタント](https://www.bilibili.com/video/BV19L411U7ia) | [プラグイン] 任意のGoogle Scholar検索ページのURLを指定して、関連資料をGPTに書かせることができます +インターネット情報の集約+GPT | [プラグイン] インターネットから情報を取得して質問に答え、情報が常に最新になるようにします +⭐Arxiv論文の詳細な翻訳 ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [プラグイン] arxiv論文を超高品質で翻訳します。最高の論文翻訳ツールです +⭐[リアルタイム音声入力](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [プラグイン] 非同期[音声をリッスン(https://www.bilibili.com/video/BV1AV4y187Uy/)し、自動で文章を区切り、回答のタイミングを自動で探します +公式/画像/表の表示 | 公式の[tex形式とレンダリング形式](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png)を同時に表示し、公式とコードのハイライトをサポートします +⭐AutoGenマルチエージェントプラグイン | [プラグイン] Microsoft AutoGenを利用して、マルチエージェントのインテリジェントなエマージェンスを探索します +ダーク[テーマ](https://github.com/binary-husky/gpt_academic/issues/173)を起動 | ブラウザのURLに```/?__theme=dark```を追加すると、ダークテーマに切り替えられます +[複数のLLMモデル](https://www.bilibili.com/video/BV1wT411p7yf)のサポート | GPT3.5、GPT4、[Tsinghua ChatGLM2](https://github.com/THUDM/ChatGLM2-6B)、[Fudan MOSS](https://github.com/OpenLMLab/MOSS)などを同時に使えるのは最高の感じですよね? +⭐ChatGLM2ファインチューニングモデル | ChatGLM2ファインチューニングモデルをロードして使用することができ、ChatGLM2ファインチューニングの補助プラグインが用意されています +さらなるLLMモデルの導入、[HuggingFaceデプロイのサポート](https://huggingface.co/spaces/qingxu98/gpt-academic) | Newbingインターフェース(新しいBing)の追加、Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs)の導入、[LLaMA](https://github.com/facebookresearch/llama)および[盤古α](https://openi.org.cn/pangu/)のサポート +⭐[void-terminal](https://github.com/binary-husky/void-terminal) pipパッケージ | GUIから独立して、Pythonから直接このプロジェクトのすべての関数プラグインを呼び出せます(開発中) +⭐Void Terminalプラグイン | [プラグイン] 自然言語で、このプロジェクトの他のプラグインを直接実行します +その他の新機能の紹介(画像生成など)...... | 末尾をご覧ください ...... +
+ + + +- もし出力に数式が含まれている場合、TeX形式とレンダリング形式の両方で表示されます。これにより、コピーと読み取りが容易になります。 + +
+ +
+ +- プロジェクトのコードを見るのがめんどくさい?プロジェクト全体を`chatgpt`に広報口頭発表してもらえるよ + +
+ +
+ +- 異なる言語モデルの組み合わせ呼び出し(ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) + +
+ +
+ +# インストール +### 方法I:直接実行(Windows、Linux、またはMacOS) + +1. プロジェクトをダウンロード +```sh +git clone --depth=1 https://github.com/binary-husky/gpt_academic.git +cd gpt_academic +``` + +2. APIキーを設定する + +`config.py`でAPIキーやその他の設定を設定します。特殊なネットワーク環境の設定方法については、[こちらをクリックして確認してください](https://github.com/binary-husky/gpt_academic/issues/1)。[Wikiページ](https://github.com/binary-husky/gpt_academic/wiki/Getting-Started)も参照してください。 + +「プログラムは、`config.py`と同じ場所にある`config_private.py`という名前のプライベート設定ファイルが存在するかどうかを優先的にチェックし、同じ名前の設定をコピーします。この読み込みロジックを理解できる場合、`config.py`の横に`config_private.py`という名前の新しい設定ファイルを作成し、`config.py`の設定を転送(コピー)することを強くお勧めします(変更した設定項目だけをコピーします)。」 + +「プロジェクトを環境変数で設定することもサポートしています。環境変数の書式は、`docker-compose.yml`ファイルや[Wikiページ](https://github.com/binary-husky/gpt_academic/wiki/Getting-Started)を参考にしてください。設定の優先度は、`環境変数` > `config_private.py` > `config.py`の順です。」 + +3. 依存関係をインストールする +```sh +# (オプションI:Pythonに詳しい場合、Python 3.9以上)注:公式のpipソースまたは阿里pipソースを使用し、一時的なソースの変更方法は、python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/です。 +python -m pip install -r requirements.txt + +# (オプションII:Anacondaを使用する場合)手順は同様です (https://www.bilibili.com/video/BV1rc411W7Dr): +conda create -n gptac_venv python=3.11 # Anaconda環境を作成 +conda activate gptac_venv # Anaconda環境をアクティベート +python -m pip install -r requirements.txt # この手順はpipのインストール手順と同じです +``` + +
清華ChatGLM2/復旦MOSS/RWKVがバックエンドとしてサポートされている場合、ここをクリックして展開してください +

+
+【オプションステップ】 清華ChatGLM2/復旦MOSSをバックエンドとしてサポートする場合は、さらに追加の依存関係をインストールする必要があります(前提条件:Pythonに精通していて、PyTorchとNVIDIA GPUを使用したことがあり、十分なコンピュータ構成を持っていること):
+
+```sh
+# 【オプションステップI】 清華ChatGLM2のサポートを追加する。注意: "Call ChatGLM fail 不能正常加载ChatGLM的参数" のエラーが発生した場合、次の手順を参照してください。 1: デフォルトでインストールされるのはtorch+cpu版です。CUDAを使用するにはtorchをアンインストールしてtorch+cuda版を再インストールする必要があります。 2: モデルをロードできない場合は、request_llms/bridge_chatglm.pyのモデル精度を変更できます。AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)をAutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)に変更します。
+python -m pip install -r request_llms/requirements_chatglm.txt
+
+# 【オプションステップII】 復旦MOSSのサポートを追加する
+python -m pip install -r request_llms/requirements_moss.txt
+git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # このコマンドを実行するときは、プロジェクトのルートパスにいる必要があります。
+
+# 【オプションステップIII】 RWKV Runnerのサポートを追加する
+# Wikiを参照してください: https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner
+
+# 【オプションステップIV】 config.py設定ファイルのAVAIL_LLM_MODELSに、期待するモデルが含まれていることを確認します。現在サポートされているモデルは以下のとおりです(jittorllmsシリーズは現時点でDockerソリューションのみサポート):
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
+```
+

+
+
+4. 実行する
+```sh
+python main.py
+```
+
+### 方法II:Dockerを使用する
+
+0. プロジェクトのフルスケールデプロイ(CUDAとLaTeXを含む大規模なイメージです。ネットワーク速度が遅い、またはディスク容量が小さい場合にはおすすめしません)
+[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml)
+
+```sh
+# docker-compose.ymlを編集し、スキーム0を残して他を削除してから実行する:
+docker-compose up
+```
+
+1. ChatGPT + 文心一言 + 星火などのオンラインモデルのみを含む(ほとんどの人におすすめ)
+[![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml)
+[![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml)
+[![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml)
+
+```sh
+# docker-compose.ymlを編集し、スキーム1を残して他を削除してから実行する:
+docker-compose up
+```
+
+P.S. LaTeXプラグインの機能を使用する場合は、Wikiを参照してください。また、スキーム4またはスキーム0を直接使用してLaTeX機能を利用することもできます。
+
+2. ChatGPT + ChatGLM2 + MOSS + LLaMA2 + 通義千問(Nvidia Dockerに精通している場合)
+[![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml)
+
+```sh
+# docker-compose.ymlを編集し、スキーム2を残して他を削除してから実行する:
+docker-compose up
+```
+
+
+### 方法III:その他のデプロイメントオプション
+
+1. **Windowsのワンクリック実行スクリプト**。
+Python環境に詳しくないWindowsユーザーは、[リリース](https://github.com/binary-husky/gpt_academic/releases)からワンクリック実行スクリプトをダウンロードして、ローカルモデルのないバージョンをインストールできます。
+スクリプトの貢献者は[oobabooga](https://github.com/oobabooga/one-click-installers)です。
+
+2. サードパーティのAPI、Azure、文心一言、星火などを使用するには、[Wikiページ](https://github.com/binary-husky/gpt_academic/wiki/Getting-Started)を参照してください。
+
+3. クラウドサーバーでのリモートデプロイの回避策ガイドを参照してください。
+[クラウドサーバーでのリモートデプロイの回避策ガイドwiki](https://github.com/binary-husky/gpt_academic/wiki/Getting-Started#%E4%BA%91%E3%82%B5%E3%83%BC%E3%83%90%E3%83%BC%E3%83%AA%E3%82%BC%E3%83%A0%E3%82%B5%E3%83%BC%E3%83%90%E3%81%AE%E3%83%AA%E3%83%A2%E3%83%BC%E3%83%88%E3%83%87%E3%83%97%E3%83%AD%E3%82%A4%E6%8C%87%E5%8D%97)
+
+4. その他の新しいデプロイプラットフォームや方法
+    - Sealosを使用した[ワンクリックデプロイ](https://github.com/binary-husky/gpt_academic/issues/993)
+    - WSL2(Windows Subsystem for Linux)の使用方法については、[デプロイwiki-2](https://github.com/binary-husky/gpt_academic/wiki/Getting-Started)を参照してください。
+    - サブパス(例:`http://localhost/subpath`)でFastAPIを実行する方法については、[FastAPIの実行方法](docs/WithFastapi.md)を参照してください。
+
+
+
+# 高度な使用法
+### I:カスタムショートカットボタンの作成(学術的なショートカットキー)
+テキストエディタで`core_functional.py`を開き、次のような項目を追加してから、プログラムを再起動します。(ボタンが既に存在する場合、プレフィックスとサフィックスはホットリロードに対応しており、プログラムを再起動せずに変更できます。)
+例(注意: キー "Prefix" と "Suffix" は英語のまま残す必要があります。値のみ翻訳できます):
+```
+"超级英译中": {
+    # プレフィックス、入力の前に追加されます。例えば、翻訳、コードの解説、校正など、要求を記述するために使用します。
+    "Prefix": "下記の内容を中国語に翻訳し、専門用語を一つずつマークダウンテーブルで解説してください:\n\n",
+
+    # サフィックス、入力の後に追加されます。プレフィックスと一緒に使用して、入力内容を引用符で囲むことができます。
+    "Suffix": "",
+},
+```
+ +
+ +### II:関数プラグインのカスタマイズ +自分の望む任意のタスクを実行するために、強力な関数プラグインを作成できます。 +このプロジェクトのプラグインの作成とデバッグの難易度は非常に低く、一定のPythonの基礎知識があれば、提供されたテンプレートを参考に自分自身のプラグイン機能を実装することができます。 +詳細については、[関数プラグインガイド](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)を参照してください。 + + +# 更新 +### I:ダイナミック + +1. 会話の保存機能。プラグインエリアで `Save Current Conversation` を呼び出すだけで、現在の会話を読み取り可能で復旧可能なhtmlファイルとして保存できます。 +また、プラグインエリア(ドロップダウンメニュー)で `Load Conversation History Archive` を呼び出すことで、以前の会話を復元できます。 +ヒント:ファイルを指定せずに `Load Conversation History Archive` をクリックすると、履歴のhtmlアーカイブのキャッシュを表示することができます。 +
+ +
+ +2. ⭐Latex/Arxiv論文の翻訳機能⭐ +
+ ===> + +
+ +3. ゼロのターミナル(自然言語入力からユーザの意図を理解+他のプラグインを自動的に呼び出す) + +- ステップ1:「プラグインのPDF論文の翻訳を呼び出してください、アドレスはhttps://openreview.net/pdf?id=rJl0r3R9KX」と入力します。 +- ステップ2:「Zero Terminal」をクリックします。 + +
+ +
+ +4. モジュール化された機能設計、シンプルなインターフェイスで強力な機能をサポートする +
+ + +
+ +5. 他のオープンソースプロジェクトの翻訳 +
+ + +
+ +6. [live2d](https://github.com/fghrsh/live2d_demo)のデコレーション機能(デフォルトでは無効で、`config.py`を変更する必要があります) +
+ +
+ +7. OpenAI画像生成 +
+ +
+ +8. OpenAIオーディオ解析と要約 +
+ +
+ +9. Latex全体の校正と修正 +
+ ===> + +
+ +10. 言語、テーマの切り替え +
+ +
+ + + +### II:バージョン: +- version 3.70(todo): AutoGenプラグインのテーマを最適化し、一連の派生プラグインを設計する +- version 3.60: AutoGenを次世代プラグインの基盤として導入 +- version 3.57: GLM3、星火v3、文心一言v4をサポート、ローカルモデルの並行バグを修正 +- version 3.56: 基本機能ボタンを動的に追加、新しい報告書PDF集約ページ +- version 3.55: フロントエンドのデザインを再構築し、浮動ウィンドウとメニューバーを導入 +- version 3.54: 新しい動的コードインタプリタ(Code Interpreter)の追加(未完成) +- version 3.53: 異なるテーマを動的に選択できるように、安定性の向上と複数ユーザの競合問題の解決 +- version 3.50: 自然言語でこのプロジェクトのすべての関数プラグインを呼び出すことができるようになりました(ゼロのターミナル)プラグインの分類をサポートし、UIを改善し、新しいテーマを設計 +- version 3.49: Baidu Qianfanプラットフォームと文心一言をサポート +- version 3.48: Alibaba DAMO Academy Tongyi Qianwen、Shanghai AI-Lab Shusheng、Xunfei Xinghuoをサポート +- version 3.46: 完全なオートモードのリアルタイム音声対話をサポート +- version 3.45: カスタムChatGLM2ファインチューニングモデルをサポート +- version 3.44: 公式にAzureをサポート、UIの使いやすさを最適化 +- version 3.4: +arxiv論文の翻訳、latex論文の校閲機能 +- version 3.3: +インターネット情報の総合機能 +- version 3.2: 関数プラグインがさらに多くのパラメータインターフェースをサポート(会話の保存機能、任意の言語のコードの解釈、同時に任意のLLMの組み合わせを尋ねる) +- version 3.1: 複数のgptモデルに同時に質問できるようにサポートされました! api2dをサポートし、複数のapikeyの負荷分散をサポートしました +- version 3.0: chatglmと他の小さなllmのサポート +- version 2.6: プラグインの構造を再構築し、対話性を高め、より多くのプラグインを追加しました +- version 2.5: 自己更新、ソースコード全体の要約時のテキストの長さ、トークンのオーバーフローの問題を解決しました +- version 2.4: (1)新しいPDF全文翻訳機能を追加しました。(2)入力エリアの位置を切り替えるための新しい機能を追加しました。(3)垂直レイアウトオプションを追加しました。(4)マルチスレッド関数プラグインを最適化しました。 +- version 2.3: マルチスレッドの対話を強化しました +- version 2.2: 関数プラグインのホットリロードをサポート +- version 2.1: 折りたたみ式のレイアウト +- version 2.0: モジュール化された関数プラグインの導入 +- version 1.0: 基本機能 + +GPT Academic開発者QQグループ:`610599535` + +-既知の問題 + - 一部のブラウザ翻訳プラグインがこのソフトウェアのフロントエンドの実行を妨げる + - 公式Gradioには互換性の問題があり、必ず`requirement.txt`を使用してGradioをインストールしてください + +### III:テーマ +`THEME`オプション(`config.py`)を変更することで、テーマを変更できます +1. `Chuanhu-Small-and-Beautiful` [リンク](https://github.com/GaiZhenbiao/ChuanhuChatGPT/) + + +### IV:本プロジェクトの開発ブランチ + +1. `master`ブランチ:メインブランチ、安定版 +2. `frontier`ブランチ:開発ブランチ、テスト版 + + +### V:参考と学習 + +``` +コードの中には、他の優れたプロジェクトのデザインを参考にしたものが多く含まれています。順序は問いません: + +# 清華ChatGLM2-6B: +https://github.com/THUDM/ChatGLM2-6B + +# 清華JittorLLMs: +https://github.com/Jittor/JittorLLMs + +# ChatPaper: +https://github.com/kaixindelele/ChatPaper + +# Edge-GPT: +https://github.com/acheong08/EdgeGPT + +# ChuanhuChatGPT: +https://github.com/GaiZhenbiao/ChuanhuChatGPT + + + +# Oobaboogaワンクリックインストーラー: +https://github.com/oobabooga/one-click-installers + +# その他: +https://github.com/gradio-app/gradio +https://github.com/fghrsh/live2d_demo + diff --git a/docs/README.Korean.md b/docs/README.Korean.md new file mode 100644 index 0000000000..ebf8d4b9c4 --- /dev/null +++ b/docs/README.Korean.md @@ -0,0 +1,364 @@ + + + +> **참고** +> +> 이 README는 GPT 번역으로 생성되었습니다 (이 프로젝트의 플러그인에 의해 구현됨) . 100% 신뢰할 수 없으므로 번역 결과를 주의 깊게 검토하십시오. +> +> 2023.11.7: 종속성을 설치할 때, `requirements.txt`에 **지정된 버전**을 선택하십시오. 설치 명령어: `pip install -r requirements.txt`. + + + + +#
GPT 학술 최적화 (GPT Academic)
+ +**이 프로젝트가 마음에 드신다면, Star를 부탁드립니다. 편리한 단축키나 플러그인을 발견하셨다면 Pull Request를 환영합니다!** +GPT를 사용하여 이 프로젝트를 임의의 언어로 번역하려면 [`multi_language.py`](multi_language.py)를 읽고 실행하십시오 (실험적). + + +> **참고** +> +> 1. **강조 표시**된 플러그인 (버튼)만 파일을 읽을 수 있습니다. 일부 플러그인은 플러그인 영역의 **드롭다운 메뉴**에 있습니다. 또한 새로운 플러그인에 대한 모든 PR을 환영하며, 이를 **가장 우선적**으로 처리합니다. +> +> 2. 이 프로젝트의 각 파일의 기능은 [자체 분석 보고서 `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic%EC%A0%9C%ED%94%84%EB%AA%85%EC%84%B1%EB%B0%A9%EC%8B%9D%EC%9D%98_%EA%B2%B0%EA%B3%BC)에서 자세히 설명되어 있습니다. 버전이 반복됨에 따라, 관련 기능 플러그인을 언제든지 클릭하여 GPT를 호출하여 프로젝트의 자체 분석 보고서를 다시 생성할 수 있습니다. 자주 묻는 질문은 [`위키`](https://github.com/binary-husky/gpt_academic/wiki)를 참조하십시오. [일반적인 설치 방법](#installation) | [원클릭 설치 스크립트](https://github.com/binary-husky/gpt_academic/releases) | [설정 설명서](https://github.com/binary-husky/gpt_academic/wiki/%EC%84%A4%EC%A0%95%EC%82%AC%EB%AA%85_%EA%B0%84%EB%8B%A8_%EC%84%B8%ED%8A%B8%EB%B2%84_%EC%B6%94%EA%B0%80) + + +> 3. 이 프로젝트는 ChatGLM 등 대형 언어 모델 (ChatGLM 등) 실행을 지원하고 권장합니다. 여러 개의 API 키를 동시에 사용할 수 있으며, 구성 파일에 `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`와 같이 입력할 수 있습니다. `API_KEY`를 일시적으로 변경해야 하는 경우, 입력 영역에 임시 `API_KEY`를 입력한 다음 Enter 키를 누르면 적용됩니다. + + + + + +
+ +기능 (⭐= 최근 추가 기능) | 설명 +--- | --- +⭐[새 모델 추가](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | Baidu [Qianfan](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)와 Wenxin Yiyan, [Tongyi Qianwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary), Shanghai AI-Lab [Shusheng](https://github.com/InternLM/InternLM), Xunfei [Star](https://xinghuo.xfyun.cn/), [LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), Zhipu API, DALLE3 +문체 개선, 번역, 코드 설명 | 일괄적인 문체 개선, 번역, 논문 문법 오류 탐색, 코드 설명 +[사용자 정의 단축키](https://www.bilibili.com/video/BV14s4y1E7jN) | 사용자 정의 단축키 지원 +모듈화 설계 | 사용자 정의 가능한 강력한 [플러그인](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions) 지원, 플러그인 지원 [핫 업데이트](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) +[프로그램 분석](https://www.bilibili.com/video/BV1cj411A7VW) | [플러그인] 한 번에 Python/C/C++/Java/Lua/... 프로젝트 트리를 분석하거나 [자체 분석](https://www.bilibili.com/video/BV1cj411A7VW) +논문 읽기, 논문 [번역](https://www.bilibili.com/video/BV1KT411x7Wn) | [플러그인] LaTeX/PDF 논문 전문을 읽고 요약 생성 +LaTeX 전체 [번역](https://www.bilibili.com/video/BV1nk4y1Y7Js/), [개선](https://www.bilibili.com/video/BV1FT411H7c5/) | [플러그인] LaTeX 논문 번역 또는 개선 +일괄 주석 생성 | [플러그인] 함수 주석 일괄 생성 +Markdown [한 / 영 번역](https://www.bilibili.com/video/BV1yo4y157jV/) | 위의 5개 언어로 작성된 [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)를 살펴보셨나요? +채팅 분석 보고서 생성 | [플러그인] 실행 후 요약 보고서 자동 생성 +[PDF 논문 전체 번역](https://www.bilibili.com/video/BV1KT411x7Wn) 기능 | [플러그인] PDF 논문 제목 및 요약 추출 + 전체 번역 (멀티 스레드) +[Arxiv 도우미](https://www.bilibili.com/video/BV1LM4y1279X) | [플러그인] arxiv 논문 url 입력시 요약 번역 + PDF 다운로드 +LaTeX 논문 일괄 교정 | [플러그인] Grammarly를 모사하여 LaTeX 논문에 대한 문법 및 맞춤법 오류 교정 + 대조 PDF 출력 +[Google 학술 통합 도우미](https://www.bilibili.com/video/BV19L411U7ia) | 임의의 Google 학술 검색 페이지 URL을 지정하여 gpt가 [related works를 작성](https://www.bilibili.com/video/BV1GP411U7Az/)하게 해주세요. +인터넷 정보 집계 + GPT | [플러그인] [인터넷에서 정보를 가져와서](https://www.bilibili.com/video/BV1om4y127ck) 질문에 대답하도록 GPT를 자동화하세요. 정보가 절대로 오래되지 않도록 해줍니다. +⭐Arxiv 논문 세심한 번역 ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [플러그인] [arxiv 논문을 고품질 번역으로](https://www.bilibili.com/video/BV1dz4y1v77A/) 번역하는 최고의 도구 +⭐[실시간 음성 대화 입력](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [플러그인] 비동기적으로 [오디오를 모니터링](https://www.bilibili.com/video/BV1AV4y187Uy/)하여 문장을 자동으로 분절하고 대답 시기를 자동으로 찾습니다. +수식/이미지/표 표시 | [tex 형식 및 렌더링 형식](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png)의 수식을 동시에 표시하며, 수식 및 코드 하이라이트 지원 +⭐AutoGen multi-agent 플러그인 | [플러그인] Microsoft AutoGen을 활용하여 여러 개의 에이전트가 지능적으로 발생하는 가능성을 탐색하세요! +다크 모드 주제 지원 | 브라우저의 URL 뒤에 ```/?__theme=dark```를 추가하여 다크 모드로 전환하세요. +[다양한 LLM 모델](https://www.bilibili.com/video/BV1wT411p7yf) 지원 | GPT3.5, GPT4, [Tsinghua ChatGLM2](https://github.com/THUDM/ChatGLM2-6B), [Fudan MOSS](https://github.com/OpenLMLab/MOSS)을 함께 사용하는 느낌은 좋을 것입니다, 그렇지 않습니까? +⭐ChatGLM2 fine-tuned 모델 | ChatGLM2 fine-tuned 모델 로드를 지원하며, ChatGLM2 fine-tuned 보조 플러그인 제공 +더 많은 LLM 모델 연결, [huggingface 배포](https://huggingface.co/spaces/qingxu98/gpt-academic) 지원 | Newbing 인터페이스(신 밍), Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs) 도입, [LLaMA](https://github.com/facebookresearch/llama)와 [Pangu-alpha](https://openi.org.cn/pangu/)를 지원합니다. 
+⭐[void-terminal](https://github.com/binary-husky/void-terminal) 패키지 | GUI에서 독립, Python에서 이 프로젝트의 모든 함수 플러그인을 직접 호출 (개발 중) +⭐Void 터미널 플러그인 | [플러그인] 자연어로 이 프로젝트의 다른 플러그인을 직접 영속합니다. +기타 새로운 기능 소개 (이미지 생성 등) …… | 본 문서 맨 끝 참조 …… +
+ + +- 새로운 인터페이스(`config.py`의 LAYOUT 옵션 수정으로 "왼쪽-오른쪽 레이아웃"과 "위-아래 레이아웃"을 전환할 수 있음) +
+ +
+ + +- 모든 버튼은 functional.py를 동적으로 읽어 생성되므로 원하는대로 사용자 정의 기능을 추가할 수 있으며 클립 보드를 해제할 수 있습니다. +
+ +
+ +- 문체 개선/오류 수정 +
+ +
+ + + +- If the output contains equations, they will be displayed in both tex format and rendered format for easy copying and reading. +
+ +
+ +- Don't feel like looking at the project code? Just give it to ChatGPT and let it dazzle you. +
+ +
+ +- Mix and match multiple powerful language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) +
+ +
+ +# Installation +### Installation Method I: Run Directly (Windows, Linux or MacOS) + +1. Download the project +```sh +git clone --depth=1 https://github.com/binary-husky/gpt_academic.git +cd gpt_academic +``` + +2. Configure API_KEY + +In `config.py`, configure the API KEY and other settings, [click here to view special network environment configuration methods](https://github.com/binary-husky/gpt_academic/issues/1). [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。 + +" The program will first check if there is a confidential configuration file named `config_private.py` and use its configuration to override the configuration with the same name in `config.py`. If you can understand this reading logic, we strongly recommend that you create a new configuration file named `config_private.py` next to `config.py` and move (copy) the configuration from `config.py` to `config_private.py` (only copy the modified configuration items). " + +" You can configure the project through `environment variables`. The format of the environment variables can be found in the `docker-compose.yml` file or our [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). The priority of the configuration reading is: `environment variables` > `config_private.py` > `config.py`. " + +3. Install dependencies +```sh +# (Option I: if familiar with python, python>=3.9) Note: Use the official pip source or Aliyun pip source. Temporary switching source method: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ +python -m pip install -r requirements.txt + +# (Option II: using Anaconda) The steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr): +conda create -n gptac_venv python=3.11 # Create an Anaconda environment +conda activate gptac_venv # Activate the Anaconda environment +python -m pip install -r requirements.txt # This step is the same as the pip installation step +``` + + +
Click here to expand if you need support for Tsinghua ChatGLM2/Fudan MOSS/RWKV backend +

+
+[Optional Step] If you need support for Tsinghua ChatGLM2 / Fudan MOSS as the backend, you need to install additional dependencies (prerequisites: familiarity with Python, prior PyTorch experience, and a sufficiently powerful machine):
+```sh
+# [Optional Step I] Support for Tsinghua ChatGLM2. Note: if you encounter the error "Call ChatGLM fail, cannot load ChatGLM parameters", refer to the following. 1: The default installation above is the torch+cpu version; to use CUDA, uninstall torch and reinstall the torch+cuda version. 2: If you cannot load the model due to insufficient machine configuration, you can lower the model precision in request_llms/bridge_chatglm.py: change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
+python -m pip install -r request_llms/requirements_chatglm.txt
+
+# [Optional Step II] Support for Fudan MOSS
+python -m pip install -r request_llms/requirements_moss.txt
+git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # When executing this command, make sure you are in the project root path
+
+# [Optional Step III] Support for RWKV Runner
+# Refer to the wiki: https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner
+
+# [Optional Step IV] Make sure AVAIL_LLM_MODELS in config.py includes the expected models. The currently supported models are listed below (the jittorllms series only supports the Docker solution):
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
+```
+
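+
+As a footnote to Optional Step IV and to the `config_private.py` mechanism described in step 2 above, a minimal override file might look like the sketch below. The key names (`API_KEY`, `AVAIL_LLM_MODELS`) appear in this README; the values are placeholders, and you should only copy over the entries you actually changed.
+```python
+# config_private.py -- a minimal, illustrative example.
+# Entries here override the same-named entries in config.py, as described above.
+API_KEY = "openai-key1,openai-key2"  # placeholder keys; replace with your own
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-4", "chatglm"]
+```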

+
+
+
+
+4. Run
+```sh
+python main.py
+```
+
+### Installation Method II: Use Docker
+
+0. Deploy all the capabilities of the project (this is a large image that includes CUDA and LaTeX; it is not recommended if your internet speed is slow or your hard disk is small)
+[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml)
+
+``` sh
+# Modify docker-compose.yml, keep scheme 0 and delete the others. Then run:
+docker-compose up
+```
+
+1. ChatGPT + Wenxin Yiyan + Spark and other online models only (recommended for most people)
+[![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml)
+[![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml)
+[![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml)
+
+``` sh
+# Modify docker-compose.yml, keep scheme 1 and delete the others. Then run:
+docker-compose up
+```
+
+P.S. If you need the LaTeX plugin feature, please refer to the Wiki. Additionally, you can also use scheme 4 or scheme 0 directly to get the LaTeX feature.
+
+2. ChatGPT + ChatGLM2 + MOSS + LLaMA2 + Tongyi Qianwen (requires familiarity with the [Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian) runtime)
+[![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml)
+
+``` sh
+# Modify docker-compose.yml, keep scheme 2 and delete the others. Then run:
+docker-compose up
+```
+
+
+### Installation Method III: Other Deployment Methods
+1. **One-click run script for Windows**.
+Windows users who are completely unfamiliar with the Python environment can download the one-click run script (for the version without local models) from the [Release](https://github.com/binary-husky/gpt_academic/releases) section.
+The script contribution comes from [oobabooga](https://github.com/oobabooga/one-click-installers).
+
+2. To use third-party APIs, Azure, Wenxin Yiyan, Spark, etc., see the [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
+
+3. Pitfall-avoidance guide for remote deployment on cloud servers.
+Please visit the [cloud server remote deployment wiki](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
+
+4. Some new deployment platforms or methods
+    - Use Sealos for [one-click deployment](https://github.com/binary-husky/gpt_academic/issues/993).
+    - Use WSL2 (Windows Subsystem for Linux). Please visit [deployment wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
+    - How to run in a subpath (such as `http://localhost/subpath`).
Please refer to [FastAPI running instructions](docs/WithFastapi.md) + + + +# 고급 사용법 +### I: 사용자 정의 바로 가기 버튼 추가 (학술 단축키) +임의의 텍스트 편집기로 `core_functional.py` 파일을 열고 다음과 같은 항목을 추가한 다음 프로그램을 다시 시작하십시오. (이미 버튼이 있는 경우에는 접두사와 접미사를 실시간으로 수정할 수 있으므로 프로그램을 다시 시작할 필요가 없습니다.) +예시: +``` +"초급영문 번역": { + # 접두사, 입력 내용 앞에 추가됩니다. 예를 들어 요구 사항을 설명하는 데 사용됩니다. 예를 들어 번역, 코드 설명, 교정 등 + "Prefix": "다음 내용을 한국어로 번역하고 전문 용어에 대한 설명을 적용한 마크다운 표를 사용하세요:\n\n", + + # 접미사, 입력 내용 뒤에 추가됩니다. 예를 들어 접두사와 함께 입력 내용을 따옴표로 감쌀 수 있습니다. + "Suffix": "", +}, +``` +
+ +
+ +### II: 사용자 정의 함수 플러그인 +원하는 작업을 수행하기 위해 능력있는 함수 플러그인을 작성하세요. +이 프로젝트의 플러그인 작성 및 디버깅은 난이도가 낮으며, 일정한 Python 기본 지식만 있으면 우리가 제공하는 템플릿을 본따서 고유한 플러그인 기능을 구현할 수 있습니다. +자세한 내용은 [함수 플러그인 가이드](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)를 참조하세요. + + +# 업데이트 +### I: 다이나믹 + +1. 대화 저장 기능. 플러그인 영역에서 '현재 대화 저장'을 호출하여 현재 대화를 볼 수 있고, html 파일을 복구할 수 있습니다. +또한 플러그인 영역에서 '대화 기록 불러오기'를 호출하여 이전 대화를 복원할 수 있습니다. +팁: 파일을 지정하지 않고 '대화 기록 불러오기'를 바로 클릭하면 이전 html 기록 캐시를 볼 수 있습니다. +
+ +
+ +2. ⭐Latex/Arxiv 논문 번역 기능⭐ +
+ ===> + +
+ +3. 빈 터미널 (자연어 입력에서 사용자 의도 이해 + 자동 플러그인 호출) + +- 단계 1: "플러그인을 사용하여 PDF 논문을 번역하십시오. 주소는 https://openreview.net/pdf?id=rJl0r3R9KX입니다." 입력 +- 단계 2: "빈 터미널" 클릭 + +
+ +
+ +4. 모듈화된 기능 디자인, 간단한 인터페이스로 강력한 기능 제공 +
+ + +
+ +5. 다른 오픈 소스 프로젝트 번역 +
+ + +
+ +6. [live2d](https://github.com/fghrsh/live2d_demo)의 작은 기능 추가 (기본 설정은 닫혀 있으며, `config.py`를 수정해야 합니다.) +
+ +
+ +7. OpenAI 이미지 생성 +
+ +
+ +8. OpenAI 오디오 분석 및 요약 +
+ +
+ +9. Latex 전체 교정 오류 +
+ ===> + +
+ +10. 언어, 테마 변경 +
+ +
+ + + +### II: 버전: +- 버전 3.70 (예정): AutoGen 플러그인 테마 개선 및 다른 테마 플러그인 디자인 +- 버전 3.60: AutoGen을 새로운 세대 플러그인의 기반으로 도입 +- 버전 3.57: GLM3, Starfire v3, 文心一言 v4 지원, 로컬 모델의 동시성 버그 수정 +- 버전 3.56: 동적으로 기본 기능 버튼 추가, 새로운 보고서 PDF 요약 페이지 +- 버전 3.55: 프론트 엔드 인터페이스 리팩토링, 화면 따라다니는 윈도우 및 메뉴 바 도입 +- 버전 3.54: 새로운 동적 코드 해석기 (Code Interpreter) 추가 (완벽하게 완성되지 않음) +- 버전 3.53: 다른 인터페이스 테마 동적 선택 기능 추가, 안정성 향상 및 다중 사용자 충돌 문제 해결 +- 버전 3.50: 자연어로 이 프로젝트의 모든 함수 플러그인을 호출하는 기능 (빈 터미널) 추가, 플러그인 분류 지원, UI 개선, 새로운 테마 설계 +- 버전 3.49: Baidu Qianfan 플랫폼 및 문심일언 지원 +- 버전 3.48: Ali DameiYuan Sematic Query, Shanghai AI-Lab Shusheng, Xunfei Starfire 지원 +- 버전 3.46: 완전 자동 운전 가능한 실시간 음성 대화 지원 +- 버전 3.45: 사용자 정의 ChatGLM2 fine-tuning 모델 지원 +- 버전 3.44: Azure 정식 지원, 인터페이스의 사용 편의성 개선 +- 버전 3.4: +arxiv 논문 번역, latex 논문 교정 기능 추가 +- 버전 3.3: +인터넷 정보 종합 기능 +- 버전 3.2: 함수 플러그인이 더 많은 매개변수 인터페이스를 지원합니다 (대화 저장 기능, 임의의 언어 코드 해석 + 임의의 LLM 조합을 동시에 요청) +- 버전 3.1: 여러 GPT 모델에 동시에 질문할 수 있는 기능 추가! api2d 지원, 여러 개의 apikey 부하 균형 조정 지원 +- 버전 3.0: chatglm 및 기타 소규모 llm 지원 +- 버전 2.6: 플러그인 구조를 재구성하여 상호 작용성 향상, 더 많은 플러그인 추가 +- 버전 2.5: 자동 업데이트, 소스 코드 요약 중 텍스트가 너무 길고 토큰이 오버플로되는 문제 해결 +- 버전 2.4: (1)PDF 전체 번역 기능 추가; (2)입력 영역 위치 전환 기능 추가; (3)수직 레이아웃 옵션 추가; (4)멀티 스레드 함수 플러그인 최적화 +- 버전 2.3: 멀티 스레드 상호 작용성 강화 +- 버전 2.2: 함수 플러그인의 핫 리로드 지원 +- 버전 2.1: 접을 수 있는 레이아웃 +- 버전 2.0: 모듈화 함수 플러그인 도입 +- 버전 1.0: 기본 기능 + +GPT Academic 개발자 QQ 그룹: `610599535` +- 알려진 문제 + - 특정 웹 브라우저 번역 플러그인이 이 소프트웨어의 프론트엔드 실행에 방해가 되는 경우가 있습니다. + - 공식 Gradio에는 호환성 문제가 많기 때문에 `requirement.txt`를 사용하여 Gradio를 설치하십시오. + +### III: 테마 +`THEME` 옵션 (`config.py`)을 수정하여 테마를 변경할 수 있습니다. +1. `Chuanhu-Small-and-Beautiful` [URL](https://github.com/GaiZhenbiao/ChuanhuChatGPT/) + + +### IV: 이 프로젝트의 개발 브랜치 + +1. `master` 브랜치: 메인 브랜치, 안정 버전 +2. `frontier` 브랜치: 개발 브랜치, 테스트 버전 + + +### V: 참고 및 학습 + +``` +코드에서는 다른 우수한 프로젝트의 디자인을 많이 참고했습니다. 순서는 문제 없이 나열됩니다: + +# 清华ChatGLM2-6B: +https://github.com/THUDM/ChatGLM2-6B + +# 清华JittorLLMs: +https://github.com/Jittor/JittorLLMs + +# ChatPaper: +https://github.com/kaixindelele/ChatPaper + +# Edge-GPT: +https://github.com/acheong08/EdgeGPT + +# ChuanhuChatGPT: +https://github.com/GaiZhenbiao/ChuanhuChatGPT + + + +# Oobabooga 원 클릭 설치 프로그램: +https://github.com/oobabooga/one-click-installers + +# 더보기: +https://github.com/gradio-app/gradio +https://github.com/fghrsh/live2d_demo + diff --git a/docs/README.Portuguese.md b/docs/README.Portuguese.md new file mode 100644 index 0000000000..089465a690 --- /dev/null +++ b/docs/README.Portuguese.md @@ -0,0 +1,358 @@ + + + +> **Nota** +> +> Este README foi traduzido pelo GPT (implementado por um plugin deste projeto) e não é 100% confiável. Por favor, verifique cuidadosamente o resultado da tradução. +> +> 7 de novembro de 2023: Ao instalar as dependências, favor selecionar as **versões especificadas** no `requirements.txt`. Comando de instalação: `pip install -r requirements.txt`. + +#
GPT Acadêmico
+
+**Se você gosta deste projeto, por favor, dê uma estrela nele. Se você inventou atalhos de teclado ou plugins úteis, fique à vontade para criar pull requests!**
+Para traduzir este projeto para qualquer idioma utilizando o GPT, leia e execute [`multi_language.py`](multi_language.py) (experimental).

+> **Nota**
+>
+> 1. Observe que apenas os plugins (botões) marcados em **destaque** são capazes de ler arquivos; alguns plugins estão localizados no **menu suspenso** da área de plugins. Também damos boas-vindas e prioridade máxima a qualquer novo plugin via PR.
+>
+> 2. As funcionalidades de cada arquivo deste projeto estão detalhadamente explicadas em [autoanálise `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). Com a iteração das versões, você também pode clicar nos plugins de funções relevantes a qualquer momento para chamar o GPT e gerar novamente o relatório de autoanálise do projeto. Perguntas frequentes [`wiki`](https://github.com/binary-husky/gpt_academic/wiki) | [Método de instalação convencional](#installation) | [Script de instalação em um clique](https://github.com/binary-husky/gpt_academic/releases) | [Explicação de configuração](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
+>
+> 3. Este projeto é compatível e encoraja o uso de modelos de linguagem chineses, como ChatGLM. Vários api-keys podem ser usados simultaneamente, podendo ser especificados no arquivo de configuração como `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Quando precisar alterar temporariamente o `API_KEY`, insira o `API_KEY` temporário na área de entrada e pressione Enter para que ele seja efetivo.
+
+
+<div align="center">
+ +Funcionalidades (⭐= funcionalidade recentemente adicionada) | Descrição +--- | --- +⭐[Integração com novos modelos](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | [Qianfan](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu) da Baidu, Wenxin e [Tongyi Qianwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary), [Shusheng](https://github.com/InternLM/InternLM) da Shanghai AI-Lab, [Xinghuo](https://xinghuo.xfyun.cn/) da Iflytek, [LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), Zhipu API, DALLE3 +Aprimoramento, tradução, explicação de códigos | Aprimoramento com um clique, tradução, busca de erros gramaticais em artigos e explicação de códigos +[Atalhos de teclado personalizados](https://www.bilibili.com/video/BV14s4y1E7jN) | Suporte para atalhos de teclado personalizados +Design modular | Suporte a plugins poderosos e personalizáveis, plugins com suporte a [atualização a quente](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) +[Análise de código](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin] Análise instantânea da estrutura de projetos em Python/C/C++/Java/Lua/... ou [autoanálise](https://www.bilibili.com/video/BV1cj411A7VW) +Leitura de artigos, [tradução](https://www.bilibili.com/video/BV1KT411x7Wn) de artigos | [Plugin] Interpretação instantânea de artigos completos em latex/pdf e geração de resumos +Tradução completa de artigos em latex [PDF](https://www.bilibili.com/video/BV1nk4y1Y7Js/), [aprimoramento](https://www.bilibili.com/video/BV1FT411H7c5/) | [Plugin] Tradução completa ou aprimoramento de artigos em latex com um clique +Geração em lote de comentários | [Plugin] Geração em lote de comentários de funções com um clique +Tradução (inglês-chinês) de Markdown | [Plugin] Você já viu o [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) nas 5 línguas acima? 
+Criação de relatório de análise de bate-papo | [Plugin] Geração automática de relatório de resumo após a execução +Tradução [completa de artigos em PDF](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plugin] Extração de título e resumo de artigos em PDF + tradução completa (multithreading) +Auxiliar Arxiv | [Plugin] Insira o URL de um artigo Arxiv para traduzir o resumo + baixar o PDF com um clique +Correção automática de artigos em latex | [Plugin] Correções gramaticais e ortográficas de artigos em latex semelhante ao Grammarly + saída PDF comparativo +Auxiliar Google Scholar | [Plugin] Insira qualquer URL da busca do Google Acadêmico e deixe o GPT [escrever trabalhos relacionados](https://www.bilibili.com/video/BV1GP411U7Az/) para você +Agregação de informações da Internet + GPT | [Plugin] Capturar informações da Internet e obter respostas de perguntas com o GPT em um clique, para que as informações nunca fiquem desatualizadas +⭐Tradução refinada de artigos do Arxiv ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [Plugin] Tradução de alta qualidade de artigos do Arxiv com um clique, a melhor ferramenta de tradução de artigos atualmente +⭐Entrada de conversa de voz em tempo real | [Plugin] Monitoramento de áudio [assíncrono](https://www.bilibili.com/video/BV1AV4y187Uy/), segmentação automática de frases, detecção automática de momentos de resposta +Exibição de fórmulas, imagens e tabelas | Exibição de fórmulas em formato tex e renderizadas simultaneamente, suporte a fórmulas e destaque de código +⭐Plugin AutoGen para vários agentes | [Plugin] Explore a emergência de múltiplos agentes com o AutoGen da Microsoft! +Ativar o tema escuro | Adicione ```/?__theme=dark``` ao final da URL para alternar para o tema escuro +Suporte a múltiplos modelos LLM | Ser atendido simultaneamente pelo GPT3.5, GPT4, [ChatGLM2](https://github.com/THUDM/ChatGLM2-6B) do Tsinghua University e [MOSS](https://github.com/OpenLMLab/MOSS) da Fudan University se sente incrível, não é mesmo? +⭐Modelo de ajuste fino ChatGLM2 | Suporte para carregar o modelo ChatGLM2 ajustado e fornecer plugins de assistência ao ajuste fino do ChatGLM2 +Mais modelos LLM e suporte para [implantação pela HuggingFace](https://huggingface.co/spaces/qingxu98/gpt-academic) | Integração com a interface Newbing (Bing novo), introdução do [Jittorllms](https://github.com/Jittor/JittorLLMs) da Tsinghua University com suporte a [LLaMA](https://github.com/facebookresearch/llama) e [Panguα](https://openi.org.cn/pangu/) +⭐Pacote pip [void-terminal](https://github.com/binary-husky/void-terminal) | Chame todas as funções plugins deste projeto diretamente em Python, sem a GUI (em desenvolvimento) +⭐Plugin Terminal do Vácuo | [Plugin] Chame outros plugins deste projeto diretamente usando linguagem natural +Apresentação de mais novas funcionalidades (geração de imagens, etc.) ... | Veja no final deste documento ... + +
+ + +- Nova interface (altere a opção LAYOUT em `config.py` para alternar entre os "Layouts de lado a lado" e "Layout de cima para baixo") +
+ +
+ + +- Todos os botões são gerados dinamicamente através da leitura do `functional.py`, você pode adicionar funcionalidades personalizadas à vontade, liberando sua área de transferência +
+ +
+ +- Aprimoramento/Correção +
+ +
+ + + +- Se a saída contiver fórmulas, elas serão exibidas tanto em formato tex quanto renderizado para facilitar a cópia e a leitura. +
+ +
+ +- Não tem vontade de ver o código do projeto? O projeto inteiro está diretamente na boca do chatgpt. +
+ +
+ +- Combinação de vários modelos de linguagem (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) +
+ +
+ +# Instalação +### Método de instalação I: Executar diretamente (Windows, Linux ou MacOS) + +1. Baixe o projeto +```sh +git clone --depth=1 https://github.com/binary-husky/gpt_academic.git +cd gpt_academic +``` + +2. Configure a API_KEY + +No arquivo `config.py`, configure a API KEY e outras configurações. [Clique aqui para ver o método de configuração em redes especiais](https://github.com/binary-husky/gpt_academic/issues/1). [Página Wiki](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). + +「 O programa verificará primeiro se existe um arquivo de configuração privada chamado `config_private.py` e substituirá as configurações correspondentes no arquivo `config.py`. Se você entender essa lógica de leitura, é altamente recomendável criar um novo arquivo de configuração chamado `config_private.py` ao lado do `config.py` e copiar as configurações do `config.py` para o `config_private.py` (copiando apenas os itens de configuração que você modificou). 」 + +「 Suporte para configurar o projeto por meio de `variáveis de ambiente`, o formato de gravação das variáveis de ambiente pode ser encontrado no arquivo `docker-compose.yml` ou em nossa [página Wiki](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). A prioridade de leitura das configurações é: `variáveis de ambiente` > `config_private.py` > `config.py`. 」 + + +3. Instale as dependências +```sh +# (Opção I: Se você está familiarizado com o Python, Python>=3.9) Observação: Use o pip oficial ou o pip da Aliyun. Método temporário para alternar fontes: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ +python -m pip install -r requirements.txt + +# (Opção II: Use o Anaconda) Os passos também são semelhantes (https://www.bilibili.com/video/BV1rc411W7Dr): +conda create -n gptac_venv python=3.11 # Crie um ambiente do Anaconda +conda activate gptac_venv # Ative o ambiente do Anaconda +python -m pip install -r requirements.txt # Este passo é igual ao da instalação do pip +``` + + +
Se você quiser suporte para o ChatGLM2 do THU / MOSS do Fudan / RWKV Runner como backend, clique para expandir</summary>

+
+[Opcional] Se você quiser suporte para o ChatGLM2 do THU/ MOSS do Fudan, precisará instalar dependências extras (pré-requisitos: familiarizado com o Python + já usou o PyTorch + o computador tem configuração suficiente):
+```sh
+# [Opcional Passo I] Suporte para ChatGLM2 do THU. Observações sobre o ChatGLM2 do THU: Se você encontrar o erro "Call ChatGLM fail 不能正常加载ChatGLM的参数" (Falha ao chamar o ChatGLM, não é possível carregar os parâmetros do ChatGLM), consulte o seguinte: 1: A versão instalada por padrão é a versão torch+cpu. Se você quiser usar a versão cuda, desinstale o torch e reinstale uma versão com torch+cuda; 2: Se a sua configuração não for suficiente para carregar o modelo, você pode modificar a precisão do modelo em request_llms/bridge_chatglm.py, alterando todas as ocorrências de AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) para AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
+python -m pip install -r request_llms/requirements_chatglm.txt
+
+# [Opcional Passo II] Suporte para MOSS do Fudan
+python -m pip install -r request_llms/requirements_moss.txt
+git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # Observe que você deve estar no diretório raiz do projeto ao executar este comando
+
+# [Opcional Passo III] Suporte para RWKV Runner
+# Consulte a página Wiki: https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner
+
+# [Opcional Passo IV] Verifique se o arquivo de configuração config.py contém os modelos desejados, os modelos compatíveis são os seguintes (a série jittorllms suporta apenas a solução Docker):
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
+```
+

+
+ + + +4. Execute +```sh +python main.py +``` + +### Método de instalação II: Usando o Docker + +0. Implante todas as capacidades do projeto (este é um contêiner grande que inclui CUDA e LaTeX. Não recomendado se você tiver uma conexão lenta com a internet ou pouco espaço em disco) +[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml) + +``` sh +# Modifique o arquivo docker-compose.yml para incluir apenas a seção 0 e excluir as outras seções. Em seguida, execute: +docker-compose up +``` + +1. ChatGPT + 文心一言 + spark + outros modelos online (recomendado para a maioria dos usuários) +[![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml) +[![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml) +[![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml) + +``` sh +# Modifique o arquivo docker-compose.yml para incluir apenas a seção 1 e excluir as outras seções. Em seguida, execute: +docker-compose up +``` + +Obs.: Se você precisar do plugin Latex, consulte a Wiki. Além disso, você também pode usar a seção 4 ou 0 para obter a funcionalidade do LaTeX. + +2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + 通义千问 (você precisa estar familiarizado com o [Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian) para executar este modo) +[![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml) + +``` sh +# Modifique o arquivo docker-compose.yml para incluir apenas a seção 2 e excluir as outras seções. Em seguida, execute: +docker-compose up +``` + + +### Método de instalação III: Outros métodos de implantação +1. **Script de execução com um clique para Windows**. +Usuários do Windows que não estão familiarizados com o ambiente Python podem baixar o script de execução com um clique da [Release](https://github.com/binary-husky/gpt_academic/releases) para instalar a versão sem modelos locais. +A contribuição do script vem de [oobabooga](https://github.com/oobabooga/one-click-installers). + +2. Usar APIs de terceiros, Azure, etc., 文心一言, 星火, consulte a [página Wiki](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). + +3. Guia para evitar armadilhas na implantação em servidor em nuvem. +Consulte o [wiki de implantação em servidor em nuvem](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97). + +4. Algumas novas plataformas ou métodos de implantação + - Use Sealos [implantação com um clique](https://github.com/binary-husky/gpt_academic/issues/993). + - Use o WSL2 (Subsistema do Windows para Linux). 
Consulte [wiki de implantação](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2). + - Como executar em um subdiretório da URL (como `http://localhost/subpath`). Consulte [instruções de execução com o FastAPI](docs/WithFastapi.md) + + + +# Uso Avançado +### I: Personalização de Novos Botões de Atalho (Atalhos Acadêmicos) +Abra o arquivo `core_functional.py` em qualquer editor de texto, adicione o seguinte item e reinicie o programa. (Se o botão já existir, o prefixo e o sufixo podem ser modificados a qualquer momento sem reiniciar o programa). +Por exemplo: +``` +"超级英译中": { + # Prefixo, adicionado antes do seu input. Por exemplo, usado para descrever sua solicitação, como traduzir, explicar o código, revisar, etc. + "Prefix": "Por favor, traduza o parágrafo abaixo para o chinês e explique cada termo técnico dentro de uma tabela markdown:\n\n", + + # Sufixo, adicionado após o seu input. Por exemplo, em conjunto com o prefixo, pode-se colocar seu input entre aspas. + "Suffix": "", +}, +``` +
+ +
+ +### II: Personalização de Funções Plugins +Crie poderosos plugins de função para executar tarefas que você pode e não pode imaginar. +Criar plugins neste projeto é fácil, basta seguir o modelo fornecido, desde que você tenha conhecimento básico de Python. +Consulte o [Guia dos Plugins de Função](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) para mais detalhes. + + +# Atualizações +### I: Dinâmico + +1. Função de salvar conversas. Chame a função "Salvar a conversa atual" na área de plugins para salvar a conversa atual em um arquivo HTML legível e recuperável. Além disso, chame a função "Carregar histórico de conversas" na área de plugins (menu suspenso) para restaurar conversas anteriores. +Dica: Se você clicar diretamente em "Carregar histórico de conversas" sem especificar o arquivo, poderá visualizar o cache do histórico do arquivo HTML. +
+ +
+ +2. ⭐Tradução de artigos Latex/Arxiv⭐ +
+ ===> + +
+ +3. Terminal vazio (entendendo a intenção do usuário a partir do texto em linguagem natural e chamando automaticamente outros plugins) + +- Passo 1: Digite "Por favor, chame o plugin 'Traduzir artigo PDF' e forneça o link https://openreview.net/pdf?id=rJl0r3R9KX" +- Passo 2: Clique em "Terminal vazio" + +
+ +
+ +4. Design de recursos modular, interface simples com suporte a recursos poderosos +
+ + +
+ +5. Tradução e interpretação de outros projetos de código aberto +
+ + +
+ +6. Recursos adicionais para [live2d](https://github.com/fghrsh/live2d_demo) (desativados por padrão, requer modificação no arquivo `config.py`) +
+ +
+ +7. Geração de imagens pela OpenAI +
+ +
+ +8. Análise e resumo de áudio pela OpenAI +
+ +
+ +9. Correção de erros em texto e código LaTeX +
+ ===> + +
+ +10. Alternância de idioma e tema +
+ +
+ + + +### II: Versões: +- Versão 3.70 (a fazer): Melhorar o plugin AutoGen e projetar uma série de plugins relacionados. +- Versão 3.60: Introdução do AutoGen como base para a próxima geração de plugins. +- Versão 3.57: Suporte para GLM3, Starfire v3, Wenxin Yiyan v4, correção de bugs relacionados a modelos locais executados simultaneamente. +- Versão 3.56: Suporte para adicionar dinamicamente botões de função básicos e nova página de resumo em PDF. +- Versão 3.55: Reformulação da interface do usuário, introdução de janelas flutuantes e menus. +- Versão 3.54: Novo interpretador de código dinâmico (Code Interpreter) (em desenvolvimento) +- Versão 3.53: Suporte para alterar dinamicamente o tema da interface, melhorias de estabilidade e correção de conflitos entre vários usuários. +- Versão 3.50: Chamada de todas as funções de plugins deste projeto usando linguagem natural (Terminal vazio), suporte a categorização de plugins, melhorias na interface do usuário e design de novos temas. +- Versão 3.49: Suporte para Baidu Qianfan Platform e Wenxin Yiyan. +- Versão 3.48: Suporte para Alibaba DAMO Academy Tongyi Qianwen, Shanghai AI-Lab Shusheng e Xunfei Xinghuo. +- Versão 3.46: Suporte para diálogos em tempo real totalmente automáticos. +- Versão 3.45: Suporte para personalização do modelo ChatGLM2. +- Versão 3.44: Suporte oficial ao Azure, aprimoramentos na usabilidade da interface. +- Versão 3.4: Tradução completa de artigos Arxiv/Latex, correção de artigos Latex. +- Versão 3.3: Funcionalidade de consulta a informações na internet. +- Versão 3.2: Maior suporte para parâmetros de função de plugins (função de salvar conversas, interpretação de código em qualquer linguagem + perguntas sobre combinações LLM arbitrariamente). +- Versão 3.1: Suporte para fazer perguntas a modelos GPT múltiplos! Suporte para API2D, balanceamento de carga em vários APIKeys. +- Versão 3.0: Suporte para chatglm e outros pequenos modelos LLM. +- Versão 2.6: Refatoração da estrutura de plugins, melhoria na interação, adição de mais plugins. +- Versão 2.5: Auto-atualizável, resolve problemas de texto muito longo ou estouro de tokens ao resumir grandes projetos de código. +- Versão 2.4: (1) Novo recurso de tradução completa de PDF; (2) Nova função para alternar a posição da área de input; (3) Nova opção de layout vertical; (4) Melhoria dos plugins de função em várias threads. +- Versão 2.3: Melhorias na interação em várias threads. +- Versão 2.2: Suporte para recarregar plugins sem reiniciar o programa. +- Versão 2.1: Layout dobrável. +- Versão 2.0: Introdução de plugins de função modular. +- Versão 1.0: Funcionalidades básicas. + +GPT Academic QQ Group: `610599535` + +- Problemas conhecidos + - Alguns plugins de tradução de navegadores podem interferir na execução deste software. + - A biblioteca Gradio possui alguns bugs de compatibilidade conhecidos. Certifique-se de instalar o Gradio usando o arquivo `requirement.txt`. + +### III: Temas +Você pode alterar o tema atualizando a opção `THEME` (config.py). +1. `Chuanhu-Small-and-Beautiful` [Link](https://github.com/GaiZhenbiao/ChuanhuChatGPT/) + + +### IV: Branches de Desenvolvimento deste Projeto + +1. Branch `master`: Branch principal, versão estável. +2. Branch `frontier`: Branch de desenvolvimento, versão de teste. 
+
+
+### V: Referências para Aprendizado
+
+```
+O código referenciou muitos projetos excelentes, em ordem aleatória:
+
+# Tsinghua ChatGLM2-6B:
+https://github.com/THUDM/ChatGLM2-6B
+
+# Tsinghua JittorLLMs:
+https://github.com/Jittor/JittorLLMs
+
+# ChatPaper:
+https://github.com/kaixindelele/ChatPaper
+
+# Edge-GPT:
+https://github.com/acheong08/EdgeGPT
+
+# ChuanhuChatGPT:
+https://github.com/GaiZhenbiao/ChuanhuChatGPT
+
+
+
+# Instalador com um clique do Oobabooga:
+https://github.com/oobabooga/one-click-installers
+
+# Mais:
+https://github.com/gradio-app/gradio
+https://github.com/fghrsh/live2d_demo
+
diff --git a/docs/README.Russian.md b/docs/README.Russian.md
new file mode 100644
index 0000000000..07ba098b0a
--- /dev/null
+++ b/docs/README.Russian.md
@@ -0,0 +1,361 @@
+
+
+
+> **Примечание**
+>
+> Этот README был переведен с помощью GPT (реализовано с помощью плагина этого проекта) и не может быть полностью надежным, пожалуйста, внимательно проверьте результаты перевода.
+>
+> 7 ноября 2023 года: При установке зависимостей, пожалуйста, выберите **указанные версии** из `requirements.txt`. Команда установки: `pip install -r requirements.txt`.
+
+
+#<div align=center>
GPT Academic (GPT Академический)
+ +**Если вам нравится этот проект, пожалуйста, поставьте звезду; если у вас есть удобные горячие клавиши или плагины, приветствуются pull requests!** +Чтобы перевести этот проект на произвольный язык с помощью GPT, прочтите и выполните [`multi_language.py`](multi_language.py) (экспериментально). + +> **Примечание** +> +> 1. Пожалуйста, обратите внимание, что только плагины (кнопки), выделенные **жирным шрифтом**, поддерживают чтение файлов, некоторые плагины находятся в выпадающем меню **плагинов**. Кроме того, мы с радостью приветствуем и обрабатываем PR для любых новых плагинов с **наивысшим приоритетом**. +> +> 2. Функции каждого файла в этом проекте подробно описаны в [отчете о самостоятельном анализе проекта `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). С каждым новым релизом вы также можете в любое время нажать на соответствующий функциональный плагин, вызвать GPT для повторной генерации сводного отчета о самоанализе проекта. Часто задаваемые вопросы [`wiki`](https://github.com/binary-husky/gpt_academic/wiki) | [обычные методы установки](#installation) | [скрипт одношаговой установки](https://github.com/binary-husky/gpt_academic/releases) | [инструкции по настройке](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). +> +> 3. Этот проект совместим и настоятельно рекомендуется использование китайской NLP-модели ChatGLM и других моделей больших языков производства Китая. Поддерживает одновременное использование нескольких ключей API, которые можно указать в конфигурационном файле, например, `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Если нужно временно заменить `API_KEY`, введите временный `API_KEY` в окне ввода и нажмите Enter для его подтверждения. + + + + +
+ +Функции (⭐= Недавно добавленные функции) | Описание +--- | --- +⭐[Подключение новой модели](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | Baidu [QianFan](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu) и WenxinYiYan, [TongYiQianWen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary), Shanghai AI-Lab [ShuSheng](https://github.com/InternLM/InternLM), Xunfei [XingHuo](https://xinghuo.xfyun.cn/), [LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), ZhiPu API, DALLE3 +Улучшение, перевод, объяснение кода | Одним нажатием выполнить поиск синтаксических ошибок в научных статьях, переводить, объяснять код +[Настройка горячих клавиш](https://www.bilibili.com/video/BV14s4y1E7jN) | Поддержка настройки горячих клавиш +Модульный дизайн | Поддержка настраиваемых мощных [плагинов](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions), плагины поддерживают [горячую замену](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) +[Профилирование кода](https://www.bilibili.com/video/BV1cj411A7VW) | [Плагин] Одним нажатием можно профилировать дерево проекта Python/C/C++/Java/Lua/... или [проанализировать самого себя](https://www.bilibili.com/video/BV1cj411A7VW) +Просмотр статей, перевод статей | [Плагин] Одним нажатием прочитать полный текст статьи в формате LaTeX/PDF и сгенерировать аннотацию +Перевод LaTeX статей, [улучшение](https://www.bilibili.com/video/BV1FT411H7c5/)| [Плагин] Одним нажатием перевести или улучшить статьи в формате LaTeX +Генерация пакетного комментария | [Плагин] Одним нажатием сгенерировать многострочный комментарий к функции +Перевод Markdown на английский и китайский | [Плагин] Вы видели документацию на сверху на пяти языках? [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)` +Анализ и создание отчета в формате чата | [Плагин] Автоматически генерируйте сводный отчет после выполнения +Функция перевода полноценной PDF статьи | [Плагин] Изъять название и аннотацию статьи из PDF + переводить полный текст (многопоточно) +[Arxiv помощник](https://www.bilibili.com/video/BV1LM4y1279X) | [Плагин] Просто введите URL статьи на arXiv, чтобы одним нажатием выполнить перевод аннотации + загрузить PDF +Одним кликом проверить статью на LaTeX | [Плагин] Проверка грамматики и правописания статьи LaTeX, добавление PDF в качестве справки +[Помощник Google Scholar](https://www.bilibili.com/video/BV19L411U7ia) | [Плагин] Создайте "related works" с помощью Google Scholar URL по вашему выбору. +Агрегирование интернет-информации + GPT | [Плагин] [GPT получает информацию из интернета](https://www.bilibili.com/video/BV1om4y127ck) и отвечает на вопросы, чтобы информация никогда не устаревала +⭐Точный перевод статей Arxiv ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [Плагин] [Переводите статьи Arxiv наивысшего качества](https://www.bilibili.com/video/BV1dz4y1v77A/) всего одним нажатием. 
Сейчас это лучший инструмент для перевода научных статей +⭐[Реальное время ввода голосом](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [Плагин] Асинхронно [слушать аудио](https://www.bilibili.com/video/BV1AV4y187Uy/), автоматически разбивать на предложения, автоматически находить момент для ответа +Отображение формул/изображений/таблиц | Поддержка отображения формул в форме [tex и рендеринга](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png), поддержка подсветки синтаксиса формул и кода +⭐Плагин AutoGen для множества интеллектуальных агентов | [Плагин] Используйте Microsoft AutoGen для исследования возможностей интеллектуального всплытия нескольких агентов! +Запуск [темной темы](https://github.com/binary-husky/gpt_academic/issues/173) | Добавьте `/?__theme=dark` в конец URL в браузере, чтобы переключиться на темную тему +[Поддержка нескольких моделей LLM](https://www.bilibili.com/video/BV1wT411p7yf) | Быть обслуживаемым GPT3.5, GPT4, [ChatGLM2 из Цинхуа](https://github.com/THUDM/ChatGLM2-6B), [MOSS из Фуданя](https://github.com/OpenLMLab/MOSS) одновременно должно быть очень приятно, не так ли? +⭐Модель ChatGLM2 Fine-tune | Поддержка загрузки модели ChatGLM2 Fine-tune, предоставляет вспомогательный плагин ChatGLM2 Fine-tune +Больше моделей LLM, поддержка [развертывания huggingface](https://huggingface.co/spaces/qingxu98/gpt-academic) | Включение интерфейса Newbing (новый Bing), введение поддержки китайских [Jittorllms](https://github.com/Jittor/JittorLLMs) для поддержки [LLaMA](https://github.com/facebookresearch/llama) и [Panguα](https://openi.org.cn/pangu/) +⭐Пакет pip [void-terminal](https://github.com/binary-husky/void-terminal) | Без GUI вызывайте все функциональные плагины этого проекта прямо из Python (разрабатывается) +⭐Плагин пустого терминала | [Плагин] Используя естественный язык, напрямую распоряжайтесь другими плагинами этого проекта +Больше новых функций (генерация изображений и т. д.) ... | Смотрите в конце этого документа ... +
+ + +- Новый интерфейс (изменение опции LAYOUT в `config.py` позволяет переключиться между "расположением слева и справа" и "расположением сверху и снизу") +
+ +
+ + +- Все кнопки генерируются динамически на основе `functional.py` и могут быть свободно дополнены, освобождая буфер обмена +
+ +
+ +- Улучшение/исправление +
+ +
+ + + +- Если вывод содержит формулы, они отображаются одновременно в виде tex и отрендеренного вида для удобства копирования и чтения +
+ +
+ +- Не хочешь смотреть код проекта? Весь проект сразу в уста ChatGPT +
+ +
+ +- Смешанное использование нескольких больших языковых моделей (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) +
+ +
+
+# Установка
+### Метод установки I: Прямой запуск (Windows, Linux или MacOS)
+
+1. Скачайте проект
+```sh
+git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
+cd gpt_academic
+```
+
+2. Настройте API_KEY
+
+В файле `config.py` настройте API KEY и другие настройки, [нажмите здесь, чтобы узнать способы настройки в специальных сетевых средах](https://github.com/binary-husky/gpt_academic/issues/1). [Инструкции по настройке проекта](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
+
+「 Программа будет в первую очередь проверять наличие файла config_private.py с приватными настройками и заменять соответствующие настройки в файле config.py на те, которые указаны в файле config_private.py. Если вы понимаете эту логику, мы настоятельно рекомендуем вам создать новый файл настроек config_private.py рядом с файлом config.py и скопировать туда настройки из config.py (только те, которые вы изменяли). 」
+
+「 Поддерживается настройка проекта с помощью `переменных среды`. Пример настройки переменных среды можно найти в файле docker-compose.yml или на нашей [странице вики](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明). Приоритет настроек: `переменные среды` > `config_private.py` > `config.py`. 」
+
+
+3. Установите зависимости
+```sh
+# (Выбор I: Если знакомы с Python, python>=3.9). Примечание: используйте официальный pip-репозиторий или репозиторий Alibaba; временный способ сменить источник: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
+python -m pip install -r requirements.txt

+# (Выбор II: Используйте Anaconda). Шаги аналогичны (https://www.bilibili.com/video/BV1rc411W7Dr):
+conda create -n gptac_venv python=3.11 # Создание среды Anaconda
+conda activate gptac_venv # Активация среды Anaconda
+python -m pip install -r requirements.txt # Здесь всё то же самое, что и при установке через pip
+```
+
+
+<details><summary>
Если вам нужна поддержка ChatGLM2 от Цинхуа / MOSS от Фуданя / RWKV Runner в качестве бэкенда, нажмите, чтобы развернуть</summary>

+
+【Опциональный шаг】Если вам нужна поддержка ChatGLM2 от Цинхуа/MOSS от Фуданя, потребуется установить дополнительные зависимости (предпосылки: вы знакомы с Python + использовали PyTorch + компьютер достаточно мощный):
+```sh
+# 【Опциональный шаг I】Поддержка ChatGLM2 от Цинхуа. Примечание: если вы столкнулись с ошибкой "Call ChatGLM fail 不能正常加载ChatGLM的参数", обратите внимание на следующее: 1: по умолчанию установлена версия torch+cpu, для использования cuda необходимо удалить torch и установить версию torch+cuda; 2: если модель не загружается из-за недостаточной мощности компьютера, вы можете изменить точность модели в файле request_llms/bridge_chatglm.py, заменив AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) на AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
+python -m pip install -r request_llms/requirements_chatglm.txt
+
+# 【Опциональный шаг II】Поддержка MOSS от Фуданя
+python -m pip install -r request_llms/requirements_moss.txt
+git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # Обратите внимание: при выполнении этой команды вы должны находиться в корневой папке проекта
+
+# 【Опциональный шаг III】Поддержка RWKV Runner
+# Смотрите вики: https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner
+
+# 【Опциональный шаг IV】Убедитесь, что AVAIL_LLM_MODELS в config.py содержит все нужные вам модели. Пример:
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
+```
+
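+Небольшая проверка-набросок, что зависимости MOSS оказались там, где проект их ищет (путь `request_llms/moss` взят из команды `git clone` выше; запускать из корня проекта):
+```python
+# Набросок: проверяем, что репозиторий MOSS склонирован в ожидаемую папку
+import os
+assert os.path.isdir("request_llms/moss"), "сначала выполните git clone ... request_llms/moss из корня проекта"
+print("MOSS на месте")
+```
+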

+
+
+
+
+4. Запустите программу
+```sh
+python main.py
+```
+
+### Метод установки II: Используйте Docker
+
+0. Установка всех возможностей проекта (это большой образ с поддержкой CUDA и LaTeX; но если у вас медленный интернет или маленький жесткий диск, мы не рекомендуем использовать этот метод).
+[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml)
+
+``` sh
+# Измените файл docker-compose.yml, сохраните метод 0 и удалите другие методы. Затем запустите:
+docker-compose up
+```
+
+1. ChatGPT + 文心一言 + Spark и другие онлайн-модели (рекомендуется для большинства пользователей)
+[![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml)
+[![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml)
+[![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml)
+
+``` sh
+# Измените файл docker-compose.yml, сохраните метод 1 и удалите другие методы. Затем запустите:
+docker-compose up
+```
+
+P.S. Если вам нужен функционал, связанный с LaTeX, обратитесь к разделу Wiki. Кроме того, вы также можете использовать схему 4 или схему 0 для доступа к функционалу LaTeX.
+
+2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + Tongyi Qianwen (требуется знакомство со средой выполнения [Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian))
+[![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml)
+
+``` sh
+# Измените файл docker-compose.yml, сохраните метод 2 и удалите другие методы. Затем запустите:
+docker-compose up
+```
+
+
+### Метод установки III: Другие способы развертывания
+1. **Скрипты запуска одним нажатием для Windows**.
+Пользователи Windows, не знакомые с окружением Python, могут загрузить одну из версий в разделе [Релизы](https://github.com/binary-husky/gpt_academic/releases) для установки версии без локальных моделей.
+Скрипт предоставлен [oobabooga](https://github.com/oobabooga/one-click-installers).
+
+2. Использование сторонних API, Azure и т. д., см. страницу [вики](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)
+
+3. Руководство по развертыванию на удаленном сервере.
+Пожалуйста, посетите [вики-страницу развертывания на облачном сервере](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97).
+
+4. Некоторые новые платформы или методы развертывания
+    - Использование Sealos для [развертывания в один клик](https://github.com/binary-husky/gpt_academic/issues/993)
+    - Использование WSL2 (Windows Subsystem for Linux). См. [Руководство развертывания-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
+    - Как запустить на вложенном URL-адресе (например, `http://localhost/subpath`). См. 
[Инструкции по работе с FastAPI](docs/WithFastapi.md)
+
+
+
+# Расширенное использование
+### I: Пользовательские удобные кнопки (академические сочетания клавиш)
+Откройте файл `core_functional.py` в любом текстовом редакторе и добавьте следующие записи, затем перезапустите программу. (Если кнопка уже существует, то префикс и суффикс поддерживают горячую замену без перезапуска программы.)
+Например,
+```
+"Супер-англо-русский перевод": {
+    # Префикс, который будет добавлен перед вашим вводом. Например, используется для описания вашего запроса: перевода, объяснения кода, редактирования и т.д. Ключи "Prefix"/"Suffix" должны оставаться на английском.
+    "Prefix": "Пожалуйста, переведите следующий абзац на русский язык, а затем покажите каждый термин на экране с помощью таблицы Markdown:\n\n",
+
+    # Суффикс, который будет добавлен после вашего ввода. Например, можно использовать с префиксом, чтобы заключить ваш ввод в кавычки.
+    "Suffix": "",
+},
+```
+<div align="center">
+ +
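+
+Для наглядности: примерно так `Prefix`/`Suffix` такой записи оборачивают ваш ввод (упрощённый набросок, а не реальный код проекта; имена и значения условные):
+```python
+# Упрощённый набросок: как Prefix/Suffix записи из core_functional.py оборачивают ввод пользователя
+entry = {
+    "Prefix": "Пожалуйста, переведите следующий абзац на русский язык:\n\n",
+    "Suffix": "",
+}
+user_input = "Attention is all you need."
+final_prompt = entry["Prefix"] + user_input + entry["Suffix"]
+print(final_prompt)
+```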
+ +### II: Пользовательские функциональные плагины +Создавайте мощные функциональные плагины для выполнения любых задач, которые вам нужны и которых вы и не можете себе представить. +Создание плагина для этого проекта и его отладка являются простыми задачами, и если у вас есть базовые знания Python, вы можете реализовать свой собственный функциональный плагин, используя наши предоставленные шаблоны. +Дополнительную информацию см. в [Руководстве по функциональным плагинам](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97). + + +# Обновления +### I: Динамические + +1. Функция сохранения диалога. Вызовите "Сохранить текущий диалог" в области функциональных плагинов, чтобы сохранить текущий диалог в виде читаемого и восстанавливаемого html-файла. +Кроме того, можно использовать "Загрузить архивный файл диалога" в области функциональных плагинов (выпадающее меню), чтобы восстановить предыдущий разговор. +Подсказка: если не указывать файл и просто щелкнуть "Загрузить архивный файл диалога", можно просмотреть кэш сохраненных html-архивов. +
+ +
+ +2. ⭐Перевод Latex/Arxiv статей⭐ +
+ ===> + +
+ +3. Void Terminal (понимание пользовательских намерений из естественного языка и автоматическое вызов других плагинов) + +- Шаг 1: Введите "Пожалуйста, вызовите плагин для перевода PDF-статьи, адрес которой https://openreview.net/pdf?id=rJl0r3R9KX". +- Шаг 2: Нажмите "Void Terminal". + +
+ +
+ +4. Модульный дизайн функционала, позволяющий реализовать мощные функции с помощью простых интерфейсов +
+ + +
+ +5. Перевод и анализ других открытых проектов +
+ + +
+ +6. Функциональность для украшения[meme](https://github.com/fghrsh/live2d_demo) (по умолчанию отключена, требуется изменение файла `config.py`) +
+ +
+ +7. Генерация изображений с помощью OpenAI +
+ +
+ +8. Анализ и обобщение аудио с помощью OpenAI +
+ +
+ +9. Проверка и исправление ошибок во всем тексте LaTeX +
+ ===> + +
+ +10. Изменение языка и темы +
+ +
+ + + +### II: Версии: +- Версия 3.70 (в планах): Оптимизация темы AutoGen и разработка ряда дополнительных плагинов +- Версия 3.60: Внедрение AutoGen в качестве фундамента нового поколения плагинов +- Версия 3.57: Поддержка GLM3, Starfire v3, Wenxin One Word v4, исправление ошибок при совместном использовании локальной модели +- Версия 3.56: Поддержка добавления дополнительных функциональных кнопок в реальном времени, новая страница отчетов в формате PDF +- Версия 3.55: Переработка пользовательского интерфейса, внедрение плавающего окна и панели меню +- Версия 3.54: Добавлен интерпретатор кода (Code Interpreter) (в разработке) +- Версия 3.53: Динамический выбор различных тем интерфейса, повышение стабильности и решение проблемы конфликтов между несколькими пользователями +- Версия 3.50: Использование естественного языка для вызова всех функциональных плагинов проекта (Void Terminal), поддержка категоризации плагинов, улучшение пользовательского интерфейса, разработка новых тем +- Версия 3.49: Поддержка платформы Baidu Qianfan и Wenxin One Word +- Версия 3.48: Поддержка Ali Dharma Institute, Shanghai AI-Lab Scholar, Xunfei Starfire +- Версия 3.46: Поддержка реального голосового диалога с полной автоматизацией +- Версия 3.45: Поддержка настраиваемой модели ChatGLM2 +- Версия 3.44: Официальная поддержка Azure, улучшение удобства пользовательского интерфейса +- Версия 3.4: +Перевод полных текстов PDF, +корректировка латексных документов +- Версия 3.3: +Интернет-информационные функции +- Версия 3.2: Поддержка дополнительных параметров в функциональных плагинах (функция сохранения диалога, интерпретация кода на любом языке + одновременный вопрос о любом комбинированном LLM) +- Версия 3.1: Поддержка одновременного обращения к нескольким моделям gpt! Поддержка API2D, поддержка равномерной нагрузки нескольких api-ключей +- Версия 3.0: Поддержка chatglm и других небольших моделей llm +- Версия 2.6: Переработка структуры плагинов для повышения интерактивности, добавление дополнительных плагинов +- Версия 2.5: Автоматическое обновление, решение проблемы с длиной текста и переполнением токенов при обработке текста +- Версия 2.4: (1) Добавление функции полного перевода PDF; (2) Добавление функции изменения позиции объекта ввода; (3) Добавление функции вертикального размещения; (4) Оптимизация многопоточных функциональных плагинов. +- Версия 2.3: Улучшение интерактивности многопоточности +- Версия 2.2: Поддержка живой перезагрузки функциональных плагинов +- Версия 2.1: Складываемый макет +- Версия 2.0: Введение модульных функциональных плагинов +- Версия 1.0: Базовые функции + +GPT Academic Группа QQ разработчиков: `610599535` + +- Известные проблемы + - Некоторые расширения для браузера могут мешать работе пользовательского интерфейса этого программного обеспечения + - У официального Gradio есть много проблем совместимости, поэтому обязательно установите Gradio с помощью `requirement.txt` + +### III: Темы +Вы можете изменить тему путем изменения опции `THEME` (config.py) +1. `Chuanhu-Small-and-Beautiful` [ссылка](https://github.com/GaiZhenbiao/ChuanhuChatGPT/) + + +### IV: Ветви разработки этого проекта + +1. Ветка `master`: Основная ветка, стабильная версия +2. 
Ветвь `frontier`: Ветвь разработки, версия для тестирования + + +### V: Справочники и обучение + +``` +В коде использовались многие функции, представленные в других отличных проектах, поэтому их порядок не имеет значения: + +# ChatGLM2-6B от Тиньхуа: +https://github.com/THUDM/ChatGLM2-6B + +# Линейные модели с ограниченной памятью от Тиньхуа: +https://github.com/Jittor/JittorLLMs + +# ChatPaper: +https://github.com/kaixindelele/ChatPaper + +# Edge-GPT: +https://github.com/acheong08/EdgeGPT + +# ChuanhuChatGPT: +https://github.com/GaiZhenbiao/ChuanhuChatGPT + + + +# Установщик с одним щелчком Oobabooga: +https://github.com/oobabooga/one-click-installers + +# Больше: +https://github.com/gradio-app/gradio +https://github.com/fghrsh/live2d_demo + diff --git a/docs/README.md.German.md b/docs/README.md.German.md deleted file mode 100644 index b7a53f1f9d..0000000000 --- a/docs/README.md.German.md +++ /dev/null @@ -1,307 +0,0 @@ -> **Hinweis** -> -> Bei der Installation von Abhängigkeiten sollten nur die in **requirements.txt** **angegebenen Versionen** streng ausgewählt werden. -> -> `pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/` - -# GPT Akademisch optimiert (GPT Academic) - -**Wenn Ihnen dieses Projekt gefällt, geben Sie ihm bitte einen Stern; wenn Sie bessere Tastenkombinationen oder Funktions-Plugins entwickelt haben, können Sie gerne einen Pull Request eröffnen.** - -Wenn Sie dieses Projekt mögen, geben Sie ihm bitte einen Stern. Wenn Sie weitere nützliche wissenschaftliche Abkürzungen oder funktionale Plugins entwickelt haben, können Sie gerne ein Problem oder eine Pull-Anforderung öffnen. Wir haben auch ein README in [Englisch|](docs/README_EN.md)[日本語|](docs/README_JP.md)[한국어|](https://github.com/mldljyh/ko_gpt_academic)[Русский|](docs/README_RS.md)[Français](docs/README_FR.md), das von diesem Projekt selbst übersetzt wurde. -Um dieses Projekt in eine beliebige Sprache mit GPT zu übersetzen, lesen Sie `multi_language.py` (experimentell). - -> **Hinweis** -> -> 1. Beachten Sie bitte, dass nur Funktionserweiterungen (Schaltflächen) mit **roter Farbe** Dateien lesen können und einige Erweiterungen im **Dropdown-Menü** des Erweiterungsbereichs zu finden sind. Außerdem begrüßen wir jede neue Funktionserweiterung mit **höchster Priorität** und bearbeiten sie. -> -> 2. Die Funktionalität jeder Datei in diesem Projekt wird in der Selbstanalyse [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) detailliert beschrieben. Mit der Weiterentwicklung der Versionen können Sie jederzeit die zugehörigen Funktions-Erweiterungen aufrufen, um durch Aufruf von GPT einen Selbstanalysebericht des Projekts zu erstellen. Häufig gestellte Fragen finden Sie in der [`Wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installationsanweisungen](#Installation). -> -> 3. Dieses Projekt ist kompatibel und fördert die Verwendung von inländischen Sprachmodellen wie ChatGLM und RWKV, Pangu, etc. Es unterstützt das Vorhandensein mehrerer api-keys, die in der Konfigurationsdatei wie folgt angegeben werden können: `API_KEY="openai-key1,openai-key2,api2d-key3"`. 
Wenn ein `API_KEY` temporär geändert werden muss, geben Sie den temporären `API_KEY` im Eingabebereich ein und drücken Sie dann die Eingabetaste, um ihn zu übernehmen.Funktion | Beschreibung ---- | --- -Ein-Klick-Polieren | Unterstützt ein-Klick-Polieren und ein-Klick-Suche nach grammatikalischen Fehlern in wissenschaftlichen Arbeiten -Ein-Klick Chinesisch-Englisch Übersetzung | Ein-Klick Chinesisch-Englisch Übersetzung -Ein-Klick-Code-Erklärung | Zeigt Code, erklärt Code, erzeugt Code und fügt Kommentare zum Code hinzu -[Benutzerdefinierte Tastenkombinationen](https://www.bilibili.com/video/BV14s4y1E7jN) | Unterstützt benutzerdefinierte Tastenkombinationen -Modulare Gestaltung | Unterstützt leistungsstarke individuelle [Funktions-Plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions). Plugins unterstützen [Hot-Updates](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) -[Selbstprogramm-Analyse](https://www.bilibili.com/video/BV1cj411A7VW) | [Funktions-Plugin] [Ein-Klick Verstehen](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) der Quellcode dieses Projekts -[Programmanalyse](https://www.bilibili.com/video/BV1cj411A7VW) | [Funktions-Plugin] Ein-Klick-Analyse des Projektbaums anderer Python/C/C++/Java/Lua/...-Projekte -Lesen von Papieren, [Übersetzen](https://www.bilibili.com/video/BV1KT411x7Wn) von Papieren | [Funktions-Plugin] Ein-Klick Erklärung des gesamten LaTeX/PDF-Artikels und Erstellung einer Zusammenfassung -LaTeX-Volltext-Übersetzung und [Polieren](https://www.bilibili.com/video/BV1FT411H7c5/) | [Funktions-Plugin] Ein-Klick-Übersetzung oder-Polieren des LaTeX-Artikels -Bulk-Kommentargenerierung | [Funktions-Plugin] Ein-Klick Massenerstellung von Funktionskommentaren -Markdown [Chinesisch-Englisch Übersetzung](https://www.bilibili.com/video/BV1yo4y157jV/) | [Funktions-Plugin] Haben Sie die [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in den oben genannten 5 Sprachen gesehen? 
-Analyse-Berichtserstellung von chat | [Funktions-Plugin] Automatische Zusammenfassung nach der Ausführung -[Funktion zur vollständigen Übersetzung von PDF-Artikeln](https://www.bilibili.com/video/BV1KT411x7Wn) | [Funktions-Plugin] Extrahiert Titel und Zusammenfassung der PDF-Artikel und übersetzt den gesamten Text (mehrere Threads) -[Arxiv-Assistent](https://www.bilibili.com/video/BV1LM4y1279X) | [Funktions-Plugin] Geben Sie die Arxiv-Artikel-URL ein und klicken Sie auf Eine-Klick-Übersetzung-Zusammenfassung + PDF-Download -[Google Scholar Integrations-Assistent](https://www.bilibili.com/video/BV19L411U7ia) | [Funktions-Plugin] Geben Sie eine beliebige Google Scholar Such-URL ein und lassen Sie gpt Ihnen bei der Erstellung von [relatedworks](https://www.bilibili.com/video/BV1GP411U7Az/) helfen -Internet-Informationen Aggregation + GPT | [Funktions-Plugin] Lassen Sie GPT eine Frage beantworten, indem es [zuerst Informationen aus dem Internet](https://www.bilibili.com/video/BV1om4y127ck/) sammelt und so die Informationen nie veralten -Anzeige von Formeln / Bildern / Tabellen | Zeigt Formeln in beiden Formen, [TeX-Format und gerendeter Form](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png), unterstützt Formeln und Code-Highlights -Unterstützung von PlugIns mit mehreren Threads | Unterstützt den Aufruf mehrerer Threads in Chatgpt, um Text oder Programme [Batch zu verarbeiten](https://www.bilibili.com/video/BV1FT411H7c5/) -Starten Sie das dunkle Gradio-[Thema](https://github.com/binary-husky/gpt_academic/issues/173) | Fügen Sie ```/?__theme=dark``` an das Ende der Browser-URL an, um das dunkle Thema zu aktivieren -[Unterstützung für mehrere LLM-Modelle](https://www.bilibili.com/video/BV1wT411p7yf), [API2D](https://api2d.com/) Interface-Unterstützung | Das Gefühl, gleichzeitig von GPT3.5, GPT4, [Tshinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B), [Fudan MOSS](https://github.com/OpenLMLab/MOSS) bedient zu werden, muss toll sein, oder? -Zugriff auf weitere LLM-Modelle, Unterstützung von [huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) | Hinzufügen der Newbing-Schnittstelle (neues Bing), Einführung der Unterstützung von [Jittorllms](https://github.com/Jittor/JittorLLMs) der Tsinghua-Universität, [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) und [Pangu alpha](https://openi.org.cn/pangu/) -Weitere neue Funktionen (wie Bildgenerierung) …… | Siehe Ende dieses Dokuments …… - -- Neue Oberfläche (Ändern Sie die LAYOUT-Option in `config.py`, um zwischen "Seitenlayout" und "Oben-unten-Layout" zu wechseln) -
- -
- All buttons are dynamically generated by reading `functional.py`, and custom functions can be easily added, freeing up the clipboard. -
- -
- -- Proofreading/Correcting -
- -
- -- If the output contains formulas, they will be displayed in both tex format and rendered format for easy copying and reading. -
- -
- -- Don't feel like reading the project code? Show off the entire project to chatgpt. -
- -
- -- Multiple large language models are mixed and called together (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4). -
- -
- ---- -# Installation -## Installation-Method 1: Run directly (Windows, Linux or MacOS) - -1. Download the project -```sh -git clone https://github.com/binary-husky/gpt_academic.git -cd gpt_academic -``` - -2. Configure API_KEY - -Configure API KEY and other settings in `config.py`. [Special Network Environment Settings](https://github.com/binary-husky/gpt_academic/issues/1). - -(P.S. When the program is running, it will first check whether there is a "config_private.py" private configuration file, and use the configuration defined in it to override the configuration of "config.py". Therefore, if you understand our configuration reading logic, we strongly recommend that you create a new configuration file named "config_private.py" next to "config.py" and transfer (copy) the configurations in "config.py" to "config_private.py". "config_private.py" is not controlled by git, which can make your privacy information more secure. P.S. The project also supports configuring most options through `environment variables`, and the writing format of environment variables refers to the `docker-compose` file. Reading priority: `environment variable` > `config_private.py` >`config.py`) - - -3. Install dependencies -```sh -# (Option I: If familar with Python) (Python version 3.9 or above, the newer the better), Note: Use the official pip source or Ali pip source, temporary switching method: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -python -m pip install -r requirements.txt - -# (Option II: If not familiar with Python) Use anaconda with similar steps (https://www.bilibili.com/video/BV1rc411W7Dr): -conda create -n gptac_venv python=3.11 # Create an anaconda environment -conda activate gptac_venv # Activate the anaconda environment -python -m pip install -r requirements.txt # Same step as pip installation -``` - -
Click to expand if supporting Tsinghua ChatGLM/Fudan MOSS as backend -

- -[Optional Step] If supporting Tsinghua ChatGLM/Fudan MOSS as backend, additional dependencies need to be installed (Prerequisites: Familiar with Python + Used Pytorch + Sufficient computer configuration): -```sh -# [Optional Step I] Support Tsinghua ChatGLM. Remark: If encountering "Call ChatGLM fail Cannot load ChatGLM parameters", please refer to the following: 1: The above default installation is torch+cpu version. To use cuda, uninstall torch and reinstall torch+cuda; 2: If the model cannot be loaded due to insufficient machine configuration, you can modify the model precision in `request_llms/bridge_chatglm.py`, and modify all AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llms/requirements_chatglm.txt - -# [Optional Step II] Support Fudan MOSS -python -m pip install -r request_llms/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # When executing this line of code, you must be in the project root path - -# [Optional Step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. Currently supported models are as follows (jittorllms series currently only supports docker solutions): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] -``` - -

-
- 
-
-
-4. Run
-```sh
-python main.py
-```
-
-5. Testing Function Plugin
-```
-- Test function plugin template function (requires gpt to answer what happened today in history); you can use this function as a template to implement more complex functions.
-  Click "[Function Plugin Template Demo] Today in History"
-```
-
-## Installation-Method 2: Using Docker
-
-1. Only ChatGPT (Recommended for most people)
-
-``` sh
-git clone https://github.com/binary-husky/gpt_academic.git # Download the project
-cd gpt_academic # Enter the path
-nano config.py # Edit config.py with any text editor; configure "Proxy", "API_KEY" and "WEB_PORT" (e.g. 50923), etc.
-docker build -t gpt-academic . # Install
-
-# (Last step, option 1) In a Linux environment, using `--net=host` is more convenient and faster
-docker run --rm -it --net=host gpt-academic
-# (Last step, option 2) In a macOS/Windows environment, you can only use the -p option to expose the container's port (e.g. 50923) to a port on the host
-docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
-```
-
-2. ChatGPT + ChatGLM + MOSS (Requires familiarity with Docker)
-
-``` sh
-# Modify docker-compose.yml: delete solutions 1 and 3 and retain solution 2. Modify the configuration of solution 2 in docker-compose.yml, referring to the comments in it.
-docker-compose up
-```
-
-3. ChatGPT + LLAMA + Pangu + RWKV (Requires familiarity with Docker)
-``` sh
-# Modify docker-compose.yml: delete solutions 1 and 2 and retain solution 3. Modify the configuration of solution 3 in docker-compose.yml, referring to the comments in it.
-docker-compose up
-```
-
-
-## Installation-Method 3: Other Deployment Options
-
-1. How to use a reverse proxy URL / Microsoft Azure API
-Configure API_URL_REDIRECT according to the instructions in `config.py` (see the sketch at the end of this section).
-
-2. Remote cloud server deployment (requires cloud server knowledge and experience)
-Please visit [Deployment wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
-
-3. Using WSL 2 (Windows Subsystem for Linux)
-Please visit [Deployment wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
-
-4. How to run at a secondary URL (such as `http://localhost/subpath`)
-Please visit [FastAPI operating instructions](docs/WithFastapi.md)
-
-5. Use docker-compose to run
-Please read docker-compose.yml and follow the prompts there.
-
----
-# Advanced Usage
-## Customize new convenience buttons / custom function plugins
-
-1. Customize new convenience buttons (Academic Shortcut Keys)
-Open `core_functional.py` with any text editor, add an entry as follows, and then restart the program. (If the button has already been added and is visible, both the prefix and the suffix can be hot-modified and take effect without restarting the program.)
-For example:
-```
-"Super English to Chinese": {
-    # Prefix, will be added before your input. For example, used to describe your request, such as translating, explaining code, polishing, etc.
-    "Prefix": "Please translate the following content into Chinese, and then use a markdown table to explain the proper nouns that appear in the text one by one:\n\n",
-
-    # Suffix, will be added after your input. For example, combined with the prefix, you can enclose your input in quotes.
-    "Suffix": "",
-},
-```
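For deployment option 1 above, the redirect is, in essence, a mapping from the official endpoint to your own. The dictionary shape below is an assumption modeled on the comments in `config.py`, and the proxy URL is a placeholder; always follow the instructions in the file itself.

```python
# Hypothetical illustration of an endpoint redirect for a reverse proxy or an
# Azure-style gateway; "your-proxy.example.com" is a placeholder, not a real host.
API_URL_REDIRECT = {
    "https://api.openai.com/v1/chat/completions":
        "https://your-proxy.example.com/v1/chat/completions",
}
```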
- -
- 
-
-2. Custom function plugins
-
-Write powerful function plugins to perform any task you want, even ones you had not thought possible.
-Writing and debugging plugins in this project is easy: with a basic knowledge of Python, you can implement your own plugin functions by imitating the template we provide.
-For more information, please refer to the [Function Plugin Guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97); a minimal plugin sketch also follows below.
-
----
-# Latest Update
-## New feature dynamics
-
-1. Conversation saving. In the function plugin area, call "Save current conversation" to save the current conversation as a readable and restorable HTML file. In addition, in the function plugin area (drop-down menu) you can call "Load conversation history archive" to restore a previous conversation. Tip: clicking "Load conversation history archive" without specifying a file lets you view the cached HTML archives, and clicking "Delete all local conversation history records" deletes all cached HTML archives.
- -
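To make the plugin-writing workflow described above concrete, here is a minimal sketch of a function plugin in the generator style the project uses. The exact signature and the yielded tuple are assumptions based on the plugin template; consult the Function Plugin Guide for the real interface.

```python
# A minimal, hypothetical function-plugin sketch; the signature and yield
# protocol are modeled on the plugin template, not copied from it.
def demo_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # Echo the request into the chat window, then yield so the UI can refresh.
    chatbot.append((txt, "Working on it..."))
    yield chatbot, history, "Normal"

    # Do some work (a trivial transformation stands in for a real LLM call).
    result = f"You asked about: {txt!r}"
    chatbot[-1] = (txt, result)
    history.extend([txt, result])
    yield chatbot, history, "Done"
```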
- 
-2. Report generation. Most plugins generate a work report after execution finishes.
- - - -
- 
-3. Modular function design: simple interfaces that support powerful functionality.
- - -
- 
-4. This is an open-source project that can "translate itself".
- -
- 
-5. Translating other open-source projects is no problem at all.
- -
- -
- -
- 
-6. Decorate [`live2d`](https://github.com/fghrsh/live2d_demo) with small touches (disabled by default; requires changes to `config.py`).
- -
- 
-7. New MOSS language model support.
- -
- 
-8. OpenAI image generation.
- -
- 
-9. OpenAI audio analysis and summarization.
- -
- 
-10. Latex proofreading of the full text.
- -
- 
-
-## Version:
-- Version 3.5 (Todo): call all of this project's function plugins with natural language (high priority).
-- Version 3.4 (Todo): improved multi-threading support for local large language models (LLM).
-- Version 3.3: + Internet information synthesis function
-- Version 3.2: function plugins support more parameter interfaces (conversation saving, interpreting code in any language + querying any LLM combination at the same time)
-- Version 3.1: support for querying multiple GPT models at once! Support for API2D, support for load balancing across multiple API keys.
-- Version 3.0: support for Chatglm and other small LLMs
-- Version 2.6: restructured the plugin architecture, improved interactivity, added more plugins
-- Version 2.5: self-updating; fixed overly long text and token overflow when summarizing the source code of large projects.
-- Version 2.4: (1) added full-text PDF translation; (2) added switching the position of the input area; (3) added a vertical layout option; (4) optimized multi-threaded function plugins.
-- Version 2.3: improved multi-threaded interactivity
-- Version 2.2: function plugins support "hot reload"
-- Version 2.1: collapsible layout
-- Version 2.0: introduced modular function plugins
-- Version 1.0: basic functions
-
-gpt_academic developer QQ group 2: 610599535
-
-- Known issues
-  - Some browser translation plugins can interfere with the frontend of this software.
-  - A Gradio version that is either too new or too old causes a variety of failures.
-
-## References and learning
-
-```
-The code borrows from the designs of many other excellent projects, in particular:
-
-# Project 1: Tsinghua ChatGLM-6B:
-https://github.com/THUDM/ChatGLM-6B
-
-# Project 2: Tsinghua JittorLLMs:
-https://github.com/Jittor/JittorLLMs
-
-# Project 3: Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# Project 4: ChuanhuChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-# Project 5: ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# More:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
\ No newline at end of file
diff --git a/docs/README.md.Italian.md b/docs/README.md.Italian.md
deleted file mode 100644
index 1e24a535b5..0000000000
--- a/docs/README.md.Italian.md
+++ /dev/null
@@ -1,316 +0,0 @@
-> **Note**
->
-> When installing dependencies, strictly select the **versions specified** in requirements.txt.
->
-> `pip install -r requirements.txt`
-
-# GPT Academic Optimization (GPT Academic)
-
-**If you like this project, please give it a star. If you have developed more useful academic shortcuts or function plugins, feel free to open an issue or pull request. We also have a README in [English|](README_EN.md)[Japanese|](README_JP.md)[Korean|](https://github.com/mldljyh/ko_gpt_academic)[Russian|](README_RS.md)[French](README_FR.md) translated by this project itself.
-To translate this project into any language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
-
-> **Note**
->
-> 1. Please note that only the plugins (buttons) marked in **red** support reading files; some plugins are located in the **drop-down menu** in the plugin area. 
We welcome and handle PRs for any new plugin with **top priority**!
->
-> 2. The functions of every file in this project are described in detail in its own self-translation analysis [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). As versions iterate, you can also click the related function plugins at any time to call GPT and regenerate the project's self-analysis report. Frequently asked questions are summarized in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installation method](#installation).
->
-> 3. This project is compatible with, and encourages trying, domestically produced large language models such as chatglm, RWKV, Pangu, etc. Multiple api-keys can coexist and can be written in the configuration file as `API_KEY="openai-key1,openai-key2,api2d-key3"`. To replace `API_KEY` temporarily, enter a temporary `API_KEY` in the input area and press Enter to make it take effect.
-
- 
-Function | Description
--- | ---
-One-click polishing | Supports one-click polishing and one-click grammar checking of a paper
-One-click Chinese-English translation | One-click Chinese-English translation
-One-click code explanation | Display, explain, generate, and annotate code with one click
-[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys
-Modular design | Supports powerful custom [function plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions); plugins support [hot reload](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
-[Self-program profiling](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] [One-click understanding](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of this project's source code
-[Program profiling](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One click to analyze the tree of other Python/C/C++/Java/Lua/... projects
-Paper reading, paper [translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] One-click interpretation of a full latex/pdf paper and generation of an abstract
-Full Latex translation, [one-click polishing](https://www.bilibili.com/video/BV1FT411H7c5/) | [Function plugin] One-click translation or polishing of a Latex paper
-Batch comment generation | [Function plugin] One-click automatic generation of function comments
-[Markdown Chinese-English translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Function plugin] Have you read the [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in the five languages above? 
-Chat analysis report generation | [Function plugin] Automatically generates a summary report after running
-[Full PDF paper translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] Extracts the title and abstract of a PDF paper and translates the full text (multi-threaded)
-[Arxiv assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plugin] Enter an Arxiv article URL to translate the abstract with one click + download the PDF
-[Google Scholar integration assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Function plugin] Given any Google Scholar search page URL, let GPT help you write your [related works](https://www.bilibili.com/video/BV1GP411U7Az/)
-Internet information aggregation + GPT | [Function plugin] Let GPT fetch information from the Internet before answering questions, so its answers never go stale
-Formula/image/table display | Shows a formula in both [tex and rendered form](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png) at the same time; supports formula and code highlighting
-Multi-threaded function plugin support | Supports multi-threaded calls to chatgpt for one-click processing of [large volumes of text](https://www.bilibili.com/video/BV1FT411H7c5/) or programs
-Dark gradio theme | Append ```/?__theme=dark``` to the browser URL to switch to the dark theme
-Support for more LLM models, API2D support | Being served simultaneously by GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B) and [Fudan MOSS](https://github.com/OpenLMLab/MOSS) must feel great, right?
-More supported LLM models, support for Huggingface deployment | Adds a Newbing interface (New Bing); introduces compatibility with Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs), [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) and [PanGu-α](https://openi.org.cn/pangu/)
-More demonstrations of new features (image generation, etc.)... | See the end of this document...
- 
-
-- New interface (change the LAYOUT option in `config.py` to switch between a left-right layout and a top-bottom layout)
-
- -
-
-- All buttons are generated dynamically by reading the functional.py file; adding new functions to it is easy, freeing up your clipboard.
-
- -
- 
-
-- Proofreading/correction
-
- -
- 
-
-- If the output contains a formula, it is shown both as tex and as rendered text, making it easy to copy and read.
-
- -
- 
-
-- No time to read the project code? Just show the whole project to chatgpt and ask.
-
- -
- 
-
-- Mixed calls to several large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
-
- -
- ---- -# Installazione -## Installazione - Metodo 1: Esecuzione diretta (Windows, Linux o MacOS) - -1. Scarica il progetto -```sh -git clone https://github.com/binary-husky/gpt_academic.git -cd gpt_academic -``` - -2. Configura API_KEY - -In `config.py`, configura la tua API KEY e altre impostazioni, [configs for special network environments](https://github.com/binary-husky/gpt_academic/issues/1). - -(N.B. Quando il programma viene eseguito, verifica prima se esiste un file di configurazione privato chiamato `config_private.py` e sovrascrive le stesse configurazioni in `config.py`. Pertanto, se capisci come funziona la nostra logica di lettura della configurazione, ti consigliamo vivamente di creare un nuovo file di configurazione chiamato `config_private.py` accanto a `config.py`, e spostare (copiare) le configurazioni di `config.py` in `config_private.py`. 'config_private.py' non è sotto la gestione di git e può proteggere ulteriormente le tue informazioni personali. NB Il progetto supporta anche la configurazione della maggior parte delle opzioni tramite "variabili d'ambiente". La sintassi della variabile d'ambiente è descritta nel file `docker-compose`. Priorità di lettura: "variabili d'ambiente" > "config_private.py" > "config.py") - - -3. Installa le dipendenze -```sh -# (Scelta I: se sei familiare con python) (python 3.9 o superiore, più nuovo è meglio), N.B.: utilizza il repository ufficiale pip o l'aliyun pip repository, metodo temporaneo per cambiare il repository: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -python -m pip install -r requirements.txt - -# (Scelta II: se non conosci Python) utilizza anaconda, il processo è simile (https://www.bilibili.com/video/BV1rc411W7Dr): -conda create -n gptac_venv python=3.11 # crea l'ambiente anaconda -conda activate gptac_venv # attiva l'ambiente anaconda -python -m pip install -r requirements.txt # questo passaggio funziona allo stesso modo dell'installazione con pip -``` - -
Click here to expand if you want to support Tsinghua ChatGLM / Fudan MOSS as the backend

- -【Passaggio facoltativo】 Se si desidera supportare ChatGLM di Tsinghua/MOSS di Fudan come backend, è necessario installare ulteriori dipendenze (prerequisiti: conoscenza di Python, esperienza con Pytorch e computer sufficientemente potente): -```sh -# 【Passaggio facoltativo I】 Supporto a ChatGLM di Tsinghua. Note su ChatGLM di Tsinghua: in caso di errore "Call ChatGLM fail 不能正常加载ChatGLM的参数" , fare quanto segue: 1. Per impostazione predefinita, viene installata la versione di torch + cpu; per usare CUDA, è necessario disinstallare torch e installare nuovamente torch + cuda; 2. Se non è possibile caricare il modello a causa di una configurazione insufficiente del computer, è possibile modificare la precisione del modello in request_llms/bridge_chatglm.py, cambiando AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) in AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llms/requirements_chatglm.txt - -# 【Passaggio facoltativo II】 Supporto a MOSS di Fudan -python -m pip install -r request_llms/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # Si prega di notare che quando si esegue questa riga di codice, si deve essere nella directory radice del progetto - -# 【Passaggio facoltativo III】 Assicurati che il file di configurazione config.py includa tutti i modelli desiderati, al momento tutti i modelli supportati sono i seguenti (i modelli della serie jittorllms attualmente supportano solo la soluzione docker): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] -``` - -

-
- - - -4. Esegui -```sh -python main.py -```5. Plugin di test delle funzioni -``` -- Funzione plugin di test (richiede una risposta gpt su cosa è successo oggi in passato), puoi utilizzare questa funzione come template per implementare funzionalità più complesse - Clicca su "[Demo del plugin di funzione] Oggi nella storia" -``` - -## Installazione - Metodo 2: Utilizzo di Docker - -1. Solo ChatGPT (consigliato per la maggior parte delle persone) - -``` sh -git clone https://github.com/binary-husky/gpt_academic.git # scarica il progetto -cd gpt_academic # entra nel percorso -nano config.py # con un qualsiasi editor di testo, modifica config.py configurando "Proxy", "API_KEY" e "WEB_PORT" (ad esempio 50923) -docker build -t gpt-academic . # installa - -#(ultimo passaggio - selezione 1) In un ambiente Linux, utilizzare '--net=host' è più conveniente e veloce -docker run --rm -it --net=host gpt-academic -#(ultimo passaggio - selezione 2) In un ambiente MacOS/Windows, l'opzione -p può essere utilizzata per esporre la porta del contenitore (ad es. 50923) alla porta della macchina -docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic -``` - -2. ChatGPT + ChatGLM + MOSS (richiede familiarità con Docker) - -``` sh -# Modifica docker-compose.yml, elimina i piani 1 e 3, mantieni il piano 2. Modifica la configurazione del piano 2 in docker-compose.yml, si prega di fare riferimento alle relative annotazioni -docker-compose up -``` - -3. ChatGPT + LLAMA + Pangu + RWKV (richiede familiarità con Docker) - -``` sh -# Modifica docker-compose.yml, elimina i piani 1 e 2, mantieni il piano 3. Modifica la configurazione del piano 3 in docker-compose.yml, si prega di fare riferimento alle relative annotazioni -docker-compose up -``` - - -## Installazione - Metodo 3: Altre modalità di distribuzione - -1. Come utilizzare un URL di reindirizzamento / AzureAPI Cloud Microsoft -Configura API_URL_REDIRECT seguendo le istruzioni nel file `config.py`. - -2. Distribuzione su un server cloud remoto (richiede conoscenze ed esperienza di server cloud) -Si prega di visitare [wiki di distribuzione-1] (https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97) - -3. Utilizzo di WSL2 (Windows Subsystem for Linux) -Si prega di visitare [wiki di distribuzione-2] (https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2) - -4. Come far funzionare ChatGPT all'interno di un sottodominio (ad es. `http://localhost/subpath`) -Si prega di visitare [Istruzioni per l'esecuzione con FastAPI] (docs/WithFastapi.md) - -5. Utilizzo di docker-compose per l'esecuzione -Si prega di leggere il file docker-compose.yml e seguire le istruzioni fornite. - ---- -# Uso avanzato -## Personalizzazione dei pulsanti / Plugin di funzione personalizzati - -1. Personalizzazione dei pulsanti (scorciatoie accademiche) -Apri `core_functional.py` con qualsiasi editor di testo e aggiungi la voce seguente, quindi riavvia il programma (se il pulsante è già stato aggiunto con successo e visibile, il prefisso e il suffisso supportano la modifica in tempo reale, senza bisogno di riavviare il programma). - -ad esempio -``` -"超级英译中": { - # Prefisso, verrà aggiunto prima del tuo input. Ad esempio, descrivi la tua richiesta, come tradurre, spiegare il codice, correggere errori, ecc. 
- "Prefix": "Per favore traduci questo testo in Cinese, e poi spiega tutti i termini tecnici nel testo con una tabella markdown:\n\n", - - # Suffisso, verrà aggiunto dopo il tuo input. Ad esempio, con il prefisso puoi circondare il tuo input con le virgolette. - "Suffix": "", -}, -``` -
- -
- 
-
-2. Custom function plugins
-
-Write custom function plugins and carry out any task you want, including ones you never thought possible.
-Writing and debugging plugins in this project is easy: if you have some basic Python knowledge, you can implement your own plugin function by following the template we provide. For more details, see the [function plugin guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
-
----
-# Latest update
-## New feature dynamics
-
-1. Conversation saving. In the function plugin area, click "Save current conversation" to save the current conversation as a readable and restorable html file; in the function plugin area (drop-down menu), click "Load conversation history archive" to restore a previous conversation. Tip: clicking "Load conversation history archive" without specifying a file lets you view the cached html history archives, and clicking "Delete all local conversation history records" deletes all html archive caches. (A sketch of the idea follows below.)
- -
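As an illustrative sketch of the conversation-saving idea in item 1: serialize question/answer pairs into a self-contained HTML file that can be read back later. The markup and file layout below are assumptions, not the plugin's actual format.

```python
# Hypothetical sketch of persisting a conversation as a readable HTML file;
# the markup here is illustrative, not the plugin's real archive format.
import html

def save_chat_as_html(history: list[tuple[str, str]], path: str) -> None:
    rows = []
    for question, answer in history:
        rows.append(f"<p><b>User:</b> {html.escape(question)}</p>")
        rows.append(f"<p><b>Assistant:</b> {html.escape(answer)}</p>")
    with open(path, "w", encoding="utf-8") as f:
        f.write("<html><body>\n" + "\n".join(rows) + "\n</body></html>")

save_chat_as_html([("hello", "hi there")], "chat_archive.html")
```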
- 
-2. Report generation. Most plugins generate a work report after execution.
- - - -
- 
-3. Modular function design: simple interfaces that support powerful functionality.
- - -
- 
-4. This is an open-source project that can "translate itself".
- -
- 
-5. Translating other open-source projects is simple.
- -
- -
- -
- 
-6. A small decorative feature for [live2d](https://github.com/fghrsh/live2d_demo) (disabled by default; requires modifying `config.py`).
- -
- 
-7. Support for the MOSS large language model
- -
- 
-8. OpenAI image generation
- -
- 
-9. OpenAI audio analysis and summarization
- -
- 
-10. Full-text LaTeX proofreading
- -
- 
-
-## Version:
-- version 3.5 (Todo): call all of this project's function plugins with natural language (high priority)
-- version 3.4 (Todo): improve multi-threading support for the local Chatglm large language model
-- version 3.3: + Internet information synthesis function
-- version 3.2: function plugins support more parameter interfaces (conversation saving, interpreting code in any language + querying any LLM combination at the same time)
-- version 3.1: support for querying multiple gpt models at once! api2d support, load balancing across multiple apikeys
-- version 3.0: support for chatglm and other small LLMs
-- version 2.6: restructured the plugin architecture, improved interactivity, added more plugins
-- version 2.5: self-updating; fixed overly long text and token overflow when summarizing large engineering projects
-- version 2.4: (1) added full-text PDF translation; (2) added switching the position of the input area; (3) added a vertical layout option; (4) optimized multi-threaded function plugins.
-- version 2.3: improved multi-threaded interactivity
-- version 2.2: function plugins support hot reload
-- version 2.1: collapsible layout
-- version 2.0: introduced modular function plugins
-- version 1.0: basic functions
-
-gpt_academic developers QQ group 2: 610599535
-
-- Known issues
-  - Some browser translation plugins interfere with the frontend of this software
-  - A gradio version that is too new or too old can cause a variety of malfunctions
-
-## References and learning
-
-```
-The code borrows from many excellent project designs, mainly:
-
-# Project 1: Tsinghua ChatGLM-6B:
-https://github.com/THUDM/ChatGLM-6B
-
-# Project 2: Tsinghua JittorLLMs:
-https://github.com/Jittor/JittorLLMs
-
-# Project 3: Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# Project 4: ChuanhuChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-# Project 5: ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# More:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
diff --git a/docs/README.md.Korean.md b/docs/README.md.Korean.md
deleted file mode 100644
index db4b2d8fa5..0000000000
--- a/docs/README.md.Korean.md
+++ /dev/null
@@ -1,270 +0,0 @@
-> **Note**
->
-> When installing dependencies, strictly select the **versions specified** in requirements.txt.
->
-> `pip install -r requirements.txt`
-
-# GPT Academic Optimization (GPT Academic)
-
-**If you like this project, please give it a Star. If you have additional useful academic shortcuts or function plugins, leave an issue or pull request. READMEs in [English |](docs/README_EN.md)[Japanese |](docs/README_JP.md)[Korean |](https://github.com/mldljyh/ko_gpt_academic)[Russian |](docs/README_RS.md)[French](docs/README_FR.md) are also available for this project.
-To translate this project into an arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py). (experimental)
-
-> **Note**
->
-> 1. Only the function plugins (buttons) marked in **red** support reading files. Some plugins are in the **drop-down menu** of the plugin area. New plugins are welcomed and handled with the **highest priority**!
->
-> 2. The functions of each file in this project are described in detail in [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). As versions are updated, you can also click the related function plugins at any time to call GPT and regenerate the project's self-analysis report. 
Frequently asked questions are summarized in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installation method](#installation).
->
-> 3. This project is compatible with and encourages trying domestic language models such as chatglm, RWKV, Pangu, etc. Multiple api-keys are supported and can be written in the configuration file as `API_KEY="openai-key1,openai-key2,api2d-key3"`. To change `API_KEY` temporarily, enter the temporary `API_KEY` in the input area and press Enter to apply it immediately.
-
- 
-Function | Description
--- | ---
-One-click polishing | Supports one-click polishing and one-click grammar checking of papers
-One-click Chinese-English translation | One-click Chinese-English translation
-Code explanation | Display, explain, generate, and annotate code
-[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys
-Modular design | Supports powerful [function plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions); plugins support [hot reload](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
-[Self-program profiling](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click understanding of this project's source code
-[Program profiling](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click analysis of other project trees (Python/C/C++/Java/Lua/...)
-Paper reading, translation | [Function plugin] Reads a full LaTeX/PDF paper and generates an abstract
-LaTeX text [translation](https://www.bilibili.com/video/BV1nk4y1Y7Js/), [polishing](https://www.bilibili.com/video/BV1FT411H7c5/) | [Function plugin] One-click translation or polishing of a LaTeX paper
-Batch comment generation | [Function plugin] Batch generation of function comments
-Markdown Chinese-English translation | [Function plugin] Have you seen the [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in the five languages above?
-Chat analysis report generation | [Function plugin] Automatically generates a summary report after running
-[PDF paper translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] Extracts the title and abstract of a PDF paper and translates it (multi-threaded)
-[Arxiv assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plugin] Enter an Arxiv paper URL to translate the abstract and download the PDF
-[Google Scholar integration assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Function plugin] Given a Google Scholar search page URL, gpt helps you [write your related works](https://www.bilibili.com/video/BV1GP411U7Az/)
-Internet information aggregation + GPT | [Function plugin] Lets GPT gather information from the Internet first and then answer questions, so the information is never outdated
-Formula/image/table display | Shows formulas in both tex and rendered form; supports formula and code highlighting
-Multi-threaded function plugin support | Runs chatgpt on multiple requests to process [large volumes of text](https://www.bilibili.com/video/BV1FT411H7c5/) or programs
-Dark gradio theme | Append ```/?__theme=dark``` to the end of the browser URL to switch to the dark theme
-[Multiple LLM models](https://www.bilibili.com/video/BV1wT411p7yf) supported, [API2D](https://api2d.com/) interface supported | It must feel great to be served by GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B) and [Fudan MOSS](https://github.com/OpenLMLab/MOSS) all at once!
-More LLM models, [huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) support | Adds a new Bing interface (New Bing); supports Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs), [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) and [Pangu-α](https://openi.org.cn/pangu/)
-Other new features (image generation, etc.) ... | See the end of this document ...
-
-- All buttons are generated dynamically by reading functional.py; custom functions can be added freely, freeing up your clipboard.
-
- -
- 
-- Proofreading/typo correction
- -
- 
-- If the output contains formulas, they are shown in both tex and rendered form at the same time for easy copying and reading.
- -
- 
-- No time to read the project code? Just show the whole project to chatgpt.
- -
- 
-- General-purpose mixed calls to multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
- -
- ---- -# 설치 -## Installation-Method 1: Run directly (Windows, Linux or MacOS) - -1. 프로젝트 다운로드 -```sh -git clone https://github.com/binary-husky/gpt_academic.git -cd gpt_academic -``` - -2. API_KEY 구성 - -`config.py`에서 API KEY 등 설정을 구성합니다. [특별한 네트워크 환경 설정](https://github.com/binary-husky/gpt_academic/issues/1) . - -(P.S. 프로그램이 실행될 때, 이름이 `config_private.py`인 기밀 설정 파일이 있는지 우선적으로 확인하고 해당 설정으로 `config.py`의 동일한 이름의 설정을 덮어씁니다. 따라서 구성 읽기 논리를 이해할 수 있다면, `config.py` 옆에 `config_private.py`라는 새 구성 파일을 만들고 `config.py`의 구성을 `config_private.py`로 이동(복사)하는 것이 좋습니다. `config_private.py`는 git으로 관리되지 않으며 개인 정보를 더 안전하게 보호할 수 있습니다. P.S. 프로젝트는 또한 대부분의 옵션을 `환경 변수`를 통해 설정할 수 있으며, `docker-compose` 파일을 참조하여 환경 변수 작성 형식을 확인할 수 있습니다. 우선순위: `환경 변수` > `config_private.py` > `config.py`) - - -3. 의존성 설치 -```sh -# (I 선택: 기존 python 경험이 있다면) (python 버전 3.9 이상, 최신 버전이 좋습니다), 참고: 공식 pip 소스 또는 알리 pip 소스 사용, 일시적인 교체 방법: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -python -m pip install -r requirements.txt - -# (II 선택: Python에 익숙하지 않은 경우) anaconda 사용 방법은 비슷함(https://www.bilibili.com/video/BV1rc411W7Dr): -conda create -n gptac_venv python=3.11 # anaconda 환경 만들기 -conda activate gptac_venv # anaconda 환경 활성화 -python -m pip install -r requirements.txt # 이 단계도 pip install의 단계와 동일합니다. -``` - -
Click here to expand if you need Tsinghua ChatGLM / Fudan MOSS as additional backends

- -[Tsinghua ChatGLM] / [Fudan MOSS]를 백엔드로 사용하려면 추가적인 종속성을 설치해야합니다 (전제 조건 : Python을 이해하고 Pytorch를 사용한 적이 있으며, 컴퓨터가 충분히 강력한 경우) : -```sh -# [선택 사항 I] Tsinghua ChatGLM을 지원합니다. Tsinghua ChatGLM에 대한 참고사항 : "Call ChatGLM fail cannot load ChatGLM parameters normally" 오류 발생시 다음 참조: -# 1 : 기본 설치된 것들은 torch + cpu 버전입니다. cuda를 사용하려면 torch를 제거한 다음 torch + cuda를 다시 설치해야합니다. -# 2 : 모델을 로드할 수 없는 기계 구성 때문에, AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)를 -# AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)로 변경합니다. -python -m pip install -r request_llms/requirements_chatglm.txt - -# [선택 사항 II] Fudan MOSS 지원 -python -m pip install -r request_llms/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # 다음 코드 줄을 실행할 때 프로젝트 루트 경로에 있어야합니다. - -# [선택 사항III] AVAIL_LLM_MODELS config.py 구성 파일에 기대하는 모델이 포함되어 있는지 확인하십시오. -# 현재 지원되는 전체 모델 : -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] -``` - -

-
- - - -4. 실행 -```sh -python main.py -```5. 테스트 함수 플러그인 -``` -- 테스트 함수 플러그인 템플릿 함수 (GPT에게 오늘의 역사에서 무슨 일이 일어났는지 대답하도록 요청)를 구현하는 데 사용할 수 있습니다. 이 함수를 기반으로 더 복잡한 기능을 구현할 수 있습니다. - "[함수 플러그인 템플릿 데모] 오늘의 역사"를 클릭하세요. -``` - -## 설치 - 방법 2 : 도커 사용 - -1. ChatGPT 만 (대부분의 사람들이 선택하는 것을 권장합니다.) - -``` sh -git clone https://github.com/binary-husky/gpt_academic.git # 다운로드 -cd gpt_academic # 경로 이동 -nano config.py # 아무 텍스트 에디터로 config.py를 열고 "Proxy","API_KEY","WEB_PORT" (예 : 50923) 등을 구성합니다. -docker build -t gpt-academic . # 설치 - -#(마지막 단계-1 선택) Linux 환경에서는 --net=host를 사용하면 더 편리합니다. -docker run --rm -it --net=host gpt-academic -#(마지막 단계-2 선택) macOS / windows 환경에서는 -p 옵션을 사용하여 컨테이너의 포트 (예 : 50923)를 호스트의 포트로 노출해야합니다. -docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic -``` - -2. ChatGPT + ChatGLM + MOSS (Docker에 익숙해야합니다.) - -``` sh -#docker-compose.yml을 수정하여 계획 1 및 계획 3을 삭제하고 계획 2를 유지합니다. docker-compose.yml에서 계획 2의 구성을 수정하면 됩니다. 주석을 참조하십시오. -docker-compose up -``` - -3. ChatGPT + LLAMA + Pangu + RWKV (Docker에 익숙해야합니다.) -``` sh -#docker-compose.yml을 수정하여 계획 1 및 계획 2을 삭제하고 계획 3을 유지합니다. docker-compose.yml에서 계획 3의 구성을 수정하면 됩니다. 주석을 참조하십시오. -docker-compose up -``` - - -## 설치 - 방법 3 : 다른 배치 방법 - -1. 리버스 프록시 URL / Microsoft Azure API 사용 방법 -API_URL_REDIRECT를 `config.py`에 따라 구성하면됩니다. - -2. 원격 클라우드 서버 배치 (클라우드 서버 지식과 경험이 필요합니다.) -[배치위키-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)에 방문하십시오. - -3. WSL2 사용 (Windows Subsystem for Linux 하위 시스템) -[배치 위키-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)에 방문하십시오. - -4. 2 차 URL (예 : `http : //localhost/subpath`)에서 실행하는 방법 -[FastAPI 실행 설명서] (docs / WithFastapi.md)를 참조하십시오. - -5. docker-compose 실행 -docker-compose.yml을 읽은 후 지시 사항에 따라 작업하십시오. ---- -# 고급 사용법 -## 사용자 정의 바로 가기 버튼 / 사용자 정의 함수 플러그인 - -1. 사용자 정의 바로 가기 버튼 (학술 바로 가기) -임의의 텍스트 편집기로 'core_functional.py'를 엽니다. 엔트리 추가, 그런 다음 프로그램을 다시 시작하면됩니다. (버튼이 이미 추가되어 보이고 접두사, 접미사가 모두 변수가 효과적으로 수정되면 프로그램을 다시 시작하지 않아도됩니다.) -예 : -``` -"超级英译中": { - # 접두사. 당신이 요구하는 것을 설명하는 데 사용됩니다. 예를 들어 번역, 코드를 설명, 다듬기 등 - "Prefix": "下面翻译成中文,然后用一个 markdown 表格逐一解释文中出现的专有名词:\n\n", - - # 접미사는 입력 내용 앞뒤에 추가됩니다. 예를 들어 전위를 사용하여 입력 내용을 따옴표로 묶는데 사용할 수 있습니다. - "Suffix": "", -}, -``` -
- -
- 
-
-2. Custom function plugins
-Write powerful function plugins to carry out any task you want.
-Writing and debugging plugins in this project is easy: with some basic Python knowledge you can implement your own plugin function by imitating the template we provide. For details, see the [function plugin guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
----
-# Latest update
-## New feature dynamics
-
-1. Conversation saving. In the function plugin area, call "Save current conversation" to save the current conversation as a readable, restorable HTML file; in the function plugin area (drop-down menu), call "Load conversation history archive" to restore a previous conversation. Tip: clicking "Load conversation history archive" without specifying a file shows the cached HTML archives, and clicking "Delete all local conversation history records" deletes all HTML caches.
-
-2. Report generation. Most plugins generate a work report after they finish running.
-
-3. Modular function design: simple interfaces that still support powerful functionality.
-
-4. An open-source project that can translate itself.
-
-5. Translating other open-source projects is not hard.
-
-6. [live2d](https://github.com/fghrsh/live2d_demo) decoration feature (disabled by default; requires changes to `config.py`).
-
-7. Added MOSS large language model support
-
-8. OpenAI image generation
-
-9. OpenAI audio analysis and summarization
-
-10. Full-text LaTeX proofreading and correction
-
-## Version:
-- version 3.5 (TODO): call all of this project's function plugins with natural language (high priority)
-- version 3.4 (TODO): improve multi-threading support for local large models
-- version 3.3: added Internet information synthesis
-- version 3.2: function plugins support more parameter interfaces (conversation saving, interpreting code in any language + querying any LLM combination at the same time)
-- version 3.1: support for querying multiple gpt models at once! api2d support, load balancing across multiple apikeys
-- version 3.0: support for chatglm and other small llms
-- version 2.6: restructured the plugin architecture, improved interactivity, added more plugins
-- version 2.5: self-updating; fixed overly long text and token overflow when summarizing a whole project
-- version 2.4: (1) added full-text PDF translation; (2) added switching the position of the input area; (3) added a vertical layout option; (4) optimized multi-threaded function plugins.
-- version 2.3: improved multi-threaded interactivity
-- version 2.2: function plugins support hot reload
-- version 2.1: collapsible layout
-- version 2.0: introduced modular function plugins
-- version 1.0: basic functions
-
-gpt_academic developer QQ group 2: 610599535
-
-- Known issues
-  - Some browser translation plugins interfere with the frontend of this software
-  - A gradio version that is too new or too old can cause a variety of failures
-
-## References and learning
-
-```
-The design borrows from many excellent projects, mainly:
-
-# Project 1: Tsinghua ChatGLM-6B:
-https://github.com/THUDM/ChatGLM-6B
-
-# Project 2: Tsinghua JittorLLMs:
-https://github.com/Jittor/JittorLLMs
-
-# Project 3: Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# Project 4: ChuanhuChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-# Project 5: ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# More:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
diff --git a/docs/README.md.Portuguese.md b/docs/README.md.Portuguese.md
deleted file mode 100644
index 4a3aba0862..0000000000
--- a/docs/README.md.Portuguese.md
+++ /dev/null
@@ -1,324 +0,0 @@
-> **Note**
->
-> When installing dependencies, strictly select the versions **specified** in requirements.txt.
->
-> `pip install -r requirements.txt`
->
-
-# GPT Academic Optimization (GPT Academic)
-
-**If you like this project, please give it a Star. If you have created more useful academic shortcuts or function plugins, feel free to open an issue or pull request. We also have READMEs in [English|](README_EN.md)[Japanese|](README_JP.md)[Korean|](https://github.com/mldljyh/ko_gpt_academic)[Russian|](README_RS.md)[French](README_FR.md) translated by this project itself.
-To translate this project into any language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
-
-> **Note**
->
-> 1. Please note that only function plugins (buttons) marked in **red** can read files; some plugins are located in the **drop-down menu** in the plugin area. 
We also welcome and handle any new plugin PRs with the **highest priority**!
->
-> 2. The functions of each file in this project are detailed in [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A), a self-analysis of the project generated by GPT; it can also be regenerated at any time by clicking the related plugins. Frequently asked questions are summarized in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installation instructions](#installation).
->
-> 3. This project is compatible with and encourages the use of domestic language models such as chatglm, RWKV, Pangu, etc. Multiple api-keys can coexist and can be written in the configuration file as `API_KEY="openai-key1,openai-key2,api2d-key3"`. When you need to change `API_KEY` temporarily, just type the temporary `API_KEY` in the input area and press Enter for it to take effect.
-
- 
-Functionality | Description
--- | ---
-One-click polishing | Supports one-click polishing and one-click grammar checking of papers
-One-click Chinese-English translation | One-click Chinese-English translation
-One-click code explanation | Display code, explain code, generate code, add comments to code
-[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys
-Modular design | Supports powerful custom [function plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions); plugins support [hot reload](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
-[Self-program profiling](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] [One-click understanding](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of this project's source code
-[Program profiling](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One click to analyze the tree of other Python/C/C++/Java/Lua/... projects
-Paper reading, paper [translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] One click to interpret a full LaTeX/PDF paper and generate an abstract
-Full LATEX translation, polishing | [Function plugin] One click to translate or polish a LATEX paper
-Batch comment generation | [Function plugin] One-click batch generation of function comments
-Markdown [Chinese-English translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Function plugin] Have you seen the README in the 5 languages above?
-Chat analysis report generation | [Function plugin] Automatically generates a summary report after running
-[Full PDF paper translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] Extracts the title and abstract of a PDF paper and translates the full text (multi-threaded)
-arXiv assistant | [Function plugin] Enter an arXiv article url to translate the abstract + download the PDF
-Google Scholar integration assistant | [Function plugin] Give any Google Scholar search page URL and let GPT write your [related works](https://www.bilibili.com/video/BV1GP411U7Az/)
-Internet information aggregation + GPT | [Function plugin] One click to have GPT fetch information from the Internet and then answer questions, so the information never goes stale
-Formula/image/table display | Shows formulas in both rendered and [TEX] form at the same time; supports formula and code highlighting
-Multi-threaded plugin support | Supports multi-threaded calls to chatgpt; one click to process [large volumes of text](https://www.bilibili.com/video/BV1FT411H7c5/) or programs
-Dark gradio theme | Append ``` /?__theme=dark``` to the end of the browser url to activate the dark theme
-[Support for multiple LLM models](https://www.bilibili.com/video/BV1wT411p7yf), support for the new API2D interface | Being served simultaneously by GPT3.5, GPT4, [THU Chatglm](https://github.com/THUDM/ChatGLM-6B) and [Fudan Moss](https://github.com/OpenLMLab/MOSS) must feel great, right? 
-More built-in LLM models, support for [huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) | Adds a Newbing interface (New Bing); introduces support for THU [JittorLLMs](https://github.com/Jittor/JittorLLMs), LLaMA, RWKV and Pangu alpha
-More new features on display (image generation, etc.) ... | See the end of this document ...
-
- 
-
-- New interface (change the LAYOUT option in `config.py` to switch between a left/right layout and a top/bottom layout)
-
- -
- All buttons are dynamically generated by reading functional.py; you can add custom functions at will, freeing up your clipboard
-
- -
- 
-
-- Proofreading/error correction
-
-
- -
- 
-
-- If the output contains formulas, they are displayed in both tex and rendered form at the same time, which is convenient for copying and reading
-
-
- -
- -- Don't want to read the project code? Just show the whole project to chatgpt - - -
- -
- -- Mix the use of multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) - - -
- -
- ---- -# Instalação -## Installation-Method 1: Run directly (Windows, Linux or MacOS) - -1. Download the project - -```sh -git clone https://github.com/binary-husky/gpt_academic.git -cd gpt_academic -``` - -2. Configure the API KEY - -In `config.py`, configure API KEY and other settings, [Special Network Environment Settings] (https://github.com/binary-husky/gpt_academic/issues/1). - -(P.S. When the program runs, it will first check whether there is a private configuration file named `config_private.py`, and use the configuration in it to cover the configuration with the same name in `config.py`. Therefore, if you can understand our configuration reading logic, we strongly recommend that you create a new configuration file named `config_private.py` next to `config.py`, and transfer (copy) the configuration in `config.py` to `config_private.py`. `config_private.py` is not controlled by git and can make your privacy information more secure. P.S. The project also supports configuring most options through `environment variables`. The writing format of environment variables is referenced to the `docker-compose` file. Reading priority: `environment variable` > `config_private.py` > `config.py`) - - -3. Install dependencies - -```sh -# (Option I: for those familiar with python)(python version is 3.9 or above, the newer the better), note: use the official pip source or the Alibaba pip source. Temporary solution for changing source: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -python -m pip install -r requirements.txt - -# (Option II: for those who are unfamiliar with python) use anaconda, the steps are also similar (https://www.bilibili.com/video/BV1rc411W7Dr): -conda create -n gptac_venv python=3.11 # create anaconda environment -conda activate gptac_venv # activate anaconda environment -python -m pip install -r requirements.txt # This step is the same as the pip installation step -``` - -
If you need to support Tsinghua ChatGLM / Fudan MOSS as the backend, click to expand here -

- -[Optional Step] If you need to support Tsinghua ChatGLM / Fudan MOSS as the backend, you need to install more dependencies (prerequisite: familiar with Python + used Pytorch + computer configuration is strong): -```sh -# 【Optional Step I】support Tsinghua ChatGLM。Tsinghua ChatGLM Note: If you encounter a "Call ChatGLM fails cannot load ChatGLM parameters normally" error, refer to the following: 1: The default installed is torch+cpu version, and using cuda requires uninstalling torch and reinstalling torch+cuda; 2: If the model cannot be loaded due to insufficient computer configuration, you can modify the model accuracy in request_llms/bridge_chatglm.py and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llms/requirements_chatglm.txt - -# 【Optional Step II】support Fudan MOSS -python -m pip install -r request_llms/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # Note: When executing this line of code, you must be in the project root path - -# 【Optional Step III】Make sure that the AVAIL_LLM_MODELS in the config.py configuration file contains the expected model. Currently, all supported models are as follows (jittorllms series currently only supports docker solutions): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] -``` - -

-
- - -4. Run - -```sh -python main.py -```5. Plugin de Função de Teste -``` -- Função de modelo de plug-in de teste (exige que o GPT responda ao que aconteceu hoje na história), você pode usar esta função como modelo para implementar funções mais complexas - Clique em "[Função de plug-in de modelo de demonstração] O que aconteceu hoje na história?" -``` - -## Instalação - Método 2: Usando o Docker - -1. Apenas ChatGPT (recomendado para a maioria das pessoas) - -``` sh -git clone https://github.com/binary-husky/gpt_academic.git # Baixar o projeto -cd gpt_academic # Entrar no caminho -nano config.py # Editar config.py com qualquer editor de texto configurando "Proxy", "API_KEY" e "WEB_PORT" (por exemplo, 50923), etc. -docker build -t gpt-academic . # Instale - -# (Ùltima etapa - escolha 1) Dentro do ambiente Linux, é mais fácil e rápido usar `--net=host` -docker run --rm -it --net=host gpt-academic -# (Última etapa - escolha 2) Em ambientes macOS/windows, você só pode usar a opção -p para expor a porta do contêiner (por exemplo, 50923) para a porta no host -docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic -``` - -2. ChatGPT + ChatGLM + MOSS (conhecimento de Docker necessário) - -``` sh -# Edite o arquivo docker-compose.yml, remova as soluções 1 e 3, mantenha a solução 2, e siga as instruções nos comentários do arquivo -docker-compose up -``` - -3. ChatGPT + LLAMA + Pangu + RWKV (conhecimento de Docker necessário) -``` sh -# Edite o arquivo docker-compose.yml, remova as soluções 1 e 2, mantenha a solução 3, e siga as instruções nos comentários do arquivo -docker-compose up -``` - - -## Instalação - Método 3: Outros Métodos de Implantação - -1. Como usar URLs de proxy inverso/microsoft Azure API -Basta configurar o API_URL_REDIRECT de acordo com as instruções em `config.py`. - -2. Implantação em servidores em nuvem remotos (requer conhecimento e experiência de servidores em nuvem) -Acesse [Wiki de implementação remota do servidor em nuvem](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97) - -3. Usando a WSL2 (sub-sistema do Windows para Linux) -Acesse [Wiki da implantação da WSL2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2) - -4. Como executar em um subdiretório (ex. `http://localhost/subpath`) -Acesse [Instruções de execução FastAPI](docs/WithFastapi.md) - -5. Execute usando o docker-compose -Leia o arquivo docker-compose.yml e siga as instruções. - -# Uso Avançado -## Customize novos botões de acesso rápido / plug-ins de função personalizados - -1. Personalizar novos botões de acesso rápido (atalhos acadêmicos) -Abra `core_functional.py` em qualquer editor de texto e adicione os seguintes itens e reinicie o programa (Se o botão já foi adicionado e pode ser visto, prefixos e sufixos são compatíveis com modificações em tempo real e não exigem reinício do programa para ter efeito.) -Por exemplo, -``` -"Super Eng:": { -  # Prefixo, será adicionado antes da sua entrada. Por exemplo, para descrever sua solicitação, como tradução, explicação de código, polimento, etc. -  "Prefix": "Por favor, traduza o seguinte conteúdo para chinês e use uma tabela em Markdown para explicar termos próprios no texto: \n \n", - -  # Sufixo, será adicionado após a sua entrada. Por exemplo, emparelhado com o prefixo, pode colocar sua entrada entre aspas. -  "Suffix": "", -}, -``` -
- -
- 
-
-2. Custom function plugins
-
-Write powerful function plugins to carry out tasks you want, including ones you did not think possible.
-The overall difficulty of writing and debugging plugins in this project is low; if you have some basic Python knowledge, you can implement your own functions on top of the template we provide.
-For more details, see the [Function Plugin Guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
-
----
-# Latest update
-## New feature dynamics
-
-1. Conversation saving. Calling the "Save current conversation" function plugin saves the current conversation as a readable, restorable html file; calling "Load conversation history archive" in the plugin area (drop-down menu) restores a previous conversation. Tip: clicking "Load conversation history archive" without specifying a file lets you view the cached html history archives, and clicking "Delete all local conversation history records" deletes all html archive caches.
- -
- 
-
-2. Report generation. Most plugins generate a work report after execution finishes.
- - - -
- 
-3. Modular function design: simple interfaces that support powerful functionality.
- - -
- 
-4. This is an open-source project that can "translate itself".
- -
- 
-5. Translating other open-source projects is simple.
- -
- -
- -
- 
-6. Decorative features for [live2d](https://github.com/fghrsh/live2d_demo) (disabled by default; requires modifying `config.py`)
- -
- 
-7. MOSS language model support
- -
- 
-8. OpenAI image generation
- -
- 
-9. OpenAI audio analysis and summarization
- -
- 
-10. LaTeX proofreading and error correction.
- -
- 
-
-## Version:
-- Version 3.5 (Todo): use natural language to call all of this project's functions (high priority)
-- Version 3.4 (Todo): improve multi-threading support for local chatglm
-- Version 3.3: + built-in Internet functions
-- Version 3.2: plugins support more parameter interfaces (conversation saving, interpreting code in any language, asking arbitrary LLM combinations at the same time)
-- Version 3.1: support for querying several gpt models simultaneously! Support for api2d and load balancing across multiple api keys
-- Version 3.0: support for chatglm and other small LLMs
-- Version 2.6: refactored the plugin structure, improved interactivity, added more plugins
-- Version 2.5: self-updating; fixed overly long text and token overflow when compiling large projects
-- Version 2.4: (1) added full-text PDF translation; (2) added switching the position of the input area; (3) added a vertical layout option; (4) optimized multi-threaded plugins.
-- Version 2.3: improved multi-threaded interactivity
-- Version 2.2: plugins support hot reload
-- Version 2.1: collapsible layout
-- Version 2.0: introduced modular function plugins
-- Version 1.0: basic functionality
-
-gpt_academic developers QQ group 2: 610599535
-
-- Known issues
-  - Translation extensions in some browsers interfere with the frontend of this software
-  - A Gradio version that is too new or too old causes a variety of errors
-
-## References and learning
-
-```
-The code draws on many excellent projects, mainly:
-
-# Project 1: Tsinghua ChatGLM-6B:
-https://github.com/THUDM/ChatGLM-6B
-
-# Project 2: Tsinghua JittorLLMs:
-https://github.com/Jittor/JittorLLMs
-
-# Project 3: Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# Project 4: ChuanhuChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-# Project 5: ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# More:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
diff --git a/docs/README_EN.md b/docs/README_EN.md
deleted file mode 100644
index 029186c718..0000000000
--- a/docs/README_EN.md
+++ /dev/null
@@ -1,322 +0,0 @@
-> **Note**
->
-> This English README is automatically generated by the markdown translation plugin in this project, and may not be 100% correct.
->
-> When installing dependencies, **please strictly select the versions** specified in requirements.txt.
->
-> `pip install -r requirements.txt`
-
-# GPT Academic Optimization (GPT Academic)
-
-**If you like this project, please give it a Star. If you've come up with more useful academic shortcuts or functional plugins, feel free to open an issue or pull request.
-To translate this project into an arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).**
-
-> Note:
->
-> 1. Please note that only the function plugins (buttons) marked in **red** support reading files. Some plugins are in the **drop-down menu** in the plugin area. We welcome and process any new plugins with the **highest priority**!
-> 2. The function of each file in this project is detailed in the self-translation analysis [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). 
With version iteration, you can also click the relevant function plugins at any time to call GPT to regenerate the project's self-analysis report. Common questions are summarized in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installation method](#installation).
-> 3. This project is compatible with, and encourages trying, domestic large language models such as chatglm, RWKV, Pangu, etc. Multiple API keys can coexist and are filled into the configuration file like `API_KEY="openai-key1,openai-key2,api2d-key3"`. To change `API_KEY` temporarily, enter the temporary `API_KEY` in the input area and press Enter to submit; it takes effect immediately.
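Because several keys are load-balanced across requests (see version 3.1 in the changelog below), the pool is just a comma-separated string. As a minimal sketch (an illustration only, not the project's actual key-handling code), such a pool can be split and rotated like so:

```python
# Sketch only: splitting a comma-separated API_KEY pool and drawing keys
# round-robin, one per request. Not the project's actual implementation.
import itertools

API_KEY = "openai-key1,openai-key2,api2d-key3"   # format from the note above

key_pool = [k.strip() for k in API_KEY.split(",") if k.strip()]
next_key = itertools.cycle(key_pool)             # endless round-robin iterator

for request_id in range(4):
    print(request_id, next(next_key))            # each request draws the next key
```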
- -
-Function | Description
--- | ---
-One-click polishing | Supports one-click polishing and one-click searching for grammar errors in papers.
-One-click Chinese-English translation | One-click Chinese-English translation.
-One-click code interpretation | Displays, explains, generates, and adds comments to code.
-[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys.
-Modular design | Supports custom powerful [function plug-ins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions); plug-ins support [hot updates](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
-[Self-program profiling](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plug-in] [One-click understanding](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of this project's source code
-[Program profiling](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plug-in] One-click profiling of other project trees in Python/C/C++/Java/Lua/...
-Reading papers, [translating](https://www.bilibili.com/video/BV1KT411x7Wn) papers | [Function plug-in] One-click interpretation of latex/pdf full-text papers and generation of abstracts.
-Latex full-text [translation](https://www.bilibili.com/video/BV1nk4y1Y7Js/), [polishing](https://www.bilibili.com/video/BV1FT411H7c5/) | [Function plug-in] One-click translation or polishing of latex papers.
-Batch annotation generation | [Function plug-in] One-click batch generation of function annotations.
-Markdown [Chinese-English translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Function plug-in] Have you seen the [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in the five languages above?
-Chat analysis report generation | [Function plug-in] Automatically generates a summary report after running.
-[PDF full-text translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plug-in] PDF paper: extract title & abstract + translate the full text (multi-threaded).
-[Arxiv Assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plug-in] Enter an arxiv article URL to translate the abstract and download the PDF with one click.
-[Google Scholar Integration Assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Function plug-in] Given any Google Scholar search page URL, let GPT help you [write related works](https://www.bilibili.com/video/BV1GP411U7Az/).
-Internet information aggregation + GPT | [Function plug-in] One-click: [let GPT fetch information from the Internet first](https://www.bilibili.com/video/BV1om4y127ck), then answer questions, so the information is never outdated.
-Formula/image/table display | Can display formulas in both [tex form and rendered form](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png); supports formulas and code highlighting.
-Multi-threaded function plug-in support | Supports multi-threaded calling of chatgpt; one-click processing of [massive texts](https://www.bilibili.com/video/BV1FT411H7c5/) or programs.
-Dark gradio [theme](https://github.com/binary-husky/gpt_academic/issues/173) at startup | Add ```/?__theme=dark``` after the browser URL to switch to the dark theme.
-[Multiple LLM models](https://www.bilibili.com/video/BV1wT411p7yf) support, [API2D](https://api2d.com/) interface support | The feeling of being served by GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B), and [Fudan MOSS](https://github.com/OpenLMLab/MOSS) at the same time must be great, right?
-More LLM model access, support for [huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) | Added the Newbing interface (New Bing); introduced Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs) to support [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) and [Panguα](https://openi.org.cn/pangu/)
-More new feature displays (image generation, etc.) | See the end of this document for more...
-
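As a concrete example of the dark-theme row above (a usage sketch; 50923 is just the example WEB_PORT used later in this README, so adjust it to your own setup):

```sh
# Append the documented query parameter to the app URL to get the dark theme.
xdg-open "http://localhost:50923/?__theme=dark"   # Linux; use `open` on macOS
```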
- -- New interface (modify the LAYOUT option in `config.py` to switch between "left and right layout" and "up and down layout") -
- -
- All buttons are dynamically generated by reading `functional.py`, and you can freely add custom functions, freeing you from the clipboard. -
- -
- -- Polishing/correction -
- -
- -- If the output contains formulas, they will be displayed in both `tex` source and rendered form, making them easy to copy and read. -
- -
- -- Tired of reading the project code? ChatGPT can explain it all. -
- -
- -- Multiple large language models are mixed, such as ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4. -
- -
- ---- -# Installation -## Method 1: Directly running (Windows, Linux or MacOS) - -1. Download the project -```sh -git clone https://github.com/binary-husky/gpt_academic.git -cd gpt_academic -``` - -2. Configure the API_KEY - -Configure the API KEY in `config.py`, [special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1). - -(P.S. When the program is running, it will first check if there is a private configuration file named `config_private.py` and use the configurations in it to override the same configurations in `config.py`. Therefore, if you can understand our configuration reading logic, we strongly recommend that you create a new configuration file named `config_private.py` next to `config.py` and transfer (copy) the configurations in `config.py` to `config_private.py`. `config_private.py` is not controlled by git and can make your private information more secure. P.S. The project also supports configuring most options through `environment variables`. Please refer to the format of `docker-compose` file when writing. Reading priority: `environment variables` > `config_private.py` > `config.py`) - - -3. Install the dependencies -```sh -# (Option I: If familiar with python) (python version 3.9 or above, the newer the better), note: use official pip source or Ali pip source, temporary switching method: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -python -m pip install -r requirements.txt - -# (Option II: If not familiar with python) Use anaconda, the steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr): -conda create -n gptac_venv python=3.11 # create anaconda environment -conda activate gptac_venv # activate anaconda environment -python -m pip install -r requirements.txt # this step is the same as pip installation -``` - -
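To make the configuration precedence described above concrete, here is a minimal sketch of a reader that honors it (an illustration of the documented rules, not the project's actual loader):

```python
# Minimal sketch of the documented precedence:
# environment variables > config_private.py > config.py.
import importlib
import os

def read_single_conf(name, default=None):
    if name in os.environ:
        return os.environ[name]                   # 1. environment variables win (note: they arrive as strings)
    try:
        private = importlib.import_module("config_private")
        if hasattr(private, name):                # 2. then the git-ignored private file
            return getattr(private, name)
    except ImportError:
        pass
    config = importlib.import_module("config")    # 3. finally the tracked defaults
    return getattr(config, name, default)

print(read_single_conf("WEB_PORT", default=7860))
```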
If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, click to expand -

- -[Optional step] If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, you need to install more dependencies (prerequisites: familiar with Python + used Pytorch + computer configuration is strong enough): -```sh -# [Optional Step I] Support Tsinghua ChatGLM. Tsinghua ChatGLM remarks: if you encounter the "Call ChatGLM fail cannot load ChatGLM parameters" error, refer to this: 1: The default installation above is torch + cpu version, to use cuda, you need to uninstall torch and reinstall torch + cuda; 2: If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llms/bridge_chatglm.py, and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code = True) -python -m pip install -r request_llms/requirements_chatglm.txt - -# [Optional Step II] Support Fudan MOSS -python -m pip install -r request_llms/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # When executing this line of code, you must be in the root directory of the project - -# [Optional Step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file includes the expected models. Currently supported models are as follows (the jittorllms series only supports the docker solution for the time being): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] -``` - -
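The precision downgrade mentioned in Optional Step I amounts to a one-line swap in `request_llms/bridge_chatglm.py` (a sketch of the change described above; surrounding code omitted):

```python
# Swap the full-precision ChatGLM checkpoint for the int4-quantized one when
# the local machine cannot load the default model (per the note above).
from transformers import AutoTokenizer

# Default, full-precision checkpoint:
# tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)

# Quantized checkpoint for machines with limited memory:
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
```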

-
- -
-4. Run it
-```sh
-python main.py
-```
-
-5. Test Function Plugin
-```
-- Test the function plugin template function (asks GPT what happened in history on this day), which you can use as a template to implement more complex functions
-  Click "[Function Plugin Template Demo] Today in History"
-```
-
-## Installation - Method 2: Using Docker
-
-1. ChatGPT Only (Recommended for Most People)
-
-``` sh
-git clone https://github.com/binary-husky/gpt_academic.git  # Download project
-cd gpt_academic                                             # Enter path
-nano config.py               # Edit config.py with any text editor, configure "Proxy", "API_KEY" and "WEB_PORT" (e.g. 50923), etc.
-docker build -t gpt-academic .                              # Install
-
-#(Last step - option 1) In a Linux environment, use `--net=host` for convenience and speed.
-docker run --rm -it --net=host gpt-academic
-#(Last step - option 2) In a macOS/Windows environment, only the -p option can be used to expose the container's port (e.g. 50923) to the host's port.
-docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
-```
-
-2. ChatGPT + ChatGLM + MOSS (Requires Docker Knowledge)
-
-``` sh
-# Modify docker-compose.yml: delete Plan 1 and Plan 3, keep Plan 2. Then modify the configuration of Plan 2 in docker-compose.yml, following the comments in the file.
-docker-compose up
-```
-
-3. ChatGPT + LLAMA + Pangu + RWKV (Requires Docker Knowledge)
-
-``` sh
-# Modify docker-compose.yml: delete Plan 1 and Plan 2, keep Plan 3. Then modify the configuration of Plan 3 in docker-compose.yml, following the comments in the file.
-docker-compose up
-```
-
-## Installation - Method 3: Other Deployment Options
-
-1. How to Use a Reverse Proxy URL / Microsoft Azure Cloud API
-Configure API_URL_REDIRECT according to the instructions in `config.py`.
-
-2. Deploy to a Remote Server (Requires Knowledge and Experience with Cloud Servers)
-Please visit [Deployment Wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
-
-3. Using WSL2 (Windows Subsystem for Linux)
-Please visit [Deployment Wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
-
-4. How to Run Under a Subpath (e.g. `http://localhost/subpath`)
-Please visit [FastAPI Running Instructions](docs/WithFastapi.md)
-
-5. Using docker-compose to Run
-Read docker-compose.yml and follow the prompts in it.
-
----
-# Advanced Usage
-## Custom New Shortcut Buttons / Custom Function Plugins
-
-1. Custom New Shortcut Buttons (Academic Hotkeys)
-Open `core_functional.py` with any text editor, add an entry as follows, and restart the program. (If the button has already been added successfully and is visible, both the prefix and the suffix can be hot-modified without restarting the program.)
-For example,
-```
-"Super English-to-Chinese": {
-    # Prefix, which will be added before your input. For example, used to describe your request, such as translation, code explanation, polishing, etc.
-    "Prefix": "Please translate the following content into Chinese and then use a markdown table to explain the proprietary terms that appear in the text:\n\n",
-
-    # Suffix, which is added after your input. For example, combined with the prefix, it can surround your input with quotation marks.
-    "Suffix": "",
-},
-```
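What the program does with such an entry is plain string concatenation around your input; a hypothetical helper (the function name is invented here for illustration, not the project's actual API) makes the mechanics obvious:

```python
# Hypothetical helper showing how a core_functional.py entry wraps the input.
def apply_button(entry: dict, user_input: str) -> str:
    return entry["Prefix"] + user_input + entry["Suffix"]

entry = {
    "Prefix": "Please translate the following content into Chinese:\n\n",
    "Suffix": "",
}
print(apply_button(entry, "GPT Academic makes polishing papers easier."))
```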
- -
- -2. Custom Function Plugins - -Write powerful function plugins to perform any task you can think of, even those you cannot think of. -The difficulty of plugin writing and debugging in this project is very low. As long as you have a certain knowledge of Python, you can implement your own plug-in functions based on the template we provide. -For details, please refer to the [Function Plugin Guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97). - ---- -# Latest Update -## New Feature Dynamics -1. Conversation saving function. Call `Save current conversation` in the function plugin area to save the current conversation as a readable and recoverable HTML file. In addition, call `Load conversation history archive` in the function plugin area (dropdown menu) to restore previous sessions. Tip: Clicking `Load conversation history archive` without specifying a file will display the cached history of HTML archives, and clicking `Delete all local conversation history` will delete all HTML archive caches. - -
- -
- - -2. Report generation. Most plugins will generate work reports after execution. - -
- - - -
- - -3. Modular function design with simple interfaces that support powerful functions. - -
- - -
- - -4. This is an open-source project that can "self-translate". - -
- -
- -5. Translating other open-source projects is a piece of cake. - -
- -
- -
- -
- -6. A small feature decorated with [live2d](https://github.com/fghrsh/live2d_demo) (disabled by default, need to modify `config.py`). - -
- -
- -7. Added MOSS large language model support. -
- -
- -8. OpenAI image generation. -
- -
- -9. OpenAI audio parsing and summarization. -
- -
- -10. Full-text proofreading and error correction of LaTeX. -
- -
- - -## Versions: -- version 3.5(Todo): Use natural language to call all function plugins of this project (high priority). -- version 3.4(Todo): Improve multi-threading support for chatglm local large models. -- version 3.3: +Internet information integration function. -- version 3.2: Function plugin supports more parameter interfaces (save conversation function, interpretation of any language code + simultaneous inquiry of any LLM combination). -- version 3.1: Support simultaneous inquiry of multiple GPT models! Support api2d, and support load balancing of multiple apikeys. -- version 3.0: Support chatglm and other small LLM models. -- version 2.6: Refactored plugin structure, improved interactivity, and added more plugins. -- version 2.5: Self-updating, solving the problem of text overflow and token overflow when summarizing large engineering source codes. -- version 2.4: (1) Added PDF full-text translation function; (2) Added the function of switching the position of the input area; (3) Added vertical layout option; (4) Optimized multi-threading function plugins. -- version 2.3: Enhanced multi-threading interactivity. -- version 2.2: Function plugin supports hot reloading. -- version 2.1: Collapsible layout. -- version 2.0: Introduction of modular function plugins. -- version 1.0: Basic functions. - -gpt_academic Developer QQ Group-2: 610599535 - -- Known Issues - - Some browser translation plugins interfere with the front-end operation of this software. - - Both high and low versions of gradio can lead to various exceptions. - -## Reference and Learning - -``` -Many other excellent designs have been referenced in the code, mainly including: - -# Project 1: THU ChatGLM-6B: -https://github.com/THUDM/ChatGLM-6B - -# Project 2: THU JittorLLMs: -https://github.com/Jittor/JittorLLMs - -# Project 3: Edge-GPT: -https://github.com/acheong08/EdgeGPT - -# Project 4: ChuanhuChatGPT: -https://github.com/GaiZhenbiao/ChuanhuChatGPT - -# Project 5: ChatPaper: -https://github.com/kaixindelele/ChatPaper - -# More: -https://github.com/gradio-app/gradio -https://github.com/fghrsh/live2d_demo -``` \ No newline at end of file diff --git a/docs/README_FR.md b/docs/README_FR.md deleted file mode 100644 index 62d81ebfc6..0000000000 --- a/docs/README_FR.md +++ /dev/null @@ -1,323 +0,0 @@ -> **Note** -> -> Ce fichier README est généré automatiquement par le plugin de traduction markdown de ce projet et n'est peut - être pas correct à 100%. -> -> During installation, please strictly select the versions **specified** in requirements.txt. -> -> `pip install -r requirements.txt` -> - -# Optimisation académique GPT (GPT Academic) - -**Si vous aimez ce projet, veuillez lui donner une étoile. Si vous avez trouvé des raccourcis académiques ou des plugins fonctionnels plus utiles, n'hésitez pas à ouvrir une demande ou une pull request. -Pour traduire ce projet dans une langue arbitraire avec GPT, lisez et exécutez [`multi_language.py`](multi_language.py) (expérimental). - -> **Note** -> -> 1. Veuillez noter que seuls les plugins de fonctions (boutons) **en rouge** prennent en charge la lecture de fichiers. Certains plugins se trouvent dans le **menu déroulant** de la zone de plugins. De plus, nous accueillons et traitons les nouvelles pull requests pour les plugins avec **la plus haute priorité**! -> -> 2. 
Les fonctions de chaque fichier de ce projet sont expliquées en détail dans l'auto-analyse [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). Avec l'itération des versions, vous pouvez également cliquer sur les plugins de fonctions pertinents et appeler GPT pour régénérer le rapport d'auto-analyse du projet à tout moment. Les FAQ sont résumées dans [le wiki](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Méthode d'installation](#installation). -> -> 3. Ce projet est compatible avec et encourage l'utilisation de grands modèles de langage nationaux tels que chatglm, RWKV, Pangu, etc. La coexistence de plusieurs clés API est prise en charge et peut être remplie dans le fichier de configuration, tel que `API_KEY="openai-key1,openai-key2,api2d-key3"`. Lorsque vous souhaitez remplacer temporairement `API_KEY`, saisissez temporairement `API_KEY` dans la zone de saisie, puis appuyez sur Entrée pour soumettre et activer. - -
- -Functionnalité | Description ---- | --- -Révision en un clic | prend en charge la révision en un clic et la recherche d'erreurs de syntaxe dans les articles -Traduction chinois-anglais en un clic | Traduction chinois-anglais en un clic -Explication de code en un clic | Affichage, explication, génération et ajout de commentaires de code -[Raccourcis personnalisés](https://www.bilibili.com/video/BV14s4y1E7jN) | prend en charge les raccourcis personnalisés -Conception modulaire | prend en charge de puissants plugins de fonction personnalisée, les plugins prennent en charge la [mise à jour à chaud](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) -[Autoscanner](https://www.bilibili.com/video/BV1cj411A7VW) | [Plug-in de fonction] [Compréhension instantanée](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) du code source de ce projet -[Analyse de programme](https://www.bilibili.com/video/BV1cj411A7VW) | [Plug-in de fonction] Analyse en un clic de la structure d'autres projets Python / C / C ++ / Java / Lua / ... -Lecture d'articles, [traduction](https://www.bilibili.com/video/BV1KT411x7Wn) d'articles | [Plug-in de fonction] Compréhension instantanée de l'article latex / pdf complet et génération de résumés -[Traduction](https://www.bilibili.com/video/BV1nk4y1Y7Js/) et [révision](https://www.bilibili.com/video/BV1FT411H7c5/) complets en latex | [Plug-in de fonction] traduction ou révision en un clic d'articles en latex -Génération de commentaires en masse | [Plug-in de fonction] Génération en un clic de commentaires de fonction en masse -Traduction [chinois-anglais](https://www.bilibili.com/video/BV1yo4y157jV/) en Markdown | [Plug-in de fonction] avez-vous vu la [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) pour les 5 langues ci-dessus? 
-Génération de rapports d'analyse de chat | [Plug-in de fonction] Génère automatiquement un rapport de résumé après l'exécution -[Traduction intégrale en pdf](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plug-in de fonction] Extraction de titre et de résumé de l'article pdf + traduction intégrale (multi-thread) -[Aide à arxiv](https://www.bilibili.com/video/BV1LM4y1279X) | [Plug-in de fonction] Entrer l'url de l'article arxiv pour traduire et télécharger le résumé en un clic -[Aide à la recherche Google Scholar](https://www.bilibili.com/video/BV19L411U7ia) | [Plug-in de fonction] Donnez l'URL de la page de recherche Google Scholar, laissez GPT vous aider à [écrire des ouvrages connexes](https://www.bilibili.com/video/BV1GP411U7Az/) -Aggrégation d'informations en ligne et GPT | [Plug-in de fonction] Permet à GPT de [récupérer des informations en ligne](https://www.bilibili.com/video/BV1om4y127ck), puis de répondre aux questions, afin que les informations ne soient jamais obsolètes -Affichage d'équations / images / tableaux | Fournit un affichage simultané de [la forme tex et de la forme rendue](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png), prend en charge les formules mathématiques et la coloration syntaxique du code -Prise en charge des plugins à plusieurs threads | prend en charge l'appel multithread de chatgpt, un clic pour traiter [un grand nombre d'articles](https://www.bilibili.com/video/BV1FT411H7c5/) ou de programmes -Thème gradio sombre en option de démarrage | Ajoutez```/?__theme=dark``` à la fin de l'URL du navigateur pour basculer vers le thème sombre -[Prise en charge de plusieurs modèles LLM](https://www.bilibili.com/video/BV1wT411p7yf), [API2D](https://api2d.com/) | Sera probablement très agréable d'être servi simultanément par GPT3.5, GPT4, [ChatGLM de Tsinghua](https://github.com/THUDM/ChatGLM-6B), [MOSS de Fudan](https://github.com/OpenLMLab/MOSS) -Plus de modèles LLM, déploiement de [huggingface](https://huggingface.co/spaces/qingxu98/gpt-academic) | Ajout prise en charge de l'interface Newbing (nouvelle bing), introduction du support de [Jittorllms de Tsinghua](https://github.com/Jittor/JittorLLMs), [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) et [Panguα](https://openi.org.cn/pangu/) -Plus de nouvelles fonctionnalités (génération d'images, etc.) ... | Voir la fin de ce document pour plus de détails ... - -
- -- New interface (change the LAYOUT option in `config.py` to switch between a "left-right layout" and a "top-bottom layout") -
- -
- All buttons are dynamically generated by reading functional.py and can easily be customized to add new functions, freeing you from the clipboard. -
- -
- -- Error correction/text polishing. -
- -
- -- If the output contains formulas, they are displayed in both tex source and rendered form for easy reading and copying. -
- -
- -- Don't feel like reading this project's code? The whole project can be explained directly by ChatGPT. -
- -
- -- Calls to a variety of large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4). -
- -
- ---- -# Installation -## Installation-Method 1: running directly (Windows, Linux or MacOS) - -1. Télécharger le projet -```sh -git clone https://github.com/binary-husky/gpt_academic.git -cd gpt_academic -``` - -2. Configuration de la clé API - -Dans `config.py`, configurez la clé API et d'autres paramètres. Consultez [Special network environment settings] (https://github.com/binary-husky/gpt_academic/issues/1). - -(P.S. Lorsque le programme est exécuté, il vérifie en premier s'il existe un fichier de configuration privé nommé `config_private.py` et remplace les paramètres portant le même nom dans `config.py` par les paramètres correspondants dans `config_private.py`. Par conséquent, si vous comprenez la logique de lecture de nos configurations, nous vous recommandons vivement de créer un nouveau fichier de configuration nommé `config_private.py` à côté de `config.py` et de transférer (copier) les configurations de `config.py`. `config_private.py` n'est pas contrôlé par Git et peut garantir la sécurité de vos informations privées. P.S. Le projet prend également en charge la configuration de la plupart des options via "variables d'environnement", le format d'écriture des variables d'environnement est référencé dans le fichier `docker-compose`. Priorité de lecture: "variables d'environnement" > `config_private.py` > `config.py`) - - -3. Installer les dépendances -```sh -# (Option I: python users instalation) (Python version 3.9 or higher, the newer the better). Note: use official pip source or ali pip source. To temporarily change the source: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -python -m pip install -r requirements.txt - -# (Option II: non-python users instalation) Use Anaconda, the steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr): -conda create -n gptac_venv python=3.11 # Create anaconda env -conda activate gptac_venv # Activate anaconda env -python -m pip install -r requirements.txt # Same step as pip instalation -``` - -
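As noted above, most of these options can also be supplied as environment variables (read with the highest priority) instead of edits to the config files; a short example, using variable names taken from the config keys mentioned in this README:

```sh
# Override configuration via environment variables; keys mirror config.py.
export API_KEY="openai-key1,openai-key2,api2d-key3"
export WEB_PORT=50923
python main.py
```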
- Click here to expand this section if you want to support THU ChatGLM/FDU MOSS as a backend. -

- -【Optional】 Si vous souhaitez prendre en charge THU ChatGLM/FDU MOSS en tant que backend, des dépendances supplémentaires doivent être installées (prérequis: compétent en Python + utilisez Pytorch + configuration suffisante de l'ordinateur): -```sh -# 【Optional Step I】 Support THU ChatGLM. Remarque sur THU ChatGLM: Si vous rencontrez l'erreur "Appel à ChatGLM échoué, les paramètres ChatGLM ne peuvent pas être chargés normalement", reportez-vous à ce qui suit: 1: La version par défaut installée est torch+cpu, si vous souhaitez utiliser cuda, vous devez désinstaller torch et réinstaller torch+cuda; 2: Si le modèle ne peut pas être chargé en raison d'une configuration insuffisante de l'ordinateur local, vous pouvez modifier la précision du modèle dans request_llms/bridge_chatglm.py, modifier AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) par AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llms/requirements_chatglm.txt - -# 【Optional Step II】 Support FDU MOSS -python -m pip install -r request_llms/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss # Note: When running this line of code, you must be in the project root path. - -# 【Optional Step III】Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the desired model. Currently, all models supported are as follows (the jittorllms series currently only supports the docker scheme): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] -``` - -

-
- - - -4. Exécution -```sh -python main.py -```5. Plugin de fonction de test -``` -- Fonction de modèle de plugin de test (requiert que GPT réponde à ce qui s'est passé dans l'histoire aujourd'hui), vous pouvez utiliser cette fonction comme modèle pour mettre en œuvre des fonctionnalités plus complexes. - Cliquez sur "[Démo de modèle de plugin de fonction] Aujourd'hui dans l'histoire" -``` - -## Installation - Méthode 2: Utilisation de Docker - -1. ChatGPT uniquement (recommandé pour la plupart des gens) - -``` sh -git clone https://github.com/binary-husky/gpt_academic.git # Télécharger le projet -cd gpt_academic # Accéder au chemin -nano config.py # Editez config.py avec n'importe quel éditeur de texte en configurant "Proxy", "API_KEY" et "WEB_PORT" (p. ex. 50923) -docker build -t gpt-academic . # Installer - -# (Dernière étape - choix1) Dans un environnement Linux, l'utilisation de `--net=host` est plus facile et rapide -docker run --rm -it --net=host gpt-academic -# (Dernière étape - choix 2) Dans un environnement macOS/Windows, seule l'option -p permet d'exposer le port du récipient (p.ex. 50923) au port de l'hôte. -docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic -``` - -2. ChatGPT + ChatGLM + MOSS (il faut connaître Docker) - -``` sh -# Modifiez docker-compose.yml, supprimez la solution 1 et la solution 3, conservez la solution 2. Modifiez la configuration de la solution 2 dans docker-compose.yml en suivant les commentaires. -docker-compose up -``` - -3. ChatGPT + LLAMA + PanGu + RWKV (il faut connaître Docker) -``` sh -# Modifiez docker-compose.yml, supprimez la solution 1 et la solution 2, conservez la solution 3. Modifiez la configuration de la solution 3 dans docker-compose.yml en suivant les commentaires. -docker-compose up -``` - - -## Installation - Méthode 3: Autres méthodes de déploiement - -1. Comment utiliser une URL de proxy inversé / Microsoft Azure Cloud API -Configurez simplement API_URL_REDIRECT selon les instructions de config.py. - -2. Déploiement distant sur un serveur cloud (connaissance et expérience des serveurs cloud requises) -Veuillez consulter [Wiki de déploiement-1] (https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97). - -3. Utilisation de WSL2 (sous-système Windows pour Linux) -Veuillez consulter [Wiki de déploiement-2] (https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2). - -4. Comment exécuter sous un sous-répertoire (tel que `http://localhost/subpath`) -Veuillez consulter les [instructions d'exécution de FastAPI] (docs/WithFastapi.md). - -5. Utilisation de docker-compose -Veuillez lire docker-compose.yml, puis suivre les instructions fournies. - -# Utilisation avancée -## Personnalisation de nouveaux boutons pratiques / Plugins de fonctions personnalisées - -1. Personnalisation de nouveaux boutons pratiques (raccourcis académiques) -Ouvrez core_functional.py avec n'importe quel éditeur de texte, ajoutez une entrée comme suit, puis redémarrez le programme. (Si le bouton a été ajouté avec succès et est visible, le préfixe et le suffixe prennent en charge les modifications à chaud et ne nécessitent pas le redémarrage du programme pour prendre effet.) -Par exemple -``` -"Super coller sens": { - # Préfixe, sera ajouté avant votre entrée. 
Par exemple, pour décrire votre demande, telle que traduire, expliquer du code, faire la mise en forme, etc. - "Prefix": "Veuillez traduire le contenu suivant en chinois, puis expliquer chaque terme proprement nommé qui y apparaît avec un tableau markdown:\n\n", - - # Suffixe, sera ajouté après votre entrée. Par exemple, en utilisant le préfixe, vous pouvez entourer votre contenu d'entrée de guillemets. - "Suffix": "", -}, -``` -
- -
- -2. Plugins de fonctions personnalisées - -Écrivez des plugins de fonctions puissants pour effectuer toutes les tâches que vous souhaitez ou que vous ne pouvez pas imaginer. -Les plugins de ce projet ont une difficulté de programmation et de débogage très faible. Si vous avez des connaissances de base en Python, vous pouvez simuler la fonctionnalité de votre propre plugin en suivant le modèle que nous avons fourni. -Veuillez consulter le [Guide du plugin de fonction] (https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) pour plus de détails. - ---- -# Latest Update - -## Nouvelles fonctionnalités en cours de déploiement. - -1. Fonction de sauvegarde de la conversation. -Appelez simplement "Enregistrer la conversation actuelle" dans la zone de plugin de fonction pour enregistrer la conversation actuelle en tant que fichier html lisible et récupérable. De plus, dans la zone de plugin de fonction (menu déroulant), appelez "Charger une archive de l'historique de la conversation" pour restaurer la conversation précédente. Astuce : cliquer directement sur "Charger une archive de l'historique de la conversation" sans spécifier de fichier permet de consulter le cache d'archive html précédent. Cliquez sur "Supprimer tous les enregistrements locaux de l'historique de la conversation" pour supprimer le cache d'archive html. - -
- -
- -2. Report generation. Most plugins generate a work report after execution. -
- - - -
- -3. Modular function design: simple interfaces support powerful functions. -
- - -
- -4. This is an open-source project that can "translate itself". -
- -
- -5. Translating other open-source projects is no problem. -
- -
- -
- -
- -6. A small live2d decoration feature (disabled by default; requires modifying config.py). -
- -
- -7. Support for the MOSS large language model. -
- -
- -8. OpenAI image generation. -
- -
- -9. OpenAI audio analysis and summarization. -
- -
- -10. Full-text LaTeX error correction. -
- -
- - -## Versions : -- version 3.5 (À faire) : appel de toutes les fonctions de plugin de ce projet en langage naturel (priorité élevée) -- version 3.4 (À faire) : amélioration du support multi-thread de chatglm en local -- version 3.3 : Fonctionnalité intégrée d'informations d'internet -- version 3.2 : La fonction du plugin de fonction prend désormais en charge des interfaces de paramètres plus nombreuses (fonction de sauvegarde, décodage de n'importe quel langage de code + interrogation simultanée de n'importe quelle combinaison de LLM) -- version 3.1 : Prise en charge de l'interrogation simultanée de plusieurs modèles GPT ! Support api2d, équilibrage de charge multi-clé api. -- version 3.0 : Prise en charge de chatglm et autres LLM de petite taille. -- version 2.6 : Refonte de la structure des plugins, amélioration de l'interactivité, ajout de plus de plugins. -- version 2.5 : Auto-mise à jour, résolution des problèmes de texte trop long et de dépassement de jetons lors de la compilation du projet global. -- version 2.4 : (1) Nouvelle fonction de traduction de texte intégral PDF ; (2) Nouvelle fonction de permutation de position de la zone d'entrée ; (3) Nouvelle option de mise en page verticale ; (4) Amélioration des fonctions multi-thread de plug-in. -- version 2.3 : Amélioration de l'interactivité multithread. -- version 2.2 : Les plugins de fonctions peuvent désormais être rechargés à chaud. -- version 2.1 : Disposition pliable -- version 2.0 : Introduction de plugins de fonctions modulaires -- version 1.0 : Fonctionnalités de base - -gpt_academic développeur QQ groupe-2:610599535 - -- Problèmes connus - - Certains plugins de traduction de navigateur perturbent le fonctionnement de l'interface frontend de ce logiciel - - Des versions gradio trop hautes ou trop basses provoquent de nombreuses anomalies - -## Référence et apprentissage - -``` -De nombreux autres excellents projets ont été référencés dans le code, notamment : - -# Projet 1 : ChatGLM-6B de Tsinghua : -https://github.com/THUDM/ChatGLM-6B - -# Projet 2 : JittorLLMs de Tsinghua : -https://github.com/Jittor/JittorLLMs - -# Projet 3 : Edge-GPT : -https://github.com/acheong08/EdgeGPT - -# Projet 4 : ChuanhuChatGPT : -https://github.com/GaiZhenbiao/ChuanhuChatGPT - -# Projet 5 : ChatPaper : -https://github.com/kaixindelele/ChatPaper - -# Plus : -https://github.com/gradio-app/gradio -https://github.com/fghrsh/live2d_demo -``` \ No newline at end of file diff --git a/docs/README_JP.md b/docs/README_JP.md deleted file mode 100644 index 8ade71b4b8..0000000000 --- a/docs/README_JP.md +++ /dev/null @@ -1,329 +0,0 @@ -> **Note** -> -> このReadmeファイルは、このプロジェクトのmarkdown翻訳プラグインによって自動的に生成されたもので、100%正確ではない可能性があります。 -> -> When installing dependencies, please strictly choose the versions specified in `requirements.txt`. -> -> `pip install -r requirements.txt` -> - -# GPT 学术优化 (GPT Academic) - -**もしこのプロジェクトが好きなら、星をつけてください。もしあなたがより良いアカデミックショートカットまたは機能プラグインを思いついた場合、Issueをオープンするか pull request を送信してください。私たちはこのプロジェクト自体によって翻訳された[英語 |](README_EN.md)[日本語 |](README_JP.md)[한국어 |](https://github.com/mldljyh/ko_gpt_academic)[Русский |](README_RS.md)[Français](README_FR.md)のREADMEも用意しています。 -GPTを使った任意の言語にこのプロジェクトを翻訳するには、[`multi_language.py`](multi_language.py)を読んで実行してください。 (experimental)。 - -> **注意** -> -> 1. **赤色**で表示された関数プラグイン(ボタン)のみ、ファイルの読み取りをサポートしています。一部のプラグインは、プラグインエリアの**ドロップダウンメニュー**内にあります。また、私たちはどんな新しいプラグインのPRでも、**最優先**で歓迎し、処理します! -> -> 2. 
このプロジェクトの各ファイルの機能は、自己解析の詳細説明書である[`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)で説明されています。バージョンが進化するにつれて、関連する関数プラグインをいつでもクリックし、GPTを呼び出してプロジェクトの自己解析レポートを再生成することができます。よくある問題は[`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98)にまとめられています。[インストール方法](#installation)。 - -> 3. このプロジェクトは、chatglmやRWKV、パンクなど、国内の大規模自然言語モデルを利用することをサポートし、試みることを奨励します。複数のAPIキーを共存することができ、設定ファイルに`API_KEY="openai-key1,openai-key2,api2d-key3"`のように記入することができます。`API_KEY`を一時的に変更する場合は、入力エリアに一時的な`API_KEY`を入力してEnterキーを押せば、それが有効になります。 - - -
- -機能 | 説明 ---- | --- -一键校正 | 一键で校正可能、論文の文法エラーを検索することができる -一键中英翻訳 | 一键で中英翻訳可能 -一键コード解説 | コードを表示し、解説し、生成し、コードに注釈をつけることができる -[自分でカスタマイズ可能なショートカットキー](https://www.bilibili.com/video/BV14s4y1E7jN) | 自分でカスタマイズ可能なショートカットキーをサポートする -モジュール化された設計 | カスタマイズ可能な[強力な関数プラグイン](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions)をサポートし、プラグインは[ホットアップデート](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)に対応している -[自己プログラム解析](https://www.bilibili.com/video/BV1cj411A7VW) | [関数プラグイン] [一键読解](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)このプロジェクトのソースコード -プログラム解析 | [関数プラグイン] 一鍵で他のPython/C/C++/Java/Lua/...プロジェクトを分析できる -論文の読み、[翻訳](https://www.bilibili.com/video/BV1KT411x7Wn) | [関数プラグイン] LaTex/ PDF論文の全文を一鍵で読み解き、要約を生成することができる -LaTex全文[翻訳](https://www.bilibili.com/video/BV1nk4y1Y7Js/)、[校正](https://www.bilibili.com/video/BV1FT411H7c5/) | [関数プラグイン] LaTex論文の翻訳または校正を一鍵で行うことができる -一括で注釈を生成 | [関数プラグイン] 一鍵で関数に注釈をつけることができる -Markdown[中英翻訳](https://www.bilibili.com/video/BV1yo4y157jV/) | [関数プラグイン] 上記の5種類の言語の[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)を見たことがありますか? -チャット分析レポート生成 | [関数プラグイン] 実行後、自動的に概要報告書を生成する -[PDF論文全文翻訳機能](https://www.bilibili.com/video/BV1KT411x7Wn) | [関数プラグイン] PDF論文からタイトルと要約を抽出し、全文を翻訳する(マルチスレッド) -[Arxivアシスタント](https://www.bilibili.com/video/BV1LM4y1279X) | [関数プラグイン] arxiv記事のURLを入力するだけで、要約を一鍵翻訳し、PDFをダウンロードできる -[Google Scholar 総合アシスタント](https://www.bilibili.com/video/BV19L411U7ia) | [関数プラグイン] 任意のGoogle Scholar検索ページURLを指定すると、gptが[related works](https://www.bilibili.com/video/BV1GP411U7Az/)を作成する -インターネット情報収集+GPT | [関数プラグイン] まずGPTに[インターネットから情報を収集](https://www.bilibili.com/video/BV1om4y127ck)してから質問に回答させ、情報が常に最新であるようにする -数式/画像/表表示 | 数式の[tex形式とレンダリング形式](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png)を同時に表示し、数式、コードハイライトをサポートしている -マルチスレッド関数プラグインがサポートされている | chatgptをマルチスレッドで呼び出し、[大量のテキスト](https://www.bilibili.com/video/BV1FT411H7c5/)またはプログラムを一鍵で処理できる -ダークグラジオ[テーマの起動](https://github.com/binary-husky/gpt_academic/issues/173) | ブラウザのURLの後ろに```/?__theme=dark```を追加すると、ダークテーマを切り替えることができます。 -[多数のLLMモデル](https://www.bilibili.com/video/BV1wT411p7yf)がサポートされ、[API2D](https://api2d.com/)がサポートされている | 同時にGPT3.5、GPT4、[清華ChatGLM](https://github.com/THUDM/ChatGLM-6B)、[復旦MOSS](https://github.com/OpenLMLab/MOSS)に対応 -より多くのLLMモデルが接続され、[huggingfaceデプロイ](https://huggingface.co/spaces/qingxu98/gpt-academic)がサポートされている | Newbingインターフェイス(Newbing)、清華大学の[Jittorllm](https://github.com/Jittor/JittorLLMs)のサポート[LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV)と[盘古α](https://openi.org.cn/pangu/) -さらに多くの新機能(画像生成など)を紹介する... | この文書の最後に示す... -
- -- 新しいインターフェース(`config.py`のLAYOUTオプションを変更することで、「左右配置」と「上下配置」を切り替えることができます) -
- -- New interface (change the LAYOUT option in `config.py` to switch between "left-right" and "top-bottom" layouts) -
- All buttons are dynamically generated by reading functional.py, and custom functions can be freely added, freeing you from the clipboard. - -
- -
- -- Polishing/Correction - -
- -
- -- If the output contains formulas, they are displayed in both TeX and rendering forms, making it easy to copy and read. - -
- -
- -- Don't feel like looking at the project code? Just ask chatgpt directly. - -
- -
- - -- Mixed calls of multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) - -
- -
- ---- - -# Installation - -## Installation-Method 1: Directly run (Windows, Linux or MacOS) - -1. Download the project. - -```sh -git clone https://github.com/binary-husky/gpt_academic.git -cd gpt_academic -``` - -2. Configure the API_KEY. - -Configure the API KEY and other settings in `config.py` and [special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1). - -(P.S. When the program is running, it will first check if there is a private configuration file named `config_private.py`, and use the configuration in it to override the same name configuration in `config.py`. Therefore, if you can understand our configuration reading logic, we strongly recommend that you create a new configuration file named `config_private.py` next to `config.py`, and transfer (copy) the configuration in `config.py` to `config_private.py`. `config_private.py` is not controlled by git and can make your privacy information more secure. P.S. The project also supports configuring most options through `environment variables`, and the writing format of environment variables refers to the `docker-compose` file. Reading priority: `environment variables` > `config_private.py` > `config.py`) - -3. Install dependencies. - -```sh -# (Choose I: If familiar with Python)(Python version 3.9 or above, the newer the better) Note: Use the official pip source or Ali pip source. Temporary switching source method: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -python -m pip install -r requirements.txt - -# (Choose II: If not familiar with Python) Use anaconda, the steps are the same (https://www.bilibili.com/video/BV1rc411W7Dr): -conda create -n gptac_venv python=3.11 # Create anaconda environment. -conda activate gptac_venv # Activate the anaconda environment. -python -m pip install -r requirements.txt # This step is the same as the pip installation step. -``` - -
If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, click to expand. -

- -[Optional Steps] If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, you need to install more dependencies (prerequisites: familiar with Python + have used Pytorch + a strong enough machine):
-
-```sh
-# [Optional Step I] Support Tsinghua ChatGLM. Note: if you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters normally", refer to the following: 1: The default installation above is the torch+cpu version; using cuda requires uninstalling torch and reinstalling torch+cuda. 2: If the model cannot be loaded due to insufficient local configuration, you can modify the model precision in request_llms/bridge_chatglm.py, changing AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).
-python -m pip install -r request_llms/requirements_chatglm.txt
-
-# [Optional Step II] Support Fudan MOSS.
-python -m pip install -r request_llms/requirements_moss.txt
-git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss  # Note: when executing this line, you must be in the project root path.
-
-# [Optional Step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. Currently supported models are as follows (the jittorllms series currently only supports the docker solution):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
-```

-
- - - -4. Run. - -```sh -python main.py -```5. Testing Function Plugin -``` -- Test function plugin template function (requires gpt to answer what happened today in history), you can use this function as a template to implement more complex functions - Click "[Function Plugin Template Demo] Today in History" -``` - -## Installation-Methods 2: Using Docker - -1. Only ChatGPT (recommended for most people) - - ``` sh -git clone https://github.com/binary-husky/gpt_academic.git # Download project -cd gpt_academic # Enter path -nano config.py # Edit config.py with any text editor ‑ configure "Proxy," "API_KEY," "WEB_PORT" (e.g., 50923) and more -docker build -t gpt-academic . # installation - -#(Last step-Option 1) In a Linux environment, `--net=host` is more convenient and quick -docker run --rm -it --net=host gpt-academic -#(Last step-Option 2) In a macOS/windows environment, the -p option must be used to expose the container port (e.g., 50923) to the port on the host. -docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic -``` - -2. ChatGPT + ChatGLM + MOSS (requires familiarity with Docker) - -``` sh -# Modify docker-compose.yml, delete plans 1 and 3, and retain plan 2. Modify the configuration of plan 2 in docker-compose.yml, and reference the comments for instructions. -docker-compose up -``` - -3. ChatGPT + LLAMA + Pangu + RWKV (requires familiarity with Docker) -``` sh -# Modify docker-compose.yml, delete plans 1 and 2, and retain plan 3. Modify the configuration of plan 3 in docker-compose.yml, and reference the comments for instructions. -docker-compose up -``` - - -## Installation-Method 3: Other Deployment Methods - -1. How to use proxy URL/Microsoft Azure API -Configure API_URL_REDIRECT according to the instructions in `config.py`. - -2. Remote Cloud Server Deployment (requires cloud server knowledge and experience) -Please visit [Deployment Wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97) - -3. Using WSL2 (Windows Subsystem for Linux Subsystem) -Please visit [Deployment Wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2) - -4. How to run on a secondary URL (such as `http://localhost/subpath`) -Please visit [FastAPI Running Instructions](docs/WithFastapi.md) - -5. Run with docker-compose -Please read docker-compose.yml and follow the instructions provided therein. ---- -# Advanced Usage -## Customize new convenience buttons/custom function plugins - -1. Custom new convenience buttons (academic shortcut keys) -Open `core_functional.py` with any text editor, add the item as follows, and restart the program. (If the button has been added successfully and is visible, the prefix and suffix support hot modification without restarting the program.) -example: -``` -"Super English to Chinese Translation": { - # Prefix, which will be added before your input. For example, used to describe your request, such as translation, code interpretation, polish, etc. - "Prefix": "Please translate the following content into Chinese, and explain the proper nouns in the text in a markdown table one by one:\n\n", - - # Suffix, which will be added after your input. For example, in combination with the prefix, you can surround your input content with quotation marks. - "Suffix": "", -}, -``` -
- -
- -2. Custom function plugins - -Write powerful function plugins to perform any task you can and cannot think of. -The difficulty of writing and debugging plugins in this project is low, and as long as you have a certain amount of python basic knowledge, you can follow the template provided by us to achieve your own plugin functions. -For details, please refer to the [Function Plugin Guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97). - ---- -# Latest Update -## New feature dynamics. -1. ダイアログの保存機能。関数プラグインエリアで '現在の会話を保存' を呼び出すと、現在のダイアログを読み取り可能で復元可能なHTMLファイルとして保存できます。さらに、関数プラグインエリア(ドロップダウンメニュー)で 'ダイアログの履歴保存ファイルを読み込む' を呼び出すことで、以前の会話を復元することができます。Tips:ファイルを指定せずに 'ダイアログの履歴保存ファイルを読み込む' をクリックすることで、過去のHTML保存ファイルのキャッシュを表示することができます。'すべてのローカルダイアログの履歴を削除' をクリックすることで、すべてのHTML保存ファイルのキャッシュを削除できます。 -
- -
- -2. Report generation. Most plugins generate a work report after they finish running. -
- - - -
- -3. Modular function design: simple interfaces support powerful functions. -
- - -
- -4. An open-source project that can "translate itself". -
- -
- -5. Deciphering other open-source projects is easy. -
- -
- -
- -
- -6. A small decorative feature using [Live2D](https://github.com/fghrsh/live2d_demo) (disabled by default; requires modifying `config.py`). -
- -
- -7. Added support for the MOSS large language model. -
- -
- -8. OpenAI image generation. -
- -
- -9. OpenAI audio parsing and summarization. -
- -
- -10. Full-text LaTeX proofreading and correction. -
- -
- - -## バージョン: -- version 3.5(作業中):すべての関数プラグインを自然言語で呼び出すことができるようにする(高い優先度)。 -- version 3.4(作業中):chatglmのローカルモデルのマルチスレッドをサポートすることで、機能を改善する。 -- version 3.3:+Web情報の総合機能 -- version 3.2:関数プラグインでさらに多くのパラメータインターフェイスをサポートする(ダイアログの保存機能、任意の言語コードの解読+同時に任意のLLM組み合わせに関する問い合わせ) -- version 3.1:複数のGPTモデルを同時に質問できるようになりました! api2dをサポートし、複数のAPIキーを均等に負荷分散することができます。 -- version 3.0:chatglmとその他の小型LLMのサポート。 -- version 2.6:プラグイン構造を再構築し、対話内容を高め、より多くのプラグインを追加しました。 -- version 2.5:自己アップデートし、長文書やトークンのオーバーフローの問題を解決しました。 -- version 2.4:(1)全文翻訳のPDF機能を追加しました。(2)入力エリアの位置切り替え機能を追加しました。(3)垂直レイアウトオプションを追加しました。(4)マルチスレッド関数プラグインを最適化しました。 -- version 2.3:マルチスレッド性能の向上。 -- version 2.2:関数プラグインのホットリロードをサポートする。 -- version 2.1:折りたたみ式レイアウト。 -- version 2.0:モジュール化された関数プラグインを導入。 -- version 1.0:基本機能 - -gpt_academic開発者QQグループ-2:610599535 - -- 既知の問題 - - 一部のブラウザ翻訳プラグインが、このソフトウェアのフロントエンドの実行を妨害する - - gradioバージョンが高すぎるか低すぎると、多くの異常が引き起こされる - -## 参考学習 - -``` -コードの中には、他の優れたプロジェクトの設計から参考にしたものがたくさん含まれています: - -# プロジェクト1:清華ChatGLM-6B: -https://github.com/THUDM/ChatGLM-6B - -# プロジェクト2:清華JittorLLMs: -https://github.com/Jittor/JittorLLMs - -# プロジェクト3:Edge-GPT: -https://github.com/acheong08/EdgeGPT - -# プロジェクト4:ChuanhuChatGPT: -https://github.com/GaiZhenbiao/ChuanhuChatGPT - -# プロジェクト5:ChatPaper: -https://github.com/kaixindelele/ChatPaper - -# その他: -https://github.com/gradio-app/gradio -https://github.com/fghrsh/live2d_demo -``` \ No newline at end of file diff --git a/docs/README_RS.md b/docs/README_RS.md deleted file mode 100644 index 52d18dfcf6..0000000000 --- a/docs/README_RS.md +++ /dev/null @@ -1,278 +0,0 @@ -> **Note** -> -> Этот файл самовыражения автоматически генерируется модулем перевода markdown в этом проекте и может быть не на 100% правильным. -> -# GPT Академическая оптимизация (GPT Academic) - -**Если вам нравится этот проект, пожалуйста, поставьте ему звезду. Если вы придумали более полезные языковые ярлыки или функциональные плагины, не стесняйтесь открывать issue или pull request. -Чтобы перевести этот проект на произвольный язык с помощью GPT, ознакомьтесь и запустите [`multi_language.py`](multi_language.py) (экспериментальный). - -> **Примечание** -> -> 1. Обратите внимание, что только функциональные плагины (кнопки), помеченные **красным цветом**, поддерживают чтение файлов, некоторые плагины находятся в **выпадающем меню** в области плагинов. Кроме того, мы с наивысшим приоритетом рады и обрабатываем pull requests для любых новых плагинов! -> -> 2. В каждом файле проекта функциональность описана в документе самоанализа [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). С каждой итерацией выполнения версии вы можете в любое время вызвать повторное создание отчета о самоанализе этого проекта, щелкнув соответствующий функциональный плагин и вызвав GPT. Вопросы сборки описаны в [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Метод установки](#installation). -> -> 3. Этот проект совместим и поощряет использование китайских языковых моделей chatglm и RWKV, пангу и т. Д. Поддержка нескольких api-key, которые могут существовать одновременно, может быть указан в файле конфигурации, например `API_KEY="openai-key1,openai-key2,api2d-key3"`. Если требуется временно изменить `API_KEY`, введите временный `API_KEY` в области ввода и нажмите клавишу Enter, чтобы он вступил в силу. - -> **Примечание** -> -> При установке зависимостей строго выбирайте версии, **указанные в файле requirements.txt**. 
-> `pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/`
-
-Function | Description
--- | ---
-One-click polishing | Supports one-click polishing and searching for grammatical errors in papers
-One-click Chinese-English translation | One-click Chinese-English translation
-One-click code explanation | Display, explain, generate, and comment code
-[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys
-Modular design | Supports powerful custom [function plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions); plugins support [hot reload](https://github.com/binary-husky/gpt_academic/wiki/Function-Plug-in-Guide)
-[Self-program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] [One-click review](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academicProject-Self-analysis-Report) of this project's source code
-[Program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click analysis of other Python/C/C++/Java/Lua/... project trees
-Paper reading, paper [translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] One-click reading of a full latex/pdf paper and generation of an abstract
-Full [LaTeX](https://www.bilibili.com/video/BV1nk4y1Y7Js/) translation and polishing | [Function plugin] One-click translation or polishing of a LaTeX paper
-Automatic comment generation | [Function plugin] One-click automatic generation of function comments
-Markdown [Chinese-English translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Function plugin] Have you seen the [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in the five languages above?
Chat analysis report generation | [Function plugin] Automatically generates a summary report after running
-[PDF paper full-text translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] Extracts the title and abstract of a PDF paper and translates the full text (multi-threaded)
-[Arxiv Helper](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plugin] Enter an arxiv article URL to translate the abstract and download the PDF with one click
-[Google Scholar Integration Helper](https://www.bilibili.com/video/BV19L411U7ia) | [Function plugin] Given any Google Scholar search page URL, let GPT help you [write related works](https://www.bilibili.com/video/BV1GP411U7Az/)
-Internet information aggregation + GPT | [Function plugin] One-click: [let GPT fetch information from the Internet](https://www.bilibili.com/video/BV1om4y127ck) before answering questions, so the information is never outdated
-Formula/image/table display | Shows formulas in both [Tex source and rendered form](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png); supports formulas and code highlighting
-Multi-threaded function plugin support | Supports multi-threaded calls to chatgpt; one-click processing of [large volumes of text](https://www.bilibili.com/video/BV1FT411H7c5/) or programs
-Dark gradio theme at startup | Add ```/?__theme=dark``` after the browser URL to switch to the dark theme
-[Multiple LLM model](https://www.bilibili.com/video/BV1wT411p7yf) support, [API2D](https://api2d.com/) | Served simultaneously by GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B) and [Fudan MOSS](https://github.com/OpenLMLab/MOSS)
-More new LLM model connections, [huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) support | Added the Newbing interface (New Bing); support for [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) and [Pangu α](https://openi.org.cn/pangu/)
-More new features (image generation, etc.) | See the end of this file...
-
-- All buttons are dynamically generated by reading functional.py, and custom functions can be freely added, freeing you from the clipboard -
-
-- Revision/Correction
-
-- If the output contains formulas, they will be displayed in both tex and rendered form for easy copying and reading
-
-- Don't feel like looking at project code? Show the entire project directly in chatgpt
-
-- Mixing multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
-
-
----
-# Installation
-## Installation-Method 1: Run directly (Windows, Linux or MacOS)
-
-1. Download the project
-```sh
-git clone https://github.com/binary-husky/gpt_academic.git
-cd gpt_academic
-```
-
-2. Configure API_KEY
-
-In `config.py`, configure the API KEY and other settings, including the [special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1).
-
-(P.S. When the program is running, it will first check whether there is a secret configuration file named `config_private.py` and use its entries to override the same-named entries in `config.py`. Therefore, if you understand our configuration reading logic, we strongly recommend that you create a new configuration file named `config_private.py` next to `config.py` and transfer (copy) the configuration in `config.py` into `config_private.py`. `config_private.py` is not tracked by git, which keeps your private information more secure. P.S. The project also supports configuring most options through `environment variables`; the environment-variable format follows the `docker-compose` file. Read priority: `environment variable` > `config_private.py` > `config.py`)
-
-3. Install dependencies
-```sh
-# (Option I: If familiar with Python) (Python version 3.9 or above, the newer the better). Note: use the official pip source or the aliyun pip source; temporary source-switching method: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
-python -m pip install -r requirements.txt
-
-# (Option II: If unfamiliar with Python) Use Anaconda; the steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr):
-conda create -n gptac_venv python=3.11  # create an Anaconda environment
-conda activate gptac_venv               # activate the Anaconda environment
-python -m pip install -r requirements.txt  # this step is the same as the pip installation
-```
-
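The read priority described in step 2 can be summarized in a short sketch. This is a simplified, hypothetical stand-in for the project's actual cached reader (`read_single_conf_with_lru_cache` in `toolbox.py`, visible in the patches further below); the helper name `read_conf` is an assumption, and the real reader additionally converts environment-variable strings to the expected Python types, which this sketch omits.

```python
# Minimal sketch of the config priority chain: env var > config_private.py > config.py.
# "read_conf" is a hypothetical name; the real helper lives in toolbox.py.
import importlib
import os

def read_conf(name: str):
    # 1) Environment variables take the highest priority.
    if name in os.environ:
        return os.environ[name]
    # 2) The git-ignored config_private.py overrides shipped defaults.
    try:
        private = importlib.import_module('config_private')
        if hasattr(private, name):
            return getattr(private, name)
    except ModuleNotFoundError:
        pass
    # 3) Fall back to the tracked config.py.
    return getattr(importlib.import_module('config'), name)
```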
-<details><summary>If you need to support Tsinghua ChatGLM/Fudan MOSS as backend, click here to expand</summary>
-<p>
-
-[Optional step] If you need to support Tsinghua ChatGLM/Fudan MOSS as backend, you need to install more dependencies (prerequisites: familiar with Python + have used PyTorch + a reasonably powerful machine):
-```sh
-# [Optional step I] Support Tsinghua ChatGLM. Note: if you encounter the "Call ChatGLM fail, cannot load ChatGLM parameters normally" error, refer to the following: 1. the default installation above is the torch+cpu version; to use CUDA you need to uninstall torch and reinstall torch+cuda; 2. if the model cannot be loaded due to insufficient local hardware, you can lower the model precision in request_llms/bridge_chatglm.py by changing AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
-python -m pip install -r request_llms/requirements_chatglm.txt
-
-# [Optional step II] Support Fudan MOSS
-python -m pip install -r request_llms/requirements_moss.txt
-git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss  # note: you must be in the project root path when executing this line
-
-# [Optional step III] Make sure AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. Currently all supported models are as follows (the jittorllms series currently only supports the docker solution):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
-```
-
-</p>
-</details>
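The precision fallback suggested in [Optional step I] can be sketched as follows. This is a hedged illustration only: the checkpoint names come from the note above, but the fallback loop, the `.float()` cast for CPU inference, and the error handling are assumptions, not the project's actual loading code in `request_llms/bridge_chatglm.py`.

```python
# Hedged sketch: try full-precision ChatGLM first, fall back to the int4 variant.
from transformers import AutoModel, AutoTokenizer

def load_chatglm():
    last_err = None
    for repo in ("THUDM/chatglm-6b", "THUDM/chatglm-6b-int4"):
        try:
            tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
            # .float() keeps CPU inference in float32 (illustrative choice).
            model = AutoModel.from_pretrained(repo, trust_remote_code=True).float().eval()
            return tokenizer, model
        except Exception as err:  # e.g. out of memory on the full-precision model
            last_err = err
    raise RuntimeError("Call ChatGLM fail, cannot load ChatGLM parameters normally") from last_err
```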
-
-
-4. Run
-```sh
-python main.py
-```
-
-5. Testing Function Plugins
-```
-- Testing the function plugin template function (requires GPT to answer what happened in history today); you can use this function as a template to implement more complex functions
-    Click "[Function plugin Template Demo] On this day in history"
-```
-
-## Installation - Method 2: Using Docker
-
-1. ChatGPT only (recommended for most people)
-
-``` sh
-git clone https://github.com/binary-husky/gpt_academic.git  # download the project
-cd gpt_academic                                             # enter the path
-nano config.py                 # edit config.py with any text editor to configure "Proxy", "API_KEY" and "WEB_PORT" (e.g. 50923)
-docker build -t gpt-academic . # install
-
-# (Last step - Option 1) In a Linux environment, using `--net=host` is more convenient and faster
-docker run --rm -it --net=host gpt-academic
-# (Last step - Option 2) In a macOS/Windows environment, only the -p option can be used to expose the container's port (e.g. 50923) to a port on the host
-docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
-```
-
-2. ChatGPT + ChatGLM + MOSS (requires familiarity with Docker)
-
-``` sh
-# Edit docker-compose.yml: delete solutions 1 and 3 and keep solution 2. Modify the configuration of solution 2 in docker-compose.yml; refer to the comments in it
-docker-compose up
-```
-
-3. ChatGPT + LLAMA + PanGu + RWKV (requires familiarity with Docker)
-``` sh
-# Edit docker-compose.yml: delete solutions 1 and 2 and keep solution 3. Modify the configuration of solution 3 in docker-compose.yml; refer to the comments in it
-docker-compose up
-```
-
-
-## Installation Method 3: Other Deployment Methods
-
-1. How to use a reverse-proxy URL / the Microsoft Azure API
-Configure API_URL_REDIRECT according to the instructions in `config.py`.
-
-2. Remote cloud-server deployment (requires knowledge and experience with cloud servers)
-Please visit [Deployment Wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
-
-3. Using WSL2 (Windows Subsystem for Linux)
-Please visit [Deployment Wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
-
-4. How to run under a secondary URL (such as `http://localhost/subpath`)
-Please visit the [FastAPI Operation Instructions](docs/WithFastapi.md)
-
-5. Running with docker-compose
-Please read docker-compose.yml and follow the prompts in it.
-
----
-# Advanced Usage
-## Customize new convenient buttons / custom function plugins
-
-1. Customize new convenient buttons (academic shortcuts)
-Open `core_functional.py` with any text editor, add an entry as follows, and then restart the program. (If the button has already been added successfully and is visible, both the prefix and the suffix can be hot-modified without restarting the program.)
-For example:
-```
-"Super English to Chinese": {
-    # Prefix, will be added before your input. For example, used to describe your request, such as translation, code explanation, polishing, etc.
-    "Prefix": "Please translate the following content into Chinese, and then explain each proper noun that appears in the text with a markdown table:\n\n",
-
-    # Suffix, will be added after your input. For example, together with the prefix, you can enclose your input in quotes.
-    "Suffix": "",
-},
-```
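For clarity, here is a minimal sketch of how such an entry is applied: the button's `Prefix` is prepended and its `Suffix` appended to whatever is typed in the input box. The helper name `apply_core_function` is hypothetical; the actual wiring happens inside the project when the button is clicked.

```python
# Illustrative composition rule for a core_functional.py entry.
def apply_core_function(entry: dict, user_input: str) -> str:
    return entry.get("Prefix", "") + user_input + entry.get("Suffix", "")

super_en_to_zh = {
    "Prefix": ("Please translate the following content into Chinese, and then "
               "explain each proper noun that appears in the text with a markdown table:\n\n"),
    "Suffix": "",
}

print(apply_core_function(super_en_to_zh, "Transformers process tokens in parallel."))
```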
-
-
-2. Custom function plugins
-
-Write powerful function plugins to perform any task you can and can't imagine.
-The difficulty of debugging and writing plugins in this project is very low. As long as you have some knowledge of Python, you can implement your own plugin by imitating the template we provide (a minimal sketch follows after the version list below).
-Please refer to the [Function Plugin Guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) for details.
-
----
-# Latest Update
-## New feature dynamics
-
-1. Сохранение диалогов. Вызовите "Сохранить текущий диалог" в разделе функций-плагинов, чтобы сохранить текущий диалог как файл HTML, который можно прочитать и восстановить. Кроме того, вызовите «Загрузить архив истории диалога» в меню функций-плагинов, чтобы восстановить предыдущую сессию. Совет: если нажать кнопку "Загрузить исторический архив диалога" без указания файла, можно просмотреть кэш исторических файлов HTML. Щелкните "Удалить все локальные записи истории диалогов", чтобы удалить все файловые кэши HTML.
-
-2. Создание отчетов. Большинство плагинов создают рабочий отчет после завершения выполнения.
-
-3. Модульный дизайн функций: простой интерфейс, но сильный функционал.
-
-4. Это проект с открытым исходным кодом, который может «сам переводить себя».
-
-5. Перевод других проектов с открытым исходным кодом - это не проблема.
-
-6. Мелкие функции декорирования [live2d](https://github.com/fghrsh/live2d_demo) (по умолчанию отключены, нужно изменить `config.py`).
-
-7. Поддержка большой языковой модели MOSS.
-
-8. Генерация изображений с помощью OpenAI.
-
-9. Анализ и подведение итогов аудиофайлов с помощью OpenAI.
-
-10. Полный цикл проверки правописания с использованием LaTeX.
-
-## Версии:
-- Версия 3.5 (Todo): использование естественного языка для вызова функций-плагинов проекта (высокий приоритет)
-- Версия 3.4 (Todo): улучшение многопоточной поддержки локальных больших моделей чата.
-- Версия 3.3: добавлена функция объединения интернет-информации.
-- Версия 3.2: функции-плагины поддерживают большее количество параметров (сохранение диалогов, анализ любого языка программирования и одновременный запрос LLM-групп).
-- Версия 3.1: поддержка одновременного запроса нескольких моделей GPT! Поддержка api2d, сбалансированное распределение нагрузки по нескольким ключам api.
-- Версия 3.0: поддержка chatglm и других небольших LLM.
-- Версия 2.6: перестройка структуры плагинов, улучшение интерактивности, добавлено больше плагинов.
-- Версия 2.5: автоматическое обновление для решения проблемы длинного текста и переполнения токенов при обработке больших проектов.
-- Версия 2.4: (1) добавлена функция полного перевода PDF; (2) добавлена функция переключения положения ввода; (3) добавлена опция вертикального макета; (4) оптимизация многопоточности плагинов.
-- Версия 2.3: улучшение многопоточной интерактивности.
-- Версия 2.2: функции-плагины поддерживают горячую перезагрузку.
-- Версия 2.1: раскрывающийся макет.
-- Версия 2.0: использование модульных функций-плагинов.
-- Версия 1.0: базовые функции.
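As a reference point, a bare-bones plugin can be as small as the sketch below. The signature mirrors the project's own plugins (compare `批量翻译PDF文档` in the patches further down); the plugin body itself is illustrative only.

```python
# Minimal function-plugin sketch; the body is a placeholder.
from toolbox import CatchException, update_ui

@CatchException
def demo_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # Echo the user's input back into the chat window.
    chatbot.append((txt, "Hello from a minimal custom plugin."))
    # Plugins are generators: yield UI refreshes so the front-end stays live.
    yield from update_ui(chatbot=chatbot, history=history)
```

Such a plugin would then be registered in `crazy_functional.py` via `function_plugins.update(...)` with `HotReload(demo_plugin)`, the same pattern the patches below use for the built-in plugins.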
-
-gpt_academic Разработчик QQ-группы-2: 610599535
-
-- Известные проблемы
-    - Некоторые плагины перевода в браузерах мешают работе фронтенда этого программного обеспечения
-    - Высокая или низкая версия gradio может вызвать множество исключений
-
-## Ссылки и учебные материалы
-
-```
-Мы использовали многие концепты кода из других отличных проектов, включая:
-
-# Проект 1: Qinghua ChatGLM-6B:
-https://github.com/THUDM/ChatGLM-6B
-
-# Проект 2: Qinghua JittorLLMs:
-https://github.com/Jittor/JittorLLMs
-
-# Проект 3: Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# Проект 4: Chuanhu ChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-# Проект 5: ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# Больше:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
\ No newline at end of file
diff --git a/toolbox.py b/toolbox.py
index 293419c3f5..dca2673e20 100644
--- a/toolbox.py
+++ b/toolbox.py
@@ -1098,14 +1098,11 @@ def get_chat_handle():
 def get_plugin_default_kwargs():
     """
     """
-    from toolbox import get_conf, ChatBotWithCookies
-
-    WEB_PORT, LLM_MODEL, API_KEY = \
-        get_conf('WEB_PORT', 'LLM_MODEL', 'API_KEY')
-
+    from toolbox import ChatBotWithCookies
+    cookies = load_chat_cookies()
     llm_kwargs = {
-        'api_key': API_KEY,
-        'llm_model': LLM_MODEL,
+        'api_key': cookies['api_key'],
+        'llm_model': cookies['llm_model'],
         'top_p':1.0,
         'max_length': None,
         'temperature':1.0,
@@ -1120,25 +1117,21 @@ def get_plugin_default_kwargs():
         "chatbot_with_cookie": chatbot,
         "history": [],
         "system_prompt": "You are a good AI.",
-        "web_port": WEB_PORT
+        "web_port": None
     }
     return DEFAULT_FN_GROUPS_kwargs

 def get_chat_default_kwargs():
     """
     """
-    from toolbox import get_conf
-
-    LLM_MODEL, API_KEY = get_conf('LLM_MODEL', 'API_KEY')
-
+    cookies = load_chat_cookies()
     llm_kwargs = {
-        'api_key': API_KEY,
-        'llm_model': LLM_MODEL,
+        'api_key': cookies['api_key'],
+        'llm_model': cookies['llm_model'],
         'top_p':1.0,
         'max_length': None,
         'temperature':1.0,
     }
-
     default_chat_kwargs = {
         "inputs": "Hello there, are you ready?",
         "llm_kwargs": llm_kwargs,
@@ -1150,3 +1143,6 @@ def get_chat_default_kwargs():
     return default_chat_kwargs


+def get_max_token(llm_kwargs):
+    from request_llms.bridge_all import model_info
+    return model_info[llm_kwargs['llm_model']]['max_token']
\ No newline at end of file
diff --git a/version b/version
index 4e18fc8616..81ad2fd933 100644
--- a/version
+++ b/version
@@ -1,5 +1,5 @@
 {
   "version": 3.60,
   "show_feature": true,
-  "new_feature": "AutoGen多智能体插件测试版 <-> 修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验"
+  "new_feature": "11月12日紧急BUG修复 <-> AutoGen多智能体插件测试版 <-> 修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验"
 }

From b9b7bf38ab7d9ae5a97e2a9f96ce6435085bb75d Mon Sep 17 00:00:00 2001
From: binary-husky
Date: Mon, 13 Nov 2023 00:15:15 +0800
Subject: [PATCH 067/117]
 =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=8F=92=E4=BB=B6?=
 =?UTF-8?q?=E5=AF=BC=E5=85=A5=E6=97=B6=E7=9A=84pytorch=E5=8A=A0=E8=BD=BD?=
 =?UTF-8?q?=E9=97=AE=E9=A2=98?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 crazy_functional.py                           | 46 +++++++++++++------
 ...3_\345\244\232\347\272\277\347\250\213.py" |  9 ++--
 request_llms/bridge_chatglm.py                |  2 +-
 request_llms/bridge_chatglm3.py               |  2 +-
 request_llms/bridge_moss.py                   |  2 -
 toolbox.py                                    |  8 +++-
 6 files changed, 45 insertions(+), 24 deletions(-)

diff --git 
a/crazy_functional.py b/crazy_functional.py index be05efe8cf..42b0151a95 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -1,4 +1,5 @@ from toolbox import HotReload # HotReload 的意思是热更新,修改函数插件后,不需要重启程序,代码直接生效 +from toolbox import trimmed_format_exc def get_crazy_functions(): @@ -292,6 +293,7 @@ def get_crazy_functions(): } }) except: + print(trimmed_format_exc()) print('Load function plugin failed') try: @@ -316,6 +318,7 @@ def get_crazy_functions(): } }) except: + print(trimmed_format_exc()) print('Load function plugin failed') try: @@ -331,6 +334,7 @@ def get_crazy_functions(): }, }) except: + print(trimmed_format_exc()) print('Load function plugin failed') try: @@ -346,23 +350,24 @@ def get_crazy_functions(): }, }) except: + print(trimmed_format_exc()) print('Load function plugin failed') try: - from crazy_functions.图片生成 import 图片生成, 图片生成_DALLE3 + from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3 function_plugins.update({ - "图片生成(先切换模型到openai或api2d)": { + "图片生成_DALLE2 (先切换模型到openai或api2d)": { "Group": "对话", "Color": "stop", "AsButton": False, "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 256x256, 512x512, 1024x1024", # 高级参数输入区的显示提示 "Info": "使用DALLE2生成图片 | 输入参数字符串,提供图像的内容", - "Function": HotReload(图片生成) + "Function": HotReload(图片生成_DALLE2) }, }) function_plugins.update({ - "图片生成_DALLE3(先切换模型到openai或api2d)": { + "图片生成_DALLE3 (先切换模型到openai或api2d)": { "Group": "对话", "Color": "stop", "AsButton": False, @@ -373,6 +378,7 @@ def get_crazy_functions(): }, }) except: + print(trimmed_format_exc()) print('Load function plugin failed') try: @@ -389,6 +395,7 @@ def get_crazy_functions(): } }) except: + print(trimmed_format_exc()) print('Load function plugin failed') try: @@ -403,6 +410,7 @@ def get_crazy_functions(): } }) except: + print(trimmed_format_exc()) print('Load function plugin failed') try: @@ -418,6 +426,7 @@ def get_crazy_functions(): } }) except: + print(trimmed_format_exc()) print('Load function plugin failed') try: @@ -433,6 +442,7 @@ def get_crazy_functions(): } }) except: + print(trimmed_format_exc()) print('Load function plugin failed') try: @@ -448,6 +458,7 @@ def get_crazy_functions(): } }) except: + print(trimmed_format_exc()) print('Load function plugin failed') try: @@ -461,6 +472,7 @@ def get_crazy_functions(): } }) except: + print(trimmed_format_exc()) print('Load function plugin failed') try: @@ -505,6 +517,7 @@ def get_crazy_functions(): } }) except: + print(trimmed_format_exc()) print('Load function plugin failed') try: @@ -522,6 +535,7 @@ def get_crazy_functions(): } }) except: + print(trimmed_format_exc()) print('Load function plugin failed') try: @@ -535,6 +549,7 @@ def get_crazy_functions(): } }) except: + print(trimmed_format_exc()) print('Load function plugin failed') try: @@ -548,17 +563,22 @@ def get_crazy_functions(): } }) except: + print(trimmed_format_exc()) print('Load function plugin failed') - from crazy_functions.多智能体 import 多智能体终端 - function_plugins.update({ - "AutoGen多智能体终端(仅供测试)": { - "Group": "智能体", - "Color": "stop", - "AsButton": False, - "Function": HotReload(多智能体终端) - } - }) + try: + from crazy_functions.多智能体 import 多智能体终端 + function_plugins.update({ + "AutoGen多智能体终端(仅供测试)": { + "Group": "智能体", + "Color": "stop", + "AsButton": False, + "Function": HotReload(多智能体终端) + } + }) + except: + print(trimmed_format_exc()) + print('Load function plugin failed') # try: # from crazy_functions.chatglm微调工具 import 微调数据集生成 diff --git 
"a/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_\345\244\232\347\272\277\347\250\213.py" "b/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_\345\244\232\347\272\277\347\250\213.py" index 333b529b7e..73cf59200c 100644 --- "a/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_\345\244\232\347\272\277\347\250\213.py" +++ "b/crazy_functions/\346\211\271\351\207\217\347\277\273\350\257\221PDF\346\226\207\346\241\243_\345\244\232\347\272\277\347\250\213.py" @@ -1,4 +1,4 @@ -from toolbox import CatchException, report_exception, get_log_folder, gen_time_str +from toolbox import CatchException, report_exception, get_log_folder, gen_time_str, check_packages from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive @@ -6,9 +6,8 @@ from .crazy_utils import read_and_clean_pdf_text from .pdf_fns.parse_pdf import parse_pdf, get_avail_grobid_url, translate_pdf from colorful import * -import copy import os -import math + @CatchException def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): @@ -22,9 +21,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst # 尝试导入依赖,如果缺少依赖,则给出安装建议 try: - import fitz - import tiktoken - import scipdf + check_packages(["fitz", "tiktoken", "scipdf"]) except: report_exception(chatbot, history, a=f"解析项目: {txt}", diff --git a/request_llms/bridge_chatglm.py b/request_llms/bridge_chatglm.py index 83c50da133..c58495dccf 100644 --- a/request_llms/bridge_chatglm.py +++ b/request_llms/bridge_chatglm.py @@ -2,7 +2,6 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`" -from transformers import AutoModel, AutoTokenizer from toolbox import get_conf, ProxyNetworkActivate from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns @@ -23,6 +22,7 @@ def load_model_and_tokenizer(self): import os, glob import os import platform + from transformers import AutoModel, AutoTokenizer LOCAL_MODEL_QUANT, device = get_conf('LOCAL_MODEL_QUANT', 'LOCAL_MODEL_DEVICE') if LOCAL_MODEL_QUANT == "INT4": # INT4 diff --git a/request_llms/bridge_chatglm3.py b/request_llms/bridge_chatglm3.py index 4465660833..3caa4769d3 100644 --- a/request_llms/bridge_chatglm3.py +++ b/request_llms/bridge_chatglm3.py @@ -2,7 +2,6 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`" -from transformers import AutoModel, AutoTokenizer from toolbox import get_conf, ProxyNetworkActivate from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns @@ -20,6 +19,7 @@ def load_model_info(self): def load_model_and_tokenizer(self): # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 + from transformers import AutoModel, AutoTokenizer import os, glob import os import platform diff --git a/request_llms/bridge_moss.py b/request_llms/bridge_moss.py index d7399f52b6..ee8907cf01 100644 --- a/request_llms/bridge_moss.py +++ b/request_llms/bridge_moss.py @@ -1,8 +1,6 @@ -from transformers import AutoModel, AutoTokenizer import time import threading -import importlib from toolbox import update_ui, get_conf from multiprocessing import Process, Pipe diff --git a/toolbox.py b/toolbox.py index dca2673e20..30f717629a 100644 --- a/toolbox.py +++ b/toolbox.py @@ -1145,4 +1145,10 @@ def get_chat_default_kwargs(): def get_max_token(llm_kwargs): 
from request_llms.bridge_all import model_info - return model_info[llm_kwargs['llm_model']]['max_token'] \ No newline at end of file + return model_info[llm_kwargs['llm_model']]['max_token'] + +def check_packages(packages=[]): + import importlib.util + for p in packages: + spam_spec = importlib.util.find_spec(p) + if spam_spec is None: raise ModuleNotFoundError \ No newline at end of file From 27db9006929699c26cdcd0de5e1669ffae4b9c5d Mon Sep 17 00:00:00 2001 From: binary-husky Date: Mon, 13 Nov 2023 00:24:20 +0800 Subject: [PATCH 068/117] =?UTF-8?q?=E7=A7=BB=E9=99=A4batchsize?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/crazy_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crazy_functions/crazy_utils.py b/crazy_functions/crazy_utils.py index 832775d18d..a23c732b58 100644 --- a/crazy_functions/crazy_utils.py +++ b/crazy_functions/crazy_utils.py @@ -748,7 +748,7 @@ def NOUGAT_parse_pdf(self, fp, chatbot, history): yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在加载NOUGAT... (提示:首次运行需要花费较长时间下载NOUGAT参数)", chatbot=chatbot, history=history, delay=0) - self.nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}" --batchsize 4', os.getcwd(), timeout=3600) + self.nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}"', os.getcwd(), timeout=3600) res = glob.glob(os.path.join(dst,'*.mmd')) if len(res) == 0: self.threadLock.release() From 7b526cf74b03dfd1fe8f9fed8b726dcc5aa37d66 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Mon, 13 Nov 2023 00:48:48 +0800 Subject: [PATCH 069/117] =?UTF-8?q?=E6=9B=B4=E6=96=B0scipdf=5Fparser?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- requirements.txt | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/requirements.txt b/requirements.txt index 1f86d336e5..a5782f779f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,24 +1,24 @@ ./docs/gradio-3.32.6-py3-none-any.whl -pydantic==1.10.11 +pypdf2==2.12.1 tiktoken>=0.3.3 requests[socks] +pydantic==1.10.11 transformers>=4.27.1 +scipdf_parser>=0.52 python-markdown-math +websocket-client beautifulsoup4 prompt_toolkit latex2mathml python-docx mdtex2html anthropic +pyautogen colorama Markdown pygments pymupdf openai -pyautogen -numpy arxiv +numpy rich -pypdf2==2.12.1 -websocket-client -scipdf_parser>=0.3 From f03aa8713d23939b9b0be637cdb9ac5c75f319fc Mon Sep 17 00:00:00 2001 From: binary-husky Date: Mon, 13 Nov 2023 01:10:12 +0800 Subject: [PATCH 070/117] limit author nums --- crazy_functions/pdf_fns/parse_pdf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crazy_functions/pdf_fns/parse_pdf.py b/crazy_functions/pdf_fns/parse_pdf.py index 6646c5bfec..51f8811fc8 100644 --- a/crazy_functions/pdf_fns/parse_pdf.py +++ b/crazy_functions/pdf_fns/parse_pdf.py @@ -82,7 +82,7 @@ def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_fi # title title = article_dict.get('title', '无法获取 title'); prompt += f'title:{title}\n\n' # authors - authors = article_dict.get('authors', '无法获取 authors'); prompt += f'authors:{authors}\n\n' + authors = article_dict.get('authors', '无法获取 authors')[:100]; prompt += f'authors:{authors}\n\n' # abstract abstract = article_dict.get('abstract', '无法获取 abstract'); prompt += f'abstract:{abstract}\n\n' # command From 9a21e13d33b1a553065b9ba89dd0d470d4ae93ad Mon Sep 17 00:00:00 2001 From: binary-husky Date: Mon, 13 Nov 2023 13:10:15 +0800 
Subject: [PATCH 071/117] =?UTF-8?q?=E6=94=AF=E6=8C=81gpt-4-vision-preview?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llms/bridge_all.py | 13 + request_llms/bridge_chatgpt_vision.py | 329 ++++++++++++++++++++++++++ toolbox.py | 7 +- 3 files changed, 347 insertions(+), 2 deletions(-) create mode 100644 request_llms/bridge_chatgpt_vision.py diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index 89c9f76cee..88848a984a 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -16,6 +16,9 @@ from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui from .bridge_chatgpt import predict as chatgpt_ui +from .bridge_chatgpt_vision import predict_no_ui_long_connection as chatgpt_vision_noui +from .bridge_chatgpt_vision import predict as chatgpt_vision_ui + from .bridge_chatglm import predict_no_ui_long_connection as chatglm_noui from .bridge_chatglm import predict as chatglm_ui @@ -162,6 +165,16 @@ def decode(self, *args, **kwargs): "token_cnt": get_token_num_gpt4, }, + "gpt-4-vision-preview": { + "fn_with_ui": chatgpt_vision_ui, + "fn_without_ui": chatgpt_vision_noui, + "endpoint": openai_endpoint, + "max_token": 4096, + "tokenizer": tokenizer_gpt4, + "token_cnt": get_token_num_gpt4, + }, + + # azure openai "azure-gpt-3.5":{ "fn_with_ui": chatgpt_ui, diff --git a/request_llms/bridge_chatgpt_vision.py b/request_llms/bridge_chatgpt_vision.py new file mode 100644 index 0000000000..112391c759 --- /dev/null +++ b/request_llms/bridge_chatgpt_vision.py @@ -0,0 +1,329 @@ +""" + 该文件中主要包含三个函数 + + 不具备多线程能力的函数: + 1. predict: 正常对话时使用,具备完备的交互功能,不可多线程 + + 具备多线程调用能力的函数 + 2. predict_no_ui_long_connection:支持多线程 +""" + +import json +import time +import logging +import requests +import base64 +import os +import glob + +from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc, is_the_upload_folder, update_ui_lastest_msg, get_max_token +proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \ + get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG', 'AZURE_CFG_ARRAY') + +timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' 
+ \ + '网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。' + +def have_any_recent_upload_image_files(chatbot): + _5min = 5 * 60 + if chatbot is None: return False, None # chatbot is None + most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None) + if not most_recent_uploaded: return False, None # most_recent_uploaded is None + if time.time() - most_recent_uploaded["time"] < _5min: + most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None) + path = most_recent_uploaded['path'] + file_manifest = [f for f in glob.glob(f'{path}/**/*.jpg', recursive=True)] + file_manifest += [f for f in glob.glob(f'{path}/**/*.jpeg', recursive=True)] + file_manifest += [f for f in glob.glob(f'{path}/**/*.png', recursive=True)] + if len(file_manifest) == 0: return False, None + return True, file_manifest # most_recent_uploaded is new + else: + return False, None # most_recent_uploaded is too old + +def report_invalid_key(key): + if get_conf("BLOCK_INVALID_APIKEY"): + # 实验性功能,自动检测并屏蔽失效的KEY,请勿使用 + from request_llms.key_manager import ApiKeyManager + api_key = ApiKeyManager().add_key_to_blacklist(key) + +def get_full_error(chunk, stream_response): + """ + 获取完整的从Openai返回的报错 + """ + while True: + try: + chunk += next(stream_response) + except: + break + return chunk + +def decode_chunk(chunk): + # 提前读取一些信息 (用于判断异常) + chunk_decoded = chunk.decode() + chunkjson = None + has_choices = False + choice_valid = False + has_content = False + has_role = False + try: + chunkjson = json.loads(chunk_decoded[6:]) + has_choices = 'choices' in chunkjson + if has_choices: choice_valid = (len(chunkjson['choices']) > 0) + if has_choices and choice_valid: has_content = "content" in chunkjson['choices'][0]["delta"] + if has_choices and choice_valid: has_role = "role" in chunkjson['choices'][0]["delta"] + except: + pass + return chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role + +from functools import lru_cache +@lru_cache(maxsize=32) +def verify_endpoint(endpoint): + """ + 检查endpoint是否可用 + """ + return endpoint + +def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False): + raise NotImplementedError + + +def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): + + have_recent_file, image_paths = have_any_recent_upload_image_files(chatbot) + + if is_any_api_key(inputs): + chatbot._cookies['api_key'] = inputs + chatbot.append(("输入已识别为openai的api_key", what_keys(inputs))) + yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # 刷新界面 + return + elif not is_any_api_key(chatbot._cookies['api_key']): + chatbot.append((inputs, "缺少api_key。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 
长效解决方案:在config.py中配置。")) + yield from update_ui(chatbot=chatbot, history=history, msg="缺少api_key") # 刷新界面 + return + if not have_recent_file: + chatbot.append((inputs, "没有检测到任何近期上传的图像文件,请上传jpg格式的图片,此外,请注意拓展名需要小写")) + yield from update_ui(chatbot=chatbot, history=history, msg="等待图片") # 刷新界面 + return + if os.path.exists(inputs): + chatbot.append((inputs, "已经接收到您上传的文件,您不需要再重复强调该文件的路径了,请直接输入您的问题。")) + yield from update_ui(chatbot=chatbot, history=history, msg="等待指令") # 刷新界面 + return + + + user_input = inputs + if additional_fn is not None: + from core_functional import handle_core_functionality + inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot) + + raw_input = inputs + logging.info(f'[raw_input] {raw_input}') + def make_media_input(inputs, image_paths): + for image_path in image_paths: + inputs = inputs + f'

' + return inputs + chatbot.append((make_media_input(inputs, image_paths), "")) + yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面 + + # check mis-behavior + if is_the_upload_folder(user_input): + chatbot[-1] = (inputs, f"[Local Message] 检测到操作错误!当您上传文档之后,需点击“**函数插件区**”按钮进行处理,请勿点击“提交”按钮或者“基础功能区”按钮。") + yield from update_ui(chatbot=chatbot, history=history, msg="正常") # 刷新界面 + time.sleep(2) + + try: + headers, payload, api_key = generate_payload(inputs, llm_kwargs, history, system_prompt, image_paths) + except RuntimeError as e: + chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。") + yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面 + return + + # 检查endpoint是否合法 + try: + from .bridge_all import model_info + endpoint = verify_endpoint(model_info[llm_kwargs['llm_model']]['endpoint']) + except: + tb_str = '```\n' + trimmed_format_exc() + '```' + chatbot[-1] = (inputs, tb_str) + yield from update_ui(chatbot=chatbot, history=history, msg="Endpoint不满足要求") # 刷新界面 + return + + history.append(make_media_input(inputs, image_paths)) + history.append("") + + retry = 0 + while True: + try: + # make a POST request to the API endpoint, stream=True + response = requests.post(endpoint, headers=headers, proxies=proxies, + json=payload, stream=True, timeout=TIMEOUT_SECONDS);break + except: + retry += 1 + chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg)) + retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else "" + yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # 刷新界面 + if retry > MAX_RETRY: raise TimeoutError + + gpt_replying_buffer = "" + + is_head_of_the_stream = True + if stream: + stream_response = response.iter_lines() + while True: + try: + chunk = next(stream_response) + except StopIteration: + # 非OpenAI官方接口的出现这样的报错,OpenAI和API2D不会走这里 + chunk_decoded = chunk.decode() + error_msg = chunk_decoded + # 首先排除一个one-api没有done数据包的第三方Bug情形 + if len(gpt_replying_buffer.strip()) > 0 and len(error_msg) == 0: + yield from update_ui(chatbot=chatbot, history=history, msg="检测到有缺陷的非OpenAI官方接口,建议选择更稳定的接口。") + break + # 其他情况,直接返回报错 + chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg, api_key) + yield from update_ui(chatbot=chatbot, history=history, msg="非OpenAI官方接口返回了错误:" + chunk.decode()) # 刷新界面 + return + + # 提前读取一些信息 (用于判断异常) + chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role = decode_chunk(chunk) + + if is_head_of_the_stream and (r'"object":"error"' not in chunk_decoded) and (r"content" not in chunk_decoded): + # 数据流的第一帧不携带content + is_head_of_the_stream = False; continue + + if chunk: + try: + if has_choices and not choice_valid: + # 一些垃圾第三方接口的出现这样的错误 + continue + # 前者是API2D的结束条件,后者是OPENAI的结束条件 + if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0): + # 判定为数据流的结束,gpt_replying_buffer也写完了 + lastmsg = chatbot[-1][-1] + f"

{llm_kwargs['llm_model']}调用结束,该模型不具备上下文对话能力,如需追问,请及时切换模型。" + yield from update_ui_lastest_msg(lastmsg, chatbot, history, delay=1) + logging.info(f'[response] {gpt_replying_buffer}') + break + # 处理数据流的主体 + status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}" + # 如果这里抛出异常,一般是文本过长,详情见get_full_error的输出 + if has_content: + # 正常情况 + gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"] + elif has_role: + # 一些第三方接口的出现这样的错误,兼容一下吧 + continue + else: + # 一些垃圾第三方接口的出现这样的错误 + gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"] + + history[-1] = gpt_replying_buffer + chatbot[-1] = (history[-2], history[-1]) + yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面 + except Exception as e: + yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # 刷新界面 + chunk = get_full_error(chunk, stream_response) + chunk_decoded = chunk.decode() + error_msg = chunk_decoded + chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg, api_key) + yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面 + print(error_msg) + return + +def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg, api_key=""): + from .bridge_all import model_info + openai_website = ' 请登录OpenAI查看详情 https://platform.openai.com/signup' + if "reduce the length" in error_msg: + if len(history) >= 2: history[-1] = ""; history[-2] = "" # 清除当前溢出的输入:history[-2] 是本次输入, history[-1] 是本次输出 + history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'], + max_token_limit=(model_info[llm_kwargs['llm_model']]['max_token'])) # history至少释放二分之一 + chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长, 或历史数据过长. 历史缓存数据已部分释放, 您可以请再次尝试. (若再次失败则更可能是因为输入过长.)") + elif "does not exist" in error_msg: + chatbot[-1] = (chatbot[-1][0], f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 模型不存在, 或者您没有获得体验资格.") + elif "Incorrect API key" in error_msg: + chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由, 拒绝服务. " + openai_website); report_invalid_key(api_key) + elif "exceeded your current quota" in error_msg: + chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由, 拒绝服务." + openai_website); report_invalid_key(api_key) + elif "account is not active" in error_msg: + chatbot[-1] = (chatbot[-1][0], "[Local Message] Your account is not active. OpenAI以账户失效为由, 拒绝服务." + openai_website); report_invalid_key(api_key) + elif "associated with a deactivated account" in error_msg: + chatbot[-1] = (chatbot[-1][0], "[Local Message] You are associated with a deactivated account. OpenAI以账户失效为由, 拒绝服务." + openai_website); report_invalid_key(api_key) + elif "API key has been deactivated" in error_msg: + chatbot[-1] = (chatbot[-1][0], "[Local Message] API key has been deactivated. OpenAI以账户失效为由, 拒绝服务." + openai_website); report_invalid_key(api_key) + elif "bad forward key" in error_msg: + chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.") + elif "Not enough point" in error_msg: + chatbot[-1] = (chatbot[-1][0], "[Local Message] Not enough point. 
API2D账户点数不足.") + else: + from toolbox import regular_txt_to_markdown + tb_str = '```\n' + trimmed_format_exc() + '```' + chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}") + return chatbot, history + +# Function to encode the image +def encode_image(image_path): + with open(image_path, "rb") as image_file: + return base64.b64encode(image_file.read()).decode('utf-8') + +def generate_payload(inputs, llm_kwargs, history, system_prompt, image_paths): + """ + 整合所有信息,选择LLM模型,生成http请求,为发送请求做准备 + """ + if not is_any_api_key(llm_kwargs['api_key']): + raise AssertionError("你提供了错误的API_KEY。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。") + + api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) + + headers = { + "Content-Type": "application/json", + "Authorization": f"Bearer {api_key}" + } + if API_ORG.startswith('org-'): headers.update({"OpenAI-Organization": API_ORG}) + if llm_kwargs['llm_model'].startswith('azure-'): + headers.update({"api-key": api_key}) + if llm_kwargs['llm_model'] in AZURE_CFG_ARRAY.keys(): + azure_api_key_unshared = AZURE_CFG_ARRAY[llm_kwargs['llm_model']]["AZURE_API_KEY"] + headers.update({"api-key": azure_api_key_unshared}) + + base64_images = [] + for image_path in image_paths: + base64_images.append(encode_image(image_path)) + + messages = [] + what_i_ask_now = {} + what_i_ask_now["role"] = "user" + what_i_ask_now["content"] = [] + what_i_ask_now["content"].append({ + "type": "text", + "text": inputs + }) + + for image_path, base64_image in zip(image_paths, base64_images): + what_i_ask_now["content"].append({ + "type": "image_url", + "image_url": { + "url": f"data:image/jpeg;base64,{base64_image}" + } + }) + + messages.append(what_i_ask_now) + model = llm_kwargs['llm_model'] + if llm_kwargs['llm_model'].startswith('api2d-'): + model = llm_kwargs['llm_model'][len('api2d-'):] + + payload = { + "model": model, + "messages": messages, + "temperature": llm_kwargs['temperature'], # 1.0, + "top_p": llm_kwargs['top_p'], # 1.0, + "n": 1, + "stream": True, + "max_tokens": get_max_token(llm_kwargs), + "presence_penalty": 0, + "frequency_penalty": 0, + } + try: + print(f" {llm_kwargs['llm_model']} : {inputs[:100]} ..........") + except: + print('输入中可能存在乱码。') + return headers, payload, api_key + + diff --git a/toolbox.py b/toolbox.py index 30f717629a..3f3c68bd9f 100644 --- a/toolbox.py +++ b/toolbox.py @@ -279,9 +279,12 @@ def text_divide_paragraph(text): if '```' in text: # careful input - return pre + text + suf + return text + elif '
' in text: + # careful input + return text else: - # wtf input + # whatever input lines = text.split("\n") for i, line in enumerate(lines): lines[i] = lines[i].replace(" ", " ") From 682898a3bac517f5c630713607db958b885b6a6c Mon Sep 17 00:00:00 2001 From: binary-husky Date: Mon, 13 Nov 2023 13:21:33 +0800 Subject: [PATCH 072/117] =?UTF-8?q?=E6=94=AF=E6=8C=81gpt-4-v=E5=A4=84?= =?UTF-8?q?=E7=90=86=E5=A4=9A=E5=BC=A0=E5=9B=BE=E7=89=87?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llms/bridge_chatgpt_vision.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/request_llms/bridge_chatgpt_vision.py b/request_llms/bridge_chatgpt_vision.py index 112391c759..e84bc0b717 100644 --- a/request_llms/bridge_chatgpt_vision.py +++ b/request_llms/bridge_chatgpt_vision.py @@ -198,7 +198,7 @@ def make_media_input(inputs, image_paths): # 前者是API2D的结束条件,后者是OPENAI的结束条件 if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0): # 判定为数据流的结束,gpt_replying_buffer也写完了 - lastmsg = chatbot[-1][-1] + f"

{llm_kwargs['llm_model']}调用结束,该模型不具备上下文对话能力,如需追问,请及时切换模型。" + lastmsg = chatbot[-1][-1] + f"\n\n\n\n「{llm_kwargs['llm_model']}调用结束,该模型不具备上下文对话能力,如需追问,请及时切换模型。」" yield from update_ui_lastest_msg(lastmsg, chatbot, history, delay=1) logging.info(f'[response] {gpt_replying_buffer}') break From 2003afe27f26f7bb7add0162a4742a838a15f1cf Mon Sep 17 00:00:00 2001 From: binary-husky Date: Tue, 14 Nov 2023 11:54:07 +0800 Subject: [PATCH 073/117] =?UTF-8?q?API=5FURL=5FREDIRECT=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A3=80=E6=B5=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- toolbox.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/toolbox.py b/toolbox.py index 3f3c68bd9f..b7b762d740 100644 --- a/toolbox.py +++ b/toolbox.py @@ -808,6 +808,11 @@ def read_single_conf_with_lru_cache(arg): r = getattr(importlib.import_module('config'), arg) # 在读取API_KEY时,检查一下是不是忘了改config + if arg == 'API_URL_REDIRECT': + oai_rd = r.get("https://api.openai.com/v1/chat/completions", None) # API_URL_REDIRECT填写格式是错误的,请阅读`https://github.com/binary-husky/gpt_academic/wiki/项目配置说明` + if oai_rd and not oai_rd.endswith('/completions'): + print亮红( "\n\n[API_URL_REDIRECT] API_URL_REDIRECT填错了。请阅读`https://github.com/binary-husky/gpt_academic/wiki/项目配置说明`。如果您确信自己没填错,无视此消息即可。") + time.sleep(5) if arg == 'API_KEY': print亮蓝(f"[API_KEY] 本项目现已支持OpenAI和Azure的api-key。也支持同时填写多个api-key,如API_KEY=\"openai-key1,openai-key2,azure-key3\"") print亮蓝(f"[API_KEY] 您既可以在config.py中修改api-key(s),也可以在问题输入区输入临时的api-key(s),然后回车键提交后即可生效。") From b0c34a89cd3b0feabd974a5d3c36cf8632258210 Mon Sep 17 00:00:00 2001 From: Doiiars Date: Tue, 14 Nov 2023 12:22:52 +0800 Subject: [PATCH 074/117] Update main.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 添加临时更换API的帮助 --- main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/main.py b/main.py index 428955991b..ba18f85adb 100644 --- a/main.py +++ b/main.py @@ -31,6 +31,7 @@ def main(): description += "

虚空终端使用说明: 点击虚空终端, 然后根据提示输入指令, 再次点击虚空终端" description += "

如何保存对话: 点击保存当前的对话按钮" description += "

如何语音对话: 请阅读Wiki" + description += "

如何临时更换API_KEY: 需要时,在输入区输入临时的API_KEY然后回车键提交后即可生效" # 问询记录, python 版本建议3.9+(越新越好) import logging, uuid From 1fa296a3033026fc0616ec98af901ba111ab3249 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Tue, 14 Nov 2023 12:28:57 +0800 Subject: [PATCH 075/117] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E5=B8=AE=E5=8A=A9?= =?UTF-8?q?=E6=96=87=E6=9C=AC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.py b/main.py index ba18f85adb..b29c94fcb7 100644 --- a/main.py +++ b/main.py @@ -31,7 +31,7 @@ def main(): description += "

虚空终端使用说明: 点击虚空终端, 然后根据提示输入指令, 再次点击虚空终端" description += "

如何保存对话: 点击保存当前的对话按钮" description += "

如何语音对话: 请阅读Wiki" - description += "

如何临时更换API_KEY: 需要时,在输入区输入临时的API_KEY然后回车键提交后即可生效" + description += "

如何临时更换API_KEY: 在输入区输入临时API_KEY后提交(网页刷新后失效)" # 问询记录, python 版本建议3.9+(越新越好) import logging, uuid From 443915b6d6178c4f32cb11322e0d50c5c59d5952 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 14 Nov 2023 20:49:53 +0800 Subject: [PATCH 076/117] =?UTF-8?q?Update=20=E5=9B=BE=E7=89=87=E7=94=9F?= =?UTF-8?q?=E6=88=90.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...76\347\211\207\347\224\237\346\210\220.py" | 62 ++++++++++++++++++- 1 file changed, 60 insertions(+), 2 deletions(-) diff --git "a/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" "b/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" index 4968361a99..8a304b4e2d 100644 --- "a/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" +++ "b/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" @@ -42,6 +42,47 @@ def gen_image(llm_kwargs, prompt, resolution="1024x1024", model="dall-e-2"): return image_url, file_path+file_name +def gen_image_dalle3(quality, llm_kwargs, prompt, resolution="1024x1024", model="dall-e-3"): + import requests, json, time, os + from request_llms.bridge_all import model_info + + proxies = get_conf('proxies') + # Set up OpenAI API key and model + api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) + chat_endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] + # 'https://api.openai.com/v1/chat/completions' + img_endpoint = chat_endpoint.replace('chat/completions','images/generations') + # # Generate the image + url = img_endpoint + headers = { + 'Authorization': f"Bearer {api_key}", + 'Content-Type': 'application/json' + } + data = { + 'prompt': prompt, + 'n': 1, + 'size': resolution, + 'quality': quality, + 'model': model, + 'response_format': 'url' + } + response = requests.post(url, headers=headers, json=data, proxies=proxies) + print(response.content) + try: + image_url = json.loads(response.content.decode('utf8'))['data'][0]['url'] + except: + raise RuntimeError(response.content.decode()) + # 文件保存到本地 + r = requests.get(image_url, proxies=proxies) + file_path = f'{get_log_folder()}/image_gen/' + os.makedirs(file_path, exist_ok=True) + file_name = 'Image' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.png' + with open(file_path+file_name, 'wb+') as f: f.write(r.content) + + + return image_url, file_path+file_name + + def edit_image(llm_kwargs, prompt, image_path, resolution="1024x1024", model="dall-e-2"): import requests, json, time, os from request_llms.bridge_all import model_info @@ -109,13 +150,30 @@ def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys @CatchException -def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): +def 图片生成_DALLE3_Standard(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): history = [] # 清空历史,以免输入溢出 chatbot.append(("这是什么功能?", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文效果不理想, 请尝试英文Prompt。正在处理中 .....")) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") resolution = plugin_kwargs.get("advanced_arg", '1024x1024') - image_url, image_path = gen_image(llm_kwargs, prompt, resolution) + image_url, image_path = gen_image_dalle3(standard, llm_kwargs, prompt, resolution) + chatbot.append([prompt, + f'图像中转网址:
`{image_url}`
'+ + f'中转网址预览:
' + f'本地文件地址:
`{image_path}`
'+ + f'本地文件预览:
' + ]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + + +@CatchException +def 图片生成_DALLE3_HD(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): + history = [] # 清空历史,以免输入溢出 + chatbot.append(("这是什么功能?", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文效果不理想, 请尝试英文Prompt。正在处理中 .....")) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 + if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") + resolution = plugin_kwargs.get("advanced_arg", '1024x1024') + image_url, image_path = gen_image_dalle3(HD, llm_kwargs, prompt, resolution) chatbot.append([prompt, f'图像中转网址:
`{image_url}`
'+ f'中转网址预览:
' From 518a1b2b75e21f790135bbe0ada5370e4bc748f0 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 14 Nov 2023 20:51:49 +0800 Subject: [PATCH 077/117] Update crazy_functional.py --- crazy_functional.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/crazy_functional.py b/crazy_functional.py index 42b0151a95..bb7aba9059 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -354,7 +354,7 @@ def get_crazy_functions(): print('Load function plugin failed') try: - from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3 + from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3_Standard, 图片生成_DALLE3_HD function_plugins.update({ "图片生成_DALLE2 (先切换模型到openai或api2d)": { "Group": "对话", @@ -367,13 +367,24 @@ def get_crazy_functions(): }, }) function_plugins.update({ - "图片生成_DALLE3 (先切换模型到openai或api2d)": { + "图片生成_DALLE3_Standard (先切换模型到openai或api2d)": { "Group": "对话", "Color": "stop", "AsButton": False, "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 1024x1024, 1792x1024, 1024x1792", # 高级参数输入区的显示提示 - "Info": "使用DALLE3生成图片 | 输入参数字符串,提供图像的内容", + "Info": "使用DALLE3 standard质量生成图片 | 输入参数字符串,提供图像的内容", + "Function": HotReload(图片生成_DALLE3) + }, + }) + function_plugins.update({ + "图片生成_DALLE3_HD (先切换模型到openai或api2d)": { + "Group": "对话", + "Color": "stop", + "AsButton": False, + "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) + "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 1024x1024, 1792x1024, 1024x1792", # 高级参数输入区的显示提示 + "Info": "使用DALLE3 HD质量生成图片 | 输入参数字符串,提供图像的内容", "Function": HotReload(图片生成_DALLE3) }, }) From 1b28ae3baa21eb56b8a37617595aaf59142213b3 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 14 Nov 2023 21:14:41 +0800 Subject: [PATCH 078/117] Update crazy_functional.py --- crazy_functional.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crazy_functional.py b/crazy_functional.py index bb7aba9059..7f3af8496e 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -354,7 +354,7 @@ def get_crazy_functions(): print('Load function plugin failed') try: - from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3_Standard, 图片生成_DALLE3_HD + from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3_Standard, 图片生成_DALLE3_HD function_plugins.update({ "图片生成_DALLE2 (先切换模型到openai或api2d)": { "Group": "对话", From 5391cb4198cd9a291f4d2991921e48711d8d3a87 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 14 Nov 2023 21:17:48 +0800 Subject: [PATCH 079/117] Update crazy_functional.py --- crazy_functional.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crazy_functional.py b/crazy_functional.py index 7f3af8496e..bdf49b7fc9 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -374,7 +374,7 @@ def get_crazy_functions(): "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 1024x1024, 1792x1024, 1024x1792", # 高级参数输入区的显示提示 "Info": "使用DALLE3 standard质量生成图片 | 输入参数字符串,提供图像的内容", - "Function": HotReload(图片生成_DALLE3) + "Function": HotReload(图片生成_DALLE3_Standard) }, }) function_plugins.update({ @@ -385,7 +385,7 @@ def get_crazy_functions(): "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 1024x1024, 1792x1024, 1024x1792", # 高级参数输入区的显示提示 "Info": "使用DALLE3 HD质量生成图片 | 输入参数字符串,提供图像的内容", - "Function": HotReload(图片生成_DALLE3) + "Function": HotReload(图片生成_DALLE3_HD) }, }) except: 
From 4d1657a53197b87827a46cf9cc57dab4d6a65086 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 14 Nov 2023 21:25:47 +0800 Subject: [PATCH 080/117] =?UTF-8?q?Update=20=E5=9B=BE=E7=89=87=E7=94=9F?= =?UTF-8?q?=E6=88=90.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../\345\233\276\347\211\207\347\224\237\346\210\220.py" | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git "a/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" "b/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" index 8a304b4e2d..e871051a14 100644 --- "a/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" +++ "b/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" @@ -156,7 +156,7 @@ def 图片生成_DALLE3_Standard(prompt, llm_kwargs, plugin_kwargs, chatbot, his yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") resolution = plugin_kwargs.get("advanced_arg", '1024x1024') - image_url, image_path = gen_image_dalle3(standard, llm_kwargs, prompt, resolution) + image_url, image_path = gen_image_dalle3("standard", llm_kwargs, prompt, resolution) chatbot.append([prompt, f'图像中转网址:
`{image_url}`
'+ f'中转网址预览:
' @@ -173,7 +173,7 @@ def 图片生成_DALLE3_HD(prompt, llm_kwargs, plugin_kwargs, chatbot, history, yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") resolution = plugin_kwargs.get("advanced_arg", '1024x1024') - image_url, image_path = gen_image_dalle3(HD, llm_kwargs, prompt, resolution) + image_url, image_path = gen_image_dalle3("HD", llm_kwargs, prompt, resolution) chatbot.append([prompt, f'图像中转网址:
`{image_url}`
'+ f'中转网址预览:
' From f495bb154ecbabdfc08ea051ea699c576cc4cea6 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 14 Nov 2023 21:33:00 +0800 Subject: [PATCH 081/117] =?UTF-8?q?Update=20=E5=9B=BE=E7=89=87=E7=94=9F?= =?UTF-8?q?=E6=88=90.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../\345\233\276\347\211\207\347\224\237\346\210\220.py" | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git "a/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" "b/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" index e871051a14..5c1f1b9a00 100644 --- "a/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" +++ "b/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" @@ -173,7 +173,7 @@ def 图片生成_DALLE3_HD(prompt, llm_kwargs, plugin_kwargs, chatbot, history, yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") resolution = plugin_kwargs.get("advanced_arg", '1024x1024') - image_url, image_path = gen_image_dalle3("HD", llm_kwargs, prompt, resolution) + image_url, image_path = gen_image_dalle3("hd", llm_kwargs, prompt, resolution) chatbot.append([prompt, f'图像中转网址:
`{image_url}`
'+ f'中转网址预览:
' From 5caeb7525d11c6f10ac1950e215cf5d2ae46a582 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Tue, 14 Nov 2023 23:15:19 +0800 Subject: [PATCH 082/117] =?UTF-8?q?Update=20=E5=9B=BE=E7=89=87=E7=94=9F?= =?UTF-8?q?=E6=88=90.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...76\347\211\207\347\224\237\346\210\220.py" | 109 +++++------------- 1 file changed, 29 insertions(+), 80 deletions(-) diff --git "a/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" "b/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" index 5c1f1b9a00..642a9e225a 100644 --- "a/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" +++ "b/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" @@ -2,7 +2,7 @@ from crazy_functions.multi_stage.multi_stage_utils import GptAcademicState -def gen_image(llm_kwargs, prompt, resolution="1024x1024", model="dall-e-2"): +def gen_image(llm_kwargs, prompt, resolution="1024x1024", model="dall-e-2", quality=None): import requests, json, time, os from request_llms.bridge_all import model_info @@ -25,47 +25,7 @@ def gen_image(llm_kwargs, prompt, resolution="1024x1024", model="dall-e-2"): 'model': model, 'response_format': 'url' } - response = requests.post(url, headers=headers, json=data, proxies=proxies) - print(response.content) - try: - image_url = json.loads(response.content.decode('utf8'))['data'][0]['url'] - except: - raise RuntimeError(response.content.decode()) - # 文件保存到本地 - r = requests.get(image_url, proxies=proxies) - file_path = f'{get_log_folder()}/image_gen/' - os.makedirs(file_path, exist_ok=True) - file_name = 'Image' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.png' - with open(file_path+file_name, 'wb+') as f: f.write(r.content) - - - return image_url, file_path+file_name - - -def gen_image_dalle3(quality, llm_kwargs, prompt, resolution="1024x1024", model="dall-e-3"): - import requests, json, time, os - from request_llms.bridge_all import model_info - - proxies = get_conf('proxies') - # Set up OpenAI API key and model - api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) - chat_endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] - # 'https://api.openai.com/v1/chat/completions' - img_endpoint = chat_endpoint.replace('chat/completions','images/generations') - # # Generate the image - url = img_endpoint - headers = { - 'Authorization': f"Bearer {api_key}", - 'Content-Type': 'application/json' - } - data = { - 'prompt': prompt, - 'n': 1, - 'size': resolution, - 'quality': quality, - 'model': model, - 'response_format': 'url' - } + if quality is not None: data.update({'quality': quality}) response = requests.post(url, headers=headers, json=data, proxies=proxies) print(response.content) try: @@ -126,17 +86,17 @@ def edit_image(llm_kwargs, prompt, image_path, resolution="1024x1024", model="da @CatchException def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 + txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 + llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 + plugin_kwargs 插件模型的参数,暂时没有用武之地 + chatbot 聊天显示框的句柄,用于显示给用户 + history 聊天历史,前情提要 system_prompt 给gpt的静默提醒 web_port 当前软件运行的端口号 """ - history = [] # 清空历史,以免输入溢出 - chatbot.append(("这是什么功能?", "[Local Message] 生成图像, 
请先把模型切换至gpt-*或者api2d-*。如果中文效果不理想, 请尝试英文Prompt。正在处理中 .....")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 + history = [] # 清空历史,以免输入溢出 + chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 .....")) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 由于请求gpt需要一段时间,我们先及时地做一次界面更新 if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") resolution = plugin_kwargs.get("advanced_arg", '1024x1024') image_url, image_path = gen_image(llm_kwargs, prompt, resolution) @@ -146,44 +106,32 @@ def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys f'本地文件地址:
<br/>`{image_path}`<br/>'+
                     f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>
' ]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新 @CatchException -def 图片生成_DALLE3_Standard(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - history = [] # 清空历史,以免输入溢出 - chatbot.append(("这是什么功能?", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文效果不理想, 请尝试英文Prompt。正在处理中 .....")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 +def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): + history = [] # 清空历史,以免输入溢出 + chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 .....")) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 由于请求gpt需要一段时间,我们先及时地做一次界面更新 if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - resolution = plugin_kwargs.get("advanced_arg", '1024x1024') - image_url, image_path = gen_image_dalle3("standard", llm_kwargs, prompt, resolution) + resolution = plugin_kwargs.get("advanced_arg", '1024x1024').lower() + if resolution.endswith('-hd'): + resolution = resolution.replace('-hd', '') + quality = 'hd' + else: + quality = 'standard' + image_url, image_path = gen_image(llm_kwargs, prompt, resolution, model="dall-e-3", quality=quality) chatbot.append([prompt, f'图像中转网址:
<br/>`{image_url}`<br/>'+
                     f'中转网址预览: <br/><div align="center"><img src="{image_url}"></div>'
                     f'本地文件地址: <br/>`{image_path}`<br/>'+
                     f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>
' ]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - - -@CatchException -def 图片生成_DALLE3_HD(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - history = [] # 清空历史,以免输入溢出 - chatbot.append(("这是什么功能?", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文效果不理想, 请尝试英文Prompt。正在处理中 .....")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - resolution = plugin_kwargs.get("advanced_arg", '1024x1024') - image_url, image_path = gen_image_dalle3("hd", llm_kwargs, prompt, resolution) - chatbot.append([prompt, - f'图像中转网址:
<br/>`{image_url}`<br/>'+
-                    f'中转网址预览: <br/><div align="center"><img src="{image_url}"></div>'
-                    f'本地文件地址: <br/>`{image_path}`<br/>'+
-                    f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>
' - ]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新 class ImageEditState(GptAcademicState): + # 尚未完成 def get_image_file(self, x): import os, glob if len(x) == 0: return False, None @@ -204,8 +152,8 @@ def get_prompt(self, x): def reset(self): self.req = [ {'value':None, 'description': '请先上传图像(必须是.png格式), 然后再次点击本插件', 'verify_fn': self.get_image_file}, - {'value':None, 'description': '请输入分辨率,可选:256x256, 512x512 或 1024x1024', 'verify_fn': self.get_resolution}, - {'value':None, 'description': '请输入修改需求,建议您使用英文提示词', 'verify_fn': self.get_prompt}, + {'value':None, 'description': '请输入分辨率,可选:256x256, 512x512 或 1024x1024', 'verify_fn': self.get_resolution}, + {'value':None, 'description': '请输入修改需求,建议您使用英文提示词', 'verify_fn': self.get_prompt}, ] self.info = "" @@ -230,11 +178,12 @@ def already_obtained_all_materials(self): @CatchException def 图片修改_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): + # 尚未完成 history = [] # 清空历史 state = ImageEditState.get_state(chatbot, ImageEditState) state = state.feed(prompt, chatbot) if not state.already_obtained_all_materials(): - chatbot.append(["图片修改(先上传图片,再输入修改需求,最后输入分辨率)", state.next_req()]) + chatbot.append(["图片修改(先上传图片,再输入修改需求,最后输入分辨率)", state.next_req()]) yield from update_ui(chatbot=chatbot, history=history) return @@ -251,5 +200,5 @@ def 图片修改_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys f'本地文件地址:
<br/>`{image_path}`<br/>'+
                     f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>
' ]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新 From e7c662a5d66d58150b56fb36abc6a513e44cd27e Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Tue, 14 Nov 2023 23:16:49 +0800 Subject: [PATCH 083/117] Update crazy_functional.py --- crazy_functional.py | 21 +++++---------------- 1 file changed, 5 insertions(+), 16 deletions(-) diff --git a/crazy_functional.py b/crazy_functional.py index bdf49b7fc9..3d4df71883 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -354,7 +354,7 @@ def get_crazy_functions(): print('Load function plugin failed') try: - from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3_Standard, 图片生成_DALLE3_HD + from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3 function_plugins.update({ "图片生成_DALLE2 (先切换模型到openai或api2d)": { "Group": "对话", @@ -367,25 +367,14 @@ def get_crazy_functions(): }, }) function_plugins.update({ - "图片生成_DALLE3_Standard (先切换模型到openai或api2d)": { + "图片生成_DALLE3 (先切换模型到openai或api2d)": { "Group": "对话", "Color": "stop", "AsButton": False, "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 1024x1024, 1792x1024, 1024x1792", # 高级参数输入区的显示提示 - "Info": "使用DALLE3 standard质量生成图片 | 输入参数字符串,提供图像的内容", - "Function": HotReload(图片生成_DALLE3_Standard) - }, - }) - function_plugins.update({ - "图片生成_DALLE3_HD (先切换模型到openai或api2d)": { - "Group": "对话", - "Color": "stop", - "AsButton": False, - "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 1024x1024, 1792x1024, 1024x1792", # 高级参数输入区的显示提示 - "Info": "使用DALLE3 HD质量生成图片 | 输入参数字符串,提供图像的内容", - "Function": HotReload(图片生成_DALLE3_HD) + "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 1024x1024, 1792x1024, 1024x1792。如需生成高清图像,请输入 1024x1024-HD, 1792x1024-HD, 1024x1792-HD。", # 高级参数输入区的显示提示 + "Info": "使用DALLE3生成图片 | 输入参数字符串,提供图像的内容", + "Function": HotReload(图片生成_DALLE3) }, }) except: From 75a84d3cec1ce18b84638ea744ae55802ed1efac Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Thu, 16 Nov 2023 17:18:07 +0800 Subject: [PATCH 084/117] =?UTF-8?q?=E6=B7=BB=E5=8A=A0python=E7=89=88?= =?UTF-8?q?=E6=9C=AC=E8=AF=B4=E6=98=8E?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 8e1e55b92d..05ba22cdb4 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ > **Note** > -> 2023.11.12: 紧急修复了endpoint异常的问题。 +> 2023.11.12: 某些依赖包尚不兼容python 3.12,推荐python 3.11。 > > 2023.11.7: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目开源免费,近期发现有人蔑视开源协议并利用本项目违规圈钱,请提高警惕,谨防上当受骗。 @@ -108,7 +108,7 @@ cd gpt_academic 3. 
安装依赖 ```sh -# (选择I: 如熟悉python, python>=3.9)备注:使用官方pip源或者阿里pip源, 临时换源方法:python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ +# (选择I: 如熟悉python, python推荐版本 3.9 ~ 3.11)备注:使用官方pip源或者阿里pip源, 临时换源方法:python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ python -m pip install -r requirements.txt # (选择II: 使用Anaconda)步骤也是类似的 (https://www.bilibili.com/video/BV1rc411W7Dr): From 47fe06f79d28a65d06e92b5d062467b294ce1843 Mon Sep 17 00:00:00 2001 From: Mehdi Baneshi Date: Fri, 17 Nov 2023 05:45:47 +0330 Subject: [PATCH 085/117] Update README.English.md Change the reference section, add link for easy access to the resource --- docs/README.English.md | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/docs/README.English.md b/docs/README.English.md index d0f3e4a1bf..f5632f35da 100644 --- a/docs/README.English.md +++ b/docs/README.English.md @@ -326,25 +326,26 @@ You can change the theme by modifying the `THEME` option (config.py). 1. `master` branch: Main branch, stable version 2. `frontier` branch: Development branch, test version +*** + ### V: References and Learning -``` + The code references the designs of many other excellent projects, in no particular order: -# THU ChatGLM2-6B: -https://github.com/THUDM/ChatGLM2-6B +[THU ChatGLM2-6B](https://github.com/THUDM/ChatGLM2-6B) + + +[THU JittorLLMs](https://github.com/Jittor/JittorLLMs) + + +[ChatPaper](https://github.com/kaixindelele/ChatPaper) -# THU JittorLLMs: -https://github.com/Jittor/JittorLLMs -# ChatPaper: -https://github.com/kaixindelele/ChatPaper +[Edge-GPT](https://github.com/acheong08/EdgeGPT) -# Edge-GPT: -https://github.com/acheong08/EdgeGPT -# ChuanhuChatGPT: -https://github.com/GaiZhenbiao/ChuanhuChatGPT +[ChuanhuChatGPT](https://github.com/GaiZhenbiao/ChuanhuChatGPT) From 74f70305b7f1598be4a9149425cb4732d16e82c7 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sun, 19 Nov 2023 22:03:36 +0800 Subject: [PATCH 086/117] introduce precommit --- .pre-commit-config.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..fd16ba2dc3 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,10 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.2.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-added-large-files From 977f992e3a041eb8cff61e5fa527c251aefce97d Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Mon, 20 Nov 2023 00:33:18 +0800 Subject: [PATCH 087/117] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=A4=9A=E7=94=A8?= =?UTF-8?q?=E6=88=B7=E6=96=87=E4=BB=B6=E5=86=B2=E7=AA=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...06\345\217\262\345\255\230\346\241\243.py" | 26 +++-- ...05\345\212\251\345\212\237\350\203\275.py" | 22 ++++- toolbox.py | 98 ++++++++++++++----- 3 files changed, 106 insertions(+), 40 deletions(-) diff --git "a/crazy_functions/\345\257\271\350\257\235\345\216\206\345\217\262\345\255\230\346\241\243.py" "b/crazy_functions/\345\257\271\350\257\235\345\216\206\345\217\262\345\255\230\346\241\243.py" index f89faeda27..3f603f9b82 100644 --- "a/crazy_functions/\345\257\271\350\257\235\345\216\206\345\217\262\345\255\230\346\241\243.py" +++ 
"b/crazy_functions/\345\257\271\350\257\235\345\216\206\345\217\262\345\255\230\346\241\243.py" @@ -1,7 +1,8 @@ -from toolbox import CatchException, update_ui, promote_file_to_downloadzone, get_log_folder -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive +from toolbox import CatchException, update_ui, promote_file_to_downloadzone, get_log_folder, get_user import re +f_prefix = 'GPT-Academic对话存档' + def write_chat_to_file(chatbot, history=None, file_name=None): """ 将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。 @@ -9,8 +10,8 @@ def write_chat_to_file(chatbot, history=None, file_name=None): import os import time if file_name is None: - file_name = 'chatGPT对话历史' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.html' - fp = os.path.join(get_log_folder(), file_name) + file_name = f_prefix + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.html' + fp = os.path.join(get_log_folder(get_user(chatbot), plugin_name='chat_history'), file_name) with open(fp, 'w', encoding='utf8') as f: from themes.theme import advanced_css f.write(f'对话历史') @@ -80,7 +81,7 @@ def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ """ chatbot.append(("保存当前对话", - f"[Local Message] {write_chat_to_file(chatbot, history)},您可以调用“载入对话历史存档”还原当下的对话。\n警告!被保存的对话历史可以被使用该系统的任何人查阅。")) + f"[Local Message] {write_chat_to_file(chatbot, history)},您可以调用下拉菜单中的“载入对话历史存档”还原当下的对话。")) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 def hide_cwd(str): @@ -106,7 +107,12 @@ def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, s if not success: if txt == "": txt = '空空如也的输入栏' import glob - local_history = "
".join(["`"+hide_cwd(f)+f" ({gen_file_preview(f)})"+"`" for f in glob.glob(f'{get_log_folder()}/**/chatGPT对话历史*.html', recursive=True)]) + local_history = "
".join([ + "`"+hide_cwd(f)+f" ({gen_file_preview(f)})"+"`" + for f in glob.glob( + f'{get_log_folder(get_user(chatbot), plugin_name="chat_history")}/**/{f_prefix}*.html', + recursive=True + )]) chatbot.append([f"正在查找对话历史文件(html格式): {txt}", f"找不到任何html文件: {txt}。但本地存储了以下历史文件,您可以将任意一个文件路径粘贴到输入区,然后重试:
{local_history}"])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
@@ -132,8 +138,12 @@ def 删除所有本地对话历史记录(txt, llm_kwargs, plugin_kwargs, chatbot
     """

     import glob, os
-    local_history = "<br/>".join(["`"+hide_cwd(f)+"`" for f in glob.glob(f'{get_log_folder()}/**/chatGPT对话历史*.html', recursive=True)])
-    for f in glob.glob(f'{get_log_folder()}/**/chatGPT对话历史*.html', recursive=True):
-        os.remove(f)
+    local_history = "<br/>".join([
+        "`"+hide_cwd(f)+"`"
+        for f in glob.glob(
+            f'{get_log_folder(get_user(chatbot), plugin_name="chat_history")}/**/{f_prefix}*.html', recursive=True
+        )])
+    for f in glob.glob(f'{get_log_folder(get_user(chatbot), plugin_name="chat_history")}/**/{f_prefix}*.html', recursive=True):
+        os.remove(f)
     chatbot.append([f"删除所有历史对话文件", f"已删除<br/>
{local_history}"]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 diff --git "a/crazy_functions/\350\276\205\345\212\251\345\212\237\350\203\275.py" "b/crazy_functions/\350\276\205\345\212\251\345\212\237\350\203\275.py" index 16854e087e..c5f874026c 100644 --- "a/crazy_functions/\350\276\205\345\212\251\345\212\237\350\203\275.py" +++ "b/crazy_functions/\350\276\205\345\212\251\345\212\237\350\203\275.py" @@ -2,9 +2,12 @@ # @Time : 2023/4/19 # @Author : Spike # @Descr : -from toolbox import update_ui, get_conf +from toolbox import update_ui, get_conf, get_user from toolbox import CatchException +from toolbox import default_user_name from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive +import shutil +import os @CatchException @@ -33,10 +36,19 @@ def 清除缓存(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt chatbot.append(['清除本地缓存数据', '执行中. 删除数据']) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - import shutil, os - PATH_PRIVATE_UPLOAD, PATH_LOGGING = get_conf('PATH_PRIVATE_UPLOAD', 'PATH_LOGGING') - shutil.rmtree(PATH_LOGGING, ignore_errors=True) - shutil.rmtree(PATH_PRIVATE_UPLOAD, ignore_errors=True) + def _get_log_folder(user=default_user_name): + PATH_LOGGING = get_conf('PATH_LOGGING') + _dir = os.path.join(PATH_LOGGING, user) + if not os.path.exists(_dir): os.makedirs(_dir) + return _dir + + def _get_upload_folder(user=default_user_name): + PATH_PRIVATE_UPLOAD = get_conf('PATH_PRIVATE_UPLOAD') + _dir = os.path.join(PATH_PRIVATE_UPLOAD, user) + return _dir + + shutil.rmtree(_get_log_folder(get_user(chatbot)), ignore_errors=True) + shutil.rmtree(_get_upload_folder(get_user(chatbot)), ignore_errors=True) chatbot.append(['清除本地缓存数据', '执行完成']) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 \ No newline at end of file diff --git a/toolbox.py b/toolbox.py index b7b762d740..4376f05058 100644 --- a/toolbox.py +++ b/toolbox.py @@ -11,7 +11,7 @@ from latex2mathml.converter import convert as tex2mathml from functools import wraps, lru_cache pj = os.path.join - +default_user_name = 'default_user' """ ======================================================================== 第一部分 @@ -61,11 +61,16 @@ def decorated(request: gradio.Request, cookies, max_length, llm_model, txt, txt2 txt_passon = txt if txt == "" and txt2 != "": txt_passon = txt2 # 引入一个有cookie的chatbot + if request.username is not None: + user_name = request.username + else: + user_name = default_user_name cookies.update({ 'top_p':top_p, 'api_key': cookies['api_key'], 'llm_model': llm_model, 'temperature':temperature, + 'user_name': user_name, }) llm_kwargs = { 'api_key': cookies['api_key'], @@ -537,40 +542,57 @@ def find_recent_files(directory): return recent_files + +def file_already_in_downloadzone(file, user_path): + parent_path = user_path + child_path = file + if os.path.commonpath([parent_path, child_path]) == parent_path: + return True + else: + return False + + def promote_file_to_downloadzone(file, rename_file=None, chatbot=None): # 将文件复制一份到下载区 import shutil - if rename_file is None: rename_file = f'{gen_time_str()}-{os.path.basename(file)}' - new_path = pj(get_log_folder(), rename_file) - # 如果已经存在,先删除 - if os.path.exists(new_path) and not os.path.samefile(new_path, file): os.remove(new_path) - # 把文件复制过去 - if not os.path.exists(new_path): shutil.copyfile(file, new_path) - # 将文件添加到chatbot cookie中,避免多用户干扰 + if chatbot is not None: + user_name = get_user(chatbot) + else: + user_name = default_user_name + + user_path = get_log_folder(user_name, 
plugin_name=None) + if file_already_in_downloadzone(file, user_path): + new_path = file + else: + if rename_file is None: rename_file = f'{gen_time_str()}-{os.path.basename(file)}' + new_path = pj(user_path, rename_file) + # 如果已经存在,先删除 + if os.path.exists(new_path) and not os.path.samefile(new_path, file): os.remove(new_path) + # 把文件复制过去 + if not os.path.exists(new_path): shutil.copyfile(file, new_path) + # 将文件添加到chatbot cookie中 if chatbot is not None: if 'files_to_promote' in chatbot._cookies: current = chatbot._cookies['files_to_promote'] else: current = [] chatbot._cookies.update({'files_to_promote': [new_path] + current}) return new_path + def disable_auto_promotion(chatbot): chatbot._cookies.update({'files_to_promote': []}) return -def is_the_upload_folder(string): - PATH_PRIVATE_UPLOAD = get_conf('PATH_PRIVATE_UPLOAD') - pattern = r'^PATH_PRIVATE_UPLOAD/[A-Za-z0-9_-]+/\d{4}-\d{2}-\d{2}-\d{2}-\d{2}-\d{2}$' - pattern = pattern.replace('PATH_PRIVATE_UPLOAD', PATH_PRIVATE_UPLOAD) - if re.match(pattern, string): return True - else: return False -def del_outdated_uploads(outdate_time_seconds): - PATH_PRIVATE_UPLOAD = get_conf('PATH_PRIVATE_UPLOAD') +def del_outdated_uploads(outdate_time_seconds, target_path_base=None): + if target_path_base is None: + user_upload_dir = get_conf('PATH_PRIVATE_UPLOAD') + else: + user_upload_dir = target_path_base current_time = time.time() one_hour_ago = current_time - outdate_time_seconds - # Get a list of all subdirectories in the PATH_PRIVATE_UPLOAD folder + # Get a list of all subdirectories in the user_upload_dir folder # Remove subdirectories that are older than one hour - for subdirectory in glob.glob(f'{PATH_PRIVATE_UPLOAD}/*/*'): + for subdirectory in glob.glob(f'{user_upload_dir}/*'): subdirectory_time = os.path.getmtime(subdirectory) if subdirectory_time < one_hour_ago: try: shutil.rmtree(subdirectory) @@ -583,17 +605,16 @@ def on_file_uploaded(request: gradio.Request, files, chatbot, txt, txt2, checkbo """ if len(files) == 0: return chatbot, txt - - # 移除过时的旧文件从而节省空间&保护隐私 - outdate_time_seconds = 60 - del_outdated_uploads(outdate_time_seconds) # 创建工作路径 - user_name = "default" if not request.username else request.username + user_name = default_user_name if not request.username else request.username time_tag = gen_time_str() - PATH_PRIVATE_UPLOAD = get_conf('PATH_PRIVATE_UPLOAD') - target_path_base = pj(PATH_PRIVATE_UPLOAD, user_name, time_tag) + target_path_base = get_upload_folder(user_name, tag=time_tag) os.makedirs(target_path_base, exist_ok=True) + + # 移除过时的旧文件从而节省空间&保护隐私 + outdate_time_seconds = 3600 # 一小时 + del_outdated_uploads(outdate_time_seconds, get_upload_folder(user_name)) # 逐个文件转移到目标路径 upload_msg = '' @@ -998,12 +1019,35 @@ def gen_time_str(): import time return time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) -def get_log_folder(user='default', plugin_name='shared'): +def get_log_folder(user=default_user_name, plugin_name='shared'): + if user is None: user = default_user_name PATH_LOGGING = get_conf('PATH_LOGGING') - _dir = pj(PATH_LOGGING, user, plugin_name) + if plugin_name is None: + _dir = pj(PATH_LOGGING, user) + else: + _dir = pj(PATH_LOGGING, user, plugin_name) if not os.path.exists(_dir): os.makedirs(_dir) return _dir +def get_upload_folder(user=default_user_name, tag=None): + PATH_PRIVATE_UPLOAD = get_conf('PATH_PRIVATE_UPLOAD') + if user is None: user = default_user_name + if tag is None or len(tag)==0: + target_path_base = pj(PATH_PRIVATE_UPLOAD, user) + else: + target_path_base = pj(PATH_PRIVATE_UPLOAD, user, tag) + 
return target_path_base + +def is_the_upload_folder(string): + PATH_PRIVATE_UPLOAD = get_conf('PATH_PRIVATE_UPLOAD') + pattern = r'^PATH_PRIVATE_UPLOAD[\\/][A-Za-z0-9_-]+[\\/]\d{4}-\d{2}-\d{2}-\d{2}-\d{2}-\d{2}$' + pattern = pattern.replace('PATH_PRIVATE_UPLOAD', PATH_PRIVATE_UPLOAD) + if re.match(pattern, string): return True + else: return False + +def get_user(chatbotwithcookies): + return chatbotwithcookies._cookies.get('user_name', default_user_name) + class ProxyNetworkActivate(): """ 这段代码定义了一个名为TempProxy的空上下文管理器, 用于给一小段代码上代理 From b1be05009bfc7b14478459d96a293252d0afcce3 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Mon, 20 Nov 2023 01:06:19 +0800 Subject: [PATCH 088/117] =?UTF-8?q?=E7=A7=BB=E9=99=A4=E5=86=97=E4=BD=99?= =?UTF-8?q?=E4=BB=A3=E7=A0=81=EF=BC=8C=E4=BF=AE=E5=A4=8D=E5=A4=9A=E7=94=A8?= =?UTF-8?q?=E6=88=B7=E5=AD=98=E6=A1=A3=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .pre-commit-config.yaml | 10 ---------- config.py | 5 ----- crazy_functions/latex_fns/latex_actions.py | 23 ++++++++++++---------- crazy_functions/latex_fns/latex_toolbox.py | 1 + toolbox.py | 15 ++++++++------ version | 2 +- 6 files changed, 24 insertions(+), 32 deletions(-) delete mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index fd16ba2dc3..0000000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# See https://pre-commit.com for more information -# See https://pre-commit.com/hooks.html for more hooks -repos: -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.2.0 - hooks: - - id: trailing-whitespace - - id: end-of-file-fixer - - id: check-yaml - - id: check-added-large-files diff --git a/config.py b/config.py index a51778b2cb..ea603c3bf9 100644 --- a/config.py +++ b/config.py @@ -235,11 +235,6 @@ # 自定义按钮的最大数量限制 NUM_CUSTOM_BASIC_BTN = 4 - -# LATEX实验性功能 -LATEX_EXPERIMENTAL = False - - """ 在线大模型配置关联关系示意图 │ diff --git a/crazy_functions/latex_fns/latex_actions.py b/crazy_functions/latex_fns/latex_actions.py index c51e98582e..74e8757e62 100644 --- a/crazy_functions/latex_fns/latex_actions.py +++ b/crazy_functions/latex_fns/latex_actions.py @@ -95,11 +95,14 @@ def __init__(self) -> None: self.abstract = "unknown" def read_title_and_abstract(self, txt): - title, abstract = find_title_and_abs(txt) - if title is not None: - self.title = title.replace('\n', ' ').replace('\\\\', ' ').replace(' ', '').replace(' ', '') - if abstract is not None: - self.abstract = abstract.replace('\n', ' ').replace('\\\\', ' ').replace(' ', '').replace(' ', '') + try: + title, abstract = find_title_and_abs(txt) + if title is not None: + self.title = title.replace('\n', ' ').replace('\\\\', ' ').replace(' ', '').replace(' ', '') + if abstract is not None: + self.abstract = abstract.replace('\n', ' ').replace('\\\\', ' ').replace(' ', '').replace(' ', '') + except: + pass def merge_result(self, arr, mode, msg, buggy_lines=[], buggy_line_surgery_n_lines=10): """ @@ -265,12 +268,12 @@ def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin else: # <-------- gpt 多线程请求 ----------> - LATEX_EXPERIMENTAL, = get_conf('LATEX_EXPERIMENTAL') history_array = [[""] for _ in range(n_split)] - if LATEX_EXPERIMENTAL: - paper_meta = f"The paper you processing is `{lps.title}`, a part of the abstraction is `{lps.abstract}`" - paper_meta_max_len = 888 - history_array = [[ paper_meta[:paper_meta_max_len] + '...', "Understand, what should I do?"] for _ in 
range(n_split)] + # LATEX_EXPERIMENTAL, = get_conf('LATEX_EXPERIMENTAL') + # if LATEX_EXPERIMENTAL: + # paper_meta = f"The paper you processing is `{lps.title}`, a part of the abstraction is `{lps.abstract}`" + # paper_meta_max_len = 888 + # history_array = [[ paper_meta[:paper_meta_max_len] + '...', "Understand, what should I do?"] for _ in range(n_split)] gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( inputs_array=inputs_array, diff --git a/crazy_functions/latex_fns/latex_toolbox.py b/crazy_functions/latex_fns/latex_toolbox.py index b56825aa65..afa65bf94d 100644 --- a/crazy_functions/latex_fns/latex_toolbox.py +++ b/crazy_functions/latex_fns/latex_toolbox.py @@ -352,6 +352,7 @@ def extract_title(string): title = extract_title(main_file) return title, abstract + def merge_tex_files(project_foler, main_file, mode): """ Merge Tex project recrusively diff --git a/toolbox.py b/toolbox.py index 4376f05058..8747afdb8c 100644 --- a/toolbox.py +++ b/toolbox.py @@ -544,14 +544,16 @@ def find_recent_files(directory): def file_already_in_downloadzone(file, user_path): - parent_path = user_path - child_path = file - if os.path.commonpath([parent_path, child_path]) == parent_path: - return True - else: + try: + parent_path = os.path.abspath(user_path) + child_path = os.path.abspath(file) + if os.path.samefile(os.path.commonpath([parent_path, child_path]), parent_path): + return True + else: + return False + except: return False - def promote_file_to_downloadzone(file, rename_file=None, chatbot=None): # 将文件复制一份到下载区 import shutil @@ -564,6 +566,7 @@ def promote_file_to_downloadzone(file, rename_file=None, chatbot=None): if file_already_in_downloadzone(file, user_path): new_path = file else: + user_path = get_log_folder(user_name, plugin_name='downloadzone') if rename_file is None: rename_file = f'{gen_time_str()}-{os.path.basename(file)}' new_path = pj(user_path, rename_file) # 如果已经存在,先删除 diff --git a/version b/version index 81ad2fd933..cf14e82bf2 100644 --- a/version +++ b/version @@ -1,5 +1,5 @@ { "version": 3.60, "show_feature": true, - "new_feature": "11月12日紧急BUG修复 <-> AutoGen多智能体插件测试版 <-> 修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验" + "new_feature": "修复多个BUG <-> AutoGen多智能体插件测试版 <-> 修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验" } From c7a0a5f2079bbe85e134b7927a66ce15386927b6 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Wed, 22 Nov 2023 01:40:40 +0800 Subject: [PATCH 089/117] =?UTF-8?q?=E5=BC=95=E5=85=A5=E6=9B=B4=E7=A8=B3?= =?UTF-8?q?=E5=AE=9A=E7=9A=84=E8=87=AA=E5=8A=A8=E6=9B=B4=E6=96=B0URL?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- check_proxy.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/check_proxy.py b/check_proxy.py index 977ac276a9..e2ba3f1aaa 100644 --- a/check_proxy.py +++ b/check_proxy.py @@ -5,7 +5,6 @@ def check_proxy(proxies): try: response = requests.get("https://ipapi.co/json/", proxies=proxies, timeout=4) data = response.json() - # print(f'查询代理的地理位置,返回的结果是{data}') if 'country_name' in data: country = data['country_name'] result = f"代理配置 {proxies_https}, 代理所在地:{country}" @@ -47,8 +46,8 @@ def backup_and_download(current_version, remote_version): os.makedirs(new_version_dir) shutil.copytree('./', backup_dir, ignore=lambda x, y: ['history']) proxies = 
get_conf('proxies') - r = requests.get( - 'https://github.com/binary-husky/chatgpt_academic/archive/refs/heads/master.zip', proxies=proxies, stream=True) + try: r = requests.get('https://github.com/binary-husky/chatgpt_academic/archive/refs/heads/master.zip', proxies=proxies, stream=True) + except: r = requests.get('https://public.gpt-academic.top/publish/master.zip', proxies=proxies, stream=True) zip_file_path = backup_dir+'/master.zip' with open(zip_file_path, 'wb+') as f: f.write(r.content) @@ -111,11 +110,10 @@ def auto_update(raise_error=False): try: from toolbox import get_conf import requests - import time import json proxies = get_conf('proxies') - response = requests.get( - "https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version", proxies=proxies, timeout=5) + try: response = requests.get("https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version", proxies=proxies, timeout=5) + except: response = requests.get("https://public.gpt-academic.top/publish/version", proxies=proxies, timeout=5) remote_json_data = json.loads(response.text) remote_version = remote_json_data['version'] if remote_json_data["show_feature"]: @@ -127,8 +125,7 @@ def auto_update(raise_error=False): current_version = json.loads(current_version)['version'] if (remote_version - current_version) >= 0.01-1e-5: from colorful import print亮黄 - print亮黄( - f'\n新版本可用。新版本:{remote_version},当前版本:{current_version}。{new_feature}') + print亮黄(f'\n新版本可用。新版本:{remote_version},当前版本:{current_version}。{new_feature}') print('(1)Github更新地址:\nhttps://github.com/binary-husky/chatgpt_academic\n') user_instruction = input('(2)是否一键更新代码(Y+回车=确认,输入其他/无输入+回车=不更新)?') if user_instruction in ['Y', 'y']: @@ -154,7 +151,7 @@ def auto_update(raise_error=False): print(msg) def warm_up_modules(): - print('正在执行一些模块的预热...') + print('正在执行一些模块的预热 ...') from toolbox import ProxyNetworkActivate from request_llms.bridge_all import model_info with ProxyNetworkActivate("Warmup_Modules"): From 7754215dadb1e3fccc8da169f8773d38c514fee2 Mon Sep 17 00:00:00 2001 From: Harry67Hu Date: Wed, 22 Nov 2023 15:23:23 +0800 Subject: [PATCH 090/117] fix MacOS-zip bug --- ...Latex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git "a/crazy_functions/Latex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" "b/crazy_functions/Latex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" index a2545ddde8..f3919edc38 100644 --- "a/crazy_functions/Latex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" +++ "b/crazy_functions/Latex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" @@ -73,6 +73,7 @@ def move_project(project_folder, arxiv_id=None): # align subfolder if there is a folder wrapper items = glob.glob(pj(project_folder,'*')) + items = [item for item in items if os.path.basename(item)!='__MACOSX'] if len(glob.glob(pj(project_folder,'*.tex'))) == 0 and len(items) == 1: if os.path.isdir(items[0]): project_folder = items[0] @@ -214,7 +215,6 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo # <-------------- we are done -------------> return success - # ========================================= 插件主程序2 ===================================================== @CatchException From 1253a2b0a6d9d9a720ed3ec316062f3ff16a1809 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Thu, 23 Nov 2023 15:37:00 +0800 Subject: [PATCH 091/117] =?UTF-8?q?=E4=BF=AE=E6=AD=A3=E9=94=99=E8=AF=AF?= 
=?UTF-8?q?=E5=9C=B0=E6=8A=8A=E9=87=8D=E5=90=8D=E8=B7=AF=E5=BE=84=E5=BD=93?= =?UTF-8?q?=E6=88=90=E6=96=87=E4=BB=B6=E7=9A=84bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/latex_fns/latex_toolbox.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crazy_functions/latex_fns/latex_toolbox.py b/crazy_functions/latex_fns/latex_toolbox.py index afa65bf94d..4555ff185c 100644 --- a/crazy_functions/latex_fns/latex_toolbox.py +++ b/crazy_functions/latex_fns/latex_toolbox.py @@ -283,10 +283,10 @@ def find_tex_file_ignore_case(fp): dir_name = os.path.dirname(fp) base_name = os.path.basename(fp) # 如果输入的文件路径是正确的 - if os.path.exists(pj(dir_name, base_name)): return pj(dir_name, base_name) + if os.path.isfile(pj(dir_name, base_name)): return pj(dir_name, base_name) # 如果不正确,试着加上.tex后缀试试 if not base_name.endswith('.tex'): base_name+='.tex' - if os.path.exists(pj(dir_name, base_name)): return pj(dir_name, base_name) + if os.path.isfile(pj(dir_name, base_name)): return pj(dir_name, base_name) # 如果还找不到,解除大小写限制,再试一次 import glob for f in glob.glob(dir_name+'/*.tex'): From e533ed6d123baafaec859a7994d73934d859110a Mon Sep 17 00:00:00 2001 From: binary-husky Date: Thu, 23 Nov 2023 17:51:00 +0800 Subject: [PATCH 092/117] =?UTF-8?q?=E4=BF=AE=E6=AD=A3=E5=B9=B6=E8=A1=8C?= =?UTF-8?q?=E8=BF=90=E8=A1=8C=E6=97=B6=E7=9A=84=E6=88=AA=E6=96=AD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/crazy_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crazy_functions/crazy_utils.py b/crazy_functions/crazy_utils.py index 5788194c01..afe079f419 100644 --- a/crazy_functions/crazy_utils.py +++ b/crazy_functions/crazy_utils.py @@ -1,4 +1,4 @@ -from toolbox import update_ui, get_conf, trimmed_format_exc, get_log_folder +from toolbox import update_ui, get_conf, trimmed_format_exc, get_max_token import threading import os import logging @@ -92,7 +92,7 @@ def _req_gpt(inputs, history, sys_prompt): # 【选择处理】 尝试计算比例,尽可能多地保留文本 from toolbox import get_reduce_token_percent p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error)) - MAX_TOKEN = 4096 + MAX_TOKEN = get_max_token(llm_kwargs) EXCEED_ALLO = 512 + 512 * exceeded_cnt inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO) mutable[0] += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n' @@ -224,7 +224,7 @@ def _req_gpt(index, inputs, history, sys_prompt): # 【选择处理】 尝试计算比例,尽可能多地保留文本 from toolbox import get_reduce_token_percent p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error)) - MAX_TOKEN = 4096 + MAX_TOKEN = get_max_token(llm_kwargs) EXCEED_ALLO = 512 + 512 * exceeded_cnt inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO) gpt_say += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n' From e6716ccf630564a728cc158e4660db898f109c9b Mon Sep 17 00:00:00 2001 From: binary-husky Date: Fri, 24 Nov 2023 01:47:03 +0800 Subject: [PATCH 093/117] =?UTF-8?q?=E6=B7=BB=E5=8A=A0zhipuai=E4=BE=9D?= =?UTF-8?q?=E8=B5=96=E5=AE=89=E8=A3=85=E6=8F=90=E9=86=92?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- config.py | 4 ++++ request_llms/bridge_zhipu.py | 9 +++++++++ 2 files changed, 13 insertions(+) diff --git a/config.py b/config.py index ea603c3bf9..baaaaa8311 100644 --- a/config.py +++ b/config.py @@ -271,6 +271,10 @@ │ ├── BAIDU_CLOUD_API_KEY │ └── 
BAIDU_CLOUD_SECRET_KEY │ +├── "zhipuai" 智谱AI大模型chatglm_turbo +│ ├── ZHIPUAI_API_KEY +│ └── ZHIPUAI_MODEL +│ ├── "newbing" Newbing接口不再稳定,不推荐使用 ├── NEWBING_STYLE └── NEWBING_COOKIES diff --git a/request_llms/bridge_zhipu.py b/request_llms/bridge_zhipu.py index a1e0de5918..915a13efba 100644 --- a/request_llms/bridge_zhipu.py +++ b/request_llms/bridge_zhipu.py @@ -1,6 +1,7 @@ import time from toolbox import update_ui, get_conf, update_ui_lastest_msg +from toolbox import check_packages, report_exception model_name = '智谱AI大模型' @@ -37,6 +38,14 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp chatbot.append((inputs, "")) yield from update_ui(chatbot=chatbot, history=history) + # 尝试导入依赖,如果缺少依赖,则给出安装建议 + try: + check_packages(["zhipuai"]) + except: + yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade zhipuai```。", + chatbot=chatbot, history=history, delay=0) + return + if validate_key() is False: yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置ZHIPUAI_API_KEY", chatbot=chatbot, history=history, delay=0) return From 9916f59753827e84c6dab7c11d77389062db3a29 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Fri, 24 Nov 2023 02:35:44 +0800 Subject: [PATCH 094/117] =?UTF-8?q?=E6=8E=A5=E5=85=A5deepseek-coder?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- config.py | 28 +++++++-- request_llms/bridge_all.py | 16 +++++ request_llms/bridge_deepseekcoder.py | 88 ++++++++++++++++++++++++++++ request_llms/local_llm_class.py | 2 +- tests/test_llms.py | 3 +- 5 files changed, 129 insertions(+), 8 deletions(-) create mode 100644 request_llms/bridge_deepseekcoder.py diff --git a/config.py b/config.py index ea603c3bf9..f5788beaf8 100644 --- a/config.py +++ b/config.py @@ -91,8 +91,8 @@ "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", - "chatglm3", "moss", "newbing", "claude-2"] -# P.S. 其他可用的模型还包括 ["zhipuai", "qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random" + "chatglm3", "moss", "claude-2"] +# P.S. 
其他可用的模型还包括 ["zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random" # "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"] @@ -271,11 +271,27 @@ │ ├── BAIDU_CLOUD_API_KEY │ └── BAIDU_CLOUD_SECRET_KEY │ -├── "newbing" Newbing接口不再稳定,不推荐使用 +└── "newbing" Newbing接口不再稳定,不推荐使用 ├── NEWBING_STYLE └── NEWBING_COOKIES +本地大模型示意图 +│ +├── "chatglm3" +├── "chatglm" +├── "chatglm_onnx" +├── "chatglmft" +├── "internlm" +├── "moss" +├── "jittorllms_pangualpha" +├── "jittorllms_llama" +├── "deepseekcoder" +├── "qwen" +├── RWKV的支持见Wiki +└── "llama2" + + 用户图形界面布局依赖关系示意图 │ ├── CHATBOT_HEIGHT 对话窗的高度 @@ -286,7 +302,7 @@ ├── THEME 色彩主题 ├── AUTO_CLEAR_TXT 是否在提交时自动清空输入框 ├── ADD_WAIFU 加一个live2d装饰 -├── ALLOW_RESET_CONFIG 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性 +└── ALLOW_RESET_CONFIG 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性 插件在线服务配置依赖关系示意图 @@ -298,7 +314,7 @@ │ ├── ALIYUN_ACCESSKEY │ └── ALIYUN_SECRET │ -├── PDF文档精准解析 -│ └── GROBID_URLS +└── PDF文档精准解析 + └── GROBID_URLS """ diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index 88848a984a..8dece5487e 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -543,6 +543,22 @@ def decode(self, *args, **kwargs): }) except: print(trimmed_format_exc()) +if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder + try: + from .bridge_deepseekcoder import predict_no_ui_long_connection as deepseekcoder_noui + from .bridge_deepseekcoder import predict as deepseekcoder_ui + model_info.update({ + "deepseekcoder": { + "fn_with_ui": deepseekcoder_ui, + "fn_without_ui": deepseekcoder_noui, + "endpoint": None, + "max_token": 4096, + "tokenizer": tokenizer_gpt35, + "token_cnt": get_token_num_gpt35, + } + }) + except: + print(trimmed_format_exc()) # <-- 用于定义和切换多个azure模型 --> AZURE_CFG_ARRAY = get_conf("AZURE_CFG_ARRAY") diff --git a/request_llms/bridge_deepseekcoder.py b/request_llms/bridge_deepseekcoder.py new file mode 100644 index 0000000000..979194072a --- /dev/null +++ b/request_llms/bridge_deepseekcoder.py @@ -0,0 +1,88 @@ +model_name = "deepseek-coder-6.7b-instruct" +cmd_to_install = "未知" # "`pip install -r request_llms/requirements_qwen.txt`" + +import os +from toolbox import ProxyNetworkActivate +from toolbox import get_conf +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns +from threading import Thread + +def download_huggingface_model(model_name, max_retry, local_dir): + from huggingface_hub import snapshot_download + for i in range(1, max_retry): + try: + snapshot_download(repo_id=model_name, local_dir=local_dir, resume_download=True) + break + except Exception as e: + print(f'\n\n下载失败,重试第{i}次中...\n\n') + return local_dir +# ------------------------------------------------------------------------------------------------------------------------ +# 🔌💻 Local Model +# ------------------------------------------------------------------------------------------------------------------------ +class GetONNXGLMHandle(LocalLLMHandle): + + def load_model_info(self): + # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 + self.model_name = model_name + self.cmd_to_install = cmd_to_install + + def load_model_and_tokenizer(self): + # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 + with ProxyNetworkActivate('Download_LLM'): + from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer + model_name = "deepseek-ai/deepseek-coder-6.7b-instruct" + # local_dir = f"~/.cache/{model_name}" + # if not os.path.exists(local_dir): + # tokenizer = 
download_huggingface_model(model_name, max_retry=128, local_dir=local_dir) + tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) + self._streamer = TextIteratorStreamer(tokenizer) + model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True) + if get_conf('LOCAL_MODEL_DEVICE') != 'cpu': + model = model.cuda() + return model, tokenizer + + def llm_stream_generator(self, **kwargs): + # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 + def adaptor(kwargs): + query = kwargs['query'] + max_length = kwargs['max_length'] + top_p = kwargs['top_p'] + temperature = kwargs['temperature'] + history = kwargs['history'] + return query, max_length, top_p, temperature, history + + query, max_length, top_p, temperature, history = adaptor(kwargs) + history.append({ 'role': 'user', 'content': query}) + messages = history + inputs = self._tokenizer.apply_chat_template(messages, return_tensors="pt").to(self._model.device) + generation_kwargs = dict( + inputs=inputs, + max_new_tokens=max_length, + do_sample=False, + top_p=top_p, + streamer = self._streamer, + top_k=50, + temperature=temperature, + num_return_sequences=1, + eos_token_id=32021, + ) + thread = Thread(target=self._model.generate, kwargs=generation_kwargs, daemon=True) + thread.start() + generated_text = "" + for new_text in self._streamer: + generated_text += new_text + # print(generated_text) + yield generated_text + + + def try_to_import_special_deps(self, **kwargs): pass + # import something that will raise error if the user does not install requirement_*.txt + # 🏃‍♂️🏃‍♂️🏃‍♂️ 主进程执行 + # import importlib + # importlib.import_module('modelscope') + + +# ------------------------------------------------------------------------------------------------------------------------ +# 🔌💻 GPT-Academic Interface +# ------------------------------------------------------------------------------------------------------------------------ +predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetONNXGLMHandle, model_name, history_format='chatglm3') \ No newline at end of file diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py index 091707a768..413df03f22 100644 --- a/request_llms/local_llm_class.py +++ b/request_llms/local_llm_class.py @@ -198,7 +198,7 @@ def stream_chat(self, **kwargs): if res.startswith(self.std_tag): new_output = res[len(self.std_tag):] std_out = std_out[:std_out_clip_len] - # print(new_output, end='') + print(new_output, end='') std_out = new_output + std_out yield self.std_tag + '\n```\n' + std_out + '\n```\n' elif res == '[Finish]': diff --git a/tests/test_llms.py b/tests/test_llms.py index 6285f0309f..8b685972e8 100644 --- a/tests/test_llms.py +++ b/tests/test_llms.py @@ -15,7 +15,8 @@ def validate_path(): # from request_llms.bridge_jittorllms_pangualpha import predict_no_ui_long_connection # from request_llms.bridge_jittorllms_llama import predict_no_ui_long_connection # from request_llms.bridge_claude import predict_no_ui_long_connection - from request_llms.bridge_internlm import predict_no_ui_long_connection + # from request_llms.bridge_internlm import predict_no_ui_long_connection + from request_llms.bridge_deepseekcoder import predict_no_ui_long_connection # from request_llms.bridge_qwen import predict_no_ui_long_connection # from request_llms.bridge_spark import predict_no_ui_long_connection # from request_llms.bridge_zhipu import predict_no_ui_long_connection From fd72894c9062ffc183a138618c3114aca5208196 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Fri, 24 
Nov 2023 02:42:58 +0800 Subject: [PATCH 095/117] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E9=94=99=E8=AF=AF?= =?UTF-8?q?=E7=9A=84class=E5=91=BD=E5=90=8D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llms/bridge_deepseekcoder.py | 4 ++-- request_llms/bridge_llama2.py | 4 ++-- request_llms/bridge_qwen.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/request_llms/bridge_deepseekcoder.py b/request_llms/bridge_deepseekcoder.py index 979194072a..2242eec75c 100644 --- a/request_llms/bridge_deepseekcoder.py +++ b/request_llms/bridge_deepseekcoder.py @@ -19,7 +19,7 @@ def download_huggingface_model(model_name, max_retry, local_dir): # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -class GetONNXGLMHandle(LocalLLMHandle): +class GetCoderLMHandle(LocalLLMHandle): def load_model_info(self): # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 @@ -85,4 +85,4 @@ def try_to_import_special_deps(self, **kwargs): pass # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 GPT-Academic Interface # ------------------------------------------------------------------------------------------------------------------------ -predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetONNXGLMHandle, model_name, history_format='chatglm3') \ No newline at end of file +predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetCoderLMHandle, model_name, history_format='chatglm3') \ No newline at end of file diff --git a/request_llms/bridge_llama2.py b/request_llms/bridge_llama2.py index e6da4b755a..bfa3c14ae6 100644 --- a/request_llms/bridge_llama2.py +++ b/request_llms/bridge_llama2.py @@ -12,7 +12,7 @@ # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -class GetONNXGLMHandle(LocalLLMHandle): +class GetLlamaHandle(LocalLLMHandle): def load_model_info(self): # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 @@ -87,4 +87,4 @@ def try_to_import_special_deps(self, **kwargs): # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 GPT-Academic Interface # ------------------------------------------------------------------------------------------------------------------------ -predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetONNXGLMHandle, model_name) \ No newline at end of file +predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetLlamaHandle, model_name) \ No newline at end of file diff --git a/request_llms/bridge_qwen.py b/request_llms/bridge_qwen.py index afd886bf9d..85a4d80cbb 100644 --- a/request_llms/bridge_qwen.py +++ b/request_llms/bridge_qwen.py @@ -15,7 +15,7 @@ # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -class GetONNXGLMHandle(LocalLLMHandle): +class GetQwenLMHandle(LocalLLMHandle): def load_model_info(self): # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 @@ -64,4 +64,4 @@ def 
try_to_import_special_deps(self, **kwargs): # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 GPT-Academic Interface # ------------------------------------------------------------------------------------------------------------------------ -predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetONNXGLMHandle, model_name) \ No newline at end of file +predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetQwenLMHandle, model_name) \ No newline at end of file From 5d5695cd9af38aa9b612a6deb29b0d17bbd5d811 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Fri, 24 Nov 2023 03:19:20 +0800 Subject: [PATCH 096/117] version 3.61 --- README.md | 73 ++++++++++++++++++++----------------- request_llms/README.md | 82 ++++++++++-------------------------------- version | 4 +-- 3 files changed, 61 insertions(+), 98 deletions(-) diff --git a/README.md b/README.md index 05ba22cdb4..b1e5568ea9 100644 --- a/README.md +++ b/README.md @@ -28,7 +28,7 @@ To translate this project to arbitrary language with GPT, read and run [`multi_l 功能(⭐= 近期新增功能) | 描述 --- | --- -⭐[接入新模型](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | 百度[千帆](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)与文心一言, [通义千问](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary),上海AI-Lab[书生](https://github.com/InternLM/InternLM),讯飞[星火](https://xinghuo.xfyun.cn/),[LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf),智谱API,DALLE3 +⭐[接入新模型](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | 百度[千帆](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)与文心一言, 通义千问[Qwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary),上海AI-Lab[书生](https://github.com/InternLM/InternLM),讯飞[星火](https://xinghuo.xfyun.cn/),[LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf),[智谱API](https://open.bigmodel.cn/),DALLE3, [DeepseekCoder](https://coder.deepseek.com/) 润色、翻译、代码解释 | 一键润色、翻译、查找论文语法错误、解释代码 [自定义快捷键](https://www.bilibili.com/video/BV14s4y1E7jN) | 支持自定义快捷键 模块化设计 | 支持自定义强大的[插件](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions),插件支持[热更新](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) @@ -92,36 +92,38 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼 ### 安装方法I:直接运行 (Windows, Linux or MacOS) 1. 下载项目 -```sh -git clone --depth=1 https://github.com/binary-husky/gpt_academic.git -cd gpt_academic -``` + + ```sh + git clone --depth=1 https://github.com/binary-husky/gpt_academic.git + cd gpt_academic + ``` 2. 
配置API_KEY -在`config.py`中,配置API KEY等设置,[点击查看特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1) 。[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。 + 在`config.py`中,配置API KEY等设置,[点击查看特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1) 。[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。 -「 程序会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。如您能理解该读取逻辑,我们强烈建议您在`config.py`旁边创建一个名为`config_private.py`的新配置文件,并把`config.py`中的配置转移(复制)到`config_private.py`中(仅复制您修改过的配置条目即可)。 」 + 「 程序会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。如您能理解该读取逻辑,我们强烈建议您在`config.py`旁边创建一个名为`config_private.py`的新配置文件,并把`config.py`中的配置转移(复制)到`config_private.py`中(仅复制您修改过的配置条目即可)。 」 -「 支持通过`环境变量`配置项目,环境变量的书写格式参考`docker-compose.yml`文件或者我们的[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。配置读取优先级: `环境变量` > `config_private.py` > `config.py`。 」 + 「 支持通过`环境变量`配置项目,环境变量的书写格式参考`docker-compose.yml`文件或者我们的[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。配置读取优先级: `环境变量` > `config_private.py` > `config.py`。 」 3. 安装依赖 -```sh -# (选择I: 如熟悉python, python推荐版本 3.9 ~ 3.11)备注:使用官方pip源或者阿里pip源, 临时换源方法:python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -python -m pip install -r requirements.txt + ```sh + # (选择I: 如熟悉python, python推荐版本 3.9 ~ 3.11)备注:使用官方pip源或者阿里pip源, 临时换源方法:python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ + python -m pip install -r requirements.txt -# (选择II: 使用Anaconda)步骤也是类似的 (https://www.bilibili.com/video/BV1rc411W7Dr): -conda create -n gptac_venv python=3.11 # 创建anaconda环境 -conda activate gptac_venv # 激活anaconda环境 -python -m pip install -r requirements.txt # 这个步骤和pip安装一样的步骤 -``` + # (选择II: 使用Anaconda)步骤也是类似的 (https://www.bilibili.com/video/BV1rc411W7Dr): + conda create -n gptac_venv python=3.11 # 创建anaconda环境 + conda activate gptac_venv # 激活anaconda环境 + python -m pip install -r requirements.txt # 这个步骤和pip安装一样的步骤 + ```
<details><summary>如果需要支持清华ChatGLM2/复旦MOSS/RWKV作为后端,请点击展开此处</summary>

【可选步骤】如果需要支持清华ChatGLM2/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强): + ```sh # 【可选步骤I】支持清华ChatGLM2。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) python -m pip install -r request_llms/requirements_chatglm.txt @@ -143,39 +145,39 @@ AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt- 4. 运行 -```sh -python main.py -``` + ```sh + python main.py + ``` ### 安装方法II:使用Docker 0. 部署项目的全部能力(这个是包含cuda和latex的大型镜像。但如果您网速慢、硬盘小,则不推荐使用这个) [![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml) -``` sh -# 修改docker-compose.yml,保留方案0并删除其他方案。然后运行: -docker-compose up -``` + ``` sh + # 修改docker-compose.yml,保留方案0并删除其他方案。然后运行: + docker-compose up + ``` 1. 仅ChatGPT+文心一言+spark等在线模型(推荐大多数人选择) [![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml) [![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml) [![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml) -``` sh -# 修改docker-compose.yml,保留方案1并删除其他方案。然后运行: -docker-compose up -``` + ``` sh + # 修改docker-compose.yml,保留方案1并删除其他方案。然后运行: + docker-compose up + ``` P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以直接使用方案4或者方案0获取Latex功能。 2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + 通义千问(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行时) [![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml) -``` sh -# 修改docker-compose.yml,保留方案2并删除其他方案。然后运行: -docker-compose up -``` + ``` sh + # 修改docker-compose.yml,保留方案2并删除其他方案。然后运行: + docker-compose up + ``` ### 安装方法III:其他部署姿势 @@ -196,9 +198,11 @@ docker-compose up # Advanced Usage ### I:自定义新的便捷按钮(学术快捷键) + 任意文本编辑器打开`core_functional.py`,添加条目如下,然后重启程序。(如按钮已存在,那么前缀、后缀都支持热修改,无需重启程序即可生效。) 例如 -``` + +```python "超级英译中": { # 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等 "Prefix": "请翻译把下面一段内容成中文,然后用一个markdown表格逐一解释文中出现的专有名词:\n\n", @@ -207,6 +211,7 @@ docker-compose up "Suffix": "", }, ``` +

@@ -283,6 +288,7 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h ### II:版本: + - version 3.70(todo): 优化AutoGen插件主题并设计一系列衍生插件 - version 3.60: 引入AutoGen作为新一代插件的基石 - version 3.57: 支持GLM3,星火v3,文心一言v4,修复本地模型的并发BUG @@ -303,7 +309,7 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h - version 3.0: 对chatglm和其他小型llm的支持 - version 2.6: 重构了插件结构,提高了交互性,加入更多插件 - version 2.5: 自更新,解决总结大工程源代码时文本过长、token溢出的问题 -- version 2.4: (1)新增PDF全文翻译功能; (2)新增输入区切换位置的功能; (3)新增垂直布局选项; (4)多线程函数插件优化。 +- version 2.4: 新增PDF全文翻译功能; 新增输入区切换位置的功能 - version 2.3: 增强多线程交互性 - version 2.2: 函数插件支持热重载 - version 2.1: 可折叠式布局 @@ -325,6 +331,7 @@ GPT Academic开发者QQ群:`610599535` 1. `master` 分支: 主分支,稳定版 2. `frontier` 分支: 开发分支,测试版 +3. 如何接入其他大模型:[接入其他大模型](request_llms/README.md) ### V:参考与学习 diff --git a/request_llms/README.md b/request_llms/README.md index 92b856e30b..288bc1352e 100644 --- a/request_llms/README.md +++ b/request_llms/README.md @@ -1,79 +1,35 @@ -# 如何使用其他大语言模型 +P.S. 如果您按照以下步骤成功接入了新的大模型,欢迎发Pull Requests(如果您在自己接入新模型的过程中遇到困难,欢迎加README底部QQ群联系群主) -## ChatGLM -- 安装依赖 `pip install -r request_llms/requirements_chatglm.txt` -- 修改配置,在config.py中将LLM_MODEL的值改为"chatglm" +# 如何接入其他本地大语言模型 -``` sh -LLM_MODEL = "chatglm" -``` -- 运行! -``` sh -`python main.py` -``` +1. 复制`request_llms/bridge_llama2.py`,重命名为你喜欢的名字 -## Claude-Stack +2. 修改`load_model_and_tokenizer`方法,加载你的模型和分词器(去该模型官网找demo,复制粘贴即可) -- 请参考此教程获取 https://zhuanlan.zhihu.com/p/627485689 - - 1、SLACK_CLAUDE_BOT_ID - - 2、SLACK_CLAUDE_USER_TOKEN +3. 修改`llm_stream_generator`方法,定义推理模型(去该模型官网找demo,复制粘贴即可) -- 把token加入config.py +4. 命令行测试 + - 修改`tests/test_llms.py`(聪慧如您,只需要看一眼该文件就明白怎么修改了) + - 运行`python tests/test_llms.py` -## Newbing +5. 测试通过后,在`request_llms/bridge_all.py`中做最后的修改,把你的模型完全接入到框架中(聪慧如您,只需要看一眼该文件就明白怎么修改了) -- 使用cookie editor获取cookie(json) -- 把cookie(json)加入config.py (NEWBING_COOKIES) +6. 修改`LLM_MODEL`配置,然后运行`python main.py`,测试最后的效果 -## Moss -- 使用docker-compose -## RWKV -- 使用docker-compose +# 如何接入其他在线大语言模型 -## LLAMA -- 使用docker-compose +1. 复制`request_llms/bridge_zhipu.py`,重命名为你喜欢的名字 -## 盘古 -- 使用docker-compose +2. 修改`predict_no_ui_long_connection` +3. 修改`predict` ---- -## Text-Generation-UI (TGUI,调试中,暂不可用) +4. 命令行测试 + - 修改`tests/test_llms.py`(聪慧如您,只需要看一眼该文件就明白怎么修改了) + - 运行`python tests/test_llms.py` -### 1. 部署TGUI -``` sh -# 1 下载模型 -git clone https://github.com/oobabooga/text-generation-webui.git -# 2 这个仓库的最新代码有问题,回滚到几周之前 -git reset --hard fcda3f87767e642d1c0411776e549e1d3894843d -# 3 切换路径 -cd text-generation-webui -# 4 安装text-generation的额外依赖 -pip install accelerate bitsandbytes flexgen gradio llamacpp markdown numpy peft requests rwkv safetensors sentencepiece tqdm datasets git+https://github.com/huggingface/transformers -# 5 下载模型 -python download-model.py facebook/galactica-1.3b -# 其他可选如 facebook/opt-1.3b -# facebook/galactica-1.3b -# facebook/galactica-6.7b -# facebook/galactica-120b -# facebook/pygmalion-1.3b 等 -# 详情见 https://github.com/oobabooga/text-generation-webui +5. 测试通过后,在`request_llms/bridge_all.py`中做最后的修改,把你的模型完全接入到框架中(聪慧如您,只需要看一眼该文件就明白怎么修改了) -# 6 启动text-generation -python server.py --cpu --listen --listen-port 7865 --model facebook_galactica-1.3b -``` - -### 2. 修改config.py - -``` sh -# LLM_MODEL格式: tgui:[模型]@[ws地址]:[ws端口] , 端口要和上面给定的端口一致 -LLM_MODEL = "tgui:galactica-1.3b@localhost:7860" -``` - -### 3. 运行! -``` sh -cd chatgpt-academic -python main.py -``` +6. 
修改`LLM_MODEL`配置,然后运行`python main.py`,测试最后的效果 \ No newline at end of file diff --git a/version b/version index cf14e82bf2..5f6de09c8a 100644 --- a/version +++ b/version @@ -1,5 +1,5 @@ { - "version": 3.60, + "version": 3.61, "show_feature": true, - "new_feature": "修复多个BUG <-> AutoGen多智能体插件测试版 <-> 修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验" + "new_feature": "修复潜在的多用户冲突问题 <-> 接入Deepseek Coder <-> AutoGen多智能体插件测试版 <-> 修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮" } From 7ca37c4831bef9bef9bbac28665f754f105a84fd Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sat, 25 Nov 2023 23:14:57 +0800 Subject: [PATCH 097/117] =?UTF-8?q?=E6=8A=8Agpt-4-vision-preview=E6=B7=BB?= =?UTF-8?q?=E5=8A=A0=E5=88=B0=E6=94=AF=E6=8C=81=E5=88=97=E8=A1=A8=E4=B8=AD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.py b/config.py index c7b15fa469..f170a2bbbb 100644 --- a/config.py +++ b/config.py @@ -87,7 +87,7 @@ # 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 ) LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓ -AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview", +AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview","gpt-4-vision-preview", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", From 577d3d566b6b42eb410f6fe64507008af9df59f8 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Wed, 29 Nov 2023 00:05:26 +0800 Subject: [PATCH 098/117] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E7=9C=8B=E6=9D=BF?= =?UTF-8?q?=E5=A8=98=E4=B8=8D=E6=96=AD=E5=88=86=E8=A3=82=E7=9A=84BUG?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- themes/contrast.py | 12 ++++++++---- themes/default.py | 6 ++++-- themes/gradios.py | 15 ++++++++------- themes/green.py | 19 +++++++++++-------- 4 files changed, 31 insertions(+), 21 deletions(-) diff --git a/themes/contrast.py b/themes/contrast.py index d407d92ab5..bf48808d93 100644 --- a/themes/contrast.py +++ b/themes/contrast.py @@ -1,6 +1,8 @@ +import os import gradio as gr from toolbox import get_conf CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'LAYOUT') +theme_dir = os.path.dirname(__file__) def adjust_theme(): @@ -57,7 +59,7 @@ def adjust_theme(): button_cancel_text_color_dark="white", ) - with open('themes/common.js', 'r', encoding='utf8') as f: + with open(os.path.join(theme_dir, 'common.js'), 'r', encoding='utf8') as f: js = f"" # 添加一个萌萌的看板娘 @@ -67,7 +69,9 @@ def adjust_theme(): """ - gradio_original_template_fn = gr.routes.templates.TemplateResponse + if not hasattr(gr, 'RawTemplateResponse'): + gr.RawTemplateResponse = gr.routes.templates.TemplateResponse + gradio_original_template_fn = gr.RawTemplateResponse def gradio_new_template_fn(*args, **kwargs): res = gradio_original_template_fn(*args, **kwargs) res.body = res.body.replace(b'', f'{js}'.encode("utf8")) @@ -79,7 +83,7 @@ def gradio_new_template_fn(*args, **kwargs): print('gradio版本较旧, 不能自定义字体和颜色') return set_theme -with open("themes/contrast.css", "r", encoding="utf-8") as f: +with open(os.path.join(theme_dir, 'contrast.css'), "r", encoding="utf-8") as f: advanced_css = f.read() -with open("themes/common.css", "r", encoding="utf-8") as f: +with open(os.path.join(theme_dir, 
'common.css'), "r", encoding="utf-8") as f: advanced_css += f.read() diff --git a/themes/default.py b/themes/default.py index 6fa2ba5c53..64b6720943 100644 --- a/themes/default.py +++ b/themes/default.py @@ -60,7 +60,7 @@ def adjust_theme(): with open(os.path.join(theme_dir, 'common.js'), 'r', encoding='utf8') as f: js = f"" - + # 添加一个萌萌的看板娘 if ADD_WAIFU: js += """ @@ -68,7 +68,9 @@ def adjust_theme(): """ - gradio_original_template_fn = gr.routes.templates.TemplateResponse + if not hasattr(gr, 'RawTemplateResponse'): + gr.RawTemplateResponse = gr.routes.templates.TemplateResponse + gradio_original_template_fn = gr.RawTemplateResponse def gradio_new_template_fn(*args, **kwargs): res = gradio_original_template_fn(*args, **kwargs) res.body = res.body.replace(b'', f'{js}'.encode("utf8")) diff --git a/themes/gradios.py b/themes/gradios.py index 96a9c54e60..9db134159c 100644 --- a/themes/gradios.py +++ b/themes/gradios.py @@ -1,7 +1,9 @@ -import gradio as gr import logging +import os +import gradio as gr from toolbox import get_conf, ProxyNetworkActivate CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'LAYOUT') +theme_dir = os.path.dirname(__file__) def dynamic_set_theme(THEME): set_theme = gr.themes.ThemeClass() @@ -13,7 +15,6 @@ def dynamic_set_theme(THEME): return set_theme def adjust_theme(): - try: set_theme = gr.themes.ThemeClass() with ProxyNetworkActivate('Download_Gradio_Theme'): @@ -23,7 +24,7 @@ def adjust_theme(): if THEME.startswith('huggingface-'): THEME = THEME.lstrip('huggingface-') set_theme = set_theme.from_hub(THEME.lower()) - with open('themes/common.js', 'r', encoding='utf8') as f: + with open(os.path.join(theme_dir, 'common.js'), 'r', encoding='utf8') as f: js = f"" # 添加一个萌萌的看板娘 @@ -33,7 +34,9 @@ def adjust_theme(): """ - gradio_original_template_fn = gr.routes.templates.TemplateResponse + if not hasattr(gr, 'RawTemplateResponse'): + gr.RawTemplateResponse = gr.routes.templates.TemplateResponse + gradio_original_template_fn = gr.RawTemplateResponse def gradio_new_template_fn(*args, **kwargs): res = gradio_original_template_fn(*args, **kwargs) res.body = res.body.replace(b'', f'{js}'.encode("utf8")) @@ -46,7 +49,5 @@ def gradio_new_template_fn(*args, **kwargs): logging.error('gradio版本较旧, 不能自定义字体和颜色:', trimmed_format_exc()) return set_theme -# with open("themes/default.css", "r", encoding="utf-8") as f: -# advanced_css = f.read() -with open("themes/common.css", "r", encoding="utf-8") as f: +with open(os.path.join(theme_dir, 'common.css'), "r", encoding="utf-8") as f: advanced_css = f.read() diff --git a/themes/green.py b/themes/green.py index a29a0fa100..326981e3d8 100644 --- a/themes/green.py +++ b/themes/green.py @@ -1,6 +1,8 @@ +import os import gradio as gr from toolbox import get_conf CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'LAYOUT') +theme_dir = os.path.dirname(__file__) def adjust_theme(): try: @@ -73,7 +75,7 @@ def adjust_theme(): chatbot_code_background_color_dark="*neutral_950", ) - with open('themes/common.js', 'r', encoding='utf8') as f: + with open(os.path.join(theme_dir, 'common.js'), 'r', encoding='utf8') as f: js = f"" # 添加一个萌萌的看板娘 @@ -83,11 +85,13 @@ def adjust_theme(): """ - - with open('themes/green.js', 'r', encoding='utf8') as f: + + with open(os.path.join(theme_dir, 'green.js'), 'r', encoding='utf8') as f: js += f"" - - gradio_original_template_fn = gr.routes.templates.TemplateResponse + + if not hasattr(gr, 'RawTemplateResponse'): + gr.RawTemplateResponse = 
gr.routes.templates.TemplateResponse + gradio_original_template_fn = gr.RawTemplateResponse def gradio_new_template_fn(*args, **kwargs): res = gradio_original_template_fn(*args, **kwargs) res.body = res.body.replace(b'', f'{js}'.encode("utf8")) @@ -99,8 +103,7 @@ def gradio_new_template_fn(*args, **kwargs): print('gradio版本较旧, 不能自定义字体和颜色') return set_theme - -with open("themes/green.css", "r", encoding="utf-8") as f: +with open(os.path.join(theme_dir, 'green.css'), "r", encoding="utf-8") as f: advanced_css = f.read() -with open("themes/common.css", "r", encoding="utf-8") as f: +with open(os.path.join(theme_dir, 'common.css'), "r", encoding="utf-8") as f: advanced_css += f.read() From 8c840f3d4cd6fb1b42459716137c8042554a6c2a Mon Sep 17 00:00:00 2001 From: binary-husky Date: Wed, 29 Nov 2023 00:28:13 +0800 Subject: [PATCH 099/117] =?UTF-8?q?=E7=9C=8B=E6=9D=BF=E5=A8=98=E6=95=88?= =?UTF-8?q?=E6=9E=9C=E4=BF=AE=E6=AD=A3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/waifu_plugin/waifu-tips.js | 34 +------------------------------ docs/waifu_plugin/waifu-tips.json | 6 ++---- 2 files changed, 3 insertions(+), 37 deletions(-) diff --git a/docs/waifu_plugin/waifu-tips.js b/docs/waifu_plugin/waifu-tips.js index 8f9533a19e..d1b214d7e2 100644 --- a/docs/waifu_plugin/waifu-tips.js +++ b/docs/waifu_plugin/waifu-tips.js @@ -258,39 +258,7 @@ function loadTipsMessage(result) { }); window.showWelcomeMessage = function(result) { - var text; - if (window.location.href == live2d_settings.homePageUrl) { - var now = (new Date()).getHours(); - if (now > 23 || now <= 5) text = getRandText(result.waifu.hour_tips['t23-5']); - else if (now > 5 && now <= 7) text = getRandText(result.waifu.hour_tips['t5-7']); - else if (now > 7 && now <= 11) text = getRandText(result.waifu.hour_tips['t7-11']); - else if (now > 11 && now <= 14) text = getRandText(result.waifu.hour_tips['t11-14']); - else if (now > 14 && now <= 17) text = getRandText(result.waifu.hour_tips['t14-17']); - else if (now > 17 && now <= 19) text = getRandText(result.waifu.hour_tips['t17-19']); - else if (now > 19 && now <= 21) text = getRandText(result.waifu.hour_tips['t19-21']); - else if (now > 21 && now <= 23) text = getRandText(result.waifu.hour_tips['t21-23']); - else text = getRandText(result.waifu.hour_tips.default); - } else { - var referrer_message = result.waifu.referrer_message; - if (document.referrer !== '') { - var referrer = document.createElement('a'); - referrer.href = document.referrer; - var domain = referrer.hostname.split('.')[1]; - if (window.location.hostname == referrer.hostname) - text = referrer_message.localhost[0] + document.title.split(referrer_message.localhost[2])[0] + referrer_message.localhost[1]; - else if (domain == 'baidu') - text = referrer_message.baidu[0] + referrer.search.split('&wd=')[1].split('&')[0] + referrer_message.baidu[1]; - else if (domain == 'so') - text = referrer_message.so[0] + referrer.search.split('&q=')[1].split('&')[0] + referrer_message.so[1]; - else if (domain == 'google') - text = referrer_message.google[0] + document.title.split(referrer_message.google[2])[0] + referrer_message.google[1]; - else { - $.each(result.waifu.referrer_hostname, function(i,val) {if (i==referrer.hostname) referrer.hostname = getRandText(val)}); - text = referrer_message.default[0] + referrer.hostname + referrer_message.default[1]; - } - } else text = referrer_message.none[0] + document.title.split(referrer_message.none[2])[0] + referrer_message.none[1]; - } - 
showMessage(text, 6000); + showMessage('欢迎使用GPT-Academic', 6000); }; if (live2d_settings.showWelcomeMessage) showWelcomeMessage(result); var waifu_tips = result.waifu; diff --git a/docs/waifu_plugin/waifu-tips.json b/docs/waifu_plugin/waifu-tips.json index 229d5a14ca..30f263153f 100644 --- a/docs/waifu_plugin/waifu-tips.json +++ b/docs/waifu_plugin/waifu-tips.json @@ -83,8 +83,8 @@ "很多强大的函数插件隐藏在下拉菜单中呢。", "红色的插件,使用之前需要把文件上传进去哦。", "想添加功能按钮吗?读读readme很容易就学会啦。", - "敏感或机密的信息,不可以问chatGPT的哦!", - "chatGPT究竟是划时代的创新,还是扼杀创造力的毒药呢?" + "敏感或机密的信息,不可以问AI的哦!", + "LLM究竟是划时代的创新,还是扼杀创造力的毒药呢?" ] } ], "click": [ @@ -92,8 +92,6 @@ "selector": ".waifu #live2d", "text": [ "是…是不小心碰到了吧", - "萝莉控是什么呀", - "你看到我的小熊了吗", "再摸的话我可要报警了!⌇●﹏●⌇", "110吗,这里有个变态一直在摸我(ó﹏ò。)" ] From 688df6aa24d9f18b46635547334444d8f2224767 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Wed, 29 Nov 2023 11:28:37 +0800 Subject: [PATCH 100/117] Update README.md --- README.md | 48 ++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 40 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index b1e5568ea9..05fb0ba520 100644 --- a/README.md +++ b/README.md @@ -1,34 +1,65 @@ -> **Note** +> **Caution** > > 2023.11.12: 某些依赖包尚不兼容python 3.12,推荐python 3.11。 > > 2023.11.7: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目开源免费,近期发现有人蔑视开源协议并利用本项目违规圈钱,请提高警惕,谨防上当受骗。 +
+

+ GPT 学术优化 (GPT Academic) +

+ +[![Github][Github-image]][Github-url] +[![Releases][Releases-image]][Releases-url] +[![Installation][Installation-image]][Installation-url] +[![Wiki][Wiki-image]][Wiki-url] +[![PR][PRs-image]][PRs-url] + +[Github-image]: https://img.shields.io/badge/github-12100E.svg?&logo=github&logoColor=white +[Releases-image]: https://img.shields.io/badge/Releases-v3.6.0-blue +[Installation-image]: https://img.shields.io/badge/Installation-v3.6.1-blue +[Wiki-image]: https://img.shields.io/badge/wiki-000000.svg?logo=wikipedia +[PRs-image]: https://img.shields.io/badge/PRs-welcome-pink -#
GPT 学术优化 (GPT Academic)
+[Github-url]: https://github.com/binary-husky/gpt_academic +[Releases-url]: https://github.com/binary-husky/gpt_academic/releases +[Installation-url]: https://github.com/binary-husky/gpt_academic#installation +[Wiki-url]: https://github.com/binary-husky/gpt_academic/wiki +[PRs-url]: https://github.com/binary-husky/gpt_academic/pulls + + +
**如果喜欢这个项目,请给它一个Star;如果您发明了好用的快捷键或插件,欢迎发pull requests!** -If you like this project, please give it a Star. We also have a README in [English|](docs/README.English.md)[日本語|](docs/README.Japanese.md)[한국어|](docs/README.Korean.md)[Русский|](docs/README.Russian.md)[Français](docs/README.French.md) translated by this project itself. +If you like this project, please give it a Star. + + +Read this in [English | ](docs/README.English.md)[日本語 | ](docs/README.Japanese.md)[한국어 | ](docs/README.Korean.md)[Русский | ](docs/README.Russian.md)[Français](docs/README.French.md). All translations have been provided by the project itself. + To translate this project to arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental). +

> **Note** > > 1.请注意只有 **高亮** 标识的插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR。 > -> 2.本项目中每个文件的功能都在[自译解报告`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题[`wiki`](https://github.com/binary-husky/gpt_academic/wiki)。[常规安装方法](#installation) | [一键安装脚本](https://github.com/binary-husky/gpt_academic/releases) | [配置说明](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。 +> 2.本项目中每个文件的功能都在[自译解报告](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)`self_analysis.md`详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题请查阅wiki。 +> +> [![常规安装方法](https://img.shields.io/static/v1?label=&message=常规安装方法&color=gray)](#installation) [![一键安装脚本](https://img.shields.io/static/v1?label=&message=一键安装脚本&color=gray)](https://github.com/binary-husky/gpt_academic/releases) [![配置说明](https://img.shields.io/static/v1?label=&message=配置说明&color=gray)](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) [![wiki](https://img.shields.io/static/v1?label=&message=wiki&color=gray)]([https://github.com/binary-husky/gpt_academic/wiki/项目配置说明](https://github.com/binary-husky/gpt_academic/wiki)) > > 3.本项目兼容并鼓励尝试国产大语言模型ChatGLM等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交后即可生效。 +
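
Note 3 above describes a comma-separated multi-key convention. The sketch below shows one way such a string could be split and a matching key picked per request; `pick_api_key` is a hypothetical helper and does not reproduce the project's real key-routing logic:

```python
# Hedged sketch of the comma-separated multi-key convention from note 3.
# pick_api_key is illustrative; the project's actual selection may differ.
import random

API_KEY = "openai-key1,openai-key2,azure-key3,api2d-key4"

def pick_api_key(joined_keys: str, prefix: str = "openai-") -> str:
    """Return one configured key whose name starts with the provider prefix."""
    candidates = [k.strip() for k in joined_keys.split(",")
                  if k.strip().startswith(prefix)]
    if not candidates:
        raise ValueError(f"no API key configured for provider prefix {prefix!r}")
    return random.choice(candidates)

print(pick_api_key(API_KEY))            # one of the two openai-* keys
print(pick_api_key(API_KEY, "azure-"))  # -> 'azure-key3'
```
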

- +# Features Overview
功能(⭐= 近期新增功能) | 描述 --- | --- -⭐[接入新模型](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | 百度[千帆](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)与文心一言, 通义千问[Qwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary),上海AI-Lab[书生](https://github.com/InternLM/InternLM),讯飞[星火](https://xinghuo.xfyun.cn/),[LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf),[智谱API](https://open.bigmodel.cn/),DALLE3, [DeepseekCoder](https://coder.deepseek.com/) +⭐[接入新模型](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B) | 百度[千帆](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)与文心一言, 通义千问[Qwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary),上海AI-Lab[书生](https://github.com/InternLM/InternLM),讯飞[星火](https://xinghuo.xfyun.cn/),[LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf),[智谱API](https://open.bigmodel.cn/),DALLE3, [DeepseekCoder](https://coder.deepseek.com/) 润色、翻译、代码解释 | 一键润色、翻译、查找论文语法错误、解释代码 [自定义快捷键](https://www.bilibili.com/video/BV14s4y1E7jN) | 支持自定义快捷键 模块化设计 | 支持自定义强大的[插件](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions),插件支持[热更新](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) @@ -57,9 +88,10 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼
+ - 新界面(修改`config.py`中的LAYOUT选项即可实现“左右布局”和“上下布局”的切换)
- +
@@ -100,7 +132,7 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼 2. 配置API_KEY - 在`config.py`中,配置API KEY等设置,[点击查看特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1) 。[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。 + 在`config.py`中,配置API KEY等设置,[点击查看特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1) 。 [Wiki-项目配置说明](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。 「 程序会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。如您能理解该读取逻辑,我们强烈建议您在`config.py`旁边创建一个名为`config_private.py`的新配置文件,并把`config.py`中的配置转移(复制)到`config_private.py`中(仅复制您修改过的配置条目即可)。 」 From d39945c415e616f621c394da15a0bc379b471caf Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Wed, 29 Nov 2023 11:38:59 +0800 Subject: [PATCH 101/117] Update README.md --- README.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 05fb0ba520..b6af2cfdac 100644 --- a/README.md +++ b/README.md @@ -67,7 +67,7 @@ To translate this project to arbitrary language with GPT, read and run [`multi_l 读论文、[翻译](https://www.bilibili.com/video/BV1KT411x7Wn)论文 | [插件] 一键解读latex/pdf论文全文并生成摘要 Latex全文[翻译](https://www.bilibili.com/video/BV1nk4y1Y7Js/)、[润色](https://www.bilibili.com/video/BV1FT411H7c5/) | [插件] 一键翻译或润色latex论文 批量注释生成 | [插件] 一键批量生成函数注释 -Markdown[中英互译](https://www.bilibili.com/video/BV1yo4y157jV/) | [插件] 看到上面5种语言的[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)了吗? +Markdown[中英互译](https://www.bilibili.com/video/BV1yo4y157jV/) | [插件] 看到上面5种语言的[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)了吗?就是出自他的手笔 chat分析报告生成 | [插件] 运行后自动生成总结汇报 [PDF论文全文翻译功能](https://www.bilibili.com/video/BV1KT411x7Wn) | [插件] PDF论文提取题目&摘要+翻译全文(多线程) [Arxiv小助手](https://www.bilibili.com/video/BV1LM4y1279X) | [插件] 输入arxiv文章url即可一键翻译摘要+下载PDF @@ -79,7 +79,7 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼 公式/图片/表格显示 | 可以同时显示公式的[tex形式和渲染形式](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png),支持公式、代码高亮 ⭐AutoGen多智能体插件 | [插件] 借助微软AutoGen,探索多Agent的智能涌现可能! 启动暗色[主题](https://github.com/binary-husky/gpt_academic/issues/173) | 在浏览器url后面添加```/?__theme=dark```可以切换dark主题 -[多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持 | 同时被GPT3.5、GPT4、[清华ChatGLM2](https://github.com/THUDM/ChatGLM2-6B)、[复旦MOSS](https://github.com/OpenLMLab/MOSS)同时伺候的感觉一定会很不错吧? +[多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持 | 同时被GPT3.5、GPT4、[清华ChatGLM2](https://github.com/THUDM/ChatGLM2-6B)、[复旦MOSS](https://github.com/OpenLMLab/MOSS)伺候的感觉一定会很不错吧? ⭐ChatGLM2微调模型 | 支持加载ChatGLM2微调模型,提供ChatGLM2微调辅助插件 更多LLM模型接入,支持[huggingface部署](https://huggingface.co/spaces/qingxu98/gpt-academic) | 加入Newbing接口(新必应),引入清华[Jittorllms](https://github.com/Jittor/JittorLLMs)支持[LLaMA](https://github.com/facebookresearch/llama)和[盘古α](https://openi.org.cn/pangu/) ⭐[void-terminal](https://github.com/binary-husky/void-terminal) pip包 | 脱离GUI,在Python中直接调用本项目的所有函数插件(开发中) @@ -95,7 +95,7 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼 -- 所有按钮都通过读取functional.py动态生成,可随意加自定义功能,解放粘贴板 +- 所有按钮都通过读取functional.py动态生成,可随意加自定义功能,解放剪贴板
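
The configuration paragraphs in the README hunks above state that same-named entries in `config_private.py` override `config.py`, and the adjacent hunks give the full precedence as environment variable > `config_private.py` > `config.py`. A compact sketch of that read order, assuming a hypothetical `read_conf` helper (the project's real implementation lives in its own toolbox code):

```python
# Illustrative sketch of the documented config precedence:
# environment variable > config_private.py > config.py.
# read_conf is a hypothetical helper, not the project's actual function.
import importlib
import os

def read_conf(name: str):
    if name in os.environ:                 # highest priority; note env values
        return os.environ[name]            # arrive as strings, so real code
    try:                                   # would also handle type coercion
        private = importlib.import_module("config_private")
        if hasattr(private, name):         # private file overrides config.py
            return getattr(private, name)
    except ModuleNotFoundError:
        pass                               # no config_private.py present
    return getattr(importlib.import_module("config"), name)

API_KEY = read_conf("API_KEY")
```
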
@@ -105,7 +105,7 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼 -- 如果输出包含公式,会同时以tex形式和渲染形式显示,方便复制和阅读 +- 如果输出包含公式,会以tex形式和渲染形式同时显示,方便复制和阅读
@@ -134,9 +134,9 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼 在`config.py`中,配置API KEY等设置,[点击查看特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1) 。 [Wiki-项目配置说明](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。 - 「 程序会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。如您能理解该读取逻辑,我们强烈建议您在`config.py`旁边创建一个名为`config_private.py`的新配置文件,并把`config.py`中的配置转移(复制)到`config_private.py`中(仅复制您修改过的配置条目即可)。 」 + 「 程序会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。如您能理解该读取逻辑,我们强烈建议您在`config.py`旁边创建一个名为`config_private.py`的新配置文件,并把`config.py`中的配置转移(复制)到`config_private.py`中(仅复制您修改过的配置条目即可) 」。 - 「 支持通过`环境变量`配置项目,环境变量的书写格式参考`docker-compose.yml`文件或者我们的[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。配置读取优先级: `环境变量` > `config_private.py` > `config.py`。 」 + 「 支持通过`环境变量`配置项目,环境变量的书写格式参考`docker-compose.yml`文件或者我们的[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。配置读取优先级: `环境变量` > `config_private.py` > `config.py` 」。 3. 安装依赖 @@ -203,7 +203,7 @@ AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt- P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以直接使用方案4或者方案0获取Latex功能。 -2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + 通义千问(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行时) +2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + 通义千问(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行) [![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml) ``` sh @@ -215,7 +215,7 @@ P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以 ### 安装方法III:其他部署姿势 1. **Windows一键运行脚本**。 完全不熟悉python环境的Windows用户可以下载[Release](https://github.com/binary-husky/gpt_academic/releases)中发布的一键运行脚本安装无本地模型的版本。 -脚本的贡献来源是[oobabooga](https://github.com/oobabooga/one-click-installers)。 +脚本的贡献来源:[oobabooga](https://github.com/oobabooga/one-click-installers)。 2. 使用第三方API、Azure等、文心一言、星火等,见[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) @@ -231,7 +231,7 @@ P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以 # Advanced Usage ### I:自定义新的便捷按钮(学术快捷键) -任意文本编辑器打开`core_functional.py`,添加条目如下,然后重启程序。(如按钮已存在,那么前缀、后缀都支持热修改,无需重启程序即可生效。) +任意文本编辑器打开`core_functional.py`,添加如下条目,然后重启程序。(如果按钮已存在,那么可以直接修改(前缀、后缀都已支持热修改),无需重启程序即可生效。) 例如 ```python @@ -352,7 +352,7 @@ GPT Academic开发者QQ群:`610599535` - 已知问题 - 某些浏览器翻译插件干扰此软件前端的运行 - - 官方Gradio目前有很多兼容性Bug,请务必使用`requirement.txt`安装Gradio + - 官方Gradio目前有很多兼容性Bug,请**务必使用`requirement.txt`安装Gradio** ### III:主题 可以通过修改`THEME`选项(config.py)变更主题 From d57bb8afbe5947cf673c2ee6051eda8dcaa76147 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Wed, 29 Nov 2023 11:41:05 +0800 Subject: [PATCH 102/117] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b6af2cfdac..6f93981728 100644 --- a/README.md +++ b/README.md @@ -214,8 +214,7 @@ P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以 ### 安装方法III:其他部署姿势 1. 
**Windows一键运行脚本**。 -完全不熟悉python环境的Windows用户可以下载[Release](https://github.com/binary-husky/gpt_academic/releases)中发布的一键运行脚本安装无本地模型的版本。 -脚本的贡献来源:[oobabooga](https://github.com/oobabooga/one-click-installers)。 +完全不熟悉python环境的Windows用户可以下载[Release](https://github.com/binary-husky/gpt_academic/releases)中发布的一键运行脚本安装无本地模型的版本。脚本贡献来源:[oobabooga](https://github.com/oobabooga/one-click-installers)。 2. 使用第三方API、Azure等、文心一言、星火等,见[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) @@ -250,6 +249,7 @@ P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以 ### II:自定义函数插件 编写强大的函数插件来执行任何你想得到的和想不到的任务。 + 本项目的插件编写、调试难度很低,只要您具备一定的python基础知识,就可以仿照我们提供的模板实现自己的插件功能。 详情请参考[函数插件指南](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)。 From 8780fe29f1083f92fca446a4162c28228bbc6121 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Wed, 29 Nov 2023 13:07:27 +0800 Subject: [PATCH 103/117] Update README.md --- README.md | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 6f93981728..b38a1c3807 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,7 @@ > > 2023.11.7: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目开源免费,近期发现有人蔑视开源协议并利用本项目违规圈钱,请提高警惕,谨防上当受骗。 +
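
Since the hunk above touches the custom function plugin (自定义函数插件) section, a hedged sketch of a minimal plugin may help orient readers. The signature is inferred from the plugins elsewhere in this patch series; consult the project's 函数插件指南 for the authoritative template:

```python
# Hedged sketch of a minimal function plugin, modeled on the plugins visible
# in this patch series (e.g. crazy_functions/latex_fns). The exact signature
# is an assumption, not a guaranteed interface.
from toolbox import update_ui  # the same UI-refresh helper the plugins above use

def example_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # Echo the user's input back into the chat window, then refresh the UI.
    chatbot.append((txt, f"Received {len(txt)} characters."))
    yield from update_ui(chatbot=chatbot, history=history)
```
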

@@ -63,7 +64,7 @@ To translate this project to arbitrary language with GPT, read and run [`multi_l 润色、翻译、代码解释 | 一键润色、翻译、查找论文语法错误、解释代码 [自定义快捷键](https://www.bilibili.com/video/BV14s4y1E7jN) | 支持自定义快捷键 模块化设计 | 支持自定义强大的[插件](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions),插件支持[热更新](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) -[程序剖析](https://www.bilibili.com/video/BV1cj411A7VW) | [插件] 一键可以剖析Python/C/C++/Java/Lua/...项目树 或 [自我剖析](https://www.bilibili.com/video/BV1cj411A7VW) +[程序剖析](https://www.bilibili.com/video/BV1cj411A7VW) | [插件] 一键剖析Python/C/C++/Java/Lua/...项目树 或 [自我剖析](https://www.bilibili.com/video/BV1cj411A7VW) 读论文、[翻译](https://www.bilibili.com/video/BV1KT411x7Wn)论文 | [插件] 一键解读latex/pdf论文全文并生成摘要 Latex全文[翻译](https://www.bilibili.com/video/BV1nk4y1Y7Js/)、[润色](https://www.bilibili.com/video/BV1FT411H7c5/) | [插件] 一键翻译或润色latex论文 批量注释生成 | [插件] 一键批量生成函数注释 @@ -83,7 +84,7 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼 ⭐ChatGLM2微调模型 | 支持加载ChatGLM2微调模型,提供ChatGLM2微调辅助插件 更多LLM模型接入,支持[huggingface部署](https://huggingface.co/spaces/qingxu98/gpt-academic) | 加入Newbing接口(新必应),引入清华[Jittorllms](https://github.com/Jittor/JittorLLMs)支持[LLaMA](https://github.com/facebookresearch/llama)和[盘古α](https://openi.org.cn/pangu/) ⭐[void-terminal](https://github.com/binary-husky/void-terminal) pip包 | 脱离GUI,在Python中直接调用本项目的所有函数插件(开发中) -⭐虚空终端插件 | [插件] 用自然语言,直接调度本项目其他插件 +⭐虚空终端插件 | [插件] 能够使用自然语言直接调度本项目其他插件 更多新功能展示 (图像生成等) …… | 见本文档结尾处 ……

@@ -110,7 +111,7 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼 -- 懒得看项目代码?整个工程直接给chatgpt炫嘴里 +- 懒得看项目代码?直接把整个工程炫ChatGPT嘴里
@@ -134,7 +135,7 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼 在`config.py`中,配置API KEY等设置,[点击查看特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1) 。 [Wiki-项目配置说明](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。 - 「 程序会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。如您能理解该读取逻辑,我们强烈建议您在`config.py`旁边创建一个名为`config_private.py`的新配置文件,并把`config.py`中的配置转移(复制)到`config_private.py`中(仅复制您修改过的配置条目即可) 」。 + 「 程序会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。如您能理解以上读取逻辑,我们强烈建议您在`config.py`同路径下创建一个名为`config_private.py`的新配置文件,并使用`config_private.py`配置项目,以确保更新或其他用户无法轻易查看您的私有配置 」。 「 支持通过`环境变量`配置项目,环境变量的书写格式参考`docker-compose.yml`文件或者我们的[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。配置读取优先级: `环境变量` > `config_private.py` > `config.py` 」。 @@ -183,7 +184,7 @@ AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt- ### 安装方法II:使用Docker -0. 部署项目的全部能力(这个是包含cuda和latex的大型镜像。但如果您网速慢、硬盘小,则不推荐使用这个) +0. 部署项目的全部能力(这个是包含cuda和latex的大型镜像。但如果您网速慢、硬盘小,则不推荐该方法全部部署项目) [![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml) ``` sh @@ -212,7 +213,7 @@ P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以 ``` -### 安装方法III:其他部署姿势 +### 安装方法III:其他部署方法 1. **Windows一键运行脚本**。 完全不熟悉python环境的Windows用户可以下载[Release](https://github.com/binary-husky/gpt_academic/releases)中发布的一键运行脚本安装无本地模型的版本。脚本贡献来源:[oobabooga](https://github.com/oobabooga/one-click-installers)。 @@ -221,7 +222,7 @@ P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以 3. 云服务器远程部署避坑指南。 请访问[云服务器远程部署wiki](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97) -4. 一些新型的部署平台或方法 +4. 
在其他平台部署&二级网址部署 - 使用Sealos[一键部署](https://github.com/binary-husky/gpt_academic/issues/993)。 - 使用WSL2(Windows Subsystem for Linux 子系统)。请访问[部署wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2) - 如何在二级网址(如`http://localhost/subpath`)下运行。请访问[FastAPI运行说明](docs/WithFastapi.md) From 17f361d63bcea24a2ec2196120b0b664144c53cd Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Wed, 29 Nov 2023 13:11:29 +0800 Subject: [PATCH 104/117] Update README.md --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index b38a1c3807..ffcf26639f 100644 --- a/README.md +++ b/README.md @@ -17,11 +17,11 @@ [![Wiki][Wiki-image]][Wiki-url] [![PR][PRs-image]][PRs-url] -[Github-image]: https://img.shields.io/badge/github-12100E.svg?&logo=github&logoColor=white -[Releases-image]: https://img.shields.io/badge/Releases-v3.6.0-blue -[Installation-image]: https://img.shields.io/badge/Installation-v3.6.1-blue -[Wiki-image]: https://img.shields.io/badge/wiki-000000.svg?logo=wikipedia -[PRs-image]: https://img.shields.io/badge/PRs-welcome-pink +[Github-image]: https://img.shields.io/badge/github-12100E.svg?&logo=github&logoColor=white?style=flat-square +[Releases-image]: https://img.shields.io/badge/Releases-v3.6.0-blue?style=flat-square +[Installation-image]: https://img.shields.io/badge/Installation-v3.6.1-blue?style=flat-square +[Wiki-image]: https://img.shields.io/badge/wiki-项目文档-black?style=flat-square +[PRs-image]: https://img.shields.io/badge/PRs-welcome-pink?style=flat-square [Github-url]: https://github.com/binary-husky/gpt_academic [Releases-url]: https://github.com/binary-husky/gpt_academic/releases From c141e767c65bdfdaa66f3e773672e5e1a6d9ac8b Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Wed, 29 Nov 2023 13:37:20 +0800 Subject: [PATCH 105/117] Update README.md --- README.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index ffcf26639f..06abfac6f7 100644 --- a/README.md +++ b/README.md @@ -12,17 +12,21 @@ [![Github][Github-image]][Github-url] +[![License][License-image]][License-url] + [![Releases][Releases-image]][Releases-url] [![Installation][Installation-image]][Installation-url] [![Wiki][Wiki-image]][Wiki-url] [![PR][PRs-image]][PRs-url] -[Github-image]: https://img.shields.io/badge/github-12100E.svg?&logo=github&logoColor=white?style=flat-square +[License-image]: https://img.shields.io/badge/LICENSE-GPL3.0-black?&style=for-the-badge +[Github-image]: https://img.shields.io/badge/github-12100E.svg?&style=for-the-badge&logo=github&logoColor=white [Releases-image]: https://img.shields.io/badge/Releases-v3.6.0-blue?style=flat-square [Installation-image]: https://img.shields.io/badge/Installation-v3.6.1-blue?style=flat-square -[Wiki-image]: https://img.shields.io/badge/wiki-项目文档-black?style=flat-square +[Wiki-image]: https://img.shields.io/badge/wiki-项目文档-yellow?style=flat-square [PRs-image]: https://img.shields.io/badge/PRs-welcome-pink?style=flat-square +[License-url]: https://github.com/binary-husky/gpt_academic/blob/master/LICENSE [Github-url]: https://github.com/binary-husky/gpt_academic [Releases-url]: https://github.com/binary-husky/gpt_academic/releases [Installation-url]: https://github.com/binary-husky/gpt_academic#installation From f9e9b6f4ec3252b6028601da7c3c8ff614fe49fd Mon Sep 17 00:00:00 2001 
From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Wed, 29 Nov 2023 13:38:08 +0800 Subject: [PATCH 106/117] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 06abfac6f7..79dd989262 100644 --- a/README.md +++ b/README.md @@ -41,7 +41,7 @@ If you like this project, please give it a Star. -Read this in [English | ](docs/README.English.md)[日本語 | ](docs/README.Japanese.md)[한국어 | ](docs/README.Korean.md)[Русский | ](docs/README.Russian.md)[Français](docs/README.French.md). All translations have been provided by the project itself. +Read this in [English](docs/README.English.md) | [日本語](docs/README.Japanese.md) | [한국어](docs/README.Korean.md) | [Русский](docs/README.Russian.md) | [Français](docs/README.French.md). All translations have been provided by the project itself. To translate this project to arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).

From e4c057f5a304b5a5a4a4940f7e8282f2b112126d Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Wed, 29 Nov 2023 13:39:33 +0800 Subject: [PATCH 107/117] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 79dd989262..f747be901b 100644 --- a/README.md +++ b/README.md @@ -19,11 +19,11 @@ [![Wiki][Wiki-image]][Wiki-url] [![PR][PRs-image]][PRs-url] -[License-image]: https://img.shields.io/badge/LICENSE-GPL3.0-black?&style=for-the-badge +[License-image]: https://img.shields.io/badge/LICENSE-GPL3.0-orange?&style=for-the-badge [Github-image]: https://img.shields.io/badge/github-12100E.svg?&style=for-the-badge&logo=github&logoColor=white [Releases-image]: https://img.shields.io/badge/Releases-v3.6.0-blue?style=flat-square [Installation-image]: https://img.shields.io/badge/Installation-v3.6.1-blue?style=flat-square -[Wiki-image]: https://img.shields.io/badge/wiki-项目文档-yellow?style=flat-square +[Wiki-image]: https://img.shields.io/badge/wiki-项目文档-black?style=flat-square [PRs-image]: https://img.shields.io/badge/PRs-welcome-pink?style=flat-square [License-url]: https://github.com/binary-husky/gpt_academic/blob/master/LICENSE From f417c1ce6df0873b7c481cea6edf7a8e9d73ca4e Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Wed, 29 Nov 2023 13:46:00 +0800 Subject: [PATCH 108/117] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index f747be901b..5716b66ccf 100644 --- a/README.md +++ b/README.md @@ -135,9 +135,9 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼 cd gpt_academic ``` -2. 配置API_KEY +2. 配置API_KEY等变量 - 在`config.py`中,配置API KEY等设置,[点击查看特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1) 。 [Wiki-项目配置说明](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。 + 在`config.py`中,配置API KEY等变量。[特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1)、[Wiki-项目配置说明](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。 「 程序会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。如您能理解以上读取逻辑,我们强烈建议您在`config.py`同路径下创建一个名为`config_private.py`的新配置文件,并使用`config_private.py`配置项目,以确保更新或其他用户无法轻易查看您的私有配置 」。 From 6417ca9ddeb92046e85eecb101defbad690e9989 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Wed, 29 Nov 2023 13:46:43 +0800 Subject: [PATCH 109/117] Update README.md --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 5716b66ccf..a9c6458822 100644 --- a/README.md +++ b/README.md @@ -48,13 +48,13 @@ To translate this project to arbitrary language with GPT, read and run [`multi_l > **Note** > -> 1.请注意只有 **高亮** 标识的插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR。 +> 1. 请注意只有 **高亮** 标识的插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR。 > -> 2.本项目中每个文件的功能都在[自译解报告](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)`self_analysis.md`详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题请查阅wiki。 +> 2. 
本项目中每个文件的功能都在[自译解报告](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)`self_analysis.md`详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题请查阅wiki。 > > [![常规安装方法](https://img.shields.io/static/v1?label=&message=常规安装方法&color=gray)](#installation) [![一键安装脚本](https://img.shields.io/static/v1?label=&message=一键安装脚本&color=gray)](https://github.com/binary-husky/gpt_academic/releases) [![配置说明](https://img.shields.io/static/v1?label=&message=配置说明&color=gray)](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) [![wiki](https://img.shields.io/static/v1?label=&message=wiki&color=gray)]([https://github.com/binary-husky/gpt_academic/wiki/项目配置说明](https://github.com/binary-husky/gpt_academic/wiki)) > -> 3.本项目兼容并鼓励尝试国产大语言模型ChatGLM等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交后即可生效。 +> 3. 本项目兼容并鼓励尝试国产大语言模型ChatGLM等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交后即可生效。

From 29775dedd8400b83a9051ce2120dd655d63291a8 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Wed, 29 Nov 2023 13:49:38 +0800 Subject: [PATCH 110/117] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a9c6458822..04b70fede1 100644 --- a/README.md +++ b/README.md @@ -188,7 +188,7 @@ AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt- ### 安装方法II:使用Docker -0. 部署项目的全部能力(这个是包含cuda和latex的大型镜像。但如果您网速慢、硬盘小,则不推荐该方法全部部署项目) +0. 部署项目的全部能力(这个是包含cuda和latex的大型镜像。但如果您网速慢、硬盘小,则不推荐该方法部署完整项目) [![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml) ``` sh From f44642d9d23182a4fc43851ebe2e00b2e3593c7e Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Wed, 29 Nov 2023 13:51:44 +0800 Subject: [PATCH 111/117] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 04b70fede1..6ff14e4a76 100644 --- a/README.md +++ b/README.md @@ -52,7 +52,7 @@ To translate this project to arbitrary language with GPT, read and run [`multi_l > > 2. 本项目中每个文件的功能都在[自译解报告](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)`self_analysis.md`详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题请查阅wiki。 > -> [![常规安装方法](https://img.shields.io/static/v1?label=&message=常规安装方法&color=gray)](#installation) [![一键安装脚本](https://img.shields.io/static/v1?label=&message=一键安装脚本&color=gray)](https://github.com/binary-husky/gpt_academic/releases) [![配置说明](https://img.shields.io/static/v1?label=&message=配置说明&color=gray)](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) [![wiki](https://img.shields.io/static/v1?label=&message=wiki&color=gray)]([https://github.com/binary-husky/gpt_academic/wiki/项目配置说明](https://github.com/binary-husky/gpt_academic/wiki)) +> [![常规安装方法](https://img.shields.io/static/v1?label=&message=常规安装方法&color=gray)](#installation) [![一键安装脚本](https://img.shields.io/static/v1?label=&message=一键安装脚本&color=gray)](https://github.com/binary-husky/gpt_academic/releases) [![配置说明](https://img.shields.io/static/v1?label=&message=配置说明&color=gray)](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) [![wiki](https://img.shields.io/static/v1?label=&message=wiki&color=gray)]([https://github.com/binary-husky/gpt_academic/wiki/项目配置说明](https://github.com/binary-husky/gpt_academic/wiki)) > > 3. 
本项目兼容并鼓励尝试国产大语言模型ChatGLM等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交后即可生效。 From 55d807c116112366bd86200f9765d2e9c0d5b48f Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Thu, 30 Nov 2023 22:19:05 +0800 Subject: [PATCH 112/117] =?UTF-8?q?=E8=A7=A3=E5=86=B3=E5=86=85=E5=AD=98?= =?UTF-8?q?=E6=B3=84=E9=9C=B2=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/latex_fns/latex_actions.py | 1 + crazy_functions/latex_fns/latex_toolbox.py | 37 +++++++++++++++++++--- toolbox.py | 3 +- 3 files changed, 36 insertions(+), 5 deletions(-) diff --git a/crazy_functions/latex_fns/latex_actions.py b/crazy_functions/latex_fns/latex_actions.py index 74e8757e62..be3d52e7d1 100644 --- a/crazy_functions/latex_fns/latex_actions.py +++ b/crazy_functions/latex_fns/latex_actions.py @@ -418,6 +418,7 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f merge_pdfs(origin_pdf, result_pdf, concat_pdf) promote_file_to_downloadzone(concat_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI except Exception as e: + print(e) pass return True # 成功啦 else: diff --git a/crazy_functions/latex_fns/latex_toolbox.py b/crazy_functions/latex_fns/latex_toolbox.py index 4555ff185c..0a6a873b50 100644 --- a/crazy_functions/latex_fns/latex_toolbox.py +++ b/crazy_functions/latex_fns/latex_toolbox.py @@ -493,11 +493,38 @@ def compile_latex_with_timeout(command, cwd, timeout=60): return False return True - - -def merge_pdfs(pdf1_path, pdf2_path, output_path): - import PyPDF2 +def run_in_subprocess_wrapper_func(func, args, kwargs, return_dict, exception_dict): + import sys + try: + result = func(*args, **kwargs) + return_dict['result'] = result + except Exception as e: + exc_info = sys.exc_info() + exception_dict['exception'] = exc_info + +def run_in_subprocess(func): + import multiprocessing + def wrapper(*args, **kwargs): + return_dict = multiprocessing.Manager().dict() + exception_dict = multiprocessing.Manager().dict() + process = multiprocessing.Process(target=run_in_subprocess_wrapper_func, + args=(func, args, kwargs, return_dict, exception_dict)) + process.start() + process.join() + process.close() + if 'exception' in exception_dict: + # ooops, the subprocess ran into an exception + exc_info = exception_dict['exception'] + raise exc_info[1].with_traceback(exc_info[2]) + if 'result' in return_dict.keys(): + # If the subprocess ran successfully, return the result + return return_dict['result'] + return wrapper + +def _merge_pdfs(pdf1_path, pdf2_path, output_path): + import PyPDF2 # PyPDF2这个库有严重的内存泄露问题,把它放到子进程中运行,从而方便内存的释放 Percent = 0.95 + # raise RuntimeError('PyPDF2 has a serious memory leak problem, please use other tools to merge PDF files.') # Open the first PDF file with open(pdf1_path, 'rb') as pdf1_file: pdf1_reader = PyPDF2.PdfFileReader(pdf1_file) @@ -531,3 +558,5 @@ def merge_pdfs(pdf1_path, pdf2_path, output_path): # Save the merged PDF file with open(output_path, 'wb') as output_file: output_writer.write(output_file) + +merge_pdfs = run_in_subprocess(_merge_pdfs) # PyPDF2这个库有严重的内存泄露问题,把它放到子进程中运行,从而方便内存的释放 diff --git a/toolbox.py b/toolbox.py index 8747afdb8c..21c56014b1 100644 --- a/toolbox.py +++ b/toolbox.py @@ -561,7 +561,8 @@ def promote_file_to_downloadzone(file, rename_file=None, chatbot=None): user_name = get_user(chatbot) else: user_name = default_user_name - + if not os.path.exists(file): + raise 
FileNotFoundError(f'文件{file}不存在') user_path = get_log_folder(user_name, plugin_name=None) if file_already_in_downloadzone(file, user_path): new_path = file From 900fad69cf9f63878d1ddf6636ee6e7566261424 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Thu, 30 Nov 2023 22:21:44 +0800 Subject: [PATCH 113/117] produce comparison pdf cache --- ...atex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" | 3 +++ 1 file changed, 3 insertions(+) diff --git "a/crazy_functions/Latex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" "b/crazy_functions/Latex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" index f3919edc38..18a8d1bab2 100644 --- "a/crazy_functions/Latex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" +++ "b/crazy_functions/Latex\350\276\223\345\207\272PDF\347\273\223\346\236\234.py" @@ -88,6 +88,9 @@ def check_cached_translation_pdf(arxiv_id): target_file = pj(translation_dir, 'translate_zh.pdf') if os.path.exists(target_file): promote_file_to_downloadzone(target_file, rename_file=None, chatbot=chatbot) + target_file_compare = pj(translation_dir, 'comparison.pdf') + if os.path.exists(target_file_compare): + promote_file_to_downloadzone(target_file_compare, rename_file=None, chatbot=chatbot) return target_file return False def is_float(s): From ecaf2bdf45ca9bb5d89ee60bd4b26cf7789b3348 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Thu, 30 Nov 2023 22:36:16 +0800 Subject: [PATCH 114/117] add comparison pdf file save and load --- crazy_functions/latex_fns/latex_actions.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crazy_functions/latex_fns/latex_actions.py b/crazy_functions/latex_fns/latex_actions.py index be3d52e7d1..113a27853d 100644 --- a/crazy_functions/latex_fns/latex_actions.py +++ b/crazy_functions/latex_fns/latex_actions.py @@ -416,6 +416,8 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f from .latex_toolbox import merge_pdfs concat_pdf = pj(work_folder_modified, f'comparison.pdf') merge_pdfs(origin_pdf, result_pdf, concat_pdf) + if os.path.exists(pj(work_folder, '..', 'translation')): + shutil.copyfile(concat_pdf, pj(work_folder, '..', 'translation', 'comparison.pdf')) promote_file_to_downloadzone(concat_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI except Exception as e: print(e) From 6723eb77b2768d0463992ecf292dfda66f08f5ff Mon Sep 17 00:00:00 2001 From: binary-husky Date: Thu, 30 Nov 2023 23:08:33 +0800 Subject: [PATCH 115/117] version3.62 --- README.md | 20 ++++++-------------- version | 4 ++-- 2 files changed, 8 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index 6ff14e4a76..8102da8d44 100644 --- a/README.md +++ b/README.md @@ -38,23 +38,17 @@ **如果喜欢这个项目,请给它一个Star;如果您发明了好用的快捷键或插件,欢迎发pull requests!** -If you like this project, please give it a Star. - - -Read this in [English](docs/README.English.md) | [日本語](docs/README.Japanese.md) | [한국어](docs/README.Korean.md) | [Русский](docs/README.Russian.md) | [Français](docs/README.French.md). All translations have been provided by the project itself. - -To translate this project to arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental). +If you like this project, please give it a Star. Read this in [English](docs/README.English.md) | [日本語](docs/README.Japanese.md) | [한국어](docs/README.Korean.md) | [Русский](docs/README.Russian.md) | [Français](docs/README.French.md). All translations have been provided by the project itself. 
To translate this project to arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).

> **Note** > -> 1. 请注意只有 **高亮** 标识的插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR。 -> -> 2. 本项目中每个文件的功能都在[自译解报告](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)`self_analysis.md`详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题请查阅wiki。 +> 1.请注意只有 **高亮** 标识的插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR。 > +> 2.本项目中每个文件的功能都在[自译解报告](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)`self_analysis.md`详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题请查阅wiki。 > [![常规安装方法](https://img.shields.io/static/v1?label=&message=常规安装方法&color=gray)](#installation) [![一键安装脚本](https://img.shields.io/static/v1?label=&message=一键安装脚本&color=gray)](https://github.com/binary-husky/gpt_academic/releases) [![配置说明](https://img.shields.io/static/v1?label=&message=配置说明&color=gray)](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) [![wiki](https://img.shields.io/static/v1?label=&message=wiki&color=gray)]([https://github.com/binary-husky/gpt_academic/wiki/项目配置说明](https://github.com/binary-husky/gpt_academic/wiki)) > -> 3. 本项目兼容并鼓励尝试国产大语言模型ChatGLM等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交后即可生效。 +> 3.本项目兼容并鼓励尝试国产大语言模型ChatGLM等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交后即可生效。

@@ -93,7 +87,6 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼 - - 新界面(修改`config.py`中的LAYOUT选项即可实现“左右布局”和“上下布局”的切换)
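
As a quick pointer for the LAYOUT bullet above: in `config.py` the switch looks roughly like this. The two values follow the README's wording and should be verified against your local copy:

```python
# Sketch of the LAYOUT option described above, as it would appear in config.py.
LAYOUT = "LEFT-RIGHT"   # side-by-side layout (左右布局)
# LAYOUT = "TOP-DOWN"   # stacked layout (上下布局)
```
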
@@ -208,7 +201,7 @@ AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt- P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以直接使用方案4或者方案0获取Latex功能。 -2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + 通义千问(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行) +2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + 通义千问(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行时) [![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml) ``` sh @@ -254,7 +247,6 @@ P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以 ### II:自定义函数插件 编写强大的函数插件来执行任何你想得到的和想不到的任务。 - 本项目的插件编写、调试难度很低,只要您具备一定的python基础知识,就可以仿照我们提供的模板实现自己的插件功能。 详情请参考[函数插件指南](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)。 @@ -357,7 +349,7 @@ GPT Academic开发者QQ群:`610599535` - 已知问题 - 某些浏览器翻译插件干扰此软件前端的运行 - - 官方Gradio目前有很多兼容性Bug,请**务必使用`requirement.txt`安装Gradio** + - 官方Gradio目前有很多兼容性问题,请**务必使用`requirement.txt`安装Gradio** ### III:主题 可以通过修改`THEME`选项(config.py)变更主题 diff --git a/version b/version index 5f6de09c8a..cb4df5ae57 100644 --- a/version +++ b/version @@ -1,5 +1,5 @@ { - "version": 3.61, + "version": 3.62, "show_feature": true, - "new_feature": "修复潜在的多用户冲突问题 <-> 接入Deepseek Coder <-> AutoGen多智能体插件测试版 <-> 修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮" + "new_feature": "修复若干隐蔽的内存BUG <-> 修复多用户冲突问题 <-> 接入Deepseek Coder <-> AutoGen多智能体插件测试版 <-> 修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮" } From a64d5500450d0bad901f26e4493320d397fb9915 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Thu, 30 Nov 2023 23:23:54 +0800 Subject: [PATCH 116/117] =?UTF-8?q?=E4=BF=AE=E6=94=B9README=E4=B8=AD?= =?UTF-8?q?=E7=9A=84=E4=B8=80=E4=BA=9B=E6=8D=A2=E8=A1=8C=E7=AC=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 8102da8d44..54bf7c1f9e 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ > > 2023.11.7: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目开源免费,近期发现有人蔑视开源协议并利用本项目违规圈钱,请提高警惕,谨防上当受骗。 -

+

@@ -13,14 +13,13 @@ [![Github][Github-image]][Github-url] [![License][License-image]][License-url] - [![Releases][Releases-image]][Releases-url] [![Installation][Installation-image]][Installation-url] [![Wiki][Wiki-image]][Wiki-url] [![PR][PRs-image]][PRs-url] -[License-image]: https://img.shields.io/badge/LICENSE-GPL3.0-orange?&style=for-the-badge -[Github-image]: https://img.shields.io/badge/github-12100E.svg?&style=for-the-badge&logo=github&logoColor=white +[License-image]: https://img.shields.io/badge/LICENSE-GPL3.0-orange?&style=flat-square +[Github-image]: https://img.shields.io/badge/github-12100E.svg?&style=flat-square [Releases-image]: https://img.shields.io/badge/Releases-v3.6.0-blue?style=flat-square [Installation-image]: https://img.shields.io/badge/Installation-v3.6.1-blue?style=flat-square [Wiki-image]: https://img.shields.io/badge/wiki-项目文档-black?style=flat-square @@ -35,14 +34,14 @@

+
**如果喜欢这个项目,请给它一个Star;如果您发明了好用的快捷键或插件,欢迎发pull requests!** If you like this project, please give it a Star. Read this in [English](docs/README.English.md) | [日本語](docs/README.Japanese.md) | [한국어](docs/README.Korean.md) | [Русский](docs/README.Russian.md) | [Français](docs/README.French.md). All translations have been provided by the project itself. To translate this project to arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental). -

+
+ -> **Note** -> > 1.请注意只有 **高亮** 标识的插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR。 > > 2.本项目中每个文件的功能都在[自译解报告](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)`self_analysis.md`详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题请查阅wiki。 @@ -52,8 +51,6 @@ If you like this project, please give it a Star. Read this in [English](docs/REA

-# Features Overview -
功能(⭐= 近期新增功能) | 描述 @@ -118,6 +115,8 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼
+

+ # Installation ### 安装方法I:直接运行 (Windows, Linux or MacOS) @@ -224,6 +223,7 @@ P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以 - 使用WSL2(Windows Subsystem for Linux 子系统)。请访问[部署wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2) - 如何在二级网址(如`http://localhost/subpath`)下运行。请访问[FastAPI运行说明](docs/WithFastapi.md) +

# Advanced Usage ### I:自定义新的便捷按钮(学术快捷键) @@ -250,6 +250,7 @@ P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以 本项目的插件编写、调试难度很低,只要您具备一定的python基础知识,就可以仿照我们提供的模板实现自己的插件功能。 详情请参考[函数插件指南](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)。 +

# Updates ### I:动态 From d8958da8cd0153a717a6585b3faf1d72bd6803ad Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Fri, 1 Dec 2023 09:28:22 +0800 Subject: [PATCH 117/117] =?UTF-8?q?=E4=BF=AE=E6=94=B9Typo?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functional.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crazy_functional.py b/crazy_functional.py index 3d4df71883..3b8b945335 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -489,7 +489,7 @@ def get_crazy_functions(): }) from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF function_plugins.update({ - "Arixv论文精细翻译(输入arxivID)[需Latex]": { + "Arxiv论文精细翻译(输入arxivID)[需Latex]": { "Group": "学术", "Color": "stop", "AsButton": False,
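
For context on the final hunk: a registration entry in `get_crazy_functions()` normally also carries a callable. The sketch below reconstructs a complete entry; the `HotReload`-wrapped `"Function"` field is recalled from the surrounding file rather than shown in this diff, so treat it as an assumption:

```python
# Hedged sketch of a complete plugin registration in crazy_functional.py.
# "Group", "Color" and "AsButton" appear in the hunk above; the "Function"
# field is assumed from the rest of the file, not shown in this diff.
from toolbox import HotReload
from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF

function_plugins = {}  # stand-in for the dict built earlier in get_crazy_functions()
function_plugins.update({
    "Arxiv论文精细翻译(输入arxivID)[需Latex]": {
        "Group": "学术",      # menu group shown in the UI
        "Color": "stop",      # button color / style
        "AsButton": False,    # keep it in the plugin dropdown, not as a button
        "Function": HotReload(Latex翻译中文并重新编译PDF),  # assumed field name
    }
})
```
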