Commit

Merge remote-tracking branch 'upstream'
haiiliin committed Sep 17, 2024
2 parents 70aa66a + 597c320 commit eb23c76
Showing 257 changed files with 19,240 additions and 4,556 deletions.
14 changes: 4 additions & 10 deletions .github/ISSUE_TEMPLATE/bug_report.yml
@@ -34,7 +34,7 @@ body:
- Others | 非最新版
validations:
required: true

- type: dropdown
id: os
attributes:
@@ -47,31 +47,25 @@ body:
- Docker
validations:
required: true

- type: textarea
id: describe
attributes:
label: Describe the bug | 简述
description: Describe the bug | 简述
validations:
required: true

- type: textarea
id: screenshot
attributes:
label: Screen Shot | 有帮助的截图
description: Screen Shot | 有帮助的截图
validations:
required: true

- type: textarea
id: traceback
attributes:
label: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有)
description: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有)






5 changes: 0 additions & 5 deletions .github/ISSUE_TEMPLATE/feature_request.yml
@@ -21,8 +21,3 @@ body:
attributes:
label: Feature Request | 功能请求
description: Feature Request | 功能请求





44 changes: 44 additions & 0 deletions .github/workflows/build-with-all-capacity-beta.yml
@@ -0,0 +1,44 @@
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
name: build-with-all-capacity-beta

on:
push:
branches:
- 'master'

env:
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}_with_all_capacity_beta

jobs:
build-and-push-image:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write

steps:
- name: Checkout repository
uses: actions/checkout@v3

- name: Log in to the Container registry
uses: docker/login-action@v2
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v4
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}

- name: Build and push Docker image
uses: docker/build-push-action@v4
with:
context: .
push: true
file: docs/GithubAction+AllCapacityBeta
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
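
Note: the workflow above builds from docs/GithubAction+AllCapacityBeta and pushes the image to GitHub Container Registry on every push to master. Below is a minimal sketch of pulling the published image with the Docker SDK for Python; the repository path is a placeholder standing in for the workflow's REGISTRY/IMAGE_NAME values, and the tag assumes metadata-action's default branch tagging.

# Sketch: pull the image this workflow publishes (requires a local Docker daemon
# and `pip install docker`). The repository path below is a placeholder.
import docker

client = docker.from_env()
image = client.images.pull(
    "ghcr.io/OWNER/REPO_with_all_capacity_beta",  # placeholder for REGISTRY/IMAGE_NAME
    tag="master",  # metadata-action tags branch pushes with the branch name
)
print(image.tags)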
2 changes: 1 addition & 1 deletion .github/workflows/stale.yml
@@ -15,7 +15,7 @@ jobs:
permissions:
issues: write
pull-requests: read

steps:
- uses: actions/stale@v8
with:
10 changes: 10 additions & 0 deletions .gitignore
@@ -131,6 +131,9 @@ dmypy.json
# Pyre type checker
.pyre/

# macOS files
.DS_Store

.vscode
.idea

@@ -152,4 +155,11 @@ request_llms/moss
media
flagged
request_llms/ChatGLM-6b-onnx-u8s8
.pre-commit-config.yaml
test.*
temp.*
objdump*
*.min.*.js
TODO

.pdm-python
8 changes: 6 additions & 2 deletions Dockerfile
@@ -12,13 +12,17 @@ RUN echo '[global]' > /etc/pip.conf && \
echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf


# 语音输出功能(以下两行,第一行更换阿里源,第二行安装ffmpeg,都可以删除)
RUN UBUNTU_VERSION=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release); echo "deb https://mirrors.aliyun.com/debian/ $UBUNTU_VERSION main non-free contrib" > /etc/apt/sources.list; apt-get update
RUN apt-get install ffmpeg -y


# 进入工作路径(必要)
WORKDIR /gpt


# 安装大部分依赖,利用Docker缓存加速以后的构建 (以下三行,可以删除)
# 安装大部分依赖,利用Docker缓存加速以后的构建 (以下两行,可以删除)
COPY requirements.txt ./
COPY ./docs/gradio-3.32.6-py3-none-any.whl ./docs/gradio-3.32.6-py3-none-any.whl
RUN pip3 install -r requirements.txt


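
For reference, a minimal sketch of building the Dockerfile above locally through the Docker SDK for Python; the tag name is arbitrary and chosen only for illustration.

# Sketch: build the image defined by the Dockerfile above (run from the repository
# root; requires a local Docker daemon and `pip install docker`).
import docker

client = docker.from_env()
image, logs = client.images.build(path=".", dockerfile="Dockerfile", tag="gpt-academic:local")
for chunk in logs:
    if "stream" in chunk:
        print(chunk["stream"], end="")  # stream the build output as it arrives
print(image.tags)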
135 changes: 97 additions & 38 deletions README.md

Large diffs are not rendered by default.

45 changes: 32 additions & 13 deletions check_proxy.py
@@ -1,33 +1,44 @@

def check_proxy(proxies):
def check_proxy(proxies, return_ip=False):
import requests
proxies_https = proxies['https'] if proxies is not None else '无'
ip = None
try:
response = requests.get("https://ipapi.co/json/", proxies=proxies, timeout=4)
data = response.json()
if 'country_name' in data:
country = data['country_name']
result = f"代理配置 {proxies_https}, 代理所在地:{country}"
if 'ip' in data: ip = data['ip']
elif 'error' in data:
alternative = _check_with_backup_source(proxies)
alternative, ip = _check_with_backup_source(proxies)
if alternative is None:
result = f"代理配置 {proxies_https}, 代理所在地:未知,IP查询频率受限"
else:
result = f"代理配置 {proxies_https}, 代理所在地:{alternative}"
else:
result = f"代理配置 {proxies_https}, 代理数据解析失败:{data}"
print(result)
return result
if not return_ip:
print(result)
return result
else:
return ip
except:
result = f"代理配置 {proxies_https}, 代理所在地查询超时,代理可能无效"
print(result)
return result
if not return_ip:
print(result)
return result
else:
return ip

def _check_with_backup_source(proxies):
import random, string, requests
random_string = ''.join(random.choices(string.ascii_letters + string.digits, k=32))
try: return requests.get(f"http://{random_string}.edns.ip-api.com/json", proxies=proxies, timeout=4).json()['dns']['geo']
except: return None
try:
res_json = requests.get(f"http://{random_string}.edns.ip-api.com/json", proxies=proxies, timeout=4).json()
return res_json['dns']['geo'], res_json['dns']['ip']
except:
return None, None

def backup_and_download(current_version, remote_version):
"""
@@ -47,7 +58,7 @@ def backup_and_download(current_version, remote_version):
shutil.copytree('./', backup_dir, ignore=lambda x, y: ['history'])
proxies = get_conf('proxies')
try: r = requests.get('https://github.com/binary-husky/chatgpt_academic/archive/refs/heads/master.zip', proxies=proxies, stream=True)
except: r = requests.get('https://public.gpt-academic.top/publish/master.zip', proxies=proxies, stream=True)
except: r = requests.get('https://public.agent-matrix.com/publish/master.zip', proxies=proxies, stream=True)
zip_file_path = backup_dir+'/master.zip'
with open(zip_file_path, 'wb+') as f:
f.write(r.content)
@@ -71,7 +82,7 @@ def patch_and_restart(path):
import sys
import time
import glob
from colorful import print亮黄, print亮绿, print亮红
from shared_utils.colorful import print亮黄, print亮绿, print亮红
# if not using config_private, move origin config.py as config_private.py
if not os.path.exists('config_private.py'):
print亮黄('由于您没有设置config_private.py私密配置,现将您的现有配置移动至config_private.py以防止配置丢失,',
@@ -81,7 +92,7 @@ def patch_and_restart(path):
dir_util.copy_tree(path_new_version, './')
print亮绿('代码已经更新,即将更新pip包依赖……')
for i in reversed(range(5)): time.sleep(1); print(i)
try:
try:
import subprocess
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt'])
except:
@@ -113,7 +124,7 @@ def auto_update(raise_error=False):
import json
proxies = get_conf('proxies')
try: response = requests.get("https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version", proxies=proxies, timeout=5)
except: response = requests.get("https://public.gpt-academic.top/publish/version", proxies=proxies, timeout=5)
except: response = requests.get("https://public.agent-matrix.com/publish/version", proxies=proxies, timeout=5)
remote_json_data = json.loads(response.text)
remote_version = remote_json_data['version']
if remote_json_data["show_feature"]:
@@ -124,7 +135,7 @@ def auto_update(raise_error=False):
current_version = f.read()
current_version = json.loads(current_version)['version']
if (remote_version - current_version) >= 0.01-1e-5:
from colorful import print亮黄
from shared_utils.colorful import print亮黄
print亮黄(f'\n新版本可用。新版本:{remote_version},当前版本:{current_version}{new_feature}')
print('(1)Github更新地址:\nhttps://github.com/binary-husky/chatgpt_academic\n')
user_instruction = input('(2)是否一键更新代码(Y+回车=确认,输入其他/无输入+回车=不更新)?')
@@ -160,6 +171,14 @@ def warm_up_modules():
enc = model_info["gpt-4"]['tokenizer']
enc.encode("模块预热", disallowed_special=())

def warm_up_vectordb():
print('正在执行一些模块的预热 ...')
from toolbox import ProxyNetworkActivate
with ProxyNetworkActivate("Warmup_Modules"):
import nltk
with ProxyNetworkActivate("Warmup_Modules"): nltk.download("punkt")


if __name__ == '__main__':
import os
os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
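
Note: the check_proxy.py changes above add a return_ip flag so callers can obtain the detected egress IP instead of a printed summary string. A minimal sketch of how the updated signature might be called; the proxy address is a placeholder.

# Sketch: exercising the new return_ip parameter of check_proxy (run from the
# repository root). The proxy URL is a placeholder; pass proxies=None to test
# a direct connection.
from check_proxy import check_proxy

proxies = {"http": "http://127.0.0.1:7890", "https": "http://127.0.0.1:7890"}  # placeholder address
summary = check_proxy(proxies)                    # prints and returns a human-readable summary
egress_ip = check_proxy(proxies, return_ip=True)  # returns the detected IP, or None on failure
print(egress_ip)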