Commit
1. Added a drug information lookup capability to the System prompt
2. Updated the model path
3. Changed the meaning of DR grade 0 from "healthy" to "no diabetic retinopathy"
4. Moved the CUDA version check into app.py to avoid running it multiple times
JieGenius committed Mar 3, 2024
1 parent ea0ee3d commit 6cce9e8
Showing 7 changed files with 139 additions and 17 deletions.
12 changes: 10 additions & 2 deletions README.md
@@ -49,7 +49,7 @@ Demo URL: [OculiChatDA](https://openxlab.org.cn/apps/detail/helloworld/O
## Model
| Model | Base | Training data | OpenXLab |
|---------------------|-------------------|-------------------|----------------------------------------------------------------------------------------|
| OculiChatDA-chat-7b | InternLM2-chat-7b | 500K dialogues, 1–64 turns each | [![Open in OpenXLab](https://cdn-static.openxlab.org.cn/header/openxlab_models.svg)](https://openxlab.org.cn/models/detail/helloworld/OculiChatDA) |
| OculiChatDA-chat-7b | InternLM2-chat-7b | 500K dialogues, 1–64 turns each | [![Open in OpenXLab](https://cdn-static.openxlab.org.cn/header/openxlab_models.svg)](https://openxlab.org.cn/models/detail/flyer/OculiChatDA) |

## Environment Setup
```bash
@@ -135,7 +135,7 @@ xtuner chat ./merged --prompt-template internlm2_chat

Then enter:

I uploaded an image; the image path is /a/b/c.jpg. lease determine whether I have glaucoma
I uploaded an image; the image path is /a/b/c.jpg. Please determine whether I have glaucoma

Model output:

@@ -154,6 +154,14 @@ lmdeploy convert internlm2-chat-7b ./merged
lmdeploy serve api_server ./workspace --server-name 0.0.0.0 --server-port 23333 --tp 1
```
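Once the server is running, you can smoke-test it from Python. This is a minimal sketch, assuming lmdeploy's api_server exposes its usual OpenAI-compatible `/v1/chat/completions` route on port 23333 and that the served model is registered as `internlm2-chat-7b`; adjust both if your deployment differs.

```python
import requests

# Hypothetical smoke test against the api_server started above.
resp = requests.post(
    "http://0.0.0.0:23333/v1/chat/completions",
    json={
        "model": "internlm2-chat-7b",  # assumed model name after conversion
        "messages": [{"role": "user", "content": "What are common symptoms of glaucoma?"}],
    },
    timeout=60,
)
print(resp.json()["choices"][0]["message"]["content"])
```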

## Model Upload
```bash
python model_upload/convert.py
openxlab login
cd merged
openxlab model create --model-repo='flyer/OculiChatDA' -s ./metafile.yml

```
## Web Demo
```bash
streamlit run web_demo.py --server.address=0.0.0.0 --server.port 7860 --server.enableStaticServing True
12 changes: 10 additions & 2 deletions app.py
@@ -1,12 +1,13 @@
import os
import time

import torch
# os.system("pip install torch==2.0.0 torchvision==0.15.1 torchaudio==2.0.1 --index-url https://download.pytorch.org/whl/cu118")

MODEL_DIR = "models/"
MODEL_REPOSITORY_OPENXLAB = "OpenLMLab/internlm2-chat-7b"
MODEL_REPOSITORY_OPENXLAB = "flyer/OculiChatDA"

if __name__ == '__main__':
    print("Server CUDA version check:")
    os.system("ls /usr/local")
    if not os.path.exists(MODEL_DIR):
        from openxlab.model import download
@@ -16,6 +17,13 @@
print("解压后目录结果如下:")
print(os.listdir(MODEL_DIR))
os.system(f"lmdeploy convert internlm2-chat-7b {MODEL_DIR}")

print("torch.cuda.is_available():", torch.cuda.is_available())
print("torch.__version__:", torch.__version__)
print("torch.version.cuda:", torch.version.cuda)
print("torch.cuda.get_device_name(0):", torch.cuda.get_device_name(0))
print("nvidia-smi:", os.popen('nvidia-smi').read())

os.system("lmdeploy serve api_server ./workspace --server-name 0.0.0.0 --server-port 23333 --tp 1 --cache-max-entry-count 0.5 &")
time.sleep(5)
os.system('streamlit run web_demo.py --server.address=0.0.0.0 --server.port 7860 --server.enableStaticServing True --server.enableStaticServing true')
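The fixed `time.sleep(5)` above can race a slow model load. A more robust alternative is sketched below, under the assumption that the api_server exposes the OpenAI-compatible `/v1/models` route; the helper name `wait_for_server` is mine, not the repo's.

```python
import time
import requests

def wait_for_server(url: str = "http://0.0.0.0:23333/v1/models",
                    timeout: float = 120.0) -> bool:
    """Poll the api_server until it answers, instead of sleeping blindly."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            if requests.get(url, timeout=2).status_code == 200:
                return True
        except requests.RequestException:
            pass  # server not up yet; keep polling
        time.sleep(1)
    return False
```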
28 changes: 28 additions & 0 deletions model_upload/convert.py
@@ -0,0 +1,28 @@
import sys
import os
import ruamel.yaml

mstr = ruamel.yaml.scalarstring.DoubleQuotedScalarString

MODEL_DIR = "../merged/"
yaml = ruamel.yaml.YAML()
yaml.preserve_quotes = True
yaml.default_flow_style = False
file_name = 'metafile.yml'
# Read the YAML file contents
with open(file_name, 'r') as file:
    data = yaml.load(file)
data['Models'] = []
for file in os.listdir(MODEL_DIR):
    if file == "metafile.yml": continue
    data['Models'].append({
        "Name": mstr(file),
        "Results": [dict(Task=mstr("Text Generation"), Dataset=mstr("none"))],
        "Weights": mstr(file),
    })

# Write the modified data back to the file
with open(os.path.join(MODEL_DIR, file_name), 'w') as file:
    yaml.dump(data, file)

print("Modifications saved to the file.")
76 changes: 76 additions & 0 deletions model_upload/metafile.yml
@@ -0,0 +1,76 @@
Collections:
- Name: "OculiChatDA"
  License: "Apache-2.0"
  Framework: "[]"
  Paper: {}
  Code:
    URL: "https://github.com/JieGenius/OculiChatDA"
Models:
- Name: "config.json"
  Results:
  - Task: "Text Generation"
    Dataset: "none"
- Name: "configuration_internlm.py"
  Results:
  - Task: "Text Generation"
    Dataset: "none"
- Name: "generation_config.json"
  Results:
  - Task: "Text Generation"
    Dataset: "none"
- Name: "modeling_internlm2.py"
  Results:
  - Task: "Text Generation"
    Dataset: "none"
- Name: "pytorch_model-00001-of-00008.bin"
  Results:
  - Task: "Text Generation"
    Dataset: "none"
- Name: "pytorch_model-00002-of-00008.bin"
  Results:
  - Task: "Text Generation"
    Dataset: "none"
- Name: "pytorch_model-00003-of-00008.bin"
  Results:
  - Task: "Text Generation"
    Dataset: "none"
- Name: "pytorch_model-00004-of-00008.bin"
  Results:
  - Task: "Text Generation"
    Dataset: "none"
- Name: "pytorch_model-00005-of-00008.bin"
  Results:
  - Task: "Text Generation"
    Dataset: "none"
- Name: "pytorch_model-00006-of-00008.bin"
  Results:
  - Task: "Text Generation"
    Dataset: "none"
- Name: "pytorch_model-00007-of-00008.bin"
  Results:
  - Task: "Text Generation"
    Dataset: "none"
- Name: "pytorch_model-00008-of-00008.bin"
  Results:
  - Task: "Text Generation"
    Dataset: "none"
- Name: "special_tokens_map.json"
  Results:
  - Task: "Text Generation"
    Dataset: "none"
- Name: "tokenization_internlm.py"
  Results:
  - Task: "Text Generation"
    Dataset: "none"
- Name: "tokenizer_config.json"
  Results:
  - Task: "Text Generation"
    Dataset: "none"
- Name: "tokenizer.model"
  Results:
  - Task: "Text Generation"
    Dataset: "none"
- Name: "pytorch_model.bin.index.json"
  Results:
  - Task: "Text Generation"
    Dataset: "none"
6 changes: 4 additions & 2 deletions requirements.txt
@@ -1,7 +1,5 @@
pandas
python-docx
# torch
# torchvision
modelscope
transformers
xtuner[all]
@@ -10,3 +8,7 @@ streamlit
lagent==0.2.2
onnxruntime-gpu
openxlab
# torch==2.0.0+cu118
# torchvision==0.15.1+cu118
# torchaudio
# --index-url https://download.pytorch.org/whl/cu118
2 changes: 1 addition & 1 deletion utils/actions/fundus_diagnosis.py
@@ -36,7 +36,7 @@ def fundus_diagnosis(self, fundus_path: str) -> dict:
        * msg (str): whether the tool call succeeded
        * glaucoma (int): 1 means suspected glaucoma, 0 means no glaucoma
        * dr_level (int): grade of diabetic retinopathy, where 0 means healthy and 4 means the most severe
            0: healthy
            0: no diabetic retinopathy
            1: mild non-proliferative diabetic retinopathy
            2: moderate
            3: severe
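To make the docstring concrete, a hypothetical return value for an image showing moderate non-proliferative DR and no signs of glaucoma might look like this (field meanings taken from the docstring above; the values are illustrative only):

```python
# Hypothetical result dict from fundus_diagnosis:
result = {
    "msg": "success",   # whether the tool call succeeded
    "glaucoma": 0,      # 0 = no glaucoma, 1 = suspected glaucoma
    "dr_level": 2,      # 0 = no diabetic retinopathy ... 4 = most severe
}
```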
20 changes: 10 additions & 10 deletions web_demo.py
@@ -13,13 +13,18 @@
from lagent.llms.meta_template import INTERNLM2_META as META
from lagent.schema import AgentStatusCode
from utils.actions.fundus_diagnosis import FundusDiagnosis

import base64
# from streamlit.logger import get_logger
LMDEPLOY_IP = "0.0.0.0:23333"
MODEL_NAME = "internlm2-chat-7b"

OculiChatDA_META_CN = ("你是一名眼科专家,可以通过文字和图片来帮助用户诊断眼睛的状态。\n"
"你的工作单位为**某三家医院**\n"
"你有以下三种能力:\n"
"1. 诊断眼底疾病,包括青光眼和糖尿病视网膜病变\n"
"2. 眼科常见疾病诊断,疾病解答,疾病预防等\n"
"3. 眼科药品信息查询\n"
"你可以主动询问用户基本信息,比如年龄,用眼频率,用眼环境等等,请时刻保持耐心且细致的回答"
"你可以调用外部工具来帮助帮助用户解决问题")
OculiChatDA_META_CN = OculiChatDA_META_CN # + "\n".join(ReActCALL_PROTOCOL_CN.split("\n")[1:])
PLUGIN_CN = """你可以使用如下工具:
@@ -165,7 +170,7 @@ def render_user(self, prompt: str):
    with st.chat_message('user'):
        img_paths = re.findall(r'\!\[.*?\]\((.*?)\)', prompt, re.DOTALL)  # allow matching \n and other whitespace
        if len(img_paths):
            st.markdown(re.sub(r'!\[.*\]\(.*\)', '', prompt))  # render the non-image part first
            st.markdown(re.sub(r'!\[.*\]\(.*\)', '', prompt.replace("\\n", " \\n ")))  # render the non-image part first
            # then render the image
            img_path = img_paths[0]
            st.write(
@@ -175,7 +180,7 @@ def render_user(self, prompt: str):
            # if os.path.exists(img_path):
            #     st.image(open(img_path, 'rb').read(), caption='Uploaded Image', width=400)
        else:
            st.markdown(prompt)
            st.markdown(prompt.replace("\\n", " \\n "))
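As an aside, the image-extraction regex above can be exercised on its own; this sketch reuses the example path from the README:

```python
import re

# Pull the image path out of a markdown image tag embedded in a user prompt.
prompt = "I uploaded an image ![fundus](/a/b/c.jpg) please check it"
img_paths = re.findall(r'\!\[.*?\]\((.*?)\)', prompt, re.DOTALL)
print(img_paths)  # ['/a/b/c.jpg']
```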

    def render_assistant(self, agent_return):
        with st.chat_message('assistant'):
@@ -284,7 +289,7 @@ def main():
        file_bytes = uploaded_file.read()
        file_type = uploaded_file.type
        if 'image' in file_type:
            st.image(file_bytes, caption='Uploaded Image')
            st.image(file_bytes, caption='Uploaded Image', width=600)
        elif 'video' in file_type:
            st.video(file_bytes, caption='Uploaded Video')
        elif 'audio' in file_type:
@@ -356,11 +361,6 @@ def main():


if __name__ == '__main__':
    print("torch.cuda.is_available():", torch.cuda.is_available())
    print("torch.__version__:", torch.__version__)
    print("torch.version.cuda:", torch.version.cuda)
    print("torch.cuda.get_device_name(0):", torch.cuda.get_device_name(0))
    print("nvidia-smi:", os.popen('nvidia-smi').read())
    root_dir = 'static'
    os.makedirs(root_dir, exist_ok=True)
    main()
    main()
