app.py
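"""Streamlit front-end for the Cypher chatbot.

Run locally with (assuming Streamlit is installed):

    streamlit run app.py
"""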
import os
import json
import random

import streamlit as st
import torch
import nltk

from model import NeuralNet  # model.py must be in the same directory
from nltk_utils import tokenize, bag_of_words

# Configure the page; this must be the first Streamlit call in the script
icon_path = os.path.join(os.getcwd(), "icon.ico")
st.set_page_config(page_title="Cypher Chatbot", page_icon=icon_path)
# Download the punkt tokenizer tables if they are not already available.
# nltk.download() does not raise LookupError; on failure it returns False or
# raises a network-related error, so catch broadly and surface it in the UI.
try:
    nltk.download("punkt_tab", quiet=True)
except Exception as e:
    st.error(f"NLTK resource download failed: {e}")
# Ensure paths are correct for deployment
current_directory = os.getcwd()
model_path = os.path.join(current_directory, "data.pth")
intents_path = os.path.join(current_directory, "intents.json")
# ----------------------------
# Load the trained chatbot model
# ----------------------------
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load intents (fail gracefully if the file is missing, mirroring the model check below)
if not os.path.exists(intents_path):
    st.error(f"Intents file '{intents_path}' not found!")
    st.stop()
with open(intents_path, "r") as f:
    intents = json.load(f)
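# intents.json is assumed to follow the common chatbot-tutorial shape; an
# illustrative sketch (the "patterns" key is used at training time, while this
# app only reads "tag" and "responses"):
#
#     {
#       "intents": [
#         {
#           "tag": "greeting",
#           "patterns": ["Hi", "Hello", "Hey"],
#           "responses": ["Hello!", "Hi there!"]
#         }
#       ]
#     }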
# Load the trained checkpoint; map_location lets a GPU-trained checkpoint
# load on a CPU-only machine.
if os.path.exists(model_path):
    data = torch.load(model_path, map_location=device)
else:
    st.error(f"Model file '{model_path}' not found!")
    st.stop()
input_size = data["input_size"]
hidden_size = data["hidden_size"]
output_size = data["output_size"]
all_words = data["all_words"]
tags = data["tags"]
model_state = data["model_state"]
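# The checkpoint is assumed to have been written by a training script along
# these lines (illustrative sketch, not the verified source):
#
#     torch.save({
#         "input_size": input_size, "hidden_size": hidden_size,
#         "output_size": output_size, "all_words": all_words,
#         "tags": tags, "model_state": model.state_dict(),
#     }, "data.pth")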
# Rebuild the network and load the trained weights
model = NeuralNet(input_size, hidden_size, output_size).to(device)
model.load_state_dict(model_state)
model.eval()  # inference mode: disables training-only behavior such as dropout
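# For reference, model.py's NeuralNet is assumed to be the standard two-hidden-
# layer feed-forward classifier from the common PyTorch chatbot tutorial; a
# minimal sketch under that assumption (not the verified source):
#
#     class NeuralNet(nn.Module):
#         def __init__(self, input_size, hidden_size, num_classes):
#             super().__init__()
#             self.l1 = nn.Linear(input_size, hidden_size)
#             self.l2 = nn.Linear(hidden_size, hidden_size)
#             self.l3 = nn.Linear(hidden_size, num_classes)
#             self.relu = nn.ReLU()
#
#         def forward(self, x):
#             out = self.relu(self.l1(x))
#             out = self.relu(self.l2(out))
#             return self.l3(out)  # raw logits; softmax is applied by the caller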
# ----------------------------
# Helper function to get chatbot response
# ----------------------------
def get_response(user_input):
    # Convert the raw sentence into the bag-of-words vector the model expects
    sentence = tokenize(user_input)
    X = bag_of_words(sentence, all_words)
    X = torch.from_numpy(X).to(device).unsqueeze(0)  # add a batch dimension

    # Model prediction: pick the tag with the highest logit
    with torch.no_grad():
        output = model(X)
    _, predicted = torch.max(output, dim=1)
    tag = tags[predicted.item()]

    # Only answer when the softmax confidence clears the 0.75 threshold;
    # otherwise fall back to a polite refusal.
    probs = torch.softmax(output, dim=1)
    prob = probs[0][predicted.item()]
    if prob.item() > 0.75:
        for intent in intents["intents"]:
            if intent["tag"] == tag:
                return random.choice(intent["responses"])
    return ("Sorry, I can only answer limited questions about Vanshaj to "
            "protect his privacy. Could you rephrase your query?")
# ----------------------------
# Streamlit UI for chatbot
# ----------------------------
st.title("🤖 Chat with Cypher")
st.write("Ask me anything about Vanshaj Raghuvanshi!")
# Chat history is kept in st.session_state so it survives Streamlit reruns
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the conversation so far
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])

# Handle new user input
user_input = st.chat_input("Type your message here...")
if user_input:
    # Echo and record the user message
    st.chat_message("user").write(user_input)
    st.session_state.messages.append({"role": "user", "content": user_input})

    # Generate, display, and record the bot response
    response = get_response(user_input)
    st.chat_message("assistant").write(response)
    st.session_state.messages.append({"role": "assistant", "content": response})