diff --git a/chatgui.py b/chatgui.py
index 86d7f08..64affc3 100644
--- a/chatgui.py
+++ b/chatgui.py
@@ -12,30 +12,25 @@
 words = pickle.load(open('words.pkl','rb'))
 classes = pickle.load(open('classes.pkl','rb'))

-
+# preprocess the user input
 def clean_up_sentence(sentence):
     sentence_words = nltk.word_tokenize(sentence)
     sentence_words = [lemmatizer.lemmatize(word.lower()) for word in sentence_words]
     return sentence_words

-# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence
-
+# build the bag-of-words vector
 def bow(sentence, words, show_details=True):
-    # tokenize the pattern
     sentence_words = clean_up_sentence(sentence)
-    # bag of words - matrix of N words, vocabulary matrix
     bag = [0]*len(words)
     for s in sentence_words:
         for i,w in enumerate(words):
             if w == s:
-                # assign 1 if current word is in the vocabulary position
                 bag[i] = 1
                 if show_details:
                     print ("found in bag: %s" % w)
     return(np.array(bag))

-def predict_class(sentence, model):
-    # filter out predictions below a threshold
+def calcola_pred(sentence, model):
     p = bow(sentence, words,show_details=False)
     res = model.predict(np.array([p]))[0]
     ERROR_THRESHOLD = 0.25
@@ -47,7 +42,7 @@ def predict_class(sentence, model):
         return_list.append({"intent": classes[r[0]], "probability": str(r[1])})
     return return_list

-def getResponse(ints, intents_json):
+def getRisposta(ints, intents_json):
     tag = ints[0]['intent']
     list_of_intents = intents_json['intents']
     for i in list_of_intents:
@@ -56,64 +51,10 @@ def getResponse(ints, intents_json):
             break
     return result

-def chatbot_response(msg):
-    ints = predict_class(msg, model)
-    res = getResponse(ints, intents)
+def inizia(msg):
+    ints = calcola_pred(msg, model)
+    res = getRisposta(ints, intents)
     print(res)
     return res

-chatbot_response('Thank you')
-'''
-#Creating GUI with tkinter
-import tkinter
-from tkinter import *
-
-
-def send():
-    msg = EntryBox.get("1.0",'end-1c').strip()
-    EntryBox.delete("0.0",END)
-
-    if msg != '':
-        ChatLog.config(state=NORMAL)
-        ChatLog.insert(END, "Tu: " + msg + '\n\n')
-        ChatLog.config(foreground="#442265", font=("Verdana", 12 ))
-
-        res = chatbot_response(msg)
-        ChatLog.insert(END, "Bot: " + res + '\n\n')
-
-        ChatLog.config(state=DISABLED)
-        ChatLog.yview(END)
-
-
-base = Tk()
-base.title("Ciao!")
-base.geometry("400x500")
-base.resizable(width=FALSE, height=FALSE)
-
-#Create Chat window
-ChatLog = Text(base, bd=0, bg="white", height="8", width="50", font="Arial",)
-
-ChatLog.config(state=DISABLED)
-
-#Bind scrollbar to Chat window
-scrollbar = Scrollbar(base, command=ChatLog.yview, cursor="heart")
-ChatLog['yscrollcommand'] = scrollbar.set
-
-#Create Button to send message
-SendButton = Button(base, font=("Verdana",12,'bold'), text="Invia", width="12", height=5,
-                    bd=0, bg="#32de97", activebackground="#3c9d9b",fg='#ffffff',
-                    command= send )
-
-#Create the box to enter message
-EntryBox = Text(base, bd=0, bg="white",width="29", height="5", font="Arial")
-#EntryBox.bind("", send)
-
-
-#Place all components on the screen
-scrollbar.place(x=376,y=6, height=386)
-ChatLog.place(x=6,y=6, height=386, width=370)
-EntryBox.place(x=128, y=401, height=90, width=265)
-SendButton.place(x=6, y=401, height=90)
-
-base.mainloop()
-'''
\ No newline at end of file
+inizia('Ciao!')
\ No newline at end of file
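
Note: the inference helpers above all hinge on the bag-of-words encoding built by bow(). A minimal, self-contained sketch of that encoding (the vocabulary and token list below are made-up stand-ins for words.pkl and for the output of clean_up_sentence(); they are not part of the patch):

    import numpy as np

    # stand-in vocabulary (normally loaded from words.pkl)
    vocabulary = ['ciao', 'come', 'stai', 'grazie']
    # stand-in for clean_up_sentence("Ciao, grazie!") -> lemmatized, lower-cased tokens
    sentence_tokens = ['ciao', 'grazie']

    # one slot per vocabulary word, set to 1 when that word occurs in the sentence
    bag = np.array([1 if w in sentence_tokens else 0 for w in vocabulary])
    print(bag)  # [1 0 0 1]
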
day"], - "responses": ["Hello, thanks for asking", "Good to see you again", "Hi there, how can I help?"], - "context": [""] - }, - {"tag": "goodbye", - "patterns": ["Bye", "See you later", "Goodbye", "Nice chatting to you, bye", "Till next time"], - "responses": ["See you!", "Have a nice day", "Bye! Come back again soon."], - "context": [""] - }, - {"tag": "thanks", - "patterns": ["Thanks", "Thank you", "That's helpful", "Awesome, thanks", "Thanks for helping me"], - "responses": ["Happy to help!", "Any time!", "My pleasure"], - "context": [""] - }, - {"tag": "noanswer", - "patterns": [], - "responses": ["Sorry, can't understand you", "Please give me more info", "Not sure I understand"], - "context": [""] - }, - {"tag": "options", - "patterns": ["How you could help me?", "What you can do?", "What help you provide?", "How you can be helpful?", "What support is offered"], - "responses": ["I can guide you through Adverse drug reaction list, Blood pressure tracking, Hospitals and Pharmacies", "Offering support for Adverse drug reaction, Blood pressure, Hospitals and Pharmacies"], - "context": [""] - }, - {"tag": "adverse_drug", - "patterns": ["How to check Adverse drug reaction?", "Open adverse drugs module", "Give me a list of drugs causing adverse behavior", "List all drugs suitable for patient with adverse reaction", "Which drugs dont have adverse reaction?" ], - "responses": ["Navigating to Adverse drug reaction module"], - "context": [""] - }, - {"tag": "blood_pressure", - "patterns": ["Open blood pressure module", "Task related to blood pressure", "Blood pressure data entry", "I want to log blood pressure results", "Blood pressure data management" ], - "responses": ["Navigating to Blood Pressure module"], - "context": [""] - }, - {"tag": "blood_pressure_search", - "patterns": ["I want to search for blood pressure result history", "Blood pressure for patient", "Load patient blood pressure result", "Show blood pressure results for patient", "Find blood pressure results by ID" ], - "responses": ["Please provide Patient ID", "Patient ID?"], - "context": ["search_blood_pressure_by_patient_id"] - }, - {"tag": "search_blood_pressure_by_patient_id", - "patterns": [], - "responses": ["Loading Blood pressure result for Patient"], - "context": [""] - }, - {"tag": "pharmacy_search", - "patterns": ["Find me a pharmacy", "Find pharmacy", "List of pharmacies nearby", "Locate pharmacy", "Search pharmacy" ], - "responses": ["Please provide pharmacy name"], - "context": ["search_pharmacy_by_name"] - }, - {"tag": "search_pharmacy_by_name", - "patterns": [], - "responses": ["Loading pharmacy details"], - "context": [""] - }, - {"tag": "hospital_search", - "patterns": ["Lookup for hospital", "Searching for hospital to transfer patient", "I want to search hospital data", "Hospital lookup for patient", "Looking up hospital details" ], - "responses": ["Please provide hospital name or location"], - "context": ["search_hospital_by_params"] - }, - {"tag": "search_hospital_by_params", - "patterns": [], - "responses": ["Please provide hospital type"], - "context": ["search_hospital_by_type"] - }, - {"tag": "search_hospital_by_type", - "patterns": [], - "responses": ["Loading hospital details"], - "context": [""] - } - ] + {"tag": "saluti", + "patterns": ["Ciao", "Hey!", "C'è qualcuno?","Toc toc","Hola", "Ciaone", "Buongiorno", "Buonasera", "Buonanotte", "Buona serata"], + "responses": ["Ciao a te!", "Ci sono. Come posso aiutarti?", "Hey! 
Come va?", "Bentornato"], + "context": [""] + }, + {"tag": "frasi_sentimentali", + "patterns": ["Ti amo", "Sono innamorato di te", "Sei un tesoro", "Sai cosa provo per te", "Sai che ho bisogno di te"], + "responses": ["Anche io!", "Si, lo so", "Me lo avrai detto un milione di volte", "Dici davvero?"], + "context": [""] + }, + {"tag": "domande", + "patterns": ["Mi ami?","Pensi che io sia gentile?","Ti piaccio?"], + "responses": ["Decisamente sì", "Assolutamente", "Non è bello ciò che è bello, ma è bello ciò che piace"], + "context": [""] + }, + {"tag": "grazie", + "patterns": ["Grazie", "Grazie mille", "Sei stato davvero di compagnia", "Perfetto", "Grazie per avermi aiutato"], + "responses": ["Ne sono felice", "Quando vuoi, mi trovi qui", "Piacere tutto mio"], + "context": [""] + }, + {"tag": "noanswer", + "patterns": [], + "responses": ["Non ho capito", "Scusa, potresti ripetere?", "Prova ad usare parole più semplici."], + "context": [""] + }, + {"tag": "options", + "patterns": ["Mi faresti compagnia?", "Che cosa fai?", "Possiamo parlare?"], + "responses": ["Possiamo parlare un po', se vuoi", "Posso farti compagnia", "Se vuoi, possiamo chiaccherare!"], + "context": [""] + }, + {"tag": "options", + "patterns": ["Mi faresti compagnia?", "Che cosa fai?", "Possiamo parlare?"], + "responses": ["Possiamo parlare un po', se vuoi", "Posso farti compagnia", "Se vuoi, possiamo chiaccherare!"], + "context": [""] + }, + {"tag": "introspezione_bot", + "patterns": ["Chi sei?", "Hai una coscienza?", "Hai un nome?", "Sei un'intelligenza artificiale?"], + "responses": ["Dipende da te", "Dubito di possederlo.", "Non è poi così importante saperlo", "Cosa te lo fa pensare?"], + "context": [""] + }, + {"tag": "introspezione_utente", + "patterns": ["Mi sento solo", "Non mi sento amato", "Nessuno mi ama", "Qualcuno mi amerà mai?", "Sarò mai felice?"], + "responses": ["Dipende da te", "Per quale motivo dici questo?", "L'importante è saper stare bene da soli", "Meglio soli che malaccompagnati"], + "context": [""] + }, + {"tag": "aiuto", + "patterns": ["Ho bisogno di aiuto", "Non mi sento per niente bene", "Penso di stare male", "Aiutami", "Aiuto"], + "responses": ["Come posso aiutarti?", "Fai un bel respiro", "Vuoi contattare qualcuno?", "Andrà tutto bene"], + "context": [""] + } +] } diff --git a/requirements.txt b/requirements.txt index 4f9ca80..b69d07e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ nltk>=3.4.5 numpy>=1.16.2 keras>=2.1.5 -tensorflow===2.0.0 +tensorflow===2.1.0 EasyTkinter>=1.1.0 \ No newline at end of file diff --git a/train_chatbot.py b/train_chatbot.py index dac6ed5..30ca016 100644 --- a/train_chatbot.py +++ b/train_chatbot.py @@ -8,12 +8,11 @@ import numpy as np from keras.models import Sequential -from keras.layers import Dense, Activation, Dropout +from keras.layers import Dense, Dropout from keras.optimizers import SGD -import random lemmatizer = WordNetLemmatizer() -words=[] +words = [] classes = [] documents = [] ignore_words = ['?', '!'] @@ -21,14 +20,15 @@ intents = json.loads(data_file) print(intents) - +# intents: gruppi di conversazioni-tipo +# patterns: possibili interazioni dell'utente for intent in intents['intents']: for pattern in intent['patterns']: - # take each word and tokenize it + # tokenizzo ogni parola w = nltk.word_tokenize(pattern) words.extend(w) - # adding documents + # aggiungo all'array documents documents.append((w, intent['tag'])) # adding classes to our class list @@ -36,51 +36,35 @@ classes.append(intent['tag']) words = 
diff --git a/train_chatbot.py b/train_chatbot.py
index dac6ed5..30ca016 100644
--- a/train_chatbot.py
+++ b/train_chatbot.py
@@ -8,12 +8,11 @@
 import numpy as np

 from keras.models import Sequential
-from keras.layers import Dense, Activation, Dropout
+from keras.layers import Dense, Dropout
 from keras.optimizers import SGD
-import random

 lemmatizer = WordNetLemmatizer()
-words=[]
+words = []
 classes = []
 documents = []
 ignore_words = ['?', '!']
@@ -21,14 +20,15 @@
 intents = json.loads(data_file)

 print(intents)
-
+# intents: groups of example conversations
+# patterns: possible user inputs
 for intent in intents['intents']:
     for pattern in intent['patterns']:

-        # take each word and tokenize it
+        # tokenize each word
         w = nltk.word_tokenize(pattern)
         words.extend(w)
-        # adding documents
+        # add the (tokens, tag) pair to the documents list
         documents.append((w, intent['tag']))

         # adding classes to our class list
@@ -36,51 +36,35 @@
            classes.append(intent['tag'])

 words = [lemmatizer.lemmatize(w.lower()) for w in words if w not in ignore_words]
-#words = sorted(list(set(words)))
-
-#classes = sorted(list(set(classes)))
-
-print (len(documents), "documents")
-print("###########DOCUMENTS")
-print(documents)
-print (len(classes), "classes", classes)
-
-print (len(words), "unique lemmatized words", words)
+pickle.dump(words, open('words.pkl','wb'))
+pickle.dump(classes, open('classes.pkl','wb'))

-pickle.dump(words,open('words.pkl','wb'))
-pickle.dump(classes,open('classes.pkl','wb'))
-
-# initializing training data
+# prepare the training data for the network
 training = []
 output_empty = [0] * len(classes)

 for doc in documents:
-    # initializing bag of words
+    # bag of words
     bag = []
-    # list of tokenized words for the pattern
+    # list of tokens for the pattern
     pattern_words = doc[0]
-    # lemmatize each word - create base word, in attempt to represent related words
+    # lemmatize each token
     pattern_words = [lemmatizer.lemmatize(word.lower()) for word in pattern_words]
-    # create our bag of words array with 1, if word match found in current pattern
+    # 1 if the word appears in the pattern, 0 otherwise
     for w in words:
         bag.append(1) if w in pattern_words else bag.append(0)

-    # output is a '0' for each tag and '1' for current tag (for each pattern)
     output_row = list(output_empty)
     output_row[classes.index(doc[1])] = 1

     training.append([bag, output_row])
-# shuffle our features and turn into np.array
-#random.shuffle(training)
+
 training = np.array(training)
-# create train and test lists. X - patterns, Y - intents
+# build the training lists: X - patterns, Y - intents
 train_x = list(training[:,0])
 train_y = list(training[:,1])
-print("Training data created")
-
-# Create model - 3 layers. First layer 128 neurons, second layer 64 neurons and 3rd output layer contains number of neurons
-# equal to number of intents to predict output intent with softmax
+# build the model
 model = Sequential()
 model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
 model.add(Dropout(0.5))
@@ -88,12 +72,11 @@
 model.add(Dropout(0.5))
 model.add(Dense(len(train_y[0]), activation='softmax'))

-# Compile model. Stochastic gradient descent with Nesterov accelerated gradient gives good results for this model
 sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
 model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

 #fitting and saving the model
-hist = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
+hist = model.fit(np.array(train_x), np.array(train_y), epochs=300, batch_size=5, verbose=1)
 model.save('chatbot_model.h5', hist)

-print("model created")
+print("Modello creato!")
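
Note: train_chatbot.py no longer shuffles the training examples (import random was dropped), so the rows stay grouped by intent and nothing is held out to monitor overfitting across the 300 epochs. One possible follow-up, sketched under the assumption that train_x, train_y and model are exactly the variables built above; this is not part of the patch:

    import random

    import numpy as np

    # shuffle the (bag, one-hot) pairs together so the classes are mixed
    pairs = list(zip(train_x, train_y))
    random.shuffle(pairs)
    train_x, train_y = map(list, zip(*pairs))

    # hold out the last 10% of the (now shuffled) data as a validation set
    hist = model.fit(np.array(train_x), np.array(train_y),
                     epochs=300, batch_size=5, verbose=1,
                     validation_split=0.1)
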