forked from mickvanhulst/tf_chatbot_lotr
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Experimental Generator.py
70 lines (60 loc) · 2.2 KB
/
Experimental Generator.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
import numpy as np
import pandas as pd
from keras.layers import LSTM, Dense, Dropout
from keras.models import Sequential
from keras.utils import np_utils
# Load the corpus and build character-level training data: each sample is a
# window of `seq_length` characters encoded as integers, and its label is the
# single character that follows the window.
with open("./ST.txt") as corpus_file:  # `with` closes the handle instead of leaking it
    text = corpus_file.read().lower()

# Map every distinct character to an integer index, and back again for decoding.
characters = sorted(set(text))
n_to_char = {n: char for n, char in enumerate(characters)}
char_to_n = {char: n for n, char in enumerate(characters)}

X = []
Y = []
length = len(text)
seq_length = 100
for i in range(length - seq_length):
    sequence = text[i:i + seq_length]
    label = text[i + seq_length]
    X.append([char_to_n[char] for char in sequence])
    Y.append(char_to_n[label])

# Reshape inputs to (samples, timesteps, features) and scale indices into
# [0, 1); one-hot encode the labels for the softmax output layer.
X_modified = np.reshape(X, (len(X), seq_length, 1))
X_modified = X_modified / float(len(characters))
Y_modified = np_utils.to_categorical(Y)
# Two-layer LSTM character model: 50 units per layer with dropout, and a
# softmax over the full character set predicting the next character's index.
model = Sequential()
model.add(LSTM(50, input_shape=(X_modified.shape[1], X_modified.shape[2]), return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(50))
model.add(Dropout(0.2))
model.add(Dense(Y_modified.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')

txt = ""
# Resume from a previously saved checkpoint when one exists. The original
# loaded unconditionally, which crashes on the very first run before any
# weights file has been written.
try:
    model.load_weights('./text_generator_50_0.2_50_0.2_baseline.h5')
except OSError:
    pass  # no checkpoint yet -- train from freshly initialized weights
# Train the model, then sample 5000 characters by repeatedly feeding the
# model its own predictions (greedy argmax decoding over a sliding window).
for _ in range(1):
    # Train and checkpoint the weights.
    model.fit(X_modified, Y_modified, epochs=100, batch_size=100)
    model.save_weights('./text_generator_50_0.2_50_0.2_baseline.h5')

    # Seed generation with the 100th training window. Copy it: the original
    # aliased X[99] directly, so the append below silently mutated the
    # training data in place.
    string_mapped = list(X[99])
    full_string = [n_to_char[value] for value in string_mapped]

    print('Generating Characters, Pls wait')
    for i in range(5000):
        x = np.reshape(string_mapped, (1, len(string_mapped), 1))
        x = x / float(len(characters))  # same scaling as the training inputs
        pred_index = np.argmax(model.predict(x, verbose=0))
        full_string.append(n_to_char[pred_index])
        # Slide the window: append the prediction, drop the oldest character.
        string_mapped.append(pred_index)
        string_mapped = string_mapped[1:]

    # Join once -- repeated `txt = txt + char` concatenation is quadratic.
    txt = "".join(full_string)
    print(txt)
    # Reload the checkpoint written above (restores post-training weights).
    model.load_weights('./text_generator_50_0.2_50_0.2_baseline.h5')

# Persist the generated sample; `with` guarantees the file is closed.
with open("Output.txt", "w") as text_file:
    text_file.write(txt)