# ReadLDA_Model.py
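"""Load a series of saved LDA models for the Parenting corpus, score each
one with a c_v coherence model, and plot coherence against the number of
topics.

Expects the trained models (LDA_Model.<n>.model), the corpus (Parenting.mm),
the dictionary (Parenting.dict), and the cleaned dataframe
(Parenting_cleaned.pkl) to be present in the working directory.
"""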
import pickle
import logging
import warnings

import pandas as pd
from nltk.tokenize import RegexpTokenizer

# Gensim
import gensim
import gensim.corpora as corpora
from gensim.corpora.dictionary import Dictionary
from gensim.models import CoherenceModel

# Use a non-interactive backend so the plot can be saved headlessly.
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt

# Enable logging for gensim - optional
logging.basicConfig(filename='lda_model_Parenting.log',
                    format='%(asctime)s : %(levelname)s : %(message)s',
                    level=logging.DEBUG)
warnings.filterwarnings("ignore", category=DeprecationWarning)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging.debug("test")
def Tokenization(document):
    # Join the thread's comments with spaces so words at comment
    # boundaries are not fused together, then split into word tokens.
    document = " ".join(document)
    tokenizer = RegexpTokenizer(r'\w+')
    return tokenizer.tokenize(document)
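# e.g. Tokenization(["I love this", "me too!"]) -> ['I', 'love', 'this', 'me', 'too']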
df = pd.read_pickle('Parenting_cleaned.pkl')
print('reading the Parenting Cleaned dataframe...')
print('*' * 50)
print('and for a test...')
print(df.clean_body.head())
print('*' * 50)
print('grouping into documents')
# Each link_id is one thread; collect all of its comments into one document.
CompleteThread = df.groupby('link_id')['clean_body'].apply(list)
# Tokenize every thread so the texts can be scored against the dictionary.
processed_threads = []
for thread in CompleteThread:
    # Preprocessing each of the threads
    processed_threads.append(Tokenization(thread))
texts = processed_threads
print('The length of the documents is...')
print(len(texts))
print('*' * 50)
print('the first document is...')
print(texts[0])
print('*' * 50)
print('loading the corpus...')
corpus = corpora.MmCorpus('Parenting.mm')
print(corpus[0])
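# Each corpus entry is a bag of words: a list of (token_id, weight) pairs;
# the dictionary loaded below maps token ids back to words.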
dictionary = Dictionary.load('Parenting.dict')
print('The length of the dictionary')
print(len(dictionary))
print('the first entry of the dictionary')
print(dictionary[0])
print('*' * 50)
print('reading all LDA models....')
print('*' * 50)
model_list = []
# Nine pre-trained models, saved as LDA_Model.1.model ... LDA_Model.9.model.
for i in range(9):
    LDA_File = "LDA_Model." + str(i + 1) + ".model"
    lda_file = gensim.models.ldamodel.LdaModel.load(LDA_File)
    model_list.append(lda_file)
print('preparing coherence values for each of the LDA models...')
print('*' * 50)
coherence_values = []
for model in model_list:
    print('now working on coherence value for model...')
    print(model)
    print('*' * 50)
    # c_v coherence needs the tokenized texts as well as the dictionary.
    coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')
    coherence_values.append(coherencemodel.get_coherence())
# Pair each coherence score with its model's topic count (5, 10, ..., 45)
# and sort best-first.
limit = 50
start = 5
step = 5
x = range(start, limit, step)
sorted_coherence = sorted(zip(x, coherence_values), key=lambda pair: pair[1], reverse=True)
print(sorted_coherence)
print('*' * 50)
for m, cv in sorted_coherence:
    print("Num Topics =", m, " has Coherence Value of", round(cv, 4))
print('saving list...')
print('*' * 50)
with open("cohere.pkl", "wb") as fp:
    pickle.dump(sorted_coherence, fp)
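# Optional sanity check (a sketch: assumes, per the topic counts above, that
# the model with m topics was saved as LDA_Model.<m/5>.model): reload the
# highest-coherence model and print its topics.
best_m, best_cv = sorted_coherence[0]
best_model = gensim.models.ldamodel.LdaModel.load("LDA_Model." + str(best_m // 5) + ".model")
for topic in best_model.print_topics(num_topics=best_m, num_words=10):
    print(topic)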
print('creating a graph of the coherence model...')
print('*' * 50)
# Show graph
fig = plt.gcf()
plt.plot(x, coherence_values)
plt.xlabel("Num Topics")
plt.ylabel("Coherence score")
plt.legend(["coherence_values"], loc='best')
fig.savefig('LDA_models_coherence.png')