# readBriefingPickles.py
# Load the pickled briefings, stem headlines and bodies, and build a TF-IDF matrix.
import pickle
import os

from keras.preprocessing import text  # explicit import; the script uses text.Tokenizer
from nltk.stem import SnowballStemmer
# Sanity check: load and print a single briefing.
with open("./briefings/2016_6_24.p", "rb") as f:
    d = pickle.load(f)
print(d)
# Load every pickled briefing in the directory.
data = []
for fname in os.listdir("./briefings/"):
    with open("./briefings/" + fname, "rb") as f:
        data.append(pickle.load(f))
# Headlines and bodies: stem word by word, keeping a combined corpus for the tokenizer.
headlines = []
text_bodies = []
all_text = []
snow = SnowballStemmer('english')
for briefing in data:
    for k in briefing:
        # Split before stemming; iterating over the raw string would stem
        # individual characters instead of words.
        stemmed_headline = ' '.join(snow.stem(w) for w in str(k["headline"]).split())
        stemmed_body = ' '.join(snow.stem(w) for w in str(k["text"]).split())
        headlines.append(stemmed_headline)
        text_bodies.append(stemmed_body)
        all_text.append(stemmed_headline)
        all_text.append(stemmed_body)
# Fit a Keras tokenizer on the combined corpus and encode it as a TF-IDF matrix.
tokenizer = text.Tokenizer(num_words=100,
                           filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
                           lower=True, split=' ', char_level=False, oov_token=None)
tokenizer.fit_on_texts(all_text)
encoded_docs = tokenizer.texts_to_matrix(all_text, mode='tfidf')
print(encoded_docs)
# word_index is a dict and cannot be sliced; print the first ten entries instead.
print(list(tokenizer.word_index.items())[:10])
# To build (rough sketches below):
#   N-grams
#   Ticker (entity) recognition
#   Train and save the TF-IDF vectorizer
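# A minimal sketch of two of the steps above, under stated assumptions: bigrams
# via nltk.util.ngrams over the already-stemmed documents, and persisting the
# fitted tokenizer with pickle (the output file name "tokenizer.p" is illustrative).
from nltk.util import ngrams

def doc_bigrams(doc):
    # Pair up adjacent tokens of one stemmed document.
    return list(ngrams(doc.split(), 2))

#print(doc_bigrams(all_text[0])[:5])

with open("tokenizer.p", "wb") as f:
    pickle.dump(tokenizer, f)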
# Bodies
#for briefing in data:
#    for k in briefing:
#        print(k["date"], k["text"])

## TickerInfo
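# A naive placeholder for ticker recognition, assuming tickers show up as short
# all-caps tokens in the raw (unstemmed) text; a symbol table or a proper entity
# recognizer would replace this heuristic.
import re

TICKER_RE = re.compile(r'\b[A-Z]{1,5}\b')

def find_ticker_candidates(raw_text):
    # Return tokens that look like ticker symbols.
    return TICKER_RE.findall(raw_text)

#for briefing in data:
#    for k in briefing:
#        print(k["headline"], find_ticker_candidates(str(k["headline"])))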