-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathnaive_bayes.py
149 lines (142 loc) · 6.66 KB
/
naive_bayes.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 27 20:31:51 2019
使用朴素贝叶斯对文本进行分类
@author: 李畅
"""
from os import path,listdir,makedirs,rename,chmod
import pickle
from math import pow,log,exp
import argparse
import time
import sys
class Naive_bayes:
    """Naive Bayes text classifier.

    Training layout: ``train_dir/<class_name>/<doc>``, where every document
    is a whitespace-tokenized UTF-8 text file.  ``cal_condition_prob`` then
    scores each file in a test directory and moves it into a subdirectory
    named after its predicted class.
    """
    def __init__(self, file_type_num, train_dir):
        """
        @param file_type_num: number of document classes (unused; kept for
            backward compatibility — the classes are discovered from the
            subdirectories of ``train_dir``)
        @param train_dir: directory used for training
        """
        self.type_prob = []          # class prior probabilities, parallel to type_names
        self.type_names = []         # class (subdirectory) names
        self.type_feat_cnt = {}      # {class_i: {feature_j: count, ...}, ...}
        self.all_feats = set()       # every feature value seen during training
        self.doc_feat_prob_cnt = {}  # {doc_i: {feature_j: [cond_prob, count]}}
        self.train_dir = train_dir
        self._cal_type_prob()
        self._cal_type_feats()

    def _cal_type_prob(self):
        """Estimate each class prior as its share of the training documents."""
        print('all type docs calculating.....')
        for subdir in listdir(self.train_dir):
            self.type_names.append(subdir)
            self.type_prob.append(len(listdir(path.join(self.train_dir,
                                                        subdir))))
        all_file_cnt = sum(self.type_prob)
        self.type_prob = [cnt * 1.0 / all_file_cnt for cnt in self.type_prob]
        print('calculating finish.........', flush=True)

    def _cal_type_feats(self):
        """Count, for every class, how many times each feature occurs."""
        progres = 0
        for subdir in listdir(self.train_dir):
            self.type_feat_cnt[subdir] = {}
            for doc in listdir(path.join(self.train_dir, subdir)):
                progres += 1
                if progres % 100 == 0:
                    print('train file , solved:{}'.format(progres), flush=True)
                with open(path.join(self.train_dir, subdir, doc),
                          encoding='utf-8', mode='r') as fr:
                    # iterate the file lazily instead of readlines()
                    for line in fr:
                        line = line.strip()
                        if not line:
                            continue
                        for feat in (tok.strip() for tok in line.split()
                                     if tok.strip()):
                            # .add() — the original rebuilt the whole set per
                            # token (accidental O(n^2))
                            self.all_feats.add(feat)
                            self.type_feat_cnt[subdir][feat] = \
                                self.type_feat_cnt[subdir].get(feat, 0.0) + 1.0

    def _prob_soft_migrate(self, prob):
        """
        Squash a (possibly negative) log value through a sigmoid and shift it
        into the positive interval (3, 4), so later ``log``/``pow`` calls stay
        in a safe positive domain.

        @param prob: value to smooth
        """
        prob = 1.0 / (1 + exp(0 - prob))
        prob += 3.0
        return prob

    def cal_condition_prob(self, testdir):
        """
        Score every document in ``testdir`` against each class and move the
        file into the subdirectory of its best-scoring class.

        @param testdir: directory containing the test documents
        """
        for type_name in self.type_names:
            # BUGFIX: mode must be octal — the original decimal 755 (0o1363)
            # produced bogus permissions.  exist_ok allows re-running.
            makedirs(path.join(testdir, type_name), mode=0o755, exist_ok=True)
            chmod(path.join(testdir, type_name), 0o755)
        progres = 0
        for doc in listdir(testdir):
            # BUGFIX: skip the class subdirectories created above — the
            # original opened them as files and crashed (IsADirectoryError)
            if not path.isfile(path.join(testdir, doc)):
                continue
            progres += 1
            if progres % 100 == 0:
                print('test file, solved:{}'.format(progres), flush=True)
            # NOTE(review): the first 6 chars are assumed to uniquely
            # identify a document — colliding prefixes would share an entry
            self.doc_feat_prob_cnt[doc[:6]] = {}
            feat_stats = self.doc_feat_prob_cnt[doc[:6]]
            with open(path.join(testdir, doc), encoding='utf-8',
                      mode='r') as fr:
                for line in fr:
                    line = line.strip()
                    if not line:
                        continue
                    for feat in (tok.strip() for tok in line.split()
                                 if tok.strip()):
                        if feat not in feat_stats:
                            feat_stats[feat] = [.0, .0]
                        feat_stats[feat][1] += 1.0
            # score the document under every class
            max_prob = -1.0
            max_prob_type = ''
            lamda = log(len(self.all_feats))  # smoothing term
            for type_name in self.type_names:
                for feat in feat_stats:
                    feat_stats[feat][0] = log(
                        ((self.type_feat_cnt[type_name][feat]
                          if feat in self.type_feat_cnt[type_name]
                          else 0) + lamda) * 1.0 /
                        (log(len(self.type_feat_cnt[type_name].keys())) +
                         log(len(self.all_feats))))
                    feat_stats[feat][0] = \
                        self._prob_soft_migrate(feat_stats[feat][0])
                prob = log(self.type_prob[self.type_names.index(type_name)])
                prob = self._prob_soft_migrate(prob)
                for feat in feat_stats:
                    prob = log(prob * pow(feat_stats[feat][0],
                                          int(feat_stats[feat][1])))
                    prob = self._prob_soft_migrate(prob)
                if prob > max_prob:
                    max_prob = prob
                    max_prob_type = type_name
            # move the file into its predicted class's directory
            rename(path.join(testdir, doc),
                   path.join(testdir, max_prob_type, doc))
if __name__ == '__main__':
    try:
        # Command-line driver: train on --train_dir, then classify the files
        # found in --test_dir in place.
        arg_parser = argparse.ArgumentParser()
        arg_parser.add_argument('-n', required=True, dest='type_num',
                                help='type count')
        arg_parser.add_argument('--train_dir', required=True, dest='train_dir',
                                help='the directory of type sub directories')
        arg_parser.add_argument('--test_dir', required=True, dest='test_dir',
                                help='the directory of testfiles')
        cli_args = arg_parser.parse_args()
        print('model start training....', flush=True)
        start = time.time()
        classifier = Naive_bayes(int(cli_args.type_num), cli_args.train_dir)
        print('model training finish....', flush=True)
        classifier.cal_condition_prob(cli_args.test_dir)
        print('elapse = {}'.format(time.time() - start), flush=True)
        print('file classify finish....', flush=True)
    except (BrokenPipeError, IOError):
        # downstream pipe closed (e.g. piped into `head`) — report and exit
        print('BrokenPipeError caught', file=sys.stderr)
    print('Done', file=sys.stderr)
    sys.stderr.close()