-
Notifications
You must be signed in to change notification settings - Fork 4
/
Trainer.cpp
107 lines (87 loc) · 3.56 KB
/
Trainer.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
#include <libcohog/Trainer.hpp>

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <vector>
namespace libcohog
{
// Builds the liblinear training configuration used by this project:
// L2-regularized L2-loss SVC (dual solver), eps = 0.1, C = 1, no class weights.
// Returns the configuration by value; the weight arrays are intentionally NULL.
parameter default_liblinear_parameter()
{
    // Value-initialize first so that any fields added by newer liblinear
    // versions (e.g. p, init_sol) are zeroed instead of left indeterminate.
    parameter par = parameter();
    par.solver_type  = L2R_L2LOSS_SVC_DUAL;
    par.eps          = 0.1;   // stopping tolerance
    par.C            = 1;     // regularization strength
    par.nr_weight    = 0;     // no per-class weight overrides
    par.weight_label = NULL;
    par.weight       = NULL;
    return par;
}
// Trains a liblinear model from pre-built sparse feature rows.
//
// positive_features / negative_features: liblinear sparse rows; each row must
//     already be terminated by a feature_node with index == -1.
// dim:   feature dimensionality (problem.n).
// param: solver configuration (see default_liblinear_parameter()).
// Returns a model allocated by liblinear; the caller owns it and must release
// it with liblinear's free_and_destroy_model().
model* train_liblinear(const std::vector<std::vector<feature_node> >& positive_features, const std::vector<std::vector<feature_node> >& negative_features, int dim, parameter param)
{
    // Labels: +1 for every positive sample, -1 for every negative, in order.
    std::vector<double> responses(positive_features.size() + negative_features.size());
    std::fill(responses.begin(), responses.begin() + positive_features.size(), 1);
    std::fill(responses.begin() + positive_features.size(), responses.end(), -1);

    // Build the liblinear problem description.
    problem prob;
    prob.n    = dim;                                // number of features
    prob.y    = responses.data();                   // label array
    prob.l    = static_cast<int>(responses.size()); // number of training samples
    prob.bias = 0;  // NOTE(review): liblinear convention is bias < 0 to disable the
                    // bias term; 0 keeps a bias feature of value 0 — confirm intended.

    // problem.x is non-const in the liblinear API, so const_cast is required;
    // train() does not modify the feature data.
    std::vector<feature_node*> features_ptr;
    features_ptr.reserve(responses.size());
    for(std::size_t i = 0; i < positive_features.size(); ++i)
        features_ptr.push_back(const_cast<feature_node*>(positive_features[i].data()));
    for(std::size_t i = 0; i < negative_features.size(); ++i)
        features_ptr.push_back(const_cast<feature_node*>(negative_features[i].data()));
    prob.x = features_ptr.data();

    // Train the linear model.
    model *m = train(&prob, &param);

    // Report the training-set error (floating-point division so the
    // percentage is not truncated to an integer).
    int cnt = 0;
    for(std::size_t i = 0; i < features_ptr.size(); ++i)
        if(predict(m, features_ptr[i]) != prob.y[i])
            ++cnt;
    std::cerr << "misses:" << cnt << " (" << (100.0 * cnt / responses.size()) << "%)" << std::endl;
    return m;
}
// Trains a liblinear model from dense float feature vectors.
//
// The dimensionality is taken from the first positive sample; every sample is
// expected to have the same length. Each dense row is converted to liblinear's
// sparse 1-based (index, value) form, terminated by index == -1.
//
// Throws std::invalid_argument if positive_features is empty, and
// std::logic_error if the sparse conversion self-check fails.
// Returns a model allocated by liblinear; the caller owns it and must release
// it with liblinear's free_and_destroy_model().
model* train_liblinear(const std::vector<std::vector<float> >& positive_features, const std::vector<std::vector<float> >& negative_features, parameter param)
{
    if(positive_features.empty())
        throw std::invalid_argument("train_liblinear: positive_features must not be empty");
    const int dim = static_cast<int>(positive_features[0].size());

    // Concatenate positives then negatives so labels line up by index.
    std::vector<std::vector<float> > features;
    features.reserve(positive_features.size() + negative_features.size());
    features.insert(features.end(), positive_features.begin(), positive_features.end());
    features.insert(features.end(), negative_features.begin(), negative_features.end());

    // Labels: +1 for every positive sample, -1 for every negative.
    std::vector<double> responses(features.size());
    std::fill(responses.begin(), responses.begin() + positive_features.size(), 1);
    std::fill(responses.begin() + positive_features.size(), responses.end(), -1);

    // Build the liblinear problem description.
    problem prob;
    prob.n    = dim;                                // number of features
    prob.y    = responses.data();                   // label array
    prob.l    = static_cast<int>(responses.size()); // number of training samples
    prob.bias = 0;  // NOTE(review): liblinear uses bias < 0 for "no bias"; confirm 0 is intended.

    // Convert each dense row to a sparse liblinear row.
    std::vector<std::vector<feature_node> > features_liblinear(responses.size());
    std::vector<feature_node*> features_ptr;
    features_ptr.reserve(responses.size());
    for(std::size_t i = 0; i < responses.size(); ++i)
    {
        features_liblinear[i].reserve(dim + 1);
        for(int k = 0; k < dim; ++k)
            features_liblinear[i].push_back(feature_node{k + 1, features[i][k]}); // indices are 1-based
        features_liblinear[i].push_back(feature_node{-1, 0});                     // row terminator
        features_ptr.push_back(features_liblinear[i].data());
    }
    prob.x = features_ptr.data();

    // Self-check the conversion; a mismatch indicates a programming error.
    // (The original bare `throw;` here would have called std::terminate.)
    for(std::size_t i = 0; i < responses.size(); ++i)
        for(int k = 0; k < dim; ++k)
        {
            const feature_node node = features_ptr[i][k];
            if(node.index != k + 1 || node.value != features[i][k])
                throw std::logic_error("train_liblinear: sparse conversion mismatch");
        }

    // Train the linear model.
    model *m = train(&prob, &param);

    // Report the training-set error (floating-point division so the
    // percentage is not truncated to an integer).
    int cnt = 0;
    for(std::size_t i = 0; i < features.size(); ++i)
        if(predict(m, features_ptr[i]) != prob.y[i])
            ++cnt;
    std::cerr << "misses:" << cnt << " (" << (100.0 * cnt / responses.size()) << "%)" << std::endl;
    return m;
}
}