// $Id: SVM.cc 114 2004-07-15 12:46:15Z peter $

// System includes
#include <algorithm>
#include <cmath>
#include <iostream>
#include <limits>
#include <utility>
#include <vector>

// Thep C++ Tools
#include "Averager.h"
#include "matrix.h"
#include "random_singleton.h"
#include "SVM.h"
#include "vector.h"



namespace theplu {
namespace cpptools {

20 | SVM::SVM(const gslapi::matrix& kernel, const gslapi::vector& target, |
---|
21 | const std::vector<size_t> train_set) |
---|
22 | : alpha_(target.size(),0), |
---|
23 | bias_(0), |
---|
24 | c_(0), |
---|
25 | kernel_(kernel), |
---|
26 | target_(target), |
---|
27 | trained_(false), |
---|
28 | train_set_(train_set), |
---|
29 | tolerance_(0.000000000001) |
---|
30 | |
---|
31 | { |
---|
32 | if (!train_set_.size()) |
---|
33 | for (size_t i=0; i<target_.size(); i++) |
---|
34 | train_set_.push_back(i); |
---|
35 | } |
---|
36 | |
---|
37 | |
---|
38 | std::pair<size_t,size_t> |
---|
39 | SVM::choose(const theplu::gslapi::vector& E, |
---|
40 | const theplu::gslapi::vector& target_train, |
---|
41 | const theplu::gslapi::vector& alpha_train, |
---|
42 | bool& stop_condition) |
---|
43 | { |
---|
44 | std::pair<size_t, size_t> index(0,0); |
---|
45 | |
---|
46 | // First check for violation among support vectors |
---|
47 | double max = -10000000; |
---|
48 | double min = 10000000; |
---|
49 | // Peter, avoid looping over all alphas |
---|
50 | for (size_t i=0; i<E.size(); i++){ |
---|
51 | if (alpha_train(i)){ |
---|
52 | if (E(i)>max){ |
---|
53 | max = E(i); |
---|
54 | index.second = i; |
---|
55 | } |
---|
56 | if (E(i)<min){ |
---|
57 | min = E(i); |
---|
58 | index.first = i; |
---|
59 | } |
---|
60 | } |
---|
61 | } |
---|
62 | //index = E.minmax_index(support_vectors); |
---|
63 | if (E(index.second) - E(index.first) > 2*tolerance_) |
---|
64 | return index; |
---|
65 | |
---|
66 | // If no violation check among non-support vectors |
---|
67 | std::vector<size_t> tmp; |
---|
68 | tmp.resize(E.size()); |
---|
69 | for (size_t i=0; i<E.size(); i++) |
---|
70 | tmp[i]=i; |
---|
71 | my_uniform_rng a; |
---|
72 | random_shuffle(tmp.begin(), tmp.end(), a); |
---|
73 | for (size_t i=0; i<tmp.size(); i++){ |
---|
74 | if (target_train(tmp[i])==1){ |
---|
75 | if ( E(tmp[i]) < E(index.first) - 2*tolerance_){ |
---|
76 | index.second = tmp[i]; |
---|
77 | return index; |
---|
78 | } |
---|
79 | } |
---|
80 | else if (target_train(tmp[i])==-1){ |
---|
81 | if ( E(tmp[i]) > E(index.second) + 2*tolerance_){ |
---|
82 | index.first = tmp[i]; |
---|
83 | return index; |
---|
84 | } |
---|
85 | } |
---|
86 | } |
---|
87 | // If there is no violation then we should stop |
---|
88 | stop_condition = true; |
---|
89 | return index; |
---|
90 | } |
---|
91 | |
---|
92 | void SVM::train() |
---|
93 | { |
---|
94 | using namespace std; |
---|
95 | |
---|
96 | // initializing variables for optimization |
---|
97 | gslapi::matrix kernel_train(train_set_.size(),train_set_.size()); |
---|
98 | gslapi::vector alpha_train(train_set_.size()); |
---|
99 | gslapi::vector target_train(train_set_.size()); |
---|
100 | for (unsigned int i=0; i<train_set_.size(); i++) { |
---|
101 | alpha_train(i) = alpha_(train_set_[i]); |
---|
102 | target_train(i) = target_(train_set_[i]); |
---|
103 | for (unsigned int j=0; j<train_set_.size(); j++) |
---|
104 | kernel_train(i,j) = kernel_(train_set_[i],train_set_[j]); |
---|
105 | } |
---|
106 | |
---|
107 | // Modify the diagonal of the kernel matrix (soft margin) |
---|
108 | double d; |
---|
109 | if (c_>0) |
---|
110 | d = 1/c_; |
---|
111 | else if (c_==0) |
---|
112 | d = 0; |
---|
113 | else { |
---|
114 | cerr << "SVM): " << "C-parameter cannot be a negative number" << endl; |
---|
115 | exit(1); |
---|
116 | } |
---|
117 | for (unsigned int i=0; i<kernel_train.columns(); i++) |
---|
118 | kernel_train(i,i) += d; |
---|
119 | |
---|
120 | gslapi::vector E = ( kernel_train*target_train.mul_elements(alpha_train) |
---|
121 | - target_train ) ; |
---|
122 | |
---|
123 | double upper_bound = pow(10., 32.); //Peter, should be static... |
---|
124 | u_int epochs = 0; |
---|
125 | double alpha_new2; |
---|
126 | double alpha_new1; |
---|
127 | double u; |
---|
128 | double v; |
---|
129 | bool stop_condition = false; |
---|
130 | |
---|
131 | // Training loop |
---|
132 | while(!stop_condition) { |
---|
133 | pair<size_t, size_t> index = choose(E, target_train, |
---|
134 | alpha_train,stop_condition); |
---|
135 | if (target_train[index.first]!=target_train[index.second]) { |
---|
136 | if (alpha_train[index.second] > alpha_train[index.first]) { |
---|
137 | v = upper_bound; |
---|
138 | u = alpha_train[index.second] - alpha_train[index.first]; |
---|
139 | } |
---|
140 | else { |
---|
141 | v = upper_bound - alpha_train[index.first] + alpha_train[index.second]; |
---|
142 | u = 0; |
---|
143 | } |
---|
144 | } |
---|
145 | else { |
---|
146 | if (alpha_train[index.second] + alpha_train[index.first] > upper_bound) { |
---|
147 | u = alpha_train[index.second] + alpha_train[index.first] - upper_bound; |
---|
148 | v = upper_bound; |
---|
149 | } |
---|
150 | else { |
---|
151 | u = 0; |
---|
152 | v = alpha_train[index.first] + alpha_train[index.second]; |
---|
153 | } |
---|
154 | } |
---|
155 | |
---|
156 | double k = ( kernel_train(index.first, index.first) + |
---|
157 | kernel_train(index.second, index.second) - |
---|
158 | 2*kernel_train(index.first, index.second)); |
---|
159 | alpha_new2 = (alpha_train[index.second] + target_train[index.second]* |
---|
160 | (E[index.first]-E[index.second])/k ); |
---|
161 | |
---|
162 | if (alpha_new2 > v) |
---|
163 | alpha_new2 = v; |
---|
164 | else if (alpha_new2<u) |
---|
165 | alpha_new2 = u; |
---|
166 | |
---|
167 | // Updating the alphas and some more |
---|
168 | if (alpha_new2 < tolerance_) |
---|
169 | alpha_new2=0; |
---|
170 | |
---|
171 | alpha_new1 = (alpha_train[index.first] + |
---|
172 | (target_train[index.first] * target_train[index.second] * |
---|
173 | (alpha_train[index.second] - alpha_new2)) ); |
---|
174 | if (alpha_new1 < tolerance_){ |
---|
175 | alpha_new1=0; |
---|
176 | } |
---|
177 | |
---|
178 | alpha_train[index.first] = alpha_new1; |
---|
179 | alpha_train[index.second] = alpha_new2; |
---|
180 | E = kernel_train*target_train.mul_elements(alpha_train) - target_train; |
---|
181 | gslapi::vector ones(alpha_train.size(),1); |
---|
182 | |
---|
183 | epochs++; |
---|
184 | if (epochs>1000000){ |
---|
185 | cerr << "WARNING: SVM: " << "1,000,000 epochs reached" << endl; |
---|
186 | exit(1); |
---|
187 | } |
---|
188 | } |
---|
189 | |
---|
190 | // Calculating the bias from support vectors. E = output - bias - target |
---|
191 | Averager bias; |
---|
192 | for (size_t i=0; i<E.size(); i++) |
---|
193 | if (alpha_train(i)) |
---|
194 | bias.add(-E(i)); |
---|
195 | bias_ = bias.mean(); |
---|
196 | |
---|
197 | trained_= true; |
---|
198 | alpha_.set_all(0); |
---|
199 | for (unsigned int i=0; i<train_set_.size(); i++) |
---|
200 | alpha_(train_set_[i]) = alpha_train(i); |
---|
201 | |
---|
202 | } |
---|




}} // of namespace cpptools and namespace theplu