1 | // $Id: SVM.cc 227 2005-02-01 00:52:27Z jari $ |
---|
2 | |
---|
#include "SVM.h"

// System includes
#include <cmath>
#include <limits>
#include <limits.h>
#include <utility>
#include <vector>

// Thep C++ Tools
#include "Averager.h"
#include "matrix.h"
#include "random_singleton.h"
#include "vector.h"
---|
16 | |
---|
17 | |
---|
18 | |
---|
19 | namespace theplu { |
---|
20 | namespace cpptools { |
---|
21 | |
---|
22 | SVM::SVM(const Kernel& kernel, |
---|
23 | const gslapi::vector& target, |
---|
24 | const std::vector<size_t>& train_set) |
---|
25 | : alpha_(target.size(),0), |
---|
26 | bias_(0), |
---|
27 | c_(0), |
---|
28 | kernel_(kernel), |
---|
29 | max_epochs_(10000000), |
---|
30 | target_(target), |
---|
31 | trained_(false), |
---|
32 | train_set_(train_set), |
---|
33 | tolerance_(0.000001) |
---|
34 | |
---|
35 | { |
---|
36 | if (max_epochs_>ULONG_MAX) |
---|
37 | max_epochs_=ULONG_MAX; |
---|
38 | if (!train_set_.size()) |
---|
39 | for (size_t i=0; i<target_.size(); i++) |
---|
40 | train_set_.push_back(i); |
---|
41 | } |
---|
42 | |
---|
43 | |
---|
44 | |
---|
45 | std::pair<size_t,size_t> |
---|
46 | SVM::choose(const theplu::gslapi::vector& E, |
---|
47 | const theplu::gslapi::vector& target_train, |
---|
48 | const theplu::gslapi::vector& alpha_train, |
---|
49 | bool& stop_condition) |
---|
50 | { |
---|
51 | std::pair<size_t, size_t> index(0,0); |
---|
52 | |
---|
53 | // First check for violation among support vectors |
---|
54 | double max = -10000000; |
---|
55 | double min = 10000000; |
---|
56 | // Peter, avoid looping over all alphas |
---|
57 | for (size_t i=0; i<E.size(); i++){ |
---|
58 | if (alpha_train(i)){ |
---|
59 | if (E(i)>max){ |
---|
60 | max = E(i); |
---|
61 | index.second = i; |
---|
62 | } |
---|
63 | if (E(i)<min){ |
---|
64 | min = E(i); |
---|
65 | index.first = i; |
---|
66 | } |
---|
67 | } |
---|
68 | } |
---|
69 | //index = E.minmax_index(support_vectors); |
---|
70 | if (E(index.second) - E(index.first) > 2*tolerance_) |
---|
71 | return index; |
---|
72 | |
---|
73 | // If no violation check among non-support vectors |
---|
74 | std::vector<size_t> tmp; |
---|
75 | tmp.resize(E.size()); |
---|
76 | for (size_t i=0; i<E.size(); i++) |
---|
77 | tmp[i]=i; |
---|
78 | my_uniform_rng a; |
---|
79 | random_shuffle(tmp.begin(), tmp.end(), a); |
---|
80 | for (size_t i=0; i<tmp.size(); i++){ |
---|
81 | if (target_train(tmp[i])==1){ |
---|
82 | if ( E(tmp[i]) < E(index.first) - 2*tolerance_){ |
---|
83 | index.second = tmp[i]; |
---|
84 | return index; |
---|
85 | } |
---|
86 | } |
---|
87 | else if (target_train(tmp[i])==-1){ |
---|
88 | if ( E(tmp[i]) > E(index.second) + 2*tolerance_){ |
---|
89 | index.first = tmp[i]; |
---|
90 | return index; |
---|
91 | } |
---|
92 | } |
---|
93 | } |
---|
94 | // If there is no violation then we should stop |
---|
95 | stop_condition = true; |
---|
96 | return index; |
---|
97 | } |
---|
98 | |
---|
99 | |
---|
100 | |
---|
101 | bool SVM::train() |
---|
102 | { |
---|
103 | using namespace std; |
---|
104 | // initializing variables for optimization |
---|
105 | gslapi::matrix kernel_train(train_set_.size(),train_set_.size()); |
---|
106 | gslapi::vector alpha_train(train_set_.size()); |
---|
107 | gslapi::vector target_train(train_set_.size()); |
---|
108 | |
---|
109 | for (unsigned int i=0; i<train_set_.size(); i++) { |
---|
110 | target_train(i) = target_(train_set_[i]); |
---|
111 | for (unsigned int j=0; j<train_set_.size(); j++) |
---|
112 | kernel_train(i,j) = kernel_.get()(train_set_[i],train_set_[j]); |
---|
113 | } |
---|
114 | // Modify the diagonal of the kernel matrix (soft margin) |
---|
115 | if (c_>0){ |
---|
116 | double d = 1/c_; |
---|
117 | for (unsigned int i=0; i<kernel_train.columns(); i++) |
---|
118 | kernel_train(i,i) += d; |
---|
119 | } |
---|
120 | |
---|
121 | gslapi::vector E = ( kernel_train*target_train.mul(alpha_train) |
---|
122 | - target_train ) ; |
---|
123 | |
---|
124 | double upper_bound = pow(10., 32); //Peter, should be static... |
---|
125 | unsigned long int epochs = 0; |
---|
126 | double alpha_new2; |
---|
127 | double alpha_new1; |
---|
128 | double u; |
---|
129 | double v; |
---|
130 | bool stop_condition = false; |
---|
131 | // Training loop |
---|
132 | while(!stop_condition) { |
---|
133 | pair<size_t, size_t> index = choose(E, target_train, |
---|
134 | alpha_train,stop_condition); |
---|
135 | // Defining upper and lower bounds for alpha |
---|
136 | if (target_train[index.first]!=target_train[index.second]) { |
---|
137 | if (alpha_train[index.second] > alpha_train[index.first]) { |
---|
138 | v = upper_bound; |
---|
139 | u = alpha_train[index.second] - alpha_train[index.first]; |
---|
140 | } |
---|
141 | else { |
---|
142 | v = (upper_bound - alpha_train[index.first] + |
---|
143 | alpha_train[index.second]); |
---|
144 | u = 0; |
---|
145 | } |
---|
146 | } |
---|
147 | else { |
---|
148 | if (alpha_train[index.second] + alpha_train[index.first] > |
---|
149 | upper_bound) { |
---|
150 | u = (alpha_train[index.second] + alpha_train[index.first] - |
---|
151 | upper_bound); |
---|
152 | v = upper_bound; |
---|
153 | } |
---|
154 | else { |
---|
155 | u = 0; |
---|
156 | v = alpha_train[index.first] + alpha_train[index.second]; |
---|
157 | } |
---|
158 | } |
---|
159 | |
---|
160 | double k = ( kernel_train(index.first, index.first) + |
---|
161 | kernel_train(index.second, index.second) - |
---|
162 | 2*kernel_train(index.first, index.second)); |
---|
163 | |
---|
164 | alpha_new2 = (alpha_train[index.second] + target_train[index.second]* |
---|
165 | (E[index.first]-E[index.second])/k ); |
---|
166 | |
---|
167 | if (alpha_new2 > v) |
---|
168 | alpha_new2 = v; |
---|
169 | else if (alpha_new2<u) |
---|
170 | alpha_new2 = u; |
---|
171 | |
---|
172 | // Updating the alphas and some more |
---|
173 | if (alpha_new2 < tolerance_) |
---|
174 | alpha_new2=0; |
---|
175 | |
---|
176 | alpha_new1 = (alpha_train[index.first] + |
---|
177 | (target_train[index.first] * target_train[index.second] * |
---|
178 | (alpha_train[index.second] - alpha_new2)) ); |
---|
179 | if (alpha_new1 < tolerance_){ |
---|
180 | alpha_new1=0; |
---|
181 | } |
---|
182 | |
---|
183 | alpha_train[index.first] = alpha_new1; |
---|
184 | alpha_train[index.second] = alpha_new2; |
---|
185 | |
---|
186 | E = kernel_train*target_train.mul(alpha_train) - target_train; |
---|
187 | gslapi::vector ones(alpha_train.size(),1); |
---|
188 | |
---|
189 | epochs++; |
---|
190 | if (epochs>max_epochs_){ |
---|
191 | return false; |
---|
192 | } |
---|
193 | } |
---|
194 | |
---|
195 | // Calculating the bias from support vectors. E = output - bias - target |
---|
196 | statistics::Averager bias; |
---|
197 | for (size_t i=0; i<E.size(); i++) |
---|
198 | if (alpha_train(i)) |
---|
199 | bias.add(-E(i)); |
---|
200 | bias_ = bias.mean(); |
---|
201 | |
---|
202 | trained_= true; |
---|
203 | alpha_.set_all(0); |
---|
204 | for (unsigned int i=0; i<train_set_.size(); i++) |
---|
205 | alpha_(train_set_[i]) = alpha_train(i); |
---|
206 | |
---|
207 | // training performed succesfully |
---|
208 | return true; |
---|
209 | |
---|
210 | } |
---|
211 | |
---|
212 | |
---|
213 | |
---|
214 | |
---|
215 | |
---|
216 | }} // of namespace cpptools and namespace theplu |
---|