// $Id: SVM.h 509 2006-02-18 13:47:32Z peter $

#ifndef _theplu_classifier_svm_
#define _theplu_classifier_svm_

#include <c++_tools/classifier/DataLookup2D.h>
#include <c++_tools/classifier/SupervisedClassifier.h>
#include <c++_tools/classifier/Target.h>
#include <c++_tools/gslapi/matrix.h>
#include <c++_tools/gslapi/vector.h>

#include <cassert>
#include <iosfwd>
#include <utility>
#include <vector>


namespace theplu {
namespace classifier {

// @internal Class keeping track of which samples are support vectors
// and which are not. The first nof_sv elements in the vector are the
// indices of the support vectors.
//
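// Illustration (hypothetical state, not taken from the implementation):
// with n()==5 and nof_sv()==2, vec_ could hold {4, 1, 0, 2, 3}; the
// first two positions then contain the indices of the support vectors
// and the remaining positions the non support vectors.
//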
class Index
{

public:
  // Default constructor
  Index();

  //
  Index(const size_t);

  // @return index_first
  inline size_t index_first(void) const
  { assert(index_first_<n()); return index_first_; }

  // @return index_second
  inline size_t index_second(void) const
  { assert(index_second_<n()); return index_second_; }

  // synch the object against alpha
  void init(const gslapi::vector& alpha, const double);

  // @return nof samples
  inline size_t n(void) const { return vec_.size(); }

  // @return nof support vectors
  inline size_t nof_sv(void) const { return nof_sv_; }

  // makes first an nsv. If it already is an nsv, nothing happens.
  void nsv_first(void);

  // makes second an nsv. If it already is an nsv, nothing happens.
  void nsv_second(void);

  // randomizes the nsv part of the vector and sets index_first to
  // nof_sv_ (the first nsv)
  void shuffle(void);

  // makes first an sv. If it already is an sv, nothing happens.
  void sv_first(void);

  // makes second an sv. If it already is an sv, nothing happens.
  void sv_second(void);

  //
  void update_first(const size_t);

  //
  void update_second(const size_t);

  // @return value_first
  inline size_t value_first(void) const
  { assert(value_first_<n()); return value_first_; }

  // @return value_second
  inline size_t value_second(void) const
  { assert(value_second_<n()); return value_second_; }

  inline size_t operator()(size_t i) const
  { assert(i<n()); assert(vec_[i]<n()); return vec_[i]; }

private:
  size_t index_first_;
  size_t index_second_;
  size_t nof_sv_;
  std::vector<size_t> vec_;
  size_t value_first_;  // vec_[index_first_], exists for fast access
  size_t value_second_; // vec_[index_second_], exists for fast access

};

///
/// Class for SVM using Keerthi's second modification of Platt's
/// Sequential Minimal Optimization. The SVM uses all data given for
/// training. If validation or testing is wanted, this should be
/// taken care of outside (in the kernel).
///
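/// A minimal usage sketch (kernel_lookup and targets are hypothetical
/// objects: any DataLookup2D wrapping a kernel matrix together with a
/// matching Target should do):
/// \code
/// theplu::classifier::SVM svm(kernel_lookup, targets);
/// if (svm.train()) {
///   const theplu::gslapi::vector& out = svm.output();
///   // out holds one decision value per sample
/// }
/// \endcode
///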
class SVM : public SupervisedClassifier
{

public:
  ///
  /// Constructor taking the kernel and the target vector as
  /// input.
  ///
  /// @note If the target, the kernel (or the data underlying the
  /// kernel) is destroyed, the SVM is no longer defined.
  ///
  SVM(const DataLookup2D&, const Target&);

  ///
  /// @todo doc
  ///
  SupervisedClassifier*
  make_classifier(const DataLookup2D&, const Target&) const;

  ///
  /// The C-parameter is the balance term (see train()). A very
  /// large C means the training will be focused on getting samples
  /// correctly classified, with a risk of overfitting and poor
  /// generalisation. Too small a C will result in a training where
  /// misclassifications are hardly penalized. C is weighted with
  /// respect to the class sizes, so \f$ n_+C_+ = n_-C_- \f$, meaning a
  /// misclassification of the smaller group is penalized
  /// harder. This balance is equivalent to the one occurring in
  /// regression with regularisation, or in ANN training with a
  /// weight-decay term. Default is C set to infinity.
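  ///
  /// Illustration (hypothetical numbers): with 10 samples in the
  /// positive class and 90 in the negative class,
  /// \f$ n_+C_+ = n_-C_- \f$ gives \f$ C_+ = 9C_- \f$, so an error on
  /// a minority-class sample costs nine times as much.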
  ///
  /// @return mean of vector \f$ C_i \f$
  ///
  inline double C(void) const { return 1/C_inverse_; }

  ///
  /// @return \f$ \alpha \f$
  ///
  inline const gslapi::vector& alpha(void) const { return alpha_; }

  ///
  /// Default is max_epochs set to 10,000,000.
  ///
  /// @return maximal number of epochs
  ///
  inline unsigned long int max_epochs(void) const { return max_epochs_; }

  ///
  /// The output is calculated as \f$ o_i = \sum_j \alpha_j t_j K_{ij}
  /// + bias \f$, where \f$ t \f$ is the target.
  ///
  /// @return output
  ///
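  /// A rough sketch (not part of the interface) of how the formula
  /// above maps to code, assuming element access to the kernel via
  /// operator()(i,j) and to gslapi::vector via operator()(i):
  /// \code
  /// for (size_t i=0; i<target_.size(); ++i) {
  ///   double o = bias_;
  ///   for (size_t j=0; j<target_.size(); ++j)
  ///     o += alpha_(j) * target(j) * kernel_(i,j);
  ///   // o corresponds to output()(i)
  /// }
  /// \endcode
  ///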
  inline const theplu::gslapi::vector& output(void) const { return output_; }

  ///
  /// Generate output values for a data set.
  ///
  /// @note Not implemented yet; the function body is empty.
  ///
  inline void predict(const DataLookup2D&, gslapi::matrix&) const
  {}

  ///
  /// Sets \f$ \alpha = 0 \f$ and makes the SVM untrained.
  ///
  inline void reset(void)
  { trained_=false; alpha_=gslapi::vector(target_.size(),0); }

  ///
  /// Trains the SVM following Platt's SMO, with Keerthi's
  /// modification. Minimizes \f$ \frac{1}{2}\sum
  /// y_iy_j\alpha_i\alpha_j(K_{ij}+\frac{1}{C_i}\delta_{ij}) \f$,
  /// which corresponds to minimizing \f$ \sum w_i^2+\sum C_i\xi_i^2
  /// \f$.
  ///
  bool train();

private:
  ///
  /// Copy constructor. (not implemented)
  ///
  SVM(const SVM&);

  ///
  /// Calculates bounds for alpha2
  ///
  void bounds(double&, double&) const;

  ///
  /// @todo Function calculating bias
  ///
  /// @return true if successful
  ///
  bool calculate_bias(void);

  ///
  /// Private function choosing which two elements should be
  /// updated. First checks for the largest violation of the condition
  /// (output - target = 0) among the support vectors (alpha!=0). If no
  /// violation is found, it checks sequentially among the other
  /// samples. If no violation is found there either, training is
  /// completed.
  ///
  /// @return true if a pair of samples violating the conditions
  /// can be found
  ///
  bool choose(const theplu::gslapi::vector&);

  ///
  /// @return kernel modified with diagonal term (soft margin)
  ///
  inline double kernel_mod(const size_t i, const size_t j) const
  { return i!=j ? kernel_(i,j) : kernel_(i,j) + C_inverse_; }

  /// @return 1 if sample i belongs to class one and -1 if it belongs
  /// to the rest
  inline int target(size_t i) const { return target_.one(i) ? 1 : -1; }

  gslapi::vector alpha_;
  double bias_;
  double C_inverse_;
  const DataLookup2D& kernel_;
  unsigned long int max_epochs_;
  gslapi::vector output_;
  Index sample_;
  const Target& target_;
  bool trained_;
  double tolerance_;

};

///
/// @todo The output operator for the SVM class.
///
std::ostream& operator<< (std::ostream& s, const SVM&);


}} // of namespace classifier and namespace theplu

#endif