Changeset 45
Legend:
 Unmodified
 Added
 Removed

trunk/src/SVM.cc
trunk/src/SVM.cc — diff r42 → r45 (reconstructed; the side-by-side columns of the
changeset viewer were fused during extraction and the characters `-` and `->` were
lost. They are restored below. A few tokens could not be recovered and are marked.)

Summary of changes in r45:
 * The constructor gained an explicit initializer list (trained_, kernel_, target_,
   alpha_, bias_).
 * The r42 random pair selection (a random_singleton instance and two
   rnd->get_uniform_int() calls producing index1/index2) is replaced by a call to the
   new private member SVM::choose(), which returns a std::pair<u_int,u_int>.
 * Debug output at the end of train() (cout of max_output_negative,
   min_output_positive and "bias:") is removed.
 * Comments reworded; "Peter, ..." review notes added; brace style tightened.

r45 state of the changed code:

  namespace cpptools {

  SVM::SVM(const gslapi::matrix& kernel, const gslapi::vector& target)
    : trained_(false),
      kernel_(kernel),
      target_(target),
      alpha_(target.size()),
      bias_(0)
  {
  }

  void SVM::train(void) //Peter, this should be done so one can choose to not train on all the samples
  {
    using namespace std;
    gslapi::vector E = gslapi::vector(target_);

    u_int count = 0;
    double alpha_new;
    double u;
    double v;

    // Stop criteria
    bool stop_condition = false;
    while (!stop_condition) {
      count++;
      dalpha = alpha_.mul_elements(target_);
      E = kernel_*dalpha - target_; //Peter, should be done in another way!!

      // Choosing a pair of variables to modify
      pair<u_int, u_int> index(0,0);
      index = SVM::choose(target_.size());

      //Updating the two variables
      if (target_[index.first] != target_[index.second]) {
        if (alpha_[index.second] > alpha_[index.first]) {
          v = upper_bound;
          u = alpha_[index.second] - alpha_[index.first];
        }
        else {
          v = upper_bound - alpha_[index.first] + alpha_[index.second];
          u = 0;
        }
      }
      else {
        if (alpha_[index.second] + alpha_[index.first] > upper_bound) {
          u = alpha_[index.second] + alpha_[index.first] - upper_bound;
          v = upper_bound;
        }
        else {
          u = 0;
          v = alpha_[index.first] + alpha_[index.second];
        }
      }

      double k = ( kernel_(index.first, index.first) +
                   kernel_(index.second, index.second) -
                   2*kernel_(index.first, index.second) );
      alpha_new = (alpha_[index.second] + target_[index.second]*
                   (E[index.first] - E[index.second])/k);
      if (alpha_new > v)
        alpha_new = v;
      else if (alpha_new < u)
        alpha_new = u;

      alpha_[index.first] += (target_[index.first]*target_[index.second] *
                              (alpha_[index.second] - alpha_new));
      alpha_[index.second] = alpha_new;
      stop_condition = stop( target_, kernel_, alpha_);
      if (count > 10000000) {
        cerr << "SVM): " << "more than 10,000,000 epochs reached" << endl;
        exit(1);
      }
    }

    // Calculating the bias
    double min_output_positive = 10000;
    double max_output_negative = -10000;
    gslapi::vector output_unbiased = kernel_ * alpha_.mul_elements(target_);
    for (u_int i = 0; i < target_.size(); i++) {
      if (target_[i] == 1) {
        if (output_unbiased[i] < min_output_positive) {
          min_output_positive = output_unbiased[i];
        }
      }
      else if ( output_unbiased[i] > max_output_negative ) {
        max_output_negative = output_unbiased[i];
      }
    }
    // [brace placement around the following two statements is ambiguous in the
    //  extracted diff; they most plausibly sit after the loop, as shown]
    bias_ = ( min_output_positive - max_output_negative )/2;

    trained_ = true;
  }

  bool SVM::stop(const gslapi::vector& target_,
                 const gslapi::matrix& kernel_,
                 const gslapi::vector& alpha_)
  {
    double min_output_positive = 10000;
    double max_output_negative = -10000;
    double epsilon = 0.000001; // Peter, should be static const
    gslapi::vector output_unbiased = kernel_ * alpha_.mul_elements(target_);
    for (u_int i = 0; i < target_.size(); i++) {
      if (target_[i] == 1) {
        if (output_unbiased[i] < min_output_positive) {
          min_output_positive = output_unbiased[i];
        }
      }
      else if ( output_unbiased[i] > max_output_negative )
        max_output_negative = output_unbiased[i];
    }

    if (min_output_positive - max_output_negative < 2 - epsilon) {
      return false;
    }
    else {
      double primal = alpha_.mul_elements(target_)
        * ( alpha_.mul_elements(target_) );  // [a lost "kernel_ *" factor may
                                             //  belong inside the parentheses]
      double gap = 2 * primal - alpha_.sum();
      if (gap/(primal+1) < epsilon)
        return true;
      else
        return false;
    }
  }
trunk/src/SVM.h
trunk/src/SVM.h — diff r42 → r45 (reconstructed from the fused side-by-side view)

Summary of changes in r45:
 * Include guard renamed: CS_CPP_TOOLS_SVM_H → _THEP_SVM_H.
 * #include <utility> added (for std::pair).
 * A Doxygen class comment added, and the /** ... */ member comments converted
   to /// style.
 * The invalid in-class qualified declaration `bool SVM::stop(...)` is fixed to
   `bool stop(...)`.
 * New private member function declared:
     std::pair<u_int,u_int> choose(u_int nof_samples);
   "Private function choosing which two elements that should be updated. By
    random from uniform distributions."

r45 state of the changed declarations:

  // $Id$

  #ifndef _THEP_SVM_H
  #define _THEP_SVM_H

  // C++ tools include
  ...
  // Standard C++ includes
  ////////////////////////
  #include <utility>

  namespace theplu {
  namespace cpptools {
  ///
  /// Class for SVM using a modified version of the SMO algorithm
  /// (Cristianini). The difference to the original SMO is that the
  /// whole Kernel matrix is kept in memory instead of calculating each
  /// element when it's needed. If the number of samples (size of
  /// kernel matrix) is too large, the original SMO should be used
  /// instead (or buying more memory!). In the present implementation
  /// only maximal margin SVMs can be used and any kind of validation
  /// or testing is not possible.
  ///
  class SVM
  {

  public:
    ///
    /// Constructor taking the kernel matrix and the target vector as input
    ///
    SVM(const gslapi::matrix&, const gslapi::vector&);

    ///
    /// Training the SVM using the SMO algorithm
    ///
    void train();

    ///
    /// Function will return \f$\alpha\f$
    ///
    inline gslapi::vector get_alpha() const;

    ///
    /// Function will return the output from SVM
    ///
    inline gslapi::vector get_output() const;

  private:
    ...
    double bias_;

    ///
    /// Private function choosing which two elements that should be
    /// updated. By random from uniform distributions.
    ///
    std::pair<u_int,u_int> choose(u_int nof_samples);

    ///
    /// Private function that determines when to stop the training.
    /// The test is done in two steps. First, we check that the
    /// functional margin is at least 2 - epsilon. Second, we check
    /// that the gap between the primal and the dual object is less
    /// than epsilon.
    ///
    bool stop(const gslapi::vector& target_,
              const gslapi::matrix& kernel_,
              const gslapi::vector& alpha_);
  };
Note: See TracChangeset for help on using the changeset viewer.