Changeset 648

Timestamp: Sep 14, 2006, 5:04:17 AM
Location:  trunk/c++_tools
Files:     33 edited
Legend: in the reconstructed hunks below, lines prefixed with - were removed in r648 and lines prefixed with + were added; lines with neither prefix are unchanged context, and … marks elided code.
trunk/c++_tools/classifier/DataLookup1D.h
(r624 → r648)

    /// Constructor.
    ///
 -  /// @parameter row_vector if true (default) DataLookup1D is
 -  /// looking into a row of DataLookup2D, otherwise looking into
 -  /// a column. @parameter index which row/column to look into.
 +  /// @param row_vector if true DataLookup1D is looking into a
 +  /// row of DataLookup2D, otherwise looking into a
 +  /// column. @param index which row/column to look into.
    ///
    DataLookup1D(const DataLookup2D&, const size_t index, …
…
    ///
 -  ///
 +  /// @return number of elements
    ///
    inline size_t size(void) const …
…
    ///
 -  ///
 +  /// @brief access operator
    ///
    inline double operator()(const size_t i) const …
…
    ///
 -  ///
 +  /// @brief access operator
    ///
    inline double operator[](const size_t i) const …
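The row/column semantics documented in this constructor can be illustrated with a standalone sketch; the Matrix/View1D names below are hypothetical stand-ins, not the library's types:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Minimal stand-in for DataLookup2D: a dense matrix.
    typedef std::vector<std::vector<double> > Matrix;

    // Hypothetical 1D view mirroring the documented constructor:
    // row_vector==true reads row `index`, otherwise column `index`.
    class View1D {
    public:
      View1D(const Matrix& m, std::size_t index, bool row_vector)
        : m_(m), index_(index), row_vector_(row_vector) {}
      std::size_t size(void) const
      { return row_vector_ ? m_[index_].size() : m_.size(); }
      double operator()(std::size_t i) const
      { return row_vector_ ? m_[index_][i] : m_[i][index_]; }
    private:
      const Matrix& m_;
      std::size_t index_;
      bool row_vector_;
    };

    int main() {
      Matrix m(2, std::vector<double>(3));
      m[1][2] = 4.2;
      View1D row(m, 1, true);   // views row 1
      View1D col(m, 2, false);  // views column 2
      assert(row(2) == 4.2 && col(1) == 4.2);
      return 0;
    }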
trunk/c++_tools/classifier/DataLookup2D.h
(r640 → r648)

    ///
 -  ///
 +  /// @return Data based on selected features.
    ///
    virtual const DataLookup2D* selected(const std::vector<size_t>&) const=0;
…
  protected:
 +  ///
 +  /// @brief assignment operator
 +  ///
    const DataLookup2D& operator=(const DataLookup2D&);

 +  ///
 +  /// @brief which rows to look into
 +  ///
    std::vector<size_t> row_index_;
 +
 +  ///
 +  /// @brief which columns to look into
 +  ///
    std::vector<size_t> column_index_;
 +
 +  ///
 +  /// pointer telling how many owners the underlying data has; NULL
 +  /// if this is not an owner.
 +  ///
    u_int* ref_count_;
trunk/c++_tools/classifier/DataLookupWeighted1D.h
(r622 → r648)

    /// Constructor.
    ///
 -  /// @parameter row_vector if true (default) DataLookup1D is
 +  /// @param row_vector if true (default) DataLookupWeighted1D is
    /// looking into a row of DataLookup2D, otherwise looking into
 -  /// a column. @parameter index which row/column to look into.
 +  /// a column. @param index which row/column to look into.
    ///
    DataLookupWeighted1D(const MatrixLookupWeighted&, const size_t index, …
…
    ///
 -  ///
 +  /// @brief Destructor
    ///
    virtual ~DataLookupWeighted1D();

    ///
 -  ///
 +  /// @return number of elements
    ///
    inline size_t size(void) const …
…
    ///
 -  ///
 +  /// @return data(i) * weight(i)
    ///
    inline double operator[](const size_t i) const …
trunk/c++_tools/classifier/FeatureSelector.h
(r642 → r648)

    ///
 -  ///
 +  /// @return vector of indices corresponding to selected features.
    ///
    inline const std::vector<size_t> features(void) const { return features_; }
…
  protected:
 +  ///
 +  /// @brief features
 +  ///
    std::vector<size_t> features_;
 +
 +  ///
 +  /// number of features to skip in list
 +  ///
    size_t first_;
 +
 +  ///
 +  /// number of features selected and returned
 +  ///
    size_t N_;
trunk/c++_tools/classifier/Kernel.h
(r640 → r648)

  protected:
 +  /// underlying data
    const DataLookup2D* data_;
 -  // Peter can we move data_w_ to weighted daughter classes?
 +  /// same as data_ if weighted, otherwise a NULL pointer
    const MatrixLookupWeighted* data_w_;
 +  /// type of Kernel Function e.g. Gaussian (aka RBF)
    const KernelFunction* kf_;
 +  /// if true we own data and will delete it in destructor
    const bool data_owner_;
 +  /// if true we own data_w and will delete it in destructor
    bool weight_owner_;
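The owner flags above encode a common manual-ownership pattern: the destructor deletes only what the object itself allocated. A minimal standalone sketch of the pattern, with hypothetical Data/Holder types rather than the actual Kernel code:

    struct Data { /* ... */ };

    class Holder {
    public:
      // Borrow: caller keeps ownership, nothing deleted here.
      explicit Holder(const Data* d) : data_(d), data_owner_(false) {}
      // Allocate internally: this object owns and deletes the data.
      Holder() : data_(new Data), data_owner_(true) {}
      ~Holder() { if (data_owner_) delete data_; }
    private:
      Holder(const Holder&);             // non-copyable in this sketch
      Holder& operator=(const Holder&);
      const Data* data_;
      const bool data_owner_;
    };

    int main() {
      Data d;
      Holder borrower(&d);  // will not delete d
      Holder owner;         // deletes its own Data on destruction
      return 0;
    }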
trunk/c++_tools/classifier/KernelLookup.h
(r640 → r648)

    ///
 -  /// @retun a sub-kernel of kernel calculated using data defined by
 +  /// @return a sub-kernel of kernel calculated using data defined by
    /// @a features. Each row and each columns corresponds to a traing
    /// sample defined by @a train. …
trunk/c++_tools/classifier/Kernel_MEV.h
(r629 → r648)

    ///
 -  ///
 +  /// Constructing a new Kernel based on selected features @a
 +  /// index. All other settings are the same.
    ///
    Kernel_MEV(const Kernel_MEV& kernel, const std::vector<size_t>& index);
…
    ///
 +  /// @see Kernel_MEV(const Kernel_MEV&, const std::vector<size_t>&);
    ///
 +  /// @note returns dynamically allocated pointer that must be
 +  /// deleted by the caller to avoid memory leaks.
    ///
    const Kernel_MEV* selected(const std::vector<size_t>& index) const;
trunk/c++_tools/classifier/Kernel_SEV.h
(r640 → r648)

    ///
    /// @brief Speed Efficient Kernel
 +  ///
    /// Class taking care of the \f$ NxN \f$ kernel matrix, where
    /// \f$ N \f$ is number of samples. Type of Kernel is defined by a …
…
    ///
 -  ///
 +  /// Constructs a Kernel based on selected features defined by @a index
    ///
    Kernel_SEV(const Kernel_SEV& kernel, const std::vector<size_t>& index);
trunk/c++_tools/classifier/MatrixLookup.h
(r631 → r648)

    ///
 -  /// Constructor creating a lookup into a sub-matrix of @ matrix.
 +  /// Constructor creating a lookup into a sub-matrix of @a matrix.
    ///
    /// If @a row_vectors is true the new MatrixLookup will be consist …
trunk/c++_tools/classifier/MatrixLookupWeighted.h
(r640 → r648)

    ///
 -  /// Constructor creating a lookup into a sub-matrix of @ matrix.
 +  /// Constructor creating a lookup into a sub-matrix of @a matrix.
    ///
    /// If @a row_vectors is true the new MatrixLookupWeighted will be …
trunk/c++_tools/classifier/PolynomialKernelFunction.h
(r629 → r648)

    ///
    /// @return If order is larger than one: \f$ (1+x \cdot y)^{order}
 -  /// \f$ \n If order is one (linear): \f$ \frac{\sum w_yxy} \f$
 +  /// \f$ \n If order is one (linear): \f$ \sum w_yxy \f$
    ///
…
    ///
    /// @return If order is larger than one: \f$ (1+x \cdot y)^{order}
 -  /// \f$ \n If order is one (linear): \f$ \sum w_xw_yxy }\f$
 +  /// \f$ \n If order is one (linear): \f$ \sum w_xw_yxy \f$
    ///
    double operator()(const DataLookupWeighted1D& x, …
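As a sanity check of the documented rule (inner product for order one, \f$ (1+x \cdot y)^{order} \f$ otherwise), here is a standalone unweighted sketch; poly_kernel is an illustrative name, and the weighted variants simply scale each term by the weights:

    #include <cmath>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Polynomial kernel as documented above: for order 1 the plain
    // inner product, otherwise (1 + x.y)^order.
    double poly_kernel(const std::vector<double>& x,
                       const std::vector<double>& y, unsigned order) {
      double dot = 0;
      for (std::size_t i = 0; i < x.size(); ++i)
        dot += x[i] * y[i];
      return order == 1 ? dot : std::pow(1.0 + dot, double(order));
    }

    int main() {
      std::vector<double> x(2, 1.0), y(2, 2.0);   // x.y = 4
      std::cout << poly_kernel(x, y, 1) << '\n';  // 4
      std::cout << poly_kernel(x, y, 2) << '\n';  // (1+4)^2 = 25
      return 0;
    }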
trunk/c++_tools/classifier/SVM.h
(r641 → r648)

    class DataLookup2D;
    #ifndef DOXYGEN_SHOULD_SKIP_THIS
 -  // @internal Class keeping track of which samples are support vectors and
 -  // not. The first nof_sv elements in the vector are indices of the
 -  // support vectors
 -  //
 +  /// @internal Class keeping track of which samples are support vectors and
 +  /// not. The first nof_sv elements in the vector are indices of the
 +  /// support vectors
 +  ///
    class Index
…
    /// for training.
    ///
 -  /// @note
 -  ///
    void predict(const DataLookup2D& input, utility::matrix& predict) const;
…
    void set_C(const double);

 -  ///
 -  /// Training the SVM following Platt's SMO, with Keerti's
 -  /// modifacation. Minimizing \f$ \frac{1}{2}\sum
 -  /// y_iy_j\alpha_i\alpha_j(K_{ij}+\frac{1}{C_i}\delta_{ij}) \f$ ,
 -  /// which corresponds to minimizing \f$ \sum w_i^2+\sum C_i\xi_i^2 \f$.
 -  ///
 +  /**
 +     Training the SVM following Platt's SMO, with Keerthi's
 +     modification. Minimizing \f$ \frac{1}{2}\sum
 +     y_iy_j\alpha_i\alpha_j(K_{ij}+\frac{1}{C_i}\delta_{ij}) \f$,
 +     which corresponds to minimizing \f$ \sum w_i^2+\sum C_i\xi_i^2 \f$.
 +  */
    bool train();
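The correspondence asserted in the train() comment is the standard result for the 2-norm (quadratic slack) soft-margin SVM; a hedged sketch of the reasoning, up to overall factors of 1/2:

    \begin{align*}
    \text{primal:}\quad &\min_{w,b,\xi}\ \tfrac{1}{2}\lVert w\rVert^2
        + \tfrac{1}{2}\sum_i C_i\,\xi_i^2
        \quad\text{s.t.}\quad y_i(w\cdot x_i+b)\ \ge\ 1-\xi_i,\\
    \text{dual:}\quad &\max_{\alpha\ge 0}\ \sum_i \alpha_i
        - \tfrac{1}{2}\sum_{i,j} y_i y_j \alpha_i \alpha_j
          \Bigl(K_{ij} + \tfrac{1}{C_i}\,\delta_{ij}\Bigr).
    \end{align*}

Stationarity gives \( \xi_i = \alpha_i/C_i \), so the quadratic slack penalty collapses into the diagonal shift \( K_{ij} \to K_{ij} + \delta_{ij}/C_i \) (using \( y_i^2 = 1 \)), and no upper box constraint on \( \alpha \) remains — matching the objective quoted in the comment.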
trunk/c++_tools/classifier/Sampler.h
(r619 → r648)

    /// @brief Constructor
    ///
 -  /// @parameter Target targets
 +  /// @param target used to balance partitions
    ///
    Sampler(const Target& target);
…
  protected:
 +  /// Target used to balance partitions
    Target target_;
 +  /// index of training sets for the partitions
    std::vector<std::vector<size_t> > training_index_;
 +  /// Targets for training sets for the partitions
    std::vector<Target> training_target_;
 +  /// index of validation sets for the partitions
    std::vector<std::vector<size_t> > validation_index_;
 +  /// Targets for validation sets for the partitions
    std::vector<Target> validation_target_;
trunk/c++_tools/classifier/SubsetGenerator.h
(r636 → r648)

    /// @brief Constructor
    ///
 -  /// @parameter sampler sampler
 -  /// @parameter data data to split up in validation and training.
 +  /// @param sampler sampler
 +  /// @param data data to split up in validation and training.
    ///
    SubsetGenerator(const Sampler& sampler, const DataLookup2D& data);
…
    /// @brief Constructor
    ///
 -  /// @parameter Sampler
 -  /// @parameter data data to be split up in validation and training.
 +  /// @param sampler taking care of partitioning dataset
 +  /// @param data data to be split up in validation and training.
 +  /// @param fs Object selecting features for each subset
    ///
    SubsetGenerator(const Sampler& sampler, const DataLookup2D& data, …
trunk/c++_tools/classifier/SupervisedClassifier.h
(r635 → r648)

 -  //
 -  // Train the classifier.
 -  //
 +  ///
 +  /// Train the classifier.
 +  ///
    virtual bool train()=0;
…
  protected:

 +  /// Target to train on.
    const Target& target_;
 +  /// true if classifier successfully trained
    bool trained_;
trunk/c++_tools/random/random.h
(r614 → r648)

    inline std::string name(void) const { return gsl_rng_name(rng_); }

 +  ///
 +  /// @return const pointer to underlying GSL random generator.
 +  ///
    inline const gsl_rng* rng(void) const { return rng_; }
…
  protected:
 +  /// GSL random generator
    RNG* rng_;
…
  protected:
 +  /// pointer to GSL random generator
    RNG* rng_;
…
    /// @brief Constructor
    ///
 -  /// @param \a m is the expectation value of the distribution.
 +  /// @param m is the expectation value of the distribution.
    ///
    inline Exponential(const double m=1) : m_(m) {}
…
    /// @brief Constructor
    /// @param s is the standard deviation \f$ \sigma \f$ of distribution
 -  /// m is the expectation value \f$ \mu \f$ of the distribution
 +  /// @param m is the expectation value \f$ \mu \f$ of the distribution
    ///
    inline Gaussian(const double s=1, const double m=0) : m_(m), s_(s) {}
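These classes wrap GSL, and the constructor parameters line up with the underlying GSL calls; a standalone sketch using GSL directly (not the wrapper API):

    #include <iostream>
    #include <gsl/gsl_randist.h>
    #include <gsl/gsl_rng.h>

    // Draw from Exponential(mean m) and Gaussian(sd s, mean m),
    // mirroring the constructor parameters documented above.
    int main() {
      gsl_rng_env_setup();
      gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
      const double m_exp = 1.0;       // Exponential(m=1)
      const double s = 1.0, m = 0.0;  // Gaussian(s=1, m=0)
      std::cout << gsl_ran_exponential(rng, m_exp) << ' '
                << m + gsl_ran_gaussian(rng, s) << '\n';
      gsl_rng_free(rng);
      return 0;
    }
    // compile: g++ example.cc -lgsl -lgslcblas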
trunk/c++_tools/statistics/AveragerPairWeighted.h
(r627 → r648)

    ///
 -  ///
 +  /// Adding two sequences of data @a x and @a y. The data
 +  /// should be paired so \f$ x(i) \f$ is associated to \f$ y(i) \f$.
 +  /// @a x will be treated as having all weights equal to unity.
    ///
    void add(const classifier::DataLookup1D& x, …
…
    ///
 -  ///
 +  /// Adding two sequences of data @a x and @a y. The data should be
 +  /// paired so \f$ x(i) \f$ is associated to \f$ y(i) \f$.
 +  /// @a y will be treated as having all weights equal to unity.
    ///
    inline void add(const classifier::DataLookupWeighted1D& x, …
…
    ///
 -  ///
 +  /// Adding two sequences of weighted data @a x and @a y. The data
 +  /// should be paired so \f$ x(i) \f$ is associated to \f$ y(i) \f$.
    ///
    void add(const classifier::DataLookupWeighted1D& x, …
trunk/c++_tools/statistics/Distance.h
(r616 → r648)

    }

 +  ///
 +  /// @return distance
 +  ///
    virtual double operator()(const utility::vector& x,
                              const utility::vector& y) const = 0;

 +  ///
 +  /// This function is virtual, and implemented in this base class by
 +  /// copying the lookups to utility::vectors followed by calling the
 +  /// appropriate function. If speed is crucial you can implement
 +  /// this function in an inherited class, avoiding the copying.
 +  ///
 +  /// @return distance
 +  ///
    virtual double operator()(const classifier::DataLookup1D& x,
                              const classifier::DataLookup1D& y) const;

 +  ///
 +  /// @return weighted distance
 +  ///
    virtual double operator()(const utility::vector& x,
                              const utility::vector& y, …
                              const utility::vector& wy) const = 0;

 +  ///
 +  /// This function is virtual, and implemented in this base class by
 +  /// copying the lookups to utility::vectors followed by calling the
 +  /// appropriate function. If speed is crucial you can implement
 +  /// this function in an inherited class, avoiding the copying.
 +  ///
 +  /// @return weighted distance
 +  ///
    virtual double operator()(const classifier::DataLookup1D& x,
                              const classifier::DataLookup1D& y, …
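The copy-then-delegate default described above is a small template-method pattern; a standalone sketch with hypothetical Lookup/Euclidean types, not the library's classes:

    #include <cmath>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Stand-in for a lookup type that is only readable element-wise.
    struct Lookup {
      std::vector<double> v;
      std::size_t size(void) const { return v.size(); }
      double operator()(std::size_t i) const { return v[i]; }
    };

    class Distance {
    public:
      virtual ~Distance() {}
      // Pure virtual: the actual metric on plain vectors.
      virtual double operator()(const std::vector<double>& x,
                                const std::vector<double>& y) const = 0;
      // Base-class default: copy the lookups, then delegate. A subclass
      // may override this overload too if the copy is too expensive.
      virtual double operator()(const Lookup& x, const Lookup& y) const {
        std::vector<double> xv(x.size()), yv(y.size());
        for (std::size_t i = 0; i < x.size(); ++i) xv[i] = x(i);
        for (std::size_t i = 0; i < y.size(); ++i) yv[i] = y(i);
        return (*this)(xv, yv);
      }
    };

    struct Euclidean : public Distance {
      using Distance::operator();  // keep the Lookup overload visible
      double operator()(const std::vector<double>& x,
                        const std::vector<double>& y) const {
        double d2 = 0;
        for (std::size_t i = 0; i < x.size(); ++i)
          d2 += (x[i] - y[i]) * (x[i] - y[i]);
        return std::sqrt(d2);
      }
    };

    int main() {
      Lookup a, b;
      a.v.push_back(0); a.v.push_back(3);
      b.v.push_back(4); b.v.push_back(0);
      std::cout << Euclidean()(a, b) << '\n';  // 5
      return 0;
    }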
trunk/c++_tools/statistics/Fisher.h
(r623 → r648)

The class comment is converted from /// style to a /** ... */ block, with the text:

    /**
       @brief Fisher's exact test.

       Fisher's Exact test is a procedure that you can use for data
       in a two by two contingency table: \f[ \begin{tabular}{|c|c|}
       \hline a&b \tabularnewline \hline c&d \tabularnewline \hline
       \end{tabular} \f] Fisher's Exact Test is based on exact
       probabilities from a specific distribution (the hypergeometric
       distribution). There's really no lower bound on the amount of
       data that is needed for Fisher's Exact Test. You do have to
       have at least one data value in each row and one data value in
       each column. If an entire row or column is zero, then you
       don't really have a 2 by 2 table. But you can use Fisher's
       Exact Test when one of the cells in your table has a zero in
       it. Fisher's Exact Test is also very useful for highly
       imbalanced tables. If one or two of the cells in a two by two
       table have numbers in the thousands and one or two of the
       other cells has numbers less than 5, you can still use
       Fisher's Exact Test. For very large tables (where all four
       entries in the two by two table are large), your computer may
       take too much time to compute Fisher's Exact Test. In these
       situations, though, you might as well use the Chi-square test
       because a large sample approximation (that the Chi-square test
       relies on) is very reasonable. If all elements are larger than
       10 a Chi-square test is reasonable to use.

       @note The statistic assumes that each column and row sum,
       respectively, is fixed. Just because you have a 2x2 table, this
       assumption does not necessarily match your experimental setup.
       See e.g. Barnard's test for an alternative.
    */

    class Fisher : public Score …
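Because the test reduces to hypergeometric tail probabilities, a one-sided p-value for the a/b/c/d table above can be sketched directly with GSL's gsl_cdf_hypergeometric_P; this is an illustration, not this class's implementation:

    #include <iostream>
    #include <gsl/gsl_cdf.h>

    // One-sided Fisher's exact p-value for the 2x2 table
    //   a b
    //   c d
    // with fixed margins: a follows a hypergeometric distribution with
    // a+b items of type 1, c+d items of type 2, and a+c items drawn.
    double fisher_one_sided(unsigned a, unsigned b, unsigned c, unsigned d) {
      return gsl_cdf_hypergeometric_P(a, a + b, c + d, a + c);  // P(X <= a)
    }

    int main() {
      std::cout << fisher_one_sided(1, 9, 11, 3) << '\n';
      return 0;
    }
    // compile: g++ fisher.cc -lgsl -lgslcblas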
trunk/c++_tools/statistics/FoldChange.h
(r623 → r648)

    /// @param value vector of the values
    /// @param weight vector of accompanied weight to the values
 -  /// @train_set defining which values to use (number of values used
 -  /// in the calculation is equal to size of \a train_set)
    ///
    double score(const classifier::Target& target, …
trunk/c++_tools/statistics/MultiDimensional.h
(r616 → r648)

    ///
 -  ///
 +  /// Function fitting parameters of the linear model by minimizing
 +  /// the quadratic deviation between model and data.
    ///
    void fit(const utility::matrix& X, const utility::vector& y);

    ///
 -  ///
 +  /// @return parameters of the model
    ///
    utility::vector fit_parameters(void) { return fit_parameters_; }
trunk/c++_tools/statistics/MultiDimensionalWeighted.h
(r616 → r648)

    ///
 -  /// @todo doc
 +  /// @see gsl_multifit_wlinear
    ///
    void fit(const utility::matrix& X, const utility::vector& y, …
…
    ///
 -  ///
 +  /// @return parameters of fitted model
    ///
    utility::vector fit_parameters(void) { return fit_parameters_; }
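For reference, the GSL routine named in the @see solves the weighted least-squares problem directly; a minimal standalone usage sketch (illustrative data, not this class's code):

    #include <iostream>
    #include <gsl/gsl_multifit.h>

    // Weighted linear fit y ~ c0 + c1*x, with weights w_i typically
    // chosen as 1/sigma_i^2, via gsl_multifit_wlinear.
    int main() {
      const size_t n = 4, p = 2;
      const double xs[n] = {0, 1, 2, 3}, ys[n] = {0.1, 1.9, 4.1, 5.9};
      gsl_matrix* X = gsl_matrix_alloc(n, p);
      gsl_vector* y = gsl_vector_alloc(n);
      gsl_vector* w = gsl_vector_alloc(n);
      for (size_t i = 0; i < n; ++i) {
        gsl_matrix_set(X, i, 0, 1.0);    // intercept column
        gsl_matrix_set(X, i, 1, xs[i]);  // slope column
        gsl_vector_set(y, i, ys[i]);
        gsl_vector_set(w, i, 1.0);       // unit weights in this sketch
      }
      gsl_vector* c = gsl_vector_alloc(p);
      gsl_matrix* cov = gsl_matrix_alloc(p, p);
      double chisq;
      gsl_multifit_linear_workspace* work = gsl_multifit_linear_alloc(n, p);
      gsl_multifit_wlinear(X, w, y, c, cov, &chisq, work);
      std::cout << "c0=" << gsl_vector_get(c, 0)
                << " c1=" << gsl_vector_get(c, 1) << '\n';
      gsl_multifit_linear_free(work);
      // (free X, y, w, c, cov likewise in real code)
      return 0;
    }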
trunk/c++_tools/statistics/OneDimensional.h
(r616 → r648)

    virtual double prediction_error(const double x) const=0;

 +  ///
 +  /// @brief print output to @a os
 +  ///
    std::ostream& print(std::ostream& os, const double min,
                        double max, const u_int n) const;
…
    AveragerPair ap_;

 -  double msd_; // mean squared deviation (model from data points)
 +  ///
 +  /// mean squared deviation (model from data points)
 +  ///
 +  double msd_;
trunk/c++_tools/statistics/OneDimensionalWeighted.h
(r616 → r648)

  protected:
 -  double s2_; // noise level - the typical variance for a point with
 -              // weight w is s2/w
 +  ///
 +  /// noise level - the typical variance for a point with weight w
 +  /// is s2/w
 +  ///
 +  double s2_;
trunk/c++_tools/statistics/Polynomial.h
(r616 → r648)

    ///
 -  ///
 +  /// @param power degree of polynomial, e.g. 1 for a linear model
    ///
    inline Polynomial(size_t power) …
…
    ///
 -  ///
 +  /// fit the model by minimizing the mean squared deviation between
 +  /// model and data.
    ///
    void fit(const utility::vector& x, const utility::vector& y);

    ///
 -  ///
 +  /// @return parameters of the model
    ///
    utility::vector fit_parameters(void) { return md_.fit_parameters(); }
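A polynomial fit of the documented kind reduces to linear regression on a Vandermonde design matrix, which is presumably why Polynomial delegates to MultiDimensional (md_); a standalone GSL sketch under that assumption:

    #include <iostream>
    #include <gsl/gsl_multifit.h>

    // Fit y = c0 + c1*x + ... + c_power*x^power by least squares:
    // build the Vandermonde design matrix, then solve with GSL.
    int main() {
      const size_t n = 5, power = 2, p = power + 1;
      const double xs[n] = {0, 1, 2, 3, 4};
      const double ys[n] = {1.0, 2.1, 5.0, 10.1, 16.9};  // roughly 1 + x^2
      gsl_matrix* X = gsl_matrix_alloc(n, p);
      gsl_vector* y = gsl_vector_alloc(n);
      for (size_t i = 0; i < n; ++i) {
        double xk = 1.0;
        for (size_t k = 0; k < p; ++k, xk *= xs[i])
          gsl_matrix_set(X, i, k, xk);  // column k holds x^k
        gsl_vector_set(y, i, ys[i]);
      }
      gsl_vector* c = gsl_vector_alloc(p);
      gsl_matrix* cov = gsl_matrix_alloc(p, p);
      double chisq;
      gsl_multifit_linear_workspace* work = gsl_multifit_linear_alloc(n, p);
      gsl_multifit_linear(X, y, c, cov, &chisq, work);
      for (size_t k = 0; k < p; ++k)
        std::cout << "c" << k << " = " << gsl_vector_get(c, k) << '\n';
      gsl_multifit_linear_free(work);
      return 0;
    }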
trunk/c++_tools/statistics/PolynomialWeighted.h
(r616 → r648)

    ///
 -  ///
 +  /// @param power degree of polynomial model
    ///
    inline PolynomialWeighted(size_t power) …
trunk/c++_tools/statistics/ROC.h
(r623 → r648)

    inline u_int& minimum_size(void){ return minimum_size_; }

 +  ///
 +  /// Function returning true if target is positive (binary()) for
 +  /// the sample with the ith lowest data value, so i=0 corresponds
 +  /// to the sample with the lowest data value and i=n()-1 to the
 +  /// sample with the highest data value.
 +  ///
    bool target(const size_t i) const;

 +  ///
 +  /// @return number of samples
 +  ///
    inline size_t n(void) const { return vec_pair_.size(); }

 +  ///
 +  /// @return number of positive samples (Target.binary()==true)
 +  ///
    inline size_t n_pos(void) const { return nof_pos_; }
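Given samples ordered by data value with a positive flag per sample — exactly the view target(i) provides — the ROC area follows from the rank sum of the positives; a standalone sketch of the generic Mann-Whitney identity (ties ignored, both classes assumed present; not this class's code):

    #include <algorithm>
    #include <iostream>
    #include <utility>
    #include <vector>

    // AUC from (value, is_positive) pairs: sort by value ascending and
    // use  AUC = (R_pos - n_pos*(n_pos+1)/2) / (n_pos * n_neg),
    // where R_pos is the sum of the 1-based ranks of the positives.
    double auc(std::vector<std::pair<double, bool> > samples) {
      std::sort(samples.begin(), samples.end());
      double rank_sum = 0;
      std::size_t n_pos = 0;
      for (std::size_t i = 0; i < samples.size(); ++i)
        if (samples[i].second) {
          rank_sum += i + 1;  // 1-based rank of this positive
          ++n_pos;
        }
      std::size_t n_neg = samples.size() - n_pos;
      return (rank_sum - 0.5 * n_pos * (n_pos + 1)) / (n_pos * n_neg);
    }

    int main() {
      std::vector<std::pair<double, bool> > s;
      s.push_back(std::make_pair(0.10, false));
      s.push_back(std::make_pair(0.40, true));
      s.push_back(std::make_pair(0.35, false));
      s.push_back(std::make_pair(0.80, true));
      std::cout << auc(s) << '\n';  // 1: every positive outranks every negative
      return 0;
    }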
trunk/c++_tools/statistics/Score.h
(r623 → r648)

  protected:
 +  /// return true if method is weighted
    inline bool weighted(void) const { return weighted_; }

 +  /// true if method is absolute, which means if score is below
 +  /// expected value (by chance) E, score returns E-score instead.
    bool absolute_;
 +  /// true if method is weighted
    bool weighted_;
trunk/c++_tools/statistics/WilcoxonFoldChange.h
(r623 → r648)

    /// @return difference of the means of the two classes
    ///
 -  /// @param target is +1 or -1
 +  /// @param target defining the two groups (Target.binary())
    /// @param value vector of the values
 -  /// @train_set defining which values to use (number of values used
 -  /// in the calculation is equal to size of \a train_set)
    ///
    double score(const classifier::Target& target, …
…
    /// @return difference of the weighted means of the two classes
    ///
 +  /// @param target defining the two groups (Target.binary())
    /// @param value vector of the values (with weights)
 -  /// @train_set defining which values to use (number of values used
 -  /// in the calculation is equal to size of \a train_set)
    ///
    /// @note not implemented …
…
    /// @return difference of the weighted means of the two classes
    ///
 +  /// @param target defining the two groups
    /// @param value vector of the values
    /// @param weight vector of accompanied weight to the values
 -  /// @train_set defining which values to use (number of values used
 -  /// in the calculation is equal to size of \a train_set)
    ///
    /// @note not implemented …
trunk/c++_tools/statistics/utility.h
(r616 → r648)

    /// t samples without replacement and \a k of those are "good"
    /// samples. \a k will follow a hypergeometric distribution.
 -  /// @cumulative hypergeometric distribution functions P(k).
 +  ///
 +  /// @return cumulative hypergeometric distribution function P(k).
    ///
    double cdf_hypergeometric_P(u_int k, u_int n1, u_int n2, u_int t);
trunk/c++_tools/utility/Exception.h
(r570 → r648)

    ///
 -  /// @brief Class
 +  /// @brief Class for IO errors
    ///
    class IO_error : public std::runtime_error
    {
    public:
 +    ///
 +    /// Default constructor
 +    ///
      IO_error(void) throw() : std::runtime_error("IO_error:") {}
 +
 +    ///
 +    /// Constructor for exception with message
 +    ///
      IO_error(std::string message) throw()
        : std::runtime_error("IO_error: " + message) {}
trunk/c++_tools/utility/NNI.h
(r616 → r648)

  protected:
 +  /** \f$ d_{ij}^2=\frac{\sum_{k=1}^C w_{ik} w_{jk} (x_{ik}-x_{jk})^2}
 +      {\sum_{k=1}^C w_{ik} w_{jk}} \f$ where C is the number of
 +      columns */
    std::vector<std::pair<u_int,double> > calculate_distances(const u_int) const;
 +  /// Contributing nearest neighbours are added up to the user set
 +  /// number, and neighbours are disqualified if their element
 +  /// (column) weight is zero
    std::vector<u_int> nearest_neighbours(const u_int,
                         const std::vector<std::pair<u_int,double> >&) const;

 +  ///
 +  /// original data matrix
 +  ///
    const utility::matrix& data_;

 +  ///
 +  /// data after imputation
 +  ///
    utility::matrix imputed_data_;

 +  ///
 +  /// number of neighbours to use
 +  ///
    u_int neighbours_;

 +  ///
 +  /// which rows are not imputed due to lack of data
 +  ///
    std::vector<size_t> not_imputed_;

 +  ///
 +  /// weight matrix
 +  ///
    const utility::matrix& weight_;
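The distance in the calculate_distances() comment is cheap to compute per row pair; a standalone sketch of that formula, where zero weight marks a missing value (illustrative names, not the class's code):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Weighted squared distance between rows i and j:
    //   d2 = sum_k w_i[k]*w_j[k]*(x_i[k]-x_j[k])^2 / sum_k w_i[k]*w_j[k]
    // Columns with zero weight in either row drop out of both sums.
    double weighted_d2(const std::vector<double>& xi,
                       const std::vector<double>& xj,
                       const std::vector<double>& wi,
                       const std::vector<double>& wj) {
      double num = 0, den = 0;
      for (std::size_t k = 0; k < xi.size(); ++k) {
        const double w = wi[k] * wj[k];
        num += w * (xi[k] - xj[k]) * (xi[k] - xj[k]);
        den += w;
      }
      return num / den;  // caller must ensure some overlapping weight
    }

    int main() {
      std::vector<double> xi(2), xj(2), wi(2, 1.0), wj(2, 1.0);
      xi[0] = 0; xi[1] = 3; xj[0] = 4; xj[1] = 3;
      wi[1] = 0;  // second column missing for row i -> ignored
      assert(weighted_d2(xi, xj, wi, wj) == 16.0);
      return 0;
    }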
trunk/c++_tools/utility/Option.h
(r601 → r648)

    /// @param short_name one character key such as 'h' for -h flag
    /// @param long_name string key such as "help" for --help flag
 -  /// @param telling what kind argument this option expects
 +  /// @param arg telling what kind of argument this option expects
    /// @param desc string used in help display
    ///