Changeset 648


Timestamp:
Sep 14, 2006, 5:04:17 AM
Author:
Peter
Message:

fixes #133: removed all errors reported by Doxygen. Only one error is left, which says that Index is not documented; I actually don't want it documented. We use Doxygen's preprocessor to skip documenting that class, yet Doxygen still complains that the class is not documented. The only solution would be to move that class to its own file instead of keeping it together with SVM.
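
The guard in question is visible in the SVM.h diff below. A minimal sketch of the pattern, assuming the usual setup where DOXYGEN_SHOULD_SKIP_THIS is listed under PREDEFINED in the Doxyfile:

    // Doxygen's preprocessor sees DOXYGEN_SHOULD_SKIP_THIS as defined
    // (via PREDEFINED) and skips the block; the compiler never defines
    // it and compiles the class as usual.
    #ifndef DOXYGEN_SHOULD_SKIP_THIS
    class Index
    {
      // implementation detail of SVM, hidden from the generated docs
    };
    #endif // DOXYGEN_SHOULD_SKIP_THIS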

Location:
trunk/c++_tools
Files:
33 edited

  • trunk/c++_tools/classifier/DataLookup1D.h

    r624 r648  
    2525    /// Constructor.
    2626    ///
    27     /// @parameter row_vector if true (default) DataLookup1D is
    28     /// looking into a row of DataLookup2D, otherwise looking into
    29     /// a column. @parameter index which row/column to look into.
     27    /// @param row_vector if true DataLookup1D is looking into a
     28    /// row of DataLookup2D, otherwise looking into a
     29    /// column. @param index which row/column to look into.
    3030    ///
    3131    DataLookup1D(const DataLookup2D&, const size_t index,
     
    4949
    5050    ///
    51     ///
     51    /// @return number of elements
    5252    ///
    5353    inline size_t size(void) const
     
    5555
    5656    ///
    57     ///
     57    /// @brief access operator
    5858    ///
    5959    inline double operator()(const size_t i) const
     
    6464
    6565    ///
    66     ///
     66    /// @brief access operator
    6767    ///
    6868    inline double operator[](const size_t i) const
  • trunk/c++_tools/classifier/DataLookup2D.h

    r640 r648  
    7979
    8080    ///
    81     ///
     81    /// @return Data based on selected features.
    8282    ///
    8383    virtual const DataLookup2D* selected(const std::vector< size_t > &) const=0;
     
    141141
    142142  protected:
     143    ///
     144    /// @brief assignment operator
     145    ///
    143146    const DataLookup2D& operator=(const DataLookup2D&);
    144147
     148    ///
     149    /// @brief which rows to look into
     150    ///
    145151    std::vector<size_t> row_index_;
     152
     153    ///
     154    /// @brief which columns to look into
     155    ///
    146156    std::vector<size_t> column_index_;
     157
     158    ///
     159    /// pointer telling how many owners the underlying data
     160    /// has. NULL if this is not an owner.
     161    ///
    147162    u_int* ref_count_;
    148163   
  • trunk/c++_tools/classifier/DataLookupWeighted1D.h

    r622 r648  
    4646    /// Constructor.
    4747    ///
    48     /// @parameter row_vector if true (default) DataLookup1D is
     48    /// @param row_vector if true (default) DataLookup1D is
    4949    /// looking into a row of DataLookup2D, otherwise looking into
    50     /// a column. @parameter index which row/column to look into.
     50    /// a column. @param index which row/column to look into.
    5151    ///
    5252    DataLookupWeighted1D(const MatrixLookupWeighted&, const size_t index,
     
    6565
    6666    ///
    67     ///
     67    /// @brief Destructor
    6868    ///
    6969    virtual ~DataLookupWeighted1D();
    7070
    7171    ///
    72     ///
     72    /// @return number of elements
    7373    ///
    7474    inline size_t size(void) const
     
    101101
    102102    ///
    103     ///
     103    /// @return data(i) * weight(i)
    104104    ///
    105105    inline double operator[](const size_t i) const
  • trunk/c++_tools/classifier/FeatureSelector.h

    r642 r648  
    4242
    4343    ///
    44     ///
     44    /// @return vector of indices corresponding to selected features.
    4545    ///
    4646    inline const std::vector<size_t> features(void) const { return features_; }
     
    5858
    5959  protected:
     60    ///
     61    /// @brief features
     62    ///
    6063    std::vector<size_t> features_;
     64
     65    ///
     66    /// number of features to skip in list
     67    ///
    6168    size_t first_;
     69
     70    ///
     71    /// number of features selected and returned
     72    ///
    6273    size_t N_;
    6374
  • trunk/c++_tools/classifier/Kernel.h

    r640 r648  
    122122
    123123  protected:
     124    /// underlying data
    124125    const DataLookup2D* data_;
    125     // Peter can we move data_w_ to weighted daughted classes?
     126    /// same as data_ if weighted, otherwise a NULL pointer
    126127    const MatrixLookupWeighted* data_w_;
     128    /// type of Kernel Function e.g. Gaussian (aka RBF)
    127129    const KernelFunction* kf_;
     130    /// if true we own data and will delete it in destructor
    128131    const bool data_owner_;
     132    /// if true we own data_w and will delete it in destructor
    129133    bool weight_owner_;
    130134
  • trunk/c++_tools/classifier/KernelLookup.h

    r640 r648  
    139139
    140140    ///
    141     /// @retun a sub-kernel of kernel calculated using data defined by
     141    /// @return a sub-kernel of kernel calculated using data defined by
    142142    /// features. Each row and each column corresponds to a training
    143143    /// sample defined by @a train.
  • trunk/c++_tools/classifier/Kernel_MEV.h

    r629 r648  
    4747
    4848    ///
    49     ///
     49    /// Constructing a new Kernel based on selected features @a
     50    /// index. All other settings are the same.
    5051    ///
    5152    Kernel_MEV(const Kernel_MEV& kernel, const std::vector<size_t>& index);
     
    6465   
    6566    ///
     67    /// @see Kernel_MEV(const Kernel_MEV&, const std::vector<size_t>&);
     68
    6669    ///
     70    /// @note returns a dynamically allocated pointer that must be
     71    /// deleted by the caller to avoid memory leaks.
    6772    ///
    6873    const Kernel_MEV* selected(const std::vector<size_t>& index) const;
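
    A brief usage sketch of the ownership rule in the @note above; kernel and index are illustrative names:

        // the caller owns the returned sub-kernel
        const Kernel_MEV* sub = kernel.selected(index);
        // ... use *sub ...
        delete sub;  // required to avoid a memory leak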
  • trunk/c++_tools/classifier/Kernel_SEV.h

    r640 r648  
    1616  ///
    1717  ///   @brief Speed Efficient Kernel
     18  ///
    1819  ///   Class taking care of the \f$ NxN \f$ kernel matrix, where
    1920  ///   \f$ N \f$ is number of samples. Type of Kernel is defined by a
     
    4546   
    4647    ///
    47     ///
     48    /// Constructs a Kernel based on selected features defined by @a index
    4849    ///
    4950    Kernel_SEV(const Kernel_SEV& kernel, const std::vector<size_t>& index);
  • trunk/c++_tools/classifier/MatrixLookup.h

    r631 r648  
    6767
    6868    ///
    69     /// Constructor creating a lookup into a sub-matrix of @matrix.
     69    /// Constructor creating a lookup into a sub-matrix of @a matrix.
    7070    ///
    7171    /// If @a row_vectors is true the new MatrixLookup will consist
  • trunk/c++_tools/classifier/MatrixLookupWeighted.h

    r640 r648  
    7878
    7979    ///
    80     /// Constructor creating a lookup into a sub-matrix of @matrix.
     80    /// Constructor creating a lookup into a sub-matrix of @a matrix.
    8181    ///
    8282    /// If @a row_vectors is true the new MatrixLookupWeighted will be
  • trunk/c++_tools/classifier/PolynomialKernelFunction.h

    r629 r648  
    4343    ///
    4444    /// @return If order is larger than one: \f$ (1+x \cdot y)^{order}
    45     /// \f$ \n If order is one (linear): \f$ \frac{\sum w_yxy} \f$
     45    /// \f$ \n If order is one (linear): \f$ \sum w_yxy \f$
    4646    ///
    4747    ///
     
    5454    ///
    5555    /// @return If order is larger than one: \f$ (1+x \cdot y)^{order}
    56     /// \f$ \n If order is one (linear): \f$ \sum w_xw_yxy} \f$
     56    /// \f$ \n If order is one (linear): \f$ \sum w_xw_yxy \f$
    5757    ///
    5858    double operator()(const DataLookupWeighted1D& x,
  • trunk/c++_tools/classifier/SVM.h

    r641 r648  
    1919  class DataLookup2D;
    2020#ifndef DOXYGEN_SHOULD_SKIP_THIS
    21   // @internal Class keeping track of which samples are support vectors and
    22   // not. The first nof_sv elements in the vector are indices of the
    23   // support vectors
    24   //
      21  /// @internal Class keeping track of which samples are support
      22  /// vectors and which are not. The first nof_sv elements in the
      23  /// vector are indices of the support vectors.
     24  ///
    2525  class Index
    2626  {
     
    178178    /// for training.
    179179    ///
    180     /// @note
    181     ///
    182180    void predict(const DataLookup2D& input, utility::matrix& predict) const;
    183181
     
    205203    void set_C(const double);
    206204
    207     ///
    208     /// Training the SVM following Platt's SMO, with Keerti's
    209     /// modifacation. Minimizing \f$ \frac{1}{2}\sum
    210     /// y_iy_j\alpha_i\alpha_j(K_{ij}+\frac{1}{C_i}\delta_{ij}) \f$ ,
    211     /// which corresponds to minimizing \f$ \sum w_i^2+\sum C_i\xi_i^2
    212     /// \f$.
    213     ///
     205    /**
      206      Training the SVM following Platt's SMO, with Keerthi's
      207      modification. Minimizing \f$ \frac{1}{2}\sum
     208      y_iy_j\alpha_i\alpha_j(K_{ij}+\frac{1}{C_i}\delta_{ij}) \f$ ,
     209      which corresponds to minimizing \f$ \sum w_i^2+\sum C_i\xi_i^2
     210       \f$.
     211    */
    214212    bool train();
    215213
  • trunk/c++_tools/classifier/Sampler.h

    r619 r648  
    4444    /// @brief Constructor
    4545    /// 
    46     /// @parameter Target targets
     46    /// @param target used to balance partitions
    4747    ///
    4848    Sampler(const Target& target);
     
    9999
    100100  protected:
     101    /// Target used to balance partitions
    101102    Target target_;
     103    /// index of training sets for the partitions
    102104    std::vector<std::vector<size_t> > training_index_;
     105    /// Targets for training sets for the partitions
    103106    std::vector<Target> training_target_;
     107    /// index of validation sets for the partitions
    104108    std::vector<std::vector<size_t> > validation_index_;
     109    /// Targets for validation sets for the partitions
    105110    std::vector<Target> validation_target_;
    106111
  • trunk/c++_tools/classifier/SubsetGenerator.h

    r636 r648  
    4747    /// @brief Constructor
    4848    /// 
    49     /// @parameter sampler sampler
    50     /// @parameter data data to split up in validation and training.
     49    /// @param sampler sampler
     50    /// @param data data to split up in validation and training.
    5151    ///
    5252    SubsetGenerator(const Sampler& sampler, const DataLookup2D& data);
     
    5656    /// @brief Constructor
    5757    /// 
    58     /// @parameter Sampler
    59     /// @parameter data data to be split up in validation and training.
     58    /// @param sampler taking care of partitioning the dataset
     59    /// @param data data to be split up in validation and training.
     60    /// @param fs Object selecting features for each subset
    6061    ///
    6162    SubsetGenerator(const Sampler& sampler, const DataLookup2D& data,
  • trunk/c++_tools/classifier/SupervisedClassifier.h

    r635 r648  
    4848   
    4949
    50     //
    51     // Train the classifier.
    52     //
     50    ///
     51    /// Train the classifier.
     52    ///
    5353    virtual bool train()=0;
    5454
     
    6161  protected:
    6262   
     63    /// Target to train on.
    6364    const Target& target_;
     65    /// true if classifier successfully trained
    6466    bool trained_;
    6567   
  • trunk/c++_tools/random/random.h

    r614 r648  
    108108    inline std::string name(void) const { return gsl_rng_name(rng_); }
    109109
     110    ///
     111    /// @return const pointer to underlying GSL random generator.
     112    ///
    110113    inline const gsl_rng* rng(void) const { return rng_; }
    111114
     
    181184   
    182185  protected:
     186    /// GSL random generator
    183187    RNG* rng_;
    184188  };
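
    A hedged usage sketch of the rng() accessor documented above; gsl_rng_uniform is a standard GSL call, and rng stands for an instance of the wrapper class:

        #include <gsl/gsl_rng.h>

        // draw a uniform deviate straight from the underlying GSL generator
        const gsl_rng* r = rng.rng();
        double u = gsl_rng_uniform(r);  // uniform in [0,1)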
     
    360364
    361365  protected:
     366    /// pointer to GSL random generator
    362367    RNG* rng_;
    363368  };
     
    429434    /// @brief Constructor
    430435    ///
    431     /// @param \a m is the expectation value of the distribution.
     436    /// @param m is the expectation value of the distribution.
    432437    ///
    433438    inline Exponential(const double m=1) : m_(m) {}
     
    471476    /// @brief Constructor
    472477    /// @param s is the standard deviation \f$ \sigma \f$ of distribution
    473     /// m is the expectation value \f$ \mu \f$ of the distribution
     478    /// @param m is the expectation value \f$ \mu \f$ of the distribution
    474479    ///
    475480    inline Gaussian(const double s=1, const double m=0) : m_(m), s_(s) {}
  • trunk/c++_tools/statistics/AveragerPairWeighted.h

    r627 r648  
    4848
    4949    ///
    50     ///
     50    /// Adding two sequences of data @a x and @a y. The data
     51    /// should be paired so \f$ x(i) \f$ is associated with \f$ y(i) \f$.
     52    /// @a x will be treated as having all weights equal to unity.
    5153    ///
    5254    void add(const classifier::DataLookup1D& x,
     
    5456
    5557    ///
    56     ///
     58    /// Adding two sequences of data @a x and @a y. The data should be
     59    /// paired so \f$ x(i) \f$ is associated with \f$ y(i) \f$.
     60    /// @a y will be treated as having all weights equal to unity.
    5761    ///
    5862    inline void add(const classifier::DataLookupWeighted1D& x,
     
    6064
    6165    ///
    62     ///
     66    /// Adding two sequences of weighted data @a x and @a y. The data
     67    /// should be paired so \f$ x(i) \f$ is associated with \f$ y(i) \f$.
    6368    ///
    6469    void add(const classifier::DataLookupWeighted1D& x,
  • trunk/c++_tools/statistics/Distance.h

    r616 r648  
    3030    }
    3131
     32    ///
     33    /// @return distance
     34    ///
    3235    virtual double operator()(const utility::vector& x,
    3336                              const utility::vector& y) const = 0;
    3437
    3538
     39    ///
     40    /// This function is virtual, and implemented in this base class
     41    /// copying the lookups to utility::vectors followed by calling the
     42    /// appropriate function. If speed is crucial you can implement
     43    /// this function in an inherited class, avoiding the copying.
     44    ///
     45    /// @return distance
     46    ///
    3647    virtual double operator()(const classifier::DataLookup1D& x,
    3748                              const classifier::DataLookup1D& y) const;
    3849       
    3950   
     51    ///
     52    /// @return weighted distance
     53    ///
    4054    virtual double operator()(const utility::vector& x,
    4155                              const utility::vector& y,
     
    4357                              const utility::vector& wy) const = 0;
    4458   
     59    ///
     60    /// This function is virtual, and implemented in this base class
     61    /// copying the lookups to utility::vectors followed by calling the
     62    /// appropriate function. If speed is crucial you can implement
     63    /// this function in an inherited class, avoiding the copying.
     64    ///
     65    /// @return weighted distance
     66    ///
    4567    virtual double operator()(const classifier::DataLookup1D& x,
    4668                              const classifier::DataLookup1D& y,
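
    A sketch of the speed-up suggested above: a hypothetical subclass overriding the DataLookup1D overload so the lookups are iterated directly instead of being copied to utility::vectors first. It relies only on DataLookup1D's size() and operator() seen earlier in this changeset; the pure virtual utility::vector overloads (omitted) must still be implemented for the class to be concrete.

        #include <cmath>

        class FastEuclidean : public theplu::statistics::Distance
        {
        public:
          double operator()(const theplu::classifier::DataLookup1D& x,
                            const theplu::classifier::DataLookup1D& y) const
          {
            double d2 = 0;
            for (size_t i = 0; i < x.size(); ++i)  // no temporary vectors
              d2 += (x(i) - y(i)) * (x(i) - y(i));
            return std::sqrt(d2);
          }
          // ... remaining pure virtuals omitted ...
        };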
  • trunk/c++_tools/statistics/Fisher.h

    r623 r648  
    1111namespace theplu {
    1212namespace statistics { 
    13   ///
    14   /// @brief Fisher's exact test.   
    15   /// Fisher's Exact test is a procedure that you can use for data
    16   /// in a two by two contingency table: \f[ \begin{tabular}{|c|c|}
    17   /// \hline a&b \tabularnewline \hline c&d \tabularnewline \hline
    18   /// \end{tabular} \f] Fisher's Exact Test is based on exact
    19   /// probabilities from a specific distribution (the hypergeometric
    20   /// distribution). There's really no lower bound on the amount of
    21   /// data that is needed for Fisher's Exact Test. You do have to
    22   /// have at least one data value in each row and one data value in
    23   /// each column. If an entire row or column is zero, then you
    24   /// don't really have a 2 by 2 table. But you can use Fisher's
    25   /// Exact Test when one of the cells in your table has a zero in
    26   /// it. Fisher's Exact Test is also very useful for highly
    27   /// imbalanced tables. If one or two of the cells in a two by two
    28   /// table have numbers in the thousands and one or two of the
    29   /// other cells has numbers less than 5, you can still use
    30   /// Fisher's Exact Test. For very large tables (where all four
    31   /// entries in the two by two table are large), your computer may
    32   /// take too much time to compute Fisher's Exact Test. In these
    33   /// situations, though, you might as well use the Chi-square test
    34   /// because a large sample approximation (that the Chi-square test
    35   /// relies on) is very reasonable. If all elements are larger than
    36   /// 10 a Chi-square test is reasonable to use.
    37   ///
    38   /// @note The statistica assumes that each column and row sum,
    39   /// respectively, are fixed. Just because you have a 2x2 table, this
    40   /// assumtion does not necessarily match you experimental upset. See
    41   /// e.g. Barnard's test for alternative.
    42   ///
     13  /**
     14     @brief Fisher's exact test.   
     15
     16     Fisher's Exact test is a procedure that you can use for data
     17     in a two by two contingency table: \f[ \begin{tabular}{|c|c|}
     18     \hline a&b \tabularnewline \hline c&d \tabularnewline \hline
     19     \end{tabular} \f] Fisher's Exact Test is based on exact
     20     probabilities from a specific distribution (the hypergeometric
     21     distribution). There's really no lower bound on the amount of
     22     data that is needed for Fisher's Exact Test. You do have to
     23     have at least one data value in each row and one data value in
     24     each column. If an entire row or column is zero, then you
     25     don't really have a 2 by 2 table. But you can use Fisher's
     26     Exact Test when one of the cells in your table has a zero in
     27     it. Fisher's Exact Test is also very useful for highly
     28     imbalanced tables. If one or two of the cells in a two by two
     29     table have numbers in the thousands and one or two of the
      30     other cells have numbers less than 5, you can still use
     31     Fisher's Exact Test. For very large tables (where all four
     32     entries in the two by two table are large), your computer may
     33     take too much time to compute Fisher's Exact Test. In these
     34     situations, though, you might as well use the Chi-square test
     35     because a large sample approximation (that the Chi-square test
     36     relies on) is very reasonable. If all elements are larger than
     37     10 a Chi-square test is reasonable to use.
     38     
      39     @note The statistic assumes that each column and row sum,
      40     respectively, is fixed. Just because you have a 2x2 table, this
      41     assumption does not necessarily match your experimental setup. See
      42     e.g. Barnard's test for an alternative.
     43  */
    4344 
    4445  class Fisher : public Score
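
    The fixed-margin assumption in the @note maps directly onto the hypergeometric distribution. A hedged sketch, not the Fisher class API, computing the one-sided p-value for a table [[a,b],[c,d]] with GSL:

        #include <gsl/gsl_cdf.h>
        #include <iostream>

        int main()
        {
          unsigned int a = 1, b = 9, c = 11, d = 3;
          // P(X <= a), where X counts first-row members among the a+c
          // samples drawn without replacement from all a+b+c+d items
          double p = gsl_cdf_hypergeometric_P(a, a + b, c + d, a + c);
          std::cout << "one-sided p = " << p << "\n";
        }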
  • trunk/c++_tools/statistics/FoldChange.h

    r623 r648  
    5151    /// @param value vector of the values
    5252    /// @param weight vector of accompanied weight to the values
    53     /// @train_set defining which values to use (number of values used
    54     /// in the calculation is equal to size of \a train_set)
    5553    ///
    5654    double score(const classifier::Target& target,
  • trunk/c++_tools/statistics/MultiDimensional.h

    r616 r648  
    3232
    3333    ///
    34     ///
     34    /// Function fitting parameters of the linear model by minimizing
     35    /// the quadratic deviation between model and data.
    3536    ///
    3637    void fit(const utility::matrix& X, const utility::vector& y);
    3738
    3839    ///
    39     ///
     40    /// @return parameters of the model
    4041    ///
    4142    utility::vector fit_parameters(void) { return fit_parameters_; }
  • trunk/c++_tools/statistics/MultiDimensionalWeighted.h

    r616 r648  
    3333
    3434    ///
    35     /// @todo doc
     35    /// @see gsl_multifit_wlinear
    3636    ///
    3737    void fit(const utility::matrix& X, const utility::vector& y,
     
    5555
    5656    ///
    57     ///
     57    /// @return parameters of fitted model
    5858    ///
    5959    utility::vector fit_parameters(void) { return fit_parameters_; }
  • trunk/c++_tools/statistics/OneDimensional.h

    r616 r648  
    5252    virtual double prediction_error(const double x) const=0;
    5353
     54    ///
     55    /// @brief print output to @a os
     56    ///
    5457    std::ostream& print(std::ostream& os,const double min,
    5558                        double max, const u_int n) const;
     
    6669    AveragerPair ap_;
    6770
    68     double msd_; // mean squared deviation (model from data points)
     71    ///
     72    /// mean squared deviation (model from data points)
     73    ///
     74    double msd_;
    6975  };
    7076
  • trunk/c++_tools/statistics/OneDimensionalWeighted.h

    r616 r648  
    6161
    6262  protected:
    63     double s2_; // noise level - the typical variance for a point with
    64                 // weight w is s2/w
    65 
     63    ///
     64    /// noise level - the typical variance for a point with weight w
     65    /// is s2/w
     66    ///
     67    double s2_;
    6668  };
    6769
  • trunk/c++_tools/statistics/Polynomial.h

    r616 r648  
    2626
    2727    ///
    28     ///
     28    /// @param power degree of polynomial, e.g. 1 for a linear model
    2929    ///
    3030    inline Polynomial(size_t power)
     
    3737
    3838    ///
    39     ///
     39    /// fit the model by minimizing the mean squared deviation between
     40    /// model and data.
    4041    ///
    4142    void fit(const utility::vector& x, const utility::vector& y);
    4243
    4344    ///
    44     ///
     45    /// @return parameters of the model
    4546    ///
    4647    utility::vector fit_parameters(void) { return md_.fit_parameters(); }
  • trunk/c++_tools/statistics/PolynomialWeighted.h

    r616 r648  
    2626
    2727    ///
    28     ///
     28    /// @param power degree of polynomial model
    2929    ///
    3030    inline PolynomialWeighted(size_t power)
  • trunk/c++_tools/statistics/ROC.h

    r623 r648  
    101101    inline u_int& minimum_size(void){ return minimum_size_; } 
    102102
     103    ///
     104    /// Function returning true if target is positive (binary()) for
      105    /// the sample with the ith lowest data value, so i=0 corresponds to
      106    /// the sample with the lowest data value and i=n()-1 to the sample
      107    /// with the highest data value.
     108    ///
    103109    bool target(const size_t i) const;
     110
     111    ///
     112    /// @return number of samples
     113    ///
    104114    inline size_t n(void) const { return vec_pair_.size(); }
     115
     116    ///
     117    /// @return number of positive samples (Target.binary()==true)
     118    ///
    105119    inline size_t n_pos(void) const { return nof_pos_; }
    106120
  • trunk/c++_tools/statistics/Score.h

    r623 r648  
    118118
    119119  protected:
     120    /// return true if method is weighted
    120121    inline bool weighted(void) const { return weighted_; }
    121122
      123    /// true if method is absolute, which means that if the score is
      124    /// below the expected value (by chance) E, score returns E-score instead.
    122125    bool absolute_;
     126    /// true if method is weighted
    123127    bool weighted_;
    124128
  • trunk/c++_tools/statistics/WilcoxonFoldChange.h

    r623 r648  
    2727    /// @return difference of the means of the two classes
    2828    ///
    29     /// @param target is +1 or -1
     29    /// @param target defining the two groups (Target.binary() )
    3030    /// @param value vector of the values
    31     /// @train_set defining which values to use (number of values used
    32     /// in the calculation is equal to size of \a train_set)
    3331    ///
    3432    double score(const classifier::Target& target,
     
    3836    /// @return difference of the weighted means of the two classes
    3937    ///
     38    /// @param target defining the two groups (Target.binary() )
    4039    /// @param value vector of the values (with weights)
    41     /// @train_set defining which values to use (number of values used
    42     /// in the calculation is equal to size of \a train_set)
    4340    ///
    4441    /// @note not implemented
     
    5047    /// @return difference of the weighted means of the two classes
    5148    ///
     49    /// @param target defining the two groups
    5250    /// @param value vector of the values
    5351    /// @param weight vector of accompanied weight to the values
    54     /// @train_set defining which values to use (number of values used
    55     /// in the calculation is equal to size of \a train_set)
    5652    ///
    5753    /// @note not implemented
  • trunk/c++_tools/statistics/utility.h

    r616 r648  
    3030  /// t samples without replacement and \a k of those are "good"
    3131  /// samples. \a k will follow a hypergeometric distribution.
    32   /// @cumulative hypergeomtric distribution functions P(k).
     32  ///
     33  /// @return cumulative hypergeometric distribution function P(k).
    3334  ///
    3435  double cdf_hypergeometric_P(u_int k, u_int n1, u_int n2, u_int t);
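
    A short usage sketch of the declaration above; the numbers are illustrative:

        // probability of at most k = 3 good samples when drawing t = 10
        // without replacement from an urn with n1 = 20 good and n2 = 80 bad
        double p = theplu::statistics::cdf_hypergeometric_P(3, 20, 80, 10);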
  • trunk/c++_tools/utility/Exception.h

    r570 r648  
    3434
    3535  ///
    36   /// @brief Class
     36  /// @brief Class for IO errors
    3737  ///
    3838  class IO_error : public std::runtime_error
    3939  {
    4040  public:
     41    ///
     42    /// Default constructor
     43    ///
    4144    IO_error(void) throw() : std::runtime_error("IO_error:") {}
     45
     46    ///
     47    /// Constructor for exception with message
     48    ///
    4249    IO_error(std::string message) throw()
    4350      : std::runtime_error("IO_error: " + message) {}
  • trunk/c++_tools/utility/NNI.h

    r616 r648  
    111111
    112112  protected:
     113    /** \f$ d_{ij}^2=\frac{\sum_{k=1}^C w_{ik} w_{jk} (x_{ik}-x_{jk})^2
     114       }{\sum_{k=1}^C w_{ik} w_{jk}} \f$ where C is the number of
     115       columns */
    113116    std::vector<std::pair<u_int,double> > calculate_distances(const u_int) const;
     117    /// Contributing nearest neighbours are added up to the user-set
     118    /// number, and neighbours are disqualified if their element
     119    /// (column) weight is zero.
    114120    std::vector<u_int> nearest_neighbours(const u_int,
    115121                             const std::vector<std::pair<u_int,double> >&) const;
     122    ///
     123    /// original data matrix
     124    ///
     125    const utility::matrix& data_;
    116126
    117     const utility::matrix& data_;
     127    ///
     128    /// data after imputation
     129    ///
    118130    utility::matrix imputed_data_;
     131
     132    ///
     133    /// number of neighbours to use
     134    ///
    119135    u_int neighbours_;
     136
     137    ///
     138    /// which rows are not imputed due to lack of data
     139    ///
    120140    std::vector<size_t> not_imputed_;
     141
     142    ///
     143    /// weight matrix
     144    ///
    121145    const utility::matrix& weight_;
    122146  };
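
    A hedged sketch of the distance defined in the comment above; the utility::matrix interface (columns(), operator()(row, column)) is assumed from its use elsewhere in the library, and distance2 is an illustrative name:

        // weighted squared distance between rows i and j, normalized by
        // the weight overlap; zero overlap yields no information
        double distance2(const theplu::utility::matrix& x,
                         const theplu::utility::matrix& w,
                         size_t i, size_t j)
        {
          double num = 0, den = 0;
          for (size_t k = 0; k < x.columns(); ++k) {
            const double ww = w(i, k) * w(j, k);
            num += ww * (x(i, k) - x(j, k)) * (x(i, k) - x(j, k));
            den += ww;
          }
          return den > 0 ? num / den : 0;
        }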
  • trunk/c++_tools/utility/Option.h

    r601 r648  
    5050    /// @param short_name one character key such as 'h' for -h flag
    5151    /// @param long_name string key such as "help" for --help flag
    52     /// @param telling what kind argument this option expects
     52    /// @param arg telling what kind of argument this option expects
    5353    /// @param desc string used in help display
    5454    ///