Changeset 597


Ignore:
Timestamp:
Aug 28, 2006, 3:03:54 PM (15 years ago)
Author:
Markus Ringnér
Message:

Fixed comments so they pass without some of the complaints from doxygen. Have not looked at the actual contents of the comments.

Location:
trunk/c++_tools
Files:
16 edited

Legend:

Unmodified
Added
Removed
  • trunk/c++_tools/classifier/ConsensusInputRanker.h

    r592 r597  
    77
    88namespace theplu {
     9
    910  class statistics::Score;
     11
    1012namespace classifier { 
    1113
     
    2022  /// sub-sets of the data, or the different lists could be different
    2123  /// because they have are generated using different criteria. Having
    22   /// \f$N\f$ lists means each row in the data matrix has \f$N\f$
     24  /// \f$ N \f$ lists means each row in the data matrix has \f$ N \f$
    2325  /// ranks (each corresponding to one list) and a consensus ranked
    2426  /// list is created by sorting the data rows with respect to their
     
    3840 
    3941  public:
     42
    4043    ///
    4144    /// @brief Default constructor
     
    9295
    9396#endif
    94 
 97
  • trunk/c++_tools/classifier/Kernel.h

    r592 r597  
    1818  ///  @brief Abstract Base Class for Kernels.
    1919  ///
    20   ///  Class taking care of the \f$NxN\f$ kernel matrix, where \f$N\f$
     20  ///  Class taking care of the \f$ NxN \f$ kernel matrix, where \f$ N \f$
    2121  ///  is number of samples. Each element in the Kernel corresponds is
    2222  ///  the scalar product of the corresponding pair of samples. At the
     
    9595    ///
    9696    /// Calculates the scalar product (using the KernelFunction)
    97     /// between vector @a vec and the \f$i\f$th column in the data
     97    /// between vector @a vec and the \f$ i \f$ th column in the data
    9898    /// matrix.
    9999    ///   
     
    102102    ///
    103103    /// Calculates the weighted scalar product (using the
    104     /// KernelFunction) between vector @a vec and the \f$i\f$th column
     104    /// KernelFunction) between vector @a vec and the \f$ i \f$ th column
    105105    /// in the data matrix. Using a weight vector with all elements
    106106    /// equal to unity yields same result as the non-weighted version
  • trunk/c++_tools/classifier/KernelLookup.h

    r592 r597  
    6060    ///
    6161    /// Creating a Lookup into parts of the Kernel. In the created
    62     /// Lookup the element in the \f$i\f$th row in the \f$j\f$th
     62    /// Lookup the element in the \f$ i \f$ th row in the \f$ j \f$ th
    6363    /// column is identical to the element in row row[i] and columns
    6464    /// column[j] in the underlying @a kernel. If @a owner is set to
     
    9090    /// Contructor building a sub-KernelLookup from a KernelLookup
    9191    /// defined by row index vector and column index vector. In the
    92     /// created Lookup the element in the \f$i\f$th row in the
    93     /// \f$j\f$th column is identical to the element in row row[i] and
     92    /// created Lookup the element in the \f$ i \f$ th row in the
     93    /// \f$ j \f$ th column is identical to the element in row row[i] and
    9494    /// columns column[j] in the copied @a kl. The resulting
    9595    /// KernelLookup is independent of the old KernelLookup, but is
     
    171171    /// Function to calculate a new Kernel element using the
    172172    /// underlying KernelFunction. The value is calulated between @a
    173     /// vec and the data vector of the \f$i\f$th sample, in other
    174     /// words, the sample corresponding to the \f$i\f$th row or
    175     /// \f$i\f$th column. In case KernelLookup is a sub-Kernel and not
     173    /// vec and the data vector of the \f$ i \f$ th sample, in other
     174    /// words, the sample corresponding to the \f$ i \f$ th row or
     175    /// \f$ i \f$ th column. In case KernelLookup is a sub-Kernel and not
    176176    /// symmetric, the kernel value is calculated between @a vec and
    177     /// the data vector corresponding to \f$i\f$th row.
     177    /// the data vector corresponding to \f$ i \f$ th row.
    178178    ///
    179179    inline double element(const DataLookup1D& vec, const size_t i) const
  • trunk/c++_tools/classifier/KernelWeighted_MEV.h

    r592 r597  
    1616  ///
    1717  /// @brief Memory Efficient Kernel Class taking care of the
    18   /// \f$NxN\f$ kernel matrix, where \f$N\f$ is number of
     18  /// \f$ NxN \f$ kernel matrix, where \f$ N \f$ is number of
    1919  /// samples. Type of Kernel is defined by a KernelFunction. This
    2020  /// Memory Efficient Version (MEV) does not store the kernel matrix
     
    5555    ///
    5656    /// Calculates the scalar product using the weighted
    57     /// KernelFunction between data vector @a vec and column \f$i\f$
     57    /// KernelFunction between data vector @a vec and column \f$ i \f$
    5858    /// in data matrix. For @a vec a vector of unity weights is used.
    5959    ///
     
    6969    ///
    7070    /// Calculates the scalar product using the weighted
    71     /// KernelFunction between data vector @a vec and column \f$i\f$
     71    /// KernelFunction between data vector @a vec and column \f$ i \f$
    7272    /// in data matrix. For @a vec a vector of unity weights is used.
    7373    ///
  • trunk/c++_tools/classifier/KernelWeighted_SEV.h

    r592 r597  
    1919  ///   @brief Weighted Speed Efficient Kernel
    2020  ///
    21   ///   Class taking care of the \f$NxN\f$ kernel matrix, where
    22   ///   \f$N\f$ is number of samples. Type of Kernel is defined by a
     21  ///   Class taking care of the \f$ NxN \f$ kernel matrix, where
     22  ///   \f$ N \f$ is number of samples. Type of Kernel is defined by a
    2323  ///   KernelFunction. This Speed Efficient Version (SEV) calculates
    2424  ///   the kernel matrix once and the kernel is stored in
     
    2727  ///   to a zero weight, which means they will be ignored during all
    2828  ///   calculations. See KernelFunction for further details on
    29   ///   weighted calculations of the Kernel. When \f$N\f$ is large and
     29  ///   weighted calculations of the Kernel. When \f$ N \f$ is large and
    3030  ///   the kernel matrix cannot be stored in memory, use
    3131  ///   KernelWeighted_MEV instead.
     
    6868    ///
    6969    /// Calculates the scalar product using the weighted
    70     /// KernelFunction between data vector @a vec and column \f$i\f$
     70    /// KernelFunction between data vector @a vec and column \f$ i \f$
    7171    /// in data matrix. For @a vec a vector of unity weights is used.
    7272    ///
     
    8282    ///
    8383    /// Calculates the scalar product using the weighted
    84     /// KernelFunction between data vector @a vec and column \f$i\f$
     84    /// KernelFunction between data vector @a vec and column \f$ i \f$
    8585    /// in data matrix. For @a vec a vector of unity weights is used.
    8686    ///
  • trunk/c++_tools/classifier/Kernel_MEV.h

    r592 r597  
    1515  ///
    1616  /// @brief Memory Efficient Kernel Class taking care of the
    17   ///   \f$NxN\f$ kernel matrix, where \f$N\f$ is number of
     17  ///   \f$ NxN \f$ kernel matrix, where \f$ N \f$ is number of
    1818  ///   samples. Type of Kernel is defined by a KernelFunction. This
    1919  ///   Memory Efficient Version (MEV) does not store the kernel
     
    5757    ///
    5858    /// Calculates the scalar product using the KernelFunction between
    59     /// data vector @a vec and column \f$i\f$ in data matrix.
     59    /// data vector @a vec and column \f$ i \f$ in data matrix.
    6060    ///
    6161    /// @return kernel element between data @a vec and training sample @a i
     
    6666    ///
    6767    /// Using the KernelFunction this function calculates the scalar
    68     /// product between vector @a vec and the column \f$ i\f$ in data
     68    /// product between vector @a vec and the column \f$ i \f$ in data
    6969    /// matrix. The KernelFunction expects a weight vector for each of
    7070    /// the two data vectors and as this Kernel is non-weighted each
  • trunk/c++_tools/classifier/Kernel_SEV.h

    r592 r597  
    1616  ///
    1717  ///   @brief Speed Efficient Kernel
    18   ///   Class taking care of the \f$NxN\f$ kernel matrix, where
    19   ///   \f$N\f$ is number of samples. Type of Kernel is defined by a
     18  ///   Class taking care of the \f$ NxN \f$ kernel matrix, where
     19  ///   \f$ N \f$ is number of samples. Type of Kernel is defined by a
    2020  ///   KernelFunction. This Speed Efficient Version (SEV) calculated
    2121  ///   the kernel matrix once by construction and the kernel is stored in
    22   ///   memory. When \f$N\f$ is large and the kernel matrix cannot be
     22  ///   memory. When \f$ N \f$ is large and the kernel matrix cannot be
    2323  ///   stored in memory, use Kernel_MEV instead.
    2424  ///   
     
    5151    ///
    5252    /// Calculates the scalar product using the KernelFunction between
    53     /// data vector @a vec and column \f$i\f$ in data matrix.
     53    /// data vector @a vec and column \f$ i \f$ in data matrix.
    5454    ///
    5555    /// @return kernel element between data @a vec and training sample @a i
     
    5959    ///
    6060    /// Using the KernelFunction this function calculates the scalar
    61     /// product between vector @a vec and the column \f$ i\f$ in data
     61    /// product between vector @a vec and the column \f$ i \f$ in data
    6262    /// matrix. The KernelFunction expects a weight vector for each of
    6363    /// the two data vectors and as this Kernel is non-weighted each
  • trunk/c++_tools/classifier/MatrixLookupWeighted.h

    r596 r597  
    1313
    1414 
    15 
    1615  ///
    1716  /// A MatrixLookupWeighted is very similar to a MatrixLookup, but
    18   /// contains a pointer to a weight matrix as well meeaning each data
     17  /// contains a pointer to a weight matrix as well meaning each data
    1918  /// element is associated to weight.
    2019  ///
    21   /// A MatrixLookupWeighted can be created directly from a matrix or from an
    22   /// other MatrixLookupWeighted. In the latter case, the resulting
    23   /// MatrixLookupWeighted is looking directly into the underlying matrix to
    24   /// avoid multiple lookups.
     20  /// A MatrixLookupWeighted can be created directly from a matrix or
     21  /// from an other MatrixLookupWeighted. In the latter case, the
     22  /// resulting MatrixLookupWeighted is looking directly into the
     23  /// underlying matrix to avoid multiple lookups.
    2524  ///
    2625  /// There is a possibility to set the MatrixLookupWeighted as owner
     
    3231  ///
    3332  /// @todo add on weight part
     33  ///
    3434  class MatrixLookupWeighted : public DataLookup2D
    3535  {
     
    206206    const MatrixLookupWeighted* validation_data(const std::vector<size_t>&,
    207207                                        const std::vector<size_t>&) const;
     208
    208209    ///
    209210    /// Access operator
    210211    ///
    211     /// @return weight * data for element \f$i j\f$
     212    /// @return weight * data for element \f$ i j\f$
    212213    ///
    213214    inline double operator()(const size_t row, const size_t column) const
     
    234235 
    235236  ///
    236   /// The output operator MatrixLookupWeighted
     237  /// The output operator MatrixLookupWeighted 
    237238  ///
    238239  std::ostream& operator<< (std::ostream& s, const MatrixLookupWeighted&);
  • trunk/c++_tools/classifier/SVM.h

    r593 r597  
    142142
    143143    ///
    144     /// @return \f$\alpha\f$
     144    /// @return \f$ \alpha \f$
    145145    ///
    146146    inline const gslapi::vector& alpha(void) const { return alpha_; }
     
    221221    /// Training the SVM following Platt's SMO, with Keerti's
    222222    /// modifacation. Minimizing \f$ \frac{1}{2}\sum
    223     /// y_iy_j\alpha_i\alpha_j(K_{ij}+\frac{1}{C_i}\delta_{ij}) \f$,
     223    /// y_iy_j\alpha_i\alpha_j(K_{ij}+\frac{1}{C_i}\delta_{ij}) \f$ ,
    224224    /// which corresponds to minimizing \f$ \sum w_i^2+\sum C_i\xi_i^2
    225225    /// \f$.
  • trunk/c++_tools/gslapi/matrix.h

    r570 r597  
    142142    ///
    143143    /// Elementwise addition of the elements of matrix \a b to the
    144     /// elements of the calling matrix ,\f$a_{ij} = a_{ij} + b_{ij} \;
    145     /// \forall i,j\f$. The result is stored into the calling matrix.
     144    /// elements of the calling matrix ,\f$ a_{ij} = a_{ij} + b_{ij} \;
     145    /// \forall i,j \f$. The result is stored into the calling matrix.
    146146    ///
    147147    /// @return Whatever GSL returns.
     
    151151    ///
    152152    /// Add the scalar value \a d to the elements of the calling
    153     /// matrix, \f$a_{ij} = a_{ij} + d \; \forall i,j\f$. The result
     153    /// matrix, \f$ a_{ij} = a_{ij} + d \; \forall i,j \f$. The result
    154154    /// is stored into the calling matrix.
    155155    ///
     
    166166    ///
    167167    /// Elementwise division of the elemnts of the calling matrix by
    168     /// the elements of matrix \a b, \f$a_{ij} = a_{ij} / b_{ij} \;
    169     /// \forall i,j\f$. The result is stored into the calling matrix.
     168    /// the elements of matrix \a b, \f$ a_{ij} = a_{ij} / b_{ij} \;
     169    /// \forall i,j \f$. The result is stored into the calling matrix.
    170170    ///
    171171    /// @return Whatever GSL returns.
     
    257257    ///
    258258    /// Multiply the elements of matrix \a b with the elements of the
    259     /// calling matrix ,\f$a_{ij} = a_{ij} * b_{ij} \; \forall
    260     /// i,j\f$. The result is stored into the calling matrix.
     259    /// calling matrix ,\f$ a_{ij} = a_{ij} * b_{ij} \; \forall
     260    /// i,j \f$. The result is stored into the calling matrix.
    261261    ///
    262262    /// @return Whatever GSL returns.
     
    272272    ///
    273273    /// Multiply the elements of the calling matrix with a scalar \a
    274     /// d, \f$a_{ij} = d * a_{ij} \; \forall i,j\f$. The result is
     274    /// d, \f$ a_{ij} = d * a_{ij} \; \forall i,j \f$. The result is
    275275    /// stored into the calling matrix.
    276276    ///
     
    318318    ///
    319319    /// Subtract the elements of matrix \a b from the elements of the
    320     /// calling matrix ,\f$a_{ij} = a_{ij} - b_{ij} \; \forall
    321     /// i,j\f$. The result is stored into the calling matrix.
     320    /// calling matrix ,\f$ a_{ij} = a_{ij} - b_{ij} \; \forall
     321    /// i,j \f$. The result is stored into the calling matrix.
    322322    ///
    323323    /// @return Whatever GSL returns.
  • trunk/c++_tools/gslapi/vector.h

    r581 r597  
    187187
    188188    ///
    189     /// Vector addition, \f$this_i = this_i + other_i \; \forall i\f$.
     189    /// Vector addition, \f$ this_i = this_i + other_i \; \forall i \f$.
    190190    ///
    191191    /// @return GSL_SUCCESS on normal exit.
     
    195195
    196196    ///
    197     /// Add a constant to a vector, \f$this_i = this_i + term \;
    198     /// \forall i\f$.
     197    /// Add a constant to a vector, \f$ this_i = this_i + term \;
     198    /// \forall i \f$.
    199199    ///
    200200    /// @return GSL_SUCCESS on normal exit.
     
    204204
    205205    ///
    206     /// This function performs element-wise division, \f$this_i =
    207     /// this_i/other_i \; \forall i\f$.
     206    /// This function performs element-wise division, \f$ this_i =
     207    /// this_i/other_i \; \forall i \f$.
    208208    ///
    209209    /// @return GSL_SUCCESS on normal exit.
     
    279279
    280280    ///
    281     /// This function performs element-wise multiplication, \f$this_i =
    282     /// this_i * other_i \; \forall i\f$.
     281    /// This function performs element-wise multiplication, \f$ this_i =
     282    /// this_i * other_i \; \forall i \f$.
    283283    ///
    284284    /// @return GSL_SUCCESS on normal exit.
     
    296296
    297297    ///
    298     /// Rescale vector, \f$this_i = this_i * factor \; \forall i\f$.
     298    /// Rescale vector, \f$ this_i = this_i * factor \; \forall i \f$.
    299299    ///
    300300    /// @return GSL_SUCCESS on normal exit.
     
    350350
    351351    ///
    352     /// Vector subtraction, \f$this_i = this_i - other_i \; \forall i\f$.
     352    /// Vector subtraction, \f$ this_i = this_i - other_i \; \forall i \f$.
    353353    ///
    354354    /// @return GSL_SUCCESS on normal exit.
  • trunk/c++_tools/statistics/Averager.h

    r582 r597  
    6565
    6666    ///
    67     /// Rescales the object, \f$ \forall x_i \rightarrow a*x_i\f$, \f$
    68     /// \forall x_i^2 \rightarrow a^2*x_i^2 \f$
     67    /// Rescales the object, \f$ \forall x_i \rightarrow a*x_i \f$,
     68    /// \f$ \forall x_i^2 \rightarrow a^2*x_i^2 \f$
    6969    ///
    7070    inline void rescale(double a) { x_*=a; xx_*=a*a; }
     
    103103
    104104    ///
    105     /// @return \f$ \sum_i (x_i-m)^2\f$
     105    /// @return \f$ \sum_i (x_i-m)^2 \f$
    106106    ///
    107107    inline double sum_xx_centered(void) const { return xx_-x_*x_/n_; }
     
    117117    ///
    118118    /// The variance is calculated as \f$ \frac{1}{N}\sum_i
    119     /// (x_i-m)^2\f$, where \f$m\f$ is the mean.
     119    /// (x_i-m)^2 \f$, where \f$ m \f$ is the mean.
    120120    ///
    121121    /// @return estimation of variance
     
    127127    /// The variance is calculated using the \f$ (n-1) \f$ correction,
    128128    /// which means it is the best unbiased estimator of the variance
    129     /// \f$ \frac{1}{N-1}\sum_i (x_i-m)^2\f$, where \f$m\f$ is the
     129    /// \f$ \frac{1}{N-1}\sum_i (x_i-m)^2 \f$, where \f$ m \f$ is the
    130130    /// mean.
    131131    ///
  • trunk/c++_tools/statistics/AveragerPair.h

    r593 r597  
    5959
    6060    ///
    61     /// \f$\frac{\sum_i (x_i-m_x)(y_i-m_y)}{\sum_i
    62     /// (x_i-m_x)^2+\sum_i (y_i-m_y)^2 + n(m_x-m_y)^2}\f$
     61    /// \f$ \frac{\sum_i (x_i-m_x)(y_i-m_y)}{\sum_i
     62    /// (x_i-m_x)^2+\sum_i (y_i-m_y)^2 + n(m_x-m_y)^2} \f$
    6363    ///
    6464    /// In case of a zero denominator - zero is returned.
     
    7373 
    7474    ///
    75     /// \f$\frac{\sum_i (x_i-m_x)(y_i-m_y)}{\sqrt{\sum_i
    76     /// (x_i-m_x)^2\sum_i (y_i-m_y)^2}}\f$
     75    /// \f$ \frac{\sum_i (x_i-m_x)(y_i-m_y)}{\sqrt{\sum_i
     76    /// (x_i-m_x)^2\sum_i (y_i-m_y)^2}} \f$
    7777    ///
    7878    /// @return Pearson correlation coefficient.
     
    8484    ///
    8585    /// Calculating covariance using
    86     /// \f$ \frac{1}{N}\sum_i (x_i-m_x)(y_i-m_y)\f$,
    87     /// where \f$m\f$ is the mean.
     86    /// \f$ \frac{1}{N}\sum_i (x_i-m_x)(y_i-m_y) \f$,
     87    /// where \f$ m \f$ is the mean.
    8888    ///
    8989    /// @return The covariance.
     
    120120
    121121    ///
    122     /// @return \f$ \sum_i (x_i-m_x)(y_i-m_y)\f$
     122    /// @return \f$ \sum_i (x_i-m_x)(y_i-m_y) \f$
    123123    ///
    124124    inline double sum_xy_centered(void) const {return xy_-x_.sum_x()*y_.mean();}
  • trunk/c++_tools/statistics/Pearson.h

    r475 r597  
    3434    ///
    3535    /// \f$ \frac{\vert \sum_i(x_i-\bar{x})(y_i-\bar{y})\vert
    36     /// }{\sqrt{\sum_i (x_i-\bar{x})^2\sum_i (x_i-\bar{x})^2}}\f$.
     36    /// }{\sqrt{\sum_i (x_i-\bar{x})^2\sum_i (x_i-\bar{x})^2}} \f$.
    3737    /// @return Pearson correlation, if absolute=true absolute value
    3838    /// of Pearson is used.
     
    4343    ///
    4444    /// \f$ \frac{\vert \sum_iw^2_i(x_i-\bar{x})(y_i-\bar{y})\vert }
    45     /// {\sqrt{\sum_iw^2_i(x_i-\bar{x})^2\sum_iw^2_i(y_i-\bar{y})^2}}\f$,
    46     /// where \f$m_x = \frac{\sum w_ix_i}{\sum w_i}\f$ and \f$m_x =
    47     /// \frac{\sum w_ix_i}{\sum w_i}\f$. This expression is chosen to
    48     /// get a correlation equal to unity when \a x and \a y are
    49     /// equal. @return absolute value of weighted version of Pearson
    50     /// correlation.
     45    /// {\sqrt{\sum_iw^2_i(x_i-\bar{x})^2\sum_iw^2_i(y_i-\bar{y})^2}}
     46    /// \f$, where \f$ m_x = \frac{\sum w_ix_i}{\sum w_i} \f$ and \f$
     47    /// m_x = \frac{\sum w_ix_i}{\sum w_i} \f$. This expression is
     48    /// chosen to get a correlation equal to unity when \a x and \a y
     49    /// are equal. @return absolute value of weighted version of
     50    /// Pearson correlation.
    5151    ///
    5252    double score(const classifier::Target& target,
  • trunk/c++_tools/statistics/tScore.h

    r589 r597  
    3838    /// mean, \f$ n \f$ is the number of data points and \f$ s^2 =
    3939    /// \frac{ \sum_i (x_i-m_x)^2 + \sum_i (y_i-m_y)^2 }{ n_x + n_y -
    40     /// 2 }
     40    /// 2 } \f$
    4141    ///
    4242    /// @return t-score if absolute=true absolute value of t-score
     
    4949    /// Calculates the weighted t-score, i.e. the ratio between
    5050    /// difference in mean and standard deviation of this
    51     /// difference. \f$ t = \frac{ m_x - m_y } {
    52     /// \frac{s2}{n_x}+\frac{s2}{n_y} \f$ where \f$ m \f$ is the
     51    /// difference. \f$ t = \frac{ m_x - m_y }{
     52    /// \frac{s2}{n_x}+\frac{s2}{n_y}} \f$ where \f$ m \f$ is the
    5353    /// weighted mean, n is the weighted version of number of data
    5454    /// points and \f$ s2 \f$ is an estimation of the variance \f$ s^2
  • trunk/c++_tools/utility/SVD.h

    r420 r597  
    7676
    7777    ///
    78     /// Solve the system \f$Ax=b\f$ using the decomposition of A.
     78    /// Solve the system \f$ Ax=b \f$ using the decomposition of A.
    7979    ///
    8080    /// @note If decompose() has not been run the outcome of the call
Note: See TracChangeset for help on using the changeset viewer.