Changeset 4252
- Timestamp: Nov 18, 2022, 3:54:04 AM
- Location: trunk
- Files: 41 edited, 31 copied, 1 moved
trunk
- Property svn:mergeinfo changed:
  /branches/0.20-stable (added) merged: 4208,4210-4215,4217-4244,4246-4250
trunk/NEWS
r4209 → r4252
       yat 0.20.x series from http://dev.thep.lu.se/yat/svn/branches/0.20-stable

-      version 0.20 (released NOT YET)
+      version 0.20 (released 18 November 2022)
+       - Boost 1.66 (or newer) is now required (see r4228)
        - utility::replace(3) now takes const string& rather than string
        - normalizer::QuantileNormalizer is deprecated; use …
trunk/README
r3999 → r4252
-      and LDFLAGS` (linker)) can be set at via configure, e.g.:
+      and `LDFLAGS` (linker)) can be set at via configure, e.g.:

         #> ./configure CPPFLAGS=-I/my/local/path/include
…
       === Boost ===

-      [http://www.boost.org Boost] version 1.35 or later. If you have Boost
+      [http://www.boost.org Boost] version 1.66 or later. If you have Boost
       installed in a non-standard location, `./configure --with-boost=DIR`
       can be useful to provide the location of Boost. Boost header files are
…
       Copyright (C) 2006, 2007, 2008 Jari Häkkinen, Peter Johansson
       Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Peter Johansson
+      Copyright (C) 2022 Jari Häkkinen, Peter Johansson

       This file is part of yat library, http://dev.thep.lu.se/yat
trunk/configure.ac
r4207 → r4252
       # Boost http://www.boost.org
-      m4_define([YAT_REQUIRED_BOOST_VERSION], [1.35])
+      m4_define([YAT_REQUIRED_BOOST_VERSION], [1.66])
       AX_BOOST_BASE([YAT_REQUIRED_BOOST_VERSION], [], [AC_MSG_FAILURE([dnl
       Boost YAT_REQUIRED_BOOST_VERSION (or newer) not found. Please install boost, or provide
trunk/doc/doxygen.config.in
r3987 → r4252
       ALPHABETICAL_INDEX = YES

-      # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
-      # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
-      # in which this list will be split (can be a number in the range [1..20])
-
-      COLS_IN_ALPHA_INDEX = 5
-
       # In case all classes in a project start with a common prefix, all
       # classes will be put under the same header in the alphabetical index.
trunk/m4/version.m4
r4209 → r4252
       # yat-0.19.1   16:1:0
       # yat-0.19.2   16:2:0
+      # yat-0.20     17:0:0
       #
       # *Accidently, the libtool number was not updated for yat 0.5
trunk/test/Makefile.am
r4199 → r4252
       test/matrix_weighted.test test/merge.test \
       test/merge_iterator.test \
+      test/minimizer.test \
       test/multiminimizer.test \
       test/multiminimizerderivative.test \
…
       test/queue.test test/queue2.test \
       test/queue3.test \
+      test/random_shuffle.test \
       test/range.test \
       test/ranking.test \
       test/regression.test test/rnd.test \
       test/rng-mt.test \
+      test/root_finder.test \
+      test/root_finder_derivative.test \
       test/round.test \
       test/score.test \
…
       EXTRA_PROGRAMS = $(CXX_TESTS)
+      EXTRA_PROGRAMS += test/shuffle
       EXTRA_PROGRAMS += test/stdopt
…
       test/doxygen_test.sh \
       test/help_test.sh \
+      test/random_shuffle.sh \
       test/version_option_test.sh \
       test/yat_config_test.sh \
…
       test/help_test.log: test/stdopt $(shell_test_deps)
       test/kendall.log: $(top_srcdir)/test/data/kendall.txt
+      test/multiprocess.log: test/data/foo.vcf.gz
       test/pileup.log: test/data/foo.sorted.bam
+      test/random_shuffle.log: test/shuffle
       test/static_test.log: $(srcdir)/m4/yat.m4 $(shell_test_deps)
trunk/test/Suite.cc
r4207 → r4252
+      void Suite::require_foo_vcf_gz(void) const
+      {
+      #ifndef HAVE_BCFTOOLS_EXECUTABLE
+        out() << "no bcftools executable\n";
+        exit(EXIT_SKIP);
+      #endif
+      }
+
+
       int Suite::return_value(void) const
       {
trunk/test/Suite.h
r4207 → r4252
        */
        std::ostream& out(void) const;
+
+       /**
+          Call this function from tests that require file data/foo.vcf.gz
+
+          If bcftools is not available call exit(EXIT_SKIP).
+       */
+       void require_foo_vcf_gz(void) const;

        /**
trunk/test/cox.cc
r4198 → r4252
           return EXIT_FAILURE;
         }
-        std::cout << "OK\n";
-        return EXIT_SUCCESS;
+        return suite.return_value();
       }
trunk/test/multiminimizer.cc
r4172 → r4252
         MyFunction func;
         Vector x(2);
-        minimizer(x, func, 1e-5, 1000);
+        minimizer(x, func, MultiMinimizer::Size(1e-5), 1000);
         suite.out() << "result x: " << x(0) << " " << x(1) << "\n";
-        if (!suite.equal_fix(x(0), 0.0, 1e-5))
+        if (!suite.equal_fix(x(0), 0.0, 1e-5)) {
           suite.err() << "error: incorrect x(0)\n";
-        if (!suite.equal_fix(x(1), 0.0, 1e-5))
+          suite.add(false);
+        }
+        if (!suite.equal_fix(x(1), 0.0, 1e-5)) {
           suite.err() << "error: incorrect x(1)\n";
-        minimizer(x, func, 1e-5);
+          suite.add(false);
+        }
+        // prefer running with limited max_epoch; only compilation test
+        if (false) {
+          minimizer(x, func, MultiMinimizer::Size(1e-5));
+        }
       }
trunk/test/multiminimizerderivative.cc
r4175 → r4252
         MyFunction func;
         Vector x(2);
-        minimizer(x, func, 1e-5, 1000);
+        minimizer(x, func, MultiMinimizerDerivative::Gradient(1e-5), 1000);
         suite.out() << "result x: " << x(0) << " " << x(1) << "\n";
+        if (minimizer.epochs() == 1000) {
+          suite.err() << "max epoch reached\n";
+          suite.add(false);
+        }
         if (!suite.equal_fix(x(0), 0.0, 1e-5))
           suite.err() << "error: incorrect x(0)\n";
         if (!suite.equal_fix(x(1), 0.0, 1e-5))
           suite.err() << "error: incorrect x(1)\n";
-        minimizer(x, func, 1e-5);
+
+        if (false) {
+          minimizer(x, func, MultiMinimizerDerivative::Gradient(1e-5));
+        }
       }
trunk/test/multiprocess.cc
r4044 → r4252
-        Copyright (C) 2021 Peter Johansson
+        Copyright (C) 2021, 2022 Peter Johansson

         This file is part of the yat library, http://dev.thep.lu.se/yat
…
         test::Suite suite(argc, argv);
+        suite.require_foo_vcf_gz();
         omic::VcfFile file("../../data/foo.vcf.gz");
         omic::VcfCompare compare(file.header());
trunk/test/normalization.cc
r4207 → r4252
         along with yat. If not, see <http://www.gnu.org/licenses/>.
       */
+
+      // avoid deprecation warning
+      #define YAT_DEPRECATE

       #include <config.h>
trunk/test/segment.cc
r4207 → r4252
       void test_comp(test::Suite& suite)
       {
-        Segment<double> key;
+        Segment<double> key(0,1);
         {
           SegmentSet<double> set;
trunk/test/vcf_file2.cc
r3763 → r4252
-        Copyright (C) 2018 Peter Johansson
+        Copyright (C) 2018, 2022 Peter Johansson

         This file is part of the yat library, http://dev.thep.lu.se/yat
…
         test::Suite suite(argc, argv);
-      #ifndef HAVE_BCFTOOLS_EXECUTABLE
-        suite.out() << "no bcftools executable\n";
-        return EXIT_SKIP;
-      #endif
+        suite.require_foo_vcf_gz();
         omic::VcfFile vcf("../../data/foo.vcf.gz");
         std::ostringstream ss;
trunk/test/version_option_test.sh
r4207 → r4252
       # $Id$
       #
-      # Copyright (C) 2022 Peter Johansson
+      # Copyright (C) 2022 Jari Häkkinen, Peter Johansson
       #
       # This file is part of the yat library, http://dev.thep.lu.se/yat
…
       test -s stdout || exit_fail empty output

-      grep -P "^stdopt .* $VERSION" stdout || exit_fail
-      grep -P '^Copyright.*John Doe and Jane Doe' stdout || exit_fail
+      grep "^stdopt .* $VERSION" stdout || exit_fail
+      grep '^Copyright.*John Doe and Jane Doe' stdout || exit_fail
       grep "This is free software" stdout || exit_fail
…
       test -s stdout || exit_fail empty output

-      grep -P "version.*output version information and exit" stdout || exit_fail
+      grep "version.*output version information and exit" stdout || exit_fail
       exit_success
trunk/yat/normalizer/QuantileNormalizer.h
r4186 → r4252
         API. Use QuantileNormalizer2 instead.
       */
-      class QuantileNormalizer
+      class YAT_DEPRECATE QuantileNormalizer
       {
       public:
trunk/yat/omic/BamWriteIterator.h
r3792 → r4252
         Copyright (C) 2012, 2013, 2017, 2018 Peter Johansson
+        Copyright (C) 2022 Jari Häkkinen

         This file is part of the yat library, http://dev.thep.lu.se/yat
…
       */

-      #include <boost/function_output_iterator.hpp>
+      #include <boost/iterator/function_output_iterator.hpp>

       #include <functional>
trunk/yat/omic/GFF2.cc
r3875 → r4252
-        Copyright (C) 2011, 2012, 2019, 2020 Peter Johansson
+        Copyright (C) 2011, 2012, 2019, 2020, 2022 Peter Johansson

         This file is part of the yat library, http://dev.thep.lu.se/yat
…
         --n;
         // str[n] now points to last char we want to include
-        m[key] = str.substr(i,n-i+1);
+        m[std::move(key)] = str.substr(i,n-i+1);
trunk/yat/omic/GFF3.cc
r3875 → r4252
-        Copyright (C) 2011, 2012, 2020 Peter Johansson
+        Copyright (C) 2011, 2012, 2020, 2022 Peter Johansson

         This file is part of the yat library, http://dev.thep.lu.se/yat
…
         if (v[0] == "")
           return;
-        m[v[0]] = "";
+        m[std::move(v[0])] = "";
       }
-      m[v[0]] = v[1];
+      else
+        m[std::move(v[0])] = std::move(v[1]);
     }
trunk/yat/omic/VCF.cc
r4146 → r4252
-        Copyright (C) 2018, 2019, 2020, 2022 Peter Johansson
+        Copyright (C) 2018, 2019, 2020 Peter Johansson
+        Copyright (C) 2022 Jari Häkkinen, Peter Johansson

         This file is part of the yat library, http://dev.thep.lu.se/yat
…
         // clear each element in data_ (to empty vector)
         std::for_each(data_.begin(), data_.end(),
-                      std::mem_fun_ref(&std::vector<std::string>::clear));
+                      std::mem_fn(&std::vector<std::string>::clear));
trunk/yat/random/random.h
r4207 → r4252
         Copyright (C) 2005, 2006, 2007, 2008 Jari Häkkinen, Peter Johansson
-        Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2020, 2022 Peter Johansson
+        Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2020 Peter Johansson
+        Copyright (C) 2022 Jari Häkkinen, Peter Johansson

         This file is part of the yat library, http://dev.thep.lu.se/yat
…
       #include <memory>
       #include <mutex>
+      #include <random>
       #include <string>
       #include <vector>
…
       {
         DiscreteUniform rnd;
-        std::random_shuffle(first, last, rnd);
+        std::shuffle(first, last, std::default_random_engine(rnd()));
       }
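std::random_shuffle, used on the removed line, was deprecated in C++14 and removed in C++17, which is presumably why this block switches to std::shuffle seeded from yat's DiscreteUniform. For reference, the same migration pattern in isolation (standard library only; the container, engine and seeding below are illustrative and not taken from this changeset) looks like:

    #include <algorithm>
    #include <random>
    #include <vector>

    int main()
    {
      std::vector<int> v{1, 2, 3, 4, 5};

      // pre-C++17 call, now removed from the standard:
      // std::random_shuffle(v.begin(), v.end());

      // C++11 replacement: pass an explicit uniform random bit generator
      std::default_random_engine engine(std::random_device{}());
      std::shuffle(v.begin(), v.end(), engine);
    }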
trunk/yat/regression/Cox.cc
r4198 → r4252
       #include "detail/Cox.h"

+      #include <yat/utility/Steffenson.h>
       #include <yat/utility/VectorBase.h>

       #include <gsl/gsl_cdf.h>
-
-      #include <boost/math/tools/roots.hpp>

       #include <algorithm>
…
       double beta_std_error_;

-      class logL
+      class Score
       {
       public:
-        logL(const std::vector<TimePoint>& times);
-        std::pair<double, double> operator()(double beta) const;
-
-        double hessian(double beta) const;
+        Score(const std::vector<TimePoint>& times);
+        double operator()(double beta) const;
+        double derivative(double beta) const;
       private:
         const std::vector<TimePoint>& times_;
…
       if (data_.empty())
         return;
-      beta_ = 0;

       prepare_times();
-      logL func(times_);
-      using boost::math::tools::newton_raphson_iterate;
-      beta_ = newton_raphson_iterate(func, beta_, -1e42, 1e42, 30);
+      // score is derivative of logL
+      Score score(times_);
+      for (double b=0; b<1.1; b+=0.1)
+        score(b);
+      utility::Steffenson solver;
+      beta_ = solver(score, 0.0,
+                     utility::RootFinderDerivative::Delta(0.0, 1e-5));
       if (std::isnan(beta_))
         throw std::runtime_error("beta is NaN");
-      // Calculate 2nd deriviate at beta_;
-      double hessian = func.hessian(beta_);
-      beta_std_error_ = 1.0 / std::sqrt(hessian);
-    }
-
-
-    Cox::Impl::logL::logL(const std::vector<TimePoint>& times)
+      // 2nd derivative of logL is 1st derivative of score
+      double hessian = score.derivative(beta_);
+      beta_std_error_ = 1.0 / std::sqrt(-hessian);
+    }
+
+
+    /*
+      Without ties the log-likelihood is
+      logL = sum_i (x_i * beta - log sum_j x_j * beta)
+      where i runs over all events and j runs over all data points j
+      such that t_j >= t_i
+
+      Setting theta = x * beta we have
+      logL = sum_i (theta_i - log sum_j theta_j) =
+           = sum_i (theta_i - log theta_Q_i) =
+
+      theta = beta * x -> dtheta/dbeta = x
+
+      We handle ties using Efron's method. Let denote m_i number of data
+      points at t_i, H_i the indices of events at time t_i.
+
+      logL = sum_i (theta_H_i - sum_k^m_i-1 log(theta_Q_i - k/m_i theta_H_i))
+
+      where theta_H_i is a sum of theta running over H_i.
+
+      The derivative (wrt beta)
+      l' = sum_i(x_H_i - sum_k^m_i-1 (theta*x_Q_i - k/m_j theta+x_H_i) / (theta_Q_i - k/m_j theta_H_i))
+    */
+    Cox::Impl::Score::Score(const std::vector<TimePoint>& times)
       : times_(times)
     {
     }
…
-    std::pair<double, double> Cox::Impl::logL::operator()(double beta) const
-    {
-      // Using Efron's method:
-      //
-      // sort data wrt time and denote unique time t_i such that t_i <
-      // t_j iff i < j
-      // Let denote H_j the indices of events at time t_j, i.e.,
-      // Y_i = t_j and event_i = true; n_j = |H_j|
-      //
-      // log partial likelihood
-      // logL =
-      // sum_j (sum_i x_i*beta - sum_k log{sum_i theta_i - k/n_j sum_i theta_i})
-      // where j runs over all unique times
-      //       i runs over all events in H_j
-      //       k runs over all events in H_j
-      // 1st i sum runs : Y_i >= t_j
-      // 2nd i sum runs over H_j
-      //
-      // and the derivative is
-      // deriv =
-      // sum_j (sum_i x_i - sum_k {[sum_i theta_i*x_i - k/n_j sum_i theta_i*x_i] / [sum_i theta_i - k/n_j sum_i theta_i]})
-      // 1st i sum runs : Y_i >= t_j
-      // 2nd i sum runs over H_j
-      // 3rd i sum runs : Y_i >= t_j
-      // 4th i sum runs over H_j
-
-      /*
-        We handle tied ties using Efron's method. Let denote H_j the
-        indices of events at time t_j
-
-        logL =
-        \sum_j (sum_i(theta_i) - \sum_k(log(sum_i(theta_i) - k/m_j sum_i(theta_i))
-
-        where theta_i = beta * x_i
-        where j runs over all unique time points
-        1st i runs over H_j, i.e., all events at time t_j.
-        k runs over H_j
-        2nd i runs over all i: Y_i > t_j
-        3rd i runs over H_j
-      */
-
-      double logL = 0;
-      double deriv = 0;
-
+    double Cox::Impl::Score::operator()(double beta) const
+    {
+      double score = 0;
+      // variables with suffix _Q denote sums running over data points
+      // (including events and censored data points) at current time and
+      // future
       double theta_Q = 0;
       double thetaX_Q = 0;
+
       for (auto time = times_.rbegin(); time!=times_.rend(); ++time) {
-        double sum_event_theta = 0;
-        double sum_event_thetaX = 0;
+        // variables with suffix _H denote sums running over events at
+        // the current time.
+        double theta_H = 0;
+        double thetaX_H = 0;
         for (auto it = time->events_begin(); it!=time->events_end(); ++it) {
           double theta = it->theta(beta);
-          sum_event_theta += theta;
-          sum_event_thetaX += theta * it->x;
-        }
-        theta_Q += sum_event_theta;
-        thetaX_Q += sum_event_thetaX;
+          theta_H += theta;
+          thetaX_H += theta * it->x;
+        }
+        theta_Q += theta_H;
+        thetaX_Q += thetaX_H;

         for (auto it = time->censored_begin(); it!=time->censored_end(); ++it) {
…
         }

+        // loop over events at time point t
+        for (auto it = time->events_begin(); it!=time->events_end(); ++it) {
+          score += it->x;
+
+          const size_t k = it - time->events_begin();
+          double r = static_cast<double>(k) / time->size();
+
+          assert(theta_Q > r * theta_H);
+
+          score -= (thetaX_Q - r * thetaX_H) / (theta_Q - r * theta_H);
+        }
+      }
+
+      assert(!std::isnan(score));
+      return score;
+    }
+
+
+    double Cox::Impl::Score::derivative(double beta) const
+    {
+      double deriv = 0;
+      // variables with suffix _Q denote sums running over data points
+      // (including events and censored data points) at current time and
+      // future
+      double theta_Q = 0;
+      double thetaX_Q = 0;
+      double thetaXX_Q = 0;
+      double XX_Q = 0;
+
+      for (auto time = times_.rbegin(); time!=times_.rend(); ++time) {
+        // variables with suffix _H denote sums running over events at
+        // the current time.
+        double theta_H = 0;
+        double thetaX_H = 0;
+        double thetaXX_H = 0;
+        double XX_H = 0;
+        for (auto it = time->events_begin(); it!=time->events_end(); ++it) {
+          double theta = it->theta(beta);
+          theta_H += theta;
+          thetaX_H += theta * it->x;
+          thetaXX_H += theta * it->x * it->x;
+          XX_H += it->x * it->x;
+        }
+        theta_Q += theta_H;
+        thetaX_Q += thetaX_H;
+        thetaXX_Q += thetaXX_H;
+        XX_Q += XX_H;
+
+        for (auto it = time->censored_begin(); it!=time->censored_end(); ++it) {
+          double theta = it->theta(beta);
+          theta_Q += theta;
+          thetaX_Q += theta * it->x;
+          thetaXX_Q += theta * it->x * it->x;
+          XX_Q += it->x * it->x;
+        }
+
+        // f = g/h
+        // g = - (thetaX_Q - r*thetaX_H)
+        // g'= - (thetaXX_Q - r*thetaXX_H)
+        //
+        // h = theta_Q - r*theta_H
+        // h'= thetaX_Q - r*thetaX_H
+
+        // f' = (g/h)' = (g'h - gh')/h^2

         // loop over events at time point t
         for (auto it = time->events_begin(); it!=time->events_end(); ++it) {
           const size_t k = it - time->events_begin();
           double r = static_cast<double>(k) / time->size();
-
-          logL += it->x * beta;
-          assert(theta_Q > 0.0);
-          assert(theta_Q > r * sum_event_theta);
-          logL -= std::log(theta_Q - r * sum_event_theta);
-
-          deriv += it->x;
-          deriv -= (thetaX_Q - r * sum_event_thetaX) /
-            (theta_Q - r * sum_event_theta);
-        }
-
+          double g = - (thetaX_Q - r*thetaX_H);
+          double dg = - (thetaXX_Q - r * thetaXX_H);
+
+          double h = (theta_Q - r*theta_H);
+          double dh= (thetaX_Q - r*thetaX_H);
+          deriv += (dg*h - g*dh) / (h*h);
+        }
       }

-      assert(!std::isnan(logL));
+      assert(!std::isnan(deriv));
-      return std::make_pair(-logL, -deriv);
-    }
-
-
-    double Cox::Impl::logL::hessian(double beta) const
-    {
-      // The second derivative of the log-likelihood evaluated at the
-      // maximum likelihood estimates (MLE) is the observed Fisher
-      // information
-
-      double hessian = 0;
-
-      double sum_theta = 0;
-      double sum_thetaX = 0;
-      double sum_thetaXX = 0;
-      for (const auto& t : times_) {
-        for (auto it = t.begin; it!=t.end; ++it) {
-          double theta = it->theta(beta);
-          sum_theta += theta;
-          sum_thetaX += theta * it->x;
-          sum_thetaXX += theta * it->x * it->x;
-        }
-      }
-
-      // loop over unique time points
-      for (const auto& t : times_) {
-
-        // sum over all events in H_j
-        double part_sum_theta = 0;
-        double part_sum_thetaX = 0;
-        double part_sum_thetaXX = 0;
-        for (auto it = t.events_begin(); it!=t.events_end(); ++it) {
-          double theta = it->theta(beta);
-          part_sum_theta += theta;
-          part_sum_thetaX += theta * it->x;
-          part_sum_thetaXX += theta * it->x * it->x;
-        }
-
-        // loop over events at time point t
-        for (auto it = t.events_begin(); it!=t.events_end(); ++it) {
-          const size_t k = it - t.events_begin();
-          double r = static_cast<double>(k) / t.size();
-
-          double S_thetaXX = sum_thetaXX - r * part_sum_thetaXX;
-          double S_thetaX = sum_thetaX - r * part_sum_thetaX;
-          double S_theta = sum_theta - r * part_sum_theta;
-          hessian += S_thetaXX/S_theta;
-          hessian -= std::pow(S_thetaX/S_theta, 2);
-        }
-
-        // update the cumulative sums
-        for (auto it = t.begin; it!=t.end; ++it) {
-          double theta = it->theta(beta);
-          sum_theta -= theta;
-          sum_thetaX -= theta * it->x;
-          sum_thetaXX -= theta * it->x * it->x;
-        }
-      }
-      return hessian;
-    }
+      return deriv;
+    }

     // class Cox
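In standard notation, the Efron-corrected partial likelihood that the new comment and the Score class implement reads as follows (this transcription is an editorial aid, not part of the changeset). With theta_j = exp(x_j beta), H_i the set of events tied at time t_i, m_i = |H_i|, and Q_i the risk set at t_i (all data points with t_j >= t_i):

    \log L(\beta) = \sum_i \left[ \sum_{j \in H_i} x_j\beta
      - \sum_{k=0}^{m_i-1} \log\!\left( \sum_{j \in Q_i} \theta_j
        - \frac{k}{m_i} \sum_{j \in H_i} \theta_j \right) \right],
    \qquad \theta_j = e^{x_j\beta}

    U(\beta) = \frac{\partial \log L}{\partial \beta}
      = \sum_i \left[ \sum_{j \in H_i} x_j
      - \sum_{k=0}^{m_i-1}
        \frac{\sum_{j \in Q_i} \theta_j x_j - \frac{k}{m_i} \sum_{j \in H_i} \theta_j x_j}
             {\sum_{j \in Q_i} \theta_j - \frac{k}{m_i} \sum_{j \in H_i} \theta_j} \right]

Score::operator() returns the score U(beta), whose root the Steffenson solver locates, and Score::derivative() returns dU/dbeta, i.e. the second derivative of logL, whose negation (the observed information) gives the standard error of beta as in the new train() code.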
trunk/yat/regression/MultiCox.cc
r4198 → r4252
       beta_.resize(n, 0.0);
       utility::BFGS2 solver(n);
-      solver(beta_, func, 1e-3);
+      solver(beta_, func, utility::MultiMinimizerDerivative::Gradient(1e-3));

       // Calculate 2nd deriviate at beta_;
trunk/yat/regression/detail/Cox.h
r4198 → r4252
     };

-    /*
-    class Cox::Impl
-    {
-    public:
-      void add(const yat::utility::VectorBase& x,
-               const yat::utility::VectorBase& time,
-               const std::vector<char>& event)
-      ... (the rest of the commented-out block, old lines 135-554, is removed in the
-           same way: it was a verbatim, commented-out copy of the previous logL-based
-           Cox::Impl implementation, including train() built on
-           boost::math::tools::newton_raphson_iterate, logL::operator(),
-           logL::hessian() and the out-of-line Cox member definitions) ...
-    */
     }}}}
     #endif
trunk/yat/statistics/NegativeBinomialExtendedMixture.cc
r4192 → r4252
       #include <yat/statistics/GaussianMixture.h>
       #include <yat/utility/BFGS2.h>
+      #include <yat/utility/Exception.h>
       #include <yat/utility/Matrix.h>
…
       #include <cassert>
       #include <cmath>
+      #include <sstream>
       #include <utility>
       #include <vector>
…
         param(1) = alpha_(i);
         Q func(data_, h);
-        solver(param, func, 1e-3, 1000);
+        size_t m_epochs = 1000;
+        solver(param, func, utility::MultiMinimizerDerivative::Gradient(1e-3),
+               m_epochs);
+        if (solver.epochs() == m_epochs) {
+          std::ostringstream msg;
+          msg << "NegativeBinomialExtendedMixture: maximal epochs reached";
+          throw utility::runtime_error(msg.str());
+        }
         m_(i) = param(0);
         alpha_(i) = param(1);
trunk/yat/statistics/NegativeBinomialMixture.cc
r4184 → r4252
         param(1) = alpha_(i);
         Q func(count_, h);
-        solver(param, func, 1e-3);
+        solver(param, func, utility::MultiMinimizerDerivative::Gradient(1e-3));
         m_(i) = param(0);
         alpha_(i) = param(1);
trunk/yat/utility/BLAS_level2.h
r3999 → r4252
-        Copyright (C) 2017, 2019, 2020 Peter Johansson
+        Copyright (C) 2017, 2019, 2020, 2022 Peter Johansson

         This file is part of the yat library, http://dev.thep.lu.se/yat
…
       /**
-         \relates Matrix
+         \relates MatrixBase
          \relates VectorBase
…
       /**
-         \relates Matrix
+         \relates MatrixBase
          \relates VectorBase
trunk/yat/utility/BinaryOstreamIterator.h
r3902 → r4252
         Copyright (C) 2020 Peter Johansson
+        Copyright (C) 2022 Jari Häkkinen

         This file is part of the yat library, http://dev.thep.lu.se/yat
…
       #include "utility.h"

-      #include <boost/function_output_iterator.hpp>
+      #include <boost/iterator/function_output_iterator.hpp>
trunk/yat/utility/CommandLine.cc
r4207 → r4252
         Copyright (C) 2007 Jari Häkkinen, Peter Johansson, Markus Ringnér
         Copyright (C) 2008 Jari Häkkinen, Peter Johansson
-        Copyright (C) 2009, 2010, 2011, 2012, 2013, 2018, 2022 Peter Johansson
+        Copyright (C) 2009, 2010, 2011, 2012, 2013, 2018 Peter Johansson
+        Copyright (C) 2022 Jari Häkkinen, Peter Johansson

         This file is part of the yat library, http://dev.thep.lu.se/yat
…
       using namespace std;
       // just in case it is not pristine
-      for_each(options_.begin(), options_.end(), std::mem_fun(&Option::reset));
+      for_each(options_.begin(), options_.end(), std::mem_fn(&Option::reset));

       std::vector<std::string> arguments(argv, argv+argc);
…
       try {
         for_each(options_.begin(), options_.end(),
-                 std::mem_fun(&Option::validate));
+                 std::mem_fn(&Option::validate));
       }
       catch (cmd_error& e) {
trunk/yat/utility/Makefile.am
r4207 → r4252
       yat/utility/BFGS.cc \
       yat/utility/BFGS2.cc \
+      yat/utility/Bisection.cc \
+      yat/utility/Brent.cc \
+      yat/utility/BrentMinimizer.cc \
       yat/utility/Cigar.cc \
       yat/utility/ColumnStream.cc \
…
       yat/utility/DiagonalMatrix.cc \
       yat/utility/Exception.cc \
+      yat/utility/FalsePosition.cc \
       yat/utility/FileUtil.cc \
       yat/utility/FletcherReevesConjugate.cc \
       yat/utility/GetlineIterator.cc \
+      yat/utility/GoldenSection.cc \
       yat/utility/Index.cc \
       yat/utility/KernelPCA.cc \
…
       yat/utility/MatrixView.cc \
       yat/utility/MatrixWeighted.cc \
+      yat/utility/Minimizer.cc \
       yat/utility/MultiMinimizer.cc \
       yat/utility/MultiMinimizerDerivative.cc \
…
       yat/utility/NelderMeadSimplex2.cc \
       yat/utility/NelderMeadSimplex2Rand.cc \
+      yat/utility/Newton.cc \
       yat/utility/NNI.cc \
       yat/utility/Option.cc \
…
       yat/utility/PCA.cc \
       yat/utility/PolakRibiereConjugate.cc \
+      yat/utility/QuadGolden.cc \
+      yat/utility/RootFinder.cc \
+      yat/utility/RootFinderDerivative.cc \
+      yat/utility/Secant.cc \
       yat/utility/split.cc \
       yat/utility/stl_utility.cc \
       yat/utility/Scheduler.cc \
       yat/utility/SmithWaterman.cc \
+      yat/utility/Steffenson.cc \
       yat/utility/SteepestDescent.cc \
       yat/utility/SVD.cc \
…
       $(srcdir)/yat/utility/BLAS_utility.h \
       $(srcdir)/yat/utility/boost_exception_ptr.h \
+      $(srcdir)/yat/utility/Bisection.h \
+      $(srcdir)/yat/utility/Brent.h \
+      $(srcdir)/yat/utility/BrentMinimizer.h \
       $(srcdir)/yat/utility/Cigar.h \
       $(srcdir)/yat/utility/CigarIterator.h \
…
       $(srcdir)/yat/utility/DiagonalMatrix.h \
       $(srcdir)/yat/utility/Exception.h \
+      $(srcdir)/yat/utility/FalsePosition.h \
       $(srcdir)/yat/utility/FileUtil.h \
       $(srcdir)/yat/utility/FletcherReevesConjugate.h \
       $(srcdir)/yat/utility/GetlineIterator.h \
       $(srcdir)/yat/utility/getvector.h \
+      $(srcdir)/yat/utility/GoldenSection.h \
       $(srcdir)/yat/utility/Index.h \
       $(srcdir)/yat/utility/iterator_traits.h \
…
       $(srcdir)/yat/utility/merge.h \
       $(srcdir)/yat/utility/MergeIterator.h \
+      $(srcdir)/yat/utility/Minimizer.h \
       $(srcdir)/yat/utility/MultiMinimizer.h \
       $(srcdir)/yat/utility/MultiMinimizerDerivative.h \
…
       $(srcdir)/yat/utility/NelderMeadSimplex2.h \
       $(srcdir)/yat/utility/NelderMeadSimplex2Rand.h \
+      $(srcdir)/yat/utility/Newton.h \
       $(srcdir)/yat/utility/NNI.h \
       $(srcdir)/yat/utility/Option.h \
…
       $(srcdir)/yat/utility/PolakRibiereConjugate.h \
       $(srcdir)/yat/utility/PriorityQueue.h \
+      $(srcdir)/yat/utility/QuadGolden.h \
       $(srcdir)/yat/utility/Queue.h \
+      $(srcdir)/yat/utility/RootFinder.h \
+      $(srcdir)/yat/utility/RootFinderDerivative.h \
       $(srcdir)/yat/utility/round.h \
       $(srcdir)/yat/utility/Scheduler.h \
+      $(srcdir)/yat/utility/Secant.h \
       $(srcdir)/yat/utility/Segment.h \
       $(srcdir)/yat/utility/SegmentMap.h \
…
       $(srcdir)/yat/utility/split.h \
       $(srcdir)/yat/utility/SteepestDescent.h \
+      $(srcdir)/yat/utility/Steffenson.h \
       $(srcdir)/yat/utility/StreamRedirect.h \
       $(srcdir)/yat/utility/Range.h \
…
       include yat/utility/expression/Makefile.am
-      include yat/utility/multiminimizer/Makefile.am
+      include yat/utility/multivariable/Makefile.am
       include yat/utility/ranking/Makefile.am
+      include yat/utility/univariable/Makefile.am
trunk/yat/utility/MatrixBase.h
r4207 → r4252
       /**
-         \brief Locate the maximum and minimum element in the Matrix.
-
-         \return The indices to the element with the minimum and maximum
-         values of the Matrix, respectively.
+         \brief Locate the maximum and minimum element in the matrix, \a M.
+
+         The indices to the element with the minimum and maximum values of
+         the matrix are returned through parameters \a min and \a max,
+         respectively.

          \note Lower index has precedence (searching in row-major order).

-         \relates Matrix
+         \relates MatrixBase
       */
-      void minmax_index(const MatrixBase&,
+      void minmax_index(const MatrixBase& M,
                         std::pair<size_t,size_t>& min,
                         std::pair<size_t,size_t>& max);
trunk/yat/utility/MultiMinimizer.cc
r4172 → r4252
+      MultiMinimizer::Size::Size(double epsabs)
+        : epsabs_(epsabs)
+      {}
+
+
+      bool MultiMinimizer::Size::operator()(const gsl_multimin_fminimizer* m)
+      {
+        double size = gsl_multimin_fminimizer_size(m);
+        return gsl_multimin_test_size(size, epsabs_) != GSL_CONTINUE;
+      }
+
+
       void
       MultiMinimizer::GslFree::operator()(gsl_multimin_fminimizer* p) const
trunk/yat/utility/MultiMinimizer.h
r4172 → r4252
-      #include "multiminimizer/f.h"
+      #include "Exception.h"
+      #include "multivariable/f.h"

       #include "Vector.h"
…
       yat::utility::Vector& step(void);

+      /// Abstract class defining the interface for functions
+      /// determining when the minimisation stops.
+      class Stopper
+      {
+      public:
+        /// return true when search should stop
+        virtual bool operator()(const gsl_multimin_fminimizer*)=0;
+      };
+
+
+      /**
+         wrapper around
+         <a href="https://www.gnu.org/software/gsl/doc/html/multimin.html#stopping-criteria">gsl_multimin_test_size</a>
+      */
+      class Size : public Stopper
+      {
+      public:
+        /// \param epsabs tolerance
+        explicit Size(double epsabs);
+
+        /**
+           \return \c true if <a
+           href="https://www.gnu.org/software/gsl/doc/html/multimin.html#stopping-criteria">gsl_multimin_test_size</a>(size,
+           epsabs) does not return \c GSL_CONTINUE, where \c size is
+           defined by \c gsl_multimin_fminimizer_size and \c epsabs is
+           defined in constructor.
+        */
+        bool operator()(const gsl_multimin_fminimizer*);
+      private:
+        double epsabs_;
+      };
+
       /**
          Function finds an \c x that minimizes the function defined by
          \c func. It calls gsl_multimin_fminimizer_iterate until either
-         \c GSL_ENOPROG is returned or the size is smaller than \c
-         epsabs, as tested by gsl_multimin_test_size.
+         \c GSL_ENOPROG is returned or the \c stopper returns true.

          Type Requirements:
…
       template<class FUNC>
       void operator()(yat::utility::VectorMutable& x, FUNC& func,
-                      double epsabs);
+                      Stopper&& stopper);

       /**
          Same as
-         operator()(yat::utility::VectorMutable&, FUNC& func, double epsabs);
+         operator()(yat::utility::VectorMutable&, FUNC& func, Stopper&&);
          but at maximum run \c max_epochs epochs (iterations).
       */
       template<class FUNC>
       void operator()(yat::utility::VectorMutable&, FUNC& func,
-                      double epsabs, unsigned int max_epochs);
+                      Stopper&& stopper, unsigned int max_epochs);
     protected:
…
     template<class FUNC>
     void MultiMinimizer::operator()(yat::utility::VectorMutable& x,
-                                    FUNC& func, double epsabs)
+                                    FUNC& func,
+                                    MultiMinimizer::Stopper&& stopper)
     {
       unsigned int max_epochs = std::numeric_limits<unsigned int>::max();
-      (*this)(x, func, epsabs, max_epochs);
+      (*this)(x, func, std::move(stopper), max_epochs);
     }


     template<class FUNC>
     void MultiMinimizer::operator()(yat::utility::VectorMutable& x,
-                                    FUNC& func, double epsabs,
+                                    FUNC& func,
+                                    MultiMinimizer::Stopper&& stopper,
                                     unsigned int max_epochs)
     {
…
       gsl_multimin_function gsl_func;
       gsl_func.n = x.size();
-      gsl_func.f = multiminimizer::f<FUNC>;
+      gsl_func.f = multivariable::f<FUNC>;
       gsl_func.params = &func;
…
         if (status == GSL_ENOPROG)
           break;
-        throw yat::utility::GSL_error("MultiMinimizer", status);
+        throw GSL_error("MultiMinimizer", status);
       }
-      double size = gsl_multimin_fminimizer_size(solver_.get());
-      if (gsl_multimin_test_size(size, epsabs) != GSL_CONTINUE)
+      if (stopper(solver_.get()))
         break;
+      //double size = gsl_multimin_fminimizer_size(solver_.get());
+      //if (gsl_multimin_test_size(size, epsabs) != GSL_CONTINUE)
+      //break;
     }
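For existing callers the visible consequence of this interface change is at the call site: the bare tolerance argument becomes a stopper object. A minimal before/after sketch, mirroring the call shapes that appear elsewhere in this changeset (test/multiminimizer.cc and yat/regression/MultiCox.cc); the construction of the concrete minimizer, the vector x and the functor func is assumed and not shown here:

    // before r4252: tolerance passed as a plain double
    // minimizer(x, func, 1e-5, 1000);

    // from r4252: the stop criterion is wrapped in a MultiMinimizer::Stopper
    minimizer(x, func, theplu::yat::utility::MultiMinimizer::Size(1e-5), 1000);

    // derivative-based minimizers (e.g. utility::BFGS2) take a Gradient stopper instead
    // solver(beta, func, theplu::yat::utility::MultiMinimizerDerivative::Gradient(1e-3));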
trunk/yat/utility/MultiMinimizerDerivative.cc
r4176 → r4252
+      MultiMinimizerDerivative::Gradient::Gradient(double epsabs)
+        : epsabs_(epsabs)
+      {}
+
+
+      bool
+      MultiMinimizerDerivative::Gradient::operator()(const gsl_multimin_fdfminimizer* m)
+      {
+        return gsl_multimin_test_gradient(m->gradient, epsabs_) != GSL_CONTINUE;
+      }
+
+
       void
       MultiMinimizerDerivative::GslFree::operator()(gsl_multimin_fdfminimizer* p) const
trunk/yat/utility/MultiMinimizerDerivative.h
r4177 → r4252
-      #include "multiminimizer/df.h"
-      #include "multiminimizer/f.h"
+      #include "multivariable/df.h"
+      #include "multivariable/f.h"

       #include <yat/utility/Vector.h>
…
       void tolerance(double tol);

+      /// Abstract class defining the interface for functions defining
+      /// the stop criteria in the minimization
+      class Stopper
+      {
+      public:
+        /// return true is search should stop
+        virtual bool operator()(const gsl_multimin_fdfminimizer*)=0;
+      };
+
+
+      /// wrapper around
+      /// <a href="https://www.gnu.org/software/gsl/doc/html/multimin.html#stopping-criteria">gsl_multimin_test_gradient</a>
+      class Gradient : public Stopper
+      {
+      public:
+        /// \param epsabs tolerance
+        explicit Gradient(double epsabs);
+
+        /**
+           \return \c true if <a
+           href="https://www.gnu.org/software/gsl/doc/html/multimin.html#stopping-criteria">gsl_multimin_test_gradient</a>(gradient,
+           epsabs) does not return \c GSL_CONTINUE, where \c gradient is
+           defined by \c gsl_multimin_fdfminimizer_gradient and \c
+           epsabs is defined in constructor.
+        */
+        bool operator()(const gsl_multimin_fdfminimizer*);
+      private:
+        double epsabs_;
+      };
+
       /**
          Function finds an \c x that minimizes the function defined by
…
       template<class FUNC>
-      void operator()(yat::utility::VectorMutable&, FUNC& func, double epsabs);
+      void operator()(yat::utility::VectorMutable&, FUNC& func,
+                      MultiMinimizerDerivative::Stopper&& stopper);

       /**
…
       template<class FUNC>
       void operator()(yat::utility::VectorMutable&, FUNC& func,
-                      double epsabs, unsigned int max_epochs);
+                      MultiMinimizerDerivative::Stopper&& stopper,
+                      unsigned int max_epochs);
     protected:
…
     template<class FUNC>
-    void MultiMinimizerDerivative::operator()(yat::utility::VectorMutable& x,
-                                              FUNC& func, double epsabs)
+    void
+    MultiMinimizerDerivative::operator()(yat::utility::VectorMutable& x,
+                                         FUNC& func,
+                                         MultiMinimizerDerivative::Stopper&& stopper)
     {
       unsigned int max_epochs = std::numeric_limits<unsigned int>::max();
-      (*this)(x, func, epsabs, max_epochs);
+      (*this)(x, func, std::move(stopper), max_epochs);
     }


     template<class FUNC>
-    void MultiMinimizerDerivative::operator()(yat::utility::VectorMutable& x,
-                                              FUNC& func, double epsabs,
-                                              unsigned int max_epochs)
+    void
+    MultiMinimizerDerivative::operator()(yat::utility::VectorMutable& x,
+                                         FUNC& func,
+                                         MultiMinimizerDerivative::Stopper&& stopper,
+                                         unsigned int max_epochs)
     {
       assert(size_ == x.size());
       gsl_multimin_function_fdf gsl_func;
       gsl_func.n = size_;
-      gsl_func.f = multiminimizer::f<FUNC>;
-      gsl_func.df = multiminimizer::df<FUNC>;
-      gsl_func.fdf = multiminimizer::fdf<FUNC>;
+      gsl_func.f = multivariable::f<FUNC>;
+      gsl_func.df = multivariable::df<FUNC>;
+      gsl_func.fdf = multivariable::fdf<FUNC>;
       gsl_func.params = &func;
…
-        if (gsl_multimin_test_gradient(solver_->gradient,epsabs) != GSL_CONTINUE)
+        if (stopper(solver_.get()))
           break;
+        //if (gsl_multimin_test_gradient(solver_->gradient,epsabs) != GSL_CONTINUE)
+        //break;
       }
trunk/yat/utility/OstreamIterator.h
r3154 → r4252
         Copyright (C) 2013, 2014 Peter Johansson
+        Copyright (C) 2022 Jari Häkkinen

         This file is part of the yat library, http://dev.thep.lu.se/yat
…
       #include "yat_assert.h"

-      #include <boost/function_output_iterator.hpp>
+      #include <boost/iterator/function_output_iterator.hpp>

       #include <functional>
trunk/yat/utility/multivariable/Makefile.am
r4175 → r4252
       # along with yat. If not, see <https://www.gnu.org/licenses/>.

-      nobase_include_HEADERS += $(srcdir)/yat/utility/multiminimizer/df.h
-      nobase_include_HEADERS += $(srcdir)/yat/utility/multiminimizer/f.h
+      nobase_include_HEADERS += $(srcdir)/yat/utility/multivariable/df.h
+      nobase_include_HEADERS += $(srcdir)/yat/utility/multivariable/f.h
trunk/yat/utility/multivariable/df.h
r4175 → r4252
-      #ifndef theplu_yat_utility_multiminimizer_df
-      #define theplu_yat_utility_multiminimizer_df
+      #ifndef theplu_yat_utility_multivariable_df
+      #define theplu_yat_utility_multivariable_df

       // $Id$
…
       namespace utility {

-      namespace multiminimizer
+      namespace multivariable
       {
         template<class FUNC>
…
         df<FUNC>(x, params, gradient);
       }
-      } // end namespace multi_minimizer
+      } // end namespace multivariable

       }}} // end namespaces utility, yat and theplu
trunk/yat/utility/multivariable/f.h
r4175 → r4252
-      #ifndef theplu_yat_utility_multiminimizer_f
-      #define theplu_yat_utility_multiminimizer_f
+      #ifndef theplu_yat_utility_multivariable_f
+      #define theplu_yat_utility_multivariable_f

       // $Id$
…
       namespace utility {

-      namespace multiminimizer
+      namespace multivariable
       {
         template<class FUNC>