// $Id: Local.cc 789 2007-03-10 20:07:13Z jari $

/*
  Copyright (C) The authors contributing to this file.

  This file is part of the yat library, http://lev.thep.lu.se/trac/yat

  The yat library is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  The yat library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
  02111-1307, USA.
*/

#include "Local.h"
#include "Kernel.h"
#include "OneDimensionalWeighted.h"
#include "yat/utility/vector.h"

#include <algorithm>
#include <cassert>
#include <cmath>
#include <iostream>
#include <utility>

namespace theplu {
namespace yat {
namespace regression {

  Local::Local(OneDimensionalWeighted& r, Kernel& k)
    : kernel_(&k), regressor_(&r)
  {
  }

  Local::~Local(void)
  {
  }

  void Local::add(const double x, const double y)
  {
    data_.push_back(std::make_pair(x,y));
  }

  void Local::fit(const size_t step_size, const size_t nof_points)
  {
    if (step_size==0 || nof_points<3){
      std::cerr << "yat::regression::Local "
                << "Parameters invalid. Fitting ignored." << std::endl;
      return;
    }

    size_t nof_fits=data_.size()/step_size;
    x_.clone(utility::vector(nof_fits));
    y_predicted_.clone(utility::vector(x_.size()));
    y_err_.clone(utility::vector(x_.size()));
    std::sort(data_.begin(), data_.end());

    // copy the data into two utility vectors ONCE, so that views of
    // them can be used below
    utility::vector x(data_.size());
    utility::vector y(data_.size());
    for (size_t j=0; j<x.size(); j++){
      x(j)=data_[j].first;
      y(j)=data_[j].second;
    }

    // loop over the regression points and perform a local regression
    // in a window around each of them
    for (size_t i=0; i<nof_fits; i++) {
      size_t max_index = static_cast<size_t>( (i+0.5)*step_size );
      size_t min_index;
      double width; // distance from the middle of the window to its border
      double x_mid; // middle of the window
      // right border case
      if (max_index > data_.size()-1){
        min_index = max_index - nof_points + 1;
        max_index = data_.size()-1;
        // extrapolate the half-width from the truncated window span,
        // mirroring the left border case below
        width = ( (( x(max_index)-x(min_index) )*(nof_points-1)) /
                  ( 2*(max_index-min_index)) );
        x_mid = x(min_index)+width;
      }
      // normal middle case
      else if (max_index > nof_points-1){
        min_index = max_index - nof_points + 1;
        width = (x(max_index)-x(min_index))/2;
        x_mid = x(min_index)+width;
      }
      // left border case
      else {
        min_index = 0;
        width = ( (( x(max_index)-x(0) )*(nof_points-1)) /
                  ( 2*(max_index-min_index)) );
        x_mid = x(max_index)-width;
      }
      assert(min_index<data_.size());
      assert(max_index<data_.size());

      utility::vector x_local(x, min_index, max_index-min_index+1);
      utility::vector y_local(y, min_index, max_index-min_index+1);

      // calculating weights
      utility::vector w(max_index-min_index+1);
      for (size_t j=0; j<w.size(); j++)
        w(j) = (*kernel_)( (x_local(j)- x_mid)/width );

      // fit the regressor locally; store the regression point, its
      // prediction and its standard error
      regressor_->fit(x_local,y_local,w);
      assert(i<x_.size());
      assert(i<y_predicted_.size());
      assert(i<y_err_.size());
      x_(i) = x(i*step_size);
      y_predicted_(i) = regressor_->predict(x(i*step_size));
      y_err_(i) = std::sqrt(regressor_->standard_error2(x(i*step_size)));
    }
  }
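
  // Worked illustration of the window bookkeeping in fit() above,
  // using assumed numbers (not taken from the library): with
  // data_.size()=100, step_size=10 and nof_points=25, nof_fits=10.
  // For i=3, max_index=35 falls in the middle case, so min_index=11,
  // the window covers indices [11,35], width=(x(35)-x(11))/2 and
  // x_mid=x(11)+width; the fit is stored at the regression point
  // x(30). For i=0, max_index=5 falls in the left border case, so
  // min_index=0, width=(x(5)-x(0))*24/10 and x_mid=x(5)-width.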

  const utility::vector& Local::x(void) const
  {
    return x_;
  }

  const utility::vector& Local::y_predicted(void) const
  {
    return y_predicted_;
  }

  const utility::vector& Local::y_err(void) const
  {
    return y_err_;
  }

  std::ostream& operator<<(std::ostream& os, const Local& r)
  {
    os << "# column 1: x\n"
       << "# column 2: y\n"
       << "# column 3: y_err\n";
    for (size_t i=0; i<r.x().size(); i++) {
      os << r.x()(i) << "\t"
         << r.y_predicted()(i) << "\t"
         << r.y_err()(i) << "\n";
    }

    return os;
  }
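
  /*
    A minimal usage sketch (illustration only, not part of the
    library). It assumes the concrete classes KernelBox and
    LinearWeighted from yat::regression and the installed header
    layout; any other Kernel and OneDimensionalWeighted
    implementation can be swapped in.

      #include "yat/regression/KernelBox.h"
      #include "yat/regression/LinearWeighted.h"
      #include "yat/regression/Local.h"
      #include <cstddef>
      #include <iostream>

      void example(void)
      {
        theplu::yat::regression::KernelBox kernel;
        theplu::yat::regression::LinearWeighted linear;
        theplu::yat::regression::Local local(linear, kernel);
        for (size_t i=0; i<100; ++i)
          local.add(0.01*i, 2.0*0.01*i); // (x,y) data points
        local.fit(10, 25);               // step_size=10, nof_points=25
        std::cout << local;              // columns: x, y_predicted, y_err
      }
  */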

}}} // of namespaces regression, yat, and theplu