
Commit adef654

Merge pull request #23 from TobiasWallner/dev
Dev
2 parents 04a0a93 + aaa0118

4 files changed: +171, -40 lines

include/controlpp/Estimators.hpp (156 additions, 34 deletions)
@@ -21,7 +21,7 @@ namespace controlpp
     */
    template<class T, int XRows, int XCols, int XOpt, int XMaxRows, int XMaxCols>
    requires((XRows >= XCols) || (XRows == Eigen::Dynamic) || (XCols == Eigen::Dynamic))
-   Eigen::Vector<T, XCols> least_squares(const Eigen::Matrix<T, XRows, XCols, XOpt, XMaxRows, XMaxCols>& X, const Eigen::Vector<T, XRows>& y) noexcept {
+   Eigen::Vector<T, XCols> least_squares(const Eigen::Matrix<T, XRows, XCols, XOpt, XMaxRows, XMaxCols>& X, const Eigen::Vector<T, XRows>& y) {
        Eigen::Vector<T, XCols> result = X.colPivHouseholderQr().solve(y).eval();
        return result;
    }
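Below is a minimal usage sketch of the `least_squares` helper changed in this hunk. Only the function signature and the QR-based solve come from the diff; the include path `<controlpp/Estimators.hpp>` and the sample data are assumptions for illustration.

    #include <iostream>
    #include <Eigen/Dense>
    #include <controlpp/Estimators.hpp> // assumed install layout

    int main(){
        // Overdetermined system: fit a line y = a + b*x through four samples.
        Eigen::Matrix<double, 4, 2> X;
        X << 1.0, 0.0,
             1.0, 1.0,
             1.0, 2.0,
             1.0, 3.0;
        Eigen::Vector<double, 4> y;
        y << 1.1, 2.9, 5.2, 7.1;

        // Solved internally via the column-pivoting Householder QR shown above.
        const Eigen::Vector<double, 2> p = controlpp::least_squares(X, y);
        std::cout << "a = " << p(0) << ", b = " << p(1) << '\n';
    }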
@@ -45,20 +45,26 @@ namespace controlpp
     *
     * ---
     *
-    * Note that for a number of outputs greater than one (`NOutputs>1`)
+    * For a number of outputs greater than one (`NOutputs>1`)
     * the model uses the 'Multi-Output System with shared parameter vector'.
-    *
     * This allows to use multiple sensors observing the same
     * system states and parameters to increase the estimation result.
     *
+    * There is a default regularisation therm (default: 1e-9) added to the diagonal elements of the covariance updata
+    *
+    * There is gain clamping (default: [-10, +10]) applied to the parameter update therem K.
     */
    template<class T, size_t NParams, size_t NMeasurements = 1>
    class ReccursiveLeastSquares{
        private:
 
-           Eigen::Matrix<T, NParams, NParams> _cov;    ///< previous covariance
-           Eigen::Vector<T, NParams> _param;           ///< previous parameter estimate
-           double _memory = 0.98;
+           Eigen::Matrix<T, NParams, NParams> _cov;    ///< previous covariance
+           Eigen::Vector<T, NParams> _param;           ///< previous parameter estimate
+           Eigen::Vector<T, NParams> _K;
+           T _memory = 0.98;
+           T _cov_regularisation = 1e-9;
+           T _gain_clamp = 10;
+
 
        public:
 
@@ -80,21 +86,18 @@ namespace controlpp
     * - `memory` < 1: forgetting older values with an exponential decay
     *                 Often used values are between 0.8 and 0.98
     *
-    * \param cov_reg A value that will be added to the diagonal of the covariance matrix at each update
-    * to prevent the covariance to be become too small, ill formed and unregular. This is mainly to increase numerical stability.
     */
    inline ReccursiveLeastSquares(
        const Eigen::Vector<T, NParams>& param_hint = Eigen::Vector<T, NParams>().setOne(),
-       const Eigen::Matrix<T, NParams, NParams>& cov_hint = (Eigen::Matrix<T, NParams, NParams>::Identity() * T(1000)),
-       double memory = 0.95
-       )
-       : _cov(cov_hint)
-       , _param(param_hint)
+       T memory = 0.99)
+       : _param(param_hint)
        , _memory(memory)
    {
        if(memory <= T(0) || memory > T(1)){
            throw std::invalid_argument("Error: ReccursiveLeastSquares::ReccursiveLeastSquares(): memory has to be in the open-closed range of: (0, 1]");
        }
+       this->_cov = (Eigen::Matrix<T, NParams, NParams>::Identity() * T(1000));
+       this->_K.setZero();
    }
 
    /**
@@ -103,30 +106,37 @@ namespace controlpp
     * \param s The known system inputs
     * \returns The new parameter state estimate
     */
-   void add(const T& y, const Eigen::Matrix<T, NMeasurements, NParams>& s) noexcept {
-       const Eigen::Matrix<T, NMeasurements, NMeasurements> I = Eigen::Matrix<T, NMeasurements, NMeasurements>::Identity();
+   void add(const T& y, const Eigen::Matrix<T, NMeasurements, NParams>& s) {
+       this->_cov.diagonal().array() += this->_cov_regularisation;
 
        // Gain
        const auto A = (this->_cov * s.transpose()).eval();
-       const auto B = (this->_memory * I + s * this->_cov * s.transpose()).eval();
+       const Eigen::Matrix<T, NMeasurements, NMeasurements> I_m = Eigen::Matrix<T, NMeasurements, NMeasurements>::Identity();
+       auto B1 = (s * this->_cov * s.transpose()).eval();
+       B1.diagonal().array() += this->_memory;
+       const auto B = ((B1 + B1.transpose())/2).eval(); // force symetry
 
        // calculate: K = A * B^-1
-       const Eigen::Vector<T, NParams> K = B.transpose().llt().solve(A.transpose()).transpose().eval();
+       this->_K = B.transpose().llt().solve(A.transpose()).transpose().eval();
+
+       // Limit the update gain
+       for(int i = 0; i < this->_K.size(); ++i){
+           this->_K.at(i) = std::clamp(this->_K.at(i), -this->_gain_clamp, this->_gain_clamp);
+       }
 
        // Update
-       const Eigen::Vector<T, NParams> new_param = (this->_param + K * (y - s * this->_param)).eval();
-       this->_param = new_param;
+       this->_param += this->_K * (y - s * this->_param);
 
        // Covariance
-       const Eigen::Matrix<T, NParams, NParams> new_cov = (this->_cov - K * s * this->_cov).eval() / this->_memory;
-       this->_cov = new_cov;
+       this->_cov -= this->_K * s * this->_cov;
+       this->_cov /= this->_memory;
    }
 
    /**
     * \brief returns the current best estimate
     * \returns the parameter vector
     */
-   inline const Eigen::Vector<T, NParams>& estimate() const noexcept {return this->_param;}
+   inline const Eigen::Vector<T, NParams>& estimate() const {return this->_param;}
 
    /**
     * \brief returns the current covariance
@@ -135,7 +145,70 @@ namespace controlpp
     *
     * \returns the current covariance matrix
     */
-   inline const Eigen::Matrix<T, NParams, NParams>& covariance() const noexcept {return this->_cov;}
+   [[nodiscard]] inline const Eigen::Matrix<T, NParams, NParams>& cov() const {return this->_cov;}
+
+   inline void set_cov(const Eigen::Matrix<T, NParams, NParams>& cov) {
+       this->_cov = cov;
+   }
+
+   /**
+    * @brief Sets the memory factor
+    *
+    * The memory factor [0, 1], where:
+    * - larger values: Old parameters and inputs are remembered for longer (slower changes, more robust to noise).
+    * - smaller values: Old parameters are forgotten more quickly (faster changes)
+    *
+    * @param memory The new memory factor
+    */
+   inline void set_memory(const T& memory){
+       this->_memory = memory;
+   }
+
+   /**
+    * @brief Returns the memory factor
+    *
+    * The memory factor [0, 1], where:
+    * - larger values: Old parameters and inputs are remembered for longer (slower changes, more robust to noise).
+    * - smaller values: Old parameters are forgotten more quickly (faster changes)
+    *
+    * @returns The current memory factor
+    */
+   [[nodiscard]] inline const T& memory() const {
+       return this->_memory;
+   }
+
+   /**
+    * \brief Limits the update gain K.
+    *
+    * Limits the update gain K from -gain_clamp to +gain_clamp.
+    * Prevents too fast updates and ill conditioned updates. For example from a lack of excitation variety
+    *
+    * default is 10
+    *
+    * \param gain_clamp The new gain clamp
+    */
+   inline void set_gain_clamp(const T& gain_clamp) {
+       this->_gain_clamp = gain_clamp;
+   }
+
+   /**
+    * @brief Returns the currect gain clamp factor
+    * @return The currect gain clamp factor
+    */
+   [[nodiscard]] inline const T& gain_clamp(){
+       return this->_gain_clamp;
+   }
+
+
+
+   /**
+    * @brief Returns the gain K used in the parameter update
+    *
+    * The gain K can be seen as a measure of uncertainty
+    *
+    * @return The gain vector;
+    */
+   inline const Eigen::Vector<T, NParams>& gain() const {return this->_K;}
    };
 
    /**
@@ -174,7 +247,11 @@ namespace controlpp
 
        Eigen::Matrix<T, NParams, NParams> _cov;    ///< previous covariance
        Eigen::Vector<T, NParams> _param;           ///< previous parameter estimate
-       double _memory = 0.98;
+       Eigen::Vector<T, NParams> _K;
+       T _memory = 0.98;
+       T _cov_regularisation = 1e-9;
+       T _gain_clamp = 10;
+
 
    public:
 
@@ -196,20 +273,25 @@ namespace controlpp
     * - `memory` < 1: forgetting older values with an exponential decay
     *                 Often used values are between `0.9 `and `0.995`.
     *
-    * \param cov_reg A value that will be added to the diagonal of the covariance matrix at each update
+    * \param cov_regularisation A value that will be added to the diagonal of the covariance matrix before each update
     * to prevent the covariance to be become too small, ill formed and unregular. This is mainly to increase numerical stability.
+    *
     */
    inline ReccursiveLeastSquares(
        const Eigen::Vector<T, NParams>& param_hint = Eigen::Vector<T, NParams>().setOnes(),
        const Eigen::Matrix<T, NParams, NParams>& cov_hint = (Eigen::Matrix<T, NParams, NParams>::Identity() * T(1000)),
-       double memory = 0.99)
+       T memory = 0.99,
+       T cov_regularisation = 1e-9
+       )
        : _cov(cov_hint)
        , _param(param_hint)
        , _memory(memory)
+       , _cov_regularisation(cov_regularisation)
    {
        if(memory <= T(0) || memory > T(1)){
            throw std::invalid_argument("Error: ReccursiveLeastSquares::ReccursiveLeastSquares(): memory has to be in the open-closed range of: (0, 1]");
        }
+       this->_K.setZero();
    }
 
    inline void set_cov(const Eigen::Matrix<T, NParams, NParams>& cov){
@@ -224,29 +306,44 @@ namespace controlpp
        this->_memory = memory;
    }
 
+   inline void set_gain_clamp(const T& gain_clamp){
+       this->_gain_clamp = gain_clamp;
+   }
+
+   [[nodiscard]] inline const T& gain_clamp() const {
+       return this->_gain_clamp;
+   }
+
    /**
     * \brief Adds a new input output pair that updates the estimate
     * \param y The new system measurements/outputs
     * \param s The known system inputs
     * \returns The new parameter state estimate
     */
-   void add(const T& y, const Eigen::Vector<T, NParams>& s) noexcept {
+   void add(const T& y, const Eigen::Vector<T, NParams>& s) {
+       this->_cov.diagonal().array() += this->_cov_regularisation;
+
        // Gain
-       const Eigen::Vector<T, NParams> K = ((this->_cov * s) / (this->_memory + s.transpose() * this->_cov * s)).eval();
+       const auto A = (this->_cov * s).eval();
+       const T B = this->_memory + s.transpose() * this->_cov * s;
+       this->_K = A / B;
+
+       for(int i = 0; i < this->_K.size(); ++i){
+           this->_K(i) = std::clamp(this->_K(i), -this->_gain_clamp, this->_gain_clamp);
+       }
 
        // Update
-       const Eigen::Vector<T, NParams> new_param = (this->_param + K * (y - s.transpose() * this->_param)).eval();
-       this->_param = new_param;
+       this->_param += this->_K * (y - s.transpose() * this->_param);
 
-       const Eigen::Matrix<T, NParams, NParams> new_cov = ((this->_cov - K * s.transpose() * this->_cov) / this->_memory).eval();
-       this->_cov = new_cov;
+       this->_cov -= this->_K * s.transpose() * this->_cov;
+       this->_cov /= this->_memory;
    }
 
    /**
     * \brief returns the current best estimate
     * \returns the parameter vector
     */
-   inline const Eigen::Vector<T, NParams>& estimate() const noexcept {return this->_param;}
+   inline const Eigen::Vector<T, NParams>& estimate() const {return this->_param;}
 
    /**
     * \brief returns the current covariance
@@ -255,7 +352,16 @@ namespace controlpp
     *
     * \returns the current covariance matrix
     */
-   inline const Eigen::Matrix<T, NParams, NParams>& cov() const noexcept {return this->_cov;}
+   inline const Eigen::Matrix<T, NParams, NParams>& cov() const {return this->_cov;}
+
+   /**
+    * @brief Returns the gain used for the updata
+    *
+    * The gain can be seen as a measurement of uncertainty
+    *
+    * @return The gain used in the parameter update
+    */
+   inline const Eigen::Vector<T, NParams>& gain() const {return this->_K;}
    };
 
    /**
@@ -369,6 +475,22 @@ namespace controlpp
            return result;
        }
 
+       const Eigen::Matrix<T, NumOrder+1 + DenOrder, NumOrder+1 + DenOrder> cov(){
+           return this->rls.cov();
+       }
+
+       const Eigen::Vector<T, NumOrder+1 + DenOrder>& gain(){
+           return this->rls.gain();
+       }
+
+       void set_gain_clamp(const T& g){
+           this->rls.set_gain_clamp(g);
+       }
+
+       [[nodiscard]] const T& gain_clamp() const {
+           return this->rls.gain_clamp();
+       }
+
    };
 
} // namespace controlpp
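A short usage sketch of the updated ReccursiveLeastSquares interface introduced in this file (memory factor, gain clamp, and the new gain()/cov() accessors). It assumes the single-measurement variant whose add() takes an Eigen::Vector regressor and an include path of <controlpp/Estimators.hpp>; the quadratic test model and noise values are invented for illustration.

    #include <cmath>
    #include <iostream>
    #include <random>
    #include <Eigen/Dense>
    #include <controlpp/Estimators.hpp> // assumed install layout

    int main(){
        // True model: y = 2 + 0.5*u - 0.1*u^2, observed with small noise.
        controlpp::ReccursiveLeastSquares<double, 3> rls;
        rls.set_memory(0.98);     // forget old samples slowly
        rls.set_gain_clamp(5.0);  // tighter limit than the default +-10 on the update gain K

        std::mt19937 rng(42);
        std::normal_distribution<double> noise(0.0, 0.01);

        for(int i = 0; i < 200; ++i){
            const double u = std::sin(0.1 * i);                         // excitation
            const double y = 2.0 + 0.5 * u - 0.1 * u * u + noise(rng);  // measurement
            const Eigen::Vector<double, 3> s(1.0, u, u * u);            // regressor for this sample
            rls.add(y, s);
        }

        std::cout << "estimate: " << rls.estimate().transpose() << '\n';
        std::cout << "gain K:   " << rls.gain().transpose() << '\n';
        std::cout << "cov diag: " << rls.cov().diagonal().transpose() << '\n';
    }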

include/controlpp/KalmanFilter.hpp (4 additions, 3 deletions)
@@ -259,9 +259,10 @@ namespace controlpp
        // > note S is already symetric, so no need to transpose
        //
        // K = (S.inverse() * U.transpose()).transpose()
-       const auto U = P_p * _H.transpose();
-       const auto S = _H * P_p * _H.transpose() + _R;
-       const Eigen::Matrix<T, NStates, NMeasurements> K = (S.ldlt().solve(U.transpose())).transpose();
+       const auto U = (P_p * _H.transpose()).eval();
+       const auto S1 = _H * P_p * _H.transpose() + _R;
+       const auto S = ((S1 + S1.transpose()) / 2).eval();
+       const Eigen::Matrix<T, NStates, NMeasurements> K = (S.llt().solve(U.transpose())).transpose();
 
        _x = x_p + K * (z - _H * x_p);
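For reference, the rewritten lines still compute the standard Kalman measurement update; the functional change is that the innovation covariance is symmetrised and solved with a Cholesky (LLT) factorisation instead of the previous LDLT:

    \tilde S = H P_p H^\top + R, \qquad
    S = \tfrac{1}{2}\bigl(\tilde S + \tilde S^\top\bigr), \qquad
    K = \bigl(S^{-1} (P_p H^\top)^\top\bigr)^\top = P_p H^\top S^{-1}, \qquad
    x = x_p + K\,(z - H x_p)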
include/controlpp/TransferFunction.hpp (2 additions, 2 deletions)
@@ -203,12 +203,12 @@ namespace controlpp
 
        void print(std::ostream& stream, std::string_view var="x") const {
            stream << "num: "; this->num().print(stream, var);
-           stream << "\nden: "; this->den().print(stream, var); stream << '\n';
+           stream << "\nden: "; this->den().print(stream, var);
        }
 
        void print(std::ostream& stream, std::function<std::string(int i)> var) const {
            stream << "num: "; this->num().print(stream, var);
-           stream << "\nden: "; this->den().print(stream, var); stream << '\n';
+           stream << "\nden: "; this->den().print(stream, var);
        }
 
        friend std::ostream& operator<<(std::ostream& stream, const TransferFunction& rpoly){
tests/Estimators_Tests.cpp (9 additions, 1 deletion)
@@ -83,6 +83,13 @@ TEST(Estimators, DtfEstimator){
        const double u = (i == 0) ? 0.0 : 1.0;
        const double y = dssf.input(u);
        dtf_est.add(y, u);
+       // std::cout << "iteration: " << i << std::endl;
+       // std::cout << "- estimage:" << std::endl;
+       // std::cout << dtf_est.estimate() << std::endl;
+       // std::cout << "- covariance:" << std::endl;
+       // std::cout << dtf_est.cov() << std::endl;
+       // std::cout << "- gain:" << std::endl;
+       // std::cout << dtf_est.gain().transpose() << '\n' << std::endl;
    }
 
    const auto Gz_est = dtf_est.estimate();
@@ -96,7 +103,8 @@ TEST(Estimators, DtfEstimator){
        const double u = 1.0;
        const double y = dssf.input(u);
        const double y_est = dssf_est.input(u);
-       ASSERT_NEAR(y, y_est, 0.01);
+       // std::cout << "iteration: " << i << ", y: " << y << ", y_est: " << y_est << std::endl;
+       ASSERT_NEAR(y, y_est, 0.05);
    }
 
}

0 commit comments