From 550053561a5651c3d75891011f64683e3020f557 Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Fri, 1 Mar 2019 17:24:32 +0100 Subject: [PATCH 1/2] Kriging: Implement Gu robust parameter estimation We add optimization of the kriging parameters according to gu2016: http://doc.openturns.org/papers/gu2016.pdf It consists in penalizing the integrated likelihood by a chosen prior. We implement the two proposed types of prior: - jointly-robust prior - reference prior In addition to the new likelihood estimation options, we add the parametrization of the covariance model scale parameter: - default - inverse - log-inverse --- lib/etc/openturns.conf.in | 2 + lib/src/Base/Common/ResourceMap.cxx | 2 + lib/src/Base/Stat/AbsoluteExponential.cxx | 37 +- lib/src/Base/Stat/CovarianceModel.cxx | 18 + .../Stat/CovarianceModelImplementation.cxx | 133 ++- lib/src/Base/Stat/ExponentialModel.cxx | 36 +- .../Stat/ExponentiallyDampedCosineModel.cxx | 37 +- lib/src/Base/Stat/GeneralizedExponential.cxx | 37 +- lib/src/Base/Stat/MaternModel.cxx | 26 +- lib/src/Base/Stat/ProductCovarianceModel.cxx | 32 + lib/src/Base/Stat/SphericalModel.cxx | 37 +- lib/src/Base/Stat/SquaredExponential.cxx | 37 +- .../Base/Stat/TensorizedCovarianceModel.cxx | 6 + .../Base/Stat/openturns/CovarianceModel.hxx | 8 + .../CovarianceModelImplementation.hxx | 11 + lib/src/Base/Stat/openturns/MaternModel.hxx | 2 + .../Stat/openturns/ProductCovarianceModel.hxx | 7 + .../openturns/TensorizedCovarianceModel.hxx | 3 + .../Kriging/GeneralLinearModelAlgorithm.cxx | 292 ++++- .../MetaModel/Kriging/KrigingAlgorithm.cxx | 11 + .../openturns/GeneralLinearModelAlgorithm.hxx | 16 + .../Kriging/openturns/KrigingAlgorithm.hxx | 6 + python/doc/bibliography.rst | 6 + .../meta_modeling/kriging_robust.ipynb | 170 +++ .../examples/meta_modeling/meta_modeling.rst | 1 + .../theory/meta_modeling/kriging_robust.rst | 122 ++ .../theory/meta_modeling/meta_modeling.rst | 1 + .../CovarianceModelImplementation_doc.i.in | 49 + python/src/CovarianceModel_doc.i.in | 6 + .../src/GeneralLinearModelAlgorithm_doc.i.in | 38 + python/src/KrigingAlgorithm_doc.i.in | 4 + python/test/CMakeLists.txt | 1 + python/test/t_KrigingAlgorithm_gu.expout | 15 + python/test/t_KrigingAlgorithm_gu.py | 151 +++ validation/src/KrigingGu/Gu.py | 295 +++++ validation/src/KrigingGu/ValidGuPython.py | 1056 +++++++++++++++++ validation/src/KrigingGu/ValidRobustGasp.py | 179 +++ .../KrigingGu/compute_validation_values.py | 168 +++ 38 files changed, 2992 insertions(+), 66 deletions(-) create mode 100644 python/doc/examples/meta_modeling/kriging_robust.ipynb create mode 100644 python/doc/theory/meta_modeling/kriging_robust.rst create mode 100644 python/test/t_KrigingAlgorithm_gu.expout create mode 100755 python/test/t_KrigingAlgorithm_gu.py create mode 100644 validation/src/KrigingGu/Gu.py create mode 100755 validation/src/KrigingGu/ValidGuPython.py create mode 100755 validation/src/KrigingGu/ValidRobustGasp.py create mode 100644 validation/src/KrigingGu/compute_validation_values.py diff --git a/lib/etc/openturns.conf.in b/lib/etc/openturns.conf.in index 83948395774..992ee85a584 100644 --- a/lib/etc/openturns.conf.in +++ b/lib/etc/openturns.conf.in @@ -606,6 +606,8 @@ + + diff --git a/lib/src/Base/Common/ResourceMap.cxx b/lib/src/Base/Common/ResourceMap.cxx index 8c93dbeb016..d109777fa15 100644 --- a/lib/src/Base/Common/ResourceMap.cxx +++ b/lib/src/Base/Common/ResourceMap.cxx @@ -1218,6 +1218,8 @@ void ResourceMap::loadDefaultConfiguration() addAsScalar("GeneralLinearModelAlgorithm-StartingScaling", 
1.0e-13); addAsString("GeneralLinearModelAlgorithm-DefaultOptimizationAlgorithm", "TNC"); addAsString("GeneralLinearModelAlgorithm-LinearAlgebra", "LAPACK"); + addAsScalar( "GeneralLinearModelAlgorithm-JointlyRobustPriorB0", 1.0); + addAsScalar( "GeneralLinearModelAlgorithm-JointlyRobustPriorB1", 0.2); // KrigingAlgorithm parameters // addAsScalar("KrigingAlgorithm-MaximalScaling", 1.0e5); diff --git a/lib/src/Base/Stat/AbsoluteExponential.cxx b/lib/src/Base/Stat/AbsoluteExponential.cxx index 7cc3716af50..c4d73d3aa12 100644 --- a/lib/src/Base/Stat/AbsoluteExponential.cxx +++ b/lib/src/Base/Stat/AbsoluteExponential.cxx @@ -64,8 +64,25 @@ AbsoluteExponential * AbsoluteExponential::clone() const Scalar AbsoluteExponential::computeStandardRepresentative(const Point & tau) const { if (tau.getDimension() != inputDimension_) throw InvalidArgumentException(HERE) << "Error: expected a shift of dimension=" << inputDimension_ << ", got dimension=" << tau.getDimension(); + + Point theta(scale_); + switch (scaleParametrization_) + { + case STANDARD: + // nothing to do + break; + case INVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = 1.0 / scale_[i]; + break; + case LOGINVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = std::exp(- scale_[i]); + break; + } + Point tauOverTheta(inputDimension_); - for (UnsignedInteger i = 0; i < inputDimension_; ++i) tauOverTheta[i] = tau[i] / scale_[i]; + for (UnsignedInteger i = 0; i < inputDimension_; ++i) tauOverTheta[i] = tau[i] / theta[i]; const Scalar tauOverThetaNorm = tauOverTheta.norm1(); return tauOverThetaNorm <= SpecFunc::ScalarEpsilon ? 1.0 + nuggetFactor_ : exp(-tauOverThetaNorm); } @@ -73,12 +90,28 @@ Scalar AbsoluteExponential::computeStandardRepresentative(const Point & tau) con Scalar AbsoluteExponential::computeStandardRepresentative(const Collection::const_iterator & s_begin, const Collection::const_iterator & t_begin) const { + Point theta(scale_); + switch (scaleParametrization_) + { + case STANDARD: + // nothing to do + break; + case INVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = 1.0 / scale_[i]; + break; + case LOGINVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = std::exp(- scale_[i]); + break; + } + Scalar tauOverThetaNorm = 0; Collection::const_iterator s_it = s_begin; Collection::const_iterator t_it = t_begin; for (UnsignedInteger i = 0; i < inputDimension_; ++i, ++s_it, ++t_it) { - tauOverThetaNorm += std::abs(*s_it - *t_it) / scale_[i]; + tauOverThetaNorm += std::abs(*s_it - *t_it) / theta[i]; } return tauOverThetaNorm <= SpecFunc::ScalarEpsilon ? 
1.0 + nuggetFactor_ : exp(-tauOverThetaNorm); } diff --git a/lib/src/Base/Stat/CovarianceModel.cxx b/lib/src/Base/Stat/CovarianceModel.cxx index 3654e65360e..9a18418a3b9 100644 --- a/lib/src/Base/Stat/CovarianceModel.cxx +++ b/lib/src/Base/Stat/CovarianceModel.cxx @@ -221,6 +221,18 @@ void CovarianceModel::setScale(const Point & scale) getImplementation()->setScale(scale); } +/* Scale accessor */ +CovarianceModel::ScaleParametrization CovarianceModel::getScaleParametrization() const +{ + return getImplementation()->getScaleParametrization(); +} + +void CovarianceModel::setScaleParametrization(const ScaleParametrization scaleParametrization) +{ + copyOnWrite(); + getImplementation()->setScaleParametrization(scaleParametrization); +} + /* Spatial correlation accessor */ CorrelationMatrix CovarianceModel::getOutputCorrelation() const { @@ -304,6 +316,12 @@ Bool CovarianceModel::isDiagonal() const return getImplementation()->isDiagonal(); } +/* Is it a composite covariance model ? */ +Bool CovarianceModel::isComposite() const +{ + return getImplementation()->isComposite(); +} + /* Drawing method */ Graph CovarianceModel::draw(const UnsignedInteger rowIndex, const UnsignedInteger columnIndex, diff --git a/lib/src/Base/Stat/CovarianceModelImplementation.cxx b/lib/src/Base/Stat/CovarianceModelImplementation.cxx index fa50ae05bb8..4ce40334a08 100644 --- a/lib/src/Base/Stat/CovarianceModelImplementation.cxx +++ b/lib/src/Base/Stat/CovarianceModelImplementation.cxx @@ -45,6 +45,7 @@ static const Factory Factory_CovarianceModelImple CovarianceModelImplementation::CovarianceModelImplementation(const UnsignedInteger inputDimension) : PersistentObject() , scale_(inputDimension, 1.0) + , scaleParametrization_(STANDARD) , inputDimension_(inputDimension) , amplitude_(1, 1.0) , outputDimension_(1) @@ -65,6 +66,7 @@ CovarianceModelImplementation::CovarianceModelImplementation(const Point & scale const Point & amplitude) : PersistentObject() , scale_(0) + , scaleParametrization_(STANDARD) , inputDimension_(scale.getDimension()) , amplitude_(0) , outputDimension_(amplitude.getDimension()) @@ -88,6 +90,7 @@ CovarianceModelImplementation::CovarianceModelImplementation(const Point & scale const CorrelationMatrix & spatialCorrelation) : PersistentObject() , scale_(0) + , scaleParametrization_(STANDARD) , inputDimension_(scale.getDimension()) , amplitude_(0) , outputDimension_(amplitude.getDimension()) @@ -110,6 +113,7 @@ CovarianceModelImplementation::CovarianceModelImplementation(const Point & scale const CovarianceMatrix & spatialCovariance) : PersistentObject() , scale_(0) + , scaleParametrization_(STANDARD) , inputDimension_(scale.getDimension()) , amplitude_(0) , outputDimension_(spatialCovariance.getDimension()) @@ -597,12 +601,84 @@ Point CovarianceModelImplementation::getScale() const void CovarianceModelImplementation::setScale(const Point & scale) { if (scale.getDimension() != inputDimension_) throw InvalidArgumentException(HERE) << "In CovarianceModelImplementation::setScale: the given scale has a dimension=" << scale.getDimension() << " different from the input dimension=" << inputDimension_; - for (UnsignedInteger index = 0; index < inputDimension_; ++index) - if (!(scale[index] > 0.0)) - throw InvalidArgumentException(HERE) << "In CovarianceModelImplementation::setScale: the component " << index << " of scale is non positive" ; + if ((scaleParametrization_ == STANDARD) || (scaleParametrization_ == INVERSE)) + for (UnsignedInteger index = 0; index < inputDimension_; ++index) + if (!(scale[index] > 
0.0)) + throw InvalidArgumentException(HERE) << "In CovarianceModelImplementation::setScale: the component " << index << " of scale is non positive" ; scale_ = scale; } + +// Scale parametrization accessor +CovarianceModelImplementation::ScaleParametrization CovarianceModelImplementation::getScaleParametrization() const +{ + return scaleParametrization_; +} + +void CovarianceModelImplementation::setScaleParametrization(const ScaleParametrization scaleParametrization) +{ + Point scale(getScale()); + switch (scaleParametrization_) + { + case STANDARD: + switch (scaleParametrization) + { + case STANDARD: + // nothing to do + break; + case INVERSE: + // STANDARD -> INVERSE + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + scale[i] = 1.0 / scale[i]; + break; + case LOGINVERSE: + // STANDARD -> LOGINVERSE + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + scale[i] = -std::log(scale[i]); + break; + } + break; + case INVERSE: + switch (scaleParametrization) + { + case STANDARD: + // INVERSE -> STANDARD + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + scale[i] = 1.0 / scale[i]; + break; + case INVERSE: + // nothing to do + break; + case LOGINVERSE: + // INVERSE -> LOGINVERSE + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + scale[i] = -std::log(1.0 / scale[i]); + break; + } + break; + case LOGINVERSE: + switch (scaleParametrization) + { + case STANDARD: + // LOGINVERSE -> STANDARD + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + scale[i] = std::exp(- scale[i]); + break; + case INVERSE: + // LOGINVERSE -> INVERSE + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + scale[i] = 1.0 / std::exp(- scale[i]); + break; + case LOGINVERSE: + // nothing to do + break; + } + break; + } + scaleParametrization_ = scaleParametrization; + setScale(scale); +} + /* Output correlation accessor */ CorrelationMatrix CovarianceModelImplementation::getOutputCorrelation() const { @@ -649,29 +725,23 @@ void CovarianceModelImplementation::setNuggetFactor(const Scalar nuggetFactor) void CovarianceModelImplementation::setFullParameter(const Point & parameter) { // Here we manage only the generic parameters - // First the scale parameter - UnsignedInteger index = 0; // Check the size const UnsignedInteger totalSize = inputDimension_ + outputDimension_ * (outputDimension_ + 1) / 2; if (parameter.getSize() < totalSize) throw InvalidArgumentException(HERE) << "In CovarianceModelImplementation::setFullParameter, points have incompatible size. 
Point size = " << parameter.getSize() << " whereas expected size = " << totalSize ; - for (UnsignedInteger i = 0; i < inputDimension_; ++ i) - { - if (!(parameter[index] > 0.0)) - throw InvalidArgumentException(HERE) << "In CovarianceModelImplementation::setParameter, the component " << index << " of scale is non positive" ; - scale_[i] = parameter[index]; - ++ index; - } + // First the scale parameter + Point scale(inputDimension_); + std::copy(parameter.begin(), parameter.begin() + inputDimension_, scale.begin()); + setScale(scale); + // Second the amplitude parameter - for (UnsignedInteger i = 0; i < outputDimension_; ++ i) - { - if (!(parameter[index] > 0.0)) - throw InvalidArgumentException(HERE) << "In CovarianceModelImplementation::setParameter, the component " << index << " of amplitude is non positive" ; - amplitude_[i] = parameter[index]; - ++ index; - } + Point amplitude(outputDimension_); + std::copy(parameter.begin() + inputDimension_, parameter.begin() + inputDimension_ + outputDimension_, amplitude.begin()); + setAmplitude(amplitude); + + UnsignedInteger index = inputDimension_ + outputDimension_; CorrelationMatrix outputCorrelation(outputDimension_); // Third the output correlation parameter, only the lower triangle for (UnsignedInteger i = 0; i < outputDimension_; ++ i) @@ -710,8 +780,21 @@ Description CovarianceModelImplementation::getFullParameterDescription() const // First the scale parameter Description description(0); // First the scale parameter + String scalePrefix; + switch (scaleParametrization_) + { + case STANDARD: + // empty + break; + case INVERSE: + scalePrefix = "inverse_"; + break; + case LOGINVERSE: + scalePrefix = "loginverse_"; + break; + } for (UnsignedInteger j = 0; j < inputDimension_; ++j) - description.add(OSS() << "scale_" << j); + description.add(OSS() << scalePrefix << "scale_" << j); // Second the amplitude parameter for (UnsignedInteger j = 0; j < outputDimension_; ++j) description.add(OSS() << "amplitude_" << j); @@ -791,6 +874,12 @@ Bool CovarianceModelImplementation::isDiagonal() const return isDiagonal_; } +/* Is it a composite model ? 
*/ +Bool CovarianceModelImplementation::isComposite() const +{ + return false; +} + /* Marginal accessor */ CovarianceModel CovarianceModelImplementation::getMarginal(const UnsignedInteger index) const { @@ -937,6 +1026,7 @@ void CovarianceModelImplementation::save(Advocate & adv) const { PersistentObject::save(adv); adv.saveAttribute("scale_", scale_); + adv.saveAttribute("scaleParametrization_", static_cast(scaleParametrization_)); adv.saveAttribute("inputDimension_", inputDimension_); adv.saveAttribute("amplitude_", amplitude_); adv.saveAttribute("outputDimension_", outputDimension_); @@ -952,6 +1042,9 @@ void CovarianceModelImplementation::load(Advocate & adv) { PersistentObject::load(adv); adv.loadAttribute("scale_", scale_); + UnsignedInteger scaleParametrization = 0; + adv.loadAttribute( "scaleParametrization_", scaleParametrization ); + scaleParametrization_ = static_cast(scaleParametrization); adv.loadAttribute("inputDimension_", inputDimension_); adv.loadAttribute("amplitude_", amplitude_); adv.loadAttribute("outputDimension_", outputDimension_); diff --git a/lib/src/Base/Stat/ExponentialModel.cxx b/lib/src/Base/Stat/ExponentialModel.cxx index 3670e2532b0..f2f6d16ccf2 100644 --- a/lib/src/Base/Stat/ExponentialModel.cxx +++ b/lib/src/Base/Stat/ExponentialModel.cxx @@ -82,7 +82,23 @@ Scalar ExponentialModel::computeStandardRepresentative(const Point & tau) const throw InvalidArgumentException(HERE) << "In ExponentialModel::computeStandardRepresentative: expected a shift of dimension=" << getInputDimension() << ", got dimension=" << tau.getDimension(); // Absolute value of tau / scale Point tauOverTheta(getInputDimension()); - for (UnsignedInteger i = 0; i < getInputDimension(); ++i) tauOverTheta[i] = tau[i] / scale_[i]; + Point theta(scale_); + switch (scaleParametrization_) + { + case STANDARD: + // nothing to do + break; + case INVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = 1.0 / scale_[i]; + break; + case LOGINVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = std::exp(- scale_[i]); + break; + } + + for (UnsignedInteger i = 0; i < getInputDimension(); ++i) tauOverTheta[i] = tau[i] / theta[i]; const Scalar tauOverThetaNorm = tauOverTheta.norm(); // Return value return (tauOverThetaNorm == 0.0 ? 
1.0 + nuggetFactor_ : exp(- tauOverThetaNorm )); @@ -91,12 +107,28 @@ Scalar ExponentialModel::computeStandardRepresentative(const Point & tau) const Scalar ExponentialModel::computeStandardRepresentative(const Collection::const_iterator & s_begin, const Collection::const_iterator & t_begin) const { + Point theta(scale_); + switch (scaleParametrization_) + { + case STANDARD: + // nothing to do + break; + case INVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = 1.0 / scale_[i]; + break; + case LOGINVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = std::exp(- scale_[i]); + break; + } + Scalar tauOverThetaNorm = 0; Collection::const_iterator s_it = s_begin; Collection::const_iterator t_it = t_begin; for (UnsignedInteger i = 0; i < inputDimension_; ++i, ++s_it, ++t_it) { - const Scalar dx = (*s_it - *t_it) / scale_[i]; + const Scalar dx = (*s_it - *t_it) / theta[i]; tauOverThetaNorm += dx * dx; } tauOverThetaNorm = sqrt(tauOverThetaNorm); diff --git a/lib/src/Base/Stat/ExponentiallyDampedCosineModel.cxx b/lib/src/Base/Stat/ExponentiallyDampedCosineModel.cxx index 712eb126250..993ef0a3ca0 100644 --- a/lib/src/Base/Stat/ExponentiallyDampedCosineModel.cxx +++ b/lib/src/Base/Stat/ExponentiallyDampedCosineModel.cxx @@ -79,8 +79,25 @@ Scalar ExponentiallyDampedCosineModel::computeStandardRepresentative(const Point { if (tau.getDimension() != inputDimension_) throw InvalidArgumentException(HERE) << "In ExponentiallyDampedCosineModel::computeStandardRepresentative: expected a shift of dimension=" << inputDimension_ << ", got dimension=" << tau.getDimension(); + + Point theta(scale_); + switch (scaleParametrization_) + { + case STANDARD: + // nothing to do + break; + case INVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = 1.0 / scale_[i]; + break; + case LOGINVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = std::exp(- scale_[i]); + break; + } + Point tauOverTheta(inputDimension_); - for (UnsignedInteger i = 0; i < inputDimension_; ++i) tauOverTheta[i] = tau[i] / scale_[i]; + for (UnsignedInteger i = 0; i < inputDimension_; ++i) tauOverTheta[i] = tau[i] / theta[i]; const Scalar absTau = tauOverTheta.norm(); if (absTau <= SpecFunc::ScalarEpsilon) return 1.0 + nuggetFactor_; @@ -90,12 +107,28 @@ Scalar ExponentiallyDampedCosineModel::computeStandardRepresentative(const Point Scalar ExponentiallyDampedCosineModel::computeStandardRepresentative(const Collection::const_iterator & s_begin, const Collection::const_iterator & t_begin) const { + Point theta(scale_); + switch (scaleParametrization_) + { + case STANDARD: + // nothing to do + break; + case INVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = 1.0 / scale_[i]; + break; + case LOGINVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = std::exp(- scale_[i]); + break; + } + Scalar absTau = 0; Collection::const_iterator s_it = s_begin; Collection::const_iterator t_it = t_begin; for (UnsignedInteger i = 0; i < inputDimension_; ++i, ++s_it, ++t_it) { - const Scalar dx = (*s_it - *t_it) / scale_[i]; + const Scalar dx = (*s_it - *t_it) / theta[i]; absTau += dx * dx; } absTau = sqrt(absTau); diff --git a/lib/src/Base/Stat/GeneralizedExponential.cxx b/lib/src/Base/Stat/GeneralizedExponential.cxx index 67d5bc8b797..172f14ab315 100644 --- a/lib/src/Base/Stat/GeneralizedExponential.cxx +++ b/lib/src/Base/Stat/GeneralizedExponential.cxx @@ -71,8 +71,25 @@ GeneralizedExponential * GeneralizedExponential::clone() const 
Scalar GeneralizedExponential::computeStandardRepresentative(const Point & tau) const { if (tau.getDimension() != inputDimension_) throw InvalidArgumentException(HERE) << "Error: expected a shift of dimension=" << inputDimension_ << ", got dimension=" << tau.getDimension(); + + Point theta(scale_); + switch (scaleParametrization_) + { + case STANDARD: + // nothing to do + break; + case INVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = 1.0 / scale_[i]; + break; + case LOGINVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = std::exp(- scale_[i]); + break; + } + Point tauOverTheta(inputDimension_); - for (UnsignedInteger i = 0; i < inputDimension_; ++i) tauOverTheta[i] = tau[i] / scale_[i]; + for (UnsignedInteger i = 0; i < inputDimension_; ++i) tauOverTheta[i] = tau[i] / theta[i]; const Scalar tauOverThetaNorm = tauOverTheta.norm(); return tauOverThetaNorm <= SpecFunc::ScalarEpsilon ? 1.0 + nuggetFactor_ : exp(-pow(tauOverThetaNorm, p_)); } @@ -80,12 +97,28 @@ Scalar GeneralizedExponential::computeStandardRepresentative(const Point & tau) Scalar GeneralizedExponential::computeStandardRepresentative(const Collection::const_iterator & s_begin, const Collection::const_iterator & t_begin) const { + Point theta(scale_); + switch (scaleParametrization_) + { + case STANDARD: + // nothing to do + break; + case INVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = 1.0 / scale_[i]; + break; + case LOGINVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = std::exp(- scale_[i]); + break; + } + Scalar tauOverThetaNorm = 0; Collection::const_iterator s_it = s_begin; Collection::const_iterator t_it = t_begin; for (UnsignedInteger i = 0; i < inputDimension_; ++i, ++s_it, ++t_it) { - const Scalar dx = (*s_it - *t_it) / scale_[i]; + const Scalar dx = (*s_it - *t_it) / theta[i]; tauOverThetaNorm += dx * dx; } tauOverThetaNorm = sqrt(tauOverThetaNorm); diff --git a/lib/src/Base/Stat/MaternModel.cxx b/lib/src/Base/Stat/MaternModel.cxx index 01805fca858..076c2d2d600 100644 --- a/lib/src/Base/Stat/MaternModel.cxx +++ b/lib/src/Base/Stat/MaternModel.cxx @@ -76,8 +76,24 @@ void MaternModel::computeLogNormalizationFactor() void MaternModel::computeSqrt2nuOverTheta() { + Point theta(scale_); + switch (scaleParametrization_) + { + case STANDARD: + // nothing to do + break; + case INVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = 1.0 / scale_[i]; + break; + case LOGINVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = std::exp(- scale_[i]); + break; + } + // Compute useful scaling factor - for(UnsignedInteger i = 0; i < inputDimension_; ++i) sqrt2nuOverTheta_[i] = sqrt(2.0 * nu_) / scale_[i]; + for(UnsignedInteger i = 0; i < inputDimension_; ++i) sqrt2nuOverTheta_[i] = sqrt(2.0 * nu_) / theta[i]; } /* Virtual constructor */ @@ -117,6 +133,13 @@ Scalar MaternModel::computeStandardRepresentative(const Collection::cons return exp(logNormalizationFactor_ + nu_ * std::log(scaledPoint) + SpecFunc::LogBesselK(nu_, scaledPoint)); } +void MaternModel::setScaleParametrization(const ScaleParametrization scaleParametrization) +{ + CovarianceModelImplementation::setScaleParametrization(scaleParametrization); + computeSqrt2nuOverTheta(); +} + + /* Gradient */ Matrix MaternModel::partialGradient(const Point & s, const Point & t) const @@ -128,6 +151,7 @@ Matrix MaternModel::partialGradient(const Point & s, for(UnsignedInteger i = 0; i < inputDimension_; ++i) scaledTau[i] = tau[i] * 
sqrt2nuOverTheta_[i]; const Scalar scaledTauNorm = scaledTau.norm(); const Scalar norm2 = scaledTauNorm * scaledTauNorm; + // For zero norm if (norm2 == 0.0) { diff --git a/lib/src/Base/Stat/ProductCovarianceModel.cxx b/lib/src/Base/Stat/ProductCovarianceModel.cxx index 4a017891d63..d8aad60fb85 100644 --- a/lib/src/Base/Stat/ProductCovarianceModel.cxx +++ b/lib/src/Base/Stat/ProductCovarianceModel.cxx @@ -81,6 +81,12 @@ void ProductCovarianceModel::setCollection(const CovarianceModelCollection & col // Handle 'specific' parameters extraParameterNumber_ = Indices(collection.getSize()); + // check that scale parametrizations match + const ScaleParametrization scaleParametrization = collection[0].getScaleParametrization(); + for (UnsignedInteger i = 1; i < size; ++ i) + if (collection[i].getScaleParametrization() != scaleParametrization) + throw InvalidArgumentException(HERE) << "ProductCovarianceModel does not allow different scale parametrizations"; + // Filling the active parameters activeParameter_ = Indices(0); for (UnsignedInteger i = 0; i < size; ++i) @@ -360,6 +366,26 @@ void ProductCovarianceModel::setScale(const Point & scale) scale_ = scale; } +// Scale parametrization accessor +CovarianceModelImplementation::ScaleParametrization ProductCovarianceModel::getScaleParametrization() const +{ + return collection_[0].getScaleParametrization(); +} + +void ProductCovarianceModel::setScaleParametrization(const ScaleParametrization scaleParametrization) +{ + for (UnsignedInteger i = 0; i < collection_.getSize(); ++i) + { + collection_[i].setScaleParametrization(scaleParametrization); + } + + // copy back scale + Point scale; + for (UnsignedInteger i = 0; i < collection_.getSize(); ++i) + scale.add(collection_[i].getScale()); + scale_ = scale; +} + /* Is it a stationary model ? */ Bool ProductCovarianceModel::isStationary() const { @@ -368,6 +394,12 @@ Bool ProductCovarianceModel::isStationary() const return true; } +/* Is it a composite model ? 
*/ +Bool ProductCovarianceModel::isComposite() const +{ + return true; +} + /* String converter */ String ProductCovarianceModel::__repr__() const { diff --git a/lib/src/Base/Stat/SphericalModel.cxx b/lib/src/Base/Stat/SphericalModel.cxx index 0aa77885434..12c92f8cbee 100644 --- a/lib/src/Base/Stat/SphericalModel.cxx +++ b/lib/src/Base/Stat/SphericalModel.cxx @@ -79,8 +79,25 @@ Scalar SphericalModel::computeStandardRepresentative(const Point & tau) const { if (tau.getDimension() != inputDimension_) throw InvalidArgumentException(HERE) << "In SphericalModel::computeStandardRepresentative: expected a shift of dimension=" << inputDimension_ << ", got dimension=" << tau.getDimension(); + + Point theta(scale_); + switch (scaleParametrization_) + { + case STANDARD: + // nothing to do + break; + case INVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = 1.0 / scale_[i]; + break; + case LOGINVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = std::exp(- scale_[i]); + break; + } + Point tauOverTheta(inputDimension_); - for (UnsignedInteger i = 0; i < inputDimension_; ++i) tauOverTheta[i] = tau[i] / scale_[i]; + for (UnsignedInteger i = 0; i < inputDimension_; ++i) tauOverTheta[i] = tau[i] / theta[i]; const Scalar normTauOverScaleA = tauOverTheta.norm() / radius_; if (normTauOverScaleA <= SpecFunc::ScalarEpsilon) return 1.0 + nuggetFactor_; if (normTauOverScaleA >= 1.0) return 0.0; @@ -90,12 +107,28 @@ Scalar SphericalModel::computeStandardRepresentative(const Point & tau) const Scalar SphericalModel::computeStandardRepresentative(const Collection::const_iterator & s_begin, const Collection::const_iterator & t_begin) const { + Point theta(scale_); + switch (scaleParametrization_) + { + case STANDARD: + // nothing to do + break; + case INVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = 1.0 / scale_[i]; + break; + case LOGINVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = std::exp(- scale_[i]); + break; + } + Scalar normTauOverScaleA = 0; Collection::const_iterator s_it = s_begin; Collection::const_iterator t_it = t_begin; for (UnsignedInteger i = 0; i < inputDimension_; ++i, ++s_it, ++t_it) { - const Scalar dx = (*s_it - *t_it) / scale_[i]; + const Scalar dx = (*s_it - *t_it) / theta[i]; normTauOverScaleA += dx * dx; } normTauOverScaleA = sqrt(normTauOverScaleA) / radius_; diff --git a/lib/src/Base/Stat/SquaredExponential.cxx b/lib/src/Base/Stat/SquaredExponential.cxx index 08fa3357874..fe89d614225 100644 --- a/lib/src/Base/Stat/SquaredExponential.cxx +++ b/lib/src/Base/Stat/SquaredExponential.cxx @@ -64,8 +64,25 @@ SquaredExponential * SquaredExponential::clone() const Scalar SquaredExponential::computeStandardRepresentative(const Point & tau) const { if (tau.getDimension() != inputDimension_) throw InvalidArgumentException(HERE) << "Error: expected a shift of dimension=" << inputDimension_ << ", got dimension=" << tau.getDimension(); + + Point theta(scale_); + switch (scaleParametrization_) + { + case STANDARD: + // nothing to do + break; + case INVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = 1.0 / scale_[i]; + break; + case LOGINVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = std::exp(- scale_[i]); + break; + } + Point tauOverTheta(inputDimension_); - for (UnsignedInteger i = 0; i < inputDimension_; ++i) tauOverTheta[i] = tau[i] / scale_[i]; + for (UnsignedInteger i = 0; i < inputDimension_; ++i) tauOverTheta[i] = tau[i] / theta[i]; const Scalar 
tauOverTheta2 = tauOverTheta.normSquare(); return tauOverTheta2 <= SpecFunc::ScalarEpsilon ? 1.0 + nuggetFactor_ : exp(-0.5 * tauOverTheta2); } @@ -73,12 +90,28 @@ Scalar SquaredExponential::computeStandardRepresentative(const Point & tau) cons Scalar SquaredExponential::computeStandardRepresentative(const Collection::const_iterator & s_begin, const Collection::const_iterator & t_begin) const { + Point theta(scale_); + switch (scaleParametrization_) + { + case STANDARD: + // nothing to do + break; + case INVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = 1.0 / scale_[i]; + break; + case LOGINVERSE: + for(UnsignedInteger i = 0; i < inputDimension_; ++i) + theta[i] = std::exp(- scale_[i]); + break; + } + Scalar tauOverTheta2 = 0; Collection::const_iterator s_it = s_begin; Collection::const_iterator t_it = t_begin; for (UnsignedInteger i = 0; i < inputDimension_; ++i, ++s_it, ++t_it) { - const Scalar dx = (*s_it - *t_it) / scale_[i]; + const Scalar dx = (*s_it - *t_it) / theta[i]; tauOverTheta2 += dx * dx; } return tauOverTheta2 <= SpecFunc::ScalarEpsilon ? 1.0 + nuggetFactor_ : exp(-0.5 * tauOverTheta2); diff --git a/lib/src/Base/Stat/TensorizedCovarianceModel.cxx b/lib/src/Base/Stat/TensorizedCovarianceModel.cxx index cd36dcd85cd..a7407b8cb52 100644 --- a/lib/src/Base/Stat/TensorizedCovarianceModel.cxx +++ b/lib/src/Base/Stat/TensorizedCovarianceModel.cxx @@ -261,6 +261,12 @@ Bool TensorizedCovarianceModel::isDiagonal() const return true; } +/* Is it a composite model ? */ +Bool TensorizedCovarianceModel::isComposite() const +{ + return true; +} + /* String converter */ String TensorizedCovarianceModel::__repr__() const { diff --git a/lib/src/Base/Stat/openturns/CovarianceModel.hxx b/lib/src/Base/Stat/openturns/CovarianceModel.hxx index 64db828c9be..3252f5f0d4e 100644 --- a/lib/src/Base/Stat/openturns/CovarianceModel.hxx +++ b/lib/src/Base/Stat/openturns/CovarianceModel.hxx @@ -39,6 +39,7 @@ class OT_API CovarianceModel public: typedef CovarianceModelImplementation::Implementation Implementation; + typedef CovarianceModelImplementation::ScaleParametrization ScaleParametrization; /** Default constructor without parameter */ CovarianceModel(); @@ -121,6 +122,10 @@ public: Point getScale() const; void setScale(const Point & scale); + /** Scale parametrization accessor */ + ScaleParametrization getScaleParametrization() const; + void setScaleParametrization(const ScaleParametrization scaleParametrization); + /** Spatial correlation accessors */ CorrelationMatrix getOutputCorrelation() const; void setOutputCorrelation(const CorrelationMatrix & correlation); @@ -150,6 +155,9 @@ public: /** Is it a diagonal model ? */ virtual Bool isDiagonal() const; + /** Is it a composite model ? 
*/ + virtual Bool isComposite() const; + /** Drawing method */ virtual Graph draw(const UnsignedInteger rowIndex = 0, const UnsignedInteger columnIndex = 0, diff --git a/lib/src/Base/Stat/openturns/CovarianceModelImplementation.hxx b/lib/src/Base/Stat/openturns/CovarianceModelImplementation.hxx index 0c144e15646..2390d74a5c7 100644 --- a/lib/src/Base/Stat/openturns/CovarianceModelImplementation.hxx +++ b/lib/src/Base/Stat/openturns/CovarianceModelImplementation.hxx @@ -46,6 +46,7 @@ class OT_API CovarianceModelImplementation public: typedef Pointer Implementation; + typedef enum {STANDARD, INVERSE, LOGINVERSE} ScaleParametrization; /** Dimension-based constructor */ explicit CovarianceModelImplementation(const UnsignedInteger outputDimension = 1); @@ -142,6 +143,9 @@ public: /** Is it a diagonal covariance model ? */ virtual Bool isDiagonal() const; + /** Is it a composite covariance model ? */ + virtual Bool isComposite() const; + /** Amplitude accessors */ virtual Point getAmplitude() const; virtual void setAmplitude(const Point & amplitude); @@ -150,6 +154,10 @@ public: virtual Point getScale() const; virtual void setScale(const Point & scale); + // Scale parametrization accessor + virtual ScaleParametrization getScaleParametrization() const; + virtual void setScaleParametrization(const ScaleParametrization scaleParametrization); + /** Output correlation accessors */ virtual CorrelationMatrix getOutputCorrelation() const; virtual void setOutputCorrelation(const CorrelationMatrix & correlation); @@ -207,6 +215,9 @@ protected: /** Container for scale values */ Point scale_; + // scale parametrization + ScaleParametrization scaleParametrization_; + /** Input dimension */ UnsignedInteger inputDimension_; diff --git a/lib/src/Base/Stat/openturns/MaternModel.hxx b/lib/src/Base/Stat/openturns/MaternModel.hxx index 03a5ca8ef6e..e7e6ed3b1a1 100644 --- a/lib/src/Base/Stat/openturns/MaternModel.hxx +++ b/lib/src/Base/Stat/openturns/MaternModel.hxx @@ -35,6 +35,7 @@ class OT_API MaternModel CLASSNAME public: + typedef CovarianceModelImplementation::ScaleParametrization ScaleParametrization; /** Constructor based on input dimension*/ explicit MaternModel(const UnsignedInteger inputDimension = 1); @@ -57,6 +58,7 @@ public: Scalar computeStandardRepresentative(const Collection::const_iterator & s_begin, const Collection::const_iterator & t_begin) const; #endif + virtual void setScaleParametrization(const ScaleParametrization scaleParametrization); /** Gradient */ virtual Matrix partialGradient(const Point & s, diff --git a/lib/src/Base/Stat/openturns/ProductCovarianceModel.hxx b/lib/src/Base/Stat/openturns/ProductCovarianceModel.hxx index 95656cc82df..87f40864853 100644 --- a/lib/src/Base/Stat/openturns/ProductCovarianceModel.hxx +++ b/lib/src/Base/Stat/openturns/ProductCovarianceModel.hxx @@ -74,9 +74,16 @@ public: /** Scale accessor */ void setScale(const Point & scale); + /** Scale parametrization accessor */ + virtual ScaleParametrization getScaleParametrization() const; + virtual void setScaleParametrization(const ScaleParametrization scaleParametrization); + /** Is it a stationary covariance model ? */ virtual Bool isStationary() const; + /** Is it a composite covariance model ? 
*/ + virtual Bool isComposite() const; + /** String converter */ String __repr__() const; diff --git a/lib/src/Base/Stat/openturns/TensorizedCovarianceModel.hxx b/lib/src/Base/Stat/openturns/TensorizedCovarianceModel.hxx index e24bb55e959..30c0a4b9974 100644 --- a/lib/src/Base/Stat/openturns/TensorizedCovarianceModel.hxx +++ b/lib/src/Base/Stat/openturns/TensorizedCovarianceModel.hxx @@ -82,6 +82,9 @@ public: /** Is it a diagonal covariance model ? */ virtual Bool isDiagonal() const; + /** Is it a composite covariance model ? */ + virtual Bool isComposite() const; + /** String converter */ String __repr__() const; diff --git a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GeneralLinearModelAlgorithm.cxx b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GeneralLinearModelAlgorithm.cxx index f63bf5f2fe6..3aae6a0542d 100644 --- a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GeneralLinearModelAlgorithm.cxx +++ b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GeneralLinearModelAlgorithm.cxx @@ -72,6 +72,7 @@ GeneralLinearModelAlgorithm::GeneralLinearModelAlgorithm() , optimizeParameters_(true) , analyticalAmplitude_(false) , lastReducedLogLikelihood_(SpecFunc::LogMinScalar) + , scalePrior_(NONE) { // Set the default covariance to adapt the active parameters of the covariance model setCovarianceModel(CovarianceModel()); @@ -107,6 +108,7 @@ GeneralLinearModelAlgorithm::GeneralLinearModelAlgorithm(const Sample & inputSam , optimizeParameters_(ResourceMap::GetAsBool("GeneralLinearModelAlgorithm-OptimizeParameters")) , analyticalAmplitude_(false) , lastReducedLogLikelihood_(SpecFunc::LogMinScalar) + , scalePrior_(NONE) { // Set data setData(inputSample, outputSample); @@ -167,6 +169,7 @@ GeneralLinearModelAlgorithm::GeneralLinearModelAlgorithm(const Sample & inputSam , optimizeParameters_(ResourceMap::GetAsBool("GeneralLinearModelAlgorithm-OptimizeParameters")) , analyticalAmplitude_(false) , lastReducedLogLikelihood_(SpecFunc::LogMinScalar) + , scalePrior_(NONE) { // Set data setData(inputSample, outputSample); @@ -240,6 +243,7 @@ GeneralLinearModelAlgorithm::GeneralLinearModelAlgorithm(const Sample & inputSam , optimizeParameters_(ResourceMap::GetAsBool("GeneralLinearModelAlgorithm-OptimizeParameters")) , analyticalAmplitude_(false) , lastReducedLogLikelihood_(SpecFunc::LogMinScalar) + , scalePrior_(NONE) { // Set data setData(inputSample, outputSample); @@ -356,8 +360,8 @@ void GeneralLinearModelAlgorithm::setCovarianceModel(const CovarianceModel & cov const Scalar scaleFactor(ResourceMap::GetAsScalar( "GeneralLinearModelAlgorithm-DefaultOptimizationScaleFactor")); if (!(scaleFactor > 0)) throw InvalidArgumentException(HERE) << "Scale factor set in ResourceMap is invalid. It should be a positive value. Here, scale = " << scaleFactor ; - const Point lowerBound(optimizationDimension, ResourceMap::GetAsScalar( "GeneralLinearModelAlgorithm-DefaultOptimizationLowerBound")); - Point upperBound(optimizationDimension, ResourceMap::GetAsScalar( "GeneralLinearModelAlgorithm-DefaultOptimizationUpperBound")); + Point lowerBound(optimizationDimension, ResourceMap::GetAsScalar("GeneralLinearModelAlgorithm-DefaultOptimizationLowerBound")); + Point upperBound(optimizationDimension, ResourceMap::GetAsScalar("GeneralLinearModelAlgorithm-DefaultOptimizationUpperBound")); // We could set scale parameter if these parameters are enabled. 
// check if scale is active const Indices activeParameters(reducedCovarianceModel_.getActiveParameter()); @@ -370,13 +374,66 @@ void GeneralLinearModelAlgorithm::setCovarianceModel(const CovarianceModel & cov if (isScaleActive) { const Point inputSampleRange(normalizedInputSample_.getMax() - normalizedInputSample_.getMin()); - for (UnsignedInteger k = 0; k < reducedCovarianceModel_.getScale().getSize(); ++k) upperBound[k] = inputSampleRange[k] * scaleFactor; + for (UnsignedInteger k = 0; k < reducedCovarianceModel_.getScale().getSize(); ++ k) + upperBound[k] = inputSampleRange[k] * scaleFactor; + + // transform bounds in the target parametrization + const ScaleParametrization scaleParametrization = reducedCovarianceModel_.getScaleParametrization(); + switch (scaleParametrization) + { + case ScaleParametrization::STANDARD: + // nothing to do + break; + case ScaleParametrization::INVERSE: + { + for (UnsignedInteger k = 0; k < reducedCovarianceModel_.getScale().getSize(); ++k) + { + const Scalar lb = 1.0 / upperBound[k]; + const Scalar ub = 1.0 / lowerBound[k]; + lowerBound[k] = lb; + upperBound[k] = ub; + } + break; + } + case ScaleParametrization::LOGINVERSE: + { + for (UnsignedInteger k = 0; k < reducedCovarianceModel_.getScale().getSize(); ++k) + { + const Scalar lb = -std::log(upperBound[k]); + const Scalar ub = -std::log(lowerBound[k]); + lowerBound[k] = lb; + upperBound[k] = ub; + } + break; + } + } } LOGWARN(OSS() << "Warning! For coherency we set scale upper bounds = " << upperBound.__str__()); - optimizationBounds_ = Interval(lowerBound, upperBound); } else optimizationBounds_ = Interval(); + + if (scalePrior_ != NONE) + { + if (reducedCovarianceModel_.getOutputDimension() == 1) + { + const Description activeParametersDescription(reducedCovarianceModel_.getParameterDescription()); + Bool activeAmplitude = false; + Bool activeScale = false; + // And one of the active parameters must be called amplitude_0 + for (UnsignedInteger i = 0; i < activeParametersDescription.getSize(); ++i) + { + if (activeParametersDescription[i] == "amplitude_0") + activeAmplitude = true; + if (activeParametersDescription[i] == "scale_0") + activeScale = true; + } + if (!activeAmplitude || !activeScale) + throw NotYetImplementedException(HERE) << "Gu penalization cannot be used without active amplitude and scale"; + } + else + throw NotYetImplementedException(HERE) << "Gu penalization cannot be used on multi-d models"; +} } CovarianceModel GeneralLinearModelAlgorithm::getCovarianceModel() const @@ -672,6 +729,133 @@ Scalar GeneralLinearModelAlgorithm::maximizeReducedLogLikelihood() return optimalLogLikelihood; } + +// when the likelihood is integrated: log(\det{FtR^{-1}F}) +Scalar GeneralLinearModelAlgorithm::correctIntegratedLikelihoodLogDeterminant() const +{ + Matrix LiF(covarianceCholeskyFactor_.solveLinearSystem(F_)); + Matrix LtiLiF(covarianceCholeskyFactor_.transpose().solveLinearSystem(LiF)); + Scalar sign = 1.0; + SymmetricMatrix FtLtiLiF((F_.transpose()*LtiLiF).getImplementation()); + const Scalar logDetFtLtiLiF = FtLtiLiF.computeLogAbsoluteDeterminant(sign, false); + return logDetFtLtiLiF; +} + + +// If a prior is used, compute its value as a penalization term +Scalar GeneralLinearModelAlgorithm::computeLogIntegratedLikelihoodPenalization() const +{ + const UnsignedInteger size = inputSample_.getSize(); + const UnsignedInteger inputDimension = inputSample_.getDimension(); + Scalar penalizationFactor = 0.0; + switch (scalePrior_) + { + case NONE: + // nothing to do + break; + case JOINTLYROBUST: + 
{ + const Scalar b0 = ResourceMap::GetAsScalar("GeneralLinearModelAlgorithm-JointlyRobustPriorB0"); + const Scalar b1 = ResourceMap::GetAsScalar("GeneralLinearModelAlgorithm-JointlyRobustPriorB1"); + // gu2018 formulation: maximum gap between design points instead of the mean gap + const Scalar b = b0 * std::pow(1.0 * size, -1.0 / inputDimension) * (b1 + inputDimension); + Point C((inputSample_.getMax() - inputSample_.getMin()) * std::pow(1.0 * size, -1.0 / inputDimension)); + + // the C*scale term does not depend on the parametrization + CovarianceModel reducedCovarianceModelCopy(reducedCovarianceModel_); + reducedCovarianceModelCopy.setScaleParametrization(ScaleParametrization::INVERSE); + const Point inverseScale(reducedCovarianceModelCopy.getScale()); + const Scalar dotCscale = C.dot(inverseScale); + + const Scalar nugget = reducedCovarianceModel_.getNuggetFactor(); + penalizationFactor = b1 * std::log(dotCscale + nugget) - b * (dotCscale + nugget); + const ScaleParametrization scaleParametrization = reducedCovarianceModel_.getScaleParametrization(); + + // this term is the logarithm of the Jacobian determinant, only for standard and log-inverse parametrizations + const Point scale(reducedCovarianceModel_.getScale()); + switch (scaleParametrization) + { + case ScaleParametrization::STANDARD: + { + Scalar sumLog = 0.0; + for (UnsignedInteger j = 0; j < inputDimension; ++ j) + sumLog += std::log(scale[j]); + penalizationFactor -= 2.0 * sumLog; + break; + } + case ScaleParametrization::INVERSE: + // nothing to do + break; + case ScaleParametrization::LOGINVERSE: + { + Scalar sumXi = 0.0; + for (UnsignedInteger j = 0; j < inputDimension; ++ j) + sumXi += scale[j]; + penalizationFactor += sumXi; + break; + } + } + break; + } + case REFERENCE: + { + SymmetricMatrix iTheta(inputDimension + 1); + + // discretize gradient wrt scale + Collection dCds(inputDimension, SymmetricMatrix(size)); + for (UnsignedInteger k1 = 0; k1 < size; ++ k1) + { + for (UnsignedInteger k2 = 0; k2 <= k1; ++ k2) + { + Matrix parameterGradient(reducedCovarianceModel_.parameterGradient(normalizedInputSample_[k1], normalizedInputSample_[k2])); + for (UnsignedInteger j = 0; j < inputDimension; ++ j) + { + // assume scale gradient is at the n first components + dCds[j](k1, k2) = parameterGradient(j, 0); + } + } + } + + // TODO: cache sigmaTheta + SquareMatrix Linv(covarianceCholeskyFactor_.solveLinearSystem(IdentityMatrix(covarianceCholeskyFactor_.getNbRows())).getImplementation()); + SquareMatrix LLtinv(covarianceCholeskyFactor_.transpose().solveLinearSystem(Linv).getImplementation()); + SquareMatrix sigmaTheta(LLtinv); + if (F_.getNbColumns() > 0) { + SquareMatrix FtLLtinvF((F_.transpose()*LLtinv*F_).getImplementation()); + Matrix FtLLtinvFiFtLLtinv((FtLLtinvF.solveLinearSystem(F_.transpose()*LLtinv))); + SquareMatrix LLtinvFFtLLtinvFiFtLLtinv((LLtinv*F_*FtLLtinvFiFtLLtinv).getImplementation()); + sigmaTheta = sigmaTheta - LLtinvFFtLLtinvFiFtLLtinv; + } + + // lower triangle + for (UnsignedInteger i = 0; i < inputDimension; ++ i) + { + for (UnsignedInteger j = 0; j <= i; ++ j) + { + iTheta(i, j) = (sigmaTheta * dCds[i] * sigmaTheta * dCds[j]).computeTrace(); + } + } + // bottom line + for (UnsignedInteger j = 0; j < inputDimension; ++ j) + { + iTheta(inputDimension, j) = (sigmaTheta * dCds[j]).computeTrace(); + } + // bottom right corner + iTheta(inputDimension, inputDimension) = size - beta_.getSize(); + + Scalar sign = 1.0; + penalizationFactor = 0.5 * iTheta.computeLogAbsoluteDeterminant(sign, false); + break; + } + 
case FLAT: + { + penalizationFactor = 0; + break; + } + } + return penalizationFactor; +} + Point GeneralLinearModelAlgorithm::computeReducedLogLikelihood(const Point & parameters) const { // Check that the parameters have a size compatible with the covariance model @@ -680,44 +864,71 @@ Point GeneralLinearModelAlgorithm::computeReducedLogLikelihood(const Point & par << " covariance model requires an argument of size " << reducedCovarianceModel_.getParameter().getSize() << " but here we got " << parameters.getSize(); LOGDEBUG(OSS(false) << "Compute reduced log-likelihood for parameters=" << parameters); - const Scalar constant = - SpecFunc::LOGSQRT2PI * static_cast(inputSample_.getSize()) * static_cast(outputSample_.getDimension()); - Scalar logDeterminant = 0.0; + const UnsignedInteger size = inputSample_.getSize(); // If the amplitude is deduced from the other parameters, work with // the correlation function if (analyticalAmplitude_) reducedCovarianceModel_.setAmplitude(Point(1, 1.0)); reducedCovarianceModel_.setParameter(parameters); // First, compute the log-determinant of the Cholesky factor of the covariance // matrix. As a by-product, also compute rho. + Scalar logLikelihood = 0.0; if (method_ == 0) - logDeterminant = computeLapackLogDeterminantCholesky(); + logLikelihood = computeLapackLogDeterminantCholesky(); + else + logLikelihood = computeHMatLogDeterminantCholesky(); + LOGDEBUG(OSS(false) << "log-determinant=" << logLikelihood << ", rho=" << rho_); + + // If no prior is used + if (scalePrior_ == NONE) + { + // Compute the amplitude using an analytical formula if needed + // and update the reduced log-likelihood. + if (analyticalAmplitude_) + { + LOGDEBUG("Analytical amplitude"); + // J(\sigma)=-\log(\sqrt{\sigma^{2N}\det{R}})-(Y-M)^tR^{-1}(Y-M)/(2\sigma^2) + // =-N\log(\sigma)-\log(\det{R})/2-(Y-M)^tR^{-1}(Y-M)/(2\sigma^2) + // dJ/d\sigma=-N/\sigma+(Y-M)^tR^{-1}(Y-M)/\sigma^3=0 + // \sigma=\sqrt{(Y-M)^tR^{-1}(Y-M)/N} + const Scalar sigma = std::sqrt(rho_.normSquare() / (ResourceMap::GetAsBool("GeneralLinearModelAlgorithm-UnbiasedVariance") ? size - beta_.getSize() : size)); + LOGDEBUG(OSS(false) << "sigma=" << sigma); + reducedCovarianceModel_.setAmplitude(Point(1, sigma)); + logLikelihood += 2.0 * ((scalePrior_ == NONE) ? size : size - beta_.getSize()) * std::log(sigma); + rho_ /= sigma; + LOGDEBUG(OSS(false) << "rho_=" << rho_); + } + + const Scalar epsilon = rho_.normSquare(); + LOGDEBUG(OSS(false) << "epsilon=||rho||^2=" << epsilon); + + if (epsilon <= 0.0) + logLikelihood = SpecFunc::LogMinScalar; + // For the general multidimensional case, we have to compute the general log-likelihood (ie including marginal variances) + else + { + const Scalar constant = -SpecFunc::LOGSQRT2PI * static_cast(size) * static_cast(outputSample_.getDimension()); + logLikelihood = constant - 0.5 * (logLikelihood + epsilon); + } + } + // If a prior is used as penalization, + // compute it and return the log-posterior. else - logDeterminant = computeHMatLogDeterminantCholesky(); - // Compute the amplitude using an analytical formula if needed - // and update the reduced log-likelihood. 
- if (analyticalAmplitude_) { - LOGDEBUG("Analytical amplitude"); - // J(\sigma)=-\log(\sqrt{\sigma^{2N}\det{R}})-(Y-M)^tR^{-1}(Y-M)/(2\sigma^2) - // =-N\log(\sigma)-\log(\det{R})/2-(Y-M)^tR^{-1}(Y-M)/(2\sigma^2) - // dJ/d\sigma=-N/\sigma+(Y-M)^tR^{-1}(Y-M)/\sigma^3=0 - // \sigma=\sqrt{(Y-M)^tR^{-1}(Y-M)/N} - const UnsignedInteger size = inputSample_.getSize(); - const Scalar sigma = std::sqrt(rho_.normSquare() / (ResourceMap::GetAsBool("GeneralLinearModelAlgorithm-UnbiasedVariance") ? size - beta_.getSize() : size)); - LOGDEBUG(OSS(false) << "sigma=" << sigma); - reducedCovarianceModel_.setAmplitude(Point(1, sigma)); - logDeterminant += 2.0 * size * std::log(sigma); - rho_ /= sigma; - LOGDEBUG(OSS(false) << "rho_=" << rho_); - } // analyticalAmplitude - - LOGDEBUG(OSS(false) << "log-determinant=" << logDeterminant << ", rho=" << rho_); - const Scalar epsilon = rho_.normSquare(); - LOGDEBUG(OSS(false) << "epsilon=||rho||^2=" << epsilon); - if (epsilon <= 0) lastReducedLogLikelihood_ = SpecFunc::LogMinScalar; - // For the general multidimensional case, we have to compute the general log-likelihood (ie including marginal variances) - else lastReducedLogLikelihood_ = constant - 0.5 * (logDeterminant + epsilon); - LOGINFO(OSS(false) << "Reduced log-likelihood=" << lastReducedLogLikelihood_); - return Point(1, lastReducedLogLikelihood_); + // we use the integrated likelihood, add log(\det{FtR^{-1}F}) term to the reduced log-likelihood + if (basisCollection_.getSize() > 0) + logLikelihood += correctIntegratedLikelihoodLogDeterminant(); + + // Compute logarithm of the prior penalization. + const Scalar penalizationFactor = computeLogIntegratedLikelihoodPenalization(); + LOGINFO(OSS(false) << "penalizationFactor=" << penalizationFactor); + + // Compute logarithm of the integrated likelihood. 
+ const Scalar logIntegratedLikelihood = -0.5 * logLikelihood - 0.5 * (size - beta_.getSize()) * std::log(rho_.normSquare()); + logLikelihood = logIntegratedLikelihood + penalizationFactor; + } + LOGINFO(OSS(false) << "log-likelihood=" << logLikelihood); + lastReducedLogLikelihood_ = logLikelihood; + return Point(1, logLikelihood); } @@ -936,6 +1147,17 @@ Point GeneralLinearModelAlgorithm::getRho() const return rho_; } +// Scale prior accessor +GeneralLinearModelAlgorithm::ScalePrior GeneralLinearModelAlgorithm::getScalePrior() const +{ + return scalePrior_; +} + +void GeneralLinearModelAlgorithm::setScalePrior(const ScalePrior scalePrior) +{ + scalePrior_ = scalePrior; +} + /* String converter */ String GeneralLinearModelAlgorithm::__repr__() const { @@ -1040,6 +1262,7 @@ void GeneralLinearModelAlgorithm::save(Advocate & adv) const adv.saveAttribute( "covarianceCholeskyFactor_", covarianceCholeskyFactor_ ); adv.saveAttribute( "optimizeParameters_", optimizeParameters_ ); adv.saveAttribute( "noise_", noise_ ); + adv.saveAttribute( "scalePrior_", static_cast(scalePrior_) ); } @@ -1062,6 +1285,9 @@ void GeneralLinearModelAlgorithm::load(Advocate & adv) adv.loadAttribute( "covarianceCholeskyFactor_", covarianceCholeskyFactor_ ); adv.loadAttribute( "optimizeParameters_", optimizeParameters_ ); adv.loadAttribute( "noise_", noise_ ); + UnsignedInteger scalePrior = 0; + adv.loadAttribute( "scalePrior_", scalePrior ); + scalePrior_ = static_cast(scalePrior); } END_NAMESPACE_OPENTURNS diff --git a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/KrigingAlgorithm.cxx b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/KrigingAlgorithm.cxx index 8165b85c623..b682fbaeb17 100644 --- a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/KrigingAlgorithm.cxx +++ b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/KrigingAlgorithm.cxx @@ -272,6 +272,17 @@ void KrigingAlgorithm::setMethod(const String & method) glmAlgo_.setMethod(0); } +// Scale prior accessor +KrigingAlgorithm::ScalePrior KrigingAlgorithm::getScalePrior() const +{ + return glmAlgo_.getScalePrior(); +} + +void KrigingAlgorithm::setScalePrior(const ScalePrior scalePrior) +{ + glmAlgo_.setScalePrior(scalePrior); +} + /* Method save() stores the object through the StorageManager */ void KrigingAlgorithm::save(Advocate & adv) const { diff --git a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/GeneralLinearModelAlgorithm.hxx b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/GeneralLinearModelAlgorithm.hxx index 063616be19b..2c11bf2d2a7 100644 --- a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/GeneralLinearModelAlgorithm.hxx +++ b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/GeneralLinearModelAlgorithm.hxx @@ -46,6 +46,8 @@ public: typedef GeneralLinearModelResult::BasisCollection BasisCollection; typedef GeneralLinearModelResult::BasisPersistentCollection BasisPersistentCollection; + typedef enum {NONE, JOINTLYROBUST, REFERENCE, FLAT} ScalePrior; + typedef CovarianceModel::ScaleParametrization ScaleParametrization; /** Default constructor */ GeneralLinearModelAlgorithm(); @@ -143,9 +145,19 @@ protected: // Initialize default optimization solver void initializeDefaultOptimizationAlgorithm(); + // when the likelihood is integrated: log(\det{FtR^{-1}F}) + Scalar correctIntegratedLikelihoodLogDeterminant() const; + + // If a prior is used, compute its value as a penalization term + Scalar computeLogIntegratedLikelihoodPenalization() const; + friend class KrigingAlgorithm; Point getRho() const; + // Scale prior 
accessor + ScalePrior getScalePrior() const; + void setScalePrior(const ScalePrior likelihoodPrior); + private: // Helper class to compute the reduced log-likelihood function of the model @@ -295,6 +307,10 @@ private: /** Cache of the last computed reduced log-likelihood */ mutable Scalar lastReducedLogLikelihood_; + + // scale prior + ScalePrior scalePrior_; + }; // class GeneralLinearModelAlgorithm diff --git a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/KrigingAlgorithm.hxx b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/KrigingAlgorithm.hxx index e0c1ff78ba2..1772fb7c898 100644 --- a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/KrigingAlgorithm.hxx +++ b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/KrigingAlgorithm.hxx @@ -46,6 +46,8 @@ public: typedef KrigingResult::BasisCollection BasisCollection; typedef KrigingResult::BasisPersistentCollection BasisPersistentCollection; + typedef GeneralLinearModelAlgorithm::ScalePrior ScalePrior; + typedef CovarianceModel::ScaleParametrization ScaleParametrization; /** Default constructor */ KrigingAlgorithm(); @@ -103,6 +105,10 @@ public: void setMethod(const String & method); String getMethod() const; + /** Scale prior accessor */ + ScalePrior getScalePrior() const; + void setScalePrior(const ScalePrior likelihoodPrior); + /** Method save() stores the object through the StorageManager */ void save(Advocate & adv) const; diff --git a/python/doc/bibliography.rst b/python/doc/bibliography.rst index b8124506f17..f296608432a 100644 --- a/python/doc/bibliography.rst +++ b/python/doc/bibliography.rst @@ -67,6 +67,12 @@ Bibliography .. [gamboa2013] Gamboa, F., Janon, A., Klein, T. & Lagnoux, A. *Sensitivity analysis for multidimensional and functional outputs.* 2013. `pdf `__ +.. [gu2016] Gu M., *Robust Uncertainty Quantification and Scalable Computation + for Computer Models with Massive Output*, 2016. + `pdf `__ +.. [gu2018] Gu M, *Jointly Robust Prior for Gaussian Stochastic Process in Emulation, + Calibration and Variable Selection*, 2018. + `pdf `__ .. [hormann1993] Hormann W., *The generation of Binomial Random Variates* Journal of Statistical Computation and Simulation 46, pp. 101-110, 1993. 
`pdf `__ diff --git a/python/doc/examples/meta_modeling/kriging_robust.ipynb b/python/doc/examples/meta_modeling/kriging_robust.ipynb new file mode 100644 index 00000000000..f19e3939398 --- /dev/null +++ b/python/doc/examples/meta_modeling/kriging_robust.ipynb @@ -0,0 +1,170 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Robust estimation of kriging hyperparameters" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example we are going to create a kriging metamodel with robust estimation of the hyperparameters.\n", + "\n", + "Here\n", + "\n", + "$$h(x) = cos(x_1 + x_2)$$" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from __future__ import print_function\n", + "import openturns as ot" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "# prepare some X/Y data\n", + "ot.RandomGenerator.SetSeed(0)\n", + "dimension = 2\n", + "input_names = ['x1', 'x2']\n", + "formulas = ['cos(x1 + x2)']\n", + "model = ot.SymbolicFunction(input_names, formulas)\n", + "distribution = ot.Normal(dimension)\n", + "x = distribution.getSample(30)\n", + "y = model(x)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "parameter= [0.1,0.1,1]\n" + ] + } + ], + "source": [ + "# choose a covariance model\n", + "covarianceModel = ot.SquaredExponential([0.1]*dimension, [1.0])\n", + "\n", + "# choose a parametrization in which the scale is optimized\n", + "covarianceModel.setScaleParametrization(ot.CovarianceModelImplementation.STANDARD)\n", + "#covarianceModel.setScaleParametrization(ot.CovarianceModelImplementation.INVERSE)\n", + "#covarianceModel.setScaleParametrization(ot.CovarianceModelImplementation.LOGINVERSE)\n", + "print(\"parameter=\", covarianceModel.getParameter())" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "# create a kriging model\n", + "basis = ot.ConstantBasisFactory(dimension).build()\n", + "algo = ot.KrigingAlgorithm(x, y, covarianceModel, basis)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# choose a prior type\n", + "algo.setScalePrior(ot.GeneralLinearModelAlgorithm.JOINTLYROBUST)\n", + "# or algo.setScalePrior(ot.GeneralLinearModelAlgorithm.REFERENCE)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "algo.run()\n", + "result = algo.getResult()\n", + "responseSurface = result.getMetaModel()" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "optimized parameter= [1.97545,2.11943]\n" + ] + } + ], + "source": [ + "print(\"optimized parameter=\", result.getCovarianceModel().getParameter())" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAoAAAAHgCAYAAAA10dzkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAPYQAAD2EBqD+naQAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOzdeXxM9+L/8fdM9ojYQtSWROxFqBBBJNqgqJaiaGurVqtVLbebVqtouV2oVhfVcqmlja1cSxFLIsS+q13sSwglJJF1fn/4yq+5YilJTmbm9Xw88qj5zDln3vNpIm/nzDnHZLFYLAIAAIDdMBsdAAAAAAWLAggAAGBnKIAAAAB2hgIIAABgZyiAAAAAdoYCCAAAYGcogAAAAHaGAggAAGBnKIAAAAB2hgIIoFA7dOiQWrVqpWLFislkMmn+/PlGR8pVWFiYwsLCjI5xR/Hx8ercubNKlSolk8mkcePGGR0JgEEogIANuHz5svr166fSpUurSJEiatGihbZt22Z0rDzRq1cv7d69W59++qmmTZumwMBAw7Ls3btXH3/8sY4dO2ZYhgcxaNAgLVu2TEOGDNG0adP0+OOPP/A2f/jhB3Xp0kWVKlWSyWRS7969HzwogHxn4l7AgHXLyspSSEiIdu7cqbffflteXl76/vvvdfLkSW3dulVVq1Y1OuJ9S0lJkbu7uz744AN98sknRsfRnDlz1KVLF61evfqWvX1paWmSJGdnZwOS3ZuyZcsqPDxc06dPz7Nt+vr66urVq2rUqJFWrFih5557TlOmTMmz7QPIH45GBwDwYObMmaPY2FjNnj1bnTt3liQ988wzqlatmoYNG6aZM2canPD+XbhwQZJUvHhxg5PcXWEufjedP38+z+cyOjo6e++fh4dHnm4bQP7hEDBQCK1evVomk0m///77Lc/NnDlTJpNJ69evl3SjAHp7e+vpp5/OXqZ06dJ65plntGDBAqWmpt7xtRYsWKB27dqpXLlycnFxkb+/v0aOHKnMzMwcyx06dEidOnVS2bJl5erqqgoVKqhbt266cuXKHbcfExOTfYjQxcVFFStW1KBBg5SSknLH9T7++GP5+PhIkt5++22ZTCb5+vpKknr37p395/9dx2Qy5RgzmUwaMGCA5s+fr9q1a8vFxUUPP/ywli5desv6p0+fVt++fbPnws/PT/3791daWpqmTJmiLl26SJJatGghk8kkk8mkqKgoSbl/BvD8+fPq27evvL295erqqoCAAE2dOjXHMseOHZPJZNKXX36piRMnyt/fXy4uLmrYsKE2b958xzm6KS4uTl26dFHJkiXl7u6uxo0ba/HixdnPT5kyRSaTSRaLRd9991129tsZNmyYzGazVq5cmWO8X79+cnZ21s6dO7PHfHx87rgtAIUTewCBQigsLEwVK1bUjBkz1LFjxxzPzZgxQ/7+/goODpYkbd++XY888ojM5pz/nmvUqJEmTpyogwcPqk6dOrd9rSlTpsjDw0ODBw+Wh4eHVq1apY8++kiJiYn64osvJN04vNm6dWulpqbq9ddfV9myZXX69GktWrRIly9fVrFixW67/dmzZys5OVn9+/dXqVKltGnTJo0fP16nTp3S7Nmzb7ve008/reLFi2vQoEHq3r272rZte997mNauXat58+bp1VdfVdGiRfXNN9+oU6dOOnHihEqVKiVJOnPmjBo1apT9ecoaNWro9OnTmjNnjpKTk9W8eXMNHDhQ33zzjd5//33VrFlTkrL/+79SUlIUFhamw4cPa8CAAfLz89Ps2bPVu3dvXb58WW+88UaO5WfOnKmrV6/q5Zdflslk0ueff66nn35acXFxcnJyuu17i4+PV5MmTZScnKyBAweqVKlSmjp1qp588knNmTNHHTt2VPPmzTVt2jT16NFDLVu2VM+ePe84X0OHDtXChQvVt29f7d69W0WLFtWyZcv0008/aeTIkQoICPgn0w+gMLIAKJSGDBlicXFxsVy+fDl77Pz58xZHR0fLsGHDsseKFClieeGFF25Zf/HixRZJlqVLl97xdZKTk28Ze/nlly3u7u6W69evWywWi2X79u0WSZbZs2f/4/eR2/ZHjx5tMZlMluPHj99x3aNHj1okWb744osc47169bL4+PjcsvywYcMs//vXmiSLs7Oz5fDhw9ljO3futEiyjB8/PnusZ8+eFrPZbNm8efMt283KyrJYLBbL7NmzLZIsq1evvmWZ0NBQS2hoaPbjcePGWSRZpk+fnj2WlpZmCQ4Otnh4eFgSExNzvMdSpUpZLl26lL3sggULLJIsCxcuzGVm/r8333zTIskSExOTPXb16lWLn5+fxdfX15KZmZljLl577bU7bu+m3bt3W5ydnS0vvvii5a+//rKUL1/eEhgYaElPT7/tOkWKFLH06tXrnrYPwFgcAgYKqZ49eyo1NVVz5szJHouIiFBGRoaef/757LGUlBS5uLjcsr6rq2v283fi5uaW/eerV68qISFBISEhSk5O1v79+yUpew/fsmXLlJyc/I/ex9+3n5SUpISEBDVp0kQWi0Xbt2//R9u6X+Hh4fL3989+XLduXXl6eiouLk7SjRNp5s+fr/bt2+d6lvH9HOJcsmSJypYtq+7du2ePOTk5aeDAgbp27Zqio6NzLN+1a1eVKFEi+3FISIgkZWe80+s0atRIzZo1yx7z8PBQv379dOzYMe3du/cfZ5ek2rVra/jw4fr555/VunVrJSQkaOrUqXJ05MARYAsogEAhVaNGDTVs2FAzZszIHpsxY4YaN26sKlWqZI+5ubnl+jm/69evZz9/J3/++ac6duyoYsWKydPTU6VLl84umDc/3+fn56fBgwfr559/lpeXl1q3bq3vvvvurp//k6QTJ06od+/eKlmypDw8PFS6dGmFhobm2H5+q1Sp0i1jJUqU0F9//SXpxskmiYmJql27dp695vHjx1W1atVbDs3fPGR8/PjxO2a8WQZvZrzT61SvXv2W8du9zj/x9ttvKyAgQJs2bdKwYcNUq1at+94WgMKFAggUYj179lR0dLROnTqlI0eOaMOGDTn2/knSQw89pLNnz96y7s2xcuXK3Xb7ly9fVmhoqHbu3KkRI0Zo4cKFioyM1GeffSbpxp6xm8aMGaNdu3bp/fffV0pKigYOHKiHH35Yp06duu32MzMz1bJlSy1evFjvvvuu5s+fr8jIyOzLhPx9+//E7fbI/e+JKzc5ODjkOm4pRFfBKowZ4+LidOjQIUnS7t27DcsBIO+xLx8oxLp166bBgwfr119/VUpKipycnNS1a9ccy9SrV08xMTHKysrKsbdp48aNcnd3V7Vq1W67/aioKF28eFHz5s1T8+bNs8ePHj2a6/J16tRRnTp1NHToUMXGxqpp06aaMGHCba/Rt3v3bh08eFBTp07NceJBZGTkPb3/2ylRooQuX758y/j97u0qXbq0PD09tWfPnjsu908OBfv4+GjXrl23/H+5eVj95hnOD8rHx0cHDhy4ZfxBXycrK0u9e/eWp6en3nzzTY0aNU
qdO3fOcbY5AOvFHkCgEPPy8lKbNm00ffp0zZgxQ48//ri8vLxyLNO5c2fFx8dr3rx52WMJCQmaPXu22rdvn+vnA2+6udfp73uZ0tLS9P333+dYLjExURkZGTnG6tSpI7PZfMfLzOS2fYvFoq+//vq269wLf39/XblyRbt27coeO3v2bK6XzbkXZrNZHTp00MKFC7Vly5Zbnr+Zv0iRIpKUa/n8X23bttW5c+cUERGRPZaRkaHx48fLw8Mj+zD4g2rbtq02bdqUfVkg6cZnLSdOnChfX9/7Pmw7duxYxcbGauLEiRo5cqSaNGmi/v37KyEhIU9yAzAWewCBQq5nz57ZF3geOXLkLc937txZjRs3Vp8+fbR3797sO4FkZmZq+PDhd9x2kyZNVKJECfXq1UsDBw6UyWTStGnTbjnsuGrVKg0YMEBdunRRtWrVlJGRoWnTpsnBwUGdOnW67fZr1Kghf39/vfXWWzp9+rQ8PT01d+7cu36u7W66deumd999Vx07dtTAgQOVnJysH374QdWqVbvvW+CNGjVKy5cvV2hoqPr166eaNWvq7Nmzmj17ttauXavixYurXr16cnBw0GeffaYrV67IxcVFjz76qMqUKXPL9vr166cff/xRvXv31tatW+Xr66s5c+Zo3bp1GjdunIoWLfpAc3DTe++9p19//VVt2rTRwIEDVbJkSU2dOlVHjx7V3Llzb/kM4r3Yt2+fPvzwQ/Xu3Vvt27eXdONyQfXq1dOrr76qWbNmZS+7cOHC7OsCpqena9euXdl7hJ988knVrVs3D94lgDxn3AnIAO5FamqqpUSJEpZixYpZUlJScl3m0qVLlr59+1pKlSplcXd3t4SGhuZ6OZPcrFu3ztK4cWOLm5ubpVy5cpZ33nnHsmzZshyXO4mLi7O88MILFn9/f4urq6ulZMmSlhYtWlhWrFhx1+3v3bvXEh4ebvHw8LB4eXlZXnrppezLsPznP/+547q3uwyMxWKxLF++3FK7dm2Ls7OzpXr16pbp06ff9jIwuV36xMfH55ZLlhw/ftzSs2dPS+nSpS0uLi6WypUrW1577TVLampq9jI//fSTpXLlyhYHB4ccc/S/l4GxWCyW+Ph4S58+fSxeXl4WZ2dnS506dW55z3d6j5JyXPLndo4cOWLp3LmzpXjx4hZXV1dLo0aNLIsWLcp1e3e7DExGRoalYcOGlgoVKuS4BJHFYrF8/fXXFkmWiIiI7LFevXpZJOX6dbf/vwCMw72AgUIuIyND5cqVU/v27TVp0iSj4wAAbACfAQQKufnz5+vChQt3vXsDAAD3ij2AQCG1ceNG7dq1SyNHjpSXl9d9f7YNAID/xR5AoJD64Ycf1L9/f5UpU0a//PKL0XEAADaEPYAAAAB2hj2AAAAAdoYCCAAAYGcogAAAAHaGAggAAGBnKIAAAAB2hgIIAABgZyiAAAAAdoYCCAAAYGcogAAAAHaGAggAAGBnKIAAAAB2hgIIAABgZyiAAAAAdoYCCAAAYGcogAAAAHaGAggAAGBnKIAAAAB2hgIIAABgZyiAAAAAdoYCCAAAYGcogAAAAHaGAggAAGBnKIAAAAB2hgIIAABgZyiAAAAAdoYCCAAAYGcogAAAAHaGAggAAGBnHI0OYM2ysrJ05swZFS1aVCaTyeg4AADgHlgsFl29elXlypWT2Wyf+8IogA/gzJkzqlixotExAADAfTh58qQqVKhgdAxDUAAfQNGiRSXd+Aby9PQ0OI3x0tPTtXz5crVq1UpOTk5Gx7FZzHPBYJ4LBvNcMJjnnBITE1WxYsXs3+P2iAL4AG4e9vX09KQA6sZfMO7u7vL09OQvmHzEPBcM5rlgMM8Fg3nOnT1/fMs+D3wDAADYMQogAACAnaEAAgAA2BkKIAAAgJ2hAAIAANgZCiAAAICdoQACAADYGQogAACAnaEAAgAA2BmbKYBr1qxR+/btVa5cOZlMJs2fP/+u60RFRemRRx6Ri4uLqlSpoilTpuR/UAAAAIPZTAFMSkpSQECAvvvuu3ta/ujRo2rXrp1atGihHTt26M0339SLL76oZcuW5XNSAAAAY9nMvYDbtGmjNm3a3PPyEyZMkJ+fn8aMGSNJqlmzptauXauvvvpKrVu3zq+YAAAAhrOZAvhPrV+/XuHh4TnGWrdurTfffPO266Smpio1NTX7cWJioqQbN9lOT0/Ps2zX4pO0b0KMzE4OMjk5yuToIAdnB5kczTI5OcrB2UFOnq5yLu4u15Luci3hJrdS7nJydcizDPfj5hzk5VzgVsxzwWCeCwbzXDCY55yYBzsugOfOnZO3t3eOMW9vbyUmJiolJUVubm63rDN69GgNHz78lvHly5fL3d09z7IlbftLz37a5x+vlypnJctdV82euubgqWtOnkpxLqoUl6JKdfdQmruH0op7KrNkUcm7qMwPecipvJtcyjjLZDblWf7IyMg82xZuj3kuGMxzwWCeCwbzfENycrLREQxntwXwfgwZMkSDBw/OfpyYmKiKFSuqVatW8vT0zLPXyXg0Q6efaKGs9ExlpWcqMzUj+89Z6ZnKvJ6hjGvXlXH1utKvJCvz6o0vS1KKsq4lSVcSZb56RY6Jf8k9+bJKJJ+R++XLKpp+SSWzEuSkjByvlypnXTSX0UW3CkosXkFpZSrIUrGinCtXUNFaFVSqfkWVru0ts8OdS2J6eroiIyPVsmVLOTk55dl8ICfmuWAwzwWDeS4YzHNON4/g2TO7LYBly5ZVfHx8jrH4+Hh5enrmuvdPklxcXOTi4nLLuJOTU57+QDk5Oal8Q588297fWTKzdOXEZV3aF6+rR84r+Wi80k7Gy3IuXg7nTqnIxZMqv2envLeflJuuZ6+XLDcdd/HXpRJVdL28v0xVq6hIQBWVbuyvCk0qycH5/x9+zuv5QO6Y54LBPBcM5rlgMM83MAd2XACDg4O1ZMmSHGORkZEKDg42KFHBMDmYVcyvpIr5lZRU87bLWbIs+uvIRV3YdlKXd53Q9b1x0pEjcj97WL67FqjC1qNy/C1TkpQiVx13raGEMjV1vXhpbVyTroceq6NKLfzl5Ga332IAABRaNvPb+dq1azp8+HD246NHj2rHjh0qWbKkKlWqpCFDhuj06dP65ZdfJEmvvPKKvv32W73zzjt64YUXtGrVKs2aNUuLFy826i0UKiazSSWqeqlEVS+pa/1bns9ISdfJjSd0Pvawrm3ZL9P+vSp2eq9anViqEru+kcbeOLS837WmLpSvr8w69VQsrL78OgSouE8xA94RAAC4yWYK4JYtW9SiRYvsxzc/q9erVy9NmTJFZ8+e1YkTJ7Kf9/Pz0+LFizVo0CB9/fXXqlChgn7++WcuAXOPHN2cVDHMXxXD/CXdmLP09HQtXrRYQX6Bil99SInr/5Rp106VOrlDlY/8Ktf5qdKb0glHP50uU1/XazWQZ8sgVeneUMUq5t1nKAEAwJ3ZTAEMCwuTxWK57fO53eUjLCxM27dvz8dU9sdkNsnrYW89VK+CNOj/F/KMlHQdXnZA8Uu3K2PLDnke2a5aKz5Ts
RWJynrXpMPONXW2UpCyGgXJu32QqnSoLUdXm/n2BACgUOE3LAqEo5uTqnSorSodakvqIUnKysjSkT/26+z8jcrasFFljm5UlZm/yHFmpq7KQwdKNdHVgBCVeKq5qvdoJLcSrsa+CQAAbAQFEIYxO5rl376W/NvXknTjuofJCcnaG7FVlxatU5GtMXpk1ZcqtupDpb7hrJ2ejXTp4RAVfSJMtfo1k7tX3l17EQAAe0IBRKHi7uWuuq+FSK+FSHpPmWmZOvD7bp2bHSPnjTF6eONklVk/WqkfOGt7sSa6HBiu0t3DVeO5BhwyBgDgHpmNDgDciYOzg6p3rafQOa8r+OQslU4/qyML92pDpy+V5uapBis/U+0XGyvJrZQ2PNRB0V2/18mYY0bHBgCgUGOXCayKyWyS/xM15f9ETUmvK+N6hvZM26yEiJUqviVSDWa9IadZr+mwc02dqtNWnt3aqvYrzeTs4Wx0dAAACg32AMKqObo6qvZLwQpbMVT1Lkcr5eRFbXh7rs76NlGN7TP1yNuP6XpRL60v30lr+kxWwr4LRkcGAMBwFEDYFM8Knmr8+dMKOfCzvNNP68Bv27Ut/F15XD2rZlNeVIlaZbWjWKiiOnylk2uOGh0XAABDUABhs0xmk6p3raewyA9UJzFWF/ecU2yviUpzLargBe+pYmhlHXCrp6gWw3Vw7m5Zsm5/HUkAAGwJBRB2o/TDZRQypa8axS9S2ukErR80SwneD6t+1FhV61xXcW61FBX2sQ4v3Gd0VAAA8hUFEHapaLmiCh7bRU2PzZDb1Qva/PFina3QSPWjv1KVJ2vpoGtdrQ7/VMdWHL77xgAAsDIUQNg9Zw9nNRzWVs2OTJXLX/HaOGS+LpStrYYrR8u3ZVXtLRKo6I7jdGFPvNFRAQDIExRA4G9ci7sqaNRTanpspswXzmv94NlKLFZJwfPfUYk65bWpTDute/03JV9MMToqAAD3jQII3Ia7l7uCx3RW4zPzlHTorGK7jZfb9b/U9NvuyvDyVky1F7R9XLSyMjl5BABgXSiAwD0oUaWUmv/aX3USY3U88qC2NR8kn6NRqj8oTCfcqml169E6t+2M0TEBALgnFEDgH/IJr6qw6OGqmHpEO7+J1umKwQpaPlJeDSppY9kntfGD/yo9JcPomAAA3BYFELhPJrNJAa83V9Mjvyj92BnFdhuvolfPKGjUU7rkUVFRwUO42DQAoFCiAAJ5oJhPcTX/tb9qJW3Rgd+2a//DnVV/ww8qH+qvTd5PaPPwJcpMyzQ6JgAAkiiAQJ6r3rWeQneNl2P8aa3r9ZOKXj2jhh+30+kiVRXV9nNdPJBgdEQAgJ2jAAL5pEiZIgqZ0lc1rm3Vnp836IRPczX+4yN51CivtZV7aN/0rUZHBADYKQogkM9MZpNq9w1Ss8NTlHzwtNa3+1Q+J9eqZo9A7SwWovVvz1XGdU4aAQAUHAogUIBKVi2lsEVvqVzSYW1873dZTA4K/rKzznlUUVT7Mbpy/LLREQEAdoACCBjAwdlBQaM7qN7lKO2bvlXHfELVZNEQOfhWUHTd13UiKs7oiAAAG0YBBAxW87lH1OzIVF3eeUJbmv9LD+/5TeVbVFWsT3ftm7HN6HgAABtEAQQKiTJ1yyoserjczx/X2q7jVeHMRtV8voG2lmqprf+OlCWLW84BAPIGBRAoZNy93BX626sqd/WgYgf+JveUS2owpJX2ezRQ7MDfuJ4gAOCBUQCBQsrR1VFNvu6qGte2aNvnK5TkXlpNxnfX2RJ1dfXb7UpPTjc6IgDASlEAgULOZDbpkbcfU2DCMu2btkXxpWrp+RXDdalULa15doJSE1ONjggAsDIUQMCK1Hy+gRqcmK0Z70zUiXJBavbrq7pUorKin/5ayQnJRscDAFgJCiBghTyalFHDI9N1fMleHfELV9Pf/6WkMn6KemqsUi6lGB0PAFDIUQABK+bXpoaaHZ6qM6sOaH/VJ9Tsv+/oSml/RXf5lkPDAIDbogACNqBSC3+FHJik0yv267BvSzWb84YulqyiNc/9qLRraUbHAwAUMhRAwIb4PFZFzY5M1Yklf+pY+WZqNrO/4ktUV0yfydxvGACQjQII2CC/NjXU5PivOjJvl055BypkSl8dL1ZHG96ZxwWlAQAUQMCWVe1YW8GnZmvf9K36q2glNf6ik/70DNaOcVFGRwMAGIgCCNiBms89osCEZdr+5UqZlKV6g1poc+k2OhCxw+hoAAADUAABO1L/X4+qVuJGrX9rjryuxKl6t/pa5/usTq09ZnQ0AEABogACdsZkNin4i06qmPin1vSYKP+TUfIKqaGo4CFKPJVodDwAQAGgAAJ2ytHVUc1/eUlFzx7ShtD31GjD10qtdOPSMZwxDAC2jQII2LkiZYooLOpjXdl0UAf82qj5zFd0tHh9bR293OhoAIB8QgEEIEl6qGEFNTsyVXunblaySwk1eL+1NpdppyOL9hkdDQCQxyiAAHKo1TNQdf+K1oa356rMX/vl076OousO0MUDCUZHAwDkEQoggFuYzCY1/vxplb24V2uf+Ez1dk+XY40qimo/RunJ6UbHAwA8IAoggNty8XRR2MJ/KWP/Ye2o87xCFr2j4yXrafvY1UZHAwA8AAoggLsqVd1Lobu+1eGIbUp2LqH6/3pUsT7ddXbLaaOjAQDuAwUQwD2r/kyA6lyO0dqXpqrqyVXyaFhDUU98yWFhALAyFEAA/4jJbFKziT3lfPSAtgW8oJDF7+pEiQBtH7PK6GgAgHtEAQRwX4r5FFfojq91OGKbrrmWUv23HlNspW4cFgYAK0ABBPBAqj8ToLp/rdHal39R1VOrVbRhdUW1/Vxp19KMjgYAuA0KIIAHZjKb1GxCDzkfPaCtAX0V8scQHfdqoD0/bzA6GgAgFxRAAHnm5mHhQzO3KN3BRbVeaqLouq/r6pmrRkcDAPwNBRBAnqvRvb6qXdygNU+NUeDuybpaqZY2fvBfo2MBAP4PBRBAvnB0dVTY/EH6K+ZPnS5RW0GjntL6Cl0Uv+Os0dEAwO5RAAHkqwrNfBUYv0Sxr/+qKmfWyLV+Ta15fqKyMrKMjgYAdosCCCDfmcwmNfmmmxwP7dOuqp3UfMbL2l0qTHFL9hsdDQDsEgUQQIEp4V9SIQcnafuYVSqWclbl2wUo6tERXDIGAAoYBRBAgas/uIW8z+3S+iZvqdnqEYorE6QDs3YaHQsA7AYFEIAh3Eq6KWzdpzo0fZPMlkxV7hqoqMdGcl9hACgAFEAAhqr53CPyid+sdU3fVbNVw3XYq7EOzt1tdCwAsGkUQACGc/F0UdjaT3Rw6gY5ZqXKt3MDRbX8VBnXM4yOBgA2iQIIoNCo1TNQlc5vVWzwWwpZ8ZEOlmqsQ7/vMToWANgcCiCAQsXF00VhsaO0f/J6uWQkq9LTDRTVejR7AwEgD1EAARRKD/dppPLx27Q+aJBClg/VAa8mOvzfvUbHAgCbYFMF8LvvvpOvr69cXV0VFBSkTZs23XbZ9PR0jRgxQv7+/nJ1dVVAQICWLl1agGkB3I1rcVeF
bfi39v0cK7f0RJV/qoGiu3wrS5bF6GgAYNVspgBGRERo8ODBGjZsmLZt26aAgAC1bt1a58+fz3X5oUOH6scff9T48eO1d+9evfLKK+rYsaO2b99ewMkB3E3tvkEqe3qbNtV5UaFzXtcW77Y6v+uc0bEAwGrZTAEcO3asXnrpJfXp00e1atXShAkT5O7ursmTJ+e6/LRp0/T++++rbdu2qly5svr376+2bdtqzJgxBZwcwL1w93JX6K7x2jx8iXwubZe5Xh1tHDLf6FgAYJUcjQ6QF9LS0rR161YNGTIke8xsNis8PFzr16/PdZ3U1FS5urrmGHNzc9PatWtv+zqpqalKTU3NfpyYmCjpxuHk9HQuXntzDpiL/GXv81xvSLgudtymuJb91fjfHbVmXl/VWfGFPMp65Onr2Ps8FxTmuWAwzzkxD5LJYrFY/Ydpzpw5o/Lly/aM0xIAACAASURBVCs2NlbBwcHZ4++8846io6O1cePGW9Z59tlntXPnTs2fP1/+/v5auXKlnnrqKWVmZuYoeX/38ccfa/jw4beMz5w5U+7u7nn3hgDclSXLoqTx29Rh9RjFOzyk2P5vqWj4Q0bHAmAFkpOT9eyzz+rKlSvy9PQ0Oo4h7LYAXrhwQS+99JIWLlwok8kkf39/hYeHa/LkyUpJScn1dXLbA1ixYkUlJCTY7TfQ36WnpysyMlItW7aUk5OT0XFsFvOc0/EVh3S9Sx/VSNqqNaFDFbzwXTm6PvjBDea5YDDPBYN5zikxMVFeXl52XQBt4hCwl5eXHBwcFB8fn2M8Pj5eZcuWzXWd0qVLa/78+bp+/bouXryocuXK6b333lPlypVv+zouLi5ycXG5ZdzJyYkfqL9hPgoG83xDlTa1lH5+rda2+USh0SO1t9wyeS6YLp9H/fNk+8xzwWCeCwbzfANzYCMngTg7O6tBgwZauXJl9lhWVpZWrlyZY49gblxdXVW+fHllZGRo7ty5euqpp/I7LoA85uTupLDo4dr741p5Xj+vko/V07r+042OBQCFlk0UQEkaPHiwfvrpJ02dOlX79u1T//79lZSUpD59+kiSevbsmeMkkY0bN2revHmKi4tTTEyMHn/8cWVlZemdd94x6i0AeEB1+gWr5PEd2lW5o5pO6KGYqn2UdD7J6FgAUOjYxCFgSeratasuXLigjz76SOfOnVO9evW0dOlSeXt7S5JOnDghs/n/993r169r6NChiouLk4eHh9q2batp06apePHiRr0FAHmgaLmianrkF63t95ge+elVnau4QZkzIlStc12jowFAoWEzBVCSBgwYoAEDBuT6XFRUVI7HoaGh2ruX20oBtqrZxF462jFIGU93lU+XRlrTfZxCpr8sk9lkdDQAMJzNHAIGgP/l16aGfM5s0KaHX1DzX/trQ6VndOX4ZaNjAYDhKIAAbJprCTc13/O9NvxrtmqdjlRilfr68z+3v084ANgDCiAAu9D4y866uma7rrh6q9oLTRX1xJfKysgyOhYAGIICCMBuVAjxU/X4GK1rNEhhi9/W1nJPKGHfBaNjAUCBowACsCtO7k4K2/i5toxYIr+EzUqvXV+7J+Z+z3AAsFUUQAB2KfDDNsrYvEMXPHxV4+Xmiu70jSxZVn9nTAC4JxRAAHarbIPyqnl2tWIfeV2h897Qet9uunrmqtGxACDfUQAB2DUndyeFbh2r9YNnq/bJP3Ter5EO/5drhAKwbRRAAJAUPKazEpZsVpbJQWWfaqQNb0QYHQkA8g0FEAD+T+U21VXuxEbt8OuokB96yPzGXKVdSzM6FgDkOQogAPxNkTJF1PTwL4p65lu1Oh6hoxUf05mNJ42OBQB5igIIAP/DZDap6fR+mj3wGxW/flYuwfW1dfRyo2MBQJ6hAALAbRR9tKycdm1UXKmGqv/+44pqPZpLxQCwCRRAALiDklVLqcHZxVoTMlRhy9/XBp+uSjqfZHQsAHggFEAAuAuzo1lha0Zow9tzVefUEp32CdaJqDijYwHAfaMAAsA9avz50zo7b4NcMpLl8WhDbft8hdGRAOC+UAAB4B+o2rG2ih3crLiSDRXwbmtFtR/D5wIBWB0KIAD8Q8X9Sqj+mcWKCXpbYYveUmzl55WckGx0LAC4ZxRAALgPDs4OCtvwb8W+/qvqH/9dxys106l1x42OBQD3hAIIAA+gyTfddDJivTzS/5JrSKB2jIsyOhIA3BUFEAAeUPVnAuS+Z7NOFK+r2oPCFd3lWz4XCKBQowACQB4oVd1Ldc8s07r6ryt0zuuKqfua0pPTjY4FALmiAAJAHnF0dVTotq8U0/MnBf/5k3ZXaKPLR/8yOhYA3IICCAB5LGTqi/rzq0j5Xt6uv6o31rHIQ0ZHAoAcKIAAkA/qvRmmq5EblWUyq1jrIG0fs8roSACQjQIIAPnE57Eq8jq4XnElA1X7rdZa89yPRkcCAEkUQADIV8V8iivg1BKtr/Oyms98RdH131TG9QyjYwGwcxRAAMhnjq6Oar7rW6155ls13fGttld6UldOXDE6FgA7RgEEgALSPOI17Ry1RNUuxOpCtSY6ERVndCQAdooCCAAFqMGQVrq4eIOcslJV5NFG2vXDOqMjAbBDFEAAKGCV29aQ596NOun5sKq9+phi35xldCQAdoYCCAAGKFGllGqeWK6tvp3U5OuuWt32c24fB6DAUAABwCAuni5qcmS6opoNVYs/3lVMnVc5QxhAgaAAAoCBTGaTwmJGKqb3z2qy9ydtr/SUrp27ZnQsADaOAggAhUDIf/pq56glqn4hRicrN9e5bWeMjgTAhlEAAaCQaDCklc7NilHxtPPKbNRYh37fY3QkADaKAggAhUi1LgHSho265lRSZZ5uqq2frzQ6EgAbRAEEgELmocDyKnd4jQ55Bavuu48r5sWpRkcCYGMogABQCBUt76mA4wu1oXpvhUzqrdWhH3OZGAB5hgIIAIWUk7uTmu2dqKjWo9RizXDF1HqZy8QAyBMUQAAoxExmk8KWDtHal6aoyYHJ2urXSckXU4yOBcDKUQABwAo0m9hL24cvVO1zK3TYL1x/HblkdCQAVowCCABWouFHbXRs0iqVv3ZAF2uF6MzGk0ZHAmClKIAAYEUefiFIiUvWyTUzSWraRIcX/Gl0JABWiAIIAFbG7/Hqctocq6tOJeXVsZl2fr/O6EgArAwFEACskHf9cip7YI2OeQao2mvh2jBkgdGRAFgRCiAAWKlilYqp5vGl2lm+nRr++2mt6fGT0ZEAWAkKIABYMZdirmoYF6F1tV9R8+n9tLrFCC4YDeCuKIAAYOUcnB0UsvNbRYV/ohZRw7Sm7gBlZWQZHQtAIUYBBAAbYDKbFBb5gWJ6TFTInz9ofZUeSk9ONzoWgEKKAggANiTkl5e0cXCEGh6fre1+TyvlEncNAXArCiAA2JjgMV20+5P/qvb5ldpfuY0STyUaHQlAIUMBBAAb1OCDx3Xkh0hVvrJDp6o/qoR9F4yOBKAQoQACgI2q80pTnYuIVunrJ3WlXnOd2XTK6EgACgkKIADYsOrPBChp6Vq
5ZCYrs0kzHYs8ZHQkAIUABRAAbJxvy6pyWL9OaQ5ucn88RPsjdhodCYDBKIAAYAcealhBxXeuUYJrBT3ULVS7fuD+wYA9owACgJ0oVaO0KhxYpaPF6qnKqy215dNlRkcCYBAKIADYEc8Knqoe94f2eD+mukPba8O7vxsdCYABKIAAYGfcSrqpftw8ban0tAI/76J1A341OhKAAkYBBAA75OTupKBDM7TB/3kFf/ecYl74j9GRABQgR6MDAACM4eDsoCb7J2ttgJua/+cFRSelKDTiVaNjASgANrUH8LvvvpOvr69cXV0VFBSkTZs23XH5cePGqXr16nJzc1PFihU1aNAgXb9+vYDSAoDxzI5mhez+XtH131TorNcU9eRYoyMBKAA2UwAjIiI0ePBgDRs2TNu2bVNAQIBat26t8+fP57r8zJkz9d5772nYsGHat2+fJk2apIiICL3//vsFnBwAjGUym9R8y1hFNRmisIX/0urwT42OBCCf2UwBHDt2rF566SX16dNHtWrV0oQJE+Tu7q7JkyfnunxsbKyaNm2qZ599Vr6+vmrVqpW6d+9+172GAGCLTGaTwtaNUtRjI9Vi5VBFNf1AliyL0bEA5BObKIBpaWnaunWrwsPDs8fMZrPCw8O1fv36XNdp0qSJtm7dml344uLitGTJErVt27ZAMgNAYRS2Yqii23+psNhRig78FyUQsFE2cRJIQkKCMjMz5e3tnWPc29tb+/fvz3WdZ599VgkJCWrWrJksFosyMjL0yiuv3PEQcGpqqlJTU7MfJyYmSpLS09OVnp6eB+/Eut2cA+YifzHPBcOe57nJ3IGKetZZYXMGKqp2soK3fi2zY/7sL7DneS5IzHNOzIONFMD7ERUVpVGjRun7779XUFCQDh8+rDfeeEMjR47Uhx9+mOs6o0eP1vDhw28ZX758udzd3fM7stWIjIw0OoJdYJ4Lht3O8/OVNOPSR+q+aqSWV76k5G+6ycHZId9ezm7nuYAxzzckJycbHcFwJovFYvX799PS0uTu7q45c+aoQ4cO2eO9evXS5cuXtWDBglvWCQkJUePGjfXFF19kj02fPl39+vXTtWvXZDbf+q/d3PYAVqxYUQkJCfL09Mzjd2V90tPTFRkZqZYtW8rJycnoODaLeS4YzPMNGwb+puAJfbShUhc12DNZjq55u9+AeS4YzHNOiYmJ8vLy0pUrV+z297dN7AF0dnZWgwYNtHLlyuwCmJWVpZUrV2rAgAG5rpOcnHxLyXNwuPGv29t1YhcXF7m4uNwy7uTkxA/U3zAfBYN5Lhj2Ps8hP/TQeg93BX3ZTZtqS0EHpuV5CZSY54LCPN/AHNhIAZSkwYMHq1evXgoMDFSjRo00btw4JSUlqU+fPpKknj17qnz58ho9erQkqX379ho7dqzq16+ffQj4ww8/VPv27bOLIABACv6ikzaaI9To867aVDVLDQ/OkJObzfz6AOySzfwEd+3aVRcuXNBHH32kc+fOqV69elq6dGn2iSEnTpzIscdv6NChMplMGjp0qE6fPq3SpUurffv2+vRTrn8FAP8r6LOntdE0Sw0/e0abqz2rhgdmyMmdvSiAtbKZAihJAwYMuO0h36ioqByPHR0dNWzYMA0bNqwAkgGA9Qv6d0dtNM9R4Ogu2lLtWQUenEkJBKyUTVwHEABQMIJGPaUdH8xRg9MLtKVqd6UnczkNwBpRAAEA/0ijT57Ujg/nqsGZ/2prla5Ku5ZmdCQA/xAFEADwjzUa0V47P5qn+mcXa1tVSiBgbSiAAID70nD4E9o1bJ7qn1uibVWfoQQCVoQCCAC4bw0/bqfdI+ar3rml2l6li1KvUgIBa0ABBAA8kMAP2+jPT+YrIH6ZdlTtrNTE1LuvBMBQFEAAwANr8MHj+vPTBQqIX67t1bsqLYmzg4HCjAIIAMgTDd5vrT0jflf9c39oa/VnlXE9w+hIAG6DAggAyDOBH7bRzg9mK/D0fG2s3lOZaZlGRwKQi0JRAFNTU5WaymdGAMAWNPrkSW196zcFnZil9TVfUGZ6ltGRAPwPwwpgZGSk2rZtqxIlSsjd3V3u7u4qUaKE2rZtqxUrVhgVCwCQBxp/0UmbBk5XcNx0ravdT1kZlECgMDGkAE6dOlVt27ZVsWLF9NVXX2nRokVatGiRvvrqKxUvXlxt27bVtGnTjIgGAMgjTb7upg0vT1Gzg5MVE/CaLFkWoyMB+D+ORrzop59+qnHjxum111675bnevXurWbNmGjFihHr06GFAOgBAXmk6oYfWpqUr9D99FfWIs0K3jZPJbDI6FmD3DNkDeOLECYWHh9/2+ccee0ynTp0qwEQAgPzSbPILinlugsJ2fqPoRm+zJxAoBAwpgA8//LAmTZp02+cnT56sWrVqFWAiAEB+Cpn+stZ0/kZhW8coqtkHlEDAYIYcAh4zZoyeeOIJLV26VOHh4fL29pYkxcfHa+XKlYqLi9PixYuNiAYAyCfNZ7+u6KfS1eK//9LqR13UImqY0ZEAu2VIAQwLC9OePXs0YcIErV+/XufOnZMklS1bVm3atNErr7wiX19fI6IBAPJR6ILBimqTphZLh2h1a2c1W/SW0ZEAu2RIAZQkX19fnT17ViNGjFBoaKhRMQAABSzsj/cU1SJVLZa/r9XPuEm9/YyOBNgdQy8EfeXKFbVs2VJVq1bVqFGjdObMGSPjAAAKSOjKjxTd8C21mD9IV7/eZnQcwO4YWgDnz5+v06dPq3///oqIiJCPj4/atGmj2bNnKz2dG4kDgK0ymU1qvuFzRdd6Wc+uHqkNg2YbHQmwK4bfCq506dIaPHiwdu7cqY0bN6pKlSrq2bOnypUrp0GDBunQoUNGRwQA5AOT2aTGW77WCu+OavxdL236cKHRkQC7YXgBvOns2bOKjIxUZGSkHBwc1LZtW+3evVu1atXSV199ZXQ8AEA+MDualTT+eW15qL3qftJF275YaXQkwC4YWgDT09M1d+5cPfHEE/Lx8dHs2bP15ptv6syZM5o6dapWrFihWbNmacSIEUbGBADkI7OzWXV2/6LdXi1U/Z0ntfvHWKMjATbPsLOAJemhhx5SVlaWunfvrk2bNqlevXq3LNOiRQsVL17cgHQAgILi4umih/fN1cEqbeT3Slvt81ilms89YnQswGYZWgC/+uordenSRa6urrddpnjx4jp69GgBpgIAGMHdy12V9yzUyRrh8u7RWoeLRqvKk9wVCsgPhh4C7tGjxx3LHwDAvnhW8FS5nUt1yeUheXQM1/FVR4yOBNikQnMSCAAAklTCv6RKbI5UioOHHFo9prObTxkdCbA5FEAAQKFTura3nKNXSLIopVm4Lvx53uhIgE2hAAIACqXywZWUuWylimRcUULDx3XlxBWjIwE2gwIIACi0fB6roisRy/TQ9aM6WvdJpVxKMToSYBMogACAQq1a57o68d0iVbuyWbtqdVV6SobRkQCrRwEEABR6dfs31Z/D5+qR+D+0sXZfZWVkGR0JsGoUQACAVWj4URttHvCLmsRNU0yjf8mSZTE6EmC1KIAAAK
vRZHx3xXT9VqHbxymq1Sij4wBWiwIIALAqob+9qqhHR6jFyqFa0/0Ho+MAVokCCACwOqGRQxVd7w01++01xQ78zeg4gNWhAAIArI7JbFLI5rGK9XtODcf30JZPlhodCbAqFEAAgFUyO5oVtGeytnm3Uc0PO2n3j7FGRwKsBgUQAGC1nNydVHdvhA4VC1TF/u10cO5uoyMBVoECCACwam4l3eS367865+orz2da69TaY0ZHAgo9CiAAwOoVq1RMpTYtVarZXemPtlbC/gSjIwGFGgUQAGATStf2lpYtk0fGZZ0LbKek80lGRwIKLQogAMBm+Dzqr4Rflsgnaa/21u6i9OR0oyMBhRIFEABgU2o+30AHR89TwIUV2lj3JW4ZB+SCAggAsDkN3mupLQOmqtmRqYpq+r7RcYBChwIIALBJTcZ3V9RTY9Viw78V9fQ3RscBChUKIADAZoXNH6SowLfU/Pc3FftGhNFxgEKDAggAsGnN13+mWL/nFPhND237YqXRcYBCgQIIALBpN28Zt6vUo6ryTkft/3W70ZEAw1EAAQA2z8ndSTX2zNEp9+oq+XwbnYiKMzoSYCgKIADALniU9VDpTYuV7FBUWa1a68Kf542OBBiGAggAsBulHy4j8/Jlcs+8qvig9twtBHaLAggAsCuVwirr4i9L5Jv0p/bU7a6M6xlGRwIKHAUQAGB3aj73iPYNn60G8UsUGziQu4XA7lAAAQB2qeFHbbS+5wQ1//MHRbf73Og4QIGiAAIA7FbI1BcVFfKhwpa+p3WvzTQ6DlBgKIAAALsWGjVca/17qeH3vbVjXJTRcYACQQEEANg1k9mkRjsmanfJUPkN6qDDC/40OhKQ7yiAAAC75+zhrKo75+qcq4/cOrXRuW1njI4E5CsKIAAAkjwreMozZolMsuhys3a6euaq0ZGAfEMBBADg/zwUWF5Js//QQylHdaBuZ6UnpxsdCcgXFEAAAP6masfaihvzu+peXK0NAf24RiBskk0VwO+++06+vr5ydXVVUFCQNm3adNtlw8LCZDKZbvlq165dASYGABRG9Qe30Ob+/1HI4SmKfnS40XGAPGczBTAiIkKDBw/WsGHDtG3bNgUEBKh169Y6fz73m33PmzdPZ8+ezf7as2ePHBwc1KVLlwJODgAojJp+/5yiWo1SWPRwre33i9FxgDxlMwVw7Nixeumll9SnTx/VqlVLEyZMkLu7uyZPnpzr8iVLllTZsmWzvyIjI+Xu7k4BBABkC/3jPa2p/qIa/fQi1wiETbGJApiWlqatW7cqPDw8e8xsNis8PFzr16+/p21MmjRJ3bp1U5EiRfIrJgDAypjMJgVv+167S4bKd3BHxS3Zb3QkIE84Gh0gLyQkJCgzM1Pe3t45xr29vbV//91/WDdt2qQ9e/Zo0qRJd1wuNTVVqamp2Y8TExMlSenp6UpP50yxm3PAXOQv5rlgMM8Fwyrm2Uny3fyrEmqGyumpdjq3LUalapQ2OtU/YhXzXICYBxspgA9q0qRJqlOnjho1anTH5UaPHq3hw2/9MPDy5cvl7u6eX/GsTmRkpNER7ALzXDCY54JhDfOcNOxdPTb0HZ1p1EHrfn5Xjh7W9yvUGua5ICQnJxsdwXDW992bCy8vLzk4OCg+Pj7HeHx8vMqWLXvHdZOSkvTbb79pxIgRd32dIUOGaPDgwdmPExMTVbFiRbVq1Uqenp73F96GpKenKzIyUi1btpSTk5PRcWwW81wwmOeCYVXz3FbaV6aKqr8UruQhsxR44BeZHa3jk1RWNc8F4OYRPHtmEwXQ2dlZDRo00MqVK9WhQwdJUlZWllauXKkBAwbccd3Zs2crNTVVzz///F1fx8XFRS4uLreMOzk58QP1N8xHwWCeCwbzXDCsZZ7rvthEGw7NUPDnnbQmvIrC1n1qdKR/xFrmOb8xBzZyEogkDR48WD/99JOmTp2qffv2qX///kpKSlKfPn0kST179tSQIUNuWW/SpEnq0KGDSpUqVdCRAQBWqPFnHbXmiS8UFjtKMX1yv9IEUNjZxB5ASeratasuXLigjz76SOfOnVO9evW0dOnS7BNDTpw4IbM5Z989cOCA1q5dq+XLlxsRGQBgpUIXDNaaOocVPOVlbavlo0fefszoSMA/YjMFUJIGDBhw20O+UVFRt4xVr15dFgu3+AEA/DMms0lNto7X9orHVO2dTjpcPVZVnqxldCzgntnMIWAAAAqSo6ujqm+PULxrJbl0aqcLe+LvvhJQSFAAAQC4T54VPOURtVhOWamKb/ykkhO4vAisAwUQAIAHUC6oov6aulB+SXu0s15PZWVkGR0JuCsKIAAAD6jm8w20570ZCjo9T2vCPjI6DnBXFEAAAPJA0OgOWtPm3wpb96nW9Z9udBzgjiiAAADkkdBFbyumah8FTuir3T/GGh0HuC0KIAAAecRkNilo2wTt9wxS2f4ddGrtMaMjAbmiAAIAkIecPZxVYdM8JTsUVUp4eyWe4r6zKHwogAAA5LFS1b2UPnehyqSe0IFHuiszLdPoSEAOFEAAAPJBlSdr6dAns1T/wjKtbfyW0XGAHCiAAADkk8APWmtdl68Vun2c1jw/0eg4QDYKIAAA+Sh01muKrv2agme8pm1frDQ6DiCJAggAQL5runmcdpZ6VJXf7ay4Pw4YHQegAAIAkN8cXR1VdWuELjqVlemp9vrryCWjI8HOUQABACgAxXyKy/GPRfLMuKRjgZ2VnpxudCTYMQogAAAFxOdRf536ep4evrxW6xsONDoO7BgFEACAAhTwenNt6PG9mu+doOhu3xsdB3aKAggAQAFr/suLigoYqKYRA7V9zCqj48AOUQABADBAsw1jtLNkC/m83UXHVx0xOg7sDAUQAAADOLo6qvKmCCU6llR62ye5ZzAKFAUQAACDlPAvqczfF6p06intD3yOewajwFAAAQAwkH+7Gjo4/Dc1iF+imNChRseBnaAAAgBgsIYftVHME58rbMO/FfvaDKPjwA5QAAEAKARCFwzW2so99cj3fbV3yiaj48DGUQABACgETGaTArf+qEMe9VWybwed23bG6EiwYRRAAAAKCdfiriqz9ndlmRx0sXkHpVxKMToSbBQFEACAQsQ7oKwuT10gv6Q92tbgRVmyLEZHgg2iAAIAUMjUeu4R7Rj4HzU9NlPR7T4zOg5sEAUQAIBCqMnXXRXV9AM1X/q+toz8w+g4sDEUQAAACqnmUSO0pUw7VRn2rI6vOGR0HNgQCiAAAIWU2dGsapum65Kjt9Kf6KCrZ64aHQk2ggIIAEAhVtynmCy/z1eZ1JPaG9hTWRlZRkeCDaAAAgBQyPm3q6F9709X0Nn5WtP6U6PjwAZQAAEAsAJBnz6p1WHD1XzVMG36aKHRcWDlKIAAAFiJ0Mih2lT2KVUf+byO/rHf6DiwYhRAAACshNnRrFpbftEF5wqydOigKyeuGB0JVooCCACAFfEsX1QOC+erVNo5HWj0PCeF4L5QAAEAsDJ+rarq4PBfFRi/WGseG250HFghCiAAAFao4UdtFN3qU4WtGaGN7
/1udBxYGQogAABWKuyP97S+fGfV+qynjvz3T6PjwIpQAAEAsFIms0l1tvxHZ1385NC5g64c+8voSLASFEAAAKyYR1kPufwxX8UyLupgo+eUmZZpdCRYAQogAABWzqdFZR35NEKPXFimmBYfGh0HVoACCACADQgc0lIx7T5TWOxorR80y+g4KOQogAAA2IjQ//5L6yp1V91xfXRwzi6j46AQowACAGAjTGaT6m/9WSfdqsm1ewddPspJIcgdBRAAABvi7uUuj+W/yyPzig4FcacQ5I4CCACAjanQzFdxI2eqwYU/tKblSKPjoBCiAAIAYIMCP2it6EdHqHnUcG0dudToOChkKIAAANio0GXva7P3E6rxSU8l775sdBwUIhRAAABslNnRrBobf9FlRy8FjPhSyQnJRkdCIUEBBADAhhXzKa6kaRHySY/T7qYDZMmyGB0JhQAFEAAAG1ft6Tqa1+Z9NTs6XTHdvzc6DgoBCiAAAHbA8+XaWl33dQXPelO7J8YaHQcGowACAGAnGkX/W3s9G8urfxdd2H3O6DgwEAUQAAA74VzESd5Rs2RWlk6HdFVGSrrRkWAQCiAA4P+1d+fBVdb3Hsc/52SVJYQISYAGIkqiKEQuNJElhmoCKCqxtiIgIEXoUKIIdgpUxuBSwJpSsEPHinKh9+qwtVAKUYqQlGYh7PeCbFckFUnCYkoSwMn6u384pAQCkoTzPMl53q+ZW7dwfgAAEcRJREFU88f55Xkyn/PlzMlnnoUDBwnv00lF76zRvSU5yo6faXcc2IQCCACAw8RMHaTsJ3+jhD2/Ve5Lq+yOAxtQAAEAcKCEtS8oq9to9Vo8Ucc3fGZ3HFiMAggAgAO53C712fmeCgK6y/2jJ1V6ssTuSLAQBRAAAIdqHdpa/hv/rPaVZ3Q4brxMdY3dkWARCiAAAA4WmXiXjr7yX4or/Iu2D3/L7jiwiFcVwCVLligyMlKBgYGKi4vTzp07b7j9+fPnNXXqVHXq1EkBAQGKiopSenq6RWkBAGge4t58XBkD52jQ5jna9+stdseBBbymAK5atUozZsxQamqq9u7dq5iYGA0dOlRnzpypd/uKigolJSUpPz9fa9eu1dGjR7V06VJ16dLF4uQAANjvwW1zte/2JHWdNUoFeSftjgMP85oCuHDhQk2aNEkTJkxQz5499e6776pVq1ZatmxZvdsvW7ZMxcXFWr9+vQYOHKjIyEglJCQoJibG4uQAANjPx99Hd+R8qG/crVWc+LQqLlTYHQke5BUFsKKiQnv27FFiYmLtmtvtVmJionJzc+vdZ8OGDerfv7+mTp2qsLAw3XfffZo3b56qq6utig0AQLNye9TtOv+H1Yq6sEe58b+wOw48yNfuALfCuXPnVF1drbCwsDrrYWFhOnLkSL37fPHFF9q2bZvGjBmj9PR0ff755/rZz36myspKpaam1rtPeXm5ysvLa5+XlpZKkiorK1VZydfpXJ4Bs/As5mwN5mwN5myNhsw5etx/KCs9TT/48zRlv/SAYt9+ytPxLMf7zUsKYGPU1NQoNDRU7733nnx8fNS3b1+dOnVKb7/99nUL4Pz58/Xaa69ds/63v/1NrVq18nTkFmPLFi4gtgJztgZztgZztsbNztk821Wfbn9CcYsnaW3wN2rVJ8TDyax16dIluyPYzisKYIcOHeTj46PTp0/XWT99+rTCw8Pr3adTp07y8/OTj49P7do999yjoqIiVVRUyN/f/5p9Zs+erRkzZtQ+Ly0tVUREhIYMGaKgoKBb9GparsrKSm3ZskVJSUny8/OzO47XYs7WYM7WYM7WaMycy3Y/qLM9BqjfgoUKPZGlVh2850DH5TN4TuYVBdDf3199+/bV1q1blZycLOnbI3xbt25VSkpKvfsMHDhQH330kWpqauR2f3sp5LFjx9SpU6d6y58kBQQEKCAg4Jp1Pz8/PriuwDyswZytwZytwZyt0ZA5h3QL0blVf1KXH8Zqf/yLGvh/yyWXy7MBLcJ7zUtuApGkGTNmaOnSpVqxYoUOHz6sKVOm6OLFi5owYYIkady4cZo9e3bt9lOmTFFxcbGmTZumY8eOadOmTZo3b56mTp1q10sAAKBZiXryXu2b/K4GHv+jsiZ8YHcc3EJecQRQkkaOHKmzZ8/q1VdfVVFRke6//3598skntTeGfPnll7VH+iQpIiJCmzdv1vTp09W7d2916dJF06ZN08yZM+16CQAANDuD/jBWf9+erbgVKTo6rK+in+ljdyTcAl5TACUpJSXluqd8MzMzr1nr37+/duzY4eFUAAC0bHG5i3Si8y61GvtjlfTfrXbdgu2OhCbymlPAAADAMwKDA9Uqfa3aVX2tI/0nyNQYuyOhiSiAAADgO3UbfIeO/nKF4grXa3vyQrvjoIkogAAA4KbE/eoJZcTO1MC/ztT//j7L7jhoAgogAAC4aYMy39TBoIEKfeFpnT14+rt3QLNEAQQAADfN7zZfhWeulFs1+urB0aquqLY7EhqBAggAABokvE8nnUpbqd7/ylRW4ly746ARKIAAAKDB+kwfrL8n/UoJ/3hTu9/42O44aCAKIAAAaJTB6b9QXsfH1D31WZ3K/dLuOGgACiAAAGgUt69bPXJW6KI7SOeTfqyKCxV2R8JNogACAIBGC7krRKUfrFGPi/uUm8DXqbYUFEAAANAk947vp+wnf6OEvYu085W/2B0HN4ECCAAAmmzw2hTldvqhouY/p6+y8u2Og+9AAQQAAE3mcrt0T84HKnMH6/wjz6jyItcDNmcUQAAAcEsERwarZOlqRV3Yq5yE2XbHwQ1QAAEAwC1z34TvKyf5bSXsWaidczbYHQfXQQEEAAC3VMKfXtSO8GT1mPecTuX80+44qAcFEAAA3FIut0vR2ct0wd1O/xo6kusBmyEKIAAAuOXad2+v839Y9e31gIN/aXccXIUCCAAAPKLXxFhlP/GWEnb/Rrte/avdcXAFCiAAAPCYweteUl7YE7rrzfEq2MH3BTcXFEAAAOAxLrdLUdn/qQvuIBUnjVTlpUq7I0EUQAAA4GHt7wzR+XdXKfrCbuX84BW740AUQAAAYIFez8cp67G3lLDzbe2au8nuOI5HAQQAAJZIWD9deaGP687Xx6lw50m74zgaBRAAAFjC7eNSj+zluuRuo3OJz6jqG64HtAsFEAAAWCbkrhAV/36l7i7bqeyH5tgdx7EogAAAwFK9J/dX1qPzlbDj19r9errdcRyJAggAACyX8JcZygt9THfMHafCXV/ZHcdxKIAAAMBybl+37vrHcpW7b9OZpNGqLq+yO5KjUAABAIAtbo+6XWd++5HuK8lW1tA37I7jKBRAAABgm/tfiNf2h15T/N/f0P8syrA7jmNQAAEAgK0e/Hi29gf/QGEvj9HXh8/YHccRKIAAAMBWPv4+6rztv+VjqnQi4TmZ6hq7I3k9CiAAALBdeJ9OOjH3j+p39mP948mFdsfxehRAAADQLMS+OkwZ3/+F+v91tg4vz7M7jlejAAIAgGZjYMabOtK6r9pMekYl/zxvdxyvRQEEAADN
hn9rP7VLX6m2Vf/SofjJMjXG7kheiQIIAACala4PRurQ9Pf1/ZN/1tFV++2O45V87Q4AAABwtQELf6QTw4/p7oe72x3FK3EEEAAANEt3UP48hgIIAADgMBRAAAAAh6EAAgAAOAwFEAAAwGEogAAAAA5DAQQAAHAYCiAAAIDDUAABAAAchgIIAADgMBRAAAAAh6EAAgAAOAwFEAAAwGEogAAAAA7ja3eAlswYI0kqLS21OUnzUFlZqUuXLqm0tFR+fn52x/FazNkazNkazNkazLmuy3+3L/8ddyIKYBOUlZVJkiIiImxOAgAAGqqsrEzt2rWzO4YtXMbJ9beJampqVFBQoLZt28rlctkdx3alpaWKiIjQyZMnFRQUZHccr8WcrcGcrcGcrcGc6zLGqKysTJ07d5bb7cyr4TgC2ARut1vf+9737I7R7AQFBfEBYwHmbA3mbA3mbA3m/G9OPfJ3mTNrLwAAgINRAAEAABzGZ+7cuXPtDgHv4ePjo8GDB8vXl6sLPIk5W4M5W4M5W4M540rcBAIAAOAwnAIGAABwGAogAACAw1AAAQAAHIYCCAAA4DAUQDRacXGxxowZo6CgIAUHB2vixIm6cOHCTe1rjNEjjzwil8ul9evXezhpy9fQWRcXF+uFF15QdHS0brvtNnXt2lUvvviiSkpKLEzd/C1ZskSRkZEKDAxUXFycdu7cecPt16xZo7vvvluBgYHq1auX0tPTLUrasjVkzkuXLlV8fLzat2+v9u3bKzEx8Tv/XfCthr6fL1u5cqVcLpeSk5M9nBDNCQUQjTZmzBh99tln2rJlizZu3Kjt27dr8uTJN7XvokWL+Pq8BmjorAsKClRQUKC0tDQdPHhQy5cv1yeffKKJEydamLp5W7VqlWbMmKHU1FTt3btXMTExGjp0qM6cOVPv9jk5ORo1apQmTpyoffv2KTk5WcnJyTp48KDFyVuWhs45MzNTo0aNUkZGhnJzcxUREaEhQ4bo1KlTFidvWRo658vy8/P185//XPHx8RYlRbNhgEY4dOiQkWR27dpVu/bxxx8bl8tlTp06dcN99+3bZ7p06WIKCwuNJLNu3TpPx23RmjLrK61evdr4+/ubyspKT8RscWJjY83UqVNrn1dXV5vOnTub+fPn17v9008/bYYPH15nLS4uzvz0pz/1aM6WrqFzvlpVVZVp27atWbFihacieoXGzLmqqsoMGDDAvP/++2b8+PFmxIgRVkRFM8ERQDRKbm6ugoOD1a9fv9q1xMREud1u5eXlXXe/S5cuafTo0VqyZInCw8OtiNriNXbWVyspKVFQUBD/CaykiooK7dmzR4mJibVrbrdbiYmJys3NrXef3NzcOttL0tChQ6+7PRo356tdunRJlZWVCgkJ8VTMFq+xc3799dcVGhrKmQGH4i8BGqWoqEihoaF11nx9fRUSEqKioqLr7jd9+nQNGDBAI0aM8HREr9HYWV/p3LlzeuONN276FL23O3funKqrqxUWFlZnPSwsTEeOHKl3n6Kionq3v9l/AydqzJyvNnPmTHXu3Pma8o1/a8ycs7Ky9MEHH2j//v1WREQzxBFA1DFr1iy5XK4bPm72g/tqGzZs0LZt27Ro0aJbnLpl8uSsr1RaWqrhw4erZ8+e4psf0ZIsWLBAK1eu1Lp16xQYGGh3HK9RVlamsWPHaunSperQoYPdcWATjgCijpdfflnPPffcDbfp3r27wsPDr7m4uKqqSsXFxdc9tbtt2zYdP35cwcHBddafeuopxcfHKzMzsynRWxxPzvqysrIyDRs2TG3bttW6devk5+fX1NheoUOHDvLx8dHp06frrJ8+ffq6Mw0PD2/Q9mjcnC9LS0vTggUL9Omnn6p3796ejNniNXTOx48fV35+vh5//PHatZqaGknfnl04evSo7rzzTs+Ghv3svggRLdPlGxN2795du7Z58+Yb3phQWFhoDhw4UOchySxevNh88cUXVkVvcRoza2OMKSkpMQ888IBJSEgwFy9etCJqixIbG2tSUlJqn1dXV5suXbrc8CaQxx57rM5a//79uQnkOzR0zsYY89Zbb5mgoCCTm5trRUSv0JA5f/PNN9d8Fo8YMcI89NBD5sCBA6a8vNzK6LAJBRCNNmzYMNOnTx+Tl5dnsrKyTI8ePcyoUaNqf/7VV1+Z6Ohok5eXd93fIe4CvikNnXVJSYmJi4szvXr1Mp9//rkpLCysfVRVVdn1MpqVlStXmoCAALN8+XJz6NAhM3nyZBMcHGyKioqMMcaMHTvWzJo1q3b77Oxs4+vra9LS0szhw4dNamqq8fPzMwcOHLDrJbQIDZ3zggULjL+/v1m7dm2d921ZWZldL6FFaOicr8ZdwM5DAUSjff3112bUqFGmTZs2JigoyEyYMKHOh/SJEyeMJJORkXHd30EBvDkNnXVGRoaRVO/jxIkT9ryIZuh3v/ud6dq1q/H39zexsbFmx44dtT9LSEgw48ePr7P96tWrTVRUlPH39zf33nuv2bRpk8WJW6aGzLlbt271vm9TU1OtD97CNPT9fCUKoPO4jDHG6tPOAAAAsA93AQMAADgMBRAAAMBhKIAAAAAOQwEEAABwGAogAACAw1AAAQAAHIYCCAAA4DAUQAAAAIehAALAdRQWFmr06NGKioqS2+3WSy+9ZHckALglKIAAcB3l5eXq2LGj5syZo5iYGLvjAMAtQwEE4Fhnz55VeHi45s2bV7uWk5Mjf39/bd26VZGRkVq8eLHGjRundu3a2ZgUAG4tX7sDAIBdOnbsqGXLlik5OVlDhgxRdHS0xo4dq5SUFD388MN2xwMAj6EAAnC0Rx99VJMmTdKYMWPUr18/tW7dWvPnz7c7FgB4FKeAATheWlqaqqqqtGbNGn344YcKCAiwOxIAeBQFEIDjHT9+XAUFBaqpqVF+fr7dcQDA4zgFDMDRKioq9Oyzz2rkyJGKjo7W888/rwMHDig0NNTuaADgMRRAAI72yiuvqKSkRO+8847atGmj9PR0/eQnP9HGjRslSfv375ckXbhwQWfPntX+/fvl7++vnj172hkbAJrEZYwxdocAADtkZmYqKSlJGRkZGjRokCQpPz9fMTExWrBggaZMmSKXy3XNft26deNUMYAWjQIIAADgMNwEAgAA4DAUQAAAAIehAAIAADgMBRAAAMBhKIAAAAAOQwEEAABwGAogAACAw1AAAQAAHIYCCAAA4DAUQAAAAIehAAIAADgMBRAAAMBh/h9TwIf8lPCwfwAAAABJRU5ErkJggg==\n", + "text/plain": [ + "class=Graph name=y0 as a function of x1 implementation=class=GraphImplementation name=y0 as a function of x1 title=y0 as a function of x1 xTitle=x1 yTitle=y0 axes=ON grid=ON legendposition= legendFontSize=1 drawables=[class=Drawable name=Unnamed 
implementation=class=Curve name=Unnamed derived from class=DrawableImplementation name=Unnamed legend= data=class=Sample name=Unnamed implementation=class=SampleImplementation name=Unnamed size=129 dimension=2 data=[[-0.5,1.00001],[-0.492188,0.999982],[-0.484375,0.99989],[-0.476562,0.999737],[-0.46875,0.999523],[-0.460938,0.999248],[-0.453125,0.998912],[-0.445312,0.998515],[-0.4375,0.998057],[-0.429688,0.997538],[-0.421875,0.996958],[-0.414062,0.996318],[-0.40625,0.995616],[-0.398438,0.994854],[-0.390625,0.994031],[-0.382812,0.993148],[-0.375,0.992204],[-0.367188,0.991199],[-0.359375,0.990134],[-0.351562,0.989008],[-0.34375,0.987822],[-0.335938,0.986576],[-0.328125,0.98527],[-0.320312,0.983903],[-0.3125,0.982477],[-0.304688,0.98099],[-0.296875,0.979444],[-0.289062,0.977838],[-0.28125,0.976172],[-0.273438,0.974446],[-0.265625,0.972662],[-0.257812,0.970817],[-0.25,0.968914],[-0.242188,0.966951],[-0.234375,0.96493],[-0.226562,0.962849],[-0.21875,0.96071],[-0.210938,0.958512],[-0.203125,0.956256],[-0.195312,0.953941],[-0.1875,0.951568],[-0.179688,0.949137],[-0.171875,0.946648],[-0.164062,0.944102],[-0.15625,0.941497],[-0.148438,0.938836],[-0.140625,0.936117],[-0.132812,0.933341],[-0.125,0.930508],[-0.117188,0.927618],[-0.109375,0.924671],[-0.101562,0.921668],[-0.09375,0.918609],[-0.0859375,0.915494],[-0.078125,0.912323],[-0.0703125,0.909096],[-0.0625,0.905814],[-0.0546875,0.902476],[-0.046875,0.899084],[-0.0390625,0.895636],[-0.03125,0.892134],[-0.0234375,0.888578],[-0.015625,0.884967],[-0.0078125,0.881302],[0,0.877583],[0.0078125,0.873811],[0.015625,0.869986],[0.0234375,0.866107],[0.03125,0.862176],[0.0390625,0.858192],[0.046875,0.854155],[0.0546875,0.850067],[0.0625,0.845926],[0.0703125,0.841734],[0.078125,0.837491],[0.0859375,0.833196],[0.09375,0.828851],[0.101562,0.824455],[0.109375,0.820008],[0.117188,0.815512],[0.125,0.810966],[0.132812,0.80637],[0.140625,0.801725],[0.148438,0.797031],[0.15625,0.792289],[0.164062,0.787498],[0.171875,0.782659],[0.179688,0.777772],[0.1875,0.772838],[0.195312,0.767856],[0.203125,0.762828],[0.210938,0.757753],[0.21875,0.752632],[0.226562,0.747465],[0.234375,0.742252],[0.242188,0.736994],[0.25,0.731691],[0.257812,0.726343],[0.265625,0.720951],[0.273438,0.715515],[0.28125,0.710035],[0.289062,0.704511],[0.296875,0.698945],[0.304688,0.693336],[0.3125,0.687685],[0.320312,0.681992],[0.328125,0.676257],[0.335938,0.67048],[0.34375,0.664663],[0.351562,0.658805],[0.359375,0.652907],[0.367188,0.646969],[0.375,0.640991],[0.382812,0.634975],[0.390625,0.628919],[0.398438,0.622825],[0.40625,0.616693],[0.414062,0.610524],[0.421875,0.604317],[0.429688,0.598073],[0.4375,0.591793],[0.445312,0.585476],[0.453125,0.579124],[0.460938,0.572736],[0.46875,0.566313],[0.476562,0.559856],[0.484375,0.553364],[0.492188,0.546839],[0.5,0.54028]] color=blue fillStyle=solid lineStyle=solid pointStyle=none lineWidth=1,class=Drawable name=Unnamed implementation=class=Curve name=Unnamed derived from class=DrawableImplementation name=Unnamed legend= data=class=Sample name=Unnamed implementation=class=SampleImplementation name=Unnamed size=129 dimension=2 
data=[[-0.5,1],[-0.492188,0.999969],[-0.484375,0.999878],[-0.476562,0.999725],[-0.46875,0.999512],[-0.460938,0.999237],[-0.453125,0.998902],[-0.445312,0.998505],[-0.4375,0.998048],[-0.429688,0.997529],[-0.421875,0.99695],[-0.414062,0.99631],[-0.40625,0.995609],[-0.398438,0.994847],[-0.390625,0.994025],[-0.382812,0.993141],[-0.375,0.992198],[-0.367188,0.991193],[-0.359375,0.990129],[-0.351562,0.989003],[-0.34375,0.987818],[-0.335938,0.986572],[-0.328125,0.985266],[-0.320312,0.9839],[-0.3125,0.982473],[-0.304688,0.980987],[-0.296875,0.979441],[-0.289062,0.977835],[-0.28125,0.976169],[-0.273438,0.974444],[-0.265625,0.97266],[-0.257812,0.970816],[-0.25,0.968912],[-0.242188,0.96695],[-0.234375,0.964929],[-0.226562,0.962848],[-0.21875,0.960709],[-0.210938,0.958512],[-0.203125,0.956255],[-0.195312,0.953941],[-0.1875,0.951568],[-0.179688,0.949137],[-0.171875,0.946648],[-0.164062,0.944102],[-0.15625,0.941497],[-0.148438,0.938836],[-0.140625,0.936117],[-0.132812,0.933341],[-0.125,0.930508],[-0.117188,0.927618],[-0.109375,0.924671],[-0.101562,0.921668],[-0.09375,0.918609],[-0.0859375,0.915494],[-0.078125,0.912323],[-0.0703125,0.909096],[-0.0625,0.905814],[-0.0546875,0.902476],[-0.046875,0.899083],[-0.0390625,0.895636],[-0.03125,0.892134],[-0.0234375,0.888577],[-0.015625,0.884966],[-0.0078125,0.881301],[0,0.877583],[0.0078125,0.87381],[0.015625,0.869985],[0.0234375,0.866106],[0.03125,0.862174],[0.0390625,0.85819],[0.046875,0.854154],[0.0546875,0.850065],[0.0625,0.845924],[0.0703125,0.841732],[0.078125,0.837489],[0.0859375,0.833194],[0.09375,0.828848],[0.101562,0.824452],[0.109375,0.820006],[0.117188,0.815509],[0.125,0.810963],[0.132812,0.806367],[0.140625,0.801722],[0.148438,0.797028],[0.15625,0.792286],[0.164062,0.787495],[0.171875,0.782656],[0.179688,0.777769],[0.1875,0.772835],[0.195312,0.767854],[0.203125,0.762825],[0.210938,0.75775],[0.21875,0.752629],[0.226562,0.747462],[0.234375,0.74225],[0.242188,0.736992],[0.25,0.731689],[0.257812,0.726341],[0.265625,0.720949],[0.273438,0.715513],[0.28125,0.710034],[0.289062,0.704511],[0.296875,0.698945],[0.304688,0.693336],[0.3125,0.687686],[0.320312,0.681993],[0.328125,0.676258],[0.335938,0.670482],[0.34375,0.664666],[0.351562,0.658808],[0.359375,0.652911],[0.367188,0.646974],[0.375,0.640997],[0.382812,0.634981],[0.390625,0.628926],[0.398438,0.622833],[0.40625,0.616702],[0.414062,0.610533],[0.421875,0.604327],[0.429688,0.598084],[0.4375,0.591805],[0.445312,0.58549],[0.453125,0.579138],[0.460938,0.572752],[0.46875,0.56633],[0.476562,0.559874],[0.484375,0.553384],[0.492188,0.54686],[0.5,0.540302]] color=red fillStyle=solid lineStyle=solid pointStyle=none lineWidth=1]" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# plot 2nd output of our model with x1=0.5\n", + "graph = ot.ParametricFunction(responseSurface, [0], [0.5]).draw(-0.5, 0.5)\n", + "curve = ot.ParametricFunction(model, [0], [0.5]).draw(-0.5, 0.5).getDrawable(0)\n", + "curve.setColor('red')\n", + "graph.add(curve)\n", + "graph" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} diff --git a/python/doc/examples/meta_modeling/meta_modeling.rst 
b/python/doc/examples/meta_modeling/meta_modeling.rst index 30e79da384a..8c6a9bae532 100644 --- a/python/doc/examples/meta_modeling/meta_modeling.rst +++ b/python/doc/examples/meta_modeling/meta_modeling.rst @@ -46,6 +46,7 @@ Kriging metamodel kriging_advanced.ipynb kriging_beam_arbitrary_trend.ipynb kriging_hyperparameters_optimization.ipynb + kriging_robust.ipynb Fields metamodels ----------------- diff --git a/python/doc/theory/meta_modeling/kriging_robust.rst b/python/doc/theory/meta_modeling/kriging_robust.rst new file mode 100644 index 00000000000..aa3cf990199 --- /dev/null +++ b/python/doc/theory/meta_modeling/kriging_robust.rst @@ -0,0 +1,122 @@ +.. _kriging_robust: + +Robust estimation of kriging parameters +--------------------------------------- + +The robust estimation of the :math:`\vect{\theta} \in \Rset^{n_\theta}` hyperparameters +according to [gu2016]_ applies under the following conditions: + +- The output is of dimension :math:`d=1` +- The amplitude :math:`\sigma` is scalar +- The correlation function :math:`R` is the identity +- The active parameters are :math:`\sigma` and :math:`\vect{\theta}` + +The model likelihood writes: + +.. math:: + + \cL(\beta, \sigma, \vect{\theta}, \epsilon;(\vect{x}_k, \vect{y}_k)_{1 \leq k \leq N}) = \left(\frac{1}{2\pi \sigma^2}\right)^{\frac{N}{2}} |\det (\Sigma_{\vect{\theta}} + \epsilon \textit{I})|^{-\frac{1}{2}} \exp\Big\{ -\dfrac{1}{2\sigma^2}\Tr{\left( \vect{y}-\textbf{F} \beta \right)} (\Sigma_{\vect{\theta}}+\epsilon \textit{I})^{-1} \left( \vect{y}-\textbf{F} \beta \right) \Big\} + + +The maximum likelihood estimator of :math:`\beta` writes: + +.. math:: + + \hat{\beta} = (\Tr{F}(\Sigma_{\vect{\theta}}+\epsilon \textit{I})^{-1} F)^{-1} \Tr{F}(\Sigma_{\vect{\theta}}+\epsilon \textit{I})^{-1} \vect{y} + + +The maximum likelihood estimator of :math:`\sigma` writes: + +.. math:: + + \hat{\sigma} = \sqrt{\frac{\Tr{\vect{y}} \tilde{\Sigma}_{\vect{\theta},\epsilon} \vect{y}}{N-n_1}} + +where :math:`n_1` is the number of functions in the trend basis :math:`F` and :math:`\tilde{\Sigma}_{\vect{\theta},\epsilon}` denotes the matrix: + +.. math:: + + \tilde{\Sigma}_{\vect{\theta},\epsilon} = (\Sigma_{\vect{\theta}}+\epsilon \textit{I})^{-1} - (\Sigma_{\vect{\theta}}+\epsilon \textit{I})^{-1} F \left( \Tr{F}(\Sigma_{\vect{\theta}}+\epsilon \textit{I})^{-1} F \right)^{-1} \Tr{F}(\Sigma_{\vect{\theta}}+\epsilon \textit{I})^{-1} + + +Then the reduced likelihood writes: + +.. math:: + + \cL_{res}(\vect{\theta}, \epsilon;(\vect{x}_k, \vect{y}_k)_{1 \leq k \leq N}) = \frac{(N-n_1)^{\frac{N}{2}}}{(2\pi)^{\frac{N}{2}}} (\Tr{\vect{y}} \tilde{\Sigma}_{\vect{\theta},\epsilon} \vect{y})^{-\frac{N}{2}} |\det (\Sigma_{\vect{\theta}} + \epsilon \textit{I})|^{-\frac{1}{2}} \exp\Big\{ -\frac{N-n_1}{2} \Big\} + + +We choose the integrated likelihood: + +.. math:: + + \cL_{int}(\vect{\theta}, \epsilon;(\vect{x}_k, \vect{y}_k)_{1 \leq k \leq N}) = |\det (\Sigma_{\vect{\theta}} + \epsilon \textit{I})|^{-\frac{1}{2}} |\det\Big\{ \Tr{F}(\Sigma_{\vect{\theta}}+\epsilon \textit{I})^{-1} F \Big\}|^{-\frac{1}{2}} (\Tr{\vect{y}} \tilde{\Sigma}_{\vect{\theta},\epsilon} \vect{y})^{-\frac{N-n_1}{2}} + + +The Gu method consists in penalizing the integrated likelihood by a chosen prior :math:`f(\vect{\theta}, \epsilon)`: + +.. math:: + + \cL_{int}(\vect{\theta}, \epsilon;\vect{y}) f(\vect{\theta}, \epsilon) + +We actually maximize the penalized integrated likelihood in log scale: + +.. math:: + + \log \cL_{int}(\vect{\theta}, \epsilon;(\vect{x}_k, \vect{y}_k)_{1 \leq k \leq N}) + \log f(\vect{\theta}, \epsilon) = -\frac{1}{2} \log |\det (\Sigma_{\vect{\theta}} + \epsilon \textit{I})| -\frac{1}{2} \log |\det\Big\{ \Tr{F}(\Sigma_{\vect{\theta}}+\epsilon \textit{I})^{-1} F \Big\}| - \frac{N - n_1}{2} \log (\Tr{\vect{y}} \tilde{\Sigma}_{\vect{\theta},\epsilon} \vect{y}) + \log f(\vect{\theta}, \epsilon) + + +Jointly Robust Prior +~~~~~~~~~~~~~~~~~~~~ + +The first prior proposed by Gu writes: + +..
math:: + + f_J(\vect{\theta}, \epsilon) = c \left( \sum_{a=1}^n C_a \theta_a^{-1} + \epsilon \right)^{b_1} \exp \left( -b \left( \sum_{a=1}^n C_a \theta_a^{-1} + \epsilon \right) \right) \prod_{a=1}^n \theta_a^{-2} + +with :math:`b_0 > 0`, :math:`b_1 \in (0,1)` and :math:`b = b_0 N^{-\frac{1}{n}} (n + b_1)` + +For :math:`C_a` we use the formulation from [gu2018]_ (page 12), based on the maximum gap (range) of the design points, instead of the mean gap used in [gu2016]_: + +.. math:: + + C_a = |\max_{1 \leq k \leq N} x_{k,a} - \min_{1 \leq k \leq N} x_{k,a}| N^{-\frac{1}{n}} + +Reference Prior +~~~~~~~~~~~~~~~ + +The second prior proposed by Gu is built from the Fisher information of the integrated likelihood: + +.. math:: + + \mathcal{I}(\vect{\theta}, \epsilon)_{a,b} = Tr \left[ \tilde{\Sigma}_{\vect{\theta},\epsilon} \left(\frac{\partial}{\partial \theta_a} \Sigma_{\vect{\theta}}\right) \tilde{\Sigma}_{\vect{\theta},\epsilon} \left(\frac{\partial}{\partial \theta_b} \Sigma_{\vect{\theta}}\right) \right] + +.. math:: + + f_R(\vect{\theta}, \epsilon) = \sqrt{\det \mathcal{I}(\vect{\theta}, \epsilon)} + +Flat prior +~~~~~~~~~~ + +The flat prior :math:`f(\vect{\theta}, \epsilon) = 1` allows one to use the integrated likelihood with no penalization. + +Scale reparametrization +~~~~~~~~~~~~~~~~~~~~~~~ + +For the optimization of the penalized likelihood, Gu proposes the following parametrizations of the scale: + +- Standard parametrization: :math:`\vect{\theta} = (\theta_1, \dots, \theta_n)` +- Inverse parametrization: :math:`\vect{\mu} = (\theta_1^{-1}, \dots, \theta_n^{-1})` +- Log-inverse parametrization: :math:`\vect{\xi} = (-\log(\theta_1), \dots, -\log(\theta_n))` + + +.. topic:: API: + + - See :class:`~openturns.KrigingAlgorithm` + - See :class:`~openturns.GeneralLinearModelAlgorithm` + + +.. topic:: Examples: + + - See :doc:`/examples/meta_modeling/kriging_robust` + + +.. topic:: References: + + - [gu2016]_ + - [gu2018]_ + diff --git a/python/doc/theory/meta_modeling/meta_modeling.rst b/python/doc/theory/meta_modeling/meta_modeling.rst index 420bbd6348d..fa1e2a16f16 100644 --- a/python/doc/theory/meta_modeling/meta_modeling.rst +++ b/python/doc/theory/meta_modeling/meta_modeling.rst @@ -17,6 +17,7 @@ A special focus will be given to polynomial response surfaces. polynomial_least_squares polynomial_sparse_least_squares kriging + kriging_robust Functional chaos ---------------- diff --git a/python/src/CovarianceModelImplementation_doc.i.in b/python/src/CovarianceModelImplementation_doc.i.in index 50fdc3d5164..5d4f8d23476 100644 --- a/python/src/CovarianceModelImplementation_doc.i.in +++ b/python/src/CovarianceModelImplementation_doc.i.in @@ -373,6 +373,20 @@ We note :math:`C^{stat}(\vect{\tau})` for :math:`C(\vect{s}, \vect{s}+\vect{\tau %feature("docstring") OT::CovarianceModelImplementation::isStationary OT_CovarianceModel_isStationary_doc +// --------------------------------------------------------------------- + +%define OT_CovarianceModel_isComposite_doc +"Test whether the model is defined from other models. + +Returns +------- +isComposite : bool + *True* if the model is composite."
+%enddef +%feature("docstring") OT::CovarianceModelImplementation::isComposite +OT_CovarianceModel_isComposite_doc + + // --------------------------------------------------------------------- %define OT_CovarianceModel_partialGradient_doc @@ -618,3 +632,38 @@ description : :class:`~openturns.Description` %feature("docstring") OT::CovarianceModelImplementation::getFullParameterDescription OT_CovarianceModel_getFullParameterDescription_doc +// --------------------------------------------------------------------- + +%define OT_CovarianceModel_setScaleParametrization_doc +"Accessor to the scale parametrization. + +Parameters +---------- +scale_parametrization : int + The type of parametrization for the scale parameter. + Available values: + + - CovarianceModel.STANDARD: :math:`\vect{\theta} = (\theta_1, \dots, \theta_n)` + - CovarianceModel.INVERSE: :math:`\vect{\mu} = (\theta_1^{-1}, \dots, \theta_n^{-1})` + - CovarianceModel.LOGINVERSE: :math:`\vect{\xi} = (-\log(\theta_1), \dots, -\log(\theta_n))`" +%enddef +%feature("docstring") OT::CovarianceModelImplementation::setScaleParametrization +OT_CovarianceModel_setScaleParametrization_doc + +// --------------------------------------------------------------------- + +%define OT_CovarianceModel_getScaleParametrization_doc +"Accessor to the scale parametrization. + +Returns +------- +scale_parametrization : int + The type of parametrization for the scale parameter. + Available values: + + - CovarianceModel.STANDARD: :math:`\vect{\theta} = (\theta_1, \dots, \theta_n)` + - CovarianceModel.INVERSE: :math:`\vect{\mu} = (\theta_1^{-1}, \dots, \theta_n^{-1})` + - CovarianceModel.LOGINVERSE: :math:`\vect{\xi} = (-\log(\theta_1), \dots, -\log(\theta_n))`" +%enddef +%feature("docstring") OT::CovarianceModelImplementation::getScaleParametrization +OT_CovarianceModel_getScaleParametrization_doc diff --git a/python/src/CovarianceModel_doc.i.in b/python/src/CovarianceModel_doc.i.in index 766c48daeb7..90c985113a7 100644 --- a/python/src/CovarianceModel_doc.i.in +++ b/python/src/CovarianceModel_doc.i.in @@ -40,6 +40,8 @@ OT_CovarianceModel_getInputDimension_doc OT_CovarianceModel_isDiagonal_doc %feature("docstring") OT::CovarianceModel::isStationary OT_CovarianceModel_isStationary_doc +%feature("docstring") OT::CovarianceModel::isComposite +OT_CovarianceModel_isComposite_doc %feature("docstring") OT::CovarianceModel::partialGradient OT_CovarianceModel_partialGradient_doc %feature("docstring") OT::CovarianceModel::parameterGradient @@ -66,3 +68,7 @@ OT_CovarianceModel_setFullParameter_doc OT_CovarianceModel_getFullParameter_doc %feature("docstring") OT::CovarianceModel::getFullParameterDescription OT_CovarianceModel_getFullParameterDescription_doc +%feature("docstring") OT::CovarianceModel::setScaleParametrization +OT_CovarianceModel_setScaleParametrization_doc +%feature("docstring") OT::CovarianceModel::getScaleParametrization +OT_CovarianceModel_getScaleParametrization_doc diff --git a/python/src/GeneralLinearModelAlgorithm_doc.i.in b/python/src/GeneralLinearModelAlgorithm_doc.i.in index b7a0605dca1..471c5af0356 100644 --- a/python/src/GeneralLinearModelAlgorithm_doc.i.in +++ b/python/src/GeneralLinearModelAlgorithm_doc.i.in @@ -410,3 +410,41 @@ Parameters ---------- noise : sequence of positive float The noise variance :math:`\tau_k^2` of each output value." + +// --------------------------------------------------------------------- + +%define OT_GeneralLinearModelAlgorithm_setScalePrior_doc +"Accessor to the scale prior type. + +Allows one to choose a Bayesian prior.
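+The chosen prior penalizes the integrated likelihood during the optimization of the covariance parameters, see [gu2016]_.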
+ +Parameters +---------- +scale_prior : int + The type of prior used to penalize the likelihood of the parameters. + Available values: + + - GeneralLinearModelAlgorithm.NONE: no Bayesian prior (default) + - GeneralLinearModelAlgorithm.JOINTLYROBUST: jointly-robust prior + - GeneralLinearModelAlgorithm.REFERENCE: reference prior + - GeneralLinearModelAlgorithm.FLAT: flat prior + +Notes +----- +Bayesian priors are only available with a 1-d output." +%enddef +%feature("docstring") OT::GeneralLinearModelAlgorithm::setScalePrior +OT_GeneralLinearModelAlgorithm_setScalePrior_doc + +// --------------------------------------------------------------------- + +%define OT_GeneralLinearModelAlgorithm_getScalePrior_doc +"Accessor to the scale prior type. + +Returns +------- +scale_prior : int + The type of prior used to penalize the likelihood of the parameters." +%enddef +%feature("docstring") OT::GeneralLinearModelAlgorithm::getScalePrior +OT_GeneralLinearModelAlgorithm_getScalePrior_doc diff --git a/python/src/KrigingAlgorithm_doc.i.in b/python/src/KrigingAlgorithm_doc.i.in index d2e48e2cbb5..dd4ec74ef97 100644 --- a/python/src/KrigingAlgorithm_doc.i.in +++ b/python/src/KrigingAlgorithm_doc.i.in @@ -322,3 +322,7 @@ The setter update the implementation and require new evaluation. We might also use the ResourceMap key to set the method when instantiating the algorithm. For that purpose, we can use ResourceMap.SetAsString(`GeneralLinearModelAlgorithm-LinearAlgebra`, key) with `key` being `HMAT` or `LAPACK`." +%feature("docstring") OT::KrigingAlgorithm::setScalePrior +OT_GeneralLinearModelAlgorithm_setScalePrior_doc +%feature("docstring") OT::KrigingAlgorithm::getScalePrior +OT_GeneralLinearModelAlgorithm_getScalePrior_doc diff --git a/python/test/CMakeLists.txt b/python/test/CMakeLists.txt index f922c464c33..48a62442902 100644 --- a/python/test/CMakeLists.txt +++ b/python/test/CMakeLists.txt @@ -662,6 +662,7 @@ ot_pyinstallcheck_test (FunctionalChaos_gsobol_sparse) ot_pyinstallcheck_test (FunctionalChaos_nd) ot_pyinstallcheck_test (FunctionalChaosSobolIndices_std) ot_pyinstallcheck_test (KrigingAlgorithm_std) +ot_pyinstallcheck_test (KrigingAlgorithm_gu) ot_pyinstallcheck_test (KrigingRandomVector_std) ot_pyinstallcheck_test (MetaModelValidation_std) ot_pyinstallcheck_test (GeneralLinearModelAlgorithm_std) diff --git a/python/test/t_KrigingAlgorithm_gu.expout b/python/test/t_KrigingAlgorithm_gu.expout new file mode 100644 index 00000000000..b7ff52ce136 --- /dev/null +++ b/python/test/t_KrigingAlgorithm_gu.expout @@ -0,0 +1,15 @@ +parameters= [0.0100001] +parameters= [100] +parameters= [4.60517] +parameters= [0.108418] +parameters= [0.186263] +parameters= [0.440023] +parameters= [0.0100001] +parameters= [100] +parameters= [4.60517] +Test A : prior = REFERENCE, parametrization = STANDARD +Test B : prior = REFERENCE, parametrization = INVERSE +Test C : prior = REFERENCE, parametrization = LOGINVERSE +Test D : prior = JOINTLYROBUST, parametrization = STANDARD +Test E : prior = JOINTLYROBUST, parametrization = INVERSE +Test F : prior = JOINTLYROBUST, parametrization = LOGINVERSE diff --git a/python/test/t_KrigingAlgorithm_gu.py b/python/test/t_KrigingAlgorithm_gu.py new file mode 100755 index 00000000000..b1f5dcbb656 --- /dev/null +++ b/python/test/t_KrigingAlgorithm_gu.py @@ -0,0 +1,151 @@ +#!
/usr/bin/env python + +from __future__ import print_function +import openturns as ot +from openturns.testing import assert_almost_equal + +ot.TESTPREAMBLE() + +sampleSize = 6 +dimension = 1 + +f = ot.SymbolicFunction(['x0'], ['x0 * sin(x0)']) + +X = ot.Sample([1.0, 3.0, 5.0, 6.0, 7.0, 8.0], dimension) +Y = f(X) + +# Test 1: loop over all priors and parametrizations +for prior in [ot.GeneralLinearModelAlgorithm.NONE, ot.GeneralLinearModelAlgorithm.JOINTLYROBUST, ot.GeneralLinearModelAlgorithm.REFERENCE]: + for parametrization in [ot.CovarianceModelImplementation.STANDARD, ot.CovarianceModelImplementation.INVERSE, ot.CovarianceModelImplementation.LOGINVERSE]: + + ot.RandomGenerator.SetSeed(0) + + # create algorithm + basis = ot.ConstantBasisFactory(dimension).build() + covarianceModel = ot.SquaredExponential([1e-05], [4.11749]) + covarianceModel.setScaleParametrization(parametrization) + + algo = ot.KrigingAlgorithm(X, Y, covarianceModel, basis) + algo.setScalePrior(prior) + algo.run() + + # retrieve the result + result = algo.getResult() + print('parameters=', result.getCovarianceModel().getParameter()) + #print("X=", X) + #print("f(X)=", Y) + + assert_almost_equal(result.getMetaModel()(X), Y) + assert_almost_equal(result.getResiduals(), [1.32804e-07], 1e-3, 1e-3) + assert_almost_equal(result.getRelativeErrors(), [5.20873e-21]) + + # Kriging variance is 0 on learning points + var = result.getConditionalCovariance(X) + + # assert_almost_equal cannot be applied to matrices, + # so we compare flattened Points instead + covariancePoint = ot.Point(var.getImplementation()) + theoreticalVariance = ot.Point(sampleSize * sampleSize) + assert_almost_equal(covariancePoint, theoreticalVariance, 8.95e-7, 8.95e-7) + +# Tests with MaternModel +scale = [1.0] +amplitude = [4.123456] +nu = 0.5 + +# Test A : prior = REFERENCE, parametrization = STANDARD +print("Test A : prior = REFERENCE, parametrization = STANDARD") +prior = ot.GeneralLinearModelAlgorithm.REFERENCE +parametrization = ot.CovarianceModelImplementation.STANDARD +covarianceModel = ot.MaternModel(scale, amplitude, nu) +covarianceModel.setScaleParametrization(parametrization) +algo = ot.KrigingAlgorithm(X, Y, covarianceModel, basis, False) +algo.setScalePrior(prior) +algo.run() +result = algo.getResult() +scaleOTGU = result.getCovarianceModel().getParameter() +assert_almost_equal(scaleOTGU[0], 0.8296342228649921, 1.e-1) # Not accurate +rllfunction = algo.getReducedLogLikelihoodFunction() +objective_value = rllfunction(scaleOTGU) +assert_almost_equal(objective_value[0], -11.164598619012276, 1.e-3) + +# Test B : prior = REFERENCE, parametrization = INVERSE +print("Test B : prior = REFERENCE, parametrization = INVERSE") +prior = ot.GeneralLinearModelAlgorithm.REFERENCE +parametrization = ot.CovarianceModelImplementation.INVERSE +covarianceModel = ot.MaternModel(scale, amplitude, nu) +covarianceModel.setScaleParametrization(parametrization) +algo = ot.KrigingAlgorithm(X, Y, covarianceModel, basis, False) +algo.setScalePrior(prior) +algo.run() +result = algo.getResult() +scaleOTGU = result.getCovarianceModel().getParameter() +assert_almost_equal(scaleOTGU[0], 0.006700808318183162, 1.e1) # Not accurate +rllfunction = algo.getReducedLogLikelihoodFunction() +objective_value = rllfunction(scaleOTGU) +assert_almost_equal(objective_value[0], -9.508456344459574, 1.e-2) + +# Test C : prior = REFERENCE, parametrization = LOGINVERSE +print("Test C : prior = REFERENCE, parametrization = LOGINVERSE") +prior = ot.GeneralLinearModelAlgorithm.REFERENCE +parametrization = ot.CovarianceModelImplementation.LOGINVERSE
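+# with the LOGINVERSE parametrization, getParameter() returns xi = -log(theta), hence the negative value asserted below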
+covarianceModel = ot.MaternModel(scale, amplitude, nu) +covarianceModel.setScaleParametrization(parametrization) +algo = ot.KrigingAlgorithm(X, Y, covarianceModel, basis, False) +algo.setScalePrior(prior) +algo.run() +result = algo.getResult() +scaleOTGU = result.getCovarianceModel().getParameter() +assert_almost_equal(scaleOTGU[0], -0.6160855351429757, 1.e-2) +rllfunction = algo.getReducedLogLikelihoodFunction() +objective_value = rllfunction(scaleOTGU) +assert_almost_equal(objective_value[0], -10.959574974572401, 1.e-6) + +# Test D : prior = JOINTLYROBUST, parametrization = STANDARD +print("Test D : prior = JOINTLYROBUST, parametrization = STANDARD") +prior = ot.GeneralLinearModelAlgorithm.JOINTLYROBUST +parametrization = ot.CovarianceModelImplementation.STANDARD +covarianceModel = ot.MaternModel(scale, amplitude, nu) +covarianceModel.setScaleParametrization(parametrization) +algo = ot.KrigingAlgorithm(X, Y, covarianceModel, basis, False) +algo.setScalePrior(prior) +algo.run() +result = algo.getResult() +scaleOTGU = result.getCovarianceModel().getParameter() +assert_almost_equal(scaleOTGU[0], 0.10613379308918715, 1.e-4) +rllfunction = algo.getReducedLogLikelihoodFunction() +objective_value = rllfunction(scaleOTGU) +assert_almost_equal(objective_value[0], -9.680803951403554, 1.e-8) + +# Test E : prior = JOINTLYROBUST, parametrization = INVERSE +print("Test E : prior = JOINTLYROBUST, parametrization = INVERSE") +prior = ot.GeneralLinearModelAlgorithm.JOINTLYROBUST +parametrization = ot.CovarianceModelImplementation.INVERSE +covarianceModel = ot.MaternModel(scale, amplitude, nu) +covarianceModel.setScaleParametrization(parametrization) +algo = ot.KrigingAlgorithm(X, Y, covarianceModel, basis, False) +algo.setScalePrior(prior) +algo.run() +result = algo.getResult() +scaleOTGU = result.getCovarianceModel().getParameter() +assert_almost_equal(scaleOTGU[0], 0.32282538656275017, 1.e-4) +rllfunction = algo.getReducedLogLikelihoodFunction() +objective_value = rllfunction(scaleOTGU) +assert_almost_equal(objective_value[0], -11.688006015506188, 1.e-8) + +# Test F : prior = JOINTLYROBUST, parametrization = LOGINVERSE +print("Test F : prior = JOINTLYROBUST, parametrization = LOGINVERSE") +prior = ot.GeneralLinearModelAlgorithm.JOINTLYROBUST +parametrization = ot.CovarianceModelImplementation.LOGINVERSE +covarianceModel = ot.MaternModel(scale, amplitude, nu) +covarianceModel.setScaleParametrization(parametrization) +algo = ot.KrigingAlgorithm(X, Y, covarianceModel, basis, False) +algo.setScalePrior(prior) +algo.run() +result = algo.getResult() +scaleOTGU = result.getCovarianceModel().getParameter() +assert_almost_equal(scaleOTGU[0], 1.5708551041440961, 1.e-6) +rllfunction = algo.getReducedLogLikelihoodFunction() +objective_value = rllfunction(scaleOTGU) +assert_almost_equal(objective_value[0], -11.638534909763555, 1.e-8) + diff --git a/validation/src/KrigingGu/Gu.py b/validation/src/KrigingGu/Gu.py new file mode 100644 index 00000000000..be71de14738 --- /dev/null +++ b/validation/src/KrigingGu/Gu.py @@ -0,0 +1,295 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Wed Oct 30 08:49:05 2019 + +@author: Joseph MURÉ +""" +from operator import mul +from functools import reduce +import numpy as np +import openturns as ot +from scipy.optimize import minimize + +def matern(x, nu, theta): + """Direct implementation of the Matérn kernel with the standard parametrization.
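+ The implemented formula, read directly from the return expression below, is + K(x) = 2^(1 - nu) / Gamma(nu) * e^nu * BesselK(nu, e), with e = sqrt(2 * nu) * ||x / theta||.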
+ + Arguments: + nu : regularity (smoothness) parameter + x : np.array representing the algebraic gap between two points + theta : np.array of the same size as x containing the correlation lengths + + """ + if len(x)!=len(theta): + raise ValueError("The length of x is {!r} whereas the length of theta is {!r}.".format(len(x), len(theta))) + x = np.array(x) + theta = np.array(theta) + if not np.any(x): # x is the zero gap vector + return 1. + expression = np.sqrt(2 * nu) * np.linalg.norm(x / theta) + return np.exp((1. - nu) * np.log(2.)) / ot.SpecFunc_Gamma(nu) * \ + np.exp(nu * np.log(expression)) * ot.SpecFunc_BesselK(nu, expression) + +def derivee_matern(x, indice_derive, nu, theta): + """Derivative of the Matérn kernel with the standard parametrization. + This is the partial derivative with respect to the indice_derive-th component of the parameter theta. + + Arguments: + indice_derive : index of the correlation length with respect to which the derivative is taken + nu : regularity (smoothness) parameter + x : np.array representing the algebraic gap between two points + theta : np.array of the same size as x containing the correlation lengths + + """ + if len(x)!=len(theta): + raise ValueError("The length of x is {!r} whereas the length of theta is {!r}.".format(len(x), len(theta))) + if indice_derive<0: + raise ValueError("indice_derive is {!r}: it should be a non-negative integer.".format(indice_derive)) + if indice_derive>=len(theta): + raise ValueError("indice_derive is {!r} but theta only has {!r} components!".format(indice_derive, len(theta))) + x = np.array(x) + theta = np.array(theta) + if not np.any(x): # x is the zero gap vector + return 0. + expression = np.sqrt(2 * nu) * np.linalg.norm(x / theta) + return nu / ot.SpecFunc_Gamma(nu) / np.power(2., nu-2.) * x[indice_derive]**2 * \ + theta[indice_derive]**(-3) * \ + np.power(expression, nu-1.) * ot.SpecFunc_BesselK(nu - 1., expression) + + +class Gu: + def __init__(self, input_Sample, output_Sample, trend_Basis, initial_CovarianceModel, prior='reference', parametrization='standard'): + if type(input_Sample)!=ot.typ.Sample: + raise TypeError("input_Sample should be an OpenTURNS Sample, not {}.".format(type(input_Sample))) + if type(output_Sample)!=ot.typ.Sample: + raise TypeError("output_Sample should be an OpenTURNS Sample, not {}.".format(type(output_Sample))) + if output_Sample.getDimension()!=1: + raise ValueError("output_Sample should be of dimension 1.") + if output_Sample.getSize()!=input_Sample.getSize(): + message = "input_Sample has size {} and ".format(input_Sample.getSize()) + message += "output_Sample has size {}, ".format(output_Sample.getSize()) + message += "but they should have the same size."
+            raise ValueError(message)
+        if type(trend_Basis)!=ot.func.Basis:
+            raise TypeError("trend_Basis should be an OpenTURNS Basis, not {}.".format(type(trend_Basis)))
+
+        if type(initial_CovarianceModel)==ot.statistics.ProductCovarianceModel:
+            collection = initial_CovarianceModel.getCollection()
+
+            for model in collection:
+                if model.getImplementation().getClassName()!='MaternModel':
+                    raise TypeError("In a ProductCovarianceModel, all models should be Matern, not of type {}.".format(model.getImplementation().getClassName()))
+                if model.getFullParameter()[-1]!=collection[0].getFullParameter()[-1]:
+                    raise ValueError("In a ProductCovarianceModel, all models should have the same nu.")
+
+            self._nu = collection[0].getFullParameter()[-1]
+        elif type(initial_CovarianceModel)!=ot.statistics.MaternModel:
+            raise TypeError("initial_CovarianceModel should be an OpenTURNS MaternModel, not {}.".format(type(initial_CovarianceModel)))
+        else:
+            self._nu = initial_CovarianceModel.getNu()
+
+        if initial_CovarianceModel.getScale().getSize()!=input_Sample.getDimension():
+            initial_scale = str(list(np.array(initial_CovarianceModel.getScale())))
+            message = "The scale parameter of initial_CovarianceModel is {}, ".format(initial_scale)
+            message += "but input_Sample is of dimension {}. ".format(input_Sample.getDimension())
+            message += "The dimensions do not match."
+            raise ValueError(message)
+
+        if parametrization not in ['standard', 'inverse', 'log-inverse']:
+            raise ValueError("parametrization should be 'standard', 'inverse' or 'log-inverse'.")
+
+        if prior not in ['reference', 'jointly-robust']:
+            raise ValueError("prior should be either 'reference' or 'jointly-robust'.")
+
+        self._parametrization = parametrization
+        self._prior = prior
+
+        self._input_Sample = input_Sample # Should it be a deepcopy?
+ self._algebraic_gaps = self.compute_algebraic_gaps() + self._output_Sample = output_Sample + self._trend_Basis = trend_Basis + self._current_CovarianceModel = initial_CovarianceModel + + self._W, self._H = self.compute_W() + if self._trend_Basis.getSize()!=0: + self._additional_loglikelihood_term = float(-0.5 * np.log(np.linalg.det(np.transpose(self._H) @ self._H))) + else: + self._additional_loglikelihood_term = 0.0 + + self._restricted_output = np.transpose(self._W) @ output_Sample + + if type(initial_CovarianceModel)==ot.statistics.ProductCovarianceModel: + self._list_dim1_MaternModels = [] + placeholder_output_Sample = ot.Sample(np.ones((input_Sample.getSize(),1))) + for index, length_scale in enumerate(initial_CovarianceModel.getScale()): + dim1_input_Sample = input_Sample.getMarginal(index) + dim1_CovarianceModel = ot.MaternModel([length_scale], [1.0], self._nu) + self._list_dim1_MaternModels.append(Gu(dim1_input_Sample, placeholder_output_Sample, ot.Basis(0), dim1_CovarianceModel)) + + if self._prior=='jointly-robust': + self._b0 = 1.0 + self._b1 = 0.2 + self._b = self._b0 * np.exp(-1./self._input_Sample.getDimension() * np.log(self._input_Sample.getSize())) * (self._input_Sample.getDimension() + self._b1) + #non_zero_gaps = self._input_Sample.getSize() * (self._input_Sample.getSize() - 1) + #self._Ca = np.apply_over_axes(a=np.abs(self.compute_algebraic_gaps()), axes=(0,1), func=np.sum)[0, 0, :] / non_zero_gaps + self._Ca = np.apply_over_axes(a=self.compute_algebraic_gaps(), axes=(0,1), func=np.max)[0, 0, :] / self._input_Sample.getSize()**(1./self._input_Sample.getDimension()) + + self.update_current_correlation() + #self.update_current_log_likelihood() + + + + def compute_W(self): + if self._trend_Basis.getSize()==0: + return np.identity(self._input_Sample.getSize()), 0 + else: + if self._trend_Basis.getSize()>=self._input_Sample.getSize(): + message = "Size of trend_Basis {} >= ".format(self._trend_Basis.getSize()) + message += "size of input/output_Sample {}.".format(self._input_Sample.getSize()) + raise ValueError(message) + + matrice_H = self._trend_Basis.build(0)(self._input_Sample) + for dim in range(1, self._trend_Basis.getSize()): + matrice_H = np.concatenate((matrice_H, self._trend_Basis.build(dim)(self._input_Sample)), axis=1) + + u, s, vh = np.linalg.svd(matrice_H) + return u[:, self._trend_Basis.getSize():], matrice_H + + def compute_algebraic_gaps(self): + soustractions = np.subtract.outer(self._input_Sample, self._input_Sample) + ecarts_algebriques = np.zeros((soustractions.shape[0], soustractions.shape[2], soustractions.shape[1])) + for indice in range(ecarts_algebriques.shape[2]): + ecarts_algebriques[:, :, indice] = soustractions[:, indice, :, indice] + return ecarts_algebriques + + def compute_matern_correlation_matrix(self): + """Renvoie la matrice de corrélation correspondant à un noyau de Matérn utilisant la paramétrisation standard. 
+ + Arguments : + nu : paramètre de régularité + plan_xp : ot.Sample contenant le plan d'expériences + theta : np.array de même taille que x contenant les longueurs de corrélation + + """ + if type(self._current_CovarianceModel)==ot.statistics.MaternModel: + matrice_corr = np.apply_along_axis(matern, + 2, + self._algebraic_gaps, + nu=self._nu, + theta=self._current_CovarianceModel.getScale()) + else: + matrices_corr_1d = [gu_instance._current_correlation_matrix for gu_instance in self._list_dim1_MaternModels] + matrice_corr = reduce(mul, matrices_corr_1d, 1) + return matrice_corr + + def compute_matern_derivative_matrix(self, diff_index): + """Renvoie la matrice de corrélation dérivée par rapport à la indice_derive-ième longeur de corrélation pour un noyau de Matérn. + + Arguments : + indice_derive : numéro de la longueur de corrélation par rapport à laquelle est faite la dérivation + nu : paramètre de régularité + plan_xp : ot.Sample contenant le plan d'expériences + theta : np.array de même taille que x contenant les longueurs de corrélation + + """ + if type(self._current_CovarianceModel)==ot.statistics.MaternModel: + matrice_corr_derivee = np.apply_along_axis(derivee_matern, + 2, + self._algebraic_gaps, + indice_derive=diff_index, + nu=self._nu, + theta=self._current_CovarianceModel.getScale()) + else: + matrices_corr_1d_without_diff_index = [gu_instance._current_correlation_matrix for + gu_instance in self._list_dim1_MaternModels[0:diff_index] \ + + self._list_dim1_MaternModels[diff_index+1:]] + matrice_corr_derivee = reduce(mul, matrices_corr_1d_without_diff_index, 1) + #print(matrice_corr_derivee) + matrice_corr_derivee *= self._list_dim1_MaternModels[diff_index].compute_matern_derivative_matrix(0) + return matrice_corr_derivee + + def element_info_fisher_matern(self, ligne, colonne): + """Renvoie un élément de la matrice d'information de Fisher.""" + if ligne==self._input_Sample.getDimension() or colonne==self._input_Sample.getDimension(): + if ligne==self._input_Sample.getDimension() and colonne==self._input_Sample.getDimension(): + return self._input_Sample.getSize() - self._trend_Basis.getSize() + derivee = np.transpose(self._W) @ self.compute_matern_derivative_matrix(min(ligne, colonne)) @ self._W + return np.trace(self._current_correlation_inverse @ derivee) + derivee_lig = np.transpose(self._W) @ self.compute_matern_derivative_matrix(ligne) @ self._W + facteur_lig = self._current_correlation_inverse @ derivee_lig + derivee_col = np.transpose(self._W) @ self.compute_matern_derivative_matrix(colonne) @ self._W + facteur_col = self._current_correlation_inverse @ derivee_col + return np.trace(np.matmul(facteur_lig, facteur_col)) + + def info_fisher_matern(self): + """Renvoie la matrice d'information de Fisher""" + nb_lignes_colonnes = self._input_Sample.getDimension()+1 + matrice_info_fisher = np.zeros((nb_lignes_colonnes, nb_lignes_colonnes)) + for ligne in range(nb_lignes_colonnes): + for colonne in range(nb_lignes_colonnes): + if colonne>=ligne: + matrice_info_fisher[ligne, colonne] = self.element_info_fisher_matern(ligne, colonne) + else: + matrice_info_fisher[ligne, colonne] = matrice_info_fisher[colonne, ligne] + return matrice_info_fisher + + def update_current_correlation(self): + nugget = self._current_CovarianceModel.getNuggetFactor() + + if type(self._current_CovarianceModel)==ot.statistics.ProductCovarianceModel: + for index, gu_instance in enumerate(self._list_dim1_MaternModels): + 
gu_instance._current_CovarianceModel.setScale([self._current_CovarianceModel.getScale()[index]])
+                gu_instance.update_current_correlation()
+
+        correlation_matrix = self.compute_matern_correlation_matrix()
+        correlation_matrix += nugget * np.identity(self._input_Sample.getSize())
+
+        self._current_correlation_matrix = np.transpose(self._W) @ correlation_matrix @ self._W
+        #self._current_correlation_inverse = np.linalg.inv(self._current_correlation_matrix)
+
+    def update_current_correlation_inverse(self):
+        self._current_correlation_inverse = np.linalg.inv(self._current_correlation_matrix)
+
+
+    def update_current_log_likelihood(self):
+        mahalanobis = np.transpose(self._restricted_output) @ self._current_correlation_inverse @ self._restricted_output
+
+        log_likelihood = -0.5 * np.log(np.linalg.det(self._current_correlation_matrix))
+        log_likelihood -= 0.5 * self._restricted_output.size * np.log(mahalanobis)
+
+        # To be consistent with Julien's implementation
+        log_likelihood += self._additional_loglikelihood_term
+
+        self._current_log_likelihood = float(log_likelihood)
+
+    def update_current_log_prior(self):
+        if self._prior=='reference':
+            self._current_log_prior = float(0.5 * np.log(np.linalg.det(self.info_fisher_matern())))
+        else:
+            main_term = (self._Ca / np.array(self._current_CovarianceModel.getScale())).sum() + self._current_CovarianceModel.getNuggetFactor()
+            self._main_term = main_term
+            self._current_log_prior = self._b1 * np.log(main_term) - self._b * main_term - 2 * np.log(self._current_CovarianceModel.getScale()).sum()
+
+            if self._parametrization=='inverse':
+                self._current_log_prior += 2 * np.log(self._current_CovarianceModel.getScale()).sum()
+
+            if self._parametrization=='log-inverse':
+                self._current_log_prior += np.log(self._current_CovarianceModel.getScale()).sum()
+
+    def set_scale(self, scale_vector):
+        if (np.array(scale_vector)<1e-12).sum()>0:
+            return np.inf
+        self._current_CovarianceModel.setScale(scale_vector)
+        self.update_current_correlation()
+        self.update_current_correlation_inverse()
+        self.update_current_log_likelihood()
+        self.update_current_log_prior()
+        return - self._current_log_prior - self._current_log_likelihood  # the optimizer minimizes
+
+    def optimize_scale(self):
+        #bounds = [(1e-12,10)] * self._input_Sample.getDimension()
+        return minimize(self.set_scale, self._current_CovarianceModel.getScale())#, bounds=bounds)
+
+
+
+
diff --git a/validation/src/KrigingGu/ValidGuPython.py b/validation/src/KrigingGu/ValidGuPython.py
new file mode 100755
index 00000000000..96c49e9b10a
--- /dev/null
+++ b/validation/src/KrigingGu/ValidGuPython.py
@@ -0,0 +1,1056 @@
+
+# coding: utf-8
+
+# # Implémentation Python de la méthode de Gu
+#
+# L'implémentation est limitée aux noyaux de Matérn et au prior de référence. Le Jointly Robust Prior n'est pas implémenté.
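+#
+# The `Gu` class defined in Gu.py does, however, cover both priors. A minimal usage
+# sketch (hypothetical data, assuming Gu.py sits next to this script):
+#
+#     from Gu import Gu
+#     X = ot.Sample([[1.0], [3.0], [5.0], [6.0], [7.0], [8.0]])
+#     Y = ot.SymbolicFunction(['x0'], ['x0 * sin(x0)'])(X)
+#     gu = Gu(X, Y, ot.ConstantBasisFactory(1).build(),
+#             ot.MaternModel([1.0], [1.0], 2.5),
+#             prior='reference', parametrization='standard')
+#     res = gu.optimize_scale()  # scipy.optimize result, optimal scale in res.x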
+
+# In[1]:
+
+
+import openturns as ot
+import numpy as np
+import matplotlib.pyplot as plt
+
+
+# ## Création d'un plan d'expériences et d'une réalisation d'un processus gaussien

+# In[2]:
+
+
+NB_PTS_PLAN_EXPERIENCE = 20
+DIMENSION_SPATIALE = 3
+
+TREND = ot.SymbolicFunction(['x1','x2','x3'],['5'])
+#TREND = ot.SymbolicFunction(['x'], ['0'])
+NU = 1.5
+
+AMPLITUDE = [2.0]
+SCALE = [0.1, 0.1, 0.1]
+#SCALE = [.3]
+
+loi_ech = ot.ComposedDistribution([ot.Uniform(0,1)] * DIMENSION_SPATIALE)
+points = loi_ech.getSample(NB_PTS_PLAN_EXPERIENCE)
+
+modele_covariance = ot.MaternModel(SCALE, AMPLITUDE, NU)
+sorties = ot.GaussianProcess(ot.TrendTransform(TREND, ot.Mesh(points)), modele_covariance, ot.Mesh(points)).getRealization()
+
+
+# ## Création manuelle du noyau

+# ### Noyau de Matérn avec paramétrisation standard

+# In[3]:
+
+
+def matern(x, nu, theta):
+    """Implémentation directe du noyau de Matérn avec paramétrisation standard.
+
+    Arguments:
+        nu : paramètre de régularité
+        x : np.array représentant l'écart algébrique entre deux points
+        theta : np.array de même taille que x contenant les longueurs de corrélation
+
+    """
+    if len(x)!=len(theta):
+        raise ValueError("La longueur de x est {!r} alors que la longueur de theta est {!r}.".format(len(x), len(theta)))
+    x = np.array(x)
+    theta = np.array(theta)
+    if not np.any(x):  # null gap vector: correlation is exactly 1
+        return 1.0
+    expression = np.sqrt(2 * nu) * np.linalg.norm(x / theta)
+    return np.exp((1. - nu) * np.log(2.)) / ot.SpecFunc_Gamma(nu) * np.exp(nu * np.log(expression)) * ot.SpecFunc_BesselK(nu, expression)
+
+def derivee_matern(x, indice_derive, nu, theta):
+    """Implémentation de la dérivée du noyau de Matérn avec paramétrisation standard.
+    Il s'agit de la dérivée partielle par rapport au indice_derive-ième élément du paramètre theta.
+
+    Arguments:
+        indice_derive : numéro de la longueur de corrélation par rapport à laquelle est faite la dérivation
+        nu : paramètre de régularité
+        x : np.array représentant l'écart algébrique entre deux points
+        theta : np.array de même taille que x contenant les longueurs de corrélation
+
+    """
+    if len(x)!=len(theta):
+        raise ValueError("La longueur de x est {!r} alors que la longueur de theta est {!r}.".format(len(x), len(theta)))
+    if indice_derive<0:
+        raise ValueError("indice_derive vaut {!r} : ce devrait être un entier positif ou nul.".format(indice_derive))
+    if indice_derive>=len(theta):
+        raise ValueError("indice_derive vaut {!r} or theta n'a que {!r} composantes !".format(indice_derive, len(theta)))
+    x = np.array(x)
+    theta = np.array(theta)
+    if not np.any(x):  # null gap vector: the derivative vanishes
+        return 0.
+    expression = np.sqrt(2 * nu) * np.linalg.norm(x / theta)
+    return nu / ot.SpecFunc_Gamma(nu) / np.power(2., nu-2.) * x[indice_derive]**2 * theta[indice_derive]**(-3) * np.power(expression, nu-1.) 
* ot.SpecFunc_BesselK(nu - 1., expression) + + +# In[4]: + + +print('matern(nu=NU, x=[2, 2], theta=[3, 4])=', matern(nu=NU, x=[2, 2], theta=[3, 4])) + + +# In[5]: + + +print('derivee_matern=', [ derivee_matern(indice_derive=i, nu=NU, x=[2, 2], theta=[3., 4]) for i in range(2)]) + + + + +# In[104]: + + +ot_matern = ot.MaternModel([3, 4], AMPLITUDE, NU ) +print('ot_matern([2,2])=',ot_matern([2,2])) +print('ot_matern.parameterGradient=', ot_matern.parameterGradient([2,2],[0,0])) + +# In[105]: + + +h=1.e-8 +(matern(nu=NU, x=[2,2], theta=[3+h,4]) - matern(nu=NU, x=[2,2], theta=[3,4]))/h + + +# ### Noyau de Matérn avec paramétrisation inverse + +# In[8]: + + +def matern_param_inverse(x, nu, mu): + """Implémentation directe du noyau de Matérn avec paramétrisation inverse. + + Arguments: + nu : paramètre de régularité + x : np.array représentant l'écart algébrique entre deux points + mu : np.array de même taille que x contenant les longueurs de corrélation inverses + + """ + if len(x)!=len(mu): + raise ValueError("La longueur de x est {!r} alors que la longueur de mu est {!r}.".format(len(x), len(mu))) + x = np.array(x) + mu = np.array(mu) + return matern(nu=nu, x=x, theta=1. / mu) + +def derivee_matern_param_inverse(x, indice_derive, nu, mu): + """Implémentation de la dérivée du noyau de Matérn avec paramétrisation inverse. + Il s'agit de la dérivée partielle par rapport au indice_derive-ième élément du paramètre mu. + + Arguments: + indice_derive : numéro de la longueur de corrélation inverse par rapport à laquelle est faite la dérivation + nu : paramètre de régularité + x : np.array représentant l'écart algébrique entre deux points + mu : np.array de même taille que x contenant les longueurs de corrélation inverses + + """ + if len(x)!=len(mu): + raise ValueError("La longueur de x est {!r} alors que la longueur de mu est {!r}.".format(len(x), len(mu))) + if indice_derive<0: + raise ValueError("indice_derive vaut {!r} : ce devrait être un entier positif ou nul.".format(indice_derive)) + if indice_derive>=len(mu): + raise ValueError("indice_derive vaut {!r} or mu n'a que {!r} composantes !".format(indice_derive, len(mu))) + x = np.array(x) + mu = np.array(mu) + return derivee_matern(indice_derive=indice_derive, nu=nu, x=x, theta=1. / mu) / -mu[indice_derive]**2 + + +# In[9]: + + + +print('matern_param_inverse=', matern_param_inverse(nu=NU, x=[2, 2], mu=[1/3, 1/4])) +ot_matern.setScaleParametrization(ot.CovarianceModelImplementation.INVERSE) +print(ot_matern.getParameter(), ot_matern.getParameterDescription()) +print('ot_matern([2,2])=',ot_matern([2,2])) +print('ot_matern.parameterGradient=', ot_matern.parameterGradient([2,2],[0,0])) +# In[10]: + + + +print('derivee_matern_param_inverse=', [derivee_matern_param_inverse(indice_derive=i, nu=NU, x=[2, 2], mu=[1/3., 1/4]) for i in range(2)]) + +# ### Noyau de Matérn avec paramétrisation log-inverse + +# In[11]: + + +def matern_param_log_inverse(x, nu, xi): + """Implémentation directe du noyau de Matérn avec paramétrisation log-inverse. 
+
+    Arguments:
+        nu : paramètre de régularité
+        x : np.array représentant l'écart algébrique entre deux points
+        xi : np.array de même taille que x contenant l'opposé des logarithmes des longueurs de corrélation
+
+    """
+    if len(x)!=len(xi):
+        raise ValueError("La longueur de x est {!r} alors que la longueur de xi est {!r}.".format(len(x), len(xi)))
+    x = np.array(x)
+    xi = np.array(xi)
+    return matern(nu=nu, x=x, theta=np.exp(- xi))
+
+def derivee_matern_param_log_inverse(x, indice_derive, nu, xi):
+    """Implémentation de la dérivée du noyau de Matérn avec paramétrisation log-inverse.
+    Il s'agit de la dérivée partielle par rapport au indice_derive-ième élément du paramètre xi.
+
+    Arguments:
+        indice_derive : numéro de la longueur de corrélation log-inverse par rapport à laquelle est faite la dérivation
+        nu : paramètre de régularité
+        x : np.array représentant l'écart algébrique entre deux points
+        xi : np.array de même taille que x contenant l'opposé des logarithmes des longueurs de corrélation
+
+    """
+    if len(x)!=len(xi):
+        raise ValueError("La longueur de x est {!r} alors que la longueur de xi est {!r}.".format(len(x), len(xi)))
+    if indice_derive<0:
+        raise ValueError("indice_derive vaut {!r} : ce devrait être un entier positif ou nul.".format(indice_derive))
+    if indice_derive>=len(xi):
+        raise ValueError("indice_derive vaut {!r} or xi n'a que {!r} composantes !".format(indice_derive, len(xi)))
+    x = np.array(x)
+    xi = np.array(xi)
+    return derivee_matern(indice_derive=indice_derive, nu=nu, x=x, theta=np.exp(- xi)) * -np.exp(- xi[indice_derive])
+
+
+# In[12]:
+
+
+def matern_gen(x, nu, p, param):
+    if param == 'standard':
+        return matern(x, nu, p)
+    elif param == 'inverse':
+        return matern_param_inverse(x, nu, p)
+    elif param == 'log_inverse':
+        return matern_param_log_inverse(x, nu, p)
+    raise ValueError(param)
+
+
+def derivee_matern_gen(x, indice_derive, nu, p, param):
+    if param == 'standard':
+        return derivee_matern(x, indice_derive, nu, p)
+    elif param == 'inverse':
+        return derivee_matern_param_inverse(x, indice_derive, nu, p)
+    elif param == 'log_inverse':
+        return derivee_matern_param_log_inverse(x, indice_derive, nu, p)
+    raise ValueError(param)
+
+
+print('matern_param_log_inverse', matern_param_log_inverse(nu=NU, x=[2, 2], xi=[-np.log(3), -np.log(4)]))
+ot_matern.setScaleParametrization(ot.CovarianceModelImplementation.LOGINVERSE)
+print(ot_matern.getParameter(), ot_matern.getParameterDescription())
+print('ot_matern([2,2])=',ot_matern([2,2]))
+print('ot_matern.parameterGradient=', ot_matern.parameterGradient([2,2],[0,0]))

+# In[13]:
+
+
+print('derivee_matern_param_log_inverse', [derivee_matern_param_log_inverse(indice_derive=i, nu=NU, x=[2, 2], xi=[-np.log(3), -np.log(4)]) for i in range(2)])
+
+
+# ## Création de la matrice de corrélation et de sa dérivée par rapport au paramètre

+# ### Paramétrisation standard

+# In[14]:
+
+
+def matrice_correlation_matern(nu, plan_xp, theta):
+    """Renvoie la matrice de corrélation correspondant à un noyau de Matérn utilisant la paramétrisation standard.
+
+    Arguments :
+        nu : paramètre de régularité
+        plan_xp : ot.Sample contenant le plan d'expériences
+        theta : np.array de même taille que x contenant les longueurs de corrélation
+
+    """
+    soustractions = np.subtract.outer(plan_xp, plan_xp)
+    ecarts_algebriques = np.zeros((soustractions.shape[0], soustractions.shape[2], soustractions.shape[1]))
+    for indice in range(ecarts_algebriques.shape[2]):
+        ecarts_algebriques[:, :, indice] = soustractions[:, indice, :, indice]
+    matrice_corr = np.apply_along_axis(matern, 2, ecarts_algebriques, nu=nu, theta=theta)
+    return matrice_corr
+
+def matrice_correlation_derivee_matern(indice_derive, nu, plan_xp, theta):
+    """Renvoie la matrice de corrélation dérivée par rapport à la indice_derive-ième longueur de corrélation pour un noyau de Matérn.
+
+    Arguments :
+        indice_derive : numéro de la longueur de corrélation par rapport à laquelle est faite la dérivation
+        nu : paramètre de régularité
+        plan_xp : ot.Sample contenant le plan d'expériences
+        theta : np.array de même taille que x contenant les longueurs de corrélation
+
+    """
+    soustractions = np.subtract.outer(plan_xp, plan_xp)
+    ecarts_algebriques = np.zeros((soustractions.shape[0], soustractions.shape[2], soustractions.shape[1]))
+    for indice in range(ecarts_algebriques.shape[2]):
+        ecarts_algebriques[:, :, indice] = soustractions[:, indice, :, indice]
+    matrice_corr_derivee = np.apply_along_axis(derivee_matern, 2, ecarts_algebriques, indice_derive=indice_derive, nu=nu, theta=theta)
+    return matrice_corr_derivee
+
+
+# ### Paramétrisation inverse

+# In[15]:
+
+
+def matrice_correlation_matern_param_inverse(nu, plan_xp, mu):
+    """Renvoie la matrice de corrélation correspondant à un noyau de Matérn utilisant la paramétrisation inverse.
+
+    Arguments :
+        nu : paramètre de régularité
+        plan_xp : ot.Sample contenant le plan d'expériences
+        mu : np.array de même taille que x contenant les longueurs de corrélation inverses
+
+    """
+    soustractions = np.subtract.outer(plan_xp, plan_xp)
+    ecarts_algebriques = np.zeros((soustractions.shape[0], soustractions.shape[2], soustractions.shape[1]))
+    for indice in range(ecarts_algebriques.shape[2]):
+        ecarts_algebriques[:, :, indice] = soustractions[:, indice, :, indice]
+    matrice_corr = np.apply_along_axis(matern_param_inverse, 2, ecarts_algebriques, nu=nu, mu=mu)
+    return matrice_corr
+
+def matrice_correlation_derivee_matern_param_inverse(indice_derive, nu, plan_xp, mu):
+    """Renvoie la matrice de corrélation dérivée par rapport à la indice_derive-ième longueur de corrélation pour un noyau de Matérn.
+
+    Arguments :
+        indice_derive : numéro de la longueur de corrélation par rapport à laquelle est faite la dérivation
+        nu : paramètre de régularité
+        plan_xp : ot.Sample contenant le plan d'expériences
+        mu : np.array de même taille que x contenant les longueurs de corrélation inverses
+
+    """
+    soustractions = np.subtract.outer(plan_xp, plan_xp)
+    ecarts_algebriques = np.zeros((soustractions.shape[0], soustractions.shape[2], soustractions.shape[1]))
+    for indice in range(ecarts_algebriques.shape[2]):
+        ecarts_algebriques[:, :, indice] = soustractions[:, indice, :, indice]
+    matrice_corr_derivee = np.apply_along_axis(derivee_matern_param_inverse, 2, ecarts_algebriques, indice_derive=indice_derive, nu=nu, mu=mu)
+    return matrice_corr_derivee
+
+
+# ### Paramétrisation log-inverse

+# In[16]:
+
+
+def matrice_correlation_matern_param_log_inverse(nu, plan_xp, xi):
+    """Renvoie la matrice de corrélation correspondant à un noyau de Matérn utilisant la paramétrisation log-inverse.
+
+    Arguments :
+        nu : paramètre de régularité
+        plan_xp : ot.Sample contenant le plan d'expériences
+        xi : np.array de même taille que x contenant les longueurs de corrélation log-inverses
+
+    """
+    soustractions = np.subtract.outer(plan_xp, plan_xp)
+    ecarts_algebriques = np.zeros((soustractions.shape[0], soustractions.shape[2], soustractions.shape[1]))
+    for indice in range(ecarts_algebriques.shape[2]):
+        ecarts_algebriques[:, :, indice] = soustractions[:, indice, :, indice]
+    matrice_corr = np.apply_along_axis(matern_param_log_inverse, 2, ecarts_algebriques, nu=nu, xi=xi)
+    return matrice_corr
+
+def matrice_correlation_derivee_matern_param_log_inverse(indice_derive, nu, plan_xp, xi):
+    """Renvoie la matrice de corrélation dérivée par rapport à la indice_derive-ième longueur de corrélation pour un noyau de Matérn.
+ + Arguments : + indice_derive : numéro de la longueur de corrélation par rapport à laquelle est faite la dérivation + nu : paramètre de régularité + plan_xp : ot.Sample contenant le plan d'expériences + xi : np.array de même taille que x contenant les longueurs de corrélation log-inverses + + """ + soustractions = np.subtract.outer(plan_xp, plan_xp) + ecarts_algebriques = np.zeros((soustractions.shape[0], soustractions.shape[2], soustractions.shape[1])) + for indice in range(ecarts_algebriques.shape[2]): + ecarts_algebriques[:, :, indice] = soustractions[:, indice, :, indice] + matrice_corr_derivee = np.apply_along_axis(derivee_matern_param_log_inverse, 2, ecarts_algebriques, indice_derive=indice_derive, nu=nu, xi=xi) + return matrice_corr_derivee + + + +def matrice_correlation_matern_gen(nu, plan_xp, p, param): + if param == 'standard': + return matrice_correlation_matern(nu, plan_xp, p) + elif param == 'inverse': + return matrice_correlation_matern_param_inverse(nu, plan_xp, p) + elif param == 'log_inverse': + return matrice_correlation_matern_param_log_inverse(nu, plan_xp, p) + raise ValueError(param) + + +def matrice_correlation_derivee_matern_gen(indice_derive, nu, plan_xp, p, param): + if param == 'standard': + return matrice_correlation_derivee_matern(indice_derive, nu, plan_xp, p) + elif param == 'inverse': + return matrice_correlation_derivee_matern_param_inverse(indice_derive, nu, plan_xp, p) + elif param == 'log_inverse': + return matrice_correlation_derivee_matern_param_log_inverse(indice_derive, nu, plan_xp, p) + raise ValueError(param) + +# ### Calcul des matrices de corrélation correspondant aux différentes parmétrisations + +# In[17]: + + +print('matrice_correlation_matern=', matrice_correlation_matern(nu=NU, plan_xp=points, theta=SCALE)) +print('MaternModel.discretize=', ot.MaternModel(SCALE, [1.0], NU).discretize(points)) + +#exit(1) + + + +# In[18]: + + +matrice_correlation_matern_param_inverse(nu=NU, plan_xp=points, mu=1./np.array(SCALE)) + + +# In[19]: + + +matrice_correlation_matern_param_log_inverse(nu=NU, plan_xp=points, xi=-np.log(np.array(SCALE))) + + +# In[20]: + + +matrice_correlation_derivee_matern(indice_derive=0, nu=NU, plan_xp=points, theta=SCALE) + + +# In[21]: + + +matrice_correlation_derivee_matern_param_inverse(indice_derive=0, nu=NU, plan_xp=points, mu=1./np.array(SCALE)) + + +# In[22]: + + +matrice_correlation_derivee_matern_param_log_inverse(indice_derive=0, nu=NU, plan_xp=points, xi=-np.log(np.array(SCALE))) + + +# ## Matrices intervenant dans les formules calculées une fois pour toutes + +# In[23]: + + +def matrices_issues_correlation(matrice_correlation, plan_xp, liste_fonctions_tendance, pepite): + """Renvoie un dictionnaire contenant : + correlation : la matrice de corrélation passée en argument + correlation_conception : matrice homogène à une matrice de corrélation restreinte à l'espace de tendance + correlation_inverse_transformee : matrice sigma-tilde du cahier des charges + + Arguments : + matrice_correlation : np.array contenant les correlations entre points du plan_xp + plan_xp : ot.Sample qui recense les points d'observation + liste_fonctions_tendance + pepite + + """ + matrice_correlation_inverse = np.linalg.inv(matrice_correlation) + matrice_conception = tendance(liste_fonctions_tendance=liste_fonctions_tendance, plan_xp=plan_xp) + matrice_correlation_inverse_conception = np.matmul(matrice_correlation_inverse, matrice_conception) + matrice_correlation_conception = np.linalg.inv(np.matmul(np.transpose(matrice_conception), + 
matrice_correlation_inverse_conception)) + matrice_correlation_inverse_transformee = matrice_correlation_inverse - np.matmul(np.matmul(matrice_correlation_inverse_conception, + matrice_correlation_conception), + np.transpose(matrice_correlation_inverse_conception)) + return {'correlation' : matrice_correlation, + 'conception' : matrice_conception, #NÉCESSAIRE UNIQUEMENT SI ON VEUT ANNULER L'EFFET DU CHOIX DES FONCTIONS DE BASE liste_fonctions_tendance + 'correlation_conception' : matrice_correlation_conception, + 'correlation_inverse_transformee' : matrice_correlation_inverse_transformee} + + +# ## Matrice de tendance $\boldsymbol{F}$ + +# In[24]: + + +def tendance(liste_fonctions_tendance, plan_xp): + """Renvoie la matrice de tendance. Sa taille est plan_sp.getSize() x len(liste_fonctions_tendance). + + Arguments : + liste_fonctions_tendance : liste de fonctions python acceptant plan_xp en entree + plan_xp : ot.Sample contenant le plan d'expériences + + """ + matrice_tendance = np.zeros((plan_xp.getSize(), len(liste_fonctions_tendance))) + for indice, fonction in enumerate(liste_fonctions_tendance): + matrice_tendance[:, indice] = fonction(plan_xp) + #print('tendance=', matrice_tendance) + + return matrice_tendance + + +# ### Définition de la fonction constante... +# ... et des fonctions coordonnées, ces dernières n'étant pas utilisées pour l'instant. + +# In[25]: + + +def un(matrice): return np.ones(len(matrice)) +def coord1(matrice) : return np.squeeze(matrice[:, 0]) +def coord2(matrice) : return np.squeeze(matrice[:, 1]) +def coord3(matrice) : return np.squeeze(matrice[:, 2]) +LISTE_TENDANCE = [un] + +CORR_MATERN = matrice_correlation_matern(nu=NU, plan_xp=points, theta=SCALE) +#DIC_CORR = matrices_issues_correlation(liste_fonctions_tendance=LISTE_TENDANCE, matrice_correlation=CORR_MATERN, pepite=0., plan_xp=points) + + +# In[26]: + + +un(points) + + +# In[27]: + + +#tend = tendance(LISTE_TENDANCE, points) +#tend + + +# ## Vraisemblance intégrée + +# ### Fonction générique prenant en entrée les matrices apparaissant dans les formules + +# In[29]: + + +def log_vraisemblance_integree(dic_correlation, sorties): + """Renvoie la vraisemblance intégrée.""" + #Le terme bonus sert à éliminer l'impact du choix de matrice de conception. + #Il est constant et donc inutile pour l'optimisation. + #terme_bonus = 0.5 * np.log(np.linalg.det(np.matmul(np.transpose(dic_correlation['conception']), + # dic_correlation['conception']))) + + #print('log_vraisemblance_integree C=', dic_correlation['correlation']) + terme0 = -0.5 * np.log(np.linalg.det(dic_correlation['correlation'])) + terme1 = 0.5 * np.log(np.linalg.det(dic_correlation['correlation_conception'])) + terme2 = -0.5 * (dic_correlation['correlation'].shape[0] - + dic_correlation['correlation_conception'].shape[0]) *\ + np.log(np.matmul(np.transpose(sorties), + np.matmul(dic_correlation['correlation_inverse_transformee'], + sorties))) + print('log_vraisemblance_integree terme0=', -2*terme0, 'terme1=',-2*terme1,'terme2=', -2*float(terme2)) + return float(terme0 + terme1 + terme2) # + terme_bonus) + + +# In[30]: + + +#log_vraisemblance_integree(DIC_CORR, sorties) + + +# ### Fonction de plus haut niveau prenant en entrée le paramètre. +# Cette fonction n'est pas utilisée dans la suite. 
+ +# In[70]: + + +def log_vraisemblance_integree_matern(theta, plan_xp, sorties, nu, liste_fonctions_tendance=None, pepite=0.): + """Fonction à optimiser.""" + matrice_correlation = matrice_correlation_matern(nu=nu, plan_xp=plan_xp, theta=theta) + dic_correlation = matrices_issues_correlation(matrice_correlation, plan_xp, liste_fonctions_tendance, pepite) + + + return log_vraisemblance_integree(dic_correlation, sorties) + + +# In[71]: + + +#log_vraisemblance_integree_matern(theta=SCALE, plan_xp=points, sorties=sorties, nu=NU, liste_fonctions_tendance=LISTE_TENDANCE, pepite=0.) + + +# ## Prior de Jeffreys + +# ### Fonction renseignant un élément de la matrice d'information de Fisher +# La fonction existe en 3 versions : une pour chaque paramétrisation. + +# In[31]: + + +def element_info_fisher_matern(dic_correlation, ligne, colonne, theta, nu, plan_xp): + """Renvoie un élément de la matrice d'information de Fisher.""" + #print('element_info_fisher_matern correlation=', dic_correlation['correlation'].shape, dic_correlation['correlation']) + #print('element_info_fisher_matern correlation_inverse_transformee=', dic_correlation['correlation_inverse_transformee'].shape, dic_correlation['correlation_inverse_transformee']) + if ligne==len(theta) or colonne==len(theta): + if ligne==len(theta) and colonne==len(theta): + return dic_correlation['correlation'].shape[0] - dic_correlation['correlation_conception'].shape[0] + derivee = matrice_correlation_derivee_matern(indice_derive=min(ligne,colonne), + nu=nu, plan_xp=plan_xp, theta=theta) + return np.trace(np.matmul(dic_correlation['correlation_inverse_transformee'], + derivee)) + derivee_lig = matrice_correlation_derivee_matern(indice_derive=ligne, nu=nu, + plan_xp=plan_xp, theta=theta) + facteur_lig = np.matmul(dic_correlation['correlation_inverse_transformee'], + derivee_lig) + derivee_col = matrice_correlation_derivee_matern(indice_derive=colonne, nu=nu, + plan_xp=plan_xp, theta=theta) + facteur_col = np.matmul(dic_correlation['correlation_inverse_transformee'], + derivee_col) + return np.trace(np.matmul(facteur_lig, facteur_col)) + + +# In[32]: + + +def element_info_fisher_matern_param_inverse(dic_correlation, ligne, colonne, mu, nu, plan_xp): + """Renvoie un élément de la matrice d'information de Fisher.""" + if ligne==len(mu) or colonne==len(mu): + if ligne==len(mu) and colonne==len(mu): + return dic_correlation['correlation'].shape[0] - dic_correlation['correlation_conception'].shape[0] + derivee = matrice_correlation_derivee_matern_param_inverse(indice_derive=min(ligne,colonne), + nu=nu, plan_xp=plan_xp, mu=mu) + return np.trace(np.matmul(dic_correlation['correlation_inverse_transformee'], + derivee)) + derivee_lig = matrice_correlation_derivee_matern_param_inverse(indice_derive=ligne, nu=nu, + plan_xp=plan_xp, mu=mu) + facteur_lig = np.matmul(dic_correlation['correlation_inverse_transformee'], + derivee_lig) + derivee_col = matrice_correlation_derivee_matern_param_inverse(indice_derive=colonne, nu=nu, + plan_xp=plan_xp, mu=mu) + facteur_col = np.matmul(dic_correlation['correlation_inverse_transformee'], + derivee_col) + return np.trace(np.matmul(facteur_lig, facteur_col)) + + +# In[40]: + + +def element_info_fisher_matern_param_log_inverse(dic_correlation, ligne, colonne, xi, nu, plan_xp): + """Renvoie un élément de la matrice d'information de Fisher.""" + if ligne==len(xi) or colonne==len(xi): + if ligne==len(xi) and colonne==len(xi): + return dic_correlation['correlation'].shape[0] - dic_correlation['correlation_conception'].shape[0] + 
derivee = matrice_correlation_derivee_matern_param_log_inverse(indice_derive=min(ligne,colonne), + nu=nu, plan_xp=plan_xp, xi=xi) + return np.trace(np.matmul(dic_correlation['correlation_inverse_transformee'], + derivee)) + derivee_lig = matrice_correlation_derivee_matern_param_log_inverse(indice_derive=ligne, nu=nu, + plan_xp=plan_xp, xi=xi) + facteur_lig = np.matmul(dic_correlation['correlation_inverse_transformee'], + derivee_lig) + derivee_col = matrice_correlation_derivee_matern_param_log_inverse(indice_derive=colonne, nu=nu, + plan_xp=plan_xp, xi=xi) + facteur_col = np.matmul(dic_correlation['correlation_inverse_transformee'], + derivee_col) + return np.trace(np.matmul(facteur_lig, facteur_col)) + + +# ### Fonction retournant la matrice d'information de Fisher complète +# La fonction existe en 3 versions : une pour chaque paramétrisation. + +# In[34]: + + +def element_info_fisher_matern_gen(dic_correlation, ligne, colonne, p, nu, plan_xp, param): + if param == 'standard': + return element_info_fisher_matern(dic_correlation, ligne, colonne, p, nu, plan_xp) + elif param == 'inverse': + return element_info_fisher_matern_param_inverse(dic_correlation, ligne, colonne, p, nu, plan_xp) + elif param == 'log_inverse': + return element_info_fisher_matern_param_log_inverse(dic_correlation, ligne, colonne, p, nu, plan_xp) + raise ValueError(param) + + +def info_fisher_matern(dic_correlation, theta, nu, plan_xp): + """Renvoie la matrice d'information de Fisher""" + nb_lignes_colonnes = plan_xp.getDimension()+1 + matrice_info_fisher = np.zeros((nb_lignes_colonnes, nb_lignes_colonnes)) + for ligne in range(nb_lignes_colonnes): + for colonne in range(nb_lignes_colonnes): + if colonne>=ligne: + matrice_info_fisher[ligne, colonne] = element_info_fisher_matern(dic_correlation, + ligne, + colonne, + theta, + nu, + plan_xp) + else: + matrice_info_fisher[ligne, colonne] = matrice_info_fisher[colonne, ligne] + #print('info_fisher_matern=', matrice_info_fisher) + return matrice_info_fisher + + +# In[35]: + + +def info_fisher_matern_param_inverse(dic_correlation, mu, nu, plan_xp): + """Renvoie la matrice d'information de Fisher""" + nb_lignes_colonnes = plan_xp.getDimension()+1 + matrice_info_fisher = np.zeros((nb_lignes_colonnes, nb_lignes_colonnes)) + for ligne in range(nb_lignes_colonnes): + for colonne in range(nb_lignes_colonnes): + if colonne>=ligne: + matrice_info_fisher[ligne, colonne] = element_info_fisher_matern_param_inverse(dic_correlation, + ligne, + colonne, + mu, + nu, + plan_xp) + else: + matrice_info_fisher[ligne, colonne] = matrice_info_fisher[colonne, ligne] + return matrice_info_fisher + + +# In[36]: + + +def info_fisher_matern_param_log_inverse(dic_correlation, xi, nu, plan_xp): + """Renvoie la matrice d'information de Fisher""" + nb_lignes_colonnes = plan_xp.getDimension()+1 + matrice_info_fisher = np.zeros((nb_lignes_colonnes, nb_lignes_colonnes)) + for ligne in range(nb_lignes_colonnes): + for colonne in range(nb_lignes_colonnes): + if colonne>=ligne: + matrice_info_fisher[ligne, colonne] = element_info_fisher_matern_param_log_inverse(dic_correlation, + ligne, + colonne, + xi, + nu, + plan_xp) + else: + matrice_info_fisher[ligne, colonne] = matrice_info_fisher[colonne, ligne] + return matrice_info_fisher + + +# In[37]: + +def info_fisher_matern_gen(dic_correlation, p, nu, plan_xp, param): + if param == 'standard': + return info_fisher_matern(dic_correlation, p, nu, plan_xp) + elif param == 'inverse': + return info_fisher_matern_param_inverse(dic_correlation, p, nu, plan_xp) + elif 
param == 'log_inverse': + return info_fisher_matern_param_log_inverse(dic_correlation, p, nu, plan_xp) + raise ValueError(param) + + +#info_fisher_matern(dic_correlation=DIC_CORR, theta=SCALE, nu=NU, plan_xp=points) + + +# In[38]: + + +#info_fisher_matern_param_inverse(dic_correlation=DIC_CORR, mu=1./np.array(SCALE), nu=NU, plan_xp=points) + + +# In[41]: + + +#info_fisher_matern_param_log_inverse(dic_correlation=DIC_CORR, xi=-np.log(np.array(SCALE)), nu=NU, plan_xp=points) + + +# ## Logarithme du prior de référence (= prior de Jeffreys) +# 3 versions corresponant à chacune des 3 paramétrisations. + +# In[52]: + + +def log_prior_jeffreys_matern(theta, nu, plan_xp, liste_fonctions_tendance, pepite): + """Densité du prior de Jeffreys au point theta.""" + matrice_correlation = matrice_correlation_matern(nu=nu, plan_xp=plan_xp, theta=theta) + dic_correlation = matrices_issues_correlation(matrice_correlation=matrice_correlation, + plan_xp=plan_xp, + liste_fonctions_tendance=liste_fonctions_tendance, + pepite=pepite) + return 0.5 * np.log(np.linalg.det(info_fisher_matern(dic_correlation, theta, nu, plan_xp))) + + +# In[53]: + + +def log_prior_jeffreys_matern_param_inverse(mu, nu, plan_xp, liste_fonctions_tendance, pepite): + """Densité du prior de Jeffreys au point mu.""" + matrice_correlation = matrice_correlation_matern_param_inverse(nu=nu, plan_xp=plan_xp, mu=mu) + dic_correlation = matrices_issues_correlation(matrice_correlation=matrice_correlation, + plan_xp=plan_xp, + liste_fonctions_tendance=liste_fonctions_tendance, + pepite=pepite) + return 0.5 * np.log(np.linalg.det(info_fisher_matern_param_inverse(dic_correlation, mu, nu, plan_xp))) + + +# In[55]: + + +def log_prior_jeffreys_matern_param_log_inverse(xi, nu, plan_xp, liste_fonctions_tendance, pepite): + """Densité du prior de Jeffreys au point xi.""" + matrice_correlation = matrice_correlation_matern_param_log_inverse(nu=nu, plan_xp=plan_xp, xi=xi) + dic_correlation = matrices_issues_correlation(matrice_correlation=matrice_correlation, + plan_xp=plan_xp, + liste_fonctions_tendance=liste_fonctions_tendance, + pepite=pepite) + return 0.5 * np.log(np.linalg.det(info_fisher_matern_param_log_inverse(dic_correlation, xi, nu, plan_xp))) + + +# In[43]: + +def log_prior_jeffreys_matern_gen(p, nu, plan_xp, liste_fonctions_tendance, pepite, param): + if param == 'standard': + return log_prior_jeffreys_matern(p, nu, plan_xp, liste_fonctions_tendance, pepite) + elif param == 'inverse': + return log_prior_jeffreys_matern_param_inverse(p, nu, plan_xp, liste_fonctions_tendance, pepite) + elif param == 'log_inverse': + return log_prior_jeffreys_matern_param_log_inverse(p, nu, plan_xp, liste_fonctions_tendance, pepite) + raise ValueError(param) + +#log_prior_jeffreys_matern(SCALE, NU, points, LISTE_TENDANCE, pepite=0.) + + +# In[54]: + + +#log_prior_jeffreys_matern_param_inverse(1./np.array(SCALE), NU, points, LISTE_TENDANCE, pepite=0.) + + +# In[56]: + + +#log_prior_jeffreys_matern_param_log_inverse(-np.log(SCALE), NU, points, LISTE_TENDANCE, pepite=0.) + + +# ## Logarithme de la vraisemblance pénalisée par le prior de référence +# 3 versions corresponant à chacune des 3 paramétrisations. 
+ +# In[44]: + + +def log_vraisemblance_penalisee_matern(theta, plan_xp, sorties, nu, liste_fonctions_tendance=None, pepite=0.): + """Fonction à optimiser.""" + matrice_correlation = matrice_correlation_matern(nu=nu, plan_xp=plan_xp, theta=theta) + dic_correlation = matrices_issues_correlation(matrice_correlation, plan_xp, liste_fonctions_tendance, pepite) + + penalization = 0.5 * np.log(np.linalg.det(info_fisher_matern(dic_correlation, theta, nu, plan_xp))) + #print('penalization=', 2*penalization) + + total = log_vraisemblance_integree(dic_correlation, sorties) + penalization + print('penalization=', -2*penalization, 'total=', -2*total) + return total + + +# In[76]: + + +def log_vraisemblance_penalisee_matern_param_inverse(mu, plan_xp, sorties, nu, liste_fonctions_tendance=None, pepite=0.): + """Fonction à optimiser.""" + matrice_correlation = matrice_correlation_matern_param_inverse(nu=nu, plan_xp=plan_xp, mu=mu) + dic_correlation = matrices_issues_correlation(matrice_correlation, plan_xp, liste_fonctions_tendance, pepite) + + + return log_vraisemblance_integree(dic_correlation, sorties) + 0.5 * np.log(np.linalg.det(info_fisher_matern_param_inverse(dic_correlation, mu, nu, plan_xp))) + + +# In[77]: + + +def log_vraisemblance_penalisee_matern_param_log_inverse(xi, plan_xp, sorties, nu, liste_fonctions_tendance=None, pepite=0.): + """Fonction à optimiser.""" + matrice_correlation = matrice_correlation_matern_param_log_inverse(nu=nu, plan_xp=plan_xp, xi=xi) + dic_correlation = matrices_issues_correlation(matrice_correlation, plan_xp, liste_fonctions_tendance, pepite) + + return log_vraisemblance_integree(dic_correlation, sorties) + 0.5 * np.log(np.linalg.det(info_fisher_matern_param_log_inverse(dic_correlation, xi, nu, plan_xp))) + + +# In[45]: + + +def log_vraisemblance_penalisee_matern_gen(p, plan_xp, sorties, nu, liste_fonctions_tendance=None, pepite=0., param='standard'): + if param == 'standard': + return log_vraisemblance_penalisee_matern(p, plan_xp, sorties, nu, liste_fonctions_tendance, pepite) + elif param == 'inverse': + return log_vraisemblance_penalisee_matern_param_inverse(p, plan_xp, sorties, nu, liste_fonctions_tendance, pepite) + elif param == 'log_inverse': + return log_vraisemblance_penalisee_matern_param_log_inverse(p, plan_xp, sorties, nu, liste_fonctions_tendance, pepite) + raise ValueError(param) + + +#log_vraisemblance_penalisee_matern(SCALE, plan_xp=points, sorties=sorties, nu=NU, liste_fonctions_tendance=LISTE_TENDANCE) + + +# In[60]: + + +#log_vraisemblance_penalisee_matern_param_inverse(1./np.array(SCALE), plan_xp=points, sorties=sorties, nu=NU, liste_fonctions_tendance=LISTE_TENDANCE) + + +# In[61]: + + +#log_vraisemblance_penalisee_matern_param_log_inverse(-np.log(SCALE), plan_xp=points, sorties=sorties, nu=NU, liste_fonctions_tendance=LISTE_TENDANCE) + + +# ## Courbes de vraisemblance +# 3 versions corresponant à chacune des 3 paramétrisations. +# +# **Nota bene** : +# +# - dans tous les graphes, l'axe des abscisses représente le paramètre standard, i.e. $\boldsymbol{\theta}$, et ce même lorsque la paramétrisation utilisée pour pénaliser la vraisemblance est inverse ou log-inverse. +# - pour pouvoir représenter ces graphes, on ne considère que les valeurs du paramètre standard qui rendent le noyau isotrope. Par exemple, abscisse = 0.2 signifie que $\boldsymbol{\theta} = (0.2, 0.2, 0.2)$. 
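+
+# The change of variable applied to theta before each evaluation is the one inlined in the
+# plotting functions below; the small helper here only spells it out (illustrative, the name
+# `p_from_theta` is not used elsewhere):

+# In[ ]:
+
+
+def p_from_theta(theta, param):
+    """Return the parameter p corresponding to the standard scale theta for a given parametrization."""
+    theta = np.array(theta)
+    if param == 'standard':
+        return theta           # p = theta
+    elif param == 'inverse':
+        return 1. / theta      # p = 1 / theta
+    elif param == 'log_inverse':
+        return -np.log(theta)  # p = -log(theta)
+    raise ValueError(param)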
+ +# In[115]: + + +def courbe_vraisemblance_penalisee_matern(seed, theta, plan_xp, tendance, nu, liste_fonctions_tendance=None, pepite=0.): + ot.RandomGenerator.SetSeed(seed) + modele_covariance = ot.MaternModel(theta, AMPLITUDE, nu) + realisation = ot.GaussianProcess(ot.TrendTransform(tendance ,ot.Mesh(plan_xp)), modele_covariance,ot.Mesh(plan_xp)).getRealization() + nb_abs = 50 + lin = np.linspace(1./nb_abs, 1, nb_abs).reshape(nb_abs, 1) + mat = np.concatenate((lin,lin,lin), axis=1) + posterior = np.apply_along_axis(log_vraisemblance_penalisee_matern, 1, mat, plan_xp=plan_xp, sorties=realisation.getValues(), nu=nu, liste_fonctions_tendance=liste_fonctions_tendance, pepite=pepite) + prior = np.apply_along_axis(log_prior_jeffreys_matern, 1, mat, plan_xp=plan_xp, nu=nu, liste_fonctions_tendance=liste_fonctions_tendance, pepite=pepite) + vraisemblance_integree = posterior - prior + + figure = plt.figure() + ax = figure.add_subplot(111) + line_posterior = ax.plot(lin, posterior, label='Vraisemblance pénalisée') + line_vrais_integree = ax.plot(lin, vraisemblance_integree, label='Vraisemblance intégrée') + titre = 'Paramétrisation standard' + ax.set_title(titre) + ax.set_xlabel('theta') + ax.legend() + figure.savefig(titre+'.pdf') + + +# In[113]: + + +def courbe_vraisemblance_penalisee_matern_param_inverse(seed, theta, plan_xp, tendance, nu, liste_fonctions_tendance=None, pepite=0.): + ot.RandomGenerator.SetSeed(seed) + modele_covariance = ot.MaternModel(theta, AMPLITUDE, nu) + realisation = ot.GaussianProcess(ot.TrendTransform(tendance ,ot.Mesh(plan_xp)), modele_covariance,ot.Mesh(plan_xp)).getRealization() + nb_abs = 50 + lin = np.linspace(1./nb_abs, 1, nb_abs).reshape(nb_abs, 1) + mat = np.concatenate((lin,lin,lin), axis=1) + posterior = np.apply_along_axis(log_vraisemblance_penalisee_matern_param_inverse, 1, 1./mat, plan_xp=plan_xp, sorties=realisation.getValues(), nu=nu, liste_fonctions_tendance=liste_fonctions_tendance, pepite=pepite) + prior = np.apply_along_axis(log_prior_jeffreys_matern_param_inverse, 1, 1./mat, plan_xp=plan_xp, nu=nu, liste_fonctions_tendance=liste_fonctions_tendance, pepite=pepite) + vraisemblance_integree = posterior - prior + + figure = plt.figure() + ax = figure.add_subplot(111) + line_posterior = ax.plot(lin, posterior, label='Vraisemblance pénalisée') + line_vrais_integree = ax.plot(lin, vraisemblance_integree, label='Vraisemblance intégrée') + titre = 'Paramétrisation inverse' + ax.set_title(titre) + ax.set_xlabel('theta') + ax.legend() + figure.savefig(titre+'.pdf') + + +# In[117]: + + +def courbe_vraisemblance_penalisee_matern_param_log_inverse(seed, theta, plan_xp, tendance, nu, liste_fonctions_tendance=None, pepite=0.): + ot.RandomGenerator.SetSeed(seed) + modele_covariance = ot.MaternModel(theta, AMPLITUDE, nu) + realisation = ot.GaussianProcess(ot.TrendTransform(tendance ,ot.Mesh(plan_xp)), modele_covariance,ot.Mesh(plan_xp)).getRealization() + nb_abs = 50 + lin = np.linspace(1./nb_abs, 1, nb_abs).reshape(nb_abs, 1) + mat = np.concatenate((lin,lin,lin), axis=1) + posterior = np.apply_along_axis(log_vraisemblance_penalisee_matern_param_log_inverse, 1, -np.log(mat), plan_xp=plan_xp, sorties=realisation.getValues(), nu=nu, liste_fonctions_tendance=liste_fonctions_tendance, pepite=pepite) + prior = np.apply_along_axis(log_prior_jeffreys_matern_param_log_inverse, 1, -np.log(mat), plan_xp=plan_xp, nu=nu, liste_fonctions_tendance=liste_fonctions_tendance, pepite=pepite) + vraisemblance_integree = posterior - prior + + figure = plt.figure() + ax = 
figure.add_subplot(111) + line_posterior = ax.plot(lin, posterior, label='Vraisemblance pénalisée') + line_vrais_integree = ax.plot(lin, vraisemblance_integree, label='Vraisemblance intégrée') + titre = 'Paramétrisation log-inverse' + ax.set_title(titre) + ax.set_xlabel('theta') + ax.legend() + figure.savefig(titre+'.pdf') + + +# In[116]: + + +def courbe_vraisemblance_penalisee_matern_gen(seed, theta, plan_xp, tendance, nu, liste_fonctions_tendance=None, pepite=0., param='standard'): + ot.RandomGenerator.SetSeed(seed) + modele_covariance = ot.MaternModel(theta, AMPLITUDE, nu) + realisation = ot.GaussianProcess(ot.TrendTransform(tendance, ot.Mesh(plan_xp)), modele_covariance,ot.Mesh(plan_xp)).getRealization() + nb_abs = 50 + lin = np.linspace(1./nb_abs, 1, nb_abs).reshape(nb_abs, 1) + mat = np.concatenate((lin,lin,lin), axis=1) + if param == 'standard': + pass + elif param == 'inverse': + mat = 1./mat + elif param == 'log_inverse': + mat = -np.log(mat) + else: + raise ValueError(param) + posterior = np.apply_along_axis(log_vraisemblance_penalisee_matern_gen, 1, mat, plan_xp=plan_xp, sorties=realisation.getValues(), nu=nu, liste_fonctions_tendance=liste_fonctions_tendance, pepite=pepite, param=param) + prior = np.apply_along_axis(log_prior_jeffreys_matern_gen, 1, mat, plan_xp=plan_xp, nu=nu, liste_fonctions_tendance=liste_fonctions_tendance, pepite=pepite, param=param) + vraisemblance_integree = posterior - prior + + figure = plt.figure() + ax = figure.add_subplot(111) + line_posterior = ax.plot(lin, posterior, label='Penalized likelihood') + line_vrais_integree = ax.plot(lin, vraisemblance_integree, label='Integrated likelihood') + ax.set_title('py prior=reference param='+ param) + ax.set_xlabel('p') + ax.legend() + figure.savefig('py_refprior_'+param+'.png') + + +courbe_vraisemblance_penalisee_matern(seed=0, theta = SCALE, plan_xp=points, tendance=TREND, nu=NU, liste_fonctions_tendance=[un, coord1, coord2, coord3]) + +LISTE_TENDANCE=[ot.SymbolicFunction(['x1','x2', 'x3'], ['1']),ot.SymbolicFunction(['x1','x2', 'x3'], ['x1']),ot.SymbolicFunction(['x1','x2', 'x3'], ['x2']),ot.SymbolicFunction(['x1','x2', 'x3'], ['x3'])] +seed = 0 + + +def courbe_vraisemblance_penalisee_matern_gen_ot(param): + modele_covariance = ot.MaternModel(SCALE, AMPLITUDE, NU) + parametrization = {'standard': ot.CovarianceModelImplementation.STANDARD, 'inverse': ot.CovarianceModelImplementation.INVERSE, 'log_inverse': ot.CovarianceModelImplementation.LOGINVERSE}[param] + modele_covariance.setScaleParametrization(parametrization) + ot.RandomGenerator.SetSeed(seed) + realisation = ot.GaussianProcess(ot.TrendTransform(TREND ,ot.Mesh(points)), modele_covariance,ot.Mesh(points)).getRealization() + krigeage = ot.KrigingAlgorithm(points,realisation.getValues(),modele_covariance,ot.Basis(LISTE_TENDANCE),False) + + krigeage.setScalePrior(ot.GeneralLinearModelAlgorithm.REFERENCE) + llf = krigeage.getReducedLogLikelihoodFunction() + nb_abs = 50 + lin = np.linspace(1./nb_abs, 1, nb_abs).reshape(nb_abs, 1) + mat = np.concatenate((lin,lin,lin), axis=1) + if param == 'standard': + pass + elif param == 'inverse': + mat = 1./mat + elif param == 'log_inverse': + mat = -np.log(mat) + else: + raise ValueError(param) + figure = plt.figure() + ax = figure.add_subplot(111) + posterior = [llf(mat[i])[0] for i in range(nb_abs)] + ax.plot(lin, posterior, label='Penalized likelihood') + ax.set_title('ot prior=reference param='+ param) + ax.set_xlabel('p') + ax.legend() + figure.savefig('ot_refprior_'+param+'.png') + +for param in ['standard', 
'inverse', 'log_inverse']: + courbe_vraisemblance_penalisee_matern_gen(seed=0, theta = SCALE, plan_xp=points, tendance=TREND, nu=NU, liste_fonctions_tendance=[un, coord1, coord2, coord3], param=param) + courbe_vraisemblance_penalisee_matern_gen_ot(param) + + + + +#log_vraisemblance_penalisee_matern + +# In[114]: + + +#courbe_vraisemblance_penalisee_matern_param_inverse(seed=0, theta = SCALE, plan_xp=points, tendance=TREND, nu=NU, liste_fonctions_tendance=[un, coord1, coord2, coord3]) + + +# In[118]: + + +#courbe_vraisemblance_penalisee_matern_param_log_inverse(seed=0, theta = SCALE, plan_xp=points, tendance=TREND, nu=NU, liste_fonctions_tendance=[un, coord1, coord2, coord3]) + + diff --git a/validation/src/KrigingGu/ValidRobustGasp.py b/validation/src/KrigingGu/ValidRobustGasp.py new file mode 100755 index 00000000000..d25f3cff67d --- /dev/null +++ b/validation/src/KrigingGu/ValidRobustGasp.py @@ -0,0 +1,179 @@ +import numpy as np +import openturns as ot +from openturns.viewer import View + +ot.Log.Show(ot.Log.Flags() ^ ot.Log.WARN) + +ot.ResourceMap.SetAsScalar("GeneralLinearModelAlgorithm-DefaultOptimizationLowerBound", 1e-4); +ot.ResourceMap.SetAsScalar("GeneralLinearModelAlgorithm-DefaultOptimizationUpperBound", 1e4); + +import subprocess + +def evalueErreur(noyau_covariance, realisation, base=ot.Basis(0), prior=0, parametrization=0): + """ + Évalue la distance entre + 1. le vrai paramètre d'échelle (fourni par la commande noyau_covariance.getScale()) et + 2. le paramètre d'échelle estimé sur la base d'une realisation du vecteur gaussien observé. + + Arguments : + noyau_covariance -- ot.CovarianceModel représentant le vrai modèle + realisation -- ot.Realization sur laquelle est faite l'estimation + base -- ot.Basis : doit être spécifié pour le krigeage universel + """ + noyau_covariance.setScaleParametrization(parametrization) + processus = ot.GaussianProcess(noyau_covariance, realisation.getMesh()) + + sigma = np.array(noyau_covariance.getAmplitude()) # vraie amplitude + matrice_covariance = noyau_covariance.discretize(realisation.getMesh()) # vraie matrice de covariance + matrice_correlation = np.multiply(sigma**(-2), matrice_covariance) # vraie matrice de corrélation + matrice_fisher = np.arctanh(matrice_correlation - np.identity(realisation.getValues().getSize())) # matrice de corrélation transformée + + #La ligne suivante définit l'algorithme de krigeage. C'est à ce niveau qu'il faut permettre d'estimer les paramètres par les méthodes de Gu. 
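+    # (prior is one of ot.GeneralLinearModelAlgorithm.NONE / JOINTLYROBUST / REFERENCE and
+    # parametrization one of ot.CovarianceModelImplementation.STANDARD / INVERSE / LOGINVERSE,
+    # matching the prior_string and parametrization_string tables defined below.)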
+ krigeage = ot.KrigingAlgorithm(realisation.getMesh().getVertices(),realisation,noyau_covariance,base,False) + krigeage.setScalePrior(prior) + krigeage.run() + + resultat = krigeage.getResult() + sigma_estime = np.array(resultat.getCovarianceModel().getAmplitude()) # amplitude estimée + matrice_covariance_estimee = resultat.getCovarianceModel().discretize(realisation.getMesh()) # matrice de covariance estimée + matrice_correlation_estimee = np.multiply(sigma_estime**(-2), matrice_covariance_estimee) # matrice de corrélation estimée + matrice_fisher_estimee = np.arctanh(matrice_correlation_estimee - np.identity(realisation.getValues().getSize())) # matrice de corrélation transformée + + return np.linalg.norm(matrice_fisher_estimee - matrice_fisher,'fro') # erreur d'estimation + +NB_ITERATIONS = 100 + +def generePoints(nb_pts, dim) : + points = ot.RandomGenerator.Generate(nb_pts * dim) + points = ot.Matrix(nb_pts, dim, points) + return np.array(points) + +NB_PTS_PLAN_EXPERIENCE = 30 +DIMENSION_SPATIALE = 3 +AMPLITUDE = [2.0] +SCALE = [0.4, 0.8, 0.2] +TREND = ot.SymbolicFunction(['x1','x2','x3'],['5 + 20*x1 + 3*x2 - 12 * x3']) + + +def echantillonneErreurs (noyau_covariance, prior, parametrization) : + """ + Produit un dictionnaire contenant : + 1. "simple" : ot.Sample des erreurs d'estimation de krigeage simple pour NB_ITERATIONS plans d'expériences et réalisations de processus gaussiens. + 2. "universel" : ot.Sample des erreurs d'estimation de krigeage universel (base affine) pour NB_ITERATIONS plans d'expériences et réalisations de processus gaussiens. + 3. "simple_distribution" : ot.Distribution obtenue par KernelSmoothing du ot.Sample "simple" + 4. "universel_distribution" : ot.Distribution obtenue par KernelSmoothing du ot.Sample "universel" + + Argument : + noyau_covariance -- ot.CovarianceModel représentant le vrai modèle + """ + + ech_erreur_correlation = np.empty(NB_ITERATIONS) + ech_erreur_correlation_univ = np.empty(NB_ITERATIONS) + + + for iter in range(NB_ITERATIONS): + print(iter, NB_ITERATIONS) + points = generePoints(NB_PTS_PLAN_EXPERIENCE, DIMENSION_SPATIALE) + + realisation = ot.GaussianProcess(noyau_covariance,ot.Mesh(points)).getRealization() #réalisation avec moyenne nulle + realisation_univ = ot.Field(realisation.getMesh(),realisation.getValues() + TREND(points)) #réalisation + ajout du vecteur moyenne + + ech_erreur_correlation[iter] = np.array(evalueErreur(noyau_covariance, realisation, ot.Basis(0), prior, parametrization)) #erreur d'estimation en krigeage simple + + ech_erreur_correlation_univ[iter] = np.array(evalueErreur(noyau_covariance, realisation_univ, + ot.LinearBasisFactory(DIMENSION_SPATIALE).build(), prior, parametrization)) #erreur d'estimation en krigeage universel + + ech_erreur_correlation = ot.Sample(ech_erreur_correlation.reshape((NB_ITERATIONS,1))) #reshape nécessaire car Sample demande un 2d-array + ech_erreur_correlation_univ = ot.Sample(ech_erreur_correlation_univ.reshape((NB_ITERATIONS,1))) + + ech_erreur = {} + ech_erreur["simple"] = ech_erreur_correlation + ech_erreur["universel"] = ech_erreur_correlation_univ + ech_erreur["simple_distribution"] = ot.KernelSmoothing().build(ech_erreur_correlation) + ech_erreur["universel_distribution"] = ot.KernelSmoothing().build(ech_erreur_correlation_univ) + + return ech_erreur + +MATERN_CINQ_DEMIS = ot.MaternModel(SCALE,AMPLITUDE,2.5) +MATERN_TROIS_DEMIS = ot.MaternModel(SCALE,AMPLITUDE,1.5) +EXPONENTIEL = ot.ExponentialModel(SCALE,AMPLITUDE) +EXPONENTIEL_ABSOLU = ot.AbsoluteExponential(SCALE,AMPLITUDE) 
+COSINUS_EXPONENTIEL = ot.ExponentiallyDampedCosineModel(SCALE, AMPLITUDE, 1.0)
+EXPONENTIEL_CARRE = ot.SquaredExponential(SCALE, AMPLITUDE)
+EXPONENTIEL_GENERALISE = ot.GeneralizedExponential(SCALE, AMPLITUDE, 1.9)
+SPHERIQUE = ot.SphericalModel(SCALE, AMPLITUDE, 1.5)
+PRODUIT_MATERN_CINQ_DEMIS = ot.ProductCovarianceModel([ot.MaternModel([SCALE[i]], AMPLITUDE, 2.5) for i in range(3)])
+PRODUIT_MATERN_TROIS_DEMIS = ot.ProductCovarianceModel([ot.MaternModel([SCALE[i]], AMPLITUDE, 1.5) for i in range(3)])
+PRODUIT_EXPONENTIEL = ot.ProductCovarianceModel([ot.ExponentialModel([SCALE[i]], AMPLITUDE) for i in range(3)])
+PRODUIT_EXPONENTIEL_GENERALISE = ot.ProductCovarianceModel([ot.GeneralizedExponential([SCALE[i]], AMPLITUDE, 1.9) for i in range(3)])
+
+
+def getCovModelLabel(model):
+    # Build a short label from the class name; for Matern models, append 10*nu.
+    covname = model.getClassName()
+    covname += str(int(10 * model.getFullParameter()[-1])) if model.getClassName() == 'MaternModel' else ''
+    if covname == 'ProductCovarianceModel':
+        submodel = model.getCollection()[0].getImplementation()
+        subname = submodel.getClassName()
+        subname += str(int(10 * submodel.getFullParameter()[-1])) if subname == 'MaternModel' else ''
+        covname += subname
+    return covname
+
+prior_string = {ot.GeneralLinearModelAlgorithm.NONE: 'none',
+                ot.GeneralLinearModelAlgorithm.JOINTLYROBUST: 'jointlyrobust',
+                ot.GeneralLinearModelAlgorithm.REFERENCE: 'reference'}
+parametrization_string = {ot.CovarianceModelImplementation.STANDARD: 'standard',
+                          ot.CovarianceModelImplementation.INVERSE: 'inverse',
+                          ot.CovarianceModelImplementation.LOGINVERSE: 'loginverse'}
+
+covModels = [MATERN_CINQ_DEMIS, MATERN_TROIS_DEMIS, EXPONENTIEL, EXPONENTIEL_ABSOLU, COSINUS_EXPONENTIEL, EXPONENTIEL_CARRE, EXPONENTIEL_GENERALISE, SPHERIQUE, PRODUIT_MATERN_CINQ_DEMIS, PRODUIT_MATERN_TROIS_DEMIS, PRODUIT_EXPONENTIEL, PRODUIT_EXPONENTIEL_GENERALISE]
+
+# write report
+with open('gu.tex', 'w') as tex:
+    tex.write('\\documentclass[12pt,a4paper]{article}\n')
+    tex.write('\\usepackage{graphicx}\n')
+    tex.write('\\usepackage{geometry}\n')
+    tex.write('\\usepackage{hyperref}\n')
+    tex.write('\\geometry{margin=5mm}\n')
+    tex.write('\\begin{document}\n')
+    tex.write('\\tableofcontents\n')
+    tex.write('\\newpage\n')
+    for covarianceModel in covModels:
+        covname = getCovModelLabel(covarianceModel)
+        tex.write('\\section{' + covname + '}\n')
+        tex.write('\\begin{tabular}{ccc}\n')
+        tex.write(' standard & inverse & log-inverse \\\\\n')
+        for prior in [ot.GeneralLinearModelAlgorithm.NONE, ot.GeneralLinearModelAlgorithm.JOINTLYROBUST, ot.GeneralLinearModelAlgorithm.REFERENCE]:
+            for parametrization in [ot.CovarianceModelImplementation.STANDARD,
+                                    ot.CovarianceModelImplementation.INVERSE,
+                                    ot.CovarianceModelImplementation.LOGINVERSE]:
+                eotab = '\\\\' if parametrization == ot.CovarianceModelImplementation.LOGINVERSE else '&'
+                tex.write(' \\includegraphics[width=60mm]{graph_' + covname + '_' + prior_string[prior] + '_' + parametrization_string[parametrization] + '.png}' + eotab + '\n')
+        tex.write('\\end{tabular}\n')
+        tex.write('\\newpage\n')
+    tex.write('\\end{document}\n')
+
+# write pngs
+for covarianceModel in covModels:
+    covname = getCovModelLabel(covarianceModel)
+    for prior in [ot.GeneralLinearModelAlgorithm.NONE, ot.GeneralLinearModelAlgorithm.JOINTLYROBUST, ot.GeneralLinearModelAlgorithm.REFERENCE]:
+        for parametrization in [ot.CovarianceModelImplementation.STANDARD,
+                                ot.CovarianceModelImplementation.INVERSE,
+                                ot.CovarianceModelImplementation.LOGINVERSE]:
+            # For product models, only the standard parametrization is run; its graph is reused for the other parametrizations.
+            if covname.startswith('ProductCovarianceModel') and parametrization != ot.CovarianceModelImplementation.STANDARD:
+                subprocess.run('cp graph_' + covname + '_' + prior_string[prior] + '_standard.png graph_' + covname + '_' + prior_string[prior] + '_' + parametrization_string[parametrization] + '.png', shell=True)
+                continue
+            ot.RandomGenerator.SetSeed(0)
+            print('-- covmodel=', covname, 'prior=', prior_string[prior], 'parametrization=', parametrization_string[parametrization])
+            ECH_ERREURS = echantillonneErreurs(covarianceModel, prior, parametrization)
+            graph = ECH_ERREURS["simple_distribution"].drawPDF()
+            graph.add(ECH_ERREURS["universel_distribution"].drawPDF())
+            graph.setLegends(['simple kriging', 'universal kriging'])
+            graph.setColors(['blue', 'red'])
+            graph.setXTitle('')
+            graph.setBoundingBox(ot.Interval([0.0, 0.0], [20.0, 0.5]))
+            graph.setTitle("Estimated error probability density\ncov=" + covname[:6] + "... prior=" + prior_string[prior] + " param=" + parametrization_string[parametrization] + " pts=" + str(NB_ITERATIONS))
+            view = View(graph)
+            view.save('graph_' + covname + '_' + prior_string[prior] + '_' + parametrization_string[parametrization] + '.png')
+            view.close()
+
+subprocess.run('pdflatex -halt-on-error gu.tex && pdflatex gu.tex', shell=True)
diff --git a/validation/src/KrigingGu/compute_validation_values.py b/validation/src/KrigingGu/compute_validation_values.py
new file mode 100644
index 00000000000..91469672207
--- /dev/null
+++ b/validation/src/KrigingGu/compute_validation_values.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Test and validate the OpenTURNS Kriging algorithm with Gu's methods.
+"""
+import openturns as ot
+import sys
+sys.path.insert(0, '.')
+from Gu import Gu
+
+print(ot.__version__)
+
+sampleSize = 6
+dimension = 1
+
+f = ot.SymbolicFunction(['x0'], ['x0 * sin(x0)'])
+log = ot.SymbolicFunction(['x'], ['log(x)'])
+
+X = ot.Sample([1.0, 3.0, 5.0, 6.0, 7.0, 8.0], dimension)
+Y = f(X)
+
+basis = ot.ConstantBasisFactory(dimension).build()
+
+scale = [1.0]
+amplitude = [4.123456]
+nu = 0.5
+
+def FitKrigingOpenTURNS(scale, amplitude, nu,
+                        prior, parametrization):
+    """
+    Fit a Kriging model with OpenTURNS.
+    """
+    covarianceModel = ot.MaternModel(scale, amplitude, nu)
+    covarianceModel.setScaleParametrization(parametrization)
+    algo = ot.KrigingAlgorithm(X, Y, covarianceModel, basis, False)
+    algo.setScalePrior(prior)
+    algo.run()
+    result = algo.getResult()
+    print('OpenTURNS')
+    scaleOTGU = result.getCovarianceModel().getParameter()
+    print('parameters=', scaleOTGU)
+    rllfunction = algo.getReducedLogLikelihoodFunction()
+    objective_value = rllfunction(scaleOTGU)
+    print("Objective value =", objective_value[0])
+    print("")
+    return
+
+def ComputeParametrizationName(parametrization):
+    if parametrization == ot.CovarianceModelImplementation.STANDARD:
+        parametrizationname = 'standard'
+    elif parametrization == ot.CovarianceModelImplementation.INVERSE:
+        parametrizationname = 'inverse'
+    elif parametrization == ot.CovarianceModelImplementation.LOGINVERSE:
+        parametrizationname = 'log-inverse'
+    else:
+        raise ValueError("Unexpected parametrization value %d" % (parametrization))
+    return parametrizationname
+
+def ComputePriorName(prior):
+    if prior == ot.GeneralLinearModelAlgorithm.REFERENCE:
+        priorname = 'reference'
+    elif prior == ot.GeneralLinearModelAlgorithm.JOINTLYROBUST:
+        priorname = 'jointly-robust'
+    else:
+        raise ValueError("Unexpected prior value %d" % (prior))
+    return priorname
+
+def FitKrigingGu(scale, amplitude, nu,
+                 prior, parametrization):
+    """
+    Fit a Kriging model with the Gu class.
+ """ + priorname = ComputePriorName(prior) + parametrizationname = ComputeParametrizationName(parametrization) + + print("") + print('Gu class') + covarianceModel = ot.MaternModel(scale, amplitude, nu) + gu = Gu(X, Y, basis, covarianceModel, priorname, parametrizationname) + result = gu.optimize_scale() + scaleJOGU = result.x + + + if parametrization == ot.CovarianceModelImplementation.STANDARD: + print("parameter=", scaleJOGU[0]) + elif parametrization == ot.CovarianceModelImplementation.INVERSE: + print("parameter=", 1.0 / scaleJOGU[0]) + elif parametrization == ot.CovarianceModelImplementation.LOGINVERSE: + print("parameter=", log(1.0 / scaleJOGU)[0]) + else: + raise ValueError("Unexpected parametrization value %d" % (parametrization)) + + objective_value = gu.set_scale(scaleJOGU) + print("Objectif value =", - objective_value) + return + +#################################################################### +# Test A : prior = REFERENCE, parametrization = STANDARD + +prior = ot.GeneralLinearModelAlgorithm.REFERENCE +parametrization = ot.CovarianceModelImplementation.STANDARD +priorname = ComputePriorName(prior) +parametrizationname = ComputeParametrizationName(parametrization) +print("") +print("+ prior", priorname, ", parametrization", parametrizationname) +FitKrigingOpenTURNS(scale, amplitude, nu, prior, parametrization) +FitKrigingGu(scale, amplitude, nu, prior, parametrization) + +#################################################################### +# Test B : prior = REFERENCE, parametrization = INVERSE + +prior = ot.GeneralLinearModelAlgorithm.REFERENCE +parametrization = ot.CovarianceModelImplementation.INVERSE +priorname = ComputePriorName(prior) +parametrizationname = ComputeParametrizationName(parametrization) +print("") +print("+ prior", priorname, ", parametrization", parametrizationname) +FitKrigingOpenTURNS(scale, amplitude, nu, prior, parametrization) +FitKrigingGu(scale, amplitude, nu, prior, parametrization) + +#################################################################### +# Test C : prior = REFERENCE, parametrization = LOGINVERSE + +prior = ot.GeneralLinearModelAlgorithm.REFERENCE +parametrization = ot.CovarianceModelImplementation.LOGINVERSE +priorname = ComputePriorName(prior) +parametrizationname = ComputeParametrizationName(parametrization) +print("") +print("+ prior", priorname, ", parametrization", parametrizationname) +FitKrigingOpenTURNS(scale, amplitude, nu, prior, parametrization) +FitKrigingGu(scale, amplitude, nu, prior, parametrization) + +#################################################################### +# Test D : prior = JOINTLYROBUST, parametrization = STANDARD + +prior = ot.GeneralLinearModelAlgorithm.JOINTLYROBUST +parametrization = ot.CovarianceModelImplementation.STANDARD +priorname = ComputePriorName(prior) +parametrizationname = ComputeParametrizationName(parametrization) +print("") +print("+ prior", priorname, ", parametrization", parametrizationname) +FitKrigingOpenTURNS(scale, amplitude, nu, prior, parametrization) +FitKrigingGu(scale, amplitude, nu, prior, parametrization) + +#################################################################### +# Test E : prior = JOINTLYROBUST, parametrization = INVERSE + +prior = ot.GeneralLinearModelAlgorithm.JOINTLYROBUST +parametrization = ot.CovarianceModelImplementation.INVERSE +priorname = ComputePriorName(prior) +parametrizationname = ComputeParametrizationName(parametrization) +print("") +print("+ prior", priorname, ", parametrization", parametrizationname) +FitKrigingOpenTURNS(scale, 
+FitKrigingGu(scale, amplitude, nu, prior, parametrization)
+
+####################################################################
+# Test F: prior = JOINTLYROBUST, parametrization = LOGINVERSE
+
+prior = ot.GeneralLinearModelAlgorithm.JOINTLYROBUST
+parametrization = ot.CovarianceModelImplementation.LOGINVERSE
+priorname = ComputePriorName(prior)
+parametrizationname = ComputeParametrizationName(parametrization)
+print("")
+print("+ prior", priorname, ", parametrization", parametrizationname)
+FitKrigingOpenTURNS(scale, amplitude, nu, prior, parametrization)
+FitKrigingGu(scale, amplitude, nu, prior, parametrization)
+

From 39560673ef4d1b2b7ecd423bd011e3d137e9722d Mon Sep 17 00:00:00 2001
From: josephmure
Date: Tue, 17 Mar 2020 15:14:50 +0100
Subject: [PATCH 2/2] Compute log-derivative of correlation matrix in
 reference prior computation.

This matches the implementation in the R package RobustGaSP.
All currently implemented unit tests pass.

---
 .../Kriging/GeneralLinearModelAlgorithm.cxx | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GeneralLinearModelAlgorithm.cxx b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GeneralLinearModelAlgorithm.cxx
index 3aae6a0542d..e61efb2d1fe 100644
--- a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GeneralLinearModelAlgorithm.cxx
+++ b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GeneralLinearModelAlgorithm.cxx
@@ -827,18 +827,26 @@ Scalar GeneralLinearModelAlgorithm::computeLogIntegratedLikelihoodPenalization()
     sigmaTheta = sigmaTheta - LLtinvFFtLLtinvFiFtLLtinv;
   }
 
+  // compute the "log-derivative" of the correlation matrix with respect to the scale parameters
+  Collection<SquareMatrix> correlationMatrixLogDerivatives(inputDimension, SquareMatrix(size));
+  for (UnsignedInteger i = 0; i < inputDimension; ++ i)
+  {
+    correlationMatrixLogDerivatives[i] = sigmaTheta * dCds[i];
+  }
+
+
   // compute Fisher information matrix iTheta
   // lower triangle
   for (UnsignedInteger i = 0; i < inputDimension; ++ i)
   {
     for (UnsignedInteger j = 0; j <= i; ++ j)
     {
-      iTheta(i, j) = (sigmaTheta * dCds[i] * sigmaTheta * dCds[j]).computeTrace();
+      iTheta(i, j) = (correlationMatrixLogDerivatives[i] * correlationMatrixLogDerivatives[j]).computeTrace();
     }
   }
   // bottom line
   for (UnsignedInteger j = 0; j < inputDimension; ++ j)
   {
-    iTheta(inputDimension, j) = (sigmaTheta * dCds[j]).computeTrace();
+    iTheta(inputDimension, j) = correlationMatrixLogDerivatives[j].computeTrace();
   }
   // bottom right corner
   iTheta(inputDimension, inputDimension) = size - beta_.getSize();
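
For reference, a minimal NumPy sketch (not part of the patch; the matrix sizes and random entries are arbitrary placeholders) of the refactoring above. It checks that precomputing the "log-derivatives" L_i = sigmaTheta * dCds[i] leaves the Fisher information matrix unchanged, since tr(S A S B) = tr((S A)(S B)) by associativity of the matrix product:

import numpy as np

rng = np.random.default_rng(0)
size, inputDimension = 5, 2
sigmaTheta = rng.standard_normal((size, size))
dCds = [rng.standard_normal((size, size)) for _ in range(inputDimension)]

# former formulation: rebuild the four-matrix product inside the double loop
iTheta_old = np.array([[np.trace(sigmaTheta @ dCds[i] @ sigmaTheta @ dCds[j])
                        for j in range(inputDimension)]
                       for i in range(inputDimension)])

# new formulation: precompute the "log-derivatives" once,
# then take traces of pairwise products
logDeriv = [sigmaTheta @ dCds[i] for i in range(inputDimension)]
iTheta_new = np.array([[np.trace(logDeriv[i] @ logDeriv[j])
                        for j in range(inputDimension)]
                       for i in range(inputDimension)])

assert np.allclose(iTheta_old, iTheta_new)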