From 696ad076414f25e6c947fc99d876dfd4eb4b53cc Mon Sep 17 00:00:00 2001 From: deseilligny Date: Tue, 19 Dec 2023 17:56:09 +0100 Subject: [PATCH 01/21] In constraint optim --- MMVII/src/Matrix/cResolSysNonLinear.cpp | 118 ++++++++++++++++++++++++ 1 file changed, 118 insertions(+) diff --git a/MMVII/src/Matrix/cResolSysNonLinear.cpp b/MMVII/src/Matrix/cResolSysNonLinear.cpp index de0661cf86..d183942543 100755 --- a/MMVII/src/Matrix/cResolSysNonLinear.cpp +++ b/MMVII/src/Matrix/cResolSysNonLinear.cpp @@ -9,6 +9,9 @@ using namespace MMVII; namespace MMVII { + + + template class cREAL8_RWAdapt : public cResidualWeighter { public : @@ -360,12 +363,127 @@ int cREAL8_RSNL::CountFreeVariables() const } +/* ************************************************************ */ +/* */ +/* cOneLinearConstraint */ +/* */ +/* ************************************************************ */ + +/* Class for handling linear constraint in non linear optimization system. + * Note the constraint on a vector X as : + * + * mL . X = mC where mL is a non null vector + * + * The way it is done : + * + * (1) We select an arbitray non null coord of L Li!=0; (something like the biggest one) + * (2) We suppose Li=1.0 (in fact we have it by mL = mL/Li , mC = mC/Li) + * (3) Let not X' the vector X without Xi + * (4) we have Xi = mC- mL X' + * (5) Each time we add a new obs in sytem : + * A.X = B + * A.x-B = A' X' -B + Ai Xi = A' X' -B + Ai (mC-mL X') + * + * (A'-Ai mL) = B + * + */ + +template class cOneLinearConstraint +{ + public : + typedef cSparseVect tSV; + /** In Cstr we can fix the index of subst, if it value -1 let the system select the best , fixing can be usefull in case + * of equivalence + */ + cOneLinearConstraint(const tSV&aLP,const Type& aCste,cSetIntDyn & aSetSubst,int aInd =-1); + + void ModifDenseEqLinear(cDenseVect & aCoeff,Type & aRHS, const cDenseVect & aCurSol); + private : + tSV mLP; /// Linear part + int mISubst; /// Indexe which is substituted + Type mCste; /// Constant of the constrainte +}; + +template cOneLinearConstraint::cOneLinearConstraint(const tSV&aLP,const Type& aCste,cSetIntDyn & aSetSubst,int aKPair) : + mLP {} +{ + const typename tSV::tCont & aVPair = aLP.IV(); + // if indexe was not forced or is already, get the "best" one + if ((aKPair<0) || aSetSubst.mOccupied.at(aVPair.at(aKPair).mInd)) + { + cWhichMax aMaxInd(-1,0); + // extract the index, not occupied + for (size_t aKP=0 ; aKP void cOneLinearConstraint::ModifDenseEqLinear(cDenseVect & aCoeff,Type & aRHS, const cDenseVect & ) +{ +} + +/* +template void cResolSysNonLinear::AddObservationLinear + ( + const Type& aWeight, + const cDenseVect & aCoeff, + const Type & aRHS + ) +{ + mInPhaseAddEq = true; + Type aNewRHS = aRHS; + cDenseVect aNewCoeff = aCoeff.Dup(); + + for (int aK=0 ; aKPublicAddObservation(aWeight,aNewCoeff,aNewRHS); +} +*/ + +template class cOneLinearConstraint; +template class cOneLinearConstraint; +template class cOneLinearConstraint; + /* ************************************************************ */ /* */ /* cResolSysNonLinear */ /* */ /* ************************************************************ */ + + // ===== constructors / destructors ================ template cResolSysNonLinear::cResolSysNonLinear(tLinearSysSR * aSys,const tDVect & aInitSol) : From ef2531c495f7602d5cfc6839c387a22ed2f7a9f6 Mon Sep 17 00:00:00 2001 From: deseilligny Date: Wed, 20 Dec 2023 18:37:56 +0100 Subject: [PATCH 02/21] In Linear constrained optimization --- MMVII/include/MMVII_SysSurR.h | 15 + MMVII/include/MMVII_util.h | 2 +- 
MMVII/src/Matrix/cInputOutputRSNL.cpp | 262 +++++++++++++++ MMVII/src/Matrix/cLinearConstraint.cpp | 174 ++++++++++ MMVII/src/Matrix/cResolSysNonLinear.cpp | 424 +----------------------- MMVII/src/Matrix/cWeighters.cpp | 100 ++++++ 6 files changed, 553 insertions(+), 424 deletions(-) create mode 100755 MMVII/src/Matrix/cInputOutputRSNL.cpp create mode 100755 MMVII/src/Matrix/cLinearConstraint.cpp create mode 100755 MMVII/src/Matrix/cWeighters.cpp diff --git a/MMVII/include/MMVII_SysSurR.h b/MMVII/include/MMVII_SysSurR.h index 0192ac3095..78e6b9c564 100755 --- a/MMVII/include/MMVII_SysSurR.h +++ b/MMVII/include/MMVII_SysSurR.h @@ -54,6 +54,18 @@ template class cResidualWeighterExplicit: public cResidualWeighter< tStdVect mWeights; }; + +template class cREAL8_RWAdapt : public cResidualWeighter +{ + public : + typedef std::vector tStdVect; + cREAL8_RWAdapt(const cResidualWeighter * aRW) ; + tStdVect WeightOfResidual(const tStdVect & aVIn) const override; + private : + const cResidualWeighter* mRW; +}; + + /// Index to use in vector of index indicating a variable to substituate static constexpr int RSL_INDEX_SUBST_TMP = -1; @@ -801,6 +813,9 @@ template class cObjWithUnkowns // : public cObjOfMultipleObjUk void ConvertVWD(cInputOutputRSNL & aIO1 , const cInputOutputRSNL & aIO2); + + }; #endif // _MMVII_SysSurR_H_ diff --git a/MMVII/include/MMVII_util.h b/MMVII/include/MMVII_util.h index 8366fc5963..75937c499d 100755 --- a/MMVII/include/MMVII_util.h +++ b/MMVII/include/MMVII_util.h @@ -356,7 +356,7 @@ class cSetIntDyn void Clear(); void AddIndFixe(size_t aK) ///< Add an element, assume sizeof vector of { - if (!mOccupied[aK]) + if (!mOccupied.at(aK)) { mOccupied[aK] = true; mVIndOcc.push_back(aK); diff --git a/MMVII/src/Matrix/cInputOutputRSNL.cpp b/MMVII/src/Matrix/cInputOutputRSNL.cpp new file mode 100755 index 0000000000..bdc6fbe50f --- /dev/null +++ b/MMVII/src/Matrix/cInputOutputRSNL.cpp @@ -0,0 +1,262 @@ + +#include "MMVII_Tpl_Images.h" + +#include "MMVII_SysSurR.h" + +using namespace NS_SymbolicDerivative; +using namespace MMVII; + +namespace MMVII +{ + + + +template void ConvertVWD(cInputOutputRSNL & aIO1 , const cInputOutputRSNL & aIO2) +{ + Convert(aIO1.mWeights,aIO2.mWeights); + Convert(aIO1.mVals,aIO2.mVals); + + aIO1.mDers.resize(aIO2.mDers.size()); + for (size_t aKDer=0 ; aKDer cInputOutputRSNL::cInputOutputRSNL(const tVectInd& aVInd,const tStdVect & aVObs): + mGlobVInd (aVInd), + mVObs (aVObs), + mNbTmpUk (0) +{ + + // Check consistency on temporary indexes + for (const auto & anInd : aVInd) + { + if (cSetIORSNL_SameTmp::IsIndTmp(anInd)) + { + mNbTmpUk++; + } + } + // MMVII_INTERNAL_ASSERT_tiny(mNbTmpUk==mVTmpUk.size(),"Size Tmp/subst in cInputOutputRSNL"); +} + + +template cInputOutputRSNL::cInputOutputRSNL(bool Fake,const cInputOutputRSNL & aR_IO) : + cInputOutputRSNL + ( + aR_IO.mGlobVInd, + VecConvert(aR_IO.mVObs) + ) +{ + ConvertVWD(*this,aR_IO); +} + + +template Type cInputOutputRSNL::WeightOfKthResisual(int aK) const +{ + switch (mWeights.size()) + { + case 0 : return 1.0; + case 1 : return mWeights[0]; + default : return mWeights.at(aK); + } +} +template size_t cInputOutputRSNL::NbUkTot() const +{ + return mGlobVInd.size() ; +} + +template bool cInputOutputRSNL::IsOk() const +{ + if (mVals.size() !=mDers.size()) + return false; + + if (mVals.empty()) + return false; + + { + size_t aNbUk = NbUkTot(); + for (const auto & aDer : mDers) + if (aDer.size() != aNbUk) + return false; + } + + { + size_t aSzW = mWeights.size(); + if ((aSzW>1) && (aSzW!= mVals.size())) + return 
false; + } + return true; +} + + + + + +/* ************************************************************ */ +/* */ +/* cSetIORSNL_SameTmp */ +/* */ +/* ************************************************************ */ + +template cSetIORSNL_SameTmp::cSetIORSNL_SameTmp + ( + const tStdVect & aValTmpUk, + const tVectInd & aVFix, + const tStdVect & aValFix + ) : + mVFix (aVFix), + mValFix (aValFix), + mOk (false), + mNbTmpUk (aValTmpUk.size()), + mValTmpUk (aValTmpUk), + mVarTmpIsFrozen (mNbTmpUk,false), + mValueFrozenVarTmp (mNbTmpUk,-283971), // random val + mNbEq (0), + mSetIndTmpUk (mNbTmpUk) +{ + MMVII_INTERNAL_ASSERT_tiny((aVFix.size()==aValFix.size()) || aValFix.empty(),"Bad size for fix var tmp"); + + for (size_t aKInd=0 ; aKInd int cSetIORSNL_SameTmp::NbRedundacy() const +{ + return mNbEq -mNbTmpUk; +} + +template cSetIORSNL_SameTmp::cSetIORSNL_SameTmp(bool Fake,const cSetIORSNL_SameTmp & aR_Set) : + cSetIORSNL_SameTmp + ( + VecConvert(aR_Set.mValTmpUk), + aR_Set.mVFix, + VecConvert(aR_Set.mValFix) + ) +{ + for (const auto & anIO : aR_Set.mVEq) + AddOneEq(cInputOutputRSNL(false,anIO)); +} + + + + +template size_t cSetIORSNL_SameTmp::ToIndTmp(int anInd) { return -(anInd+1); } +template bool cSetIORSNL_SameTmp::IsIndTmp(int anInd) +{ + return anInd<0; +} +template size_t cSetIORSNL_SameTmp::NbTmpUk() const { return mNbTmpUk; } +template const std::vector & cSetIORSNL_SameTmp::ValTmpUk() const { return mValTmpUk; } +template Type cSetIORSNL_SameTmp::Val1TmpUk(int aInd) const { return mValTmpUk.at(ToIndTmp(aInd));} + + + +template void cSetIORSNL_SameTmp::AddOneEq(const tIO_OneEq & anIO_In) +{ + mVEq.push_back(anIO_In); + tIO_OneEq & anIO = mVEq.back(); + + MMVII_INTERNAL_ASSERT_tiny(anIO.IsOk(),"Bad size for cInputOutputRSNL"); + + // for (const auto & anInd : anIO.mGlobVInd) + for (size_t aKInd=0 ; aKInd mNbTmpUk) // A priori there is no use to less or equal equation, this doesnt give any constraint + && ( mSetIndTmpUk.NbElem()== mNbTmpUk) // we are sure to have good index, because we cannot add oustide + ) + { + mOk = true; + } +} + +template void cSetIORSNL_SameTmp::AddFixVarTmp (int aInd,const Type& aVal,const Type& aWeight) +{ + MMVII_INTERNAL_ASSERT_tiny + ( + cSetIORSNL_SameTmp::IsIndTmp(aInd), + "Non tempo index in AddFixVarTmp" + ); + + // tVectInd aVInd{anInd}; + + cInputOutputRSNL aIO({aInd},{}); + aIO.mWeights.push_back(aWeight); + aIO.mDers.push_back({1.0}); + Type aDVal = Val1TmpUk(aInd)-aVal; + aIO.mVals.push_back({aDVal}); + + AddOneEq(aIO); +} + +template void cSetIORSNL_SameTmp::AddFixCurVarTmp (int aInd,const Type& aWeight) +{ + AddFixVarTmp(aInd,Val1TmpUk(aInd),aWeight); +} + +template + const std::vector >& + cSetIORSNL_SameTmp::AllEq() const +{ + return mVEq; +} + +template void cSetIORSNL_SameTmp::AssertOk() const +{ + MMVII_INTERNAL_ASSERT_tiny(mOk,"Not enough eq to use tmp unknowns"); +} + + + +/* ************************************************************ */ +/* */ +/* INSTANTIATION */ +/* */ +/* ************************************************************ */ + +#define INSTANTIATE_RESOLSYSNL(TYPE)\ +template class cInputOutputRSNL;\ +template class cSetIORSNL_SameTmp;\ +template void ConvertVWD(cInputOutputRSNL & aIO1 , const cInputOutputRSNL & aIO2); + + +INSTANTIATE_RESOLSYSNL(tREAL4) +INSTANTIATE_RESOLSYSNL(tREAL8) +INSTANTIATE_RESOLSYSNL(tREAL16) + + +}; diff --git a/MMVII/src/Matrix/cLinearConstraint.cpp b/MMVII/src/Matrix/cLinearConstraint.cpp new file mode 100755 index 0000000000..a528b8e3d8 --- /dev/null +++ b/MMVII/src/Matrix/cLinearConstraint.cpp @@ 
-0,0 +1,174 @@ + +#include "MMVII_Tpl_Images.h" + +#include "MMVII_SysSurR.h" + +using namespace NS_SymbolicDerivative; +using namespace MMVII; + +namespace MMVII +{ + +/* ************************************************************ */ +/* */ +/* cOneLinearConstraint */ +/* */ +/* ************************************************************ */ + +/** Class for a "sparse" dense vector, i.e a vector that is represented by a dense vector + */ + +template class cDSVec +{ + public : + cDSVec(size_t aNbVar); + + void Add(const Type &,int anInd); + + + cDenseVect mVec; + cSetIntDyn mSet; +}; + +template cDSVec::cDSVec(size_t aNbVar) : + mVec (aNbVar,eModeInitImage::eMIA_Null), + mSet (aNbVar) +{ +} + +template void cDSVec::Add(const Type & aVal,int anInd) +{ + mVec(anInd) += aVal; + mSet.AddIndFixe(anInd); +} + +/* Class for handling linear constraint in non linear optimization system. + * Note the constraint on a vector X as : + * + * mL . X = mC where mL is a non null vector + * + * The way it is done : + * + * (1) We select an arbitray non null coord of L Li!=0; (something like the biggest one) + * (2) We suppose Li=1.0 (in fact we have it by mL = mL/Li , mC = mC/Li) + * (3) Let not X' the vector X without Xi + * (4) we have Xi = mC- mL X' + * (5) Each time we add a new obs in sytem : + * A.X = B + * A.x-B = A' X' -B + Ai Xi = A' X' -B + Ai (mC-mL X') + * + * (A'-Ai mL) = B + * + */ + + +template class cOneLinearConstraint +{ + public : + typedef cSparseVect tSV; + /** In Cstr we can fix the index of subst, if it value -1 let the system select the best , fixing can be usefull in case + * of equivalence + */ + cOneLinearConstraint(const tSV&aLP,const Type& aCste); + + + void SubstractIn(cOneLinearConstraint & aToSub,cDSVec & aBuf); + + void ModifDenseEqLinear(cDenseVect & aCoeff,Type & aRHS, const cDenseVect & aCurSol); + private : + + void AddBuf(cDSVec & aBuf,const Type & aMul,int aI2Avoid); + + tSV mLP; /// Linear part + int mISubst; /// Indexe which is substituted + Type mCste; /// Constant of the constrainte +}; + +template cOneLinearConstraint::cOneLinearConstraint(const tSV&aLP,const Type& aCste) : + mLP (aLP), + mISubst (-1), + mCste (aCste) +{ +} + +template void cOneLinearConstraint::AddBuf(cDSVec & aBuf,const Type & aMul,int aI2Avoid) +{ + for (const auto & aPair : mLP.IV()) + { + if (aPair.mInd != aI2Avoid) + { + aBuf.Add(aPair.mInd,aPair.mVal * aMul); + } + } +} + +template void cOneLinearConstraint::SubstractIn(cOneLinearConstraint & aToSub,cDSVec & aBuf) +{ + // substract constant + aToSub.mCste -= mCste; + + aToSub.AddBuf(aBuf, 1.0,mISubst); + this ->AddBuf(aBuf,-1.0,mISubst); + + aToSub.mLP.Reset(); + for (const auto & aInd : aBuf.mSet.mVIndOcc) + { + aToSub.mLP.AddIV(aInd,aBuf.mVec(aInd)); + } +} + + + +/* + mLP {} +{ + const typename tSV::tCont & aVPair = aLP.IV(); + // if indexe was not forced or is already, get the "best" one + if ((aKPair<0) || aSetSubst.mOccupied.at(aVPair.at(aKPair).mInd)) + { + cWhichMax aMaxInd(-1,0); + // extract the index, not occupied + for (size_t aKP=0 ; aKP void cOneLinearConstraint::ModifDenseEqLinear(cDenseVect & aCoeff,Type & aRHS, const cDenseVect & ) +{ +} +*/ + +#define INSTANTIATE_LINEAER_CONSTR(TYPE)\ +template class cOneLinearConstraint;\ +template class cDSVec; + +INSTANTIATE_LINEAER_CONSTR(tREAL16) +INSTANTIATE_LINEAER_CONSTR(tREAL8) +INSTANTIATE_LINEAER_CONSTR(tREAL4) + +// template class cOneLinearConstraint; +// template class cOneLinearConstraint; +// template class cOneLinearConstraint; + +}; diff --git 
a/MMVII/src/Matrix/cResolSysNonLinear.cpp b/MMVII/src/Matrix/cResolSysNonLinear.cpp index d183942543..d55b4999d1 100755 --- a/MMVII/src/Matrix/cResolSysNonLinear.cpp +++ b/MMVII/src/Matrix/cResolSysNonLinear.cpp @@ -9,317 +9,9 @@ using namespace MMVII; namespace MMVII { - - - -template class cREAL8_RWAdapt : public cResidualWeighter -{ - public : - typedef std::vector tStdVect; - - cREAL8_RWAdapt(const cResidualWeighter * aRW) : - mRW (aRW) - { - } - tStdVect WeightOfResidual(const tStdVect & aVIn) const override - { - std::vector aRV; - Convert(aRV,aVIn); - - aRV = mRW->WeightOfResidual(aRV); - tStdVect aVOut; - - return Convert(aVOut,aRV); - } - private : - - const cResidualWeighter* mRW; - -}; - -template void ConvertVWD(cInputOutputRSNL & aIO1 , const cInputOutputRSNL & aIO2) -{ - Convert(aIO1.mWeights,aIO2.mWeights); - Convert(aIO1.mVals,aIO2.mVals); - - aIO1.mDers.resize(aIO2.mDers.size()); - for (size_t aKDer=0 ; aKDer cInputOutputRSNL::cInputOutputRSNL(const tVectInd& aVInd,const tStdVect & aVObs): - mGlobVInd (aVInd), - mVObs (aVObs), - mNbTmpUk (0) -{ - - // Check consistency on temporary indexes - for (const auto & anInd : aVInd) - { - if (cSetIORSNL_SameTmp::IsIndTmp(anInd)) - { - mNbTmpUk++; - } - } - // MMVII_INTERNAL_ASSERT_tiny(mNbTmpUk==mVTmpUk.size(),"Size Tmp/subst in cInputOutputRSNL"); -} - - -template cInputOutputRSNL::cInputOutputRSNL(bool Fake,const cInputOutputRSNL & aR_IO) : - cInputOutputRSNL - ( - aR_IO.mGlobVInd, - VecConvert(aR_IO.mVObs) - ) -{ - ConvertVWD(*this,aR_IO); -} - - -template Type cInputOutputRSNL::WeightOfKthResisual(int aK) const -{ - switch (mWeights.size()) - { - case 0 : return 1.0; - case 1 : return mWeights[0]; - default : return mWeights.at(aK); - } -} -template size_t cInputOutputRSNL::NbUkTot() const -{ - return mGlobVInd.size() ; -} - -template bool cInputOutputRSNL::IsOk() const -{ - if (mVals.size() !=mDers.size()) - return false; - - if (mVals.empty()) - return false; - - { - size_t aNbUk = NbUkTot(); - for (const auto & aDer : mDers) - if (aDer.size() != aNbUk) - return false; - } - - { - size_t aSzW = mWeights.size(); - if ((aSzW>1) && (aSzW!= mVals.size())) - return false; - } - return true; -} - - - - - -/* ************************************************************ */ -/* */ -/* cSetIORSNL_SameTmp */ -/* */ -/* ************************************************************ */ - -template cSetIORSNL_SameTmp::cSetIORSNL_SameTmp - ( - const tStdVect & aValTmpUk, - const tVectInd & aVFix, - const tStdVect & aValFix - ) : - mVFix (aVFix), - mValFix (aValFix), - mOk (false), - mNbTmpUk (aValTmpUk.size()), - mValTmpUk (aValTmpUk), - mVarTmpIsFrozen (mNbTmpUk,false), - mValueFrozenVarTmp (mNbTmpUk,-283971), // random val - mNbEq (0), - mSetIndTmpUk (mNbTmpUk) -{ - MMVII_INTERNAL_ASSERT_tiny((aVFix.size()==aValFix.size()) || aValFix.empty(),"Bad size for fix var tmp"); - - for (size_t aKInd=0 ; aKInd int cSetIORSNL_SameTmp::NbRedundacy() const -{ - return mNbEq -mNbTmpUk; -} - -template cSetIORSNL_SameTmp::cSetIORSNL_SameTmp(bool Fake,const cSetIORSNL_SameTmp & aR_Set) : - cSetIORSNL_SameTmp - ( - VecConvert(aR_Set.mValTmpUk), - aR_Set.mVFix, - VecConvert(aR_Set.mValFix) - ) -{ - for (const auto & anIO : aR_Set.mVEq) - AddOneEq(cInputOutputRSNL(false,anIO)); -} - - - - -template size_t cSetIORSNL_SameTmp::ToIndTmp(int anInd) { return -(anInd+1); } -template bool cSetIORSNL_SameTmp::IsIndTmp(int anInd) -{ - return anInd<0; -} -template size_t cSetIORSNL_SameTmp::NbTmpUk() const { return mNbTmpUk; } -template const std::vector & 
cSetIORSNL_SameTmp::ValTmpUk() const { return mValTmpUk; } -template Type cSetIORSNL_SameTmp::Val1TmpUk(int aInd) const { return mValTmpUk.at(ToIndTmp(aInd));} - - - -template void cSetIORSNL_SameTmp::AddOneEq(const tIO_OneEq & anIO_In) -{ - mVEq.push_back(anIO_In); - tIO_OneEq & anIO = mVEq.back(); - - MMVII_INTERNAL_ASSERT_tiny(anIO.IsOk(),"Bad size for cInputOutputRSNL"); - - // for (const auto & anInd : anIO.mGlobVInd) - for (size_t aKInd=0 ; aKInd mNbTmpUk) // A priori there is no use to less or equal equation, this doesnt give any constraint - && ( mSetIndTmpUk.NbElem()== mNbTmpUk) // we are sure to have good index, because we cannot add oustide - ) - { - mOk = true; - } -} - -template void cSetIORSNL_SameTmp::AddFixVarTmp (int aInd,const Type& aVal,const Type& aWeight) -{ - MMVII_INTERNAL_ASSERT_tiny - ( - cSetIORSNL_SameTmp::IsIndTmp(aInd), - "Non tempo index in AddFixVarTmp" - ); - - // tVectInd aVInd{anInd}; - - cInputOutputRSNL aIO({aInd},{}); - aIO.mWeights.push_back(aWeight); - aIO.mDers.push_back({1.0}); - Type aDVal = Val1TmpUk(aInd)-aVal; - aIO.mVals.push_back({aDVal}); - - AddOneEq(aIO); -} - -template void cSetIORSNL_SameTmp::AddFixCurVarTmp (int aInd,const Type& aWeight) -{ - AddFixVarTmp(aInd,Val1TmpUk(aInd),aWeight); -} - -template - const std::vector >& - cSetIORSNL_SameTmp::AllEq() const -{ - return mVEq; -} - -template void cSetIORSNL_SameTmp::AssertOk() const -{ - MMVII_INTERNAL_ASSERT_tiny(mOk,"Not enough eq to use tmp unknowns"); -} - - -/* ************************************************************ */ -/* */ -/* cResidualWeighter */ -/* */ -/* ************************************************************ */ - -template cResidualWeighter::cResidualWeighter(const Type & aVal) : - mVal (aVal) -{ -} - -template std::vector cResidualWeighter::WeightOfResidual(const tStdVect & aVResidual) const -{ - return tStdVect(aVResidual.size(),mVal); -} - -/* ************************************************************ */ -/* */ -/* cExplicitWeighter */ -/* */ -/* ************************************************************ */ - -template -cResidualWeighterExplicit::cResidualWeighterExplicit(bool isSigmas, const tStdVect & aData) : - mSigmas{}, mWeights{} -{ - tStdVect aDataInv {}; - std::for_each(aData.begin(), aData.end(), [&](Type s) { aDataInv.push_back(1/(s*s)); }); - if (isSigmas) - { - mSigmas = aData; - mWeights = aDataInv; - } else { - mSigmas = aDataInv; // MPD->JMM : should be rather mSigmas = 1/sqrt(W) ?? - mWeights = aData; - } -} - -template -std::vector cResidualWeighterExplicit::WeightOfResidual(const std::vector &aVResidual) const -{ - MMVII_INTERNAL_ASSERT_tiny(mWeights.size() == aVResidual.size(), "Number of weights does not correpond to number of residuals"); - return mWeights; -} - - -/* ************************************************************ */ -/* */ -/* cResolSysNonLinear */ +/* cREAL8_RSNL */ /* */ /* ************************************************************ */ @@ -363,118 +55,6 @@ int cREAL8_RSNL::CountFreeVariables() const } -/* ************************************************************ */ -/* */ -/* cOneLinearConstraint */ -/* */ -/* ************************************************************ */ - -/* Class for handling linear constraint in non linear optimization system. - * Note the constraint on a vector X as : - * - * mL . 
X = mC where mL is a non null vector - * - * The way it is done : - * - * (1) We select an arbitray non null coord of L Li!=0; (something like the biggest one) - * (2) We suppose Li=1.0 (in fact we have it by mL = mL/Li , mC = mC/Li) - * (3) Let not X' the vector X without Xi - * (4) we have Xi = mC- mL X' - * (5) Each time we add a new obs in sytem : - * A.X = B - * A.x-B = A' X' -B + Ai Xi = A' X' -B + Ai (mC-mL X') - * - * (A'-Ai mL) = B - * - */ - -template class cOneLinearConstraint -{ - public : - typedef cSparseVect tSV; - /** In Cstr we can fix the index of subst, if it value -1 let the system select the best , fixing can be usefull in case - * of equivalence - */ - cOneLinearConstraint(const tSV&aLP,const Type& aCste,cSetIntDyn & aSetSubst,int aInd =-1); - - void ModifDenseEqLinear(cDenseVect & aCoeff,Type & aRHS, const cDenseVect & aCurSol); - private : - tSV mLP; /// Linear part - int mISubst; /// Indexe which is substituted - Type mCste; /// Constant of the constrainte -}; - -template cOneLinearConstraint::cOneLinearConstraint(const tSV&aLP,const Type& aCste,cSetIntDyn & aSetSubst,int aKPair) : - mLP {} -{ - const typename tSV::tCont & aVPair = aLP.IV(); - // if indexe was not forced or is already, get the "best" one - if ((aKPair<0) || aSetSubst.mOccupied.at(aVPair.at(aKPair).mInd)) - { - cWhichMax aMaxInd(-1,0); - // extract the index, not occupied - for (size_t aKP=0 ; aKP void cOneLinearConstraint::ModifDenseEqLinear(cDenseVect & aCoeff,Type & aRHS, const cDenseVect & ) -{ -} - -/* -template void cResolSysNonLinear::AddObservationLinear - ( - const Type& aWeight, - const cDenseVect & aCoeff, - const Type & aRHS - ) -{ - mInPhaseAddEq = true; - Type aNewRHS = aRHS; - cDenseVect aNewCoeff = aCoeff.Dup(); - - for (int aK=0 ; aKPublicAddObservation(aWeight,aNewCoeff,aNewRHS); -} -*/ - -template class cOneLinearConstraint; -template class cOneLinearConstraint; -template class cOneLinearConstraint; /* ************************************************************ */ /* */ @@ -1073,8 +653,6 @@ template cDenseVect cResolSysNonLinear::R_SolveUpdat #define INSTANTIATE_RESOLSYSNL(TYPE)\ template class cInputOutputRSNL;\ template class cSetIORSNL_SameTmp;\ -template class cResidualWeighter;\ -template class cResidualWeighterExplicit;\ template class cResolSysNonLinear; INSTANTIATE_RESOLSYSNL(tREAL4) diff --git a/MMVII/src/Matrix/cWeighters.cpp b/MMVII/src/Matrix/cWeighters.cpp new file mode 100755 index 0000000000..24904ec55e --- /dev/null +++ b/MMVII/src/Matrix/cWeighters.cpp @@ -0,0 +1,100 @@ + +#include "MMVII_Tpl_Images.h" + +#include "MMVII_SysSurR.h" + +using namespace NS_SymbolicDerivative; +using namespace MMVII; + +namespace MMVII +{ + +/* ************************************************************ */ +/* */ +/* cREAL8_RWAdapt */ +/* */ +/* ************************************************************ */ + +template + cREAL8_RWAdapt::cREAL8_RWAdapt(const cResidualWeighter * aRW) : + mRW (aRW) +{ +} + + +template typename cREAL8_RWAdapt::tStdVect cREAL8_RWAdapt::WeightOfResidual(const tStdVect & aVIn) const +{ + std::vector aRV; + Convert(aRV,aVIn); + + aRV = mRW->WeightOfResidual(aRV); + tStdVect aVOut; + + return Convert(aVOut,aRV); +} + + + +/* ************************************************************ */ +/* */ +/* cResidualWeighter */ +/* */ +/* ************************************************************ */ + +template cResidualWeighter::cResidualWeighter(const Type & aVal) : + mVal (aVal) +{ +} + +template std::vector cResidualWeighter::WeightOfResidual(const tStdVect & 
aVResidual) const +{ + return tStdVect(aVResidual.size(),mVal); +} + +/* ************************************************************ */ +/* */ +/* cExplicitWeighter */ +/* */ +/* ************************************************************ */ + +template +cResidualWeighterExplicit::cResidualWeighterExplicit(bool isSigmas, const tStdVect & aData) : + mSigmas{}, mWeights{} +{ + tStdVect aDataInv {}; + std::for_each + ( + aData.begin(), aData.end(), + [&](Type aValue) { aDataInv.push_back( isSigmas ? 1/Square(aValue) : 1/std::sqrt(aValue) ); } + ); + if (isSigmas) + { + mSigmas = aData; + mWeights = aDataInv; + } + else + { + mSigmas = aDataInv; + mWeights = aData; + } +} + +template +std::vector cResidualWeighterExplicit::WeightOfResidual(const std::vector &aVResidual) const +{ + MMVII_INTERNAL_ASSERT_tiny(mWeights.size() == aVResidual.size(), "Number of weights does not correpond to number of residuals"); + return mWeights; +} + + +#define INSTANTIATE_RESOLSYSNL(TYPE)\ +template class cREAL8_RWAdapt;\ +template class cResidualWeighter;\ +template class cResidualWeighterExplicit; + +INSTANTIATE_RESOLSYSNL(tREAL4) +INSTANTIATE_RESOLSYSNL(tREAL8) +INSTANTIATE_RESOLSYSNL(tREAL16) + + +}; From 45937345742521cb7794590b2a5608bdfad07d1c Mon Sep 17 00:00:00 2001 From: deseilligny Date: Sun, 31 Dec 2023 13:08:55 +0100 Subject: [PATCH 03/21] In Bench Linear Constr --- MMVII/include/MMVII_Bench.h | 3 + MMVII/include/MMVII_Images.h | 14 +- MMVII/include/MMVII_Matrix.h | 67 +++- MMVII/include/MMVII_SysSurR.h | 1 + MMVII/src/Bench/BenchGlob.cpp | 1 + MMVII/src/Bench/BenchMatrix.cpp | 92 ++++- MMVII/src/ImagesBase/BaseImage.cpp | 27 +- MMVII/src/Matrix/BaseMatrixes.cpp | 188 ++++++++- MMVII/src/Matrix/cDenseMatrix.cpp | 52 ++- MMVII/src/Matrix/cL1BarrodaleSover.cpp | 2 +- MMVII/src/Matrix/cLinearConstraint.cpp | 499 ++++++++++++++++++++---- MMVII/src/Matrix/cResolSysNonLinear.cpp | 6 +- MMVII/src/Perso/cMMVII_CatVideo.cpp | 23 +- 13 files changed, 848 insertions(+), 127 deletions(-) diff --git a/MMVII/include/MMVII_Bench.h b/MMVII/include/MMVII_Bench.h index 6ecf322b4b..614bcf94f5 100755 --- a/MMVII/include/MMVII_Bench.h +++ b/MMVII/include/MMVII_Bench.h @@ -145,6 +145,9 @@ void Bench_ToHomMult(cParamExeBench & aParam); // Test conversion set pair Hom = void Bench_SpatialIndex(cParamExeBench & aParam); /// test spatial index +void BenchLinearConstr(cParamExeBench & aParam); /// elementary test on linear constr + + /* Called by BenchGlobImage */ diff --git a/MMVII/include/MMVII_Images.h b/MMVII/include/MMVII_Images.h index d9d2652466..5de9fa72f0 100755 --- a/MMVII/include/MMVII_Images.h +++ b/MMVII/include/MMVII_Images.h @@ -363,14 +363,16 @@ template class cDataTypedIm : public cDataGenUnTypedI cDataTypedIm (const cPtxd & aP0,const cPtxd & aP1, Type * DataLin=nullptr,eModeInitImage=eModeInitImage::eMIA_NoInit); ///< Only cstr virtual ~cDataTypedIm(); ///< Big obj, do it virtual - // All distance-norm are normalized/averaged , so that const image has a norm equal to the constante - double L1Dist(const cDataTypedIm & aV) const; ///< Distance som abs - double L2Dist(const cDataTypedIm & aV) const; ///< Dist som square - double SqL2Dist(const cDataTypedIm & aV) const; ///< Square L2Dist + // If Avg=true, All distance-norm are normalized/averaged , so that const image has a norm equal to the constante + // This is default for image, but not for matrix/vector + double L1Dist(const cDataTypedIm & aV,bool Avg=true) const; ///< Distance som abs + double L2Dist(const cDataTypedIm & aV,bool Avg=true) const; ///< 
Dist som square + double SqL2Dist(const cDataTypedIm & aV,bool Avg=true) const; ///< Square L2Dist double LInfDist(const cDataTypedIm & aV) const; ///< Dist max - double L1Norm() const; ///< Norm som abs - double L2Norm() const; ///< Norm square + double L1Norm(bool Avg=true) const; ///< Norm som abs + double L2Norm(bool Avg=true) const; ///< Norm square double LInfNorm() const; ///< Nomr max + double SqL2Norm(bool Avg=true) const; ///< Norm square Type MinVal() const; Type MaxVal() const; diff --git a/MMVII/include/MMVII_Matrix.h b/MMVII/include/MMVII_Matrix.h index 60fc10b0d0..c1fe9ea1c3 100755 --- a/MMVII/include/MMVII_Matrix.h +++ b/MMVII/include/MMVII_Matrix.h @@ -39,6 +39,8 @@ template struct cCplIV template class cSparseVect : public cMemCheck { public : + + typedef cSparseVect tSV; typedef cCplIV tCplIV; typedef std::vector tCont; typedef typename tCont::const_iterator const_iterator; @@ -57,7 +59,11 @@ template class cSparseVect : public cMemCheck void AddIV(const tCplIV & aCpl) { IV().push_back(aCpl); } /// Random sparse vector - static cSparseVect RanGenerate(int aNbVar,double aProba); + static cSparseVect RanGenerate(int aNbVar,double aProba,tREAL8 aMinVal= 1e-2,int aMinSize=1); + + // generate NbVect of dimension NbVar with average density a Proba, assuring that in all vector we have + // dist(Uk, < aCosMax + // static std::list > GenerateKVect(int aNbVar,int aNbVect,double aProba,tREAL8 aDMin); /// SzInit fill with arbitray value, only to reserve space // cSparseVect(int aSzReserve=-1,int aSzInit=-1) ; @@ -66,6 +72,11 @@ template class cSparseVect : public cMemCheck /// Check the vector can be used in a matrix,vect [0,Nb[, used in the assertions bool IsInside(int aNb) const; void Reset(); + + const tCplIV * Find(int anInd) const; /// return the pair of a given index + + // Maximum index, aDef is used if empty, if aDef<=-2 & empty erreur + int MaxIndex(int aDef=-1) const; private : /* inline void MakeSort(){if (!mIsSorted) Sort();} @@ -84,11 +95,13 @@ template class cDenseVect typedef cIm1D tIM; typedef cDataIm1D tDIM; typedef cSparseVect tSpV; + typedef cDenseVect tDV; cDenseVect(int aSz, eModeInitImage=eModeInitImage::eMIA_NoInit); cDenseVect(tIM anIm); cDenseVect(const std::vector & aVect); - cDenseVect(int Sz,const tSpV &); + // Adapt size , set + cDenseVect(const tSpV &,int aSz=-1); static cDenseVect Cste(int aSz,const Type & aVal); cDenseVect Dup() const; static cDenseVect RanGenerate(int aNbVar); @@ -102,13 +115,16 @@ template class cDenseVect Type & operator() (int aK) {return DIm().GetV(aK);} const int & Sz() const {return DIm().Sz();} - double L1Dist(const cDenseVect & aV) const; - double L2Dist(const cDenseVect & aV) const; + // For vector/matrix it's more standard than norm are a sum and not an average + double L1Dist(const cDenseVect & aV,bool Avg=false) const; + double L2Dist(const cDenseVect & aV,bool Avg=false) const; - double L1Norm() const; ///< Norm som abs - double L2Norm() const; ///< Norm square + double L1Norm(bool Avg=false) const; ///< Norm som abs + double L2Norm(bool Avg=false) const; ///< Norm square + double SqL2Norm(bool Avg=false) const; ///< Norm square double LInfNorm() const; ///< Nomr max + tDV VecUnit() const; // return V/|V| Type * RawData(); const Type * RawData() const; @@ -126,13 +142,37 @@ template class cDenseVect Type AvgElem() const; ///< Avereage of all elements void SetAvg(const Type & anAvg); ///< multiply by a cste to fix the average + + // operator -= - double DotProduct(const cDenseVect &) const; + double DotProduct(const 
cDenseVect &) const; //== scalar product void TplCheck(const tSpV & aV) const { MMVII_INTERNAL_ASSERT_medium(aV.IsInside(Sz()) ,"Sparse Vector out dense vect"); } void WeightedAddIn(Type aWeight,const tSpV & aColLine); + + /* ========= Othognalization & projection stuff =========== */ + + // ---------- Projection, Dist to space ------------------ + /// return orthognal projection on subspace defined by aVVect, use least square (slow ? At least good enough for bench ) + tDV ProjOnSubspace(const std::vector & aVVect) const; + /// return distance to subspace (i.e distance to proj) + Type DistToSubspace(const std::vector &) const; + + // -------------- Gram schmitd method for orthogonalization ------------------- + /** Elementary step of Gram-Schmit orthogonalization method ; return a vector orthogonal + to all VV and that belong to the space "this+aVV", assumme aVV are already orthogonal */ + tDV GramSchmidtCompletion(const std::vector & aVV) const; + /// full method of gram schmidt to orthogonalize + static std::vector GramSchmidtOrthogonalization(const std::vector & aVV) ; + + // ---------------- Base complementation in orthogonal subspace, slow but dont require initial base orthog -------- + /// Return a unitary vector not colinear to VV, by iteration then randomization, untill the Dist to subspace > DMin + static tDV VecComplem(const std::vector & aVV,Type DMin=0.1) ; + /// Complement the base with vector orthogonal to the base, and orthog between them (if WithInit contain initial vect + added) + static std::vector BaseComplem(const std::vector & aVV,bool WithInit=false,Type DMin=0.1) ; + private : tIM mIm; @@ -184,6 +224,7 @@ template class cMatrix : public cRect2 virtual void ReadLineInPlace(int aY,tDV &) const; virtual tDV ReadLine(int aY) const; virtual void WriteLine(int aY,const tDV &) ; + std::vector MakeLines() const; // generate all the lines @@ -351,8 +392,12 @@ template class cDenseMatrix : public cUnOptDenseMatrix cDenseMatrix Dup() const; static cDenseMatrix Identity(int aSz); ///< return identity matrix static cDenseMatrix Diag(const tDV &); + static cDenseMatrix FromLines(const std::vector &); // Create from set of "line vector" cDenseMatrix ClosestOrthog() const; ///< return closest + tDM SubMatrix(const cPt2di & aSz) const; + tDM SubMatrix(const cPt2di & aP0,const cPt2di & aP1) const; + /** Generate a random square matrix having "good" conditionning property , i.e with eigen value constraint, usefull for bench as when the random matrix is close to singular, it may instability that fail the numerical test. 
@@ -360,6 +405,9 @@ template class cDenseMatrix : public cUnOptDenseMatrix static tDM RandomSquareRegMatrix(const cPt2di&aSz,bool IsSym,double aAmplAcc,double aCondMinAccept); static tRSVD RandomSquareRegSVD(const cPt2di&aSz,bool IsSym,double aAmplAcc,double aCondMinAccept); + + static tDM RandomOrthogMatrix(const int aSz); + /* Generate a matrix rank deficient, where aSzK is the size of the kernel */ static tRSVD RandomSquareRankDefSVD(const cPt2di & aSz,int aSzK); static tDM RandomSquareRankDefMatrix(const cPt2di & aSz,int aSzK); @@ -448,6 +496,7 @@ template class cDenseMatrix : public cUnOptDenseMatrix void ChangSign(); ///< Multiply by -1 void SetDirectBySign(); ///< Multiply by -1 if indirect + tDV Random1LineCombination() const; // retunr a vector that is a random combination of lines // ===== Overridng of cMatrix classe ==== void MulColInPlace(tDV &,const tDV &) const override; Type MulColElem(int aY,const tDV &)const override; @@ -463,8 +512,8 @@ template class cDenseMatrix : public cUnOptDenseMatrix void Weighted_Add_tAA(Type aWeight,const tSpV & aColLine,bool OnlySup=true) override; // === method implemente with DIm - Type L2Dist(const cDenseMatrix & aV) const; - Type SqL2Dist(const cDenseMatrix & aV) const; + Type L2Dist(const cDenseMatrix & aV,bool Avg=false) const; + Type SqL2Dist(const cDenseMatrix & aV,bool Avg=false) const; // void operator -= (const cDenseMatrix &) ; => see "include/MMVII_Tpl_Images.h" private : diff --git a/MMVII/include/MMVII_SysSurR.h b/MMVII/include/MMVII_SysSurR.h index 78e6b9c564..4b408af8e6 100755 --- a/MMVII/include/MMVII_SysSurR.h +++ b/MMVII/include/MMVII_SysSurR.h @@ -304,6 +304,7 @@ template class cInputOutputRSNL std::vector mDers; ///< derivate of fctr size_t mNbTmpUk; + // use a s converter from tREAL8, "Fake" is used to separate from copy construtcor when Type == tREAL8 cInputOutputRSNL(bool Fake,const cInputOutputRSNL &); private : // cInputOutputRSNL(const cInputOutputRSNL &) = delete; diff --git a/MMVII/src/Bench/BenchGlob.cpp b/MMVII/src/Bench/BenchGlob.cpp index 05ce96d9f9..7b2842f0f0 100755 --- a/MMVII/src/Bench/BenchGlob.cpp +++ b/MMVII/src/Bench/BenchGlob.cpp @@ -546,6 +546,7 @@ int cAppli_MMVII_Bench::ExecuteBench(cParamExeBench & aParam) Bench_MatEss(aParam); Bench_SpatialIndex(aParam); Bench_ToHomMult(aParam); + BenchLinearConstr(aParam); } // Now call the bench of all application that define their own bench diff --git a/MMVII/src/Bench/BenchMatrix.cpp b/MMVII/src/Bench/BenchMatrix.cpp index 704b4b03f1..3940f834be 100755 --- a/MMVII/src/Bench/BenchMatrix.cpp +++ b/MMVII/src/Bench/BenchMatrix.cpp @@ -710,7 +710,7 @@ template void TplBenchLsq() for (int aK=0 ; aK<= 3*aNbEq ; aK++) { // sparse vector with density K/aNbEq - cSparseVect aVCoeff = cSparseVect::RanGenerate(aNbVar,double(aK)/aNbEq); + cSparseVect aVCoeff = cSparseVect::RanGenerate(aNbVar,double(aK+1)/aNbEq); aVSV.push_back(aVCoeff); Type aCste = RandUnif_C(); Type aW = 0.5 + RandUnif_0_1(); @@ -720,7 +720,7 @@ template void TplBenchLsq() if (aK%2) aSysLin.AddObservationLinear(aW,aVCoeff,aCste); else - aSysLin.AddObservationLinear(aW,cDenseVect(aNbVar,aVCoeff),aCste); + aSysLin.AddObservationLinear(aW,cDenseVect(aVCoeff,aNbVar),aCste); } static int aCpt = 0; aCpt++; @@ -993,10 +993,94 @@ template void BenchMatPt() // std::cout << "DLLLLCC " << aDL << " " << aDC << "\n"; } +template void BenchProj() +{ + + + for (int aK=0 ; aK<100 ; aK++) + { + int aDimTot = 2 + (aK%10); + int aDimProj = std::min(aDimTot-1,1+(aK%3)); + + // --- [0] Generate "ground truth" two complemantary 
matri aMatProj & aMatCompl, orthognal to each other + // MatProj being a non orthognal + cDenseMatrix aMatOrth = cDenseMatrix::RandomOrthogMatrix(aDimTot); // Generate N Orhog vector + // Generate a non orthog base of [0,DimProj] and a vector inside + cDenseMatrix aMatProj = aMatOrth.SubMatrix(cPt2di(aDimTot,aDimProj)); // Extract a orthog base of proj space + // Make a base non orthog but no degenerate + aMatProj = cDenseMatrix::RandomSquareRegMatrix(cPt2di(aDimProj,aDimProj),false,0.1,0.1) * aMatProj; + std::vector> aBase = aMatProj.MakeLines(); + + // Generate a orthog matrix of complementary space + cDenseMatrix aMatCompl = aMatOrth.SubMatrix(cPt2di(0,aDimProj),cPt2di(aDimTot,aDimTot)); // Extract a orthog base of compl space + + // ----------- [1] Test Projection -------------------------------------------- + { + cDenseVect aV = aMatProj.Random1LineCombination(); + MMVII_INTERNAL_ASSERT_bench(aV.DistToSubspace(aBase)<1e-5,"Dist Proj"); + + cDenseVect aVCompl = aMatCompl.Random1LineCombination(); + + cDenseVect aV2Proj = aV+ aVCompl; + MMVII_INTERNAL_ASSERT_bench(aV.L2Dist(aV2Proj.ProjOnSubspace(aBase))<1e-5,"ProjOnSubspace"); + } + // ---------- [2] Test Gram-Schmidt ---------------------- + { + std::vector> aGS = cDenseVect::GramSchmidtOrthogonalization(aBase); + + // test that all vector are orthogonal to each other + for (size_t aK1 = 0 ; aK1> aSubBase(aBase.begin(),aBase.begin()+aK +1); + tREAL8 aD1 = aGS.at(aK).DistToSubspace(aSubBase); + + std::vector> aSubGS(aGS.begin(),aGS.begin()+aK +1); + tREAL8 aD2 = aBase.at(aK).DistToSubspace(aSubGS); + MMVII_INTERNAL_ASSERT_bench(aD1+aD2<1e-5,"Gram-Schmidt subspace"); + } + } + { + // Test the vector added are orthonormal + std::vector> aBaseCompl = cDenseVect::BaseComplem(aBase,false); + for (size_t aK1 = 0 ; aK1(); + Bench_EigenDecompos(aParam); @@ -1037,8 +1121,8 @@ void BenchDenseMatrix0(cParamExeBench & aParam) cDenseVect aV1(2); aV0(0) = 10; aV0(1) = 20; aV1(0) = 13; aV1(1) = 24; - MMVII_INTERNAL_ASSERT_bench(std::abs(aV0.L1Dist(aV1)-3.5)<1e-5,"Bench Matrixes"); - MMVII_INTERNAL_ASSERT_bench(std::abs(aV0.L2Dist(aV1)-5.0/sqrt(2))<1e-5,"Bench Matrixes"); + MMVII_INTERNAL_ASSERT_bench(std::abs(aV0.L1Dist(aV1,true)-3.5)<1e-5,"Bench Matrixes"); + MMVII_INTERNAL_ASSERT_bench(std::abs(aV0.L2Dist(aV1,true)-5.0/sqrt(2))<1e-5,"Bench Matrixes"); diff --git a/MMVII/src/ImagesBase/BaseImage.cpp b/MMVII/src/ImagesBase/BaseImage.cpp index 8831ecf69c..aa53a12ff0 100755 --- a/MMVII/src/ImagesBase/BaseImage.cpp +++ b/MMVII/src/ImagesBase/BaseImage.cpp @@ -83,31 +83,31 @@ template template - double cDataTypedIm::L1Dist(const cDataTypedIm & aI2) const + double cDataTypedIm::L1Dist(const cDataTypedIm & aI2,bool isAvg) const { tPB::AssertSameArea(aI2); double aRes = 0.0; for (int aK=0 ; aK - double cDataTypedIm::SqL2Dist(const cDataTypedIm & aI2) const + double cDataTypedIm::SqL2Dist(const cDataTypedIm & aI2,bool isAvg) const { tPB::AssertSameArea(aI2); double aRes = 0.0; for (int aK=0 ; aK - double cDataTypedIm::L2Dist(const cDataTypedIm & aI2) const + double cDataTypedIm::L2Dist(const cDataTypedIm & aI2,bool isAvg) const { - return sqrt(SqL2Dist(aI2)); + return sqrt(SqL2Dist(aI2,isAvg)); } @@ -126,23 +126,30 @@ template template - double cDataTypedIm::L1Norm() const + double cDataTypedIm::L1Norm(bool isAvg) const { double aRes = 0.0; for (int aK=0 ; aK - double cDataTypedIm::L2Norm() const + double cDataTypedIm::SqL2Norm(bool isAvg) const { double aRes = 0.0; for (int aK=0 ; aK + double cDataTypedIm::L2Norm(bool isAvg) const +{ + return 
sqrt(SqL2Norm(isAvg)); +} + template double cDataTypedIm::LInfNorm() const { diff --git a/MMVII/src/Matrix/BaseMatrixes.cpp b/MMVII/src/Matrix/BaseMatrixes.cpp index c366151f3f..e0492a0dc8 100755 --- a/MMVII/src/Matrix/BaseMatrixes.cpp +++ b/MMVII/src/Matrix/BaseMatrixes.cpp @@ -1,4 +1,6 @@ #include "MMVII_Tpl_Images.h" +#include "MMVII_SysSurR.h" +#include "MMVII_Tpl_Images.h" namespace MMVII { @@ -33,7 +35,8 @@ template cSparseVect::cSparseVect(const cDenseVect & aD cSparseVect (aDV.Sz()) { for (int aK=0 ; aK bool cSparseVect::IsInside(int aNb) const @@ -52,21 +55,41 @@ template void cSparseVect::Reset() } -template cSparseVect cSparseVect::RanGenerate(int aNbVar,double aProba) +template cSparseVect cSparseVect::RanGenerate(int aNbVar,double aProba,tREAL8 aMinVal,int aMinSize) { cSparseVect aRes; - for (int aK=0 ; aK const typename cSparseVect::tCplIV * cSparseVect::Find(int anInd) const +{ + for (const auto & aPair : *mIV) + if (aPair.mInd==anInd) + return & aPair; + return nullptr; +} + +template int cSparseVect::MaxIndex(int aDef) const +{ + for (const auto & aPair : *mIV) + UpdateMax(aDef,aPair.mInd); + + MMVII_INTERNAL_ASSERT_tiny(aDef>=-1,"No def value for empty vect in cSparseVect::MaxIndex"); + return aDef; +} + /* ========================== */ /* cDenseVect */ /* ========================== */ @@ -92,8 +115,8 @@ template cDenseVect::cDenseVect(int aSz,eModeInitImage aModeI { } -template cDenseVect::cDenseVect(int aSz,const tSpV & aSpV) : - cDenseVect(aSz,eModeInitImage::eMIA_Null) +template cDenseVect::cDenseVect(const tSpV & aSpV,int aSz) : + cDenseVect(std::max(aSz,1+aSpV.MaxIndex()) ,eModeInitImage::eMIA_Null) { for (const auto & aPair : aSpV) mIm.DIm().SetV(aPair.mInd,aPair.mVal); @@ -157,28 +180,36 @@ template cDenseVect::cDenseVect(int aSz,eModeInitImage aModeI } */ -template double cDenseVect::L1Dist(const cDenseVect & aV) const +template double cDenseVect::L1Dist(const cDenseVect & aV,bool isAvg) const { - return mIm.DIm().L1Dist(aV.mIm.DIm()); + return mIm.DIm().L1Dist(aV.mIm.DIm(),isAvg); } -template double cDenseVect::L2Dist(const cDenseVect & aV) const +template double cDenseVect::L2Dist(const cDenseVect & aV,bool isAvg) const { - return mIm.DIm().L2Dist(aV.mIm.DIm()); + return mIm.DIm().L2Dist(aV.mIm.DIm(),isAvg); } -template double cDenseVect::L1Norm() const +template double cDenseVect::L1Norm(bool isAvg) const { - return mIm.DIm().L1Norm(); + return mIm.DIm().L1Norm(isAvg); } -template double cDenseVect::L2Norm() const +template double cDenseVect::L2Norm(bool isAvg) const { - return mIm.DIm().L2Norm(); + return mIm.DIm().L2Norm(isAvg); +} +template double cDenseVect::SqL2Norm(bool isAvg) const +{ + return mIm.DIm().SqL2Norm(isAvg); } template double cDenseVect::LInfNorm() const { return mIm.DIm().LInfNorm(); } +template cDenseVect cDenseVect::VecUnit() const +{ + return Type(SafeDiv(1.0,L2Norm())) * (*this); +} // double L1Norm() const; ///< Norm som abs double L2Norm() const; ///< Norm square double LInfNorm() const; ///< Nomr max @@ -249,6 +280,73 @@ template void cDenseVect::SetAvg(const Type & aTargAvg) (*this)(aK) *= aMul; } +template cDenseVect cDenseVect::GramSchmidtCompletion(const std::vector & aVV) const +{ + cDenseVect aRes = *this; + + for (const auto & aV : aVV) + { + aRes = aRes - Type(aV.DotProduct(*this)/aV.SqL2Norm()) * aV; + } + + return aRes; +} + +template std::vector> cDenseVect::GramSchmidtOrthogonalization(const std::vector & aVV) +{ + std::vector> aRes; + + for (const auto & aV : aVV) + aRes.push_back(aV.GramSchmidtCompletion(aRes)); + + 
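    // Each aV has been reduced by its projections on the vectors already stored in aRes
    // (GramSchmidtCompletion), so aRes is pairwise orthogonal and, assuming the inputs
    // are linearly independent, spans the same subspace as aVV; vectors are not normalized.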
return aRes; +} + +template cDenseVect cDenseVect::VecComplem(const std::vector & aVV,Type aDMin) +{ + size_t aDim = aVV.at(0).Sz(); + cDenseVect aTest(aDim,eModeInitImage::eMIA_Null); + + cWhichMax aWMax(-1,-1.0); + + for (size_t aK=0 ; aK<=aDim ; aK++) + { + // forced end of loop, we select the "less bad vector" + if (aK==aDim) + { + aDMin = -1; + aK = aWMax.IndexExtre(); + } + aTest(aK) = 1; + + cDenseVect aRes = aTest-aTest.ProjOnSubspace(aVV); + Type aNorm = aRes.L2Norm(); + if (aNorm>aDMin) + return Type(1.0/aNorm) * aRes; + aWMax.Add(aK,aNorm); + aTest(aK) = 0; + } + + MMVII_INTERNAL_ASSERT_tiny(false,"VecComplem : should not be here !!"); + return aTest; +} + +// Can go much faster by selecting all the result inside VecCompl and selecting the K Best +template std::vector> cDenseVect::BaseComplem(const std::vector & aVV,bool WithInit,Type aDMin) +{ + int aDim = aVV.at(0).Sz(); + std::vector aRes = aVV; + + for (int aK= aVV.size() ; aK>(aRes.begin()+aVV.size(),aRes.end()); +} + + /* template void AddData(const cAuxAr2007 & anAux, cDenseVect & aDV) @@ -454,6 +552,8 @@ template static void TplReadLineInPlace(const cMatrix & aMat, aV(aX) = aMat.V_GetElem(aX,aY); } + + template static void TplWriteLine(cMatrix & aMat,int aY,const cDenseVect& aV) { aMat.TplCheckSizeX(aV); @@ -514,14 +614,24 @@ template cDenseVect cMatrix::ReadCol(int aX) const return aRes; } -template cDenseVect cMatrix::ReadLine(int aX) const +template cDenseVect cMatrix::ReadLine(int aY) const { cDenseVect aRes(Sz().x()); - ReadLineInPlace(aX,aRes); + ReadLineInPlace(aY,aRes); return aRes; } +template std::vector> cMatrix::MakeLines() const +{ + std::vector> aRes; + for (int aY=0 ; aY std::ostream & operator << (std::ostream & OS,const cMatri } +template cDenseVect cDenseVect::ProjOnSubspace(const std::vector & aVV) const +{ + cDenseVect aRes(Sz(),eModeInitImage::eMIA_Null); + + int aNbVec = aVV.size(); + + if (aNbVec) + { + cLeasSqtAA aSys(aNbVec); + + for (int aKCoord=0 ; aKCoord anEq(aNbVec); + for (int aKV=0 ; aKV aSol = aSys.Solve(); + + for (int aKV = 0 ; aKV cDenseVect cDenseVect::ProjOnSubspace(const std::vector & aVV) const +{ + return cDenseVect(1); +} + +template Type cDenseVect::DistToSubspace(const std::vector & aVV) const +{ + cDenseVect aProj = ProjOnSubspace(aVV); + + return this->L2Dist(aProj); +} + + // template void AddData(const cAuxAr2007 & anAux, cDenseVect &); /* ===================================================== */ diff --git a/MMVII/src/Matrix/cDenseMatrix.cpp b/MMVII/src/Matrix/cDenseMatrix.cpp index e123315eba..1d9bc72823 100755 --- a/MMVII/src/Matrix/cDenseMatrix.cpp +++ b/MMVII/src/Matrix/cDenseMatrix.cpp @@ -72,6 +72,35 @@ template cDenseMatrix cDenseMatrix::Diag(const cDenseVe return aRes; } +template cDenseMatrix cDenseMatrix::FromLines(const std::vector & aVV) +{ + cDenseMatrix aRes(aVV.at(0).Sz(),aVV.size()); + for (int aY=0 ; aY cDenseMatrix cDenseMatrix::SubMatrix(const cPt2di & aP0,const cPt2di & aP1) const +{ + cPt2di aSz = aP1 -aP0; + cDenseMatrix aRes(aSz.x(),aSz.y()); + + for (const auto & aPix : aRes.DIm()) + aRes.SetElem(aPix.x(),aPix.y(),GetElem(aPix+aP0)); + + return aRes; +} + +template cDenseMatrix cDenseMatrix::SubMatrix(const cPt2di & aSz) const +{ + return SubMatrix(cPt2di(0,0),aSz); +} + + + template cDenseMatrix cDenseMatrix::ClosestOrthog() const { this->CheckSquare(*this); @@ -91,15 +120,15 @@ template cDenseMatrix cDenseMatrix::ClosestOrthog() co return aSVD.MatU() * cDenseMatrix::Diag(aVP) * aSVD.MatV().Transpose(); } -template Type cDenseMatrix::L2Dist(const 
cDenseMatrix & aV) const +template Type cDenseMatrix::L2Dist(const cDenseMatrix & aV,bool isAvg) const { - return DIm().L2Dist(aV.DIm()); + return DIm().L2Dist(aV.DIm(),isAvg); } -template Type cDenseMatrix::SqL2Dist(const cDenseMatrix & aV) const +template Type cDenseMatrix::SqL2Dist(const cDenseMatrix & aV,bool isAvg) const { - return DIm().SqL2Dist(aV.DIm()); + return DIm().SqL2Dist(aV.DIm(),isAvg); } @@ -233,6 +262,15 @@ template cDenseMatrix return aSVDD.OriMatr(); } +template cDenseMatrix cDenseMatrix::RandomOrthogMatrix(const int aSz) +{ + tDM aMat(aSz,eModeInitImage::eMIA_RandCenter); + aMat.SelfSymetrize(); + cResulSymEigenValue aSE = aMat.SymEigenValue(); + return aSE.EigenVectors(); +} + + template cDenseVect cDenseMatrix::Kernel(Type * aVp) const { @@ -446,6 +484,10 @@ template void cDenseMatrix::Weighted_Add_tAA(Type aWeight,con void Weighted_Add_tAA(const tDV & aColLine,bool OnlySup=true) override; */ +template cDenseVect cDenseMatrix::Random1LineCombination() const +{ + return (cDenseMatrix(Sz().y(),1,eModeInitImage::eMIA_RandCenter) * (*this)).ReadLine(0); +} template void cDenseMatrix::Weighted_Add_tAA(Type aWeight,const tSpV & aSparseV,bool OnlySup) { @@ -556,6 +598,8 @@ template void cUnOptDenseMatrix::Resize(const cPt2di & aSz) DIm().Resize(aSz); } + + /* ===================================================== */ /* ===== INSTANTIATION ===== */ /* ===================================================== */ diff --git a/MMVII/src/Matrix/cL1BarrodaleSover.cpp b/MMVII/src/Matrix/cL1BarrodaleSover.cpp index c65c3179bc..cec787a4a2 100755 --- a/MMVII/src/Matrix/cL1BarrodaleSover.cpp +++ b/MMVII/src/Matrix/cL1BarrodaleSover.cpp @@ -127,7 +127,7 @@ template template void cLinearMemoEq::SpecificAddObservation(const Type& aWeight,const cSparseVect & aCoeff,const Type & aRHS) { - SpecificAddObservation(aWeight,cDenseVect(this->mNbVar,aCoeff),aRHS); + SpecificAddObservation(aWeight,cDenseVect(aCoeff,this->mNbVar),aRHS); } template diff --git a/MMVII/src/Matrix/cLinearConstraint.cpp b/MMVII/src/Matrix/cLinearConstraint.cpp index a528b8e3d8..6de5bb70c1 100755 --- a/MMVII/src/Matrix/cLinearConstraint.cpp +++ b/MMVII/src/Matrix/cLinearConstraint.cpp @@ -8,12 +8,14 @@ using namespace MMVII; namespace MMVII { +//static bool DEBUG=false; +//static bool DEBUG2=false; -/* ************************************************************ */ -/* */ -/* cOneLinearConstraint */ -/* */ -/* ************************************************************ */ +template class cDSVec; // Sparse/Dense vect +template class cOneLinearConstraint; // represent 1 constr +template class cSetLinearConstraint; // represent a set of constraint + +class cBenchLinearConstr; /** Class for a "sparse" dense vector, i.e a vector that is represented by a dense vector */ @@ -23,143 +25,500 @@ template class cDSVec public : cDSVec(size_t aNbVar); - void Add(const Type &,int anInd); - + void AddValInd(const Type &,int anInd); cDenseVect mVec; cSetIntDyn mSet; -}; -template cDSVec::cDSVec(size_t aNbVar) : - mVec (aNbVar,eModeInitImage::eMIA_Null), - mSet (aNbVar) -{ -} - -template void cDSVec::Add(const Type & aVal,int anInd) -{ - mVec(anInd) += aVal; - mSet.AddIndFixe(anInd); -} + void Reset(); + void Show(); + void TestEmpty(); +}; /* Class for handling linear constraint in non linear optimization system. * Note the constraint on a vector X as : * * mL . 
X = mC where mL is a non null vector * - * The way it is done : + * (I) The way it is done is by substitution : * * (1) We select an arbitray non null coord of L Li!=0; (something like the biggest one) - * (2) We suppose Li=1.0 (in fact we have it by mL = mL/Li , mC = mC/Li) - * (3) Let not X' the vector X without Xi + * (2) We suppose Li=1.0 (in fact we have it by setting mL = mL/Li , mC = mC/Li) + * (3) Let note X' the vector X without Xi * (4) we have Xi = mC- mL X' * (5) Each time we add a new obs in sytem : * A.X = B - * A.x-B = A' X' -B + Ai Xi = A' X' -B + Ai (mC-mL X') + * A.X-B = A' X' -B + Ai Xi = A' X' -B + Ai (mC-mL X') + * (A'-Ai mL) X = B - Ai mC + * + * (II) So far so good, but now supose we have the two contraint: + * C1: x +2y=0 C2 2x + y = 0 + * And a form L :x + y +z, obviouly as the two constraint implie x=y=0, it mean that L shoul reduce to z + * + * But suppose we use C1 as x ->-2y and C2 as C2A : y-> -2x or C2B x-> -y/2 + * using C1 we have L -> -y+z and + * C2A -> 2x+z + * C2B -> -y+z (nothing to do, x is already substitued) + * + * So this does not lead to the good reduction + * + * (III) So now, before using the constraint we make a preprocessing, more a less a triangulation : * - * (A'-Ai mL) = B + * C1 : x + 2y=0 ~ x->-2y + * C'2 : C2(x->-2y) : -y=0 * + * now if we use C1 and C'2 L will reduce to 0 + * This the principe used in MMVII for constrained optimization : make a substitution afer a preprocessing + * that triangulate the constraint */ template class cOneLinearConstraint { public : - typedef cSparseVect tSV; + friend class cSetLinearConstraint; + friend class cBenchLinearConstr; + + typedef cSparseVect tSV; + typedef cDenseVect tDV; + typedef typename tSV::tCplIV tCplIV; + typedef cInputOutputRSNL tIO_RSNL; + typedef cSetIORSNL_SameTmp tSetIO_ST; + /** In Cstr we can fix the index of subst, if it value -1 let the system select the best , fixing can be usefull in case * of equivalence */ - cOneLinearConstraint(const tSV&aLP,const Type& aCste); + cOneLinearConstraint(const tSV&aLP,const Type& aCste,int aNum); + // Subsract into "aToSub" so as to annulate the coeff with mISubst + void SubstituteInOtherConstraint(cOneLinearConstraint & aToSub,cDSVec & aBuf); + void SubstituteInDenseLinearEquation (tDV & aA,Type & aB) const; + void SubstituteInSparseLinearEquation(tSV & aA,Type & aB,cDSVec & aBuf) const; + void SubstituteInOutRSNL(tIO_RSNL& aIO,cDSVec & aBuf,const tDV & aCurSol) const; - void SubstractIn(cOneLinearConstraint & aToSub,cDSVec & aBuf); + const tCplIV * LinearMax() const; + + /// One the Index of substitution is chosen, transformat by divide all equation by Li and supress Li tha implicitely=1 + void InitSubst(); - void ModifDenseEqLinear(cDenseVect & aCoeff,Type & aRHS, const cDenseVect & aCurSol); private : - void AddBuf(cDSVec & aBuf,const Type & aMul,int aI2Avoid); + void AddBuf(cDSVec & aBuf,const Type & aMul,int aI2Avoid) const; - tSV mLP; /// Linear part - int mISubst; /// Indexe which is substituted - Type mCste; /// Constant of the constrainte + tSV mLP; /// Linear part + int mISubst; /// Indexe which is substituted + Type mCste; /// Constant of the constrainte + int mNum; /// Identifier, used for debug at least + int mOrder; /// Order of reduction, used to sort the constraint + bool mSelected; /// a marker to know if a constraint has already been reduced }; -template cOneLinearConstraint::cOneLinearConstraint(const tSV&aLP,const Type& aCste) : + +template class cSetLinearConstraint +{ + public : + friend class cBenchLinearConstr; + + 
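        /* Worked example of the triangulation sketched in (III) above, using the same two
         * constraints (illustrative only : in the code the pivot of each constraint is the
         * coefficient of largest magnitude chosen by LinearMax, so the order may differ) :
         *    C1  : x + 2y = 0          used as   x -> -2y
         *    C'2 : C2 after C1 gives   2(-2y) + y = -3y = 0,   i.e.  y -> 0
         * Reducing L = x + y + z with C1 then C'2 gives  -2y + y + z = -y + z,  then  z,
         * the expected result since the two constraints force x = y = 0.
         */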
typedef cSparseVect tSV; + typedef cDenseVect tDV; + typedef typename tSV::tCplIV tCplIV; + typedef cOneLinearConstraint t1Constr; + + void Compile(); + cSetLinearConstraint(int aNbVar); + void Add1Constr(const t1Constr &); + private : + std::vector mVCstrInit; // Initial constraint, + std::vector mVCstrReduced; // Constraint after reduction + cDSVec mBuf; // Buffer for computation +}; + +/* ************************************************************ */ +/* */ +/* cSetLinearConstraint */ +/* */ +/* ************************************************************ */ + +template cSetLinearConstraint::cSetLinearConstraint(int aNbVar) : + mBuf (aNbVar) +{ +} + +template void cSetLinearConstraint::Add1Constr(const t1Constr & aConstr) +{ + mVCstrInit.push_back(aConstr); +} + + +template void cSetLinearConstraint::Compile() +{ + mVCstrReduced = mVCstrInit; + + // Set no selected for all + for (auto & aCstr : mVCstrReduced) + aCstr.mSelected = false; + + size_t aNbReduced = 0; + while (aNbReduced != mVCstrInit.size()) + { + // extract the "best" Cstr, i.e. with highest value + cWhichMax aMax(-1,-1.0); + for (int aKC=0; aKCmVal)); + } + t1Constr& aBest = mVCstrReduced.at(aMax.IndexExtre()); + + aBest.InitSubst(); + aBest.mOrder = aNbReduced; + // substract the selected constraint to all + for (t1Constr & aCstr : mVCstrReduced) + { + if (! aCstr.mSelected) + { + //if (DEBUG) + //StdOut() << "SIOC, ISUBS " << aBest.mISubst << " N=" << aBest.mNum << " => " << aCstr.mNum << "\n"; + aBest.SubstituteInOtherConstraint(aCstr,mBuf); + } + } + //if (DEBUG) StdOut() << "=======================================\n"; + aNbReduced++; + } + std::sort + ( + mVCstrReduced.begin(), + mVCstrReduced.end(), + [](const auto & aC1,const auto & aC2){return aC1.mOrder cDSVec::cDSVec(size_t aNbVar) : + mVec (aNbVar,eModeInitImage::eMIA_Null), + mSet (aNbVar) +{ +} + +template void cDSVec::AddValInd(const Type & aVal,int anInd) +{ + mVec(anInd) += aVal; + mSet.AddIndFixe(anInd); +} + +template void cDSVec::Reset() +{ + for (const auto & anInd : mSet.mVIndOcc) + { + mVec(anInd) = 0.0; + mSet.mOccupied.at(anInd) = false; + } + mSet.mVIndOcc.clear(); +} + + +template void cDSVec::TestEmpty() +{ + for (const auto & aV : mVec.ToStdVect()) + MMVII_INTERNAL_ASSERT_tiny(aV==0.0,"Vec Test Empty"); + MMVII_INTERNAL_ASSERT_tiny(mSet.mVIndOcc.empty(),"Occ Test Empty"); + for (const auto & aV : mSet.mOccupied) + MMVII_INTERNAL_ASSERT_tiny(aV==false,"Vec Test Empty"); +} + +template void cDSVec::Show() +{ + StdOut() << "cDSVeccDSVec "; + for (const auto & aV : mSet.mOccupied) + StdOut() << " " << aV ; + StdOut() << "\n"; +} + +/* ************************************************************** */ +/* */ +/* cOneLinearConstraint */ +/* */ +/* ************************************************************** */ + +template cOneLinearConstraint::cOneLinearConstraint(const tSV&aLP,const Type& aCste,int aNum) : mLP (aLP), mISubst (-1), - mCste (aCste) + mCste (aCste), + mNum (aNum) +{ +} + +template void cOneLinearConstraint::InitSubst() +{ + mSelected = true; + const tCplIV * aCple = LinearMax() ; + mISubst = aCple->mInd; + + erase_if(mLP.IV(),[this](const auto & aPair){return aPair.mInd==mISubst;}); + + for (auto & aPair : mLP.IV()) + aPair.mVal /= aCple->mVal; + mCste /= aCple->mVal; +} + +template const typename cOneLinearConstraint::tCplIV * cOneLinearConstraint::LinearMax() const { + cWhichMax aMax(nullptr,-1.0); + + for (const auto & aPair : mLP) + aMax.Add(&aPair,std::abs(aPair.mVal)); + + const tCplIV * aRes = aMax.IndexExtre(); + 
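    // aRes points to the coefficient of largest magnitude : taking it as the substituted
    // index keeps the division by aRes->mVal in InitSubst well conditioned, the same idea
    // as partial pivoting in Gaussian elimination.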
MMVII_INTERNAL_ASSERT_tiny(aRes!=nullptr,"cOneLinearConstraint::LinearMax probably bad formed cosntrained"); + MMVII_INTERNAL_ASSERT_tiny(aRes->mVal!=0,"cOneLinearConstraint::LinearMax probably bad formed cosntrained"); + return aRes ; } -template void cOneLinearConstraint::AddBuf(cDSVec & aBuf,const Type & aMul,int aI2Avoid) +template void GlobAddBuf(cDSVec & aBuf,const cSparseVect & aLP,const Type & aMul,int aI2Avoid) { - for (const auto & aPair : mLP.IV()) + for (const auto & aPair : aLP.IV()) { if (aPair.mInd != aI2Avoid) { - aBuf.Add(aPair.mInd,aPair.mVal * aMul); + aBuf.AddValInd(aPair.mVal*aMul,aPair.mInd); } } } -template void cOneLinearConstraint::SubstractIn(cOneLinearConstraint & aToSub,cDSVec & aBuf) +template void cOneLinearConstraint::AddBuf(cDSVec & aBuf,const Type & aMul,int aI2Avoid) const +{ + GlobAddBuf(aBuf, mLP,aMul,aI2Avoid); +} + +template void cOneLinearConstraint::SubstituteInOtherConstraint(cOneLinearConstraint & aToSub,cDSVec & aBuf) { +static int aCpt=0; aCpt++; +//DEBUG2 = (aCpt==3); + //if (DEBUG) StdOut() << "INNNN " << aToSub.mLP.Find(mISubst) << " Cpt=" << aCpt << " B2 "<< DEBUG2 << "\n"; + + SubstituteInSparseLinearEquation(aToSub.mLP,aToSub.mCste,aBuf); + + //if (DEBUG) StdOut() << "OUUT " << aToSub.mLP.Find(mISubst) << "\n"; +} + +template void cOneLinearConstraint::SubstituteInDenseLinearEquation(cDenseVect & aA,Type & aB) const +{ + // (A'-Ai mL) X = B - Ai mC + Type & aAi = aA(mISubst); + aB -= aAi * mCste; + + for (const auto & aPair : mLP) + if (aPair.mInd != mISubst) + aA(aPair.mInd) -= aAi * aPair.mVal; + + aAi = 0.0; +} + +template void cOneLinearConstraint::SubstituteInSparseLinearEquation(tSV & aA,Type & aB,cDSVec & aBuf) const +{ + // (A'-Ai mL) X = B - Ai mC + const tCplIV * aPair = aA.Find(mISubst) ; + + /*if (DEBUG2) + { + aBuf.TestEmpty(); + StdOut() << "PAIR " << aPair << " SZ=" << mLP.size() << "\n"; + }*/ + + + // current case, if the index is not present in equation nothing to do (in this case Ai=0 and A'=A) + if (aPair == nullptr) return; + // substract constant - aToSub.mCste -= mCste; + aB -= mCste * aPair->mVal; + + // other current case, if the equation is a single substition (like frozen var) no more things to do + if (mLP.size()==0) return; - aToSub.AddBuf(aBuf, 1.0,mISubst); - this ->AddBuf(aBuf,-1.0,mISubst); +//if (DEBUG2) aBuf.Show(); + // mIsSubst is send as parameter because it must disapear in the buf + GlobAddBuf(aBuf,aA, (Type)1.0,mISubst); +//if (DEBUG2) aBuf.Show(); + this ->AddBuf(aBuf,-aPair->mVal,mISubst); +//if (DEBUG2) aBuf.Show(); - aToSub.mLP.Reset(); + aA.Reset(); for (const auto & aInd : aBuf.mSet.mVIndOcc) { - aToSub.mLP.AddIV(aInd,aBuf.mVec(aInd)); + aA.AddIV(aInd,aBuf.mVec(aInd)); } + aBuf.Reset(); + //if (DEBUG2) aBuf.TestEmpty(); } - -/* - mLP {} +template void cOneLinearConstraint::SubstituteInOutRSNL(tIO_RSNL& aIO,cDSVec & aBuf,const tDV & aCurSol) const { - const typename tSV::tCont & aVPair = aLP.IV(); - // if indexe was not forced or is already, get the "best" one - if ((aKPair<0) || aSetSubst.mOccupied.at(aVPair.at(aKPair).mInd)) + // [1] Find the index of mISubst + int aKSubst = -1; // Indexe wher mGlobVIn potentially equals mISubst + + for (size_t aKVar=0 ; aKVar aMaxInd(-1,0); - // extract the index, not occupied - for (size_t aKP=0 ; aKP::IsIndTmp(aIndGlob)) && ( aIndGlob== mISubst)) + aKSubst = aKVar; } - // now store the result - mISubst = aVPair.at(aKPair).mInd; // The indexe that will be susbstitued - Type aV0 = aVPair.at(aKPair).mVal; // Value we divide all - mCste = aCste / aV0; // 
normalized constant - - for (const auto & aPair : aVPair) + // if index subst is not involved, nothing to do + if (aKSubst<0) return; + + /* F(X) = F(X0) + D (X-X0) = D (X-X0) + V0 + = D' (X'-X0') +Di (Xi-X0i) + V0 + = D' (X'-X0') + Di (mC- mL X' -X0i) + V0 + = D' (X'-X0') - Di mL (X' -X'0 + X'0) - Di X0i + V0 + Di mC + = (D' -Di mL) (X'-X0') + V0 + Di (mC -X0i - mL X'0 ) +*/ + + Type aDelta = (mCste-aCurSol(mISubst)); // mC-X0i + bool FirstDerNN = true; // Used to check that we do only once the extension of indexe + for (size_t aKEq=0 ; aKEq::IsIndTmp(aInd)) && aBuf.mSet.mOccupied.at(aInd) ) + { + aIO.mDers.at(aKEq).at(aKVar) -= aBuf.mVec(aInd) * aDerI; // -Di mL + aBuf.mSet.mOccupied.at(aInd) = false; // purge occuo + aBuf.mVec(aInd) = 0; // purge + } + } + // [C] modify the derivate for the index, presnt in constraint but not in equation + for (const auto & aPair : mLP.IV()) + { + if (aPair.mInd != mISubst) + { + if (aBuf.mSet.mOccupied.at(aPair.mInd)) + { + aIO.mDers.at(aKEq).push_back(-aPair.mVal * aDerI); + if (FirstDerNN) // (aKEq==0) + aIO.mGlobVInd.push_back(aPair.mInd); + aBuf.mSet.mOccupied.at(aPair.mInd) =false; + aBuf.mVec(aPair.mInd) = 0.0; + } + } + } + + // [D] finish purge + aBuf.mSet.mVIndOcc.clear(); + } + aDerI = 0; + FirstDerNN= false; } } } -template void cOneLinearConstraint::ModifDenseEqLinear(cDenseVect & aCoeff,Type & aRHS, const cDenseVect & ) +class cBenchLinearConstr +{ + public : + cBenchLinearConstr(int aNbVar,int aNbCstr); + + int mNbVar; + int mNbCstr; + std::vector > mLSV; // list of sparse vect used by const + std::vector > mLDV; // list of dense vect, copy of sparse, for easiness + + // So that the constaint L.V = C is equiv to L(V-V0) = 0 + cDenseVect mV0; + cSetLinearConstraint mSetC; +}; + + +cBenchLinearConstr::cBenchLinearConstr(int aNbVar,int aNbCstr) : + mNbVar (aNbVar), + mNbCstr (aNbCstr), + mV0 (aNbVar,eModeInitImage::eMIA_RandCenter), + mSetC (aNbVar) { +static int aCpt=0; aCpt++; +//DEBUG= (aCpt==4); + + for (int aK=0 ; aK< mNbCstr ; aK++) + { + bool Ok = false; + while (!Ok) + { + cSparseVect aSV = cSparseVect::RanGenerate(aNbVar,0.3,0.1,1); // Generate random sparse vect + cDenseVect aDV(aSV,mNbVar); // make a dense copy + if (aDV.DistToSubspace(mLDV) > 0.2 * aDV.L2Norm()) // ensure the vect is enouh far from space defines by others + { + Ok=true; // done for this one + mLDV.push_back(aDV); // add new sparse V + mLSV.push_back(aSV); // add new dense V + cOneLinearConstraint aLC(aSV,mV0.DotProduct(aDV),aK); + mSetC.Add1Constr(aLC); + } + } + } + //if (!DEBUG) return; + mSetC.Compile(); + + for (int aK1=0 ; aK1< mNbCstr ; aK1++) + { + const cOneLinearConstraint & aC1= mSetC.mVCstrReduced.at(aK1); + for (int aK2=0 ; aK2< mNbCstr ; aK2++) + { + const cOneLinearConstraint & aC2= mSetC.mVCstrReduced.at(aK2); + + auto * aPair = aC2.mLP.Find(aC1.mISubst); + + if (aK1<=aK2) + { + StdOut() << "PPP " << aPair << " N1=" << aC1.mNum << " N2=" << aC2.mNum << "\n"; + MMVII_INTERNAL_ASSERT_bench(aPair==0,"Reduce in LinearCstr"); + } + } + } } -*/ + +void BenchLinearConstr(cParamExeBench & aParam) +{ + return; + if (! 
aParam.NewBench("LinearConstr")) return; + + StdOut() << "BenchLinearConstrBenchLinearConstr\n"; + // std::vector aV{{2,3},{3,2}}; + + for (int aK=0 ; aK<100 ; aK++) + { + cBenchLinearConstr(10,3); + cBenchLinearConstr(20,5); + } + + aParam.EndBench(); +} + #define INSTANTIATE_LINEAER_CONSTR(TYPE)\ +template class cSetLinearConstraint;\ template class cOneLinearConstraint;\ template class cDSVec; diff --git a/MMVII/src/Matrix/cResolSysNonLinear.cpp b/MMVII/src/Matrix/cResolSysNonLinear.cpp index d55b4999d1..7796e80d31 100755 --- a/MMVII/src/Matrix/cResolSysNonLinear.cpp +++ b/MMVII/src/Matrix/cResolSysNonLinear.cpp @@ -555,7 +555,6 @@ template void cResolSysNonLinear::AddEq2Subst std::vector aVIO(1,tIO_RSNL(aVInd,aVObs)); CalcVal(aCalc,aVIO,aSetIO.ValTmpUk(),true,aWeighter); - aSetIO.AddOneEq(aVIO.at(0)); } @@ -650,9 +649,10 @@ template cDenseVect cResolSysNonLinear::R_SolveUpdat /* */ /* ************************************************************ */ +// template class cInputOutputRSNL; +// template class cSetIORSNL_SameTmp; + #define INSTANTIATE_RESOLSYSNL(TYPE)\ -template class cInputOutputRSNL;\ -template class cSetIORSNL_SameTmp;\ template class cResolSysNonLinear; INSTANTIATE_RESOLSYSNL(tREAL4) diff --git a/MMVII/src/Perso/cMMVII_CatVideo.cpp b/MMVII/src/Perso/cMMVII_CatVideo.cpp index d117a1b44c..ea16d8354a 100755 --- a/MMVII/src/Perso/cMMVII_CatVideo.cpp +++ b/MMVII/src/Perso/cMMVII_CatVideo.cpp @@ -38,7 +38,7 @@ class cAppli_CatVideo : public cMMVII_Appli std::string mNameFoF; ///< name of file of files std::string mNameResult; ///< name of Resulting media bool mVideoMode; ///< is it video, change def options - std::string mOptions; ///< is it video, change options + std::vector mOptions; ///< is it video, change options }; @@ -119,6 +119,11 @@ int cAppli_CatVideo::Exe() } } +/* + JOE => modif MPD , else generate : + ffmpeg -safe 0 -f concat -i FileCatVideo.txt "-vcodec mpeg4 -b 15000k" toto.mp4 + and the ffmpeg refuse to have as a single string "-vcodec mpeg4 -b 15000k" + if (! IsInit(&mOptions)) { if (mVideoMode) @@ -126,6 +131,22 @@ int cAppli_CatVideo::Exe() } cParamCallSys aCom("ffmpeg","-safe","0","-f","concat","-i",mNameFoF,mOptions,mNameResult); +*/ + + cParamCallSys aCom("ffmpeg","-safe","0","-f","concat","-i",mNameFoF); + if (! IsInit(&mOptions)) + { + if (mVideoMode) + mOptions = {"-vcodec","mpeg4","-b","15000k"}; + } + + + for (const auto & anOpt : mOptions) + aCom.AddArgs(anOpt); + aCom.AddArgs(mNameResult); + // End modif + + int aRes = EXIT_SUCCESS ; if (mExec) { From 3b81d8abc85df93074465b86b333afe729795959 Mon Sep 17 00:00:00 2001 From: deseilligny Date: Sun, 31 Dec 2023 19:13:31 +0100 Subject: [PATCH 04/21] Happy new year w/o bug .... 
--- MMVII/include/MMVII_Matrix.h | 2 + MMVII/src/Bench/BenchMatrix.cpp | 3 - MMVII/src/Matrix/BaseMatrixes.cpp | 9 +++ MMVII/src/Matrix/cLinearConstraint.cpp | 97 +++++++++++++++++++------- 4 files changed, 84 insertions(+), 27 deletions(-) diff --git a/MMVII/include/MMVII_Matrix.h b/MMVII/include/MMVII_Matrix.h index c1fe9ea1c3..306510e8cd 100755 --- a/MMVII/include/MMVII_Matrix.h +++ b/MMVII/include/MMVII_Matrix.h @@ -77,6 +77,8 @@ template class cSparseVect : public cMemCheck // Maximum index, aDef is used if empty, if aDef<=-2 & empty erreur int MaxIndex(int aDef=-1) const; + + void EraseIndex(int anInd); private : /* inline void MakeSort(){if (!mIsSorted) Sort();} diff --git a/MMVII/src/Bench/BenchMatrix.cpp b/MMVII/src/Bench/BenchMatrix.cpp index 3940f834be..b813cad409 100755 --- a/MMVII/src/Bench/BenchMatrix.cpp +++ b/MMVII/src/Bench/BenchMatrix.cpp @@ -587,8 +587,6 @@ template void BenchSysSur(cLinearOverCstrSys& aSys,bool Exact for (int aK=0 ; aK::TyNum()) - << " Eps: " << std::numeric_limits::epsilon() << " " << aDTest << "\n"; */ MMVII_INTERNAL_ASSERT_bench(aDif void BenchProj() for (size_t aK2 = 0 ; aK2 int cSparseVect::MaxIndex(int aDef) const return aDef; } +template void cSparseVect::EraseIndex(int anInd) +{ + erase_if(*mIV,[anInd](const auto & aPair){return aPair.mInd==anInd;}); +} +/* +*/ + + + /* ========================== */ /* cDenseVect */ /* ========================== */ diff --git a/MMVII/src/Matrix/cLinearConstraint.cpp b/MMVII/src/Matrix/cLinearConstraint.cpp index 6de5bb70c1..314106f5dd 100755 --- a/MMVII/src/Matrix/cLinearConstraint.cpp +++ b/MMVII/src/Matrix/cLinearConstraint.cpp @@ -8,7 +8,7 @@ using namespace MMVII; namespace MMVII { -//static bool DEBUG=false; +static bool DEBUG=false; //static bool DEBUG2=false; template class cDSVec; // Sparse/Dense vect @@ -101,6 +101,9 @@ template class cOneLinearConstraint /// One the Index of substitution is chosen, transformat by divide all equation by Li and supress Li tha implicitely=1 void InitSubst(); + /// 4 Debug purpose + void Show() const; + private : void AddBuf(cDSVec & aBuf,const Type & aMul,int aI2Avoid) const; @@ -113,7 +116,6 @@ template class cOneLinearConstraint bool mSelected; /// a marker to know if a constraint has already been reduced }; - template class cSetLinearConstraint { public : @@ -127,6 +129,8 @@ template class cSetLinearConstraint void Compile(); cSetLinearConstraint(int aNbVar); void Add1Constr(const t1Constr &); + + void Show(const std::string & aMsg) const; private : std::vector mVCstrInit; // Initial constraint, std::vector mVCstrReduced; // Constraint after reduction @@ -154,6 +158,8 @@ template void cSetLinearConstraint::Compile() { mVCstrReduced = mVCstrInit; + if (DEBUG) Show("Init"); + // Set no selected for all for (auto & aCstr : mVCstrReduced) aCstr.mSelected = false; @@ -177,14 +183,19 @@ template void cSetLinearConstraint::Compile() { if (! 
aCstr.mSelected) { - //if (DEBUG) - //StdOut() << "SIOC, ISUBS " << aBest.mISubst << " N=" << aBest.mNum << " => " << aCstr.mNum << "\n"; + if (DEBUG) + StdOut() << "SIOC, ISUBS " << aBest.mISubst << " N=" << aBest.mNum << " => " << aCstr.mNum << "\n"; aBest.SubstituteInOtherConstraint(aCstr,mBuf); } } - //if (DEBUG) StdOut() << "=======================================\n"; + if (DEBUG) + { + if (DEBUG) Show("Reduc:" + ToStr(aNbReduced)); + } aNbReduced++; } + if (DEBUG) + StdOut() << "=======================================\n"; std::sort ( mVCstrReduced.begin(), @@ -193,9 +204,17 @@ template void cSetLinearConstraint::Compile() ); } +template void cSetLinearConstraint::Show(const std::string & aMsg) const +{ + StdOut() << "======== SHOWTSELC " << aMsg << " =====================" << std::endl; + + for (const auto & aCstr: mVCstrReduced) + aCstr.Show(); +} + /* ************************************************************ */ /* */ -/* cOneLinearConstraint */ +/* cDSVect */ /* */ /* ************************************************************ */ @@ -224,6 +243,10 @@ template void cDSVec::Reset() template void cDSVec::TestEmpty() { + static bool First = true; + if (First) + StdOut() << "TestEmptyTestEmptyTestEmptyTestEmptyTestEmptyTestEmptyTestEmptyTestEmpty\n"; + First = false; for (const auto & aV : mVec.ToStdVect()) MMVII_INTERNAL_ASSERT_tiny(aV==0.0,"Vec Test Empty"); MMVII_INTERNAL_ASSERT_tiny(mSet.mVIndOcc.empty(),"Occ Test Empty"); @@ -236,7 +259,7 @@ template void cDSVec::Show() StdOut() << "cDSVeccDSVec "; for (const auto & aV : mSet.mOccupied) StdOut() << " " << aV ; - StdOut() << "\n"; + StdOut() << std::endl; } /* ************************************************************** */ @@ -246,20 +269,23 @@ template void cDSVec::Show() /* ************************************************************** */ template cOneLinearConstraint::cOneLinearConstraint(const tSV&aLP,const Type& aCste,int aNum) : - mLP (aLP), - mISubst (-1), - mCste (aCste), - mNum (aNum) + mLP (aLP), + mISubst (-1), + mCste (aCste), + mNum (aNum), + mOrder (-1), + mSelected (false) { } + template void cOneLinearConstraint::InitSubst() { mSelected = true; const tCplIV * aCple = LinearMax() ; mISubst = aCple->mInd; - erase_if(mLP.IV(),[this](const auto & aPair){return aPair.mInd==mISubst;}); + mLP.EraseIndex(mISubst); for (auto & aPair : mLP.IV()) aPair.mVal /= aCple->mVal; @@ -322,21 +348,23 @@ template void cOneLinearConstraint::SubstituteInDenseLinearEq template void cOneLinearConstraint::SubstituteInSparseLinearEquation(tSV & aA,Type & aB,cDSVec & aBuf) const { // (A'-Ai mL) X = B - Ai mC - const tCplIV * aPair = aA.Find(mISubst) ; + const tCplIV * aPairInA = aA.Find(mISubst) ; + aBuf.TestEmpty(); /*if (DEBUG2) { - aBuf.TestEmpty(); StdOut() << "PAIR " << aPair << " SZ=" << mLP.size() << "\n"; }*/ // current case, if the index is not present in equation nothing to do (in this case Ai=0 and A'=A) - if (aPair == nullptr) return; + if (aPairInA == nullptr) return; + Type aValAi = aPairInA->mVal; // Save value because erase will supress the ref ... 
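    // (More precisely : aA.EraseIndex(mISubst) below goes through erase_if on the underlying
    //  container of index/value pairs, which invalidates the pair aPairInA points to, so the
    //  pointer must not be dereferenced after that call -- hence the copy into aValAi.)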
// substract constant - aB -= mCste * aPair->mVal; - + aB -= mCste * aValAi; + // Substract + aA.EraseIndex(mISubst); // other current case, if the equation is a single substition (like frozen var) no more things to do if (mLP.size()==0) return; @@ -344,7 +372,7 @@ template void cOneLinearConstraint::SubstituteInSparseLinearE // mIsSubst is send as parameter because it must disapear in the buf GlobAddBuf(aBuf,aA, (Type)1.0,mISubst); //if (DEBUG2) aBuf.Show(); - this ->AddBuf(aBuf,-aPair->mVal,mISubst); + this ->AddBuf(aBuf,-aValAi,mISubst); //if (DEBUG2) aBuf.Show(); aA.Reset(); @@ -353,7 +381,7 @@ template void cOneLinearConstraint::SubstituteInSparseLinearE aA.AddIV(aInd,aBuf.mVec(aInd)); } aBuf.Reset(); - //if (DEBUG2) aBuf.TestEmpty(); + aBuf.TestEmpty(); } @@ -435,6 +463,21 @@ template void cOneLinearConstraint::SubstituteInOutRSNL(tIO_ } } +template void cOneLinearConstraint::Show() const +{ + StdOut() << " * N=" << mNum << " O="<< mOrder << " S=" << mSelected << " I=" << mISubst ; + + for (const auto & aPair:mLP.IV()) + StdOut() << " [" << aPair.mInd << " : " << aPair.mVal << "]"; + + StdOut() << std::endl; +} + +/* ************************************************************** */ +/* */ +/* cBenchLinearConstr */ +/* */ +/* ************************************************************** */ class cBenchLinearConstr { @@ -459,7 +502,7 @@ cBenchLinearConstr::cBenchLinearConstr(int aNbVar,int aNbCstr) : mSetC (aNbVar) { static int aCpt=0; aCpt++; -//DEBUG= (aCpt==4); +// DEBUG= (aCpt==6); for (int aK=0 ; aK< mNbCstr ; aK++) { @@ -478,7 +521,8 @@ static int aCpt=0; aCpt++; } } } - //if (!DEBUG) return; + // if (!DEBUG) return; + StdOut() << "NNNNnBvar " << mNbVar << " NbC=" << mNbCstr << "\n"; mSetC.Compile(); for (int aK1=0 ; aK1< mNbCstr ; aK1++) @@ -492,7 +536,10 @@ static int aCpt=0; aCpt++; if (aK1<=aK2) { - StdOut() << "PPP " << aPair << " N1=" << aC1.mNum << " N2=" << aC2.mNum << "\n"; + StdOut() << "PPP " << aPair + << " N1=" << aC1.mNum << "," << aC1.mOrder + << " N2=" << aC2.mNum << "," << aC2.mOrder + << " Cpt=" << aCpt << "\n"; MMVII_INTERNAL_ASSERT_bench(aPair==0,"Reduce in LinearCstr"); } } @@ -501,14 +548,16 @@ static int aCpt=0; aCpt++; void BenchLinearConstr(cParamExeBench & aParam) { - return; +return; if (! 
aParam.NewBench("LinearConstr")) return; StdOut() << "BenchLinearConstrBenchLinearConstr\n"; // std::vector aV{{2,3},{3,2}}; - for (int aK=0 ; aK<100 ; aK++) + for (int aK=0 ; aK<100000 ; aK++) { + cBenchLinearConstr(4,2); + cBenchLinearConstr(10,2); cBenchLinearConstr(10,3); cBenchLinearConstr(20,5); } From b063c1e5ffce1341edad7b4594b1ffc8c2b0c86a Mon Sep 17 00:00:00 2001 From: deseilligny Date: Mon, 1 Jan 2024 22:14:47 +0100 Subject: [PATCH 05/21] ii --- MMVII/include/MMVII_Matrix.h | 11 ++++---- MMVII/src/Matrix/BaseMatrixes.cpp | 39 ++++++++++++++++++++++++++ MMVII/src/Matrix/cLinearConstraint.cpp | 39 ++++++++++++++++++-------- 3 files changed, 72 insertions(+), 17 deletions(-) diff --git a/MMVII/include/MMVII_Matrix.h b/MMVII/include/MMVII_Matrix.h index 306510e8cd..a14e123728 100755 --- a/MMVII/include/MMVII_Matrix.h +++ b/MMVII/include/MMVII_Matrix.h @@ -52,11 +52,9 @@ template class cSparseVect : public cMemCheck const tCont & IV() const { return *(mIV.get());} tCont & IV() { return *(mIV.get());} - void AddIV(const int & anInd,const Type & aV) - { - IV().push_back(tCplIV(anInd,aV)); - } - void AddIV(const tCplIV & aCpl) { IV().push_back(aCpl); } + void AddIV(const int & anInd,const Type & aV) ; /// "Raw" add, dont check if ind exist + void AddIV(const tCplIV & aCpl) ; /// "Raw" add, dont check if ind exist + void CumulIV(const tCplIV & aCpl) ; /// Create only if not exist, else add in place /// Random sparse vector static cSparseVect RanGenerate(int aNbVar,double aProba,tREAL8 aMinVal= 1e-2,int aMinSize=1); @@ -74,11 +72,14 @@ template class cSparseVect : public cMemCheck void Reset(); const tCplIV * Find(int anInd) const; /// return the pair of a given index + tCplIV * Find(int anInd) ; /// return the pair of a given index // Maximum index, aDef is used if empty, if aDef<=-2 & empty erreur int MaxIndex(int aDef=-1) const; void EraseIndex(int anInd); + /// Create a real duplicata, as copy-constructor return the same shared ptr + tSV Dup() const; private : /* inline void MakeSort(){if (!mIsSorted) Sort();} diff --git a/MMVII/src/Matrix/BaseMatrixes.cpp b/MMVII/src/Matrix/BaseMatrixes.cpp index c3d7b5b9dd..eb606c4a5d 100755 --- a/MMVII/src/Matrix/BaseMatrixes.cpp +++ b/MMVII/src/Matrix/BaseMatrixes.cpp @@ -31,6 +31,15 @@ template cSparseVect::cSparseVect(int aSzReserve) : IV().reserve(aSzReserve); } +template cSparseVect cSparseVect::Dup() const +{ + tSV aRes(size()); + + *(aRes.mIV) = *mIV; + + return aRes; +} + template cSparseVect::cSparseVect(const cDenseVect & aDV) : cSparseVect (aDV.Sz()) { @@ -54,6 +63,29 @@ template void cSparseVect::Reset() mIV->clear(); } +template void cSparseVect::AddIV(const int & anInd,const Type & aV) +{ + AddIV(tCplIV(anInd,aV)); +} +template void cSparseVect::AddIV(const tCplIV & aCpl) +{ + IV().push_back(aCpl); +} + + +template void cSparseVect::CumulIV(const tCplIV & aCpl) +{ + tCplIV * aPairExist = Find(aCpl.mInd); + if (aPairExist != nullptr) + { + aPairExist->mVal += aCpl.mVal; + } + else + { + AddIV(aCpl); + } +} + template cSparseVect cSparseVect::RanGenerate(int aNbVar,double aProba,tREAL8 aMinVal,int aMinSize) { @@ -80,6 +112,13 @@ template const typename cSparseVect::tCplIV * cSparseVect typename cSparseVect::tCplIV * cSparseVect::Find(int anInd) +{ + for (auto & aPair : *mIV) + if (aPair.mInd==anInd) + return & aPair; + return nullptr; +} template int cSparseVect::MaxIndex(int aDef) const { diff --git a/MMVII/src/Matrix/cLinearConstraint.cpp b/MMVII/src/Matrix/cLinearConstraint.cpp index 314106f5dd..b0e4eda205 100755 --- 
a/MMVII/src/Matrix/cLinearConstraint.cpp +++ b/MMVII/src/Matrix/cLinearConstraint.cpp @@ -158,7 +158,8 @@ template void cSetLinearConstraint::Compile() { mVCstrReduced = mVCstrInit; - if (DEBUG) Show("Init"); + if (DEBUG) + Show("Init"); // Set no selected for all for (auto & aCstr : mVCstrReduced) @@ -323,13 +324,13 @@ template void cOneLinearConstraint::AddBuf(cDSVec & a template void cOneLinearConstraint::SubstituteInOtherConstraint(cOneLinearConstraint & aToSub,cDSVec & aBuf) { -static int aCpt=0; aCpt++; -//DEBUG2 = (aCpt==3); - //if (DEBUG) StdOut() << "INNNN " << aToSub.mLP.Find(mISubst) << " Cpt=" << aCpt << " B2 "<< DEBUG2 << "\n"; + static int aCpt=0; aCpt++; + //DEBUG2 = (aCpt==3); + //if (DEBUG) StdOut() << "INNNN " << aToSub.mLP.Find(mISubst) << " Cpt=" << aCpt << " B2 "<< DEBUG2 << "\n"; SubstituteInSparseLinearEquation(aToSub.mLP,aToSub.mCste,aBuf); - //if (DEBUG) StdOut() << "OUUT " << aToSub.mLP.Find(mISubst) << "\n"; + //if (DEBUG) StdOut() << "OUUT " << aToSub.mLP.Find(mISubst) << "\n"; } template void cOneLinearConstraint::SubstituteInDenseLinearEquation(cDenseVect & aA,Type & aB) const @@ -501,8 +502,9 @@ cBenchLinearConstr::cBenchLinearConstr(int aNbVar,int aNbCstr) : mV0 (aNbVar,eModeInitImage::eMIA_RandCenter), mSetC (aNbVar) { -static int aCpt=0; aCpt++; + static int aCpt=0; aCpt++; // DEBUG= (aCpt==6); + StdOut() << "0-NNNNnBvar " << mNbVar << " NbC=" << mNbCstr << " Cpt " << aCpt << "\n"; for (int aK=0 ; aK< mNbCstr ; aK++) { @@ -521,9 +523,10 @@ static int aCpt=0; aCpt++; } } } + StdOut() << "A-NNNNnBvar " << mNbVar << " NbC=" << mNbCstr << " Cpt " << aCpt << "\n"; // if (!DEBUG) return; - StdOut() << "NNNNnBvar " << mNbVar << " NbC=" << mNbCstr << "\n"; mSetC.Compile(); + StdOut() << "B-NNNNnBvar " << mNbVar << " NbC=" << mNbCstr << " Cpt " << aCpt << "\n"; for (int aK1=0 ; aK1< mNbCstr ; aK1++) { @@ -536,14 +539,18 @@ static int aCpt=0; aCpt++; if (aK1<=aK2) { - StdOut() << "PPP " << aPair - << " N1=" << aC1.mNum << "," << aC1.mOrder - << " N2=" << aC2.mNum << "," << aC2.mOrder - << " Cpt=" << aCpt << "\n"; + if (DEBUG) + { + StdOut() << "PPP " << aPair + << " N1=" << aC1.mNum << "," << aC1.mOrder + << " N2=" << aC2.mNum << "," << aC2.mOrder + << " Cpt=" << aCpt << "\n"; + } MMVII_INTERNAL_ASSERT_bench(aPair==0,"Reduce in LinearCstr"); } } } + StdOut() << "C-NNNNnBvar " << mNbVar << " NbC=" << mNbCstr << " Cpt " << aCpt << "\n"; } void BenchLinearConstr(cParamExeBench & aParam) @@ -554,7 +561,7 @@ return; StdOut() << "BenchLinearConstrBenchLinearConstr\n"; // std::vector aV{{2,3},{3,2}}; - for (int aK=0 ; aK<100000 ; aK++) + for (int aK=0 ; aK<100 ; aK++) { cBenchLinearConstr(4,2); cBenchLinearConstr(10,2); @@ -562,6 +569,14 @@ return; cBenchLinearConstr(20,5); } + for (int aK=0 ; aK<5000 ; aK++) + { + int aNbVar = 1 + 100 * RandUnif_0_1(); + int aNbCstr = (aNbVar>1) ? 
RandUnif_N(aNbVar-1) : 0 ; + // int aNbCstr=4; + cBenchLinearConstr(aNbVar,aNbCstr); + } + aParam.EndBench(); } From 8730892fe7e5f344fa1015974f72d04ee6b9b38a Mon Sep 17 00:00:00 2001 From: deseilligny Date: Tue, 2 Jan 2024 18:05:29 +0100 Subject: [PATCH 06/21] Test prelim on subst ok --- MMVII/include/MMVII_Matrix.h | 5 + MMVII/src/Matrix/BaseMatrixes.cpp | 13 ++ MMVII/src/Matrix/cLinearConstraint.cpp | 245 ++++++++++++++----------- 3 files changed, 159 insertions(+), 104 deletions(-) diff --git a/MMVII/include/MMVII_Matrix.h b/MMVII/include/MMVII_Matrix.h index a14e123728..f944391f3d 100755 --- a/MMVII/include/MMVII_Matrix.h +++ b/MMVII/include/MMVII_Matrix.h @@ -163,6 +163,10 @@ template class cDenseVect /// return distance to subspace (i.e distance to proj) Type DistToSubspace(const std::vector &) const; + /// Theoretically max min distance, but before I find an exact algorithm, just the max on all vect , compute 2 way + static Type ApproxDistBetweenSubspace(const std::vector &,const std::vector &); + + // -------------- Gram schmitd method for orthogonalization ------------------- /** Elementary step of Gram-Schmit orthogonalization method ; return a vector orthogonal to all VV and that belong to the space "this+aVV", assumme aVV are already orthogonal */ @@ -178,6 +182,7 @@ template class cDenseVect private : + static Type ASymApproxDistBetweenSubspace(const std::vector &,const std::vector &); tIM mIm; }; /* To come, sparse vector, will be vect + vect */ diff --git a/MMVII/src/Matrix/BaseMatrixes.cpp b/MMVII/src/Matrix/BaseMatrixes.cpp index eb606c4a5d..c91fb56649 100755 --- a/MMVII/src/Matrix/BaseMatrixes.cpp +++ b/MMVII/src/Matrix/BaseMatrixes.cpp @@ -394,7 +394,20 @@ template std::vector> cDenseVect::BaseCompl return std::vector>(aRes.begin()+aVV.size(),aRes.end()); } +template Type cDenseVect::ASymApproxDistBetweenSubspace(const std::vector & aVV1,const std::vector & aVV2) +{ + Type aRes=0.0; + + for (const auto & aV1 : aVV1) + UpdateMax(aRes,aV1.DistToSubspace(aVV2)); + return aRes; +} + +template Type cDenseVect::ApproxDistBetweenSubspace(const std::vector & aVV1,const std::vector & aVV2) +{ + return std::max(ASymApproxDistBetweenSubspace(aVV1,aVV2),ASymApproxDistBetweenSubspace(aVV2,aVV1)); +} /* template void AddData(const cAuxAr2007 & anAux, cDenseVect & aDV) diff --git a/MMVII/src/Matrix/cLinearConstraint.cpp b/MMVII/src/Matrix/cLinearConstraint.cpp index b0e4eda205..bdea4db819 100755 --- a/MMVII/src/Matrix/cLinearConstraint.cpp +++ b/MMVII/src/Matrix/cLinearConstraint.cpp @@ -89,13 +89,15 @@ template class cOneLinearConstraint * of equivalence */ cOneLinearConstraint(const tSV&aLP,const Type& aCste,int aNum); + cOneLinearConstraint Dup() const; // Subsract into "aToSub" so as to annulate the coeff with mISubst - void SubstituteInOtherConstraint(cOneLinearConstraint & aToSub,cDSVec & aBuf); - void SubstituteInDenseLinearEquation (tDV & aA,Type & aB) const; + void SubstituteInOtherConstraint(cOneLinearConstraint & aToSub,cDSVec & aBuf); + void SubstituteInDenseLinearEquation (tDV & aA,Type & aB) const; void SubstituteInSparseLinearEquation(tSV & aA,Type & aB,cDSVec & aBuf) const; void SubstituteInOutRSNL(tIO_RSNL& aIO,cDSVec & aBuf,const tDV & aCurSol) const; + /// Extract pair with maximal amplitude (in abs) const tCplIV * LinearMax() const; /// One the Index of substitution is chosen, transformat by divide all equation by Li and supress Li tha implicitely=1 @@ -106,14 +108,17 @@ template class cOneLinearConstraint private : - void AddBuf(cDSVec & aBuf,const Type & aMul,int 
aI2Avoid) const; + /// Return the vector with the X[mISubst] to 1 (for test) + cDenseVect DenseVectRestored(int aNbVar) const; + + void AddBuf(cDSVec & aBuf,const Type & aMul) const; tSV mLP; /// Linear part int mISubst; /// Indexe which is substituted Type mCste; /// Constant of the constrainte int mNum; /// Identifier, used for debug at least int mOrder; /// Order of reduction, used to sort the constraint - bool mSelected; /// a marker to know if a constraint has already been reduced + bool mReduced; /// a marker to know if a constraint has already been reduced }; template class cSetLinearConstraint @@ -126,14 +131,23 @@ template class cSetLinearConstraint typedef typename tSV::tCplIV tCplIV; typedef cOneLinearConstraint t1Constr; - void Compile(); + /// Cstr : allow the buffer for computatio, cSetLinearConstraint(int aNbVar); + /// Transformate the set of constraint to allow a cascade os substitution + void Compile(bool ForBench); + /// Add a new constraint (just debug) void Add1Constr(const t1Constr &); + /// Show all the detail void Show(const std::string & aMsg) const; + + /// Test that the reduced contsraint define the same space than initial (a bit slow ...) + void TestSameSpace(); + private : std::vector mVCstrInit; // Initial constraint, std::vector mVCstrReduced; // Constraint after reduction + int mNbVar; cDSVec mBuf; // Buffer for computation }; @@ -144,7 +158,8 @@ template class cSetLinearConstraint /* ************************************************************ */ template cSetLinearConstraint::cSetLinearConstraint(int aNbVar) : - mBuf (aNbVar) + mNbVar (aNbVar), + mBuf (aNbVar) { } @@ -154,16 +169,12 @@ template void cSetLinearConstraint::Add1Constr(const t1Constr } -template void cSetLinearConstraint::Compile() +template void cSetLinearConstraint::Compile(bool ForBench) { - mVCstrReduced = mVCstrInit; - - if (DEBUG) - Show("Init"); - - // Set no selected for all - for (auto & aCstr : mVCstrReduced) - aCstr.mSelected = false; + // make a copy of initial cstr : use dup because shared pointer on mLP .... + mVCstrReduced.clear(); + for (const auto & aCstr : mVCstrInit) + mVCstrReduced.push_back(aCstr.Dup()); size_t aNbReduced = 0; while (aNbReduced != mVCstrInit.size()) @@ -172,7 +183,7 @@ template void cSetLinearConstraint::Compile() cWhichMax aMax(-1,-1.0); for (int aKC=0; aKCmVal)); } t1Constr& aBest = mVCstrReduced.at(aMax.IndexExtre()); @@ -182,21 +193,23 @@ template void cSetLinearConstraint::Compile() // substract the selected constraint to all for (t1Constr & aCstr : mVCstrReduced) { - if (! aCstr.mSelected) + if (! 
aCstr.mReduced) { if (DEBUG) StdOut() << "SIOC, ISUBS " << aBest.mISubst << " N=" << aBest.mNum << " => " << aCstr.mNum << "\n"; aBest.SubstituteInOtherConstraint(aCstr,mBuf); } } + + // Show("CCCReduc:" + ToStr(aNbReduced)); + if (ForBench) + TestSameSpace(); if (DEBUG) { if (DEBUG) Show("Reduc:" + ToStr(aNbReduced)); } aNbReduced++; } - if (DEBUG) - StdOut() << "=======================================\n"; std::sort ( mVCstrReduced.begin(), @@ -212,6 +225,27 @@ template void cSetLinearConstraint::Show(const std::string & for (const auto & aCstr: mVCstrReduced) aCstr.Show(); } +template void cSetLinearConstraint::TestSameSpace() +{ + std::vector > aV0; //< Dense representation of initial cstr + std::vector > aVR; //< Dense representation of reduced cstr + + // make a dense representation for constraints already reduced + for (size_t aKC=0 ; aKC(mVCstrInit[aKC].mLP,mNbVar)); + aVR.push_back(mVCstrReduced[aKC].DenseVectRestored(mNbVar)); + } + } + + // check the subspace Init&Reduce are almost the same + Type aD = cDenseVect::ApproxDistBetweenSubspace(aV0,aVR); + MMVII_INTERNAL_ASSERT_bench(aD<1e-5,"cSetLinearConstraint:: TestSameSpace"); +} + + /* ************************************************************ */ /* */ @@ -244,10 +278,6 @@ template void cDSVec::Reset() template void cDSVec::TestEmpty() { - static bool First = true; - if (First) - StdOut() << "TestEmptyTestEmptyTestEmptyTestEmptyTestEmptyTestEmptyTestEmptyTestEmpty\n"; - First = false; for (const auto & aV : mVec.ToStdVect()) MMVII_INTERNAL_ASSERT_tiny(aV==0.0,"Vec Test Empty"); MMVII_INTERNAL_ASSERT_tiny(mSet.mVIndOcc.empty(),"Occ Test Empty"); @@ -270,67 +300,77 @@ template void cDSVec::Show() /* ************************************************************** */ template cOneLinearConstraint::cOneLinearConstraint(const tSV&aLP,const Type& aCste,int aNum) : - mLP (aLP), - mISubst (-1), - mCste (aCste), - mNum (aNum), - mOrder (-1), - mSelected (false) + mLP (aLP), + mISubst (-1), + mCste (aCste), + mNum (aNum), + mOrder (-1), + mReduced (false) { } +template cOneLinearConstraint cOneLinearConstraint::Dup() const +{ + cOneLinearConstraint aRes = *this; + aRes.mLP = mLP.Dup(); + + return aRes; +} + +template cDenseVect cOneLinearConstraint::DenseVectRestored(int aNbVar) const +{ + cDenseVect aRes(mLP,aNbVar); + if (mISubst>=0) + aRes(mISubst)=1.0; + return aRes; +} template void cOneLinearConstraint::InitSubst() { - mSelected = true; - const tCplIV * aCple = LinearMax() ; - mISubst = aCple->mInd; + mReduced = true; // memorize as reduced + const tCplIV * aCple = LinearMax() ; // Extract the coord with maximal amplitude + mISubst = aCple->mInd; // This is the variable that will be substituted + Type aV0 = aCple->mVal; // Make a copy because Erase will lost reference ... !!! 
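    /* Small numeric illustration (indices and values are made up, only to fix ideas) :
       for the constraint  4*X[3] + 2*X[7] = 6 , LinearMax() returns the pair (3,4), so
       mISubst=3 and aV0=4 ; after the erase + normalisation below the object holds
       mLP = { 7 -> 0.5 } , mCste = 1.5 , i.e. the substitution  X[3] = 1.5 - 0.5*X[7]
       of point (4) of the class comment. */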
- mLP.EraseIndex(mISubst); + mLP.EraseIndex(mISubst); // supress the coeff substituted, it values implicitely one - for (auto & aPair : mLP.IV()) - aPair.mVal /= aCple->mVal; - mCste /= aCple->mVal; + // Normalize the rest + for (auto & aPair : mLP.IV()) + aPair.mVal /= aV0; + mCste /= aV0; } template const typename cOneLinearConstraint::tCplIV * cOneLinearConstraint::LinearMax() const { + // Extract the pair cWhichMax aMax(nullptr,-1.0); - for (const auto & aPair : mLP) aMax.Add(&aPair,std::abs(aPair.mVal)); - const tCplIV * aRes = aMax.IndexExtre(); + + // Some check,if no pair is found, probably the system was degenerated MMVII_INTERNAL_ASSERT_tiny(aRes!=nullptr,"cOneLinearConstraint::LinearMax probably bad formed cosntrained"); + // to see later if we replace by |aRes->mVal| > Epsilon ? MMVII_INTERNAL_ASSERT_tiny(aRes->mVal!=0,"cOneLinearConstraint::LinearMax probably bad formed cosntrained"); return aRes ; } -template void GlobAddBuf(cDSVec & aBuf,const cSparseVect & aLP,const Type & aMul,int aI2Avoid) +template void GlobAddBuf(cDSVec & aBuf,const cSparseVect & aLP,const Type & aMul) { for (const auto & aPair : aLP.IV()) { - if (aPair.mInd != aI2Avoid) - { - aBuf.AddValInd(aPair.mVal*aMul,aPair.mInd); - } + aBuf.AddValInd(aPair.mVal*aMul,aPair.mInd); } } -template void cOneLinearConstraint::AddBuf(cDSVec & aBuf,const Type & aMul,int aI2Avoid) const +template void cOneLinearConstraint::AddBuf(cDSVec & aBuf,const Type & aMul) const { - GlobAddBuf(aBuf, mLP,aMul,aI2Avoid); + GlobAddBuf(aBuf, mLP,aMul); } template void cOneLinearConstraint::SubstituteInOtherConstraint(cOneLinearConstraint & aToSub,cDSVec & aBuf) { - static int aCpt=0; aCpt++; - //DEBUG2 = (aCpt==3); - //if (DEBUG) StdOut() << "INNNN " << aToSub.mLP.Find(mISubst) << " Cpt=" << aCpt << " B2 "<< DEBUG2 << "\n"; - SubstituteInSparseLinearEquation(aToSub.mLP,aToSub.mCste,aBuf); - - //if (DEBUG) StdOut() << "OUUT " << aToSub.mLP.Find(mISubst) << "\n"; } template void cOneLinearConstraint::SubstituteInDenseLinearEquation(cDenseVect & aA,Type & aB) const @@ -349,13 +389,7 @@ template void cOneLinearConstraint::SubstituteInDenseLinearEq template void cOneLinearConstraint::SubstituteInSparseLinearEquation(tSV & aA,Type & aB,cDSVec & aBuf) const { // (A'-Ai mL) X = B - Ai mC - const tCplIV * aPairInA = aA.Find(mISubst) ; - - aBuf.TestEmpty(); - /*if (DEBUG2) - { - StdOut() << "PAIR " << aPair << " SZ=" << mLP.size() << "\n"; - }*/ + tCplIV * aPairInA = aA.Find(mISubst) ; // current case, if the index is not present in equation nothing to do (in this case Ai=0 and A'=A) @@ -364,17 +398,15 @@ template void cOneLinearConstraint::SubstituteInSparseLinearE // substract constant aB -= mCste * aValAi; - // Substract + // Substract index aA.EraseIndex(mISubst); + // aPairInA->mVal = 0.0; same effect than erase + // other current case, if the equation is a single substition (like frozen var) no more things to do if (mLP.size()==0) return; -//if (DEBUG2) aBuf.Show(); - // mIsSubst is send as parameter because it must disapear in the buf - GlobAddBuf(aBuf,aA, (Type)1.0,mISubst); -//if (DEBUG2) aBuf.Show(); - this ->AddBuf(aBuf,-aValAi,mISubst); -//if (DEBUG2) aBuf.Show(); + GlobAddBuf(aBuf,aA, (Type)1.0); + this ->AddBuf(aBuf,-aValAi); aA.Reset(); for (const auto & aInd : aBuf.mSet.mVIndOcc) @@ -382,14 +414,13 @@ template void cOneLinearConstraint::SubstituteInSparseLinearE aA.AddIV(aInd,aBuf.mVec(aInd)); } aBuf.Reset(); - aBuf.TestEmpty(); } template void cOneLinearConstraint::SubstituteInOutRSNL(tIO_RSNL& aIO,cDSVec & aBuf,const tDV & 
aCurSol) const { // [1] Find the index of mISubst - int aKSubst = -1; // Indexe wher mGlobVIn potentially equals mISubst + int aKSubst = -1; // Indexe where mGlobVIn potentially equals mISubst for (size_t aKVar=0 ; aKVar void cOneLinearConstraint::SubstituteInOutRSNL(tIO_ /* F(X) = F(X0) + D (X-X0) = D (X-X0) + V0 = D' (X'-X0') +Di (Xi-X0i) + V0 = D' (X'-X0') + Di (mC- mL X' -X0i) + V0 - = D' (X'-X0') - Di mL (X' -X'0 + X'0) - Di X0i + V0 + Di mC - = (D' -Di mL) (X'-X0') + V0 + Di (mC -X0i - mL X'0 ) + = D' (X'-X0') - Di mL (X' -X0' + X0') - Di X0i + V0 + Di mC + = (D' -Di mL) (X'-X0') + V0 + Di (mC -X0i - mL X0' ) */ Type aDelta = (mCste-aCurSol(mISubst)); // mC-X0i @@ -422,51 +453,48 @@ template void cOneLinearConstraint::SubstituteInOutRSNL(tIO_ // [A] Compute the constant and put the linear part in buf (to be indexable) for (const auto & aPair : mLP.IV()) { - if (aPair.mInd != mISubst) - { - aIO.mVals[aKEq] -= aPair.mVal * aCurSol(aPair.mInd) *aDerI ; // -Di mL X'0 - aBuf.AddValInd(aPair.mVal,aPair.mInd); // We memorize indexe - } + MMVII_INTERNAL_ASSERT_tiny(aPair.mInd != mISubst,"Index error"); + aIO.mVals[aKEq] -= aPair.mVal * aCurSol(aPair.mInd) *aDerI ; // -Di mL X'0 + aBuf.AddValInd(aPair.mVal,aPair.mInd); // We memorize indexe } - // [B] modify the derivate using the index, also purge partially the buffer + // [B] modify the derivate using the index, also purge partially the buffer, + // after this only the update will be partial (only variable present in mGlobVInd will be incremented) for (size_t aKVar=0 ; aKVar::IsIndTmp(aInd)) && aBuf.mSet.mOccupied.at(aInd) ) { aIO.mDers.at(aKEq).at(aKVar) -= aBuf.mVec(aInd) * aDerI; // -Di mL aBuf.mSet.mOccupied.at(aInd) = false; // purge occuo - aBuf.mVec(aInd) = 0; // purge + aBuf.mVec(aInd) = 0; // purge vector } } - // [C] modify the derivate for the index, presnt in constraint but not in equation + // [C] modify the derivate for the index, present in constraint but not in equation for (const auto & aPair : mLP.IV()) { - if (aPair.mInd != mISubst) + if (aBuf.mSet.mOccupied.at(aPair.mInd)) // if { - if (aBuf.mSet.mOccupied.at(aPair.mInd)) - { - aIO.mDers.at(aKEq).push_back(-aPair.mVal * aDerI); - if (FirstDerNN) // (aKEq==0) - aIO.mGlobVInd.push_back(aPair.mInd); - aBuf.mSet.mOccupied.at(aPair.mInd) =false; - aBuf.mVec(aPair.mInd) = 0.0; - } - } + aIO.mDers.at(aKEq).push_back(-aPair.mVal * aDerI); // - Di mL + if (FirstDerNN) // (aKEq==0) + aIO.mGlobVInd.push_back(aPair.mInd); + aBuf.mSet.mOccupied.at(aPair.mInd) =false; // purge occupied + aBuf.mVec(aPair.mInd) = 0.0; // purge vector + } } // [D] finish purge aBuf.mSet.mVIndOcc.clear(); } - aDerI = 0; - FirstDerNN= false; + aDerI = 0; // now supress the derivate of substituted variable + FirstDerNN= false; // No longer first Non Null derivate } } } template void cOneLinearConstraint::Show() const { - StdOut() << " * N=" << mNum << " O="<< mOrder << " S=" << mSelected << " I=" << mISubst ; + StdOut() << " * N=" << mNum << " O="<< mOrder << " S=" << mReduced << " I=" << mISubst ; for (const auto & aPair:mLP.IV()) StdOut() << " [" << aPair.mInd << " : " << aPair.mVal << "]"; @@ -503,9 +531,6 @@ cBenchLinearConstr::cBenchLinearConstr(int aNbVar,int aNbCstr) : mSetC (aNbVar) { static int aCpt=0; aCpt++; -// DEBUG= (aCpt==6); - StdOut() << "0-NNNNnBvar " << mNbVar << " NbC=" << mNbCstr << " Cpt " << aCpt << "\n"; - for (int aK=0 ; aK< mNbCstr ; aK++) { bool Ok = false; @@ -523,11 +548,12 @@ cBenchLinearConstr::cBenchLinearConstr(int aNbVar,int aNbCstr) : } } } - StdOut() << "A-NNNNnBvar " << 
mNbVar << " NbC=" << mNbCstr << " Cpt " << aCpt << "\n"; - // if (!DEBUG) return; - mSetC.Compile(); - StdOut() << "B-NNNNnBvar " << mNbVar << " NbC=" << mNbCstr << " Cpt " << aCpt << "\n"; + // the paramater true will check that constraint reduced define the same space + mSetC.Compile(true); + /* Check that the constraint are somewhat truangular sup, ie for any constraint + * the substiution variable is absent from the constraint after + */ for (int aK1=0 ; aK1< mNbCstr ; aK1++) { const cOneLinearConstraint & aC1= mSetC.mVCstrReduced.at(aK1); @@ -535,10 +561,13 @@ cBenchLinearConstr::cBenchLinearConstr(int aNbVar,int aNbCstr) : { const cOneLinearConstraint & aC2= mSetC.mVCstrReduced.at(aK2); + // extract the posible term of C2 corresponding to substituate of C1 auto * aPair = aC2.mLP.Find(aC1.mISubst); if (aK1<=aK2) { + // this term should not exist if C1 is before C2 + MMVII_INTERNAL_ASSERT_bench((aPair==0) || (aPair->mVal==0.0) ,"Reduce in LinearCstr"); if (DEBUG) { StdOut() << "PPP " << aPair @@ -546,22 +575,21 @@ cBenchLinearConstr::cBenchLinearConstr(int aNbVar,int aNbCstr) : << " N2=" << aC2.mNum << "," << aC2.mOrder << " Cpt=" << aCpt << "\n"; } - MMVII_INTERNAL_ASSERT_bench(aPair==0,"Reduce in LinearCstr"); } } } - StdOut() << "C-NNNNnBvar " << mNbVar << " NbC=" << mNbCstr << " Cpt " << aCpt << "\n"; } void BenchLinearConstr(cParamExeBench & aParam) { -return; + int aMul = std::min(4,1+aParam.Level()); + //return; if (! aParam.NewBench("LinearConstr")) return; StdOut() << "BenchLinearConstrBenchLinearConstr\n"; // std::vector aV{{2,3},{3,2}}; - for (int aK=0 ; aK<100 ; aK++) + for (int aK=0 ; aK<50 ; aK++) { cBenchLinearConstr(4,2); cBenchLinearConstr(10,2); @@ -569,14 +597,23 @@ return; cBenchLinearConstr(20,5); } - for (int aK=0 ; aK<5000 ; aK++) + int aNb = std::max(1,int(100.0/pow(aMul,4)) ); + for (int aK=0 ; aK1) ? RandUnif_N(aNbVar-1) : 0 ; + cBenchLinearConstr(aNbVar,aNbCstr); + } + + for (int aK=0 ; aK<500 ; aK++) { - int aNbVar = 1 + 100 * RandUnif_0_1(); + int aNbVar = 1 + 20 * RandUnif_0_1(); int aNbCstr = (aNbVar>1) ? 
RandUnif_N(aNbVar-1) : 0 ; - // int aNbCstr=4; + aNbCstr = std::min(10,aNbCstr); cBenchLinearConstr(aNbVar,aNbCstr); } + aParam.EndBench(); } From f4cda42058ab0a90cec1fdbce7c0a98a2aab2ea6 Mon Sep 17 00:00:00 2001 From: deseilligny Date: Tue, 2 Jan 2024 20:17:11 +0100 Subject: [PATCH 07/21] Somm comm --- MMVII/src/Matrix/cResolSysNonLinear.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/MMVII/src/Matrix/cResolSysNonLinear.cpp b/MMVII/src/Matrix/cResolSysNonLinear.cpp index 7796e80d31..14472b95a1 100755 --- a/MMVII/src/Matrix/cResolSysNonLinear.cpp +++ b/MMVII/src/Matrix/cResolSysNonLinear.cpp @@ -253,6 +253,7 @@ template void cResolSysNonLinear::AddEqFixNewVal(const tObj template void cResolSysNonLinear::ModifyFrozenVar (tIO_RSNL& aIO) { + // CHANGE HERE for (size_t aKVar=0 ; aKVar void cResolSysNonLinear::AddObservationLinear for (const auto & aPair :aCoeff) { + // CHANGE HERE if (mVarIsFrozen.at(aPair.mInd)) { // if freeze => transfert value in contant @@ -313,6 +315,7 @@ template void cResolSysNonLinear::AddObservationLinear for (int aK=0 ; aK const cDenseVect & cResolSysNonLinear::SolveUp // for var frozen, they are not involved in any equation, we must fix their value other way for (int aK=0 ; aK Date: Wed, 3 Jan 2024 11:20:49 +0100 Subject: [PATCH 08/21] In transert FixVar 2 Linear Constr --- MMVII/include/MMVII_SysSurR.h | 6 + MMVII/src/Matrix/LinearConstraint.h | 157 ++++++++++++++++++++++++ MMVII/src/Matrix/cLinearConstraint.cpp | 147 +--------------------- MMVII/src/Matrix/cResolSysNonLinear.cpp | 27 ++-- 4 files changed, 182 insertions(+), 155 deletions(-) create mode 100755 MMVII/src/Matrix/LinearConstraint.h diff --git a/MMVII/include/MMVII_SysSurR.h b/MMVII/include/MMVII_SysSurR.h index 4b408af8e6..238bc6c6a5 100755 --- a/MMVII/include/MMVII_SysSurR.h +++ b/MMVII/include/MMVII_SysSurR.h @@ -21,6 +21,8 @@ template class cResidualWeighter; // template class cObjOfMultipleObjUk; template class cObjWithUnkowns; template class cSetInterUK_MultipeObj; +template class cSetLinearConstraint; // defined in "src/Matrix" + /** Class for weighting residuals : compute the vector of weight from a vector of residual; default return {1.0,1.0,...} @@ -138,6 +140,7 @@ class cREAL8_RSNL void AssertNotInEquation() const; ///< verify that we are notin equation step (to allow froze modification) int CountFreeVariables() const; ///< number of free variables protected : + void SetPhaseEq(); int mNbVar; bool mInPhaseAddEq; ///< check that dont modify val fixed after adding equations @@ -273,6 +276,9 @@ template class cResolSysNonLinear : public cREAL8_RSNL std::vector mValueFrozenVar; ///< indicate for each var the possible value where it is frozen int lastNbObs; ///< number of observations of last solving int currNbObs; ///< number of observations currently added + + /// handle the linear constraint : fix var, shared var, gauge ... 
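       /// (for instance, freezing variable k at value v is just the one-term constraint 1.0 * X[k] = v)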
+ cSetLinearConstraint* mLinearConstr; }; diff --git a/MMVII/src/Matrix/LinearConstraint.h b/MMVII/src/Matrix/LinearConstraint.h new file mode 100755 index 0000000000..bb0ba43e70 --- /dev/null +++ b/MMVII/src/Matrix/LinearConstraint.h @@ -0,0 +1,157 @@ +#ifndef _LINEARCONSTRAINT_H_ +#define _LINEARCONSTRAINT_H_ + +#include "MMVII_Tpl_Images.h" +#include "MMVII_SysSurR.h" + +using namespace NS_SymbolicDerivative; +using namespace MMVII; + +namespace MMVII +{ +// static bool DEBUG=false; +//static bool DEBUG2=false; + +template class cDSVec; // Sparse/Dense vect +template class cOneLinearConstraint; // represent 1 constr +template class cSetLinearConstraint; // represent a set of constraint +class cBenchLinearConstr; + +/** Class for a "sparse" dense vector, i.e a vector that is represented by a dense vector + */ + +template class cDSVec +{ + public : + cDSVec(size_t aNbVar); + + void AddValInd(const Type &,int anInd); + + cDenseVect mVec; + cSetIntDyn mSet; + + void Reset(); + void Show(); + void TestEmpty(); +}; + +/* Class for handling linear constraint in non linear optimization system. + * Note the constraint on a vector X as : + * + * mL . X = mC where mL is a non null vector + * + * (I) The way it is done is by substitution : + * + * (1) We select an arbitray non null coord of L Li!=0; (something like the biggest one) + * (2) We suppose Li=1.0 (in fact we have it by setting mL = mL/Li , mC = mC/Li) + * (3) Let note X' the vector X without Xi + * (4) we have Xi = mC- mL X' + * (5) Each time we add a new obs in sytem : + * A.X = B + * A.X-B = A' X' -B + Ai Xi = A' X' -B + Ai (mC-mL X') + * (A'-Ai mL) X = B - Ai mC + * + * (II) So far so good, but now supose we have the two contraint: + * C1: x +2y=0 C2 2x + y = 0 + * And a form L :x + y +z, obviouly as the two constraint implie x=y=0, it mean that L shoul reduce to z + * + * But suppose we use C1 as x ->-2y and C2 as C2A : y-> -2x or C2B x-> -y/2 + * using C1 we have L -> -y+z and + * C2A -> 2x+z + * C2B -> -y+z (nothing to do, x is already substitued) + * + * So this does not lead to the good reduction + * + * (III) So now, before using the constraint we make a preprocessing, more a less a triangulation : + * + * C1 : x + 2y=0 ~ x->-2y + * C'2 : C2(x->-2y) : -y=0 + * + * now if we use C1 and C'2 L will reduce to 0 + * This the principe used in MMVII for constrained optimization : make a substitution afer a preprocessing + * that triangulate the constraint + */ + + +template class cOneLinearConstraint +{ + public : + friend class cSetLinearConstraint; + friend class cBenchLinearConstr; + + typedef cSparseVect tSV; + typedef cDenseVect tDV; + typedef typename tSV::tCplIV tCplIV; + typedef cInputOutputRSNL tIO_RSNL; + typedef cSetIORSNL_SameTmp tSetIO_ST; + + /** In Cstr we can fix the index of subst, if it value -1 let the system select the best , fixing can be usefull in case + * of equivalence + */ + cOneLinearConstraint(const tSV&aLP,const Type& aCste,int aNum); + cOneLinearConstraint Dup() const; + + // Subsract into "aToSub" so as to annulate the coeff with mISubst + void SubstituteInOtherConstraint(cOneLinearConstraint & aToSub,cDSVec & aBuf); + void SubstituteInDenseLinearEquation (tDV & aA,Type & aB) const; + void SubstituteInSparseLinearEquation(tSV & aA,Type & aB,cDSVec & aBuf) const; + void SubstituteInOutRSNL(tIO_RSNL& aIO,cDSVec & aBuf,const tDV & aCurSol) const; + + /// Extract pair with maximal amplitude (in abs) + const tCplIV * LinearMax() const; + + /// One the Index of substitution is chosen, transformat by divide all 
equation by Li and supress Li tha implicitely=1 + void InitSubst(); + + /// 4 Debug purpose + void Show() const; + + private : + + /// Return the vector with the X[mISubst] to 1 (for test) + cDenseVect DenseVectRestored(int aNbVar) const; + + void AddBuf(cDSVec & aBuf,const Type & aMul) const; + + tSV mLP; /// Linear part + int mISubst; /// Indexe which is substituted + Type mCste; /// Constant of the constrainte + int mNum; /// Identifier, used for debug at least + int mOrder; /// Order of reduction, used to sort the constraint + bool mReduced; /// a marker to know if a constraint has already been reduced +}; + +template class cSetLinearConstraint +{ + public : + friend class cBenchLinearConstr; + + typedef cSparseVect tSV; + typedef cDenseVect tDV; + typedef typename tSV::tCplIV tCplIV; + typedef cOneLinearConstraint t1Constr; + + /// Cstr : allow the buffer for computatio, + cSetLinearConstraint(int aNbVar); + /// Transformate the set of constraint to allow a cascade os substitution + void Compile(bool ForBench); + /// Add a new constraint (just debug) + void Add1Constr(const t1Constr &); + + /// Show all the detail + void Show(const std::string & aMsg) const; + + /// Test that the reduced contsraint define the same space than initial (a bit slow ...) + void TestSameSpace(); + + private : + std::vector mVCstrInit; // Initial constraint, + std::vector mVCstrReduced; // Constraint after reduction + int mNbVar; + cDSVec mBuf; // Buffer for computation +}; + +}; +#endif // _LINEARCONSTRAINT_H_ + + diff --git a/MMVII/src/Matrix/cLinearConstraint.cpp b/MMVII/src/Matrix/cLinearConstraint.cpp index bdea4db819..36d9aba481 100755 --- a/MMVII/src/Matrix/cLinearConstraint.cpp +++ b/MMVII/src/Matrix/cLinearConstraint.cpp @@ -1,7 +1,4 @@ - -#include "MMVII_Tpl_Images.h" - -#include "MMVII_SysSurR.h" +#include "LinearConstraint.h" using namespace NS_SymbolicDerivative; using namespace MMVII; @@ -9,148 +6,6 @@ using namespace MMVII; namespace MMVII { static bool DEBUG=false; -//static bool DEBUG2=false; - -template class cDSVec; // Sparse/Dense vect -template class cOneLinearConstraint; // represent 1 constr -template class cSetLinearConstraint; // represent a set of constraint - -class cBenchLinearConstr; - -/** Class for a "sparse" dense vector, i.e a vector that is represented by a dense vector - */ - -template class cDSVec -{ - public : - cDSVec(size_t aNbVar); - - void AddValInd(const Type &,int anInd); - - cDenseVect mVec; - cSetIntDyn mSet; - - void Reset(); - void Show(); - void TestEmpty(); -}; - -/* Class for handling linear constraint in non linear optimization system. - * Note the constraint on a vector X as : - * - * mL . 
X = mC where mL is a non null vector - * - * (I) The way it is done is by substitution : - * - * (1) We select an arbitray non null coord of L Li!=0; (something like the biggest one) - * (2) We suppose Li=1.0 (in fact we have it by setting mL = mL/Li , mC = mC/Li) - * (3) Let note X' the vector X without Xi - * (4) we have Xi = mC- mL X' - * (5) Each time we add a new obs in sytem : - * A.X = B - * A.X-B = A' X' -B + Ai Xi = A' X' -B + Ai (mC-mL X') - * (A'-Ai mL) X = B - Ai mC - * - * (II) So far so good, but now supose we have the two contraint: - * C1: x +2y=0 C2 2x + y = 0 - * And a form L :x + y +z, obviouly as the two constraint implie x=y=0, it mean that L shoul reduce to z - * - * But suppose we use C1 as x ->-2y and C2 as C2A : y-> -2x or C2B x-> -y/2 - * using C1 we have L -> -y+z and - * C2A -> 2x+z - * C2B -> -y+z (nothing to do, x is already substitued) - * - * So this does not lead to the good reduction - * - * (III) So now, before using the constraint we make a preprocessing, more a less a triangulation : - * - * C1 : x + 2y=0 ~ x->-2y - * C'2 : C2(x->-2y) : -y=0 - * - * now if we use C1 and C'2 L will reduce to 0 - * This the principe used in MMVII for constrained optimization : make a substitution afer a preprocessing - * that triangulate the constraint - */ - - -template class cOneLinearConstraint -{ - public : - friend class cSetLinearConstraint; - friend class cBenchLinearConstr; - - typedef cSparseVect tSV; - typedef cDenseVect tDV; - typedef typename tSV::tCplIV tCplIV; - typedef cInputOutputRSNL tIO_RSNL; - typedef cSetIORSNL_SameTmp tSetIO_ST; - - /** In Cstr we can fix the index of subst, if it value -1 let the system select the best , fixing can be usefull in case - * of equivalence - */ - cOneLinearConstraint(const tSV&aLP,const Type& aCste,int aNum); - cOneLinearConstraint Dup() const; - - // Subsract into "aToSub" so as to annulate the coeff with mISubst - void SubstituteInOtherConstraint(cOneLinearConstraint & aToSub,cDSVec & aBuf); - void SubstituteInDenseLinearEquation (tDV & aA,Type & aB) const; - void SubstituteInSparseLinearEquation(tSV & aA,Type & aB,cDSVec & aBuf) const; - void SubstituteInOutRSNL(tIO_RSNL& aIO,cDSVec & aBuf,const tDV & aCurSol) const; - - /// Extract pair with maximal amplitude (in abs) - const tCplIV * LinearMax() const; - - /// One the Index of substitution is chosen, transformat by divide all equation by Li and supress Li tha implicitely=1 - void InitSubst(); - - /// 4 Debug purpose - void Show() const; - - private : - - /// Return the vector with the X[mISubst] to 1 (for test) - cDenseVect DenseVectRestored(int aNbVar) const; - - void AddBuf(cDSVec & aBuf,const Type & aMul) const; - - tSV mLP; /// Linear part - int mISubst; /// Indexe which is substituted - Type mCste; /// Constant of the constrainte - int mNum; /// Identifier, used for debug at least - int mOrder; /// Order of reduction, used to sort the constraint - bool mReduced; /// a marker to know if a constraint has already been reduced -}; - -template class cSetLinearConstraint -{ - public : - friend class cBenchLinearConstr; - - typedef cSparseVect tSV; - typedef cDenseVect tDV; - typedef typename tSV::tCplIV tCplIV; - typedef cOneLinearConstraint t1Constr; - - /// Cstr : allow the buffer for computatio, - cSetLinearConstraint(int aNbVar); - /// Transformate the set of constraint to allow a cascade os substitution - void Compile(bool ForBench); - /// Add a new constraint (just debug) - void Add1Constr(const t1Constr &); - - /// Show all the detail - void Show(const 
std::string & aMsg) const; - - /// Test that the reduced contsraint define the same space than initial (a bit slow ...) - void TestSameSpace(); - - private : - std::vector mVCstrInit; // Initial constraint, - std::vector mVCstrReduced; // Constraint after reduction - int mNbVar; - cDSVec mBuf; // Buffer for computation -}; - /* ************************************************************ */ /* */ /* cSetLinearConstraint */ diff --git a/MMVII/src/Matrix/cResolSysNonLinear.cpp b/MMVII/src/Matrix/cResolSysNonLinear.cpp index 14472b95a1..f13c93d832 100755 --- a/MMVII/src/Matrix/cResolSysNonLinear.cpp +++ b/MMVII/src/Matrix/cResolSysNonLinear.cpp @@ -1,7 +1,7 @@ - #include "MMVII_Tpl_Images.h" - #include "MMVII_SysSurR.h" +#include "LinearConstraint.h" + using namespace NS_SymbolicDerivative; using namespace MMVII; @@ -54,6 +54,14 @@ int cREAL8_RSNL::CountFreeVariables() const return std::count(mVarIsFrozen.begin(), mVarIsFrozen.end(), false); } +void cREAL8_RSNL::SetPhaseEq() +{ + if (! mInPhaseAddEq) + { + // Compile constraint + mInPhaseAddEq = true; + } +} /* ************************************************************ */ @@ -71,11 +79,11 @@ template cResolSysNonLinear::cResolSysNonLinear(tLinearSysSR // mNbVar (aInitSol.Sz()), mCurGlobSol (aInitSol.Dup()), mSysLinear (aSys), - // mInPhaseAddEq (false), // mVarIsFrozen (mNbVar,false), mValueFrozenVar (mNbVar,-1), lastNbObs (0), - currNbObs (0) + currNbObs (0), + mLinearConstr (new cSetLinearConstraint(mNbVar)) { } @@ -87,13 +95,14 @@ template cResolSysNonLinear::cResolSysNonLinear(eModeSSR aMod template cResolSysNonLinear::~cResolSysNonLinear() { delete mSysLinear; + delete mLinearConstr; } // ============= miscelaneous accessors ================ template cLinearOverCstrSys * cResolSysNonLinear::SysLinear() { - mInPhaseAddEq = true; // cautious, if user requires this access he may modify + SetPhaseEq(); // cautious, if user requires this access he may modify return mSysLinear; } @@ -278,7 +287,7 @@ template void cResolSysNonLinear::AddObservationLinear const Type & aRHS ) { - mInPhaseAddEq = true; + SetPhaseEq(); Type aNewRHS = aRHS; cSparseVect aNewCoeff; @@ -309,7 +318,7 @@ template void cResolSysNonLinear::AddObservationLinear const Type & aRHS ) { - mInPhaseAddEq = true; + SetPhaseEq(); Type aNewRHS = aRHS; cDenseVect aNewCoeff = aCoeff.Dup(); @@ -412,7 +421,7 @@ template void cResolSysNonLinear::CalcVal // The possibility of having several comes from potential paralellization // MMVII_INTERNAL_ASSERT_tiny(aVIO.size()==1,"CalcValCalcVal"); - mInPhaseAddEq = true; + SetPhaseEq(); MMVII_INTERNAL_ASSERT_tiny(aCalcVal->NbInBuf()==0,"Buff not empty"); // Usefull only to test correcness of DoOneEval @@ -521,7 +530,7 @@ template <> void cResolSysNonLinear::R_CalcAndAddObs template void cResolSysNonLinear::AddObs(const std::vector& aVIO) { - mInPhaseAddEq = true; + SetPhaseEq(); // Parse all the linearized equation for (const auto & aIO : aVIO) { From 779a7060b23ffb4a9c23a131677c94fa047279fd Mon Sep 17 00:00:00 2001 From: deseilligny Date: Wed, 3 Jan 2024 16:30:28 +0100 Subject: [PATCH 09/21] Before merge --- MMVII/include/MMVII_SysSurR.h | 3 ++ MMVII/src/Matrix/LinearConstraint.h | 21 ++++++++------ MMVII/src/Matrix/cLinearConstraint.cpp | 38 ++++++++++++++++++++++++- MMVII/src/Matrix/cResolSysNonLinear.cpp | 21 ++++++++++---- 4 files changed, 69 insertions(+), 14 deletions(-) diff --git a/MMVII/include/MMVII_SysSurR.h b/MMVII/include/MMVII_SysSurR.h index 238bc6c6a5..7c9a06d49a 100755 --- a/MMVII/include/MMVII_SysSurR.h +++ 
b/MMVII/include/MMVII_SysSurR.h @@ -141,6 +141,8 @@ class cREAL8_RSNL int CountFreeVariables() const; ///< number of free variables protected : void SetPhaseEq(); + /// Mut be defined in inherited class because maniupulate mLinearConstr which depend of type + virtual void InitConstraint() = 0; int mNbVar; bool mInPhaseAddEq; ///< check that dont modify val fixed after adding equations @@ -266,6 +268,7 @@ template class cResolSysNonLinear : public cREAL8_RSNL /// Add observations as computed by CalcVal void AddObs(const std::vector&); + void InitConstraint() override; /** Bases function of calculating derivatives, dont modify the system as is to avoid in case of schur complement */ void CalcVal(tCalc *,std::vector&,const tStdVect & aVTmp,bool WithDer,const tResidualW & ); diff --git a/MMVII/src/Matrix/LinearConstraint.h b/MMVII/src/Matrix/LinearConstraint.h index bb0ba43e70..c941c5f592 100755 --- a/MMVII/src/Matrix/LinearConstraint.h +++ b/MMVII/src/Matrix/LinearConstraint.h @@ -20,7 +20,7 @@ class cBenchLinearConstr; /** Class for a "sparse" dense vector, i.e a vector that is represented by a dense vector */ -template class cDSVec +template class cDSVec : public cMemCheck { public : cDSVec(size_t aNbVar); @@ -73,7 +73,7 @@ template class cDSVec */ -template class cOneLinearConstraint +template class cOneLinearConstraint : public cMemCheck { public : friend class cSetLinearConstraint; @@ -83,7 +83,6 @@ template class cOneLinearConstraint typedef cDenseVect tDV; typedef typename tSV::tCplIV tCplIV; typedef cInputOutputRSNL tIO_RSNL; - typedef cSetIORSNL_SameTmp tSetIO_ST; /** In Cstr we can fix the index of subst, if it value -1 let the system select the best , fixing can be usefull in case * of equivalence @@ -121,15 +120,16 @@ template class cOneLinearConstraint bool mReduced; /// a marker to know if a constraint has already been reduced }; -template class cSetLinearConstraint +template class cSetLinearConstraint : public cMemCheck { public : friend class cBenchLinearConstr; + typedef cInputOutputRSNL tIO_RSNL; typedef cSparseVect tSV; typedef cDenseVect tDV; typedef typename tSV::tCplIV tCplIV; - typedef cOneLinearConstraint t1Constr; + typedef cOneLinearConstraint t1Constr; /// Cstr : allow the buffer for computatio, cSetLinearConstraint(int aNbVar); @@ -138,17 +138,22 @@ template class cSetLinearConstraint /// Add a new constraint (just debug) void Add1Constr(const t1Constr &); + void Reset(); + void Add1ConstrFrozenVar(int aKVar,const Type & aVal); + + void SubstituteInSparseLinearEquation(tSV & aA,Type & aB) const; + void SubstituteInDenseLinearEquation (tDV & aA,Type & aB) const; + void SubstituteInOutRSNL(tIO_RSNL& aIO,const tDV & aCurSol) const; + private : /// Show all the detail void Show(const std::string & aMsg) const; - /// Test that the reduced contsraint define the same space than initial (a bit slow ...) 
void TestSameSpace(); - private : std::vector mVCstrInit; // Initial constraint, std::vector mVCstrReduced; // Constraint after reduction int mNbVar; - cDSVec mBuf; // Buffer for computation + mutable cDSVec mBuf; // Buffer for computation }; }; diff --git a/MMVII/src/Matrix/cLinearConstraint.cpp b/MMVII/src/Matrix/cLinearConstraint.cpp index 36d9aba481..428b16773b 100755 --- a/MMVII/src/Matrix/cLinearConstraint.cpp +++ b/MMVII/src/Matrix/cLinearConstraint.cpp @@ -20,7 +20,23 @@ template cSetLinearConstraint::cSetLinearConstraint(int aNbVa template void cSetLinearConstraint::Add1Constr(const t1Constr & aConstr) { - mVCstrInit.push_back(aConstr); + mVCstrInit.push_back(aConstr.Dup()); +} + + +template void cSetLinearConstraint::Reset() +{ + mVCstrInit.clear(); + mVCstrReduced.clear(); +} + +template void cSetLinearConstraint::Add1ConstrFrozenVar(int aKVar,const Type & aVal) +{ + cSparseVect aSV; + aSV.AddIV(aKVar,1.0); + cOneLinearConstraint aCstr(aSV,aVal, mVCstrInit.size()); + + Add1Constr(aCstr); } @@ -73,6 +89,26 @@ template void cSetLinearConstraint::Compile(bool ForBench) ); } +template void cSetLinearConstraint::SubstituteInSparseLinearEquation(tSV & aA,Type & aB) const +{ + for (const auto & aCstr : mVCstrReduced) + aCstr.SubstituteInSparseLinearEquation(aA,aB,mBuf); +} + +template void cSetLinearConstraint::SubstituteInDenseLinearEquation(tDV & aA,Type & aB) const +{ + for (const auto & aCstr : mVCstrReduced) + aCstr.SubstituteInDenseLinearEquation(aA,aB); +} + +template void cSetLinearConstraint::SubstituteInOutRSNL(tIO_RSNL& aIO,const tDV & aCurSol) const +{ + for (const auto & aCstr : mVCstrReduced) + aCstr.SubstituteInOutRSNL(aIO,mBuf,aCurSol); +} + + + template void cSetLinearConstraint::Show(const std::string & aMsg) const { StdOut() << "======== SHOWTSELC " << aMsg << " =====================" << std::endl; diff --git a/MMVII/src/Matrix/cResolSysNonLinear.cpp b/MMVII/src/Matrix/cResolSysNonLinear.cpp index f13c93d832..1e2add6bb5 100755 --- a/MMVII/src/Matrix/cResolSysNonLinear.cpp +++ b/MMVII/src/Matrix/cResolSysNonLinear.cpp @@ -56,11 +56,10 @@ int cREAL8_RSNL::CountFreeVariables() const void cREAL8_RSNL::SetPhaseEq() { - if (! 
mInPhaseAddEq) - { - // Compile constraint - mInPhaseAddEq = true; - } + if (mInPhaseAddEq) return; + + InitConstraint(); + mInPhaseAddEq = true; } @@ -70,6 +69,18 @@ void cREAL8_RSNL::SetPhaseEq() /* */ /* ************************************************************ */ +template void cResolSysNonLinear::InitConstraint() +{ + mLinearConstr->Reset(); + for (int aKV=0 ; aKVAdd1ConstrFrozenVar(aKV,mValueFrozenVar.at(aKV)); + } + } + mLinearConstr->Compile(false); +} // ===== constructors / destructors ================ From ccee58811be09baf0407081567dd61e661f67715 Mon Sep 17 00:00:00 2001 From: deseilligny Date: Wed, 3 Jan 2024 18:31:35 +0100 Subject: [PATCH 10/21] In put FixVar by linear cstr --- MMVII/src/Matrix/LinearConstraint.h | 8 +-- MMVII/src/Matrix/cLinearConstraint.cpp | 82 ++++++++++++++++++++++--- MMVII/src/Matrix/cResolSysNonLinear.cpp | 8 ++- 3 files changed, 83 insertions(+), 15 deletions(-) diff --git a/MMVII/src/Matrix/LinearConstraint.h b/MMVII/src/Matrix/LinearConstraint.h index c941c5f592..b83e313805 100755 --- a/MMVII/src/Matrix/LinearConstraint.h +++ b/MMVII/src/Matrix/LinearConstraint.h @@ -94,7 +94,7 @@ template class cOneLinearConstraint : public cMemCheck void SubstituteInOtherConstraint(cOneLinearConstraint & aToSub,cDSVec & aBuf); void SubstituteInDenseLinearEquation (tDV & aA,Type & aB) const; void SubstituteInSparseLinearEquation(tSV & aA,Type & aB,cDSVec & aBuf) const; - void SubstituteInOutRSNL(tIO_RSNL& aIO,cDSVec & aBuf,const tDV & aCurSol) const; + void SubstituteInOutRSNL(tIO_RSNL& aIO,cDSVec & aBuf) const; /// Extract pair with maximal amplitude (in abs) const tCplIV * LinearMax() const; @@ -136,14 +136,14 @@ template class cSetLinearConstraint : public cMemCheck /// Transformate the set of constraint to allow a cascade os substitution void Compile(bool ForBench); /// Add a new constraint (just debug) - void Add1Constr(const t1Constr &); + void Add1Constr(const t1Constr &,const tDV *); void Reset(); - void Add1ConstrFrozenVar(int aKVar,const Type & aVal); + void Add1ConstrFrozenVar(int aKVar,const Type & aVal,const tDV *); void SubstituteInSparseLinearEquation(tSV & aA,Type & aB) const; void SubstituteInDenseLinearEquation (tDV & aA,Type & aB) const; - void SubstituteInOutRSNL(tIO_RSNL& aIO,const tDV & aCurSol) const; + void SubstituteInOutRSNL(tIO_RSNL& aIO) const; private : /// Show all the detail void Show(const std::string & aMsg) const; diff --git a/MMVII/src/Matrix/cLinearConstraint.cpp b/MMVII/src/Matrix/cLinearConstraint.cpp index 428b16773b..bd29bf0246 100755 --- a/MMVII/src/Matrix/cLinearConstraint.cpp +++ b/MMVII/src/Matrix/cLinearConstraint.cpp @@ -18,7 +18,7 @@ template cSetLinearConstraint::cSetLinearConstraint(int aNbVa { } -template void cSetLinearConstraint::Add1Constr(const t1Constr & aConstr) +template void cSetLinearConstraint::Add1Constr(const t1Constr & aConstr,const tDV *) { mVCstrInit.push_back(aConstr.Dup()); } @@ -30,13 +30,13 @@ template void cSetLinearConstraint::Reset() mVCstrReduced.clear(); } -template void cSetLinearConstraint::Add1ConstrFrozenVar(int aKVar,const Type & aVal) +template void cSetLinearConstraint::Add1ConstrFrozenVar(int aKVar,const Type & aVal,const tDV * aCurSol) { cSparseVect aSV; aSV.AddIV(aKVar,1.0); cOneLinearConstraint aCstr(aSV,aVal, mVCstrInit.size()); - Add1Constr(aCstr); + Add1Constr(aCstr,aCurSol); } @@ -101,10 +101,10 @@ template void cSetLinearConstraint::SubstituteInDenseLinearEq aCstr.SubstituteInDenseLinearEquation(aA,aB); } -template void cSetLinearConstraint::SubstituteInOutRSNL(tIO_RSNL& 
aIO,const tDV & aCurSol) const +template void cSetLinearConstraint::SubstituteInOutRSNL(tIO_RSNL& aIO) const { for (const auto & aCstr : mVCstrReduced) - aCstr.SubstituteInOutRSNL(aIO,mBuf,aCurSol); + aCstr.SubstituteInOutRSNL(aIO,mBuf); } @@ -266,6 +266,9 @@ template void cOneLinearConstraint::SubstituteInOtherConstrai template void cOneLinearConstraint::SubstituteInDenseLinearEquation(cDenseVect & aA,Type & aB) const { + // ????????????? + // A X -B = A' X' + AiXi -B = A'(X-X0' + X0') +Ai(C -mLX0) - B + // (A'-Ai mL) X = B - Ai mC Type & aAi = aA(mISubst); aB -= aAi * mCste; @@ -308,7 +311,7 @@ template void cOneLinearConstraint::SubstituteInSparseLinearE } -template void cOneLinearConstraint::SubstituteInOutRSNL(tIO_RSNL& aIO,cDSVec & aBuf,const tDV & aCurSol) const +template void cOneLinearConstraint::SubstituteInOutRSNL(tIO_RSNL& aIO,cDSVec & aBuf) const { // [1] Find the index of mISubst int aKSubst = -1; // Indexe where mGlobVIn potentially equals mISubst @@ -322,14 +325,76 @@ template void cOneLinearConstraint::SubstituteInOutRSNL(tIO_ // if index subst is not involved, nothing to do if (aKSubst<0) return; + +/* Case everybody in diff (or abs) + * + * F(X) = F(X0) + D (X-X0) = D (X-X0) + V0 + * = D' (X'-X0') +Di (Xi-X0i) + V0 + * But constraint is writen : + * mLp.(X'-X0') + (Xi-Xi0) = C + * + * Then : + * F(X) = D' (X'-X0') + V0 + Di(C - mLp .(X'-X0')) = ( D'- Di mLp ) (X'-X0') + V0 +Di C + */ + + bool FirstDerNN = true; // Used to check that we do only once the extension of indexe + for (size_t aKEq=0 ; aKEq::IsIndTmp(aInd)) && aBuf.mSet.mOccupied.at(aInd) ) + { + aIO.mDers.at(aKEq).at(aKVar) -= aBuf.mVec(aInd) * aDerI; // -Di mL + aBuf.mSet.mOccupied.at(aInd) = false; // purge occuo + aBuf.mVec(aInd) = 0; // purge vector + } + } + // [C] modify the derivate for the index, present in constraint but not in equation + for (const auto & aPair : mLP.IV()) + { + if (aBuf.mSet.mOccupied.at(aPair.mInd)) // if + { + aIO.mDers.at(aKEq).push_back(-aPair.mVal * aDerI); // - Di mL + if (FirstDerNN) // (aKEq==0) + aIO.mGlobVInd.push_back(aPair.mInd); + aBuf.mSet.mOccupied.at(aPair.mInd) =false; // purge occupied + aBuf.mVec(aPair.mInd) = 0.0; // purge vector + } + } + + // [D] finish purge + aBuf.mSet.mVIndOcc.clear(); + } + aDerI = 0; // now supress the derivate of substituted variable + FirstDerNN= false; // No longer first Non Null derivate + } + } - /* F(X) = F(X0) + D (X-X0) = D (X-X0) + V0 + /* Case contraint in abs, F in diff + F(X) = F(X0) + D (X-X0) = D (X-X0) + V0 = D' (X'-X0') +Di (Xi-X0i) + V0 = D' (X'-X0') + Di (mC- mL X' -X0i) + V0 = D' (X'-X0') - Di mL (X' -X0' + X0') - Di X0i + V0 + Di mC = (D' -Di mL) (X'-X0') + V0 + Di (mC -X0i - mL X0' ) */ + /* Type aDelta = (mCste-aCurSol(mISubst)); // mC-X0i bool FirstDerNN = true; // Used to check that we do only once the extension of indexe for (size_t aKEq=0 ; aKEq void cOneLinearConstraint::SubstituteInOutRSNL(tIO_ FirstDerNN= false; // No longer first Non Null derivate } } +*/ } template void cOneLinearConstraint::Show() const @@ -435,7 +501,7 @@ cBenchLinearConstr::cBenchLinearConstr(int aNbVar,int aNbCstr) : mLDV.push_back(aDV); // add new sparse V mLSV.push_back(aSV); // add new dense V cOneLinearConstraint aLC(aSV,mV0.DotProduct(aDV),aK); - mSetC.Add1Constr(aLC); + mSetC.Add1Constr(aLC,nullptr); } } } diff --git a/MMVII/src/Matrix/cResolSysNonLinear.cpp b/MMVII/src/Matrix/cResolSysNonLinear.cpp index 1e2add6bb5..61356c8db9 100755 --- a/MMVII/src/Matrix/cResolSysNonLinear.cpp +++ b/MMVII/src/Matrix/cResolSysNonLinear.cpp @@ -76,7 
+76,7 @@ template void cResolSysNonLinear::InitConstraint() { if (mVarIsFrozen.at(aKV)) { - mLinearConstr->Add1ConstrFrozenVar(aKV,mValueFrozenVar.at(aKV)); + mLinearConstr->Add1ConstrFrozenVar(aKV,mValueFrozenVar.at(aKV),&mCurGlobSol); } } mLinearConstr->Compile(false); @@ -333,17 +333,19 @@ template void cResolSysNonLinear::AddObservationLinear Type aNewRHS = aRHS; cDenseVect aNewCoeff = aCoeff.Dup(); + // AX-B = (A' X' + AiXi-B) = A' (X'-X0') + A' X0' +AiXi -B + // B=> B -AiXi -A' X0' for (int aK=0 ; aK Date: Sat, 6 Jan 2024 18:08:15 +0100 Subject: [PATCH 11/21] Non linear hard constraint validated --- MMVII/Doc/Programmer/NonLinearOptim.tex | 2 +- MMVII/include/MMVII_PhgrDist.h | 3 + MMVII/include/MMVII_SysSurR.h | 15 ++- MMVII/src/Bench/BenchMatrix.cpp | 10 +- MMVII/src/Matrix/LinearConstraint.h | 24 +++-- MMVII/src/Matrix/cLinearConstraint.cpp | 38 ++++++-- MMVII/src/Matrix/cResolSysNonLinear.cpp | 95 +++++++++++++++++-- MMVII/src/SymbDerGen/Formulas_Geom2D.h | 53 ++++++++++- MMVII/src/SymbDerGen/GenerateCodes.cpp | 7 ++ .../TutoBenchTrianguRSNL/BenchNetwRSNL.cpp | 2 +- MMVII/src/TutoBenchTrianguRSNL/TrianguRSNL.h | 8 +- .../src/TutoBenchTrianguRSNL/cMainNetwork.cpp | 68 ++++++++++++- .../src/TutoBenchTrianguRSNL/cNetPropCov.cpp | 8 +- 13 files changed, 284 insertions(+), 49 deletions(-) diff --git a/MMVII/Doc/Programmer/NonLinearOptim.tex b/MMVII/Doc/Programmer/NonLinearOptim.tex index 17b77a5c13..a978d2605e 100755 --- a/MMVII/Doc/Programmer/NonLinearOptim.tex +++ b/MMVII/Doc/Programmer/NonLinearOptim.tex @@ -165,7 +165,7 @@ \subsection{Files of MMVII involved} non linear optimisation, from the user point of view the main class is {\tt cResolSysNonLinear}; - \item {\tt src/Bench/BenchResolSysNonLinear.cpp} contains the code for the $2d$-triangulation + \item files on folder {\tt src/TutoBenchTrianguRSNL/} contain the code for the $2d$-triangulation example; \item {\tt src/SymbDerGen/Formulas\_Geom2D.h } and {\tt src/SymbDerGen/GenerateCodes.cpp } diff --git a/MMVII/include/MMVII_PhgrDist.h b/MMVII/include/MMVII_PhgrDist.h index fa683196ac..2de30ef056 100755 --- a/MMVII/include/MMVII_PhgrDist.h +++ b/MMVII/include/MMVII_PhgrDist.h @@ -163,6 +163,9 @@ NS_SymbolicDerivative::cCalculator * EqDist3DParam(bool WithDerive,int a /// let pk=(xk,yk,zk), R=(r00..r22) Residual : R(p2-p2) - {dx,dy,dz} NS_SymbolicDerivative::cCalculator * EqTopoSubFrame(bool WithDerive,int aSzBuf); +/// Sum of square of unknown, to test non linear constraints +NS_SymbolicDerivative::cCalculator * EqSumSquare(int aNb,bool WithDerive,int aSzBuf,bool ReUse); + // ............. Equation implying 2D distance conservation ............. 
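Editor's note (illustration only, not part of the patch above): the calculator declared just above, EqSumSquare, is a test helper whose residual compares the sum of squares of its aNb unknowns with the observed value SXX. A minimal hand-written equivalent of that residual is sketched below; the function name and the sign convention are assumptions of this sketch, since the real code is generated symbolically elsewhere in the series.

```cpp
#include <vector>

// Illustrative only: the residual that a calculator like EqSumSquare evaluates,
// written by hand.  Driving it to zero enforces the non linear constraint
// "sum of squares of the unknowns equals the observed value SXX".
double SumSquareResidual(const std::vector<double> & aVUk, double aSXX)
{
    double aSum = 0.0;
    for (double aX : aVUk)      // accumulate x_k^2 over the unknowns
        aSum += aX * aX;
    return aSum - aSXX;         // residual, linearized at the current solution
}
```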
diff --git a/MMVII/include/MMVII_SysSurR.h b/MMVII/include/MMVII_SysSurR.h index 7c9a06d49a..a7168f24cf 100755 --- a/MMVII/include/MMVII_SysSurR.h +++ b/MMVII/include/MMVII_SysSurR.h @@ -147,6 +147,7 @@ class cREAL8_RSNL int mNbVar; bool mInPhaseAddEq; ///< check that dont modify val fixed after adding equations std::vector mVarIsFrozen; ///< indicate for each var is it is frozen + int mNbIter; ///< Number of iteration made }; @@ -180,6 +181,7 @@ template class cResolSysNonLinear : public cREAL8_RSNL /// destructor ~cResolSysNonLinear(); + /// Accessor const tDVect & CurGlobSol() const; cREAL8_RSNL::tDVect R_CurGlobSol() const override; ///< tREAL8 Equivalent @@ -195,7 +197,7 @@ template class cResolSysNonLinear : public cREAL8_RSNL void SetCurSol(int aNumV,const Type&) ; void R_SetCurSol(int aNumV,const tREAL8&) override; ///< tREAL8 Equivalent - tLinearSysSR * SysLinear() ; + tLinearSysSR * SysLinear() ; ///< Accessor /// Solve solution, update the current solution, Reset the least square system const tDVect & SolveUpdateReset(const Type & aLVM =0.0) ; @@ -218,6 +220,8 @@ template class cResolSysNonLinear : public cREAL8_RSNL void AddEqFixNewVal(const tObjWUk & anObj,const cPtxd &,const cPtxd &,const Type& aWeight); + void AddNonLinearConstr(tCalc * aCalcVal,const tVectInd & aVInd,const tStdVect& aVObs,bool OnlyIfFirst); + /// Basic Add 1 equation , no bufferistion, no schur complement void CalcAndAddObs(tCalc *,const tVectInd &,const tStdVect& aVObs,const tResidualW & = tResidualW()); void R_CalcAndAddObs(tCalc *,const tVectInd &,const tR_Up::tStdVect& aVObs,const tR_Up::tResidualW & ) override; @@ -259,6 +263,8 @@ template class cResolSysNonLinear : public cREAL8_RSNL int GetNbObs() const; ///< get number of observations (last iteration if after reset, or current number if after AddObs) + void AddConstr(const tSVect & aVect,const Type & aCste,bool OnlyIfFirstIter=true); + void SupressAllConstr(); private : cResolSysNonLinear(const tRSNL & ) = delete; @@ -270,8 +276,8 @@ template class cResolSysNonLinear : public cREAL8_RSNL void InitConstraint() override; /** Bases function of calculating derivatives, dont modify the system as is - to avoid in case of schur complement */ - void CalcVal(tCalc *,std::vector&,const tStdVect & aVTmp,bool WithDer,const tResidualW & ); + to avoid in case of schur complement , if it is used for linearizeing constraint "ForConstr" the process is slightly diff*/ + void CalcVal(tCalc *,std::vector&,const tStdVect & aVTmp,bool WithDer,const tResidualW &,bool ForConstr ); tDVect mCurGlobSol; ///< Curent solution tLinearSysSR* mSysLinear; ///< Sys to solve equations, equation are concerning the differences with current solution @@ -282,6 +288,9 @@ template class cResolSysNonLinear : public cREAL8_RSNL /// handle the linear constraint : fix var, shared var, gauge ... 
cSetLinearConstraint* mLinearConstr; + + std::vector mVCstrCstePart; /// Cste part of linear constraint that dont have specific struct (i.e vs Froze/Share) + std::vector mVCstrLinearPart; /// Linerar Part of }; diff --git a/MMVII/src/Bench/BenchMatrix.cpp b/MMVII/src/Bench/BenchMatrix.cpp index b813cad409..1f2a3d2b50 100755 --- a/MMVII/src/Bench/BenchMatrix.cpp +++ b/MMVII/src/Bench/BenchMatrix.cpp @@ -698,12 +698,11 @@ template void TplBenchLsq() cDenseVect aRandSol(aNbVar,eModeInitImage::eMIA_RandCenter); // use to test NonLinear in mode AddObservationLinear - cResolSysNonLinear aSysLin(eModeSSR::eSSR_LsqDense,aRandSol); + cResolSysNonLinear aSysNonLin(eModeSSR::eSSR_LsqDense,aRandSol); // juste make several time the test , becaude chek also reseting for (int aNbTest=0 ; aNbTest<3 ; aNbTest++) { - std::vector< cSparseVect > aVSV; for (int aK=0 ; aK<= 3*aNbEq ; aK++) { @@ -716,9 +715,9 @@ template void TplBenchLsq() aSys->PublicAddObservation(aW,aVCoeff,aCste); // Add sparse or dense if (aK%2) - aSysLin.AddObservationLinear(aW,aVCoeff,aCste); + aSysNonLin.AddObservationLinear(aW,aVCoeff,aCste); else - aSysLin.AddObservationLinear(aW,cDenseVect(aVCoeff,aNbVar),aCste); + aSysNonLin.AddObservationLinear(aW,cDenseVect(aVCoeff,aNbVar),aCste); } static int aCpt = 0; aCpt++; @@ -779,7 +778,7 @@ template void TplBenchLsq() } } - cDenseVect aSolLin = aSysLin.SolveUpdateReset() - aVSol[0]; + cDenseVect aSolLin = aSysNonLin.SolveUpdateReset() - aVSol[0]; MMVII_INTERNAL_ASSERT_bench(aSolLin.L2Norm()<1e-5,"Cmp Least Square"); for (int aKSys=0 ; aKSys(aK,aK); TplBenchDenseMatr(aK,aK); } diff --git a/MMVII/src/Matrix/LinearConstraint.h b/MMVII/src/Matrix/LinearConstraint.h index b83e313805..5073d68198 100755 --- a/MMVII/src/Matrix/LinearConstraint.h +++ b/MMVII/src/Matrix/LinearConstraint.h @@ -7,6 +7,9 @@ using namespace NS_SymbolicDerivative; using namespace MMVII; +// for some time we maintain a possibility to go to the "old" fix var system +#define WithNewLinearCstr true + namespace MMVII { // static bool DEBUG=false; @@ -84,11 +87,9 @@ template class cOneLinearConstraint : public cMemCheck typedef typename tSV::tCplIV tCplIV; typedef cInputOutputRSNL tIO_RSNL; - /** In Cstr we can fix the index of subst, if it value -1 let the system select the best , fixing can be usefull in case - * of equivalence - */ cOneLinearConstraint(const tSV&aLP,const Type& aCste,int aNum); - cOneLinearConstraint Dup() const; + /// If aCurSol != 0 constraint is exprimed relatively to current sol + cOneLinearConstraint Dup(const tDV* aCurSol) const; // Subsract into "aToSub" so as to annulate the coeff with mISubst void SubstituteInOtherConstraint(cOneLinearConstraint & aToSub,cDSVec & aBuf); @@ -130,20 +131,29 @@ template class cSetLinearConstraint : public cMemCheck typedef cDenseVect tDV; typedef typename tSV::tCplIV tCplIV; typedef cOneLinearConstraint t1Constr; + typedef cLinearOverCstrSys tLinearSysSR; /// Cstr : allow the buffer for computatio, cSetLinearConstraint(int aNbVar); /// Transformate the set of constraint to allow a cascade os substitution void Compile(bool ForBench); - /// Add a new constraint (just debug) - void Add1Constr(const t1Constr &,const tDV *); + /// Add a new constraint + void Add1Constr(const t1Constr &,const tDV * aCurSol); + /// Add a new constraint + void Add1Constr(const tSV&,const Type & aCste,const tDV * aCurSol); + /// Add a constraint of type Fix Var to a solution + void Add1ConstrFrozenVar(int aKVar,const Type & aVal,const tDV *); void Reset(); - void Add1ConstrFrozenVar(int 
aKVar,const Type & aVal,const tDV *); + // ============ These 3 method modify equation to take into account substition =========== void SubstituteInSparseLinearEquation(tSV & aA,Type & aB) const; void SubstituteInDenseLinearEquation (tDV & aA,Type & aB) const; void SubstituteInOutRSNL(tIO_RSNL& aIO) const; + + /// This add the constraint to the system, required because all the subst-var having been elimitated + void AddConstraint2Sys(tLinearSysSR &); + private : /// Show all the detail void Show(const std::string & aMsg) const; diff --git a/MMVII/src/Matrix/cLinearConstraint.cpp b/MMVII/src/Matrix/cLinearConstraint.cpp index bd29bf0246..5b30f79e0c 100755 --- a/MMVII/src/Matrix/cLinearConstraint.cpp +++ b/MMVII/src/Matrix/cLinearConstraint.cpp @@ -18,9 +18,15 @@ template cSetLinearConstraint::cSetLinearConstraint(int aNbVa { } -template void cSetLinearConstraint::Add1Constr(const t1Constr & aConstr,const tDV *) +template void cSetLinearConstraint::Add1Constr(const t1Constr & aConstr,const tDV * aCurSol) { - mVCstrInit.push_back(aConstr.Dup()); + mVCstrInit.push_back(aConstr.Dup(aCurSol)); +} + +template void cSetLinearConstraint::Add1Constr(const tSV& aSV,const Type & aCste,const tDV * aCurSol) +{ + cOneLinearConstraint aCstr(aSV,aCste, mVCstrInit.size()); + Add1Constr(aCstr,aCurSol); } @@ -34,9 +40,7 @@ template void cSetLinearConstraint::Add1ConstrFrozenVar(int a { cSparseVect aSV; aSV.AddIV(aKVar,1.0); - cOneLinearConstraint aCstr(aSV,aVal, mVCstrInit.size()); - - Add1Constr(aCstr,aCurSol); + Add1Constr(aSV,aVal,aCurSol); } @@ -45,7 +49,7 @@ template void cSetLinearConstraint::Compile(bool ForBench) // make a copy of initial cstr : use dup because shared pointer on mLP .... mVCstrReduced.clear(); for (const auto & aCstr : mVCstrInit) - mVCstrReduced.push_back(aCstr.Dup()); + mVCstrReduced.push_back(aCstr.Dup(nullptr)); size_t aNbReduced = 0; while (aNbReduced != mVCstrInit.size()) @@ -136,6 +140,15 @@ template void cSetLinearConstraint::TestSameSpace() MMVII_INTERNAL_ASSERT_bench(aD<1e-5,"cSetLinearConstraint:: TestSameSpace"); } +template void cSetLinearConstraint::AddConstraint2Sys(tLinearSysSR & aSys) +{ + // A priori identic to use init or reduced, simpler with init as there is no reconstruction + for (const auto & aCstr : mVCstrInit) + { + aSys.PublicAddObservation(1.0,aCstr.mLP,aCstr.mCste); + } +} + /* ************************************************************ */ @@ -200,11 +213,19 @@ template cOneLinearConstraint::cOneLinearConstraint(const tSV { } -template cOneLinearConstraint cOneLinearConstraint::Dup() const +// If aCUR SOL to exprimate the constraint +// 0= A . X -C = A. (X-X0) + A .X0 -C : C -= A .X0 +template cOneLinearConstraint cOneLinearConstraint::Dup(const tDV * aCurSol) const { cOneLinearConstraint aRes = *this; aRes.mLP = mLP.Dup(); + if (aCurSol) + { + for (const auto & aPair : aRes.mLP.IV()) + aRes.mCste -= aPair.mVal * (*aCurSol)(aPair.mInd); + } + return aRes; } @@ -539,11 +560,9 @@ cBenchLinearConstr::cBenchLinearConstr(int aNbVar,int aNbCstr) : void BenchLinearConstr(cParamExeBench & aParam) { - int aMul = std::min(4,1+aParam.Level()); //return; if (! 
aParam.NewBench("LinearConstr")) return; - StdOut() << "BenchLinearConstrBenchLinearConstr\n"; // std::vector aV{{2,3},{3,2}}; for (int aK=0 ; aK<50 ; aK++) @@ -553,6 +572,7 @@ void BenchLinearConstr(cParamExeBench & aParam) cBenchLinearConstr(10,3); cBenchLinearConstr(20,5); } + int aMul = std::min(4,1+aParam.Level()); int aNb = std::max(1,int(100.0/pow(aMul,4)) ); for (int aK=0 ; aK void cResolSysNonLinear::InitConstraint() { mLinearConstr->Reset(); + // Add the constraint specific to Frozen-Var for (int aKV=0 ; aKV void cResolSysNonLinear::InitConstraint() mLinearConstr->Add1ConstrFrozenVar(aKV,mValueFrozenVar.at(aKV),&mCurGlobSol); } } + + // Add the general constraint + for (size_t aKC=0 ; aKCAdd1Constr(mVCstrLinearPart.at(aKC),mVCstrCstePart.at(aKC),&mCurGlobSol); + } mLinearConstr->Compile(false); } +template void cResolSysNonLinear::AddConstr(const tSVect & aVect,const Type & aCste,bool OnlyIfFirstIter) +{ + if (OnlyIfFirstIter && (mNbIter!=0)) return; + + mVCstrLinearPart.push_back(aVect.Dup()); + mVCstrCstePart.push_back(aCste); +} + + + +// template void cResolSysNonLinear::I // ===== constructors / destructors ================ @@ -274,6 +293,9 @@ template void cResolSysNonLinear::AddEqFixNewVal(const tObj template void cResolSysNonLinear::ModifyFrozenVar (tIO_RSNL& aIO) { // CHANGE HERE +#if (WithNewLinearCstr) + mLinearConstr->SubstituteInOutRSNL(aIO); +#else for (size_t aKVar=0 ; aKVar void cResolSysNonLinear::ModifyFrozenVar (tIO_RSNL& } } } +#endif } template void cResolSysNonLinear::AddObservationLinear @@ -302,6 +325,14 @@ template void cResolSysNonLinear::AddObservationLinear Type aNewRHS = aRHS; cSparseVect aNewCoeff; +#if (WithNewLinearCstr) + for (const auto & aPair :aCoeff) + { + aNewRHS -= mCurGlobSol(aPair.mInd) * aPair.mVal; + aNewCoeff.AddIV(aPair); + } + mLinearConstr->SubstituteInSparseLinearEquation(aNewCoeff,aNewRHS); +#else for (const auto & aPair :aCoeff) { // CHANGE HERE @@ -317,6 +348,7 @@ template void cResolSysNonLinear::AddObservationLinear aNewCoeff.AddIV(aPair); } } +#endif currNbObs++; /// Check JMM mSysLinear->PublicAddObservation(aWeight,aNewCoeff,aNewRHS); } @@ -332,6 +364,11 @@ template void cResolSysNonLinear::AddObservationLinear SetPhaseEq(); Type aNewRHS = aRHS; cDenseVect aNewCoeff = aCoeff.Dup(); +#if (WithNewLinearCstr) + for (int aK=0 ; aKSubstituteInDenseLinearEquation(aNewCoeff,aNewRHS); +#else // AX-B = (A' X' + AiXi-B) = A' (X'-X0') + A' X0' +AiXi -B // B=> B -AiXi -A' X0' @@ -348,6 +385,7 @@ template void cResolSysNonLinear::AddObservationLinear aNewRHS -= mCurGlobSol(aK) * aCoeff(aK); // -A' X0' } } +#endif currNbObs++; /// Check JMM mSysLinear->PublicAddObservation(aWeight,aNewCoeff,aNewRHS); } @@ -427,14 +465,16 @@ template void cResolSysNonLinear::CalcVal std::vector& aVIO, const tStdVect & aValTmpUk, bool WithDer, - const tResidualW & aWeighter + const tResidualW & aWeighter, + bool ForConstraint ) { // This test is always true 4 now, which I(MPD) was not sure // The possibility of having several comes from potential paralellization // MMVII_INTERNAL_ASSERT_tiny(aVIO.size()==1,"CalcValCalcVal"); - SetPhaseEq(); + if (!ForConstraint) + SetPhaseEq(); MMVII_INTERNAL_ASSERT_tiny(aCalcVal->NbInBuf()==0,"Buff not empty"); // Usefull only to test correcness of DoOneEval @@ -496,10 +536,41 @@ template void cResolSysNonLinear::CalcVal } aIO.mWeights = aWeighter.WeightOfResidual(aIO.mVals); // StdOut() << "HHHhUuHH " << aIO.mVals << " " << aIO.mWeights << std::endl; - ModifyFrozenVar(aIO); + if (! 
ForConstraint) + ModifyFrozenVar(aIO); } } +template void cResolSysNonLinear::AddNonLinearConstr + ( + tCalc * aCalcVal, + const tVectInd & aVInd, + const tStdVect& aVObs, + bool OnlyIfFirst + ) +{ + std::vector aVIO(1,tIO_RSNL(aVInd,aVObs)); + CalcVal(aCalcVal,aVIO,{},true,tResidualW(),true); + + // Parse all the linearized equation + for (const auto & aIO : aVIO) + { + // check we dont use temporary value + MMVII_INTERNAL_ASSERT_tiny(aIO.mNbTmpUk==0,"Cannot use tmp uk w/o Schur complement"); + // parse all values + for (size_t aKVal=0 ; aKVal void cResolSysNonLinear::CalcAndAddObs ( @@ -510,7 +581,7 @@ template void cResolSysNonLinear::CalcAndAddObs ) { std::vector aVIO(1,tIO_RSNL(aVInd,aVObs)); - CalcVal(aCalcVal,aVIO,{},true,aWeigther); + CalcVal(aCalcVal,aVIO,{},true,aWeigther,false); AddObs(aVIO); } @@ -578,7 +649,7 @@ template void cResolSysNonLinear::AddEq2Subst ) { std::vector aVIO(1,tIO_RSNL(aVInd,aVObs)); - CalcVal(aCalc,aVIO,aSetIO.ValTmpUk(),true,aWeighter); + CalcVal(aCalc,aVIO,aSetIO.ValTmpUk(),true,aWeighter,false); aSetIO.AddOneEq(aVIO.at(0)); } @@ -592,7 +663,7 @@ template void cResolSysNonLinear::R_AddEq2Subst { std::vector aVIO(1,tIO_RSNL(aVInd,VecConvert(aR_VObs))); - CalcVal(aCalc,aVIO,VecConvert(aSetIO.ValTmpUk()),true,cREAL8_RWAdapt(&aWeighter)); + CalcVal(aCalc,aVIO,VecConvert(aSetIO.ValTmpUk()),true,cREAL8_RWAdapt(&aWeighter),false); cInputOutputRSNL aRIO (aVInd,aR_VObs); ConvertVWD(aRIO,aVIO.at(0)); @@ -634,7 +705,6 @@ template <> void cResolSysNonLinear::R_AddObsWithTmpUK (const tR_Up::tSe template const cDenseVect & cResolSysNonLinear::SolveUpdateReset(const Type & aLVM) { -//StdOut() << "KKKKKKKKKKKKKKKkkk " << aLVM << "\n"; if (mNbVar>currNbObs) { //StdOut() << "currNbObscurrNbObs " << currNbObs << " RRRRR=" << currNbObs - mNbVar << std::endl; @@ -643,12 +713,20 @@ template const cDenseVect & cResolSysNonLinear::SolveUp lastNbObs = currNbObs; mInPhaseAddEq = false; // for var frozen, they are not involved in any equation, we must fix their value other way + +#if (WithNewLinearCstr) + mLinearConstr->AddConstraint2Sys(*mSysLinear); +#else for (int aK=0 ; aK0) { AddEqFixVar(aK,CurSol(aK),mSysLinear->LVMW(aK)*aLVM); @@ -659,6 +737,7 @@ template const cDenseVect & cResolSysNonLinear::SolveUp mSysLinear->Reset(); currNbObs = 0; + mNbIter++; return mCurGlobSol; } diff --git a/MMVII/src/SymbDerGen/Formulas_Geom2D.h b/MMVII/src/SymbDerGen/Formulas_Geom2D.h index 8fe9304732..2195af0599 100755 --- a/MMVII/src/SymbDerGen/Formulas_Geom2D.h +++ b/MMVII/src/SymbDerGen/Formulas_Geom2D.h @@ -14,6 +14,45 @@ using namespace NS_SymbolicDerivative; namespace MMVII { +/** Class used in Bench of non linear constraint, it returns the some of square of 8 unknosnw, it is used + in the 8 neighbors; purely articicial ... 
+ */ + +class cFormulaSumSquares +{ + public : + cFormulaSumSquares(int aNb) : + mNb (aNb) + { + } + + std::vector VNamesUnknowns() const + { + std::vector aRes; + for (int aK=0 ; aK VNamesObs() { return {"SXX"}; } + + std::string FormulaName() const { return "SumSquare_"+ToStr(mNb);} + + template + std::vector formula + ( + const std::vector & aVUk, + const std::vector & aVObs + ) const + { + auto aSum = aVObs.at(0); + for (int aK=0 ; aK * EqTopoSubFrame(bool WithDerive,int aSzBuf) return TplEqTopoSubFrame(WithDerive,aSzBuf); } +cCalculator * EqSumSquare(int aNb,bool WithDerive,int aSzBuf,bool ReUse) +{ + return StdAllocCalc(NameFormula(cFormulaSumSquares(8),WithDerive),aSzBuf,false,ReUse); +} + /* **************************** */ /* BENCH PART */ /* **************************** */ @@ -654,6 +659,8 @@ int cAppliGenCode::Exe() for (const auto WithDer : {true,false}) { + GenCodesFormula((tREAL8*)nullptr,cFormulaSumSquares(8),WithDer); // RIGIDBLOC + GenCodesFormula((tREAL8*)nullptr,cFormulaBlocRigid(),WithDer); // RIGIDBLOC GenCodesFormula((tREAL8*)nullptr,cFormulaRattBRExist(),WithDer); // RIGIDBLOC diff --git a/MMVII/src/TutoBenchTrianguRSNL/BenchNetwRSNL.cpp b/MMVII/src/TutoBenchTrianguRSNL/BenchNetwRSNL.cpp index 1681267a76..93941093bd 100755 --- a/MMVII/src/TutoBenchTrianguRSNL/BenchNetwRSNL.cpp +++ b/MMVII/src/TutoBenchTrianguRSNL/BenchNetwRSNL.cpp @@ -55,7 +55,7 @@ template void TplOneBenchSSRNL for (int aK=0 ; aK < THE_NB_ITER ; aK++) { double aWGauge = (aCpt%2) ? -1 : 100; // alternate "hard" constraint and soft, to test more .. - anEc = aBN.DoOneIterationCompensation(aWGauge,true); + anEc = aBN.DoOneIterationCompensation(aWGauge,true,true); } if (anEc>aPrec) { diff --git a/MMVII/src/TutoBenchTrianguRSNL/TrianguRSNL.h b/MMVII/src/TutoBenchTrianguRSNL/TrianguRSNL.h index 250f4450a1..e5c8c47fb1 100755 --- a/MMVII/src/TutoBenchTrianguRSNL/TrianguRSNL.h +++ b/MMVII/src/TutoBenchTrianguRSNL/TrianguRSNL.h @@ -30,7 +30,7 @@ namespace MMVII uniqueness of solution, some arbitrary constraint are added on "frozen" points (X0=0,Y0=0 and X1=0) Classes : - # cPNetwork represent one point of the network + # cPNetwork represent one point of the network # cMainNetwork represent the network itself */ namespace NS_Bench_RSNL @@ -120,17 +120,17 @@ template class cMainNetwork Type NetSz() const {return Norm2(mBoxInd.Sz());} /// If we use this iteration for covariance calculation , we dont add constraint, and dont solve - Type DoOneIterationCompensation(double aWeigthGauge,bool WithCalcReset); + Type DoOneIterationCompensation(double aWeigthGauge,bool WithCalcReset,bool CanMangleCstr); - /// Distance observed between 2 points, can be redefines if we want to add noise + /// Distance observed between 2 points, can be redefined if we want to add noise virtual Type ObsDist(const tPNet & aPN1,const tPNet & aPN2) const; /// A network can filter the linking on it own criteria, default -> true virtual bool OwnLinkingFiltrage(const cPt2di & aP1,const cPt2di & aP2) const; Type CalcResidual(); - void AddGaugeConstraint(Type aWeight); // W<0 : hard constraint, W>0 soft, W==0 None + void AddGaugeConstraint(Type aWeight,bool CanUseMangle); // W<0 : hard constraint, W>0 soft, W==0 None /// Access to CurSol of mSys diff --git a/MMVII/src/TutoBenchTrianguRSNL/cMainNetwork.cpp b/MMVII/src/TutoBenchTrianguRSNL/cMainNetwork.cpp index 1d389d5899..e680aab515 100755 --- a/MMVII/src/TutoBenchTrianguRSNL/cMainNetwork.cpp +++ b/MMVII/src/TutoBenchTrianguRSNL/cMainNetwork.cpp @@ -257,8 +257,16 @@ template Type cMainNetwork 
::CalcResidual() return sqrt(aSumResidual / aNbPairTested ); } -template void cMainNetwork ::AddGaugeConstraint(Type aWeightFix) +template void cMainNetwork::AddGaugeConstraint(Type aWeightFix,bool CanMangle) { + // If true, instead of using FrozenVar, we use a random mix of all the constraint, jut to test + // the correctness of the + static int aCptWNeg = 0; + bool isHardConstr = (aWeightFix<0); + aCptWNeg += isHardConstr; + bool doMangleCstr = isHardConstr && (aCptWNeg%2==0) && CanMangle; + + if (aWeightFix==0) return; // Compute dist to sol + add constraint for fixed var for (const auto & aPN : mVPts) @@ -270,27 +278,77 @@ template void cMainNetwork ::AddGaugeConstraint(Type aWeightF if (aWeightFix>=0) mSys->AddEqFixVar(aPN.mNumY,aPN.TheorPt().y(),aWeightFix); else + { mSys->SetFrozenVar(aPN.mNumY,aPN.TheorPt().y()); + } } if (aPN.mFrozenX) // If X is frozenn add equation fixing X to its theoreticall value { + // Case soft constraint if (aWeightFix>=0) mSys->AddEqFixVar(aPN.mNumX,aPN.TheorPt().x(),aWeightFix); - else - mSys->SetFrozenVar(aPN.mNumX,aPN.TheorPt().x()); + else // case of "hard" constraint + { + // case mangling, we will build a constraint involving all neighbors + if (doMangleCstr) + { + // Use for linear constraint + cSparseVect aLC; // Linear constraint + Type aCste = 0.0; // constant + + // use for Non Linear constraint + std::vector aVInd; + Type aSumSq = 0.0; + + for (const auto & aDelta : AllocNeighbourhood<2>(2)) // Parse all 8 neighbors + { + cPt2di aPixNeigh = aPN.mInd + aDelta; // neighboring point + if (IsInGrid(aPixNeigh)) // if inside the grid + { + const cPNetwork & aPN = PNetOfGrid(aPixNeigh); // network point corresponding + if (aPN.mNumX >=0) // if NumX is not a temporary + { + // Linear constraint + Type aCoeff = RandUnif_C_NotNull(0.1); // randomize the contribution + aLC.AddIV(aPN.mNumX,aCoeff); + aCste += aCoeff * aPN.mTheorPt.x(); + // Non linear constraint + aSumSq += Square(aPN.mTheorPt.x()); + aVInd.push_back(aPN.mNumX); + } + } + } + // we can do constraint only if we have the good number of unknowns + if (aVInd.size()==8) + { + auto aCalc = EqSumSquare(8,true,1,true); + mSys->AddNonLinearConstr(aCalc,aVInd,{aSumSq},true); + } + else + mSys->AddConstr(aLC,aCste); + } + else + { + mSys->SetFrozenVar(aPN.mNumX,aPN.TheorPt().x()); + } + } } } + if (doMangleCstr) + { + StdOut() << "doMangleCstrdoMangleCstr " << mWithSchur << "\n"; + } } -template Type cMainNetwork::DoOneIterationCompensation(double aWeigthGauge,bool WithCalcReset) +template Type cMainNetwork::DoOneIterationCompensation(double aWeigthGauge,bool WithCalcReset,bool CanMangleCstr) { Type aResidual = CalcResidual() ; // if we are computing covariance we want it in a free network (the gauge constraint // in the local network have no meaning in the coordinate of the global network) - AddGaugeConstraint(aWeigthGauge); + AddGaugeConstraint(aWeigthGauge,CanMangleCstr); // Add observation on distances diff --git a/MMVII/src/TutoBenchTrianguRSNL/cNetPropCov.cpp b/MMVII/src/TutoBenchTrianguRSNL/cNetPropCov.cpp index f9f7a0f9d3..0a1d1a63ae 100755 --- a/MMVII/src/TutoBenchTrianguRSNL/cNetPropCov.cpp +++ b/MMVII/src/TutoBenchTrianguRSNL/cNetPropCov.cpp @@ -681,7 +681,7 @@ template Type cCovNetwork::SolveByCovPropagation(double aChe // Add a gauge constraint for the main newtork, as all subnetnwork are computed up to a rotation // do it before propag, as required in case of hard constraint - this->AddGaugeConstraint(-1); + this->AddGaugeConstraint(-1,false); // for all subnetwork propagate the 
covariance for (auto & aPtrNet : mVNetElem) aPtrNet->PropagCov(aCheatMT); @@ -791,12 +791,12 @@ template Type cElemNetwork::ComputeCovMatrix(double aWGaugeC for (int aK=0 ; aK<(aNbIter-1); aK++) { // this->DoOneIterationCompensation(10.0,true); // Iterations with a gauge and solve - this->DoOneIterationCompensation(-1,true); // Iterations with a gauge and solve + this->DoOneIterationCompensation(-1,true,false); // Iterations with a gauge and solve, No Mangling of cstr } Type aRes = this->CalcResidual(); // memorization of residual // last iteration with a gauge w/o solve (because solving would reinit the covariance) - this->DoOneIterationCompensation(aWGaugeCovMatr,false); + this->DoOneIterationCompensation(aWGaugeCovMatr,false,false); // StdOut() << "aWGaugeCovMatr " << aWGaugeCovMatr << std::endl; @@ -1127,7 +1127,7 @@ int cAppli_TestPropCov::Exe() double aRefRes =100; for (int aK=0 ; aK < 10 ; aK++) { - aRefRes = mMainNet->DoOneIterationCompensation(100.0,true); + aRefRes = mMainNet->DoOneIterationCompensation(100.0,true,false); } aSomRefRes += aRefRes; From 0d15f5906cdddd2586f7caae40ecb0471b96e071 Mon Sep 17 00:00:00 2001 From: deseilligny Date: Sun, 7 Jan 2024 21:11:02 +0100 Subject: [PATCH 12/21] Shared parameter , almost OK (pb on avg) --- MMVII/include/MMVII_SysSurR.h | 20 +++- MMVII/src/BundleAdjustment/BundleAdjustment.h | 6 + MMVII/src/BundleAdjustment/cAppliBundAdj.cpp | 12 +- .../src/BundleAdjustment/cMMVII_BundleAdj.cpp | 105 ++++++++++++++++++ MMVII/src/Matrix/cResolSysNonLinear.cpp | 60 +++++++++- .../src/TutoBenchTrianguRSNL/cMainNetwork.cpp | 13 ++- 6 files changed, 204 insertions(+), 12 deletions(-) diff --git a/MMVII/include/MMVII_SysSurR.h b/MMVII/include/MMVII_SysSurR.h index a7168f24cf..4b31c397e7 100755 --- a/MMVII/include/MMVII_SysSurR.h +++ b/MMVII/include/MMVII_SysSurR.h @@ -138,16 +138,30 @@ class cREAL8_RSNL void UnfrozeAll() ; ///< indicate it var must be frozen /unfrozen bool VarIsFrozen(int aK) const; ///< indicate it var must be frozen /unfrozen void AssertNotInEquation() const; ///< verify that we are notin equation step (to allow froze modification) + // To update with Shared int CountFreeVariables() const; ///< number of free variables + + // ------------------ Handling shared unknowns -------------------- + void SetShared(const std::vector & aVUk); + void SetUnShared(const std::vector & aVUk); + void SetAllUnShared(); + + // === protected : + static constexpr int TheLabelFrozen =-1; + static constexpr int TheLabelNoEquiv =-2; + void SetPhaseEq(); /// Mut be defined in inherited class because maniupulate mLinearConstr which depend of type virtual void InitConstraint() = 0; int mNbVar; - bool mInPhaseAddEq; ///< check that dont modify val fixed after adding equations - std::vector mVarIsFrozen; ///< indicate for each var is it is frozen - int mNbIter; ///< Number of iteration made + bool mInPhaseAddEq; ///< check that dont modify val fixed after adding equations + std::vector mVarIsFrozen; ///< indicate for each var is it is frozen + int mNbIter; ///< Number of iteration made + // int mNbUnkown; + int mCurMaxEquiv; ///< Used to label the + std::vector mEquivNum; ///< Equivalence numerotation, used for shared unknowns }; diff --git a/MMVII/src/BundleAdjustment/BundleAdjustment.h b/MMVII/src/BundleAdjustment/BundleAdjustment.h index 71e4209a0e..7dd1132753 100644 --- a/MMVII/src/BundleAdjustment/BundleAdjustment.h +++ b/MMVII/src/BundleAdjustment/BundleAdjustment.h @@ -133,6 +133,8 @@ class cMMVII_BundleAdj void SetParamFrozenCalib(const std::string & 
aPattern); void SetViscosity(const tREAL8& aViscTr,const tREAL8& aViscAngle); void SetFrozenCenters(const std::string & aPattern); + void SetSharedIntrinsicParams(const std::vector &); + void AddPoseViscosity(); void AddConstrainteRefPose(); @@ -159,6 +161,8 @@ class cMMVII_BundleAdj /// One It for 1 pack of GCP (4 now 1 pack allowed, but this may change) void OneItere_OnePackGCP(const cSetMesImGCP *); + void CompileSharedIntrinsicParams(bool ForAvg); + //============== Data ============================= cPhotogrammetricProject * mPhProj; @@ -181,6 +185,8 @@ class cMMVII_BundleAdj std::string mPatParamFrozenCalib; /// Pattern for name of paramater of internal calibration std::string mPatFrozenCenter; /// Pattern for name of pose with frozen centers + std::vector mVPatShared; + // =================== Information to use ================== // - - - - - - - - GCP - - - - - - - - - - - diff --git a/MMVII/src/BundleAdjustment/cAppliBundAdj.cpp b/MMVII/src/BundleAdjustment/cAppliBundAdj.cpp index 35c8475f0f..24622db39b 100644 --- a/MMVII/src/BundleAdjustment/cAppliBundAdj.cpp +++ b/MMVII/src/BundleAdjustment/cAppliBundAdj.cpp @@ -39,12 +39,11 @@ class cAppliBundlAdj : public cMMVII_Appli std::vector mParamRefOri; int mNbIter; - - std::string mPatParamFrozCalib; std::string mPatFrosenCenters; std::vector mViscPose; tREAL8 mLVM; // Levenberk Markard + std::vector mVSharedIP; // Vector for shared intrinsic param }; cAppliBundlAdj::cAppliBundlAdj(const std::vector & aVArgs,const cSpecMMVII_Appli & aSpec) : @@ -94,7 +93,8 @@ cCollecSpecArg2007 & cAppliBundlAdj::ArgOpt(cCollecSpecArg2007 & anArgOpt) << AOpt2007(mLVM,"LVM","Levenberg–Marquardt parameter (to have better conditionning of least squares)",{eTA2007::HDV}) << AOpt2007(mBRSigma,"BRW","Bloc Rigid Weighting [SigmaCenter,SigmaRot]",{{eTA2007::ISizeV,"[2,2]"}}) // RIGIDBLOC << AOpt2007(mBRSigma_Rat,"BRW_Rat","Rattachment fo Bloc Rigid Weighting [SigmaCenter,SigmaRot]",{{eTA2007::ISizeV,"[2,2]"}}) // RIGIDBLOC - << AOpt2007(mParamRefOri,"RefOri","Reference orientation [Ori,SimgaTr,SigmaRot?,PatApply?]",{{eTA2007::ISizeV,"[2,4]"}}) // RIGIDBLOC + << AOpt2007(mParamRefOri,"RefOri","Reference orientation [Ori,SimgaTr,SigmaRot?,PatApply?]",{{eTA2007::ISizeV,"[2,4]"}}) + << AOpt2007(mVSharedIP,"SharedIP","Shared intrinc parmaters [Pat1Cam,Pat1Par,Pat2Cam...] 
",{{eTA2007::ISizeV,"[2,20]"}}) // ]] ; } @@ -108,6 +108,7 @@ int cAppliBundlAdj::Exe() mPhProj.FinishInit(); + if (IsInit(&mParamRefOri)) mBA.AddReferencePoses(mParamRefOri); @@ -130,6 +131,11 @@ int cAppliBundlAdj::Exe() { mBA.SetViscosity(mViscPose.at(0),mViscPose.at(1)); } + + if (IsInit(&mVSharedIP)) + { + mBA.SetSharedIntrinsicParams(mVSharedIP); + } if (IsInit(&mGCPW)) diff --git a/MMVII/src/BundleAdjustment/cMMVII_BundleAdj.cpp b/MMVII/src/BundleAdjustment/cMMVII_BundleAdj.cpp index 809b3a8afe..14c108d5e0 100644 --- a/MMVII/src/BundleAdjustment/cMMVII_BundleAdj.cpp +++ b/MMVII/src/BundleAdjustment/cMMVII_BundleAdj.cpp @@ -126,12 +126,14 @@ void cMMVII_BundleAdj::AssertPhpAndPhaseAdd() void cMMVII_BundleAdj::InitIteration() { + CompileSharedIntrinsicParams(true); mPhaseAdd = false; InitItereGCP(); mR8_Sys = new cResolSysNonLinear(eModeSSR::eSSR_LsqNormSparse,mSetIntervUK.GetVUnKnowns()); mSys = mR8_Sys; + CompileSharedIntrinsicParams(false); } @@ -285,6 +287,9 @@ void cMMVII_BundleAdj::AddCam(const std::string & aNameIm) const std::vector & cMMVII_BundleAdj::VSIm() const {return mVSIm;} const std::vector & cMMVII_BundleAdj::VSCPC() const {return mVSCPC;} + /* ---------------------------------------- */ + /* Frozen/Shared */ + /* ---------------------------------------- */ void cMMVII_BundleAdj::SetParamFrozenCalib(const std::string & aPattern) { @@ -296,6 +301,106 @@ void cMMVII_BundleAdj::SetFrozenCenters(const std::string & aPattern) mPatFrozenCenter = aPattern; } +void cMMVII_BundleAdj::SetSharedIntrinsicParams(const std::vector & aVParams) +{ + mVPatShared = aVParams; +} + +typedef std::tuple tISRP; + +void cMMVII_BundleAdj::CompileSharedIntrinsicParams(bool ForAvg) +{ + MMVII_INTERNAL_ASSERT_tiny((mVPatShared.size()%2)==0,"Expected even size for shared intrinsic params"); + bool Show = ForAvg; + + // Parse the pair Pattern Name Cam / Pattern Name Params + for (size_t aKPat=0 ; aKPat> aMapSharedIndexes; // store the shared index of a given param name + std::map> aMapNames; // store the sharing as name, for show + std::map> aMapValues; // store the sharing of adress for averaging + // Parse the calib and select those which name match the pattern name cam + for (auto aPtrCal : mVPCIC) + { + if (MatchRegex(aPtrCal->Name(),mVPatShared[aKPat])) + { + // Extract information on parameter macthing the pattern of params + cGetAdrInfoParam aGIP(mVPatShared[aKPat+1],*aPtrCal); + for (size_t aKParam=0 ; aKParam* aObj = aGIP.VObjs().at(aKParam); + + size_t aNum = aObj->IndOfVal(aAdr); + aMapSharedIndexes[aNameP].push_back(aNum); + aMapNames[aNameP].push_back(aPtrCal->Name()); + + aMapValues[aNameP].push_back(tISRP(aNum,aPtrCal->Name(),aAdr)); + } + } + } + if (Show) + { + StdOut() << "=========== Shared params for" + << " PatName ={" << mVPatShared[aKPat] << "}" + << " PatCal={" << mVPatShared[aKPat+1] << "}" + << " ============ " << std::endl; + } + for (const auto & [aNamePar,aVTuple] : aMapValues) + { + std::vector aVIndEqui; + tREAL8 aSum = 0.0; + for (const auto & [aNum,aNameCam,anAdr] : aVTuple) + { + aSum += *anAdr; + aVIndEqui.push_back(aNum); + } + aSum /= aVTuple.size(); + if (Show) + StdOut() << " * " << aNamePar << " : " << aSum << std::endl; + if (ForAvg) + { + for (const auto & [aNum,aNameCam,anAdr] : aVTuple) + { + //*anAdr = aSum; => dont undertand why it apparently slow down the convergence ?? 
+ StdOut() << " - " << aNameCam << " : " << *anAdr << std::endl; + } + } + else + { + mSys->SetShared(aVIndEqui); + } + } +/* + if (ForAvg) + { + if (Show) + { + StdOut() << "=========== Shared params for" + << " PatName ={" << mVPatShared[aKPat] << "}" + << " PatCal={" << mVPatShared[aKPat+1] << "}" + << " ============ " << std::endl; + for (const auto & [aNamePar,aVNameCam] : aMapNames) + { + StdOut() << " * " << aNamePar << std::endl; + for (const auto & aNameCam : aVNameCam ) + StdOut() << " - " << aNameCam << std::endl; + } + StdOut() << "==========================================================" << std::endl; + } + } + else + { + for (const auto & [aNamePar,aVIndexes] : aMapSharedIndexes) + { + mSys->SetShared(aVIndexes); + } + } +*/ + } +} + /* ---------------------------------------- */ /* AddViscosity */ /* ---------------------------------------- */ diff --git a/MMVII/src/Matrix/cResolSysNonLinear.cpp b/MMVII/src/Matrix/cResolSysNonLinear.cpp index 796229abf4..e3cac08573 100755 --- a/MMVII/src/Matrix/cResolSysNonLinear.cpp +++ b/MMVII/src/Matrix/cResolSysNonLinear.cpp @@ -23,7 +23,9 @@ cREAL8_RSNL::cREAL8_RSNL(int aNbVar) : mNbVar (aNbVar), mInPhaseAddEq (false), mVarIsFrozen (mNbVar,false), - mNbIter (0) + mNbIter (0), + mCurMaxEquiv (0), + mEquivNum (aNbVar,TheLabelNoEquiv) { } @@ -63,6 +65,27 @@ void cREAL8_RSNL::SetPhaseEq() mInPhaseAddEq = true; } +void cREAL8_RSNL::SetShared(const std::vector & aVUk) +{ +// StdOut() << "cREAL8_RSNL::SetShared " << mEquivNum.size() << " " << + for (const auto & aIUK : aVUk) + mEquivNum.at(aIUK) = mCurMaxEquiv; + mCurMaxEquiv++; +} + +void cREAL8_RSNL::SetUnShared(const std::vector & aVUk) +{ + for (const auto & aIUK : aVUk) + mEquivNum.at(aIUK) = TheLabelNoEquiv; +} + +void cREAL8_RSNL::SetAllUnShared() +{ + for (auto & anEq : mEquivNum) + anEq = TheLabelNoEquiv; + mCurMaxEquiv = 0; +} + /* ************************************************************ */ /* */ @@ -82,6 +105,41 @@ template void cResolSysNonLinear::InitConstraint() } } + // Add the constraint specific to shared unknowns + { + std::map> aMapEq; + for (int aKV=0 ; aKV=0) + aMapEq[mEquivNum.at(aKV)].push_back(aKV); + } + // For X1,X2, ..., Xk shared, we add the constraint X1=X2, X1=X3, ... X1=Xk + // And fix the value to average + for (const auto & [anEqui,aVInd] : aMapEq) + { + Type aSumV = 0 ; + for (size_t aKInd=0 ; aKInd aLinC; + aLinC.AddIV(aVInd.at(0),1.0); + aLinC.AddIV(aVInd.at(aKInd),-1.0); + mLinearConstr->Add1Constr(aLinC,0.0,&mCurGlobSol); + } + } + // setting to the average is "better" at the first iteration, after it's useless, but no harm ... + aSumV /= aVInd.size(); +/* DONT UNDERSTAND WHY !!!! 
But this does not work + for (size_t aKInd=0 ; aKInd void cMainNetwork::AddGaugeConstraint(Type aWeightFi // use for Non Linear constraint std::vector aVInd; - Type aSumSq = 0.0; + Type aSumSqRef = 0.0; + Type aSumSqCur = 0.0; for (const auto & aDelta : AllocNeighbourhood<2>(2)) // Parse all 8 neighbors { @@ -315,16 +316,18 @@ template void cMainNetwork::AddGaugeConstraint(Type aWeightFi aLC.AddIV(aPN.mNumX,aCoeff); aCste += aCoeff * aPN.mTheorPt.x(); // Non linear constraint - aSumSq += Square(aPN.mTheorPt.x()); + aSumSqRef += Square(aPN.mTheorPt.x()); + aSumSqCur += Square(aPN.PCur().x()); aVInd.push_back(aPN.mNumX); } } } - // we can do constraint only if we have the good number of unknowns + // we can use generated code only if we have the good number of unknowns if (aVInd.size()==8) { auto aCalc = EqSumSquare(8,true,1,true); - mSys->AddNonLinearConstr(aCalc,aVInd,{aSumSq},true); + mSys->AddNonLinearConstr(aCalc,aVInd,{aSumSqRef},true); + // StdOut() << "SssQdDif=" << aSumSqCur - aSumSqRef << "\n"; } else mSys->AddConstr(aLC,aCste); @@ -338,7 +341,7 @@ template void cMainNetwork::AddGaugeConstraint(Type aWeightFi } if (doMangleCstr) { - StdOut() << "doMangleCstrdoMangleCstr " << mWithSchur << "\n"; + // StdOut() << "doMangleCstrdoMangleCstr " << mWithSchur << "\n"; } } From 39365331be9ee10bd203ef5f4b0b6b9b8f9ac41a Mon Sep 17 00:00:00 2001 From: deseilligny Date: Mon, 8 Jan 2024 17:11:13 +0100 Subject: [PATCH 13/21] Begin doc constrained opt --- MMVII/Doc/Programmer/NonLinearOptim.tex | 59 +++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/MMVII/Doc/Programmer/NonLinearOptim.tex b/MMVII/Doc/Programmer/NonLinearOptim.tex index a978d2605e..5a6445901b 100755 --- a/MMVII/Doc/Programmer/NonLinearOptim.tex +++ b/MMVII/Doc/Programmer/NonLinearOptim.tex @@ -1314,3 +1314,62 @@ \subsection{Topometric computation} After \texttt{cTopoComp} object creation and filling (see \ref{subsec:topoBench}), the method \texttt{OneIteration()} is called to improve parameters estimation. +%--------------------------------------------- +%--------------------------------------------- +%--------------------------------------------- + +\section{Handling constrained optimization} + +%--------------------------------------------- + +\subsection{Theoreticall aspect} + +% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +\subsubsection{Introduction} + +We consider the problem minimize $F(X)$ as in equation~\ref{EqNLOInit} under +the $n$ constraint : + +\begin{equation} + C_1(X) =0, \; C_2(X)=0 \dots \; C_n(X)=0 +\end{equation} + +In equation~\ref{EqNLOInit} we consider that the current solution is +close to optimal solution and the equation can be linearized. +Similarly we consider that the current solution is close to the +optimum under constraint, and so that the constraint are almost satisfied. 
+Consequently we consider that the constraints can be linearized :
+
+\begin{equation}
+C_k(X) \approx L_k \cdot X - c_k
+\end{equation}
+
+Then :
+
+\begin{equation}
+\begin{split}
+  L_1 \cdot X = c_1 \\
+  L_2 \cdot X = c_2 \\
+  \dots \\
+  L_n \cdot X = c_n
+\end{split}
+\end{equation}
+
+% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+\subsubsection{"Hard" vs "Soft" constraint}
+
+What we call "soft" constraint is the method consisting in adding
+the constraint as a penalization, with a certain weighting $w$ in the optimization :
+
+\begin{equation}
+   F_w(X) = F(X) + w \sum_k (L_k \cdot X - c_k)^2
+\end{equation}
+
+
+
+
+
+
+

From b8487ca8292262b87d2028c3164c5119a0f45623 Mon Sep 17 00:00:00 2001
From: deseilligny
Date: Mon, 8 Jan 2024 23:07:21 +0100
Subject: [PATCH 14/21] Begin doc constraint

---
 MMVII/Doc/Programmer/NonLinearOptim.tex | 102 +++++++++++++++++++++++-
 1 file changed, 101 insertions(+), 1 deletion(-)

diff --git a/MMVII/Doc/Programmer/NonLinearOptim.tex b/MMVII/Doc/Programmer/NonLinearOptim.tex
index 5a6445901b..cf37dc8f2e 100755
--- a/MMVII/Doc/Programmer/NonLinearOptim.tex
+++ b/MMVII/Doc/Programmer/NonLinearOptim.tex
@@ -1364,9 +1364,109 @@ \subsubsection{"Hard" vs "Soft" constraint}
 the constraint as a penalization, with a certain weighting $w$ in the optimization :
 
 \begin{equation}
-   F_w(X) = F(X) + w \sum_k (L_k \cdot X - c_k)^2
+   F_w(X) = F(X) + w \sum_k (L_k \cdot X - c_k)^2  \label{SoftConstraint}
 \end{equation}
 
+Soft constraints are just observations like the others and do not require
+any supplementary method. If what we want is hard constraints, an
+easy way could be to use equation~\ref{SoftConstraint} with a "very high"
+value for $w$. Mathematically speaking, except in pathological cases,
+when $w \rightarrow \infty$ the solution of equation~\ref{SoftConstraint}
+will converge to the solution of the hard constraint.
+
+Although it can work, this cannot be a general method, because
+it is difficult, if not impossible, to define in general what a very high $w$ is.
+If it is not high enough, the solution will not be close enough to the hard
+constraint. If it is too high, it can create numerical inaccuracy.
+
+% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+\subsubsection{Lagrangian method}
+
+Let us mention that it is equivalent to solving the system :
+
+\begin{equation}
+  \begin{pmatrix}
+     A  & L \\
+     ^t L & 0
+  \end{pmatrix}
+\end{equation}
+
+But the system being no longer positive-definite, the resolution is not obvious.
+
+% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+\subsubsection{Substitution methods, case 1 constraint}
+
+The method used in MMVII is a substitution method.
+First, examine the case with $1$ variable, suppose :
+
+\begin{itemize}
+   \item we have a constraint $L \cdot X = c$;
+   \item with $L= (l_0,l_1 \dots)$ and $l_0 \neq 0$, denote $L'=(l_1,l_2, \dots)$ and $X'=(x_1,x_2\dots)$;
+\end{itemize}
+
+
+The constraint can be written :
+\begin{equation}
+    x_0 = \frac{c-L' \cdot X'}{l_0} \label{LSQ:SUBST}
+\end{equation}
+
+Now each time we add a new observation we will substitute $x_0$ :
+
+\begin{itemize}
+     \item let us denote the observation $O(X) = A \cdot X - C$
+     \item with $A= (a_0,a_1 \dots) $ , denote $A' = (a_1,a_2 \dots) $
+     \item then $O(X) = a_0 x_0 + A' X'-C= a_0\frac{c-L' \cdot X'}{l_0} + A'X' -C$
+\end{itemize}
+
+And finally :
+
+\begin{equation}
+     O(X) = (A'- \frac{a_0}{l_0} L') X' - (C -c\frac{a_0}{l_0})
+\end{equation}
+
+So by doing the substitution for each new observation we obtain a system with $N-1$ variables,
+where $x_0$ has been eliminated, and without constraint. We can solve it to find $x_1, x_2 \dots$,
+and at the end use equation~\ref{LSQ:SUBST} to find $x_0$.
+
+
+% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+\subsubsection{Substitution methods, case N constraint}
+
+We study the case with $2$ constraints, the generalization to $N$ constraints being obvious.
+Suppose we have two constraints $C_a$ and $C_b$ :
+
+\begin{itemize}
+    \item  $C_a :  L^a_0 x_0 +  L^a_1 x_1 \dots = c^a$;
+    \item  $C_b :  L^b_0 x_0 +  L^b_1 x_1 \dots = c^b$;
+\end{itemize}
+
+Once we have made a substitution with $C_a$ , we want to use $C_b$ to eliminate $x_1$,
+but for this we need to have $ L^b_0=0$, else it will "re-inject" $x_0$. How can
+we have $L^b_0=0$ ?  We can see that for any $\lambda$ :
+
+\begin{equation}
+    C_a(X)=0 , C_b(X)=0  \Leftrightarrow  C_a(X)=0 , C_b(X)+\lambda C_a(X) =0
+\end{equation}
+
+So if we set $C'_b = C_b - \frac{L^b_0}{L^a_0} C_a$, we can now use $C_a$ to eliminate $x_0$,
+then $C'_b$ to eliminate $x_1$.
+
+The idea is to make a "pre-processing" of the constraints to allow a substitution in cascade.
+If we have $3$ constraints $C_a,C_b,C_c$ , we subtract $C_a$ from $C_b$ and $C_c$, to have $C_a,C'_b,C'_c$
+where $x_0$ is absent from $C'_b$ and $C'_c$. Then we subtract $C'_b$ from $C'_c$ to have a new constraint
+$C''_c$ without $x_0$ and $x_1$. Substituting consecutively with $C_a,C'_b,C''_c$, we can eliminate
+$x_0$, $x_1$ and $x_2$.
+
+This pre-processing of the constraints is exactly the same as a Gaussian elimination in linear resolution.
+It requires some precaution to be stable; practically we make some "pivoting" to select the biggest coefficient
+to eliminate.
+
+%---------------------------------------------
+
+\subsection{Using it in MMVII}
+
+

From 5191f90fdf71d6f8f43bc9e867a08200cbdd1871 Mon Sep 17 00:00:00 2001
From: deseilligny
Date: Tue, 9 Jan 2024 15:07:39 +0100
Subject: [PATCH 15/21] Before batery fails ...

---
 MMVII/Doc/Programmer/NonLinearOptim.tex | 78 ++++++++++++++++++++++++-
 MMVII/src/Matrix/cLinearConstraint.cpp  |  4 +-
 2 files changed, 79 insertions(+), 3 deletions(-)

diff --git a/MMVII/Doc/Programmer/NonLinearOptim.tex b/MMVII/Doc/Programmer/NonLinearOptim.tex
index cf37dc8f2e..f2519c52a7 100755
--- a/MMVII/Doc/Programmer/NonLinearOptim.tex
+++ b/MMVII/Doc/Programmer/NonLinearOptim.tex
@@ -1394,7 +1394,7 @@ \subsubsection{Lagrangian method}
 But the system being no longer positive-definite, the resolution is not obvious.
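Editor's note (illustration only, not part of any patch): a tiny worked instance of the single-constraint substitution documented in PATCH 14 above, with made-up numbers. Take two unknowns $X=(x_0,x_1)$, the constraint $x_0 + 2x_1 = 3$ (so $L=(1,2)$, $c=3$, pivot $l_0=1$) and one observation $O(X)=x_0-x_1$ (so $A=(1,-1)$, $C=0$).

\begin{equation}
   x_0 = 3 - 2 x_1
   \qquad\Rightarrow\qquad
   O(X) = (A' - \tfrac{a_0}{l_0} L')\,X' - (C - c\,\tfrac{a_0}{l_0}) = -3 x_1 + 3
\end{equation}

Minimizing $(-3x_1+3)^2$ gives $x_1=1$, and back-substitution in the constraint gives $x_0=1$: the observation is satisfied, and the constraint is satisfied exactly, by construction.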
-% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \subsubsection{Substituion methods, case 1 constraint} @@ -1467,6 +1467,82 @@ \subsubsection{Substituion methods, case N constraint} \subsection{Using it in MMVII} +% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +\subsubsection{Restrictions/Potential Errors} + +Also we think that this approach has globally benefit, its use require some precaution. +If not respcted, it can rise several error at the execution. + +Firt and most important, the method require that we know all the constraint between +any observation can be added. This is handled by MMVII using a boolean flag {\tt mInPhaseAddEq}, +this flag is set to {\tt false} at the end of each iteration, and set to {\tt true} when +the first observation is added. Once this flag is set to {\tt true}, no adding of constraint +will be allowed, see the method {\tt AssertNotInEquation}. + +Another kind of error that can occurs it to add too much constraint so that it become +impossible to comply with all of them. Of course this is not due to the way it is handled, +but intrinsic to the notion of hard constraints. If this happens, an error +will be raised : + +\begin{itemize} + \item see the method {\tt LinearMax} in class {\tt cOneLinearConstraint}; + \item a error with message like {\tt LinearMax probably bad formed constrained";} will be raised. +\end{itemize} + +Note that this redundancy is detected from the structural point, not from the numerical point. For example +if we add $3$ constraint involvling only the $2$ same unknown an error will be detected even if it is +$3$ times the same constraint. Conversely , for +example if $x$ and $y$ are unknowns, if we add the $2$ constraint $x+y=1$ and $\lambda(x+y)=0$, it is +probable that due to numerical unaccuracy, no contradiction will be detected. + +% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +\subsubsection{Handling "frozen" variables} + +The most current use of constraint is the case where we want to impose that a given unknown +has a given value, it can be its current value or a new value. The constraint is simply +$X_i=C$. There is several methods to facilitate this manipulation in class {\tt cResolSysNonLinear} , for +the frozing part we have: + + +\begin{itemize} + \item {\tt SetFrozenVarCurVal(int aK);} freeze the value of an unknown to its current value, + knowing the number of the unknown in the system; + \item {\tt SetFrozenVarCurVal(tObjWUk \& anObj,const Type \& aVal);} idem, but specify the object {\tt anObj} + and the adress of the value {\tt aVal}; + \item several variant frozing several unkown of an object (using points or adresses + a number) or all the + object; + \item {\tt SetFrozenVar(int aK,const Type \&);} for forcing an unknown to have a given value + (i.e. \emph{a priori} different from it current value). + +\end{itemize} + +For the un-frozing part we have some similar methods ; + +\begin{itemize} + \item {\tt SetUnFrozen(\dots) ;} supress the constraint of an unknown, method knowing its number + and method with an object and one adress; + \item {\tt UnfrozeAll()} supress all the constraint on freezing any variable. 
+\end{itemize} + +% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +\subsubsection{Handling "shared" unknown} + +Sometime it occurs that we have a set of unknwons, theoretically different, but that we want to force +temporarilly to have the same value, what we call shared unknowns. A possible example, if we have several +camera, we want each camera to have it own focal but want to have the same distorsion, and this distorsion +has to be adjusted; \emph{unknown sharing} will allow to do that without requiring to add specific model of camera. + +From the theoreticall point of view, it's quite easy to do with constrained optimization. To force to +have $n$ unknows to have a shared value : $X0=X1=X2\dots=X_{n-1}$, we simply add $n-1$ equations selecting +an arbitrary reference variable $X_0$ : $X_0-X_1=0$, $X_0=X_2$, \dots , $X_0=X_{n-1}$. + +% void SetShared(const std::vector & aVUk); +% void SetUnShared(const std::vector & aVUk); +% void SetAllUnShared(); + diff --git a/MMVII/src/Matrix/cLinearConstraint.cpp b/MMVII/src/Matrix/cLinearConstraint.cpp index 5b30f79e0c..e8b19a5ed0 100755 --- a/MMVII/src/Matrix/cLinearConstraint.cpp +++ b/MMVII/src/Matrix/cLinearConstraint.cpp @@ -261,9 +261,9 @@ template const typename cOneLinearConstraint::tCplIV * cOneLi const tCplIV * aRes = aMax.IndexExtre(); // Some check,if no pair is found, probably the system was degenerated - MMVII_INTERNAL_ASSERT_tiny(aRes!=nullptr,"cOneLinearConstraint::LinearMax probably bad formed cosntrained"); + MMVII_INTERNAL_ASSERT_tiny(aRes!=nullptr,"cOneLinearConstraint::LinearMax probably bad formed constrained"); // to see later if we replace by |aRes->mVal| > Epsilon ? - MMVII_INTERNAL_ASSERT_tiny(aRes->mVal!=0,"cOneLinearConstraint::LinearMax probably bad formed cosntrained"); + MMVII_INTERNAL_ASSERT_tiny(aRes->mVal!=0,"cOneLinearConstraint::LinearMax probably bad formed constrained"); return aRes ; } From 4417fb9e310b7b98db1dfcd9ec863285db1bf1d3 Mon Sep 17 00:00:00 2001 From: deseilligny Date: Tue, 9 Jan 2024 17:26:58 +0100 Subject: [PATCH 16/21] Chg ordi, micro modif on doc --- MMVII/Doc/Programmer/NonLinearOptim.tex | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/MMVII/Doc/Programmer/NonLinearOptim.tex b/MMVII/Doc/Programmer/NonLinearOptim.tex index f2519c52a7..ab03d3c90e 100755 --- a/MMVII/Doc/Programmer/NonLinearOptim.tex +++ b/MMVII/Doc/Programmer/NonLinearOptim.tex @@ -1539,8 +1539,11 @@ \subsubsection{Handling "shared" unknown} have $n$ unknows to have a shared value : $X0=X1=X2\dots=X_{n-1}$, we simply add $n-1$ equations selecting an arbitrary reference variable $X_0$ : $X_0-X_1=0$, $X_0=X_2$, \dots , $X_0=X_{n-1}$. -% void SetShared(const std::vector & aVUk); -% void SetUnShared(const std::vector & aVUk); +\begin{itemize} + \item {\tt SetShared(const std::vector \& aVUk);} given a vector of unknowns numbering, + make them a set of unkowns shared; + \item {\tt void SetUnShared(const std::vector \& aVUk);} +\end{itemize} % void SetAllUnShared(); From 24c92e86678fa1de37e60c3302bc5d9381935b69 Mon Sep 17 00:00:00 2001 From: deseilligny Date: Tue, 9 Jan 2024 17:49:49 +0100 Subject: [PATCH 17/21] ??? 
--- MMVII/Doc/Programmer/NonLinearOptim.tex | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MMVII/Doc/Programmer/NonLinearOptim.tex b/MMVII/Doc/Programmer/NonLinearOptim.tex index cf37dc8f2e..d6fcdef27c 100755 --- a/MMVII/Doc/Programmer/NonLinearOptim.tex +++ b/MMVII/Doc/Programmer/NonLinearOptim.tex @@ -1392,7 +1392,7 @@ \subsubsection{Lagrangian method} \end{pmatrix} \end{equation} -But the system being no longer definite-positive, the resolution is not obvious. +But the system being no longer definite-positive, the resolution require some attention. % - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - From 34af6a3fef2dfbf0dcae7af1d5546a174def4546 Mon Sep 17 00:00:00 2001 From: deseilligny Date: Tue, 9 Jan 2024 22:47:38 +0100 Subject: [PATCH 18/21] Doc on constraint system --- MMVII/Doc/Programmer/NonLinearOptim.tex | 92 +++++++++++++++++++++++-- MMVII/include/MMVII_SysSurR.h | 2 +- 2 files changed, 89 insertions(+), 5 deletions(-) diff --git a/MMVII/Doc/Programmer/NonLinearOptim.tex b/MMVII/Doc/Programmer/NonLinearOptim.tex index b5d97671b8..a6d21f9196 100755 --- a/MMVII/Doc/Programmer/NonLinearOptim.tex +++ b/MMVII/Doc/Programmer/NonLinearOptim.tex @@ -526,6 +526,8 @@ \subsection{Least square solving and constructor} \subsection{Adding a basic equation} +\label{AddBasicEq} + The tag {\tt BASIC:CALC} in {\tt BenchResolSysNonLinear.cpp} contain an example of such use. The method for adding an observation is named {\tt CalcAndAddObs} : @@ -1433,6 +1435,7 @@ \subsubsection{Substituion methods, case 1 constraint} % - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \subsubsection{Substituion methods, case N constraint} +\label{CSTR:NSUBST} We study the case with $2$ constraints, the generalization to $N$ constraint being obvious. Suppose we have two constraints $C_a$ and $C_b$ : @@ -1535,16 +1538,97 @@ \subsubsection{Handling "shared" unknown} camera, we want each camera to have it own focal but want to have the same distorsion, and this distorsion has to be adjusted; \emph{unknown sharing} will allow to do that without requiring to add specific model of camera. -From the theoreticall point of view, it's quite easy to do with constrained optimization. To force to -have $n$ unknows to have a shared value : $X0=X1=X2\dots=X_{n-1}$, we simply add $n-1$ equations selecting +From the theoreticall point of view, it's quite easy to do that with constrained optimization. For forcing +$n$ unknows to have a shared value : $X0=X1=X2\dots=X_{n-1}$, we simply add $n-1$ equations selecting an arbitrary reference variable $X_0$ : $X_0-X_1=0$, $X_0=X_2$, \dots , $X_0=X_{n-1}$. \begin{itemize} \item {\tt SetShared(const std::vector \& aVUk);} given a vector of unknowns numbering, make them a set of unkowns shared; - \item {\tt void SetUnShared(const std::vector \& aVUk);} + \item {\tt void SetUnShared(const std::vector \& aVUk);} make the invert operation; + \item {\tt void SetAllUnShared();} +\end{itemize} + + +% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +\subsubsection{Handling general case, linear and non linear} + +Apart the special case of unknown freezing or sharing, there are method for handing general constraints. 
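% As a purely illustrative example (not a specific MMVII use case) of a constraint that neither
% freezing nor sharing can express, one may want a direction $(x,y,z)$ to keep a unit norm :
\begin{equation}
    x^2 + y^2 + z^2 = 1
\end{equation}
% which is a non linear constraint, while $x + y + z = 1$ would be a linear one.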
+For the linear case we have the method {\tt AddConstr} : + +\begin{itemize} + \item {\tt AddConstr(const tSVect \& V,const Type \& C,bool OnlyIfFirstIter=true);} + \item {\tt V} is the linear part , {\tt C} is the constant, it correspond to the equation $ V \cdot X = C$ ; + \item the parameter {\tt OnlyIfFirstIter} indicate that the constraint must be added only if we are at the + first iteration of the non linear system. +\end{itemize} + +For non linear constraint we have the method {\tt AddNonLinearConstr}. It's very similar to {\tt CalcAndAddObs} +(see \ref{AddBasicEq}), it takes a calculator corresponding to a funcion $F$ to compute the value and its derivatives and a linerized +version of the constraint $F(X)=0$ : + +\begin{itemize} + \item {\tt void AddNonLinearConstr(tCalc * aCalcVal,const tVectInd \& aVInd,const tStdVect\& aVObs,bool OnlyIfFirst=true);} + + \item {\tt aCalcVal,aVInd, aVObs} play the same role than in {\tt CalcAndAddObs}; + + \item the parameter {\tt OnlyIfFirstIter} play the same role than in {\tt AddConstr}. \end{itemize} -% void SetAllUnShared(); + +The method {\tt SupressAllConstr()}, supress all the constraint +added by {\tt AddConstr} or {\tt AddNonLinearConstr}. + +% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +\subsubsection{Example in bench} + +In the network bench the method {\tt AddGaugeConstraint} give examples of using +the constraint to fix the arbitrary rotation that cannot be fixed by distance conservation. +It also serve of unitary test of correctnes. The "natural" way, already described is to fix +$3$ coordinates $X_{0,0},Y_{0,0},X_{1,0}$ with soft of hard constraints (using {\tt AddEqFixVar} or {\tt SetFrozenVar}). + +The other testing are completely artificial and activated with the flag {\tt doMangleCstr}, +this test is done using constraint involving the neighboorhoud of a point. +Let $X_k$ be the unknowns of the neighbouring points, and $R_k$ be the +ground truth value of the $X_k$. + +For testing the linear constraint, with {\tt AddConstr} we generate a random weighting $W_k$ +and we add the linear constraint : + +\begin{equation} + \sum_k W_k X_k = \sum_k W_k R_k +\end{equation} + +For the non linear constraint, we use the following equation : + +\begin{equation} + \sum_k X_k^2 = \sum_k R_k^2 +\end{equation} + +%--------------------------------------------- + +\subsection{Implementation details} + +% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +\subsubsection{Classes presentation } + +The code relative is located in the files {\tt Matrix/LinearConstraint.h} +and {\tt Matrix/cLinearConstraint.cpp}. The class involved are : + +\begin{itemize} + \item {\tt cOneLinearConstraint} for representing a single constraint; + \item {\tt cSetLinearConstraint} for representing a set of constraint : initial value and value after + pre-processing described in~\ref{CSTR:NSUBST}; + \item {\tt cDSVec} helper class allowing easy manipulation of sparse vector; + \item {\tt cBenchLinearConstr} class for doing some unitary test on {\tt cSetLinearConstraint}. 
+\end{itemize} + +% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +\subsubsection{Class {\tt cDSVec} } + diff --git a/MMVII/include/MMVII_SysSurR.h b/MMVII/include/MMVII_SysSurR.h index 4b31c397e7..b90f35fc2e 100755 --- a/MMVII/include/MMVII_SysSurR.h +++ b/MMVII/include/MMVII_SysSurR.h @@ -234,7 +234,7 @@ template class cResolSysNonLinear : public cREAL8_RSNL void AddEqFixNewVal(const tObjWUk & anObj,const cPtxd &,const cPtxd &,const Type& aWeight); - void AddNonLinearConstr(tCalc * aCalcVal,const tVectInd & aVInd,const tStdVect& aVObs,bool OnlyIfFirst); + void AddNonLinearConstr(tCalc * aCalcVal,const tVectInd & aVInd,const tStdVect& aVObs,bool OnlyIfFirst=true); /// Basic Add 1 equation , no bufferistion, no schur complement void CalcAndAddObs(tCalc *,const tVectInd &,const tStdVect& aVObs,const tResidualW & = tResidualW()); From ea4356584557fab51839ab40d9ea3a38a60438b6 Mon Sep 17 00:00:00 2001 From: deseilligny Date: Wed, 10 Jan 2024 12:07:04 +0100 Subject: [PATCH 19/21] In doc --- MMVII/Doc/Programmer/NonLinearOptim.tex | 34 +++++++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/MMVII/Doc/Programmer/NonLinearOptim.tex b/MMVII/Doc/Programmer/NonLinearOptim.tex index a6d21f9196..feb390372a 100755 --- a/MMVII/Doc/Programmer/NonLinearOptim.tex +++ b/MMVII/Doc/Programmer/NonLinearOptim.tex @@ -1496,7 +1496,7 @@ \subsubsection{Restrictions/Potential Errors} Note that this redundancy is detected from the structural point, not from the numerical point. For example if we add $3$ constraint involvling only the $2$ same unknown an error will be detected even if it is $3$ times the same constraint. Conversely , for -example if $x$ and $y$ are unknowns, if we add the $2$ constraint $x+y=1$ and $\lambda(x+y)=0$, it is +example if $x$ and $y$ are unknowns, if we add the $2$ constraint $x+y=1$ and $\frac{x}{3}+ \frac{y}{3}=0$, it is probable that due to numerical unaccuracy, no contradiction will be detected. % - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -1612,7 +1612,7 @@ \subsection{Implementation details} % - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\subsubsection{Classes presentation } +\subsubsection{Global presentation } The code relative is located in the files {\tt Matrix/LinearConstraint.h} and {\tt Matrix/cLinearConstraint.cpp}. The class involved are : @@ -1629,6 +1629,36 @@ \subsubsection{Classes presentation } \subsubsection{Class {\tt cDSVec} } +By itself the method of subsitution is relatively simple. But there is some complexity added +for efficient implementation : + +\begin{itemize} + \item in many context where non linear optimization is used, including photogrammetry, each observation + imply few (says $\approx 10-50$) among many (say $\approx 500-100000$); + + \item for efficient representation we use sparse vector for constraint and observation, + a sparse vector beging a collection of pair index/value; + + \item let name $m$ the number of pair of sparse vector and $N$ the number of unknown; + + \item many operation on sparse vector, like substitution described in~\ref{CSTR:NSUBST}, require some + precaution if we want that the cost to be in $\mathcal{O}(m)$ rather than $\mathcal{O}(N)$. +\end{itemize} + + +This here where {\tt cDSVec} can help, it's a class for dense representaion of sparse vector. 
Typically a {\tt cDSVec} : + +\begin{itemize} + \item contains a dense real vector $V$ (initialy full of $0$) , a list of used index $L$ (initially empty), a dense boolean + $B$ (initially full of false) vector indicating if the index is used + \item each time an element is added-supress on the {\tt cDSVec} at index $i$ , the dense vector is updated + at $V[i]$ and if $B[i]$ is false, $B$ and $L$ are also updated; + \item a dense vector can be quickly reset to $0$ with the use of $L$. +\end{itemize} + +Typically, as the allocation of a dense vector can take some time (in $\mathcal{O}(N)$) the idea is to allocate +a dense vector in class {\tt cSetLinearConstraint} and to reuse it many time, typically it's a buffer. + From 9f6dd381e16408b9a29354d1cf38466167d35f27 Mon Sep 17 00:00:00 2001 From: deseilligny Date: Wed, 10 Jan 2024 18:35:27 +0100 Subject: [PATCH 20/21] In correct doc constraints --- MMVII/Doc/Programmer/NonLinearOptim.tex | 131 ++++++++++++++++-------- MMVII/src/Matrix/LinearConstraint.h | 10 +- 2 files changed, 98 insertions(+), 43 deletions(-) diff --git a/MMVII/Doc/Programmer/NonLinearOptim.tex b/MMVII/Doc/Programmer/NonLinearOptim.tex index feb390372a..2a37bbb1f4 100755 --- a/MMVII/Doc/Programmer/NonLinearOptim.tex +++ b/MMVII/Doc/Programmer/NonLinearOptim.tex @@ -1330,7 +1330,7 @@ \subsection{Theoreticall aspect} \subsubsection{Introduction} -We consider the problem minimize $F(X)$ as in equation~\ref{EqNLOInit} under +We consider the problem of minimizing $F(X)$ as in equation~\ref{EqNLOInit} under the $n$ constraint : \begin{equation} @@ -1340,14 +1340,14 @@ \subsubsection{Introduction} In equation~\ref{EqNLOInit} we consider that the current solution is close to optimal solution and the equation can be linearized. Similarly we consider that the current solution is close to the -optimum under constraint, and so that the constraint are almost satisfied. -Consequently we consider that the constraint can be linearized +optimum under constraint, and consequently that the constraints are almost satisfied. +We consider then that the constraint can be linearized : \begin{equation} C_k(X) \approx L_1 \cdot X - c_k \end{equation} -Then : +So we can write : \begin{equation} \begin{split} @@ -1366,18 +1366,18 @@ \subsubsection{"Hard" vs "Soft" constraint} the constraint as a penalization, with a certain weigthing $w$ in optimization : \begin{equation} - F_w(X) = F(X) + w \sum_k (L_k-c_x)^2 \label{SoftConstraint} + F_w(X) = F(X) + w \sum_k (L_k-c_k)^2 \label{SoftConstraint} \end{equation} Soft constraints are just observations like others and dont require -to add supplementary method. If what we want is hard constraints, an +to add any supplementary method. If what we want is hard constraints, an easy way could be to use equation~\ref{SoftConstraint} with a "very high" value for $w$. Mathematically speaking, except pathological cases, generally when $w \rightarrow \infty$, the solution of ~\ref{SoftConstraint} will converge to the solution of the hard constraint. -Also it can work, this cannot be a general method , because -it is difficult, if not impossible to define generally what is a very high $w$. +Also it can work, this cannot be a general method , because generally +it is difficult, if not impossible to define a \emph{"good very high $w$"}. If it is not high enough, the solution will not be close enough to the hard constraint. It it is too high, it can create numerical inacuracy. 
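% A one dimensional toy computation (purely illustrative) makes the difficulty concrete :
% approximating the hard constraint $x=0$ while minimizing $(x-1)^2$ by the penalization
% $(x-1)^2 + w x^2$ gives the solution
\begin{equation}
    x_w = \frac{1}{1+w}
\end{equation}
% so satisfying the constraint within $10^{-6}$ already requires $w \approx 10^{6}$, while the
% conditioning of the normal equations degrades as $w$ grows.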
@@ -1385,7 +1385,7 @@ \subsubsection{"Hard" vs "Soft" constraint} \subsubsection{Lagrangian method} -Mention that it can be equivalent to solve the system with : +To detail later : mention that it can be equivalent to solve the system with : \begin{equation} \begin{pmatrix} @@ -1394,17 +1394,18 @@ \subsubsection{Lagrangian method} \end{pmatrix} \end{equation} -But the system being no longer definite-positive, the resolution require some attention. +But the system being no longer definite-positive, the resolution can be tricky. % - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\subsubsection{Substituion methods, case 1 constraint} +\subsubsection{Substitution methods, case $1$ constraint} +\label{CSTR:SUBST} -The method used in MMVII is a substitution method. First, examine the case with $1$ variable, suppose : +The method used in MMVII is a substitution method. First, let examine the case with $1$ variable, suppose : \begin{itemize} \item we have a constraint $L \cdot X = c$; - \item with $L= (l_0,l_1 \dots)$ and $l_0 \neq 0$, note $L'=(l_1,l_2, \dots)$ and $X'=(x_1,x_2\dots)$; + \item noting $L= (l_0,l_1 \dots)$ and suposing $l_0 \neq 0$, note $L'=(l_1,l_2, \dots)$ and $X'=(x_1,x_2\dots)$; \end{itemize} @@ -1416,28 +1417,28 @@ \subsubsection{Substituion methods, case 1 constraint} Now each time we add a new observation we will substitute $x_0$ : \begin{itemize} - \item let note the observation $O(X) = A \cdot X - C$ + \item let note the observation $Obs(X) = A \cdot X - C$ \item with $A= (a_0,a_1 \dots) $ , note $A' = (a_1,a_2 \dots) $ - \item then $O(x) = a_0 x_0 + A' X'-C= a_0\frac{c-L' \cdot X'}{l_0} + A'X' -C$ + \item then $Obs(x) = a_0 x_0 + A' X'-C= a_0\frac{c-L' \cdot X'}{l_0} + A'X' -C$ \end{itemize} And finaly : \begin{equation} - O(x) = (A'- \frac{a_0}{l_0} L') X' - (C -c\frac{a_0}{l_0}) + Obs(x) = (A'- \frac{a_0}{l_0} L') X' - (C -c\frac{a_0}{l_0}) \end{equation} -So by doing the substition for each new observation we obtain a system with $N-1$ variable, +So by doing the substitution for each observation we obtain a system with $N-1$ variables, where $x_0$ has been eliminated, and without constraint. We can solve it, to find $x_1, x_2 \dots $, and at the end, use equation~\ref{LSQ:SUBST} to find $x_0$. % - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\subsubsection{Substituion methods, case N constraint} +\subsubsection{Substitution methods, case N constraints.} \label{CSTR:NSUBST} -We study the case with $2$ constraints, the generalization to $N$ constraint being obvious. +We study the case with $2-3$ constraints, the generalization to $N$ constraints being straightforward. Suppose we have two constraints $C_a$ and $C_b$ : \begin{itemize} @@ -1445,21 +1446,22 @@ \subsubsection{Substituion methods, case N constraint} \item $C_b : L^b_0 x_0 + L^b_1 x_1 \dots = c^b$; \end{itemize} -Once we have make a substition with $C_a$ , we want to use $C_b$ to eliminate $x_1$, -but for this we need to have $ L^b_0=0$, else it will "re inject" $x_0$. How can -have $L^b_0=0$ ? We can see that for any $\lambda$ : +For each observation, once we have make a substition with $C_a$ to eliminate $x_0$, + we want to use $C_b$ to eliminate $x_1$. +But for this we need to have $ L^b_0=0$, else it will "re inject" $x_0$. 
To +force $L^b_0=0$, we remark that for any $\lambda$ : \begin{equation} C_a(X)=0 , C_b(X)=0 \Leftrightarrow C_a(X)=0 , C_b(X)+\lambda C_a(X) =0 \end{equation} -So if set $C'_b = C_b - \frac{L^b_0}{L^a_0} C_b$, we can use now $C_a$ to eliminate $x_0$, -then $C'_b$ to eliminate $x_1$. +So if we set $C'_b = C_b - \frac{L^b_0}{L^a_0} C_b$, we can now : use $C_a$ to eliminate $x_0$, +then use $C'_b$ to eliminate $x_1$. -The idea is to make a "pre-processing" of constraint to allow a substitution in cascade. -If we have $3$ constraint $C_a,C_b,C_c$ , we subract $C_a$ in $C_b$ and $C_c$, to have $C_a,C'_b,C'_c$ -where $x_0$ is absent of $C'_b$ and $C'_c$. Then we substract $C'_b$ to $C'_c$ to have new constraint -$C''_c$ without eliminate $x_0$ and $x_1$. Substituting consecutively wth $C_a,C'_b,C''_c$, we can eliminate +The idea is to make a "pre-processing" of constraints to allow a substitution in cascade. +If we have $3$ constraints $C_a,C_b,C_c$ , we subract $C_a$ in $C_b$ and $C_c$, to have $C_a,C'_b,C'_c$ +where $x_0$ is absent of $C'_b$ and $C'_c$. Then we substract $C'_b$ to $C'_c$ to have a new constraint +$C''_c$ where $x_0$ and $x_1$ are eliminated. For each observation, by substituting consecutively $C_a,C'_b,C''_c$, we can eliminate $x_0$, $x_1$ and $x_2$. This pre-processing of constraint is exactly the same than a gaussian elimination in linear resolution. @@ -1470,20 +1472,22 @@ \subsubsection{Substituion methods, case N constraint} \subsection{Using it in MMVII} +\label{Cstr:Use:MMVII} + % - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \subsubsection{Restrictions/Potential Errors} Also we think that this approach has globally benefit, its use require some precaution. -If not respcted, it can rise several error at the execution. +If not respected, it can rise several error at execution of the programm. -Firt and most important, the method require that we know all the constraint between -any observation can be added. This is handled by MMVII using a boolean flag {\tt mInPhaseAddEq}, +Firt and most important, the method require that we know all the constraints between +before any observation can be added. This is handled by {\tt MMVII} using a boolean flag {\tt mInPhaseAddEq}, this flag is set to {\tt false} at the end of each iteration, and set to {\tt true} when the first observation is added. Once this flag is set to {\tt true}, no adding of constraint -will be allowed, see the method {\tt AssertNotInEquation}. +will be allowed (see the method {\tt AssertNotInEquation}). -Another kind of error that can occurs it to add too much constraint so that it become +Another kind of error that can occurs is to add too much constraint so that it become impossible to comply with all of them. Of course this is not due to the way it is handled, but intrinsic to the notion of hard constraints. If this happens, an error will be raised : @@ -1494,9 +1498,9 @@ \subsubsection{Restrictions/Potential Errors} \end{itemize} Note that this redundancy is detected from the structural point, not from the numerical point. For example -if we add $3$ constraint involvling only the $2$ same unknown an error will be detected even if it is +if we add $3$ constraint involving only the $2$ same unknown an error will be detected even if it is $3$ times the same constraint. 
Conversely , for -example if $x$ and $y$ are unknowns, if we add the $2$ constraint $x+y=1$ and $\frac{x}{3}+ \frac{y}{3}=0$, it is +example if $x$ and $y$ are unknowns, if we add the $2$ constraint $x+5y=1$ and $\frac{x}{3}+ \frac{5y}{3}=0$, it is probable that due to numerical unaccuracy, no contradiction will be detected. % - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -1512,11 +1516,11 @@ \subsubsection{Handling "frozen" variables} \begin{itemize} \item {\tt SetFrozenVarCurVal(int aK);} freeze the value of an unknown to its current value, knowing the number of the unknown in the system; - \item {\tt SetFrozenVarCurVal(tObjWUk \& anObj,const Type \& aVal);} idem, but specify the object {\tt anObj} - and the adress of the value {\tt aVal}; + \item {\tt SetFrozenVarCurVal(tObjWUk \& anObj,const Type \& aVal)}, idem, but specify the object {\tt anObj} + and the adress of the value {\tt aVal} that must be \emph{"inside"} the object; \item several variant frozing several unkown of an object (using points or adresses + a number) or all the object; - \item {\tt SetFrozenVar(int aK,const Type \&);} for forcing an unknown to have a given value + \item {\tt SetFrozenVar(int aK,const Type \&)}, for forcing an unknown to have a given value (i.e. \emph{a priori} different from it current value). \end{itemize} @@ -1524,8 +1528,8 @@ \subsubsection{Handling "frozen" variables} For the un-frozing part we have some similar methods ; \begin{itemize} - \item {\tt SetUnFrozen(\dots) ;} supress the constraint of an unknown, method knowing its number - and method with an object and one adress; + \item {\tt SetUnFrozen(\dots) ;} supress the constraint of an unknown (idem, with method knowing its number + and method with an object and one adress); \item {\tt UnfrozeAll()} supress all the constraint on freezing any variable. \end{itemize} @@ -1614,6 +1618,11 @@ \subsection{Implementation details} \subsubsection{Global presentation } +This part is for the programmer that will need to maintain the core system. +For the programmer that "only" want to use it in optimisation in {\tt MMVII} +the interaction with {\tt cResolSysNonLinear}, described +in \ref{Cstr:Use:MMVII}, is sufficient. + The code relative is located in the files {\tt Matrix/LinearConstraint.h} and {\tt Matrix/cLinearConstraint.cpp}. The class involved are : @@ -1660,7 +1669,49 @@ \subsubsection{Class {\tt cDSVec} } a dense vector in class {\tt cSetLinearConstraint} and to reuse it many time, typically it's a buffer. +% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +\subsubsection{Class {\tt cOneLinearConstraint} } + + +The class {\tt cOneLinearConstraint} is used to represent one linear constraint (!! \dots :-;) . +It contains a sparse vector {\tt mLP}$=V$ and a constant {\tt mCste}$)C$, at its creation it +represent directly the constraint $V \cdot X = C$. + +One it has been "reduced" (i.e. the substitution described in \ref{CSTR:SUBST} has been applied), +the {\tt mISubst}$=i$ contain the number of the unknown to substitute, and {\tt mLP}$=V'$ no longer the +pair containing $I$, the constraint is then $V' \cdot X + X_i = C$. The boolean {\tt mReduced} +indicate if the constraint was reduced. + +% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +\subsubsection{Class {\tt cSetLinearConstraint} } + +The class {\tt cSetLinearConstraint} is the main class, i.e the only class the +"rest of the word" needs to interact with. 
It contains essentially the following data : + +\begin{itemize} + \item a copy of the initial version of constraint; + \item a copy of reduced version ; + \item a bufffer of type {\tt cDSVec} to accelerate the computation; +\end{itemize} +A typical sequence of use will be : + + +\begin{itemize} + \item create the object, indicating the number of unknown (to allocate the buffer); + \item add a number of $M$ constraints (using {\tt Add1Constr} or {\tt Add1ConstrFrozenVar}); + \item "compile" the object , this is apply processing of~\ref{CSTR:NSUBST} to create the + reduced constraint in {\tt mVCstrReduced}; + \item each time an observation is added, use one of the $3$ following methods to eliminate the $M$ variables + selected in {\tt Compile} : + \begin{itemize} + \item {\tt SubstituteInSparseLinearEquation}; + \item {\tt SubstituteInDenseLinearEquation} + \item {\tt SubstituteInOutRSNL} + \end{itemize} +\end{itemize} diff --git a/MMVII/src/Matrix/LinearConstraint.h b/MMVII/src/Matrix/LinearConstraint.h index 5073d68198..02c436be76 100755 --- a/MMVII/src/Matrix/LinearConstraint.h +++ b/MMVII/src/Matrix/LinearConstraint.h @@ -91,10 +91,13 @@ template class cOneLinearConstraint : public cMemCheck /// If aCurSol != 0 constraint is exprimed relatively to current sol cOneLinearConstraint Dup(const tDV* aCurSol) const; - // Subsract into "aToSub" so as to annulate the coeff with mISubst + /// Substitute into "aToSub" so as to annulate the coeff with mISubst void SubstituteInOtherConstraint(cOneLinearConstraint & aToSub,cDSVec & aBuf); + /// Substitute into a the dense constraint AX=B void SubstituteInDenseLinearEquation (tDV & aA,Type & aB) const; + /// Substitute into a the sparse constraint AX=B void SubstituteInSparseLinearEquation(tSV & aA,Type & aB,cDSVec & aBuf) const; + /// Substitute into the result of functor (contains Func+Derivates) void SubstituteInOutRSNL(tIO_RSNL& aIO,cDSVec & aBuf) const; /// Extract pair with maximal amplitude (in abs) @@ -145,14 +148,15 @@ template class cSetLinearConstraint : public cMemCheck void Add1ConstrFrozenVar(int aKVar,const Type & aVal,const tDV *); void Reset(); + + /// This add the constraint to the system , thi allow to determinate the value of subsituted variables + void AddConstraint2Sys(tLinearSysSR &); // ============ These 3 method modify equation to take into account substition =========== void SubstituteInSparseLinearEquation(tSV & aA,Type & aB) const; void SubstituteInDenseLinearEquation (tDV & aA,Type & aB) const; void SubstituteInOutRSNL(tIO_RSNL& aIO) const; - /// This add the constraint to the system, required because all the subst-var having been elimitated - void AddConstraint2Sys(tLinearSysSR &); private : /// Show all the detail From dccb58c63a5a8309027e416e6c435bdab89e8c1a Mon Sep 17 00:00:00 2001 From: deseilligny Date: Wed, 10 Jan 2024 20:36:50 +0100 Subject: [PATCH 21/21] Doc constrained, some type corrected --- MMVII/Doc/Programmer/NonLinearOptim.tex | 44 +++++++++++++------------ 1 file changed, 23 insertions(+), 21 deletions(-) diff --git a/MMVII/Doc/Programmer/NonLinearOptim.tex b/MMVII/Doc/Programmer/NonLinearOptim.tex index 2a37bbb1f4..cc36b7c50b 100755 --- a/MMVII/Doc/Programmer/NonLinearOptim.tex +++ b/MMVII/Doc/Programmer/NonLinearOptim.tex @@ -1537,14 +1537,16 @@ \subsubsection{Handling "frozen" variables} \subsubsection{Handling "shared" unknown} -Sometime it occurs that we have a set of unknwons, theoretically different, but that we want to force -temporarilly to have the same value, what we call shared unknowns. 
A possible example, if we have several -camera, we want each camera to have it own focal but want to have the same distorsion, and this distorsion +Sometime it occurs that for a set of unknwons, theoretically different, we want to enforce +temporarilly these unknowns to have the same value , that's what we call shared unknowns. +A possible example, is with several camera where we want each camera to have its own focal +but want them to have the same distorsion, but this distorsion has to be adjusted; \emph{unknown sharing} will allow to do that without requiring to add specific model of camera. -From the theoreticall point of view, it's quite easy to do that with constrained optimization. For forcing +From the theoreticall point of view, it's quite direct to impose shared unknowns with constrained optimization. For forcing $n$ unknows to have a shared value : $X0=X1=X2\dots=X_{n-1}$, we simply add $n-1$ equations selecting -an arbitrary reference variable $X_0$ : $X_0-X_1=0$, $X_0=X_2$, \dots , $X_0=X_{n-1}$. +an arbitrary reference variable $X_0$ : $X_0-X_1=0$, $X_0-X_2$=0, \dots , $X_0-X_{n-1}=0$. +The methods in class {\tt cResolSysNonLinear} , for manipulating shared unknown are : \begin{itemize} \item {\tt SetShared(const std::vector \& aVUk);} given a vector of unknowns numbering, @@ -1569,8 +1571,8 @@ \subsubsection{Handling general case, linear and non linear} \end{itemize} For non linear constraint we have the method {\tt AddNonLinearConstr}. It's very similar to {\tt CalcAndAddObs} -(see \ref{AddBasicEq}), it takes a calculator corresponding to a funcion $F$ to compute the value and its derivatives and a linerized -version of the constraint $F(X)=0$ : +(see \ref{AddBasicEq}), it takes a calculator corresponding to a funcion $F$ to compute the value +and its derivatives and generate a linearized version of the constraint $F(X)=0$ : \begin{itemize} \item {\tt void AddNonLinearConstr(tCalc * aCalcVal,const tVectInd \& aVInd,const tStdVect\& aVObs,bool OnlyIfFirst=true);} @@ -1587,12 +1589,12 @@ \subsubsection{Handling general case, linear and non linear} \subsubsection{Example in bench} -In the network bench the method {\tt AddGaugeConstraint} give examples of using -the constraint to fix the arbitrary rotation that cannot be fixed by distance conservation. +In the network bench the method {\tt AddGaugeConstraint} gives examples of using +the constraints to fix the arbitrary rotation that cannot be fixed by distance conservation. It also serve of unitary test of correctnes. The "natural" way, already described is to fix -$3$ coordinates $X_{0,0},Y_{0,0},X_{1,0}$ with soft of hard constraints (using {\tt AddEqFixVar} or {\tt SetFrozenVar}). +$3$ coordinates $X_{0,0},Y_{0,0},X_{1,0}$ with soft or hard constraints (using {\tt AddEqFixVar} or {\tt SetFrozenVar}). -The other testing are completely artificial and activated with the flag {\tt doMangleCstr}, +The other testing method are completely artificial and activated with the flag {\tt doMangleCstr}, this test is done using constraint involving the neighboorhoud of a point. Let $X_k$ be the unknowns of the neighbouring points, and $R_k$ be the ground truth value of the $X_k$. @@ -1618,13 +1620,13 @@ \subsection{Implementation details} \subsubsection{Global presentation } -This part is for the programmer that will need to maintain the core system. +This part is for the programmer that will need to maintain and make evolve the core system. 
For the programmer that "only" want to use it in optimisation in {\tt MMVII} the interaction with {\tt cResolSysNonLinear}, described in \ref{Cstr:Use:MMVII}, is sufficient. -The code relative is located in the files {\tt Matrix/LinearConstraint.h} -and {\tt Matrix/cLinearConstraint.cpp}. The class involved are : +The code relative to constrained optimization is located in the files {\tt Matrix/LinearConstraint.h} +and {\tt Matrix/cLinearConstraint.cpp}. The classes involved are : \begin{itemize} \item {\tt cOneLinearConstraint} for representing a single constraint; @@ -1646,16 +1648,16 @@ \subsubsection{Class {\tt cDSVec} } imply few (says $\approx 10-50$) among many (say $\approx 500-100000$); \item for efficient representation we use sparse vector for constraint and observation, - a sparse vector beging a collection of pair index/value; + a sparse vector being a collection of pair index/value; - \item let name $m$ the number of pair of sparse vector and $N$ the number of unknown; + \item let name $m$ the number of pair of a sparse vector and $N$ the number of unknowns; \item many operation on sparse vector, like substitution described in~\ref{CSTR:NSUBST}, require some - precaution if we want that the cost to be in $\mathcal{O}(m)$ rather than $\mathcal{O}(N)$. + precaution if we want that the cost to be in $\mathcal{O}(m)$ rather than in $\mathcal{O}(N)$. \end{itemize} -This here where {\tt cDSVec} can help, it's a class for dense representaion of sparse vector. Typically a {\tt cDSVec} : +This is here where {\tt cDSVec} can help, it's a class for dense representaion of sparse vector. Typically a {\tt cDSVec} : \begin{itemize} \item contains a dense real vector $V$ (initialy full of $0$) , a list of used index $L$ (initially empty), a dense boolean @@ -1666,7 +1668,7 @@ \subsubsection{Class {\tt cDSVec} } \end{itemize} Typically, as the allocation of a dense vector can take some time (in $\mathcal{O}(N)$) the idea is to allocate -a dense vector in class {\tt cSetLinearConstraint} and to reuse it many time, typically it's a buffer. +a dense vector in class {\tt cSetLinearConstraint} and to reuse it many times, that's a buffer. % - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -1674,7 +1676,7 @@ \subsubsection{Class {\tt cDSVec} } \subsubsection{Class {\tt cOneLinearConstraint} } -The class {\tt cOneLinearConstraint} is used to represent one linear constraint (!! \dots :-;) . +The class {\tt cOneLinearConstraint} is used to represent one linear constraint (who would believe that !! \dots :-;) . It contains a sparse vector {\tt mLP}$=V$ and a constant {\tt mCste}$)C$, at its creation it represent directly the constraint $V \cdot X = C$. @@ -1702,7 +1704,7 @@ \subsubsection{Class {\tt cSetLinearConstraint} } \begin{itemize} \item create the object, indicating the number of unknown (to allocate the buffer); \item add a number of $M$ constraints (using {\tt Add1Constr} or {\tt Add1ConstrFrozenVar}); - \item "compile" the object , this is apply processing of~\ref{CSTR:NSUBST} to create the + \item "compile" the object , this means apply the processing of~\ref{CSTR:NSUBST} to create the reduced constraint in {\tt mVCstrReduced}; \item each time an observation is added, use one of the $3$ following methods to eliminate the $M$ variables selected in {\tt Compile} :