diff --git a/docs/source/SOLVERS.rst b/docs/source/SOLVERS.rst index b72deb0f0c..db77007ffb 100644 --- a/docs/source/SOLVERS.rst +++ b/docs/source/SOLVERS.rst @@ -4,18 +4,18 @@ Solvers ======= -:term:`FiPy` requires either :term:`Pysparse`, :term:`SciPy` or -:term:`Trilinos` to be installed in order to solve linear systems. -From our experiences, :term:`FiPy` runs most efficiently in serial -when :term:`Pysparse` is the linear solver. :term:`Trilinos` is the -most complete of the three solvers due to its numerous preconditioning -and solver capabilities and it also allows :term:`FiPy` to :ref:`run -in parallel `. Although less efficient than :term:`Pysparse` -and less capable than :term:`Trilinos`, :term:`SciPy` is a very -popular package, widely available and easy to install. For this -reason, :term:`SciPy` may be the best linear solver choice when first -installing and testing :term:`FiPy` (and it is the only viable solver -under `Python 3.x`_). +:term:`FiPy` requires one of the PETSc_, pyamgx_, Pysparse_, SciPy_, or +Trilinos_ solver suites to be installed in order to solve linear systems. +From our experiences, :term:`FiPy` runs most efficiently in serial when Pysparse_ +is the linear solver. PETSc_ and Trilinos_ are the most complete of the +solvers due to their numerous preconditioning and solver capabilities and +they also allow :term:`FiPy` to :ref:`run in parallel `. +Although less efficient than Pysparse_ and less capable than PETSc_ or +Trilinos_, SciPy_ is a very popular package, widely available and easy to +install. For this reason, SciPy_ may be the best linear solver choice when +first installing and testing :term:`FiPy`. pyamgx_ offers the possibility +of solving sparse linear systems on the GPU; be aware that configuring +both the hardware and software is non-trivial. :term:`FiPy` chooses the solver suite based on system availability or based on the user supplied :ref:`FlagsAndEnvironmentVariables`. For example, @@ -31,7 +31,7 @@ to ``scipy``:: $ python -c "from fipy import *; print DefaultSolver" -uses a :ref:`SCIPY` solver. Suite-specific solver classes can also +uses a SciPy_ solver. Suite-specific solver classes can also be imported and instantiated overriding any other directives. For example:: @@ -39,13 +39,11 @@ example:: > print DefaultSolver" --no-pysparse -uses a :ref:`SCIPY` solver regardless of the command line +uses a SciPy_ solver regardless of the command line argument. In the absence of :ref:`FlagsAndEnvironmentVariables`, :term:`FiPy`'s order of precedence when choosing the -solver suite for generic solvers is :ref:`PYSPARSE` followed by -:ref:`TRILINOS`, :ref:`PYAMG` and :ref:`SCIPY`. - -.. _Python 3.x: http://docs.python.org/py3k/ +solver suite for generic solvers is PySparse_ followed by +PETSc_, Trilinos_, SciPy_, PyAMG_, and pyamgx_. .. _PETSC: @@ -64,9 +62,10 @@ communication (see :ref:`PARALLEL` for more details). .. attention:: :term:`PETSc` requires the :term:`petsc4py` and :term:`mpi4py` interfaces. -.. note:: :term:`FiPy` does not implement any precoditioner objects for - :term:`PETSc`. Simply pass one of the `PCType strings`_ in the - `precon=` argument when declaring the solver. +.. note:: :term:`FiPy` implements some preconditioner objects for + :term:`PETSc` for consistency with the other solver suites, but you can + also simply pass one of the `PCType strings`_ in the `precon=` argument + when declaring the solver. .. 
_PCType strings: https://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/PC/PCType.html @@ -82,7 +81,11 @@ http://pysparse.sourceforge.net It provides several sparse matrix storage formats and conversion methods. It also implements a number of iterative solvers, preconditioners, and interfaces to efficient factorization packages. The only requirement to -install and use Pysparse is :term:`NumPy`. +install and use :term:`Pysparse` is :term:`NumPy`. + +.. warning:: + + :term:`Pysparse` is archaic and limited to :ref:`RunningUnderPython2`. .. warning:: @@ -108,7 +111,7 @@ PyAMG http://code.google.com/p/pyamg/ The :term:`PyAMG` package provides adaptive multigrid preconditioners that -can be used in conjunction with the :term:`SciPy` solvers. +can be used in conjunction with the SciPy_ solvers. .. _PYAMGX: @@ -132,10 +135,10 @@ Trilinos http://trilinos.sandia.gov :term:`Trilinos` provides a more complete set of solvers and -preconditioners than either :term:`Pysparse` or -:term:`SciPy`. :term:`Trilinos` preconditioning allows for iterative -solutions to some difficult problems that :term:`Pysparse` and -:term:`SciPy` cannot solve, and it enables parallel execution of +preconditioners than either Pysparse_ or +SciPy_. :term:`Trilinos` preconditioning allows for iterative +solutions to some difficult problems that Pysparse_ and +SciPy_ cannot solve, and it enables parallel execution of :term:`FiPy` (see :ref:`PARALLEL` for more details). .. attention:: @@ -191,3 +194,248 @@ solutions to some difficult problems that :term:`Pysparse` and forced the solver to stop before reaching an adequate solution. Different solvers, different preconditioners, or a less restrictive tolerance may help. + +.. _CONVERGENCE: + +----------- +Convergence +----------- + +Different solver suites take different approaches to testing convergence. +We endeavor to harmonize this behavior by allowing the strings in the +"criterion" column to be passed as an argument when instantiating a +:class:`~fipy.solvers.solver.Solver`. Convergence is detected if +``residual < tolerance * scale``. + +.. raw:: latex + + \begin{landscape} + +.. csv-table:: Residual Criteria + :file: _static/residuals.csv + :widths: auto + :header-rows: 1 + :stub-columns: 1 + :class: wideshow longtable + +.. raw:: latex + + \end{landscape} + +.. note:: PyAMG_ is a set of preconditioners applied on top of SciPy_, so + is not explicitly included in these tables. + + + +``default`` +=========== + +The setting ``criterion="default"`` applies the same scaling (``RHS``) to +all solvers. This behavior is new in version |release|; prior to that, the +default behavior was the same as ``criterion="legacy"``. + +``legacy`` +========== + +The setting ``criterion="legacy"`` restores the behavior of FiPy prior to +version |release| and is equivalent to what the particular suite and solver +does if not specifically configured. The ``legacy`` row of the table is a +best effort at documenting what will happen. + +.. note:: + + - All LU solvers use ``"initial"`` scaling. + - PySparse_ has two different groups of solvers, + with different scaling. + - PETSc_ accepts |KSP_NORM_DEFAULT|_ in order to + "use the default for the current ``KSPType``". Discerning the actual + behavior would require burning the code in a bowl of chicken entrails. + (It is reasonable to assume |KSP_NORM_PRECONDITIONED|_ for + left-preconditioned solvers and |KSP_NORM_UNPRECONDITIONED|_ otherwise; + even the PETSc_ documentation says that |KSP_NORM_NATURAL|_ is `"weird" + `_). 
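+Whatever the suite, a criterion (and tolerance) can be selected explicitly
+when the solver is instantiated.  A minimal sketch (the mesh, variable, and
+solver class here are arbitrary choices for illustration):
+
+>>> from fipy import CellVariable, Grid1D, DiffusionTerm, LinearPCGSolver
+>>> mesh = Grid1D(nx=10)
+>>> phi = CellVariable(mesh=mesh, value=0.)
+>>> phi.constrain(1., where=mesh.facesRight)
+>>> solver = LinearPCGSolver(tolerance=1e-10, criterion="initial")
+>>> DiffusionTerm().solve(var=phi, solver=solver)
+
+With ``criterion="initial"``, convergence is declared once the residual has
+been reduced by a factor of :math:`10^{10}` relative to the initial
+residual, rather than relative to :math:`\|\vec{b}\|_2`.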
+ +``absolute_tolerance`` ====================== + +PETSc_ and SciPy_ Krylov solvers accept an additional +``absolute_tolerance`` parameter, such that convergence is detected if +``residual < max(tolerance * scale, absolute_tolerance)``. + +``divergence_tolerance`` ======================== + +PETSc_ Krylov solvers accept a third ``divergence_tolerance`` parameter, +such that a divergence is detected if ``residual > divergence_tolerance * +scale``. Because of `the way the convergence test is coded +`_, +if the initial residual is much larger than the norm of the right-hand-side +vector, PETSc_ will abort with |KSP_DIVERGED_DTOL|_ without ever trying to +solve. If this occurs, either ``divergence_tolerance`` should be increased +or another convergence criterion should be used. + +.. note:: + + See :mod:`examples.diffusion.mesh1D`, + :mod:`examples.diffusion.steadyState.mesh1D.inputPeriodic`, + :mod:`examples.elphf.diffusion.mesh1D`, + :mod:`examples.elphf.phaseDiffusion`, :mod:`examples.phase.binary`, + :mod:`examples.phase.quaternary`, and + :mod:`examples.reactiveWetting.liquidVapor1D` for several examples where + :code:`criterion="initial"` is used to address this situation. + +.. note:: + + ``divergence_tolerance`` never caused a problem in previous versions of + :term:`FiPy` because the default behavior of PETSc_ is to zero out the + initial guess before trying to solve and then never do a test against + ``divergence_tolerance``. This resulted in behavior (number of + iterations and ultimate residual) that was very different from the other + solver suites and so :term:`FiPy` now directs PETSc to use the initial + guess. + +Reporting +========= + +Different solver suites also report different levels of detail about why +they succeed or fail. This information is captured as a +:class:`~fipy.solvers.convergence.Convergence` or +:class:`~fipy.solvers.convergence.Divergence` property of the +:class:`~fipy.solvers.solver.Solver` after calling +:meth:`~fipy.terms.term.Term.solve` or +:meth:`~fipy.terms.term.Term.sweep`. + +.. raw:: latex + + \begin{landscape} + +.. tabularcolumns:: \Y{.25}\Y{.10}\Y{.22}\Y{.16}\Y{.09}\Y{.06}\Y{.12} + +.. csv-table:: Convergence Status Codes + :file: _static/solver_convergence.csv + :widths: auto + :header-rows: 1 + :stub-columns: 1 + :class: wideshow longtable + +.. raw:: latex + + \end{landscape} + + +.. raw:: latex + + \begin{landscape} + +.. tabularcolumns:: \Y{.25}\Y{.10}\Y{.22}\Y{.16}\Y{.09}\Y{.06}\Y{.12} + +.. csv-table:: Divergence Status Codes + :file: _static/solver_divergence.csv + :widths: auto + :header-rows: 1 + :stub-columns: 1 + :class: wideshow longtable + +.. raw:: latex + + \end{landscape} + +.. |KSP_NORM_UNPRECONDITIONED| replace:: :literal:`KSP_NORM_UNPRECONDITIONED` +.. _KSP_NORM_UNPRECONDITIONED: https://petsc.org/main/docs/manualpages/KSP/KSP_NORM_UNPRECONDITIONED/ +.. |KSP_NORM_PRECONDITIONED| replace:: :literal:`KSP_NORM_PRECONDITIONED` +.. _KSP_NORM_PRECONDITIONED: https://petsc.org/main/docs/manualpages/KSP/KSP_NORM_PRECONDITIONED/ +.. |KSP_NORM_NATURAL| replace:: :literal:`KSP_NORM_NATURAL` +.. _KSP_NORM_NATURAL: https://petsc.org/main/docs/manualpages/KSP/KSP_NORM_NATURAL/ +.. |KSP_NORM_DEFAULT| replace:: :literal:`KSP_NORM_DEFAULT` +.. _KSP_NORM_DEFAULT: https://petsc.org/main/manualpages/KSP/KSPNormType/ + +.. [#KSP_Convergence_Tests] https://petsc.org/release/docs/manual/ksp/#sec-convergencetests + +.. 
[#AMGX_convergence] *AMGX REFERENCE MANUAL*: 2.3 General Settings: ``convergence``, + October 2017, API Version 2, + https://github.com/NVIDIA/AMGX/blob/main/doc/AMGX_Reference.pdf + +.. [#SciPy_Convergence_Test] https://github.com/scipy/scipy/blob/2d1d5b042a09e131ffe191726aa6829b33590970/scipy/sparse/linalg/_isolve/iterative.py#L30 + +.. [#AztecOO_convergence] *AztecOO Users Guide*: 3.1 Aztec Options: ``options[AZ_conv]``, + SAND REPORT SAND2004-3796, Updated August 2007, + For AztecOO Version 3.6 in Trilinos Release 8.0, + https://trilinos.github.io/pdfs/AztecOOUserGuide.pdf + +.. [#FiPy_Convergence_Test] Implemented by :term:`FiPy` using intrinsic + solver capabilities. + +.. |KSP_CONVERGED_ITS| replace:: :literal:`KSP_CONVERGED_ITS` +.. _KSP_CONVERGED_ITS: https://petsc.org/main/docs/manualpages/KSP/KSP_CONVERGED_ITS/ +.. |KSP_CONVERGED_ATOL| replace:: :literal:`KSP_CONVERGED_ATOL` +.. _KSP_CONVERGED_ATOL: https://petsc.org/main/docs/manualpages/KSP/KSP_CONVERGED_ATOL/ +.. |KSP_CONVERGED_RTOL| replace:: :literal:`KSP_CONVERGED_RTOL` +.. _KSP_CONVERGED_RTOL: https://petsc.org/main/docs/manualpages/KSP/KSP_CONVERGED_RTOL/ +.. |KSP_CONVERGED_ITERATING| replace:: :literal:`KSP_CONVERGED_ITERATING` +.. _KSP_CONVERGED_ITERATING: https://petsc.org/main/docs/manualpages/KSP/KSP_CONVERGED_ITERATING/ +.. |KSP_DIVERGED_ITS| replace:: :literal:`KSP_DIVERGED_ITS` +.. _KSP_DIVERGED_ITS: https://petsc.org/main/docs/manualpages/KSP/KSP_DIVERGED_ITS/ +.. |KSP_DIVERGED_PC_FAILED| replace:: :literal:`KSP_DIVERGED_PC_FAILED` +.. _KSP_DIVERGED_PC_FAILED: https://petsc.org/main/docs/manualpages/KSP/KSP_DIVERGED_PC_FAILED/ +.. |KSP_DIVERGED_INDEFINITE_PC| replace:: :literal:`KSP_DIVERGED_INDEFINITE_PC` +.. _KSP_DIVERGED_INDEFINITE_PC: https://petsc.org/main/docs/manualpages/KSP/KSP_DIVERGED_INDEFINITE_PC/ +.. |KSP_DIVERGED_INDEFINITE_MAT| replace:: :literal:`KSP_DIVERGED_INDEFINITE_MAT` +.. _KSP_DIVERGED_INDEFINITE_MAT: https://petsc.org/main/docs/manualpages/KSP/KSPConvergedReason/ +.. |KSP_DIVERGED_NANORINF| replace:: :literal:`KSP_DIVERGED_NANORINF` +.. _KSP_DIVERGED_NANORINF: https://petsc.org/main/docs/manualpages/KSP/KSPConvergedReason/ +.. |KSP_DIVERGED_BREAKDOWN| replace:: :literal:`KSP_DIVERGED_BREAKDOWN` +.. _KSP_DIVERGED_BREAKDOWN: https://petsc.org/main/docs/manualpages/KSP/KSP_DIVERGED_BREAKDOWN/ +.. |KSP_DIVERGED_BREAKDOWN_BICG| replace:: :literal:`KSP_DIVERGED_BREAKDOWN_BICG` +.. _KSP_DIVERGED_BREAKDOWN_BICG: https://petsc.org/main/docs/manualpages/KSP/KSP_DIVERGED_BREAKDOWN_BICG/ +.. |KSP_CONVERGED_HAPPY_BREAKDOWN| replace:: :literal:`KSP_CONVERGED_HAPPY_BREAKDOWN` +.. _KSP_CONVERGED_HAPPY_BREAKDOWN: https://petsc.org/main/docs/manualpages/KSP/KSPConvergedReason/ +.. |KSP_DIVERGED_NULL| replace:: :literal:`KSP_DIVERGED_NULL` +.. _KSP_DIVERGED_NULL: https://petsc.org/main/docs/manualpages/KSP/KSPConvergedReason/ +.. |KSP_DIVERGED_DTOL| replace:: :literal:`KSP_DIVERGED_DTOL` +.. _KSP_DIVERGED_DTOL: https://petsc.org/main/docs/manualpages/KSP/KSP_DIVERGED_DTOL/ +.. |KSP_DIVERGED_NONSYMMETRIC| replace:: :literal:`KSP_DIVERGED_NONSYMMETRIC` +.. _KSP_DIVERGED_NONSYMMETRIC: https://petsc.org/main/docs/manualpages/KSP/KSP_DIVERGED_NONSYMMETRIC/ + +.. |AMGX_SOLVE_SUCCESS| replace:: :literal:`AMGX_SOLVE_SUCCESS` +.. _AMGX_SOLVE_SUCCESS: https://github.com/NVIDIA/AMGX/blob/main/doc/AMGX_Reference.pdf +.. |AMGX_SOLVE_FAILED| replace:: :literal:`AMGX_SOLVE_FAILED` +.. _AMGX_SOLVE_FAILED: https://github.com/NVIDIA/AMGX/blob/main/doc/AMGX_Reference.pdf +.. 
|AMGX_SOLVE_DIVERGED| replace:: :literal:`AMGX_SOLVE_DIVERGED` +.. _AMGX_SOLVE_DIVERGED: https://github.com/NVIDIA/AMGX/blob/main/doc/AMGX_Reference.pdf + +.. |PySparse_2| replace:: :literal:`2` +.. _PySparse_2: http://pysparse.sourceforge.net/itsolvers.html +.. |PySparse_1| replace:: :literal:`1` +.. _PySparse_1: http://pysparse.sourceforge.net/itsolvers.html +.. |PySparse_0| replace:: :literal:`0` +.. _PySparse_0: http://pysparse.sourceforge.net/itsolvers.html +.. |PySparse_neg1| replace:: :literal:`-1` +.. _PySparse_neg1: http://pysparse.sourceforge.net/itsolvers.html +.. |PySparse_neg2| replace:: :literal:`-2` +.. _PySparse_neg2: http://pysparse.sourceforge.net/itsolvers.html +.. |PySparse_neg3| replace:: :literal:`-3` +.. _PySparse_neg3: http://pysparse.sourceforge.net/itsolvers.html +.. |PySparse_neg4| replace:: :literal:`-4` +.. _PySparse_neg4: http://pysparse.sourceforge.net/itsolvers.html +.. |PySparse_neg5| replace:: :literal:`-5` +.. _PySparse_neg5: http://pysparse.sourceforge.net/itsolvers.html +.. |PySparse_neg6| replace:: :literal:`-6` +.. _PySparse_neg6: http://pysparse.sourceforge.net/itsolvers.html + +.. |SciPy_0| replace:: :literal:`0` +.. _SciPy_0: https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.gmres.html +.. |SciPy_lt0| replace:: :literal:`<0` +.. _SciPy_lt0: https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.gmres.html +.. |SciPy_gt0| replace:: :literal:`>0` +.. _SciPy_gt0: https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.gmres.html + +.. |AZ_normal| replace:: :literal:`AZ_normal` +.. _AZ_normal: https://trilinos.github.io/pdfs/AztecOOUserGuide.pdf +.. |AZ_maxits| replace:: :literal:`AZ_maxits` +.. _AZ_maxits: https://trilinos.github.io/pdfs/AztecOOUserGuide.pdf +.. |AZ_ill_cond| replace:: :literal:`AZ_ill_cond` +.. _AZ_ill_cond: https://trilinos.github.io/pdfs/AztecOOUserGuide.pdf +.. |AZ_breakdown| replace:: :literal:`AZ_breakdown` +.. _AZ_breakdown: https://trilinos.github.io/pdfs/AztecOOUserGuide.pdf +.. |AZ_loss| replace:: :literal:`AZ_loss` +.. _AZ_loss: https://trilinos.github.io/pdfs/AztecOOUserGuide.pdf diff --git a/docs/source/USAGE.rst b/docs/source/USAGE.rst index bba37e724f..2c22b0053a 100644 --- a/docs/source/USAGE.rst +++ b/docs/source/USAGE.rst @@ -278,6 +278,14 @@ package. print a variety of diagnostic information. All other solvers should use `Logging`_ and :envvar:`FIPY_LOG_CONFIG`. +.. envvar:: FIPY_DEFAULT_CRITERION + + Changes the default solver :ref:`CONVERGENCE` criterion to the specified + value. Valid choices are "``legacy``", "``unscaled``", "``RHS``", + "``matrix``", "``initial``", "``solution``", "``preconditioned``", + "``natural``", "``default``". A value of "``default``" is admittedly + circular, but it works. + .. envvar:: FIPY_VIEWER Forces the use of the specified viewer. Valid values are any @@ -303,7 +311,7 @@ package. :ref:`PETSC` solvers in order to see what options are possible. Ignored if solver is not :ref:`PETSC`. -.. _PETSc configuration options: https://docs.petsc.org/en/latest/manual/other/#sec-options +.. _PETSc configuration options: https://petsc.org/main/manual/other/#runtime-options .. _PARALLEL: @@ -1063,13 +1071,13 @@ Thanks to the future_ package and to the contributions of pya_ and woodscn_, :term:`FiPy` runs under both :term:`Python 3` and :term:`Python` 2.7, without conversion or modification. 
-Because :term:`Python` itself will `drop support for Python 2.7 on January -1, 2020`_ and many of the prerequisites for :term:`FiPy` have `pledged to -drop support for Python 2.7 no later than 2020`_, we have prioritized adding +Because :term:`Python` itself `dropped support for Python 2.7 on January +1, 2020`_ and many of the prerequisites for :term:`FiPy` `pledged to +drop support for Python 2.7 no later than 2020`_, we prioritized adding support for better :term:`Python 3` solvers, starting with :term:`petsc4py`. -Because the faster :term:`PySparse` and :term:`Trilinos` solvers are not +Because the faster :term:`PySparse` solvers are not available under :term:`Python 3`, we will maintain :term:`Python` 2.x support as long as practical. Be aware that the conda-forge_ packages that :term:`FiPy` depends upon are not well-maintained on :term:`Python` 2.x @@ -1081,7 +1089,7 @@ become available on conda-forge_. .. _future: http://python-future.org .. _pya: https://github.com/pya .. _woodscn: https://github.com/pya -.. _drop support for Python 2.7 on January 1, 2020: https://www.python.org/dev/peps/pep-0373/#update +.. _dropped support for Python 2.7 on January 1, 2020: https://www.python.org/dev/peps/pep-0373/#update .. _pledged to drop support for Python 2.7 no later than 2020: https://python3statement.org ------ diff --git a/docs/source/_static/residuals.csv b/docs/source/_static/residuals.csv new file mode 100644 index 0000000000..682853adea --- /dev/null +++ b/docs/source/_static/residuals.csv @@ -0,0 +1,10 @@ +criterion,residual,scale,PETSc_ [#KSP_Convergence_Tests]_,pyamgx_ [#AMGX_convergence]_,PySparse_,SciPy_ [#SciPy_Convergence_Test]_,Trilinos_ [#AztecOO_convergence]_ +``unscaled``,:math:`\|\mathsf{L}\vec{x} - \vec{b}\|_2`,:math:`1`,[#FiPy_Convergence_Test]_,``ABSOLUTE``,[#FiPy_Convergence_Test]_,[#FiPy_Convergence_Test]_,``AZ_noscaled`` +``RHS``,:math:`\|\mathsf{L}\vec{x} - \vec{b}\|_2`,:math:`\|\vec{b}\|_2`,|KSP_NORM_UNPRECONDITIONED|_,[#FiPy_Convergence_Test]_,"`cgs `_, `pcg `_, `qmres `_, or [#FiPy_Convergence_Test]_",``default``,``AZ_rhs`` +``matrix``,:math:`\|\mathsf{L}\vec{x} - \vec{b}\|_2`,:math:`\|\mathsf{L}\|_\infty`,[#FiPy_Convergence_Test]_,[#FiPy_Convergence_Test]_,[#FiPy_Convergence_Test]_,[#FiPy_Convergence_Test]_,``AZ_Anorm`` +``initial``,:math:`\|\mathsf{L}\vec{x} - \vec{b}\|_2`,:math:`\|\mathsf{L}\vec{x} - \vec{b}\|_2^{(0)}`,[#FiPy_Convergence_Test]_,``RELATIVE_INI_CORE``,"`bicgstab `_, `gmres `_, `minres `_, or [#FiPy_Convergence_Test]_",[#FiPy_Convergence_Test]_,``AZ_r0`` +``solution``,:math:`\|\mathsf{L}\vec{x} - \vec{b}\|_\infty`,:math:`\|\mathsf{L}\|_\infty * \|\vec{x}\|_1 + \|\vec{b}\|_\infty`,,,,,``AZ_sol`` +``preconditioned``,:math:`\left\|\mathsf{P}^{-1}(\mathsf{L}\vec{x} - \vec{b})\right\|_2`,:math:`\left\|\vec{b}\right\|_2`,|KSP_NORM_PRECONDITIONED|_,,,, +``natural``,:math:`\sqrt{(\mathsf{L}\vec{x} - \vec{b})\mathsf{P}^{-1}(\mathsf{L}\vec{x} - \vec{b})}`,:math:`\left\|\vec{b}\right\|_2`,|KSP_NORM_NATURAL|_,,,, +``legacy``,,,|KSP_NORM_DEFAULT|_ (``RHS`` or ``preconditioned``),``initial``,``RHS`` or ``initial``,``RHS``,``initial`` +``default``,,,``RHS``,``RHS``,``RHS``,``RHS``,``RHS`` diff --git a/docs/source/_static/solver_convergence.csv b/docs/source/_static/solver_convergence.csv new file mode 100644 index 0000000000..39712ca359 --- /dev/null +++ b/docs/source/_static/solver_convergence.csv @@ -0,0 +1,9 @@ +,,PETSc_,pyamgx_,PySparse_,SciPy_,Trilinos_ +:class:`~fipy.solvers.convergence.Convergence`,Convergence criteria 
met.,,|AMGX_SOLVE_SUCCESS|_,,|SciPy_0|_,|AZ_normal|_ +:class:`~fipy.solvers.convergence.IterationConvergence`,Requested iterations complete (and no residual calculated).,|KSP_CONVERGED_ITS|_,,,, +:class:`~fipy.solvers.convergence.AbsoluteToleranceConvergence`,"Converged, residual is as small as seems reasonable on this machine.",|KSP_CONVERGED_ATOL|_,,|PySparse_2|_,, +:class:`~fipy.solvers.convergence.RHSZeroConvergence`,"Converged, :math:`\mathbf{b} = 0`, so the exact solution is :math:`\mathbf{x} = 0`.",,,|PySparse_1|_,, +:class:`~fipy.solvers.convergence.RelativeToleranceConvergence`,"Converged, relative error appears to be less than tolerance.",|KSP_CONVERGED_RTOL|_,,|PySparse_0|_,, +:class:`~fipy.solvers.convergence.HappyBreakdownConvergence`,"""Exact"" solution found and more iterations will just make things worse.",|KSP_CONVERGED_HAPPY_BREAKDOWN|_,,,, +:class:`~fipy.solvers.convergence.LossOfAccuracyConvergence`,The iterative solver has terminated due to a lack of accuracy in the recursive residual (caused by rounding errors).,,,,,|AZ_loss|_ +:class:`~fipy.solvers.convergence.IteratingConvergence`,Solve still in progress.,|KSP_CONVERGED_ITERATING|_,,,, diff --git a/docs/source/_static/solver_divergence.csv b/docs/source/_static/solver_divergence.csv new file mode 100644 index 0000000000..680d422753 --- /dev/null +++ b/docs/source/_static/solver_divergence.csv @@ -0,0 +1,10 @@ +,,PETSc_,pyamgx_,PySparse_,SciPy_,Trilinos_ +:class:`~fipy.solvers.convergence.BreakdownDivergence`,Illegal input or the iterative solver has broken down.,|KSP_DIVERGED_BREAKDOWN|_,|AMGX_SOLVE_FAILED|_,,|SciPy_lt0|_,|AZ_breakdown|_ +:class:`~fipy.solvers.convergence.IterationDivergence`,Maximum number of iterations was reached.,|KSP_DIVERGED_ITS|_,|AMGX_SOLVE_DIVERGED|_,|PySparse_neg1|_,|SciPy_gt0|_,|AZ_maxits|_ +:class:`~fipy.solvers.convergence.PreconditioningDivergence`,The system involving the preconditioner was ill-conditioned.,|KSP_DIVERGED_PC_FAILED|_,,|PySparse_neg2|_,, +:class:`~fipy.solvers.convergence.IllConditionedPreconditionerDivergence`,"An inner product of the form :math:`\mathbf{x}^T \mathsf{P}^{-1} \mathbf{x}` was not positive, so the preconditioning matrix :math:`\mathsf{P}` does not appear to be positive definite.",|KSP_DIVERGED_INDEFINITE_PC|_,,|PySparse_neg3|_,, +:class:`~fipy.solvers.convergence.IllConditionedDivergence`,The matrix :math:`\mathsf{L}` appears to be ill-conditioned.,|KSP_DIVERGED_INDEFINITE_MAT|_,,|PySparse_neg4|_,,|AZ_ill_cond|_ +:class:`~fipy.solvers.convergence.StagnatedDivergence`,The method stagnated.,,,|PySparse_neg5|_,, +:class:`~fipy.solvers.convergence.OutOfRangeDivergence`,A scalar quantity became too small or too large to continue computing.,|KSP_DIVERGED_NANORINF|_,,|PySparse_neg6|_,, +:class:`~fipy.solvers.convergence.NullDivergence`,Breakdown when solving the Hessenberg system within GMRES.,|KSP_DIVERGED_NULL|_,,,, +:class:`~fipy.solvers.convergence.ToleranceDivergence`,The residual norm increased by a factor of ``divtol``.,|KSP_DIVERGED_DTOL|_,,,, diff --git a/docs/source/_static/widetable.css b/docs/source/_static/widetable.css new file mode 100644 index 0000000000..de1a665bad --- /dev/null +++ b/docs/source/_static/widetable.css @@ -0,0 +1,38 @@ +/* + * Wide tables are unclickable, apparently because they're under the sidebar. + * + * Adapted from: + * https://groups.google.com/g/sphinx-users/c/ZV8gGObVrQU + * https://blogs.perficient.com/2021/01/18/freezing-row-and-column-in-html-table-using-css/ + */ + +/* + * Make wide table horizontally scrollable. 
+ * This has significant UI issues. + */ +table.widescroll { + display: block; + overflow: auto; +} + +table.widescroll th.stub { + left: 0; + position: sticky; + z-index: 50; +} + +table.widescroll caption span.caption-text { + left: 0; + position: sticky; +} + +/* Move wide table above whatever is blocking it. */ +table.wideshow { + /* + * "The `position: static` property prevents z-index from having an effect." + * but where does `position: static` come from? + */ + position: relative; + z-index: 25; +} + diff --git a/docs/source/conf.py b/docs/source/conf.py index 765e7f799b..d4c92bf604 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -130,8 +130,8 @@ # [@MadPhysicist](https://stackoverflow.com/users/2988730/mad-physicist) # [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/) # https://stackoverflow.com/a/66182779 -napoleon_use_param = True -# napoleon_preprocess_types = True +# Corrected for https://github.com/sphinx-doc/sphinx/issues/10963 +napoleon_preprocess_types = True napoleon_type_aliases = { 'array-like': ':term:`array-like `', 'array_like': ':term:`array_like`' @@ -244,6 +244,8 @@ \definecolor{bluish}{rgb}{0.216,0.188,0.533} \newcommand{\fipylogo}{\scalebox{10}{\rotatebox{4}{\textcolor{redish}{\( \varphi \)}}\kern-.70em\raisebox{-.15em}{\textcolor{bluish}{\( \pi\)}}}} + + \usepackage{pdflscape} """, 'printindex': r'\footnotesize\raggedright\printindex', } @@ -332,6 +334,7 @@ def autodoc_skip_member(app, what, name, obj, skip, options): def setup(app): app.connect('autodoc-skip-member', autodoc_skip_member) + app.add_css_file('widetable.css') # lifted from astropy/astropy@e68ca1a1 diff --git a/examples/cahnHilliard/mesh2D.py b/examples/cahnHilliard/mesh2D.py index 317ea172e6..130ad3e134 100755 --- a/examples/cahnHilliard/mesh2D.py +++ b/examples/cahnHilliard/mesh2D.py @@ -84,10 +84,7 @@ >>> dexp = -5 >>> elapsed = 0. ->>> if __name__ == "__main__": -... duration = 1000. -... else: -... duration = 1000. +>>> duration = 1000. >>> while elapsed < duration: ... dt = min(100, numerix.exp(dexp)) @@ -118,4 +115,3 @@ exec(fipy.tests.doctestPlus._getScript()) input('finished') - diff --git a/examples/cahnHilliard/tanh1D.py b/examples/cahnHilliard/tanh1D.py index f772b3d6e5..157dcd779c 100755 --- a/examples/cahnHilliard/tanh1D.py +++ b/examples/cahnHilliard/tanh1D.py @@ -103,11 +103,11 @@ single: LinearLUSolver single: DefaultSolver ->>> import fipy.solvers.solver ->>> if fipy.solvers.solver_suite in ['pysparse', 'pyamgx']: +>>> from fipy.solvers import solver_suite +>>> if solver_suite in ['pysparse']: ... solver = LinearLUSolver(tolerance=1e-15, iterations=100) ... else: -... solver = DefaultSolver() +... solver = DefaultSolver(tolerance=1e-15, iterations=100) The solution to this 1D problem over an infinite domain is given by, diff --git a/examples/convection/exponential1D/tri2D.py b/examples/convection/exponential1D/tri2D.py index 4c2cedd072..de00c55c5d 100755 --- a/examples/convection/exponential1D/tri2D.py +++ b/examples/convection/exponential1D/tri2D.py @@ -34,8 +34,10 @@ >>> eq = (DiffusionTerm(coeff=diffCoeff) ... + ExponentialConvectionTerm(coeff=convCoeff)) ->>> eq.solve(var = var, -... solver=DefaultAsymmetricSolver(iterations=10000)) +It should be possible to drive this steady-state solution to high accuracy. + +>>> solver = DefaultAsymmetricSolver(tolerance=1e-10, iterations=10000) +>>> eq.solve(var=var, solver=solver) The analytical solution test for this problem is given by: @@ -45,8 +47,8 @@ >>> DD = 1. 
- numerix.exp(-convCoeff[axis] * L / diffCoeff) >>> analyticalArray = CC / DD ->>> print(var.allclose(analyticalArray, rtol = 1e-6, atol = 1e-6)) -1 +>>> print(var.allclose(analyticalArray, rtol=1e-6, atol=1e-6)) +True >>> if __name__ == '__main__': ... viewer = Viewer(vars = var) diff --git a/examples/convection/exponential1DSource/tri2D.py b/examples/convection/exponential1DSource/tri2D.py index fe0146dcfc..83b0a8dc57 100755 --- a/examples/convection/exponential1DSource/tri2D.py +++ b/examples/convection/exponential1DSource/tri2D.py @@ -40,7 +40,7 @@ ... - ExponentialConvectionTerm(coeff = convCoeff)) >>> eq.solve(var=var, -... solver=DefaultAsymmetricSolver(tolerance=1.e-15, iterations=10000)) +... solver=DefaultAsymmetricSolver(tolerance=1.e-8, iterations=10000)) The analytical solution test for this problem is given by: diff --git a/examples/convection/exponential2D/tri2D.py b/examples/convection/exponential2D/tri2D.py index 9a5966993a..abaed7785e 100755 --- a/examples/convection/exponential2D/tri2D.py +++ b/examples/convection/exponential2D/tri2D.py @@ -27,7 +27,10 @@ >>> eq = (DiffusionTerm(coeff=diffCoeff) ... + ExponentialConvectionTerm(coeff=convCoeff)) ->>> eq.solve(var = var) +It should be possible to drive this steady-state solution to high accuracy. + +>>> solver = eq.getDefaultSolver(tolerance=1e-10) +>>> eq.solve(var=var, solver=solver) The analytical solution test for this problem is given by: @@ -36,8 +39,8 @@ >>> CC = 1. - numerix.exp(-convCoeff[axis] * x / diffCoeff) >>> DD = 1. - numerix.exp(-convCoeff[axis] * L / diffCoeff) >>> analyticalArray = CC / DD ->>> print(var.allclose(analyticalArray, rtol = 1e-10, atol = 1e-10)) -1 +>>> print(var.allclose(analyticalArray, rtol=1e-10, atol=1e-10)) +True >>> if __name__ == '__main__': ... viewer = Viewer(vars = var) diff --git a/examples/diffusion/mesh1D.py b/examples/diffusion/mesh1D.py index 6c64b8a65d..a630463b2f 100755 --- a/examples/diffusion/mesh1D.py +++ b/examples/diffusion/mesh1D.py @@ -708,7 +708,7 @@ and see that :math:`\phi` dissipates to the expected average value of 0.2 with reasonable accuracy. ->>> print(numerix.allclose(phi, 0.2, atol=1e-5)) +>>> print(numerix.allclose(phi, 0.2, atol=4e-5)) True If we reset the initial condition @@ -788,7 +788,13 @@ >>> if __name__ == '__main__': ... viewer.plot() ->>> (TransientTerm() == DiffusionTerm(D)).solve(var=phi, dt=1e6*dt) +>>> eq = (TransientTerm() == DiffusionTerm(D)) + +The initial residual is much larger than the norm of the right-hand-side +vector, so we use `"initial"` tolerance scaling. + +>>> solver = eq.getDefaultSolver(criterion="initial") +>>> eq.solve(var=phi, dt=1e6*dt, solver=solver) >>> if __name__ == '__main__': ... viewer.plot() >>> from fipy import input diff --git a/examples/diffusion/nthOrder/input4thOrder1D.py b/examples/diffusion/nthOrder/input4thOrder1D.py index ce88461f5f..25deecfe6b 100755 --- a/examples/diffusion/nthOrder/input4thOrder1D.py +++ b/examples/diffusion/nthOrder/input4thOrder1D.py @@ -15,7 +15,7 @@ .. index:: single: Grid1D ->>> from fipy import CellVariable, Grid1D, NthOrderBoundaryCondition, DiffusionTerm, Viewer, GeneralSolver +>>> from fipy import CellVariable, Grid1D, NthOrderBoundaryCondition, DiffusionTerm, Viewer, LinearLUSolver >>> nx = 500 >>> dx = L / nx @@ -56,11 +56,7 @@ >>> eq = DiffusionTerm(coeff=(1, 1)) == 0 ->>> import fipy.solvers.solver ->>> if fipy.solvers.solver_suite == 'petsc': -... solver = GeneralSolver(precon='lu') -... else: -... 
solver = GeneralSolver() +>>> solver = LinearLUSolver() We perform one implicit timestep to achieve steady state diff --git a/examples/diffusion/steadyState/mesh1D/inputPeriodic.py b/examples/diffusion/steadyState/mesh1D/inputPeriodic.py index 7021b58513..87314dcdff 100755 --- a/examples/diffusion/steadyState/mesh1D/inputPeriodic.py +++ b/examples/diffusion/steadyState/mesh1D/inputPeriodic.py @@ -33,7 +33,13 @@ solver has no fixed value and can become unstable. >>> eq = TransientTerm(coeff=1e-8) - DiffusionTerm() ->>> eq.solve(var=var, dt=1.) + +The initial residual is much larger than the norm of the right-hand-side +vector, so we use `"initial"` tolerance scaling with a tolerance that will +drive to an accurate solution. + +>>> solver = eq.getDefaultSolver(criterion="initial", tolerance=1e-8) +>>> eq.solve(var=var, dt=1., solver=solver) >>> if __name__ == '__main__': ... viewer.plot() @@ -41,7 +47,7 @@ The result of the calculation will be the average value over the domain. >>> print(var.allclose((valueLeft + valueRight) / 2., rtol = 1e-5)) -1 +True """ from __future__ import unicode_literals diff --git a/examples/diffusion/steadyState/mesh1D/tri2Dinput.py b/examples/diffusion/steadyState/mesh1D/tri2Dinput.py index 1f18b22ddc..edb011cd58 100755 --- a/examples/diffusion/steadyState/mesh1D/tri2Dinput.py +++ b/examples/diffusion/steadyState/mesh1D/tri2Dinput.py @@ -13,9 +13,12 @@ :func:`~fipy.meshes.factoryMeshes.Grid1D` object. Here, one time step is executed to implicitly find the steady state -solution. +solution. We tighten the solver tolerance from the default +:math:`10^{-5}` in order to achieve a good solution. - >>> DiffusionTerm().solve(var) + >>> eq = DiffusionTerm() + >>> solver = eq.getDefaultSolver(tolerance=1e-8) + >>> eq.solve(var, solver=solver) To test the solution, the analytical result is required. The `x` coordinates from the mesh are gathered and the length of the domain, @@ -30,7 +33,7 @@ tolerance of `1e-10`. >>> print(var.allclose(analyticalArray)) - 1 + True """ from __future__ import print_function diff --git a/examples/diffusion/steadyState/mesh20x20/tri2Dinput.py b/examples/diffusion/steadyState/mesh20x20/tri2Dinput.py index 541de34846..4958be496f 100755 --- a/examples/diffusion/steadyState/mesh20x20/tri2Dinput.py +++ b/examples/diffusion/steadyState/mesh20x20/tri2Dinput.py @@ -1,8 +1,12 @@ """ This input file again solves a 2D diffusion problem on a triangular mesh. +We tighten the solver tolerance from the default :math:`10^{-5}` in order +to achieve a good solution. 
- >>> DiffusionTerm().solve(var) + >>> eq = DiffusionTerm() + >>> solver = eq.getDefaultSolver(tolerance=1e-10) + >>> eq.solve(var, solver=solver) The result is again tested in the same way: @@ -10,7 +14,7 @@ >>> x = mesh.cellCenters[0] >>> analyticalArray = valueLeft + (valueRight - valueLeft) * x / Lx >>> print(var.allclose(analyticalArray, rtol = 1e-8)) - 1 + True """ from __future__ import unicode_literals diff --git a/examples/diffusion/steadyState/mesh50x50/input.py b/examples/diffusion/steadyState/mesh50x50/input.py index af3fd9e117..d97fc54808 100755 --- a/examples/diffusion/steadyState/mesh50x50/input.py +++ b/examples/diffusion/steadyState/mesh50x50/input.py @@ -6,12 +6,14 @@ The result is again tested in the same way: - >>> DiffusionTerm().solve(var) + >>> eq = DiffusionTerm() + >>> solver = eq.getDefaultSolver(tolerance=1e-10) + >>> eq.solve(var, solver=solver) >>> Lx = nx * dx >>> x = mesh.cellCenters[0] >>> analyticalArray = valueLeft + (valueRight - valueLeft) * x / Lx >>> print(var.allclose(analyticalArray, rtol = 1e-9)) - 1 + True """ from __future__ import unicode_literals diff --git a/examples/diffusion/steadyState/mesh50x50/tri2Dinput.py b/examples/diffusion/steadyState/mesh50x50/tri2Dinput.py index 99a5314a46..c21858a940 100755 --- a/examples/diffusion/steadyState/mesh50x50/tri2Dinput.py +++ b/examples/diffusion/steadyState/mesh50x50/tri2Dinput.py @@ -12,12 +12,14 @@ The result is again tested in the same way: - >>> DiffusionTerm().solve(var) + >>> eq = DiffusionTerm() + >>> solver = eq.getDefaultSolver(tolerance=1e-10) + >>> eq.solve(var, solver=solver) >>> Lx = nx * dx >>> x = mesh.cellCenters[0] >>> analyticalArray = valueLeft + (valueRight - valueLeft) * x / Lx >>> print(var.allclose(analyticalArray, atol = 1e-7)) - 1 + True """ from __future__ import unicode_literals diff --git a/examples/diffusion/steadyState/otherMeshes/grid3Dinput.py b/examples/diffusion/steadyState/otherMeshes/grid3Dinput.py index 56b7630257..08e5c5fc14 100755 --- a/examples/diffusion/steadyState/otherMeshes/grid3Dinput.py +++ b/examples/diffusion/steadyState/otherMeshes/grid3Dinput.py @@ -4,13 +4,17 @@ """ Test case for the `Grid3D`. - >>> DiffusionTerm().solve(var) - >>> DiffusionTerm().solve(var2) + >>> eq3D = DiffusionTerm() + >>> solver = eq3D.getDefaultSolver(tolerance=1e-6) + >>> eq3D.solve(var, solver=solver) + >>> eq2D = DiffusionTerm() + >>> solver = eq2D.getDefaultSolver(tolerance=1e-6) + >>> eq2D.solve(var2, solver=solver) >>> a = numerix.array(var.globalValue) >>> b = numerix.array(var2.globalValue) >>> c = numerix.ravel(numerix.array((b, b, b))) >>> print(numerix.allclose(a, c)) - 1 + True """ from __future__ import unicode_literals diff --git a/examples/diffusion/variable.py b/examples/diffusion/variable.py index 49a9e12514..8fc831140c 100755 --- a/examples/diffusion/variable.py +++ b/examples/diffusion/variable.py @@ -12,8 +12,8 @@ >>> x = mesh.cellCenters[0] >>> values = numerix.where(x < 3. * L / 4., 10 * x - 9. * L / 4., x + 18. * L / 4.) 
>>> values = numerix.where(x < L / 4., x, values) - >>> print(var.allclose(values, atol = 1e-8, rtol = 1e-8)) - 1 + >>> print(var.allclose(values)) + True """ from __future__ import division diff --git a/examples/elphf/diffusion/mesh1D.py b/examples/elphf/diffusion/mesh1D.py index 355e628246..1ce7bb8f7a 100755 --- a/examples/elphf/diffusion/mesh1D.py +++ b/examples/elphf/diffusion/mesh1D.py @@ -77,7 +77,12 @@ >>> substitutionals[1].setValue(0.6) >>> substitutionals[1].setValue(0.3, where=x > L / 2) -We create one diffusion equation for each substitutional component +We create one diffusion equation for each substitutional component. The +initial residual is much larger than the norm of the right-hand-side +vector, so we use `"initial"` tolerance scaling with a tolerance and +preconditioner that will drive to an accurate solution. + +>>> from fipy import solver_suite >>> for Cj in substitutionals: ... CkSum = ComponentVariable(mesh = mesh, value = 0.) @@ -92,7 +97,12 @@ ... Cj.equation = (TransientTerm() ... == DiffusionTerm(coeff=Cj.diffusivity) ... + PowerLawConvectionTerm(coeff=convectionCoeff)) -... Cj.solver = DefaultAsymmetricSolver(precon=None, iterations=3200) +... if solver_suite in ["trilinos", "no-pysparse"]: +... from fipy import ILUPreconditioner +... preconditioner = ILUPreconditioner() +... else: +... preconditioner = "default" +... Cj.solver = DefaultAsymmetricSolver(criterion="initial", precon=preconditioner, iterations=3200) If we are running interactively, we create a viewer to see the results diff --git a/examples/elphf/phaseDiffusion.py b/examples/elphf/phaseDiffusion.py index 8b5cd03120..e761779711 100755 --- a/examples/elphf/phaseDiffusion.py +++ b/examples/elphf/phaseDiffusion.py @@ -82,7 +82,9 @@ We create the phase equation as in :mod:`examples.elphf.phase` and create the diffusion equations for the different species as in -:mod:`examples.elphf.diffusion.mesh1D` +:mod:`examples.elphf.diffusion.mesh1D`. The initial residual of the +diffusion equations is much larger than the norm of the right-hand-side +vector, so we use `"initial"` tolerance scaling for those equations >>> def makeEquations(phase, substitutionals, interstitials): ... phase.equation = TransientTerm(coeff = 1/phase.mobility) \ @@ -139,6 +141,9 @@ ... Cj.equation = (TransientTerm() ... == DiffusionTerm(coeff=Cj.diffusivity) ... + PowerLawConvectionTerm(coeff=convectionCoeff)) +... +... for Cj in substitutionals + interstitials: +... Cj.solver = Cj.equation.getDefaultSolver(criterion="initial") >>> makeEquations(phase, substitutionals, interstitials) @@ -180,8 +185,9 @@ ... field.updateOld() ... phase.equation.solve(var = phase, dt = dt) ... for field in substitutionals + interstitials: -... field.equation.solve(var = field, -... dt = dt) +... field.equation.solve(var=field, +... dt=dt, +... solver=field.solver) ... if __name__ == '__main__': ... viewer.plot() @@ -262,8 +268,9 @@ ... field.updateOld() ... phase.equation.solve(var = phase, dt = dt) ... for field in substitutionals + interstitials: -... field.equation.solve(var = field, -... dt = dt) +... field.equation.solve(var=field, +... dt=dt, +... solver=field.solver) ... if __name__ == '__main__': ... viewer.plot() @@ -354,8 +361,9 @@ ... field.updateOld() ... phase.equation.solve(var = phase, dt = dt) ... for field in substitutionals + interstitials: -... field.equation.solve(var = field, -... dt = dt) +... field.equation.solve(var=field, +... dt=dt, +... solver=field.solver) ... if __name__ == '__main__': ... 
viewer.plot() diff --git a/examples/flow/stokesCavity.py b/examples/flow/stokesCavity.py index 457bddfeec..339e820d14 100755 --- a/examples/flow/stokesCavity.py +++ b/examples/flow/stokesCavity.py @@ -232,6 +232,11 @@ factor to relax the solution. This argument cannot be passed to :meth:`~fipy.terms.term.Term.solve`. +The pressure corrector needs to work a bit harder than the other equations, so +we allow it to iterate longer to drive to a more demanding tolerance. + +>>> solver = pressureCorrectionEq.getDefaultSolver(tolerance=1e-10, iterations=2000) + .. index:: single: sweep single: cacheMatrix @@ -273,7 +278,7 @@ ... ## solve the pressure correction equation ... pressureCorrectionEq.cacheRHSvector() ... ## left bottom point must remain at pressure 0, so no correction -... pres = pressureCorrectionEq.sweep(var=pressureCorrection) +... pres = pressureCorrectionEq.sweep(var=pressureCorrection, solver=solver) ... rhs = pressureCorrectionEq.RHSvector ... ... ## update the pressure using the corrected value diff --git a/examples/levelSet/electroChem/adsorbingSurfactantEquation.py b/examples/levelSet/electroChem/adsorbingSurfactantEquation.py index 81a6473819..f9535d9a02 100755 --- a/examples/levelSet/electroChem/adsorbingSurfactantEquation.py +++ b/examples/levelSet/electroChem/adsorbingSurfactantEquation.py @@ -296,7 +296,7 @@ def solve(self, var, boundaryConditions=(), solver=None, dt=None): var : ~fipy.variables.surfactantVariable.SurfactantVariable A `SurfactantVariable` to be solved for. Provides the initial condition, the old value and holds the solution on completion. - solver : ~f[py.solvers.solver.Solver + solver : ~fipy.solvers.solver.Solver The iterative solver to be used to solve the linear system of equations. boundaryConditions : :obj:`tuple` of ~fipy.boundaryConditions.boundaryCondition.BoundaryCondition diff --git a/examples/phase/binary.py b/examples/phase/binary.py index 3a3471437e..286d6985a6 100755 --- a/examples/phase/binary.py +++ b/examples/phase/binary.py @@ -497,8 +497,10 @@ def deltaChemPot(phase, C, T): We now use the ":meth:`~fipy.terms.term.Term.sweep`" method instead of ":meth:`~fipy.terms.term.Term.solve`" because we require the residual. +The initial residual of the diffusion equation is much larger than the norm +of the right-hand-side vector, so we use `"initial"` tolerance scaling. ->>> solver = DefaultAsymmetricSolver(tolerance=1e-10) +>>> solver = DefaultAsymmetricSolver(criterion="initial") >>> phase.updateOld() >>> C.updateOld() diff --git a/examples/phase/impingement/mesh40x1.py b/examples/phase/impingement/mesh40x1.py index f21f68637e..47bc929014 100755 --- a/examples/phase/impingement/mesh40x1.py +++ b/examples/phase/impingement/mesh40x1.py @@ -188,12 +188,17 @@ we iterate the solution in time, plotting as we go if running interactively, +The ``theta`` equation is a bit more sensitive than the ``phase`` equation, +so we require a stricter tolerance. + +>>> solver = thetaEq.getDefaultSolver(tolerance=1e-7) + >>> steps = 10 >>> from builtins import range >>> for i in range(steps): ... theta.updateOld() -... thetaEq.solve(theta, dt = timeStepDuration) -... phaseEq.solve(phase, dt = timeStepDuration) +... thetaEq.solve(theta, dt=timeStepDuration, solver=solver) +... phaseEq.solve(phase, dt=timeStepDuration) ... if __name__ == '__main__': ... phaseViewer.plot() ... 
thetaProductViewer.plot() @@ -211,7 +216,7 @@ >>> testData = numerix.loadtxt(os.path.splitext(__file__)[0] + text_to_native_str('.gz')) >>> testData = CellVariable(mesh=mesh, value=testData) >>> print(theta.allclose(testData)) -1 +True """ from __future__ import unicode_literals __docformat__ = 'restructuredtext' diff --git a/examples/phase/quaternary.py b/examples/phase/quaternary.py index dd9b98fbf9..81a462e52c 100755 --- a/examples/phase/quaternary.py +++ b/examples/phase/quaternary.py @@ -354,12 +354,14 @@ ... datamin=0, datamax=1) ... viewer.plot() -and again iterate to equilibrium +and again iterate to equilibrium. The initial residual is much larger than +the norm of the right-hand-side vector, so we use `"initial"` tolerance +scaling. .. index:: single: DefaultAsymmetricSolver ->>> solver = DefaultAsymmetricSolver(tolerance=1e-10) +>>> solver = DefaultAsymmetricSolver(criterion="initial") >>> dt = 10000 diff --git a/examples/phase/test.py b/examples/phase/test.py index cb67446b91..b42d930c8e 100755 --- a/examples/phase/test.py +++ b/examples/phase/test.py @@ -14,7 +14,6 @@ def _suite(): 'quaternary', 'simple', 'symmetry', - 'binary', 'binaryCoupled', 'polyxtal', 'polyxtalCoupled' diff --git a/examples/reactiveWetting/liquidVapor1D.py b/examples/reactiveWetting/liquidVapor1D.py index b8f6715743..3a268bbcf6 100755 --- a/examples/reactiveWetting/liquidVapor1D.py +++ b/examples/reactiveWetting/liquidVapor1D.py @@ -276,6 +276,25 @@ elegantly by calling ``cacheMatrix()`` only on the necessary part of the equation. This currently doesn't work properly in :term:`FiPy`. +Beginning with :term:`FiPy` :error:`4`, solver :ref:`CONVERGENCE` +tolerance is normalized by the magnitude of the right-hand-side (RHS) +vector. For this particular problem, the initial residual is much smaller +than the RHS and so the solver gets "stuck". Changing the normalization to +use the initial residual at the beginning of each sweep allows the solution +to progress. Another option would be to scale the tolerance appropriately, +but the value is so small (:math:`\sim 10^{-22}`) that this results in +underflow issues for some solvers. + +>>> solver = coupledEqn.getDefaultSolver(criterion="initial", tolerance=1e-12) + +.. note:: + + :ref:`PETSC` intrinsically wants to use `"preconditioned"` + normalization, which prevents the solver from getting "stuck" because + the preconditioner effectively scales the RHS to be similar in magnitude + to the residual. Unfortunately, this normalization is not available for + the other solver suites, so we don't use it as the default. + >>> while timestep < totalSteps: ... ... sweep = 0 @@ -296,13 +315,13 @@ ... dt = min(dt, dx / max(abs(velocity)) * cfl) ... ... coupledEqn.cacheMatrix() -... residual = coupledEqn.sweep(dt=dt) -... +... residual = coupledEqn.sweep(dt=dt, solver=solver) +... ... if initialResidual is None: ... initialResidual = residual ... ... residual = residual / initialResidual -... +... ... if residual > previousResidual * 1.1 or sweep > 20: ... density[:] = density.old ... velocity[:] = velocity.old diff --git a/examples/reactiveWetting/liquidVapor2D.py b/examples/reactiveWetting/liquidVapor2D.py index a431e66a9b..ba4ec3f464 100755 --- a/examples/reactiveWetting/liquidVapor2D.py +++ b/examples/reactiveWetting/liquidVapor2D.py @@ -116,6 +116,31 @@ ... totalSteps = 1 ... totalSweeps = 1 +Beginning with :term:`FiPy` :error:`4`, solver :ref:`CONVERGENCE` +tolerance is normalized by the magnitude of the right-hand-side (RHS) +vector. 
For this particular problem, the initial residual is much smaller +than the RHS and so the solver gets "stuck". Changing the normalization to +use the initial residual at the beginning of each sweep allows the solution +to progress. + +>>> from fipy.solvers import solver_suite +>>> if solver_suite == "petsc": +... # PETSc's default ILU preconditioner does not behave well +... # for this problem +... from fipy import SSORPreconditioner +... precon = SSORPreconditioner() +... else: +... precon = "default" +>>> solver = coupledEqn.getDefaultSolver(criterion="initial", precon=precon) + +.. note:: + + :ref:`PETSC` intrinsically wants to use `"preconditioned"` + normalization, which prevents the solver from getting "stuck" because + the preconditioner effectively scales the RHS to be similar in magnitude + to the residual. Unfortunately, this normalization is not available for + the other solver suites, so we don't use it as the default. + >>> while timestep < totalSteps: ... ... sweep = 0 @@ -140,7 +165,7 @@ ... dt = min(dt, dx / max(abs(velocityVector.mag)) * cfl) ... ... coupledEqn.cacheMatrix() -... residual = coupledEqn.sweep(dt=dt) +... residual = coupledEqn.sweep(dt=dt, solver=solver) ... ... if initialResidual is None: ... initialResidual = residual diff --git a/fipy/__init__.py b/fipy/__init__.py index dc2635e1b5..3f3ac5613b 100644 --- a/fipy/__init__.py +++ b/fipy/__init__.py @@ -41,12 +41,6 @@ import os import sys -# log uncaught exceptions -def _excepthook(*args): - _log.error('Uncaught exception:', exc_info=args) - -sys.excepthook = _excepthook - # configure logging before doing anything else, otherwise we'll miss things if 'FIPY_LOG_CONFIG' in os.environ: with open(os.environ['FIPY_LOG_CONFIG'], mode='r') as fp: diff --git a/fipy/matrices/offsetSparseMatrix.py b/fipy/matrices/offsetSparseMatrix.py index ce62dd42f4..c6ae8e46aa 100644 --- a/fipy/matrices/offsetSparseMatrix.py +++ b/fipy/matrices/offsetSparseMatrix.py @@ -5,18 +5,21 @@ from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -def OffsetSparseMatrix(SparseMatrix, numberOfVariables, numberOfEquations): +def OffsetSparseMatrix(SparseMatrix, + numberOfVariables, numberOfEquations, + equationIndex, varIndex): """ Used in binary terms. `equationIndex` and `varIndex` need to be set statically before instantiation. """ class OffsetSparseMatrixClass(SparseMatrix): - equationIndex = 0 - varIndex = 0 - def __init__(self, mesh, nonZerosPerRow=1, exactNonZeros=False, numberOfVariables=numberOfVariables, - numberOfEquations=numberOfEquations): + numberOfEquations=numberOfEquations, + equationIndex=equationIndex, + varIndex=varIndex): + self.equationIndex = equationIndex + self.varIndex = varIndex if hasattr(nonZerosPerRow, "__iter__"): # nonZerosPerRow is an iterable for each row. # need to pad rows for other equations with zeros. 
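As a rough numerical illustration (not :term:`FiPy`'s actual convergence
code; the matrix and vectors here are invented) of why the
``criterion="initial"`` scaling used in the reactive wetting examples above
helps when the right-hand-side norm is tiny, compare the two stopping
targets::

    import numpy as np

    L = np.array([[ 2., -1.],
                  [-1.,  2.]])
    b = np.array([1.e-22, 0.])       # nearly homogeneous right-hand side
    x0 = np.array([1., -1.])         # nonzero initial guess
    r0 = np.linalg.norm(b - L.dot(x0))
    tolerance = 1e-10

    print("RHS-scaled target:    ", tolerance * np.linalg.norm(b))  # ~1e-32
    print("initial-scaled target:", tolerance * r0)                 # ~4e-10

The ``RHS``-scaled target is far below what double-precision round-off
allows the residual to reach, while the ``initial``-scaled target simply
asks for ten orders of magnitude of improvement over the starting residual.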
diff --git a/fipy/matrices/petscMatrix.py b/fipy/matrices/petscMatrix.py index 4dab46215f..1d0707fcd6 100644 --- a/fipy/matrices/petscMatrix.py +++ b/fipy/matrices/petscMatrix.py @@ -14,6 +14,8 @@ class _PETScMatrix(_SparseMatrix): + INDEX_TYPE = PETSc.IntType + def __init__(self, matrix): """Creates a wrapper for a PETSc matrix @@ -47,7 +49,7 @@ def __iadd__(self, other): self.matrix.assemble() if other != 0: other.matrix.assemble() - self.matrix = self.matrix + other.matrix + self.matrix += other.matrix return self def __add__(self, other): @@ -184,6 +186,42 @@ def _shape(self): def _range(self): return list(range(self._shape[1])), list(range(self._shape[0])) + def _setRCV(self, vector, id1, id2, addv=False): + """Insert values based on triplet of rows, columns, and values + + L[id2[k], id1[k]] = vector[k] + + :meth:`~PETSc.Mat.setValuesRCV` is, helpfully, described as + `Undocumented. `https://github.com/petsc/petsc/blob/c373386401c23a900b8d8448dc0b1bd4ba1cb6ca/src/binding/petsc4py/src/petsc4py/PETSc/Mat.pyx#L2541C28-L2541C28>`_ + 8^P (even that was only done in + `v3.20.0 `_). + + The sourcecode is not terribly helpful, but, fortunately, + Firedrake's `petsc_sparse()` shows what to do. + (https://www.firedrakeproject.org/_modules/firedrake/preconditioners/fdm.html). + + I have no idea why they need to be column vectors, but it works. + + Parameters + ---------- + vector : array_like + non-zero values + id1 : array_like + row indices + id2 : array_like + column indices + """ + id1 = numerix.asarray(id1, dtype=PETSc.IntType) + id2 = numerix.asarray(id2, dtype=PETSc.IntType) + vector = numerix.asarray(vector) + + if len(id1) > 0: + # self.matrix.setValuesRCV() doesn't work for zero-length arrays + # which can happen on some nodes when parallel partitioning. 
+ # https://gitlab.com/petsc/petsc/-/issues/1522 + self.matrix.setValuesRCV(id1[:, None], id2[:, None], vector[:, None], + addv=addv) + def put(self, vector, id1, id2): """ Put elements of `vector` at positions of the matrix corresponding to (`id1`, `id2`) @@ -196,7 +234,7 @@ def put(self, vector, id1, id2): 2.500000 --- --- """ self.matrix.assemble(self.matrix.AssemblyType.FLUSH) - self.matrix.setValuesCSR(*self._ijv2csr(id2, id1, vector)) + self._setRCV(vector=vector, id1=id1, id2=id2) def _ijv2csr(self, i, j, v): """Convert arrays of matrix indices and values into CSR format @@ -239,7 +277,7 @@ def _ijv2csr(self, i, j, v): vals = v[ix] # note: PETSc (at least via pip) only seems to handle 32 bit addressing - return row_ptr.astype('int32'), cols.astype('int32'), vals + return row_ptr.astype(PETSc.IntType), cols.astype(PETSc.IntType), vals def putDiagonal(self, vector): """ @@ -291,8 +329,7 @@ def addAt(self, vector, id1, id2): 2.500000 --- 2.200000 """ self.matrix.assemble(self.matrix.AssemblyType.FLUSH) - self.matrix.setValuesCSR(*self._ijv2csr(id2, id1, vector), - addv=True) + self._setRCV(vector=vector, id1=id1, id2=id2, addv=True) def addAtDiagonal(self, vector): if isinstance(vector, (int, float)): @@ -481,6 +518,10 @@ def T(self): self.matrix.assemble() return _PETScMatrix(matrix=self.matrix.transpose()) + def zeroEntries(self): + self.matrix.assemble() + self.matrix.zeroEntries() + class _PETScMatrixFromShape(_PETScMatrix): def __init__(self, rows, cols, @@ -520,6 +561,7 @@ def __init__(self, rows, cols, if isinstance(nonZerosPerRow, Iterable): nonZerosPerRow = numerix.asarray(nonZerosPerRow, dtype=PETSc.IntType) matrix.setPreallocationNNZ(nonZerosPerRow) + matrix.setOption(matrix.Option.KEEP_NONZERO_PATTERN, True) if not exactNonZeros: matrix.setOption(matrix.Option.NEW_NONZERO_ALLOCATION_ERR, False) @@ -599,8 +641,8 @@ def _ao(self): petscIDs = numerix.arange(N) + numerix.sum(count[:comm.procID]) - self._ao_ = PETSc.AO().createBasic(petsc=petscIDs.astype('int32'), - app=fipyIDs.astype('int32'), + self._ao_ = PETSc.AO().createBasic(petsc=petscIDs.astype(PETSc.IntType), + app=fipyIDs.astype(PETSc.IntType), comm=comm.petsc4py_comm) return self._ao_ @@ -613,7 +655,7 @@ def _matrix2mesh(self, ids): def _mesh2matrix(self, ids): """Convert mesh cell indices to matrix row indices """ - return self._ao.app2petsc(ids.astype('int32')) + return self._ao.app2petsc(ids.astype(PETSc.IntType)) def _fipy2petscGhost(self, var): """Convert a FiPy Variable to a PETSc `GhostVec` @@ -648,7 +690,7 @@ def _fipy2petscGhost(self, var): array = numerix.concatenate([corporeal, incorporeal]) comm = self.mesh.communicator.petsc4py_comm - vec = PETSc.Vec().createGhostWithArray(ghosts=self._m2m.ghosts.astype('int32'), + vec = PETSc.Vec().createGhostWithArray(ghosts=self._m2m.ghosts.astype(PETSc.IntType), array=array, comm=comm) @@ -910,7 +952,7 @@ def __mul__(self, other): x = other[self._m2m.localNonOverlappingColIDs] x = PETSc.Vec().createWithArray(x, comm=self.matrix.comm) - y = PETSc.Vec().createGhost(ghosts=self._m2m.ghosts.astype('int32'), + y = PETSc.Vec().createGhost(ghosts=self._m2m.ghosts.astype(PETSc.IntType), size=(len(self._m2m.localNonOverlappingColIDs), None), comm=self.matrix.comm) self.matrix.mult(x, y) @@ -922,7 +964,7 @@ def __mul__(self, other): def takeDiagonal(self): self.matrix.assemble() - y = PETSc.Vec().createGhost(ghosts=self._m2m.ghosts.astype('int32'), + y = PETSc.Vec().createGhost(ghosts=self._m2m.ghosts.astype(PETSc.IntType), size=(len(self._m2m.localNonOverlappingColIDs), None), 
comm=self.matrix.comm) self.matrix.getDiagonal(result=y) diff --git a/fipy/matrices/pysparseMatrix.py b/fipy/matrices/pysparseMatrix.py index b46cbada3a..67efe5d6f9 100644 --- a/fipy/matrices/pysparseMatrix.py +++ b/fipy/matrices/pysparseMatrix.py @@ -392,6 +392,10 @@ def T(self): return _PysparseMatrix(matrix=A_T) + def zeroEntries(self): + _, irow, jcol = self.matrix.find() + self.matrix.put(0, irow, jcol) + class _PysparseMatrixFromShape(_PysparseMatrix): def __init__(self, rows, cols, diff --git a/fipy/matrices/scipyMatrix.py b/fipy/matrices/scipyMatrix.py index d0b0f2a90f..fb7edf9efa 100644 --- a/fipy/matrices/scipyMatrix.py +++ b/fipy/matrices/scipyMatrix.py @@ -5,9 +5,10 @@ __all__ = [] import scipy.sparse as sp -from fipy.tools import numerix +from scipy.io import mmwrite from fipy.matrices.sparseMatrix import _SparseMatrix +from fipy.tools import numerix class _ScipyMatrix(_SparseMatrix): @@ -260,6 +261,9 @@ def addAtDiagonal(self, vector): ids = numerix.arange(len(vector)) self.addAt(vector, ids, ids) + def exportMmf(self, filename): + mmwrite(filename, self.matrix) + @property def numpyArray(self): return self.matrix.toarray() @@ -375,6 +379,10 @@ def T(self): """ return _ScipyMatrix(matrix=self.matrix.transpose(copy=True)) + def zeroEntries(self): + id1, id2 = self.matrix.nonzero() + self.matrix[id1, id2] = 0 + class _ScipyMatrixFromShape(_ScipyMatrix): def __init__(self, rows, cols, diff --git a/fipy/matrices/sparseMatrix.py b/fipy/matrices/sparseMatrix.py index e5797bfd29..be22fbbc12 100644 --- a/fipy/matrices/sparseMatrix.py +++ b/fipy/matrices/sparseMatrix.py @@ -16,6 +16,8 @@ class _SparseMatrix(object): numpyArray = property() _shape = property() + INDEX_TYPE = int + def __init__(self): pass @@ -132,6 +134,11 @@ def LIL(self): def T(self): raise NotImplementedError + def zeroEntries(self): + """Insert zeros into nonzero matrix entries. 
+ """ + raise NotImplementedError + def _matrix2mesh(self, ids): """Convert matrix row indices to mesh cell indices """ @@ -187,6 +194,14 @@ def _cellIDsToGlobalIDs(IDs, M, L): N = len(IDs) return (numerix.vstack([IDs] * M) + numerix.indices((M, N))[0] * L).flatten() + def _cellsToMatrixMask(self, overlapping, M): + if overlapping: + L = self.mesh.numberOfCells + mask = numerix.ones((M * L,), dtype=bool) + else: + mask = self.bodies + return numerix.hstack([mask] * M) + def _cellIDsToGlobalRowIDs(self, IDs): return self._cellIDsToGlobalIDs(IDs, M=self.numberOfEquations, L=self.mesh.globalNumberOfCells) @@ -195,13 +210,21 @@ def _cellIDsToLocalRowIDs(self, IDs): return self._cellIDsToGlobalIDs(IDs, M=self.numberOfEquations, L=self.mesh.numberOfCells) + def _cellsToRowMask(self, overlapping): + return self._cellsToMatrixMask(overlapping, + M=self.numberOfEquations) + @property def globalNonOverlappingRowIDs(self): - return self._cellIDsToGlobalRowIDs(self.mesh._globalNonOverlappingCellIDs) + if not hasattr(self, "_globalNonOverlappingRowIDs"): + self._globalNonOverlappingRowIDs = self._cellIDsToGlobalRowIDs(self.mesh._globalNonOverlappingCellIDs) + return self._globalNonOverlappingRowIDs @property def globalOverlappingRowIDs(self): - return self._cellIDsToGlobalRowIDs(self.mesh._globalOverlappingCellIDs) + if not hasattr(self, "_globalOverlappingRowIDs"): + self._globalOverlappingRowIDs = self._cellIDsToGlobalRowIDs(self.mesh._globalOverlappingCellIDs) + return self._globalOverlappingRowIDs @property def localNonOverlappingRowIDs(self): @@ -215,13 +238,21 @@ def _cellIDsToLocalColIDs(self, IDs): return self._cellIDsToGlobalIDs(IDs, M=self.numberOfVariables, L=self.mesh.numberOfCells) + def _cellsToColMask(self, overlapping): + return self._cellsToMatrixMask(overlapping, + M=self.numberOfVariables) + @property def globalNonOverlappingColIDs(self): - return self._cellIDsToGlobalColIDs(self.mesh._globalNonOverlappingCellIDs) + if not hasattr(self, "_globalNonOverlappingColIDs"): + self._globalNonOverlappingColIDs = self._cellIDsToGlobalColIDs(self.mesh._globalNonOverlappingCellIDs) + return self._globalNonOverlappingColIDs @property def globalOverlappingColIDs(self): - return self._cellIDsToGlobalColIDs(self.mesh._globalOverlappingCellIDs) + if not hasattr(self, "_globalOverlappingColIDs"): + self._globalOverlappingColIDs = self._cellIDsToGlobalColIDs(self.mesh._globalOverlappingCellIDs) + return self._globalOverlappingColIDs @property def localOverlappingColIDs(self): @@ -231,17 +262,10 @@ def localOverlappingColIDs(self): def localNonOverlappingColIDs(self): return self._cellIDsToLocalColIDs(self.mesh._localNonOverlappingCellIDs) - def _getStencil_(self, id1, id2, - globalOverlappihgIDs, globalNonOverlappihgIDs, - overlapping=False): - id1 = globalOverlappihgIDs[id1] - - if overlapping: - mask = numerix.ones(id1.shape, dtype=bool) - else: - mask = numerix.isin(id1, globalNonOverlappihgIDs) - - id1 = self.matrix()._mesh2matrix(id1[mask]) + def _getStencil_(self, id1, id2, globalOverlappihgIDs, mask): + mask = mask[id1] + id1 = globalOverlappihgIDs[id1][mask] + id1 = self.matrix()._mesh2matrix(id1) id2 = numerix.asarray(id2)[mask] return id1, id2, mask @@ -275,8 +299,9 @@ def globalVectorAndIDs(self, vector, id1, id2, overlapping=False): @property def bodies(self): if self._bodies is None: - self._bodies = numerix.isin(self.mesh._globalOverlappingCellIDs, - self.mesh._globalNonOverlappingCellIDs) + self._bodies = numerix.zeros(self.mesh._localOverlappingCellIDs.shape, + dtype=bool) + 
self._bodies[self.mesh._localNonOverlappingCellIDs] = True return self._bodies @property @@ -292,15 +317,14 @@ class _RowMesh2Matrix(_Mesh2Matrix): def _getStencil(self, id1, id2, overlapping=False): return self._getStencil_(id1, id2, self.globalOverlappingRowIDs, - self.globalNonOverlappingRowIDs, - overlapping) + self._cellsToRowMask(overlapping)) class _ColMesh2Matrix(_Mesh2Matrix): def _getStencil(self, id1, id2, overlapping=False): - id2, id1, mask = self._getStencil_(id2, id1, - self.globalOverlappingColIDs, - self.globalNonOverlappingColIDs, - overlapping) + (id2, id1, + mask) = self._getStencil_(id2, id1, + self.globalOverlappingColIDs, + self._cellsToColMask(overlapping)) return id1, id2, mask @@ -308,7 +332,9 @@ class _RowColMesh2Matrix(_RowMesh2Matrix): def _getStencil(self, id1, id2, overlapping=False): id2 = self.globalOverlappingColIDs[id2] - id1, id2, mask = super(_RowColMesh2Matrix, self)._getStencil(id1, id2, overlapping) + (id1, id2, + mask) = super(_RowColMesh2Matrix, self)._getStencil(id1, id2, + overlapping) id2 = self.matrix()._mesh2matrix(id2) diff --git a/fipy/matrices/trilinosMatrix.py b/fipy/matrices/trilinosMatrix.py index e487c06d48..1706c3f2f1 100644 --- a/fipy/matrices/trilinosMatrix.py +++ b/fipy/matrices/trilinosMatrix.py @@ -38,6 +38,9 @@ class _TrilinosMatrix(_SparseMatrix): Allows basic python operations __add__, __sub__ etc. Facilitate matrix populating in an easy way. """ + + INDEX_TYPE = numerix.int32 + def __init__(self, matrix, nonZerosPerRow=None): """ Parameters @@ -145,7 +148,7 @@ def __iadd__(self, other): # Depending on which one is more filled, pick the order of operations if self.matrix.Filled() and other.matrix.NumGlobalNonzeros() \ > self.matrix.NumGlobalNonzeros(): - tempBandwidth = other.matrix.NumGlobalNonzeros() / self.matrix.NumGlobalRows()+1 + tempBandwidth = other.matrix.NumGlobalNonzeros() // self.matrix.NumGlobalRows()+1 tempMatrix = Epetra.CrsMatrix(Epetra.Copy, self.rowMap, tempBandwidth) @@ -712,6 +715,9 @@ def T(self): return _TrilinosMatrix(matrix=A_T_bis) + def zeroEntries(self): + self.matrix.PutScalar(0) + class _TrilinosMatrixFromShape(_TrilinosMatrix): def __init__(self, rows, cols, nonZerosPerRow=1, exactNonZeros=False, matrix=None): diff --git a/fipy/meshes/abstractMesh.py b/fipy/meshes/abstractMesh.py index c212149054..294074f1ea 100644 --- a/fipy/meshes/abstractMesh.py +++ b/fipy/meshes/abstractMesh.py @@ -16,6 +16,7 @@ from fipy.meshes.representations.abstractRepresentation import _AbstractRepresentation from fipy.meshes.topologies.abstractTopology import _AbstractTopology +from fipy.solvers import INDEX_TYPE class MeshAdditionError(Exception): """:class:`Exception` raised when meshes cannot be concatenated.""" @@ -376,13 +377,15 @@ def _getAddedMeshValues(self, other, resolution=1e-2): if diff > 0: other_faceVertexIDs = numerix.append(other_faceVertexIDs, -1 * numerix.ones((diff,) - + other_faceVertexIDs.shape[1:], 'l'), + + other_faceVertexIDs.shape[1:], + dtype=INDEX_TYPE), axis=0) other_faceVertexIDs = MA.masked_values(other_faceVertexIDs, -1) elif diff < 0: self_faceVertexIDs = numerix.append(self_faceVertexIDs, -1 * numerix.ones((-diff,) - + self_faceVertexIDs.shape[1:], 'l'), + + self_faceVertexIDs.shape[1:], + dtype=INDEX_TYPE), axis=0) self_faceVertexIDs = MA.masked_values(self_faceVertexIDs, -1) @@ -460,13 +463,15 @@ def _getAddedMeshValues(self, other, resolution=1e-2): if diff > 0: other_cellFaceIDs = numerix.append(other_cellFaceIDs, -1 * numerix.ones((diff,) - + other_cellFaceIDs.shape[1:], 'l'), + + 
other_cellFaceIDs.shape[1:], + dtype=INDEX_TYPE), axis=0) other_cellFaceIDs = MA.masked_values(other_cellFaceIDs, -1) elif diff < 0: self_cellFaceIDs = numerix.append(self_cellFaceIDs, -1 * numerix.ones((-diff,) - + self_cellFaceIDs.shape[1:], 'l'), + + self_cellFaceIDs.shape[1:], + dtype=INDEX_TYPE), axis=0) self_cellFaceIDs = MA.masked_values(self_cellFaceIDs, -1) @@ -488,6 +493,7 @@ def _getAddedMeshValues(self, other, resolution=1e-2): def interiorFaceIDs(self): if not hasattr(self, '_interiorFaceIDs'): self._interiorFaceIDs = numerix.nonzero(self.interiorFaces)[0] + self._interiorFaceIDs = self._interiorFaceIDs.astype(INDEX_TYPE) return self._interiorFaceIDs @property diff --git a/fipy/meshes/builders/grid2DBuilder.py b/fipy/meshes/builders/grid2DBuilder.py index efff5fe069..38fa8b69ac 100644 --- a/fipy/meshes/builders/grid2DBuilder.py +++ b/fipy/meshes/builders/grid2DBuilder.py @@ -11,6 +11,7 @@ _DOffsets, _UniformOrigin, _NonuniformNumPts) +from fipy.solvers import INDEX_TYPE class _Grid2DBuilder(_AbstractGridBuilder): @@ -126,7 +127,7 @@ def createCells(nx, ny, numFaces, numHorizFaces, numVertCols): `cells = (f1, f2, f3, f4)` going anticlockwise. `f1` etc. refer to the faces """ - cellFaceIDs = numerix.zeros((4, nx * ny), 'l') + cellFaceIDs = numerix.zeros((4, nx * ny), dtype=INDEX_TYPE) inline._runInline(""" int ID = j * ni + i; @@ -150,8 +151,8 @@ def createCells(nx, ny, numFaces, numHorizFaces, numVertCols): `cells = (f1, f2, f3, f4)` going anticlockwise. `f1` etc. refer to the faces """ - cellFaceIDs = numerix.zeros((4, nx * ny), 'l') - faceIDs = numerix.arange(numFaces) + cellFaceIDs = numerix.zeros((4, nx * ny), dtype=INDEX_TYPE) + faceIDs = numerix.arange(numFaces, dtype=INDEX_TYPE) if numFaces > 0: cellFaceIDs[0,:] = faceIDs[:numHorizFaces - nx] cellFaceIDs[2,:] = cellFaceIDs[0,:] + nx diff --git a/fipy/meshes/cylindricalNonUniformGrid1D.py b/fipy/meshes/cylindricalNonUniformGrid1D.py index e8dcbee588..52f4beffc9 100644 --- a/fipy/meshes/cylindricalNonUniformGrid1D.py +++ b/fipy/meshes/cylindricalNonUniformGrid1D.py @@ -82,10 +82,16 @@ def _test(self): everything works as expected. Fixed a bug where the following throws an error on solve() when `nx` is a float. - >>> from fipy import CellVariable, DiffusionTerm + >>> from fipy import CellVariable, DiffusionTerm, DefaultSolver >>> mesh = CylindricalNonUniformGrid1D(nx=3., dx=(1., 2., 3.)) >>> var = CellVariable(mesh=mesh) - >>> DiffusionTerm().solve(var) + + The residual and the b-vector are both zero, so use "unscaled" + normalization and no preconditioning. Again, we don't care about + the answer, we just want it to be quiet. + + >>> solver = DefaultSolver(criterion="unscaled", precon=None) + >>> DiffusionTerm().solve(var, solver=solver) This test is for https://github.com/usnistgov/fipy/issues/372. Cell volumes were being returned as `binOps` rather than arrays. diff --git a/fipy/meshes/cylindricalUniformGrid1D.py b/fipy/meshes/cylindricalUniformGrid1D.py index a721399d38..5a8b6951e6 100644 --- a/fipy/meshes/cylindricalUniformGrid1D.py +++ b/fipy/meshes/cylindricalUniformGrid1D.py @@ -64,7 +64,13 @@ def _test(self): >>> from fipy import * >>> mesh = CylindricalUniformGrid1D(nx=3., dx=1.) >>> var = CellVariable(mesh=mesh) - >>> DiffusionTerm().solve(var) + + The residual and the b-vector are both zero, so use "unscaled" + normalization and no preconditioning. Again, we don't care about + the answer, we just want it to be quiet. 
+ + >>> solver = DefaultSolver(criterion="unscaled", precon=None) + >>> DiffusionTerm().solve(var, solver=solver) This test is for https://github.com/usnistgov/fipy/issues/372. Cell volumes were being returned as `binOps` rather than arrays. diff --git a/fipy/meshes/gmshMesh.py b/fipy/meshes/gmshMesh.py index 56556b0b9c..a45a0d4a83 100755 --- a/fipy/meshes/gmshMesh.py +++ b/fipy/meshes/gmshMesh.py @@ -27,6 +27,8 @@ from fipy.meshes.mesh2D import Mesh2D from fipy.meshes.topologies.meshTopology import _MeshTopology +from fipy.solvers import INDEX_TYPE + __all__ = ["GmshException", "MeshExportError", "gmshVersion", "openMSHFile", "openPOSFile", "GmshFile", "MSHFile", "POSFile", @@ -667,7 +669,7 @@ def _deriveCellsAndFaces(self, cellsToVertIDs, shapeTypes, numCells): maxFaceLen = max([len(f) for f in uniqueFaces]) uniqueFaces = [[-1] * (maxFaceLen - len(f)) + f for f in uniqueFaces] - facesToVertices = nx.array(uniqueFaces, dtype=nx.INT_DTYPE) + facesToVertices = nx.array(uniqueFaces, dtype=INDEX_TYPE) return facesToVertices.swapaxes(0, 1)[::-1], cellsToFaces.swapaxes(0, 1).copy('C'), facesDict @@ -680,7 +682,7 @@ def _translateNodesToVertices(self, entitiesNodes, vertexMap): try: vertIndices = vertexMap[nx.array(entity)] except IndexError: - vertIndices = nx.ones((len(entity),), 'l') * -1 + vertIndices = nx.ones((len(entity),), dtype=INDEX_TYPE) * -1 entitiesVertices.append(vertIndices) return entitiesVertices @@ -870,7 +872,9 @@ def read(self): # convert lists of cell vertices to a properly oriented masked array maxVerts = max([len(v) for v in cellsToVertIDs]) # ticket:539 - NumPy 1.7 casts to array before concatenation and empty array defaults to float - cellsToVertIDs = [nx.concatenate((v, nx.array([-1] * (maxVerts-len(v)), dtype=nx.INT_DTYPE))) for v in cellsToVertIDs] + cellsToVertIDs = [nx.concatenate((v, nx.array([-1] * (maxVerts-len(v)), + dtype=INDEX_TYPE))) + for v in cellsToVertIDs] cellsToVertIDs = nx.MA.masked_equal(cellsToVertIDs, value=-1).swapaxes(0, 1) _log.debug("Done with cells and faces.") @@ -1324,7 +1328,8 @@ def _globalNonOverlappingCellIDs(self): .. note:: Trivial except for parallel meshes """ - return nx.array(self.mesh.cellGlobalIDs) + return nx.asarray(self.mesh.cellGlobalIDs, + dtype=INDEX_TYPE) @property def _globalOverlappingCellIDs(self): @@ -1345,7 +1350,8 @@ def _globalOverlappingCellIDs(self): .. note:: Trivial except for parallel meshes """ - return nx.array(self.mesh.cellGlobalIDs + self.mesh.gCellGlobalIDs) + return nx.asarray(self.mesh.cellGlobalIDs + self.mesh.gCellGlobalIDs, + dtype=INDEX_TYPE) @property def _localNonOverlappingCellIDs(self): @@ -1366,7 +1372,8 @@ def _localNonOverlappingCellIDs(self): .. note:: Trivial except for parallel meshes """ - return nx.arange(len(self.mesh.cellGlobalIDs)) + return nx.arange(len(self.mesh.cellGlobalIDs), + dtype=INDEX_TYPE) @property def _localOverlappingCellIDs(self): @@ -1388,7 +1395,8 @@ def _localOverlappingCellIDs(self): .. note:: Trivial except for parallel meshes """ return nx.arange(len(self.mesh.cellGlobalIDs) - + len(self.mesh.gCellGlobalIDs)) + + len(self.mesh.gCellGlobalIDs), + dtype=INDEX_TYPE) @property def _localNonOverlappingFaceIDs(self): @@ -1411,7 +1419,8 @@ def _localNonOverlappingFaceIDs(self): .. 
note:: Trivial except for parallel meshes """ - return nx.arange(self.mesh.numberOfFaces)[..., self._nonOverlappingFaces] + return nx.arange(self.mesh.numberOfFaces, + dtype=INDEX_TYPE)[..., self._nonOverlappingFaces] diff --git a/fipy/meshes/mesh.py b/fipy/meshes/mesh.py index 64dbd0d134..19022429bb 100644 --- a/fipy/meshes/mesh.py +++ b/fipy/meshes/mesh.py @@ -12,6 +12,8 @@ from fipy.tools.dimensions.physicalField import PhysicalField from fipy.tools import serialComm +from fipy.solvers import INDEX_TYPE + __all__ = ["MeshAdditionError", "Mesh"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] @@ -87,8 +89,9 @@ def _calcInteriorAndExteriorCellIDs(self): exteriorCellIDs = list(self._exteriorCellIDs) except: exteriorCellIDs = self.faceCellIDs[0, self._exteriorFaces.value] - tmp = numerix.zeros(self.numberOfCells, 'l') - numerix.put(tmp, exteriorCellIDs, numerix.ones(len(exteriorCellIDs), 'l')) + tmp = numerix.zeros(self.numberOfCells, dtype=INDEX_TYPE) + numerix.put(tmp, exteriorCellIDs, + numerix.ones(len(exteriorCellIDs), dtype=INDEX_TYPE)) exteriorCellIDs = numerix.nonzero(tmp) interiorCellIDs = numerix.nonzero(numerix.logical_not(tmp)) return interiorCellIDs, exteriorCellIDs @@ -407,7 +410,7 @@ def _concatenableMesh(self): def _translate(self, vector): newCoords = self.vertexCoords + vector - newmesh = Mesh(newCoords, numerix.array(self.faceVertexIDs), numerix.array(self.cellFaceIDs)) + newmesh = Mesh(newCoords, numerix.asarray(self.faceVertexIDs), numerix.asarray(self.cellFaceIDs)) return newmesh def _handleFaceConnection(self): @@ -427,9 +430,10 @@ def _handleFaceConnection(self): """calculate Topology methods""" def _calcFaceCellIDs(self): - array = MA.array(MA.indices(self.cellFaceIDs.shape, 'l')[1], + array = MA.array(MA.indices(self.cellFaceIDs.shape, + dtype=INDEX_TYPE)[1], mask=MA.getmask(self.cellFaceIDs)) - faceCellIDs = MA.zeros((2, self.numberOfFaces), 'l') + faceCellIDs = MA.zeros((2, self.numberOfFaces), dtype=INDEX_TYPE) ## Nasty bug: MA.put(arr, ids, values) fills its ids and ## values arguments when masked! 
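The hunks in this region replace hard-coded ``'l'`` (C long) index arrays with a suite-dependent ``INDEX_TYPE`` exported from ``fipy.solvers`` (``int`` on the generic ``_SparseMatrix``, ``numerix.int32`` for Trilinos). A minimal NumPy-only sketch of the intent; the names below are illustrative, not FiPy API::

    import numpy as np

    # One integer dtype, advertised by the solver suite, is threaded through
    # every mesh/matrix index array so that 32-bit backends (Epetra, or PETSc
    # installed from pip) never receive 64-bit indices that need recasting.
    INDEX_TYPE = np.int32          # e.g. what the Trilinos suite would report

    cell_ids = np.arange(100, dtype=INDEX_TYPE)
    face_ids = np.zeros((2, 100), dtype=INDEX_TYPE)
    assert cell_ids.dtype == face_ids.dtype == np.dtype(INDEX_TYPE)

Choosing the dtype at construction time presumably avoids repeated ``astype`` copies at the matrix interface.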
This was not the behavior diff --git a/fipy/meshes/mesh1D.py b/fipy/meshes/mesh1D.py index 7140efa317..30cc388a45 100644 --- a/fipy/meshes/mesh1D.py +++ b/fipy/meshes/mesh1D.py @@ -47,12 +47,12 @@ def _calcFaceTangents(self): def _translate(self, vector): newCoords = self.vertexCoords + vector - newmesh = Mesh1D(newCoords, numerix.array(self.faceVertexIDs), numerix.array(self.cellFaceIDs)) + newmesh = Mesh1D(newCoords, numerix.asarray(self.faceVertexIDs), numerix.asarray(self.cellFaceIDs)) return newmesh def __mul__(self, factor): newCoords = self.vertexCoords * factor - newmesh = Mesh1D(newCoords, numerix.array(self.faceVertexIDs), numerix.array(self.cellFaceIDs)) + newmesh = Mesh1D(newCoords, numerix.asarray(self.faceVertexIDs), numerix.asarray(self.cellFaceIDs)) return newmesh @property diff --git a/fipy/meshes/mesh2D.py b/fipy/meshes/mesh2D.py index b298b9433a..c7c02a7893 100644 --- a/fipy/meshes/mesh2D.py +++ b/fipy/meshes/mesh2D.py @@ -19,6 +19,8 @@ from fipy.meshes.representations.meshRepresentation import _MeshRepresentation from fipy.meshes.topologies.meshTopology import _Mesh2DTopology +from fipy.solvers import INDEX_TYPE + def _orderVertices(vertexCoords, vertices): coordinates = numerix.take(vertexCoords, vertices) centroid = numerix.add.reduce(coordinates) / coordinates.shape[0] @@ -94,7 +96,8 @@ def _calcOrderedCellVertexIDs(self): @property def _nonOrthogonality(self): - exteriorFaceArray = numerix.zeros((self.faceCellIDs.shape[1],), 'l') + exteriorFaceArray = numerix.zeros((self.faceCellIDs.shape[1],), + dtype=INDEX_TYPE) numerix.put(exteriorFaceArray, numerix.nonzero(self.exteriorFaces), 1) unmaskedFaceCellIDs = MA.filled(self.faceCellIDs, 0) # what we put in for the "fill" doesn't matter because only exterior diff --git a/fipy/meshes/topologies/abstractTopology.py b/fipy/meshes/topologies/abstractTopology.py index 9a64cf3eca..8a01d400f7 100644 --- a/fipy/meshes/topologies/abstractTopology.py +++ b/fipy/meshes/topologies/abstractTopology.py @@ -3,6 +3,7 @@ __docformat__ = 'restructuredtext' from ...tools import numerix +from ...solvers import INDEX_TYPE from ...variables.cellVariable import CellVariable __all__ = [] @@ -36,7 +37,8 @@ def _globalNonOverlappingCellIDs(self): .. note:: Trivial except for parallel meshes """ - return numerix.arange(self.mesh.numberOfCells) + return numerix.arange(self.mesh.numberOfCells, + dtype=INDEX_TYPE) @property def _globalOverlappingCellIDs(self): @@ -57,7 +59,8 @@ def _globalOverlappingCellIDs(self): .. note:: Trivial except for parallel meshes """ - return numerix.arange(self.mesh.numberOfCells) + return numerix.arange(self.mesh.numberOfCells, + dtype=INDEX_TYPE) @property def _localNonOverlappingCellIDs(self): @@ -78,7 +81,8 @@ def _localNonOverlappingCellIDs(self): .. note:: Trivial except for parallel meshes """ - return numerix.arange(self.mesh.numberOfCells) + return numerix.arange(self.mesh.numberOfCells, + dtype=INDEX_TYPE) @property def _localOverlappingCellIDs(self): @@ -99,7 +103,8 @@ def _localOverlappingCellIDs(self): .. note:: Trivial except for parallel meshes """ - return numerix.arange(self.mesh.numberOfCells) + return numerix.arange(self.mesh.numberOfCells, + dtype=INDEX_TYPE) @property def _globalNonOverlappingFaceIDs(self): @@ -122,7 +127,8 @@ def _globalNonOverlappingFaceIDs(self): .. 
note:: Trivial except for parallel meshes """ - return numerix.arange(self.mesh.numberOfFaces) + return numerix.arange(self.mesh.numberOfFaces, + dtype=INDEX_TYPE) @property def _globalOverlappingFaceIDs(self): @@ -145,7 +151,8 @@ def _globalOverlappingFaceIDs(self): .. note:: Trivial except for parallel meshes """ - return numerix.arange(self.mesh.numberOfFaces) + return numerix.arange(self.mesh.numberOfFaces, + dtype=INDEX_TYPE) @property def _cellProcID(self): @@ -247,7 +254,8 @@ def _localNonOverlappingFaceIDs(self): .. note:: Trivial except for parallel meshes """ - return numerix.arange(self.mesh.numberOfFaces) + return numerix.arange(self.mesh.numberOfFaces, + dtype=INDEX_TYPE) @property def _localOverlappingFaceIDs(self): @@ -270,7 +278,8 @@ def _localOverlappingFaceIDs(self): .. note:: Trivial except for parallel meshes """ - return numerix.arange(self.mesh.numberOfFaces) + return numerix.arange(self.mesh.numberOfFaces, + dtype=INDEX_TYPE) @property def _vertexCellIDs(self): diff --git a/fipy/meshes/topologies/gridTopology.py b/fipy/meshes/topologies/gridTopology.py index a1c2bde6c4..28dace3cbf 100644 --- a/fipy/meshes/topologies/gridTopology.py +++ b/fipy/meshes/topologies/gridTopology.py @@ -11,6 +11,8 @@ from fipy.meshes.topologies.abstractTopology import _AbstractTopology +from fipy.solvers import INDEX_TYPE + class _GridTopology(_AbstractTopology): @property @@ -40,7 +42,8 @@ def _globalNonOverlappingCellIDs(self): """ return numerix.arange(self.mesh.offset + self.mesh.overlap['left'], - self.mesh.offset + self.mesh.nx - self.mesh.overlap['right']) + self.mesh.offset + self.mesh.nx - self.mesh.overlap['right'], + dtype=INDEX_TYPE) @property def _globalOverlappingCellIDs(self): @@ -59,7 +62,8 @@ def _globalOverlappingCellIDs(self): .. note:: Trivial except for parallel meshes """ - return numerix.arange(self.mesh.offset, self.mesh.offset + self.mesh.nx) + return numerix.arange(self.mesh.offset, self.mesh.offset + self.mesh.nx, + dtype=INDEX_TYPE) @property def _localNonOverlappingCellIDs(self): @@ -79,7 +83,8 @@ def _localNonOverlappingCellIDs(self): .. note:: Trivial except for parallel meshes """ return numerix.arange(self.mesh.overlap['left'], - self.mesh.nx - self.mesh.overlap['right']) + self.mesh.nx - self.mesh.overlap['right'], + dtype=INDEX_TYPE) @property def _localOverlappingCellIDs(self): @@ -98,7 +103,7 @@ def _localOverlappingCellIDs(self): .. note:: Trivial except for parallel meshes """ - return numerix.arange(0, self.mesh.nx) + return numerix.arange(0, self.mesh.nx, dtype=INDEX_TYPE) @property def _globalNonOverlappingFaceIDs(self): @@ -118,7 +123,9 @@ def _globalNonOverlappingFaceIDs(self): .. note:: Trivial except for parallel meshes """ return numerix.arange(self.mesh.offset + self.mesh.overlap['left'], - self.mesh.offset + self.mesh.numberOfFaces - self.mesh.overlap['right']) + (self.mesh.offset + self.mesh.numberOfFaces + - self.mesh.overlap['right']), + dtype=INDEX_TYPE) @property def _globalOverlappingFaceIDs(self): @@ -137,7 +144,9 @@ def _globalOverlappingFaceIDs(self): .. note:: Trivial except for parallel meshes """ - return numerix.arange(self.mesh.offset, self.mesh.offset + self.mesh.numberOfFaces) + return numerix.arange(self.mesh.offset, + self.mesh.offset + self.mesh.numberOfFaces, + dtype=INDEX_TYPE) @property def _localNonOverlappingFaceIDs(self): @@ -157,7 +166,9 @@ def _localNonOverlappingFaceIDs(self): .. 
note:: Trivial except for parallel meshes """ return numerix.arange(self.mesh.overlap['left'], - self.mesh.numberOfFaces - self.mesh.overlap['right']) + (self.mesh.numberOfFaces + - self.mesh.overlap['right']), + dtype=INDEX_TYPE) @property def _localOverlappingFaceIDs(self): @@ -176,12 +187,13 @@ def _localOverlappingFaceIDs(self): .. note:: Trivial except for parallel meshes """ - return numerix.arange(0, self.mesh.numberOfFaces) + return numerix.arange(0, self.mesh.numberOfFaces, dtype=INDEX_TYPE) @property def _cellTopology(self): """return a map of the topology of each cell of grid""" - cellTopology = numerix.empty((self.mesh.numberOfCells,), dtype=numerix.ubyte) + cellTopology = numerix.empty((self.mesh.numberOfCells,), + dtype=numerix.ubyte) cellTopology[:] = self._elementTopology["line"] return cellTopology @@ -210,8 +222,11 @@ def _globalNonOverlappingCellIDs(self): .. note:: Trivial except for parallel meshes """ - return numerix.arange((self.mesh.offset[1] + self.mesh.overlap['bottom']) * self.mesh.nx, - (self.mesh.offset[1] + self.mesh.ny - self.mesh.overlap['top']) * self.mesh.nx) + return numerix.arange((self.mesh.offset[1] + + self.mesh.overlap['bottom']) * self.mesh.nx, + (self.mesh.offset[1] + self.mesh.ny + - self.mesh.overlap['top']) * self.mesh.nx, + dtype=INDEX_TYPE) @property def _globalOverlappingCellIDs(self): @@ -233,7 +248,10 @@ def _globalOverlappingCellIDs(self): .. note:: Trivial except for parallel meshes """ - return numerix.arange(self.mesh.offset[1] * self.mesh.nx, (self.mesh.offset[1] + self.mesh.ny) * self.mesh.nx) + return numerix.arange(self.mesh.offset[1] * self.mesh.nx, + (self.mesh.offset[1] + + self.mesh.ny) * self.mesh.nx, + dtype=INDEX_TYPE) @property def _localNonOverlappingCellIDs(self): @@ -256,7 +274,9 @@ def _localNonOverlappingCellIDs(self): .. note:: Trivial except for parallel meshes """ return numerix.arange(self.mesh.overlap['bottom'] * self.mesh.nx, - (self.mesh.ny - self.mesh.overlap['top']) * self.mesh.nx) + (self.mesh.ny + - self.mesh.overlap['top']) * self.mesh.nx, + dtype=INDEX_TYPE) @property def _localOverlappingCellIDs(self): @@ -278,16 +298,18 @@ def _localOverlappingCellIDs(self): .. note:: Trivial except for parallel meshes """ - return numerix.arange(0, self.mesh.ny * self.mesh.nx) + return numerix.arange(0, self.mesh.ny * self.mesh.nx, dtype=INDEX_TYPE) def _calcFaceIDs(self, y0, ny, global_horz_faces): prev_horz_faces = y0 * self.mesh.nx horz = numerix.arange(prev_horz_faces, - prev_horz_faces + (ny + 1) * self.mesh.nx) + prev_horz_faces + (ny + 1) * self.mesh.nx, + dtype=INDEX_TYPE) prev_vert_faces = y0 * (self.mesh.nx + 1) vert = numerix.arange(global_horz_faces + prev_vert_faces, global_horz_faces + prev_vert_faces - + ny * (self.mesh.nx + 1)) + + ny * (self.mesh.nx + 1), + dtype=INDEX_TYPE) return numerix.concatenate((horz, vert)) @@ -415,8 +437,12 @@ def _globalNonOverlappingCellIDs(self): .. note:: Trivial except for parallel meshes """ - return numerix.arange((self.mesh.offset[2] + self.mesh.overlap['front']) * self.mesh.nx * self.mesh.ny, - (self.mesh.offset[2] + self.mesh.nz - self.mesh.overlap['back']) * self.mesh.nx * self.mesh.ny) + nxy = self.mesh.nx * self.mesh.ny + return numerix.arange((self.mesh.offset[2] + + self.mesh.overlap['front']) * nxy, + (self.mesh.offset[2] + + self.mesh.nz - self.mesh.overlap['back']) * nxy, + dtype=INDEX_TYPE) @property def _globalOverlappingCellIDs(self): @@ -426,9 +452,10 @@ def _globalOverlappingCellIDs(self): .. 
note:: Trivial except for parallel meshes """ - - return numerix.arange(self.mesh.offset[2] * self.mesh.nx * self.mesh.ny, - (self.mesh.offset[2] + self.mesh.nz) * self.mesh.nx * self.mesh.ny) + nxy = self.mesh.nx * self.mesh.ny + return numerix.arange(self.mesh.offset[2] * nxy, + (self.mesh.offset[2] + self.mesh.nz) * nxy, + dtype=INDEX_TYPE) @property def _localNonOverlappingCellIDs(self): @@ -438,8 +465,10 @@ def _localNonOverlappingCellIDs(self): .. note:: Trivial except for parallel meshes """ - return numerix.arange(self.mesh.overlap['front'] * self.mesh.nx * self.mesh.ny, - (self.mesh.nz - self.mesh.overlap['back']) * self.mesh.nx * self.mesh.ny) + nxy = self.mesh.nx * self.mesh.ny + return numerix.arange(self.mesh.overlap['front'] * nxy, + (self.mesh.nz - self.mesh.overlap['back']) * nxy, + dtype=INDEX_TYPE) @property def _localOverlappingCellIDs(self): @@ -452,20 +481,23 @@ def _localOverlappingCellIDs(self): return numerix.arange(0, self.mesh.ny * self.mesh.nx * self.mesh.nz) def _calcFaceIDs(self, z0, nz, global_xy_faces, global_xz_faces): - prev_xy_faces = z0 * self.mesh.nx * self.mesh.ny + nxy = self.mesh.nx * self.mesh.ny + prev_xy_faces = z0 * nxy xy = numerix.arange(prev_xy_faces, - prev_xy_faces + - (nz + 1) * self.mesh.nx * self.mesh.ny) + prev_xy_faces + (nz + 1) * nxy, + dtype=INDEX_TYPE) prev_xz_faces = z0 * self.mesh.nx * (self.mesh.ny + 1) xz = numerix.arange(global_xy_faces + prev_xz_faces, global_xy_faces + prev_xz_faces + - self.mesh.nx * (self.mesh.ny + 1) * nz) + self.mesh.nx * (self.mesh.ny + 1) * nz, + dtype=INDEX_TYPE) prev_yz_faces = z0 * (self.mesh.nx + 1) * self.mesh.ny yz = numerix.arange(global_xy_faces + global_xz_faces + prev_yz_faces, global_xy_faces + global_xz_faces + prev_yz_faces + - (self.mesh.nx + 1) * self.mesh.ny * nz) + (self.mesh.nx + 1) * self.mesh.ny * nz, + dtype=INDEX_TYPE) return numerix.concatenate((xy, xz, yz)) diff --git a/fipy/meshes/uniformGrid1D.py b/fipy/meshes/uniformGrid1D.py index 2c3a18396b..0064adf80f 100644 --- a/fipy/meshes/uniformGrid1D.py +++ b/fipy/meshes/uniformGrid1D.py @@ -15,6 +15,8 @@ from fipy.meshes.representations.gridRepresentation import _Grid1DRepresentation from fipy.meshes.topologies.gridTopology import _Grid1DTopology +from fipy.solvers import INDEX_TYPE + __all__ = ["UniformGrid1D"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] @@ -99,8 +101,8 @@ def _cellToFaceOrientations(self): @property def _adjacentCellIDs(self): - c1 = numerix.arange(self.numberOfFaces) - ids = numerix.array((c1 - 1, c1)) + c1 = numerix.arange(self.numberOfFaces, dtype=INDEX_TYPE) + ids = numerix.array((c1 - 1, c1), dtype=INDEX_TYPE) if self.numberOfFaces > 0: ids[0, 0] = ids[1, 0] ids[1, -1] = ids[0, -1] @@ -108,8 +110,8 @@ def _adjacentCellIDs(self): @property def _cellToCellIDs(self): - c1 = numerix.arange(self.numberOfCells) - ids = MA.array((c1 - 1, c1 + 1)) + c1 = numerix.arange(self.numberOfCells, dtype=INDEX_TYPE) + ids = MA.array((c1 - 1, c1 + 1), dtype=INDEX_TYPE) if self.numberOfCells > 0: ids[0, 0] = MA.masked ids[1, -1] = MA.masked @@ -266,7 +268,8 @@ def _concatenableMesh(self): @property def _cellFaceIDs(self): - return MA.array(_Grid1DBuilder.createCells(self.nx)) + return MA.array(_Grid1DBuilder.createCells(self.nx), + dtype=INDEX_TYPE) @property def _maxFacesPerCell(self): @@ -278,8 +281,8 @@ def vertexCoords(self): @property def faceCellIDs(self): - c1 = numerix.arange(self.numberOfFaces) - ids = MA.array((c1 - 1, c1)) + c1 = numerix.arange(self.numberOfFaces, 
dtype=INDEX_TYPE) + ids = MA.array((c1 - 1, c1), dtype=INDEX_TYPE) if self.numberOfFaces > 0: ids[0, 0] = ids[1, 0] ids[1, 0] = MA.masked @@ -288,8 +291,8 @@ def faceCellIDs(self): @property def _cellVertexIDs(self): - c1 = numerix.arange(self.numberOfCells) - return numerix.array((c1 + 1, c1)) + c1 = numerix.arange(self.numberOfCells, dtype=INDEX_TYPE) + return numerix.array((c1 + 1, c1), dtype=INDEX_TYPE) def _getNearestCellID(self, points): """ @@ -316,7 +319,7 @@ def _getNearestCellID(self, points): xi, = points dx = self.dx - i = numerix.array(numerix.rint(((xi - x0) / dx)), 'l') + i = numerix.array(numerix.rint(((xi - x0) / dx)), dtype=INDEX_TYPE) i[i < 0] = 0 i[i > nx - 1] = nx - 1 diff --git a/fipy/meshes/uniformGrid2D.py b/fipy/meshes/uniformGrid2D.py index f7b8ebbb0a..de0549a638 100644 --- a/fipy/meshes/uniformGrid2D.py +++ b/fipy/meshes/uniformGrid2D.py @@ -16,6 +16,8 @@ from fipy.meshes.representations.gridRepresentation import _Grid2DRepresentation from fipy.meshes.topologies.gridTopology import _Grid2DTopology +from fipy.solvers import INDEX_TYPE + __all__ = ["UniformGrid2D"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] @@ -163,8 +165,10 @@ def _adjacentCellIDs(self): else: @property def _adjacentCellIDs(self): - Hids = numerix.zeros((self.numberOfHorizontalRows, self.nx, 2), 'l') - indices = numerix.indices((self.numberOfHorizontalRows, self.nx)) + Hids = numerix.zeros((self.numberOfHorizontalRows, self.nx, 2), + dtype=INDEX_TYPE) + indices = numerix.indices((self.numberOfHorizontalRows, self.nx), + dtype=INDEX_TYPE) Hids[..., 1] = indices[1] + indices[0] * self.nx Hids[..., 0] = Hids[..., 1] - self.nx @@ -174,8 +178,10 @@ def _adjacentCellIDs(self): Hids[0, ..., 1] = Hids[0, ..., 0] Hids[-1, ..., 1] = Hids[-1, ..., 0] - Vids = numerix.zeros((self.ny, self.numberOfVerticalColumns, 2), 'l') - indices = numerix.indices((self.ny, self.numberOfVerticalColumns)) + Vids = numerix.zeros((self.ny, self.numberOfVerticalColumns, 2), + dtype=INDEX_TYPE) + indices = numerix.indices((self.ny, self.numberOfVerticalColumns), + dtype=INDEX_TYPE) Vids[..., 1] = indices[1] + indices[0] * self.nx Vids[..., 0] = Vids[..., 1] - 1 @@ -191,8 +197,9 @@ def _adjacentCellIDs(self): @property def _cellToCellIDs(self): - ids = MA.zeros((4, self.nx, self.ny), 'l') - indices = numerix.indices((self.nx, self.ny)) + ids = MA.zeros((4, self.nx, self.ny), dtype=INDEX_TYPE) + indices = numerix.indices((self.nx, self.ny), + dtype=INDEX_TYPE) ids[0] = indices[0] + (indices[1] - 1) * self.nx ids[1] = (indices[0] + 1) + indices[1] * self.nx ids[2] = indices[0] + (indices[1] + 1) * self.nx @@ -211,7 +218,8 @@ def _cellToCellIDs(self): def _cellToCellIDsFilled(self): N = self.numberOfCells M = self._maxFacesPerCell - cellIDs = numerix.repeat(numerix.arange(N)[numerix.newaxis, ...], M, axis=0) + cellIDs = numerix.arange(N, dtype=INDEX_TYPE)[numerix.newaxis, ...] 
+ cellIDs = numerix.repeat(cellIDs, M, axis=0) cellToCellIDs = self._cellToCellIDs return MA.where(MA.getmaskarray(cellToCellIDs), cellIDs, cellToCellIDs) @@ -465,7 +473,8 @@ def vertexCoords(self): if inline.doInline: @property def faceCellIDs(self): - faceCellIDs = numerix.zeros((2, self.numberOfFaces), 'l') + faceCellIDs = numerix.zeros((2, self.numberOfFaces), + dtype=INDEX_TYPE) mask = numerix.zeros((2, self.numberOfFaces), 'l') inline._runInline(""" @@ -508,8 +517,10 @@ def faceCellIDs(self): else: @property def faceCellIDs(self): - Hids = numerix.zeros((2, self.nx, self.numberOfHorizontalRows), 'l') - indices = numerix.indices((self.nx, self.numberOfHorizontalRows)) + Hids = numerix.zeros((2, self.nx, self.numberOfHorizontalRows), + dtype=INDEX_TYPE) + indices = numerix.indices((self.nx, self.numberOfHorizontalRows), + dtype=INDEX_TYPE) Hids[1] = indices[0] + indices[1] * self.nx Hids[0] = Hids[1] - self.nx if self.numberOfHorizontalRows > 0: @@ -517,8 +528,10 @@ def faceCellIDs(self): Hids[1, ..., 0] = -1 Hids[1, ..., -1] = -1 - Vids = numerix.zeros((2, self.numberOfVerticalColumns, self.ny), 'l') - indices = numerix.indices((self.numberOfVerticalColumns, self.ny)) + Vids = numerix.zeros((2, self.numberOfVerticalColumns, self.ny), + dtype=INDEX_TYPE) + indices = numerix.indices((self.numberOfVerticalColumns, self.ny), + dtype=INDEX_TYPE) Vids[1] = indices[0] + indices[1] * self.nx Vids[0] = Vids[1] - 1 if self.numberOfVerticalColumns > 0: @@ -535,13 +548,17 @@ def _cellVertexIDs(self): @property def faceVertexIDs(self): - Hids = numerix.zeros((2, self.nx, self.numberOfHorizontalRows), 'l') - indices = numerix.indices((self.nx, self.numberOfHorizontalRows)) + Hids = numerix.zeros((2, self.nx, self.numberOfHorizontalRows), + dtype=INDEX_TYPE) + indices = numerix.indices((self.nx, self.numberOfHorizontalRows), + dtype=INDEX_TYPE) Hids[0] = indices[0] + indices[1] * self.numberOfVerticalColumns Hids[1] = Hids[0] + 1 - Vids = numerix.zeros((2, self.numberOfVerticalColumns, self.ny), 'l') - indices = numerix.indices((self.numberOfVerticalColumns, self.ny)) + Vids = numerix.zeros((2, self.numberOfVerticalColumns, self.ny), + dtype=INDEX_TYPE) + indices = numerix.indices((self.numberOfVerticalColumns, self.ny), + dtype=INDEX_TYPE) Vids[0] = indices[0] + indices[1] * self.numberOfVerticalColumns Vids[1] = Vids[0] + self.numberOfVerticalColumns @@ -552,8 +569,10 @@ def faceVertexIDs(self): def _calcOrderedCellVertexIDs(self): """Correct ordering for VTK_PIXEL""" - ids = numerix.zeros((4, self.nx, self.ny), 'l') - indices = numerix.indices((self.nx, self.ny)) + ids = numerix.zeros((4, self.nx, self.ny), + dtype=INDEX_TYPE) + indices = numerix.indices((self.nx, self.ny), + dtype=INDEX_TYPE) ids[2] = indices[0] + (indices[1] + 1) * self.numberOfVerticalColumns ids[1] = ids[2] + 1 ids[3] = indices[0] + indices[1] * self.numberOfVerticalColumns diff --git a/fipy/meshes/uniformGrid3D.py b/fipy/meshes/uniformGrid3D.py index 4c82d5e2cc..f08f026d33 100644 --- a/fipy/meshes/uniformGrid3D.py +++ b/fipy/meshes/uniformGrid3D.py @@ -10,6 +10,8 @@ from fipy.meshes.representations.gridRepresentation import _Grid3DRepresentation from fipy.meshes.topologies.gridTopology import _Grid3DTopology +from fipy.solvers import INDEX_TYPE + __all__ = ["UniformGrid3D"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] @@ -135,8 +137,10 @@ def _adjacentCellIDs(self): @property def _cellToCellIDs(self): - ids = MA.zeros((6, self.nx, self.ny, self.nz), 'l') - indices = 
numerix.indices((self.nx, self.ny, self.nz)) + ids = MA.zeros((6, self.nx, self.ny, self.nz), + dtype=INDEX_TYPE) + indices = numerix.indices((self.nx, self.ny, self.nz), + dtype=INDEX_TYPE) nxy = self.nx * self.ny same = indices[0] + indices[1] * self.nx + indices[2] * nxy @@ -163,7 +167,8 @@ def _cellToCellIDs(self): def _cellToCellIDsFilled(self): N = self.numberOfCells M = self._maxFacesPerCell - cellIDs = numerix.repeat(numerix.arange(N)[numerix.newaxis, ...], M, axis=0) + cellIDs = numerix.arange(N, dtype=INDEX_TYPE)[numerix.newaxis, ...] + cellIDs = numerix.repeat(cellIDs, M, axis=0) cellToCellIDs = self._cellToCellIDs return MA.where(MA.getmaskarray(cellToCellIDs), cellIDs, cellToCellIDs) @@ -409,21 +414,27 @@ def _cellFaceIDs(self): self.nz, self.numberOfXYFaces, self.numberOfXZFaces, - self.numberOfYZFaces)) + self.numberOfYZFaces), + dtype=INDEX_TYPE) @property def _XYFaceIDs(self): - ids = numerix.arange(0, self.numberOfXYFaces) + ids = numerix.arange(0, self.numberOfXYFaces, + dtype=INDEX_TYPE) return ids.reshape((self.nz + 1, self.ny, self.nx)).swapaxes(0, 2) @property def _XZFaceIDs(self): - ids = numerix.arange(self.numberOfXYFaces, self.numberOfXYFaces + self.numberOfXZFaces) + ids = numerix.arange(self.numberOfXYFaces, + self.numberOfXYFaces + self.numberOfXZFaces, + dtype=INDEX_TYPE) return ids.reshape((self.nz, self.ny + 1, self.nx)).swapaxes(0, 2) @property def _YZFaceIDs(self): - ids = numerix.arange(self.numberOfXYFaces + self.numberOfXZFaces, self.numberOfFaces) + ids = numerix.arange(self.numberOfXYFaces + self.numberOfXZFaces, + self.numberOfFaces, + dtype=INDEX_TYPE) return ids.reshape((self.nz, self.ny, self.nx + 1)).swapaxes(0, 2) @property @@ -443,33 +454,44 @@ def vertexCoords(self): @property def faceCellIDs(self): - XYids = MA.zeros((2, self.nx, self.ny, self.nz + 1), 'l') - indices = numerix.indices((self.nx, self.ny, self.nz + 1)) + XYids = MA.zeros((2, self.nx, self.ny, self.nz + 1), + dtype=INDEX_TYPE) + indices = numerix.indices((self.nx, self.ny, self.nz + 1), + dtype=INDEX_TYPE) XYids[1] = indices[0] + (indices[1] + indices[2] * self.ny) * self.nx XYids[0] = XYids[1] - self.nx * self.ny XYids[0, ..., 0] = XYids[1, ..., 0] XYids[1, ..., 0] = MA.masked XYids[1, ..., -1] = MA.masked - XZids = MA.zeros((2, self.nx, self.ny + 1, self.nz), 'l') - indices = numerix.indices((self.nx, self.ny + 1, self.nz)) + XZids = MA.zeros((2, self.nx, self.ny + 1, self.nz), + dtype=INDEX_TYPE) + indices = numerix.indices((self.nx, self.ny + 1, self.nz), + dtype=INDEX_TYPE) XZids[1] = indices[0] + (indices[1] + indices[2] * self.ny) * self.nx XZids[0] = XZids[1] - self.nx XZids[0,:, 0,:] = XZids[1,:, 0,:] XZids[1,:, 0,:] = MA.masked XZids[1,:, -1,:] = MA.masked - YZids = MA.zeros((2, self.nx + 1, self.ny, self.nz), 'l') - indices = numerix.indices((self.nx + 1, self.ny, self.nz)) + YZids = MA.zeros((2, self.nx + 1, self.ny, self.nz), + dtype=INDEX_TYPE) + indices = numerix.indices((self.nx + 1, self.ny, self.nz), + dtype=INDEX_TYPE) YZids[1] = indices[0] + (indices[1] + indices[2] * self.ny) * self.nx YZids[0] = YZids[1] - 1 YZids[0, 0] = YZids[1, 0] YZids[1, 0] = MA.masked YZids[1, -1] = MA.masked - return MA.concatenate((XYids.swapaxes(1, 3).reshape((2, self.numberOfXYFaces)), - XZids.swapaxes(1, 3).reshape((2, self.numberOfXZFaces)), - YZids.swapaxes(1, 3).reshape((2, self.numberOfYZFaces))), axis=1) + XYids = XYids.swapaxes(1, 3) + XZids = XZids.swapaxes(1, 3) + YZids = YZids.swapaxes(1, 3) + + return MA.concatenate((XYids.reshape((2, self.numberOfXYFaces)), + 
XZids.reshape((2, self.numberOfXZFaces)), + YZids.reshape((2, self.numberOfYZFaces))), + axis=1) ## from common/mesh @@ -483,8 +505,10 @@ def faceVertexIDs(self): def _calcOrderedCellVertexIDs(self): """Correct ordering for `VTK_VOXEL`""" - ids = numerix.zeros((8, self.nx, self.ny, self.nz), 'l') - indices = numerix.indices((self.nx, self.ny, self.nz)) + ids = numerix.zeros((8, self.nx, self.ny, self.nz), + dtype=INDEX_TYPE) + indices = numerix.indices((self.nx, self.ny, self.nz), + dtype=INDEX_TYPE) ids[1] = indices[0] + (indices[1] + (indices[2] + 1) * (self.ny + 1) + 1) * (self.nx + 1) ids[0] = ids[1] + 1 ids[3] = indices[0] + (indices[1] + (indices[2] + 1) * (self.ny + 1)) * (self.nx + 1) diff --git a/fipy/solvers/__init__.py b/fipy/solvers/__init__.py index 613e68b9ec..6c8dd2870c 100644 --- a/fipy/solvers/__init__.py +++ b/fipy/solvers/__init__.py @@ -75,7 +75,6 @@ def _import_mesh_matrices(suite): """Solver class that should solve any matrix. """ - from fipy.tools.comms.dummyComm import DummyComm serialComm, parallelComm = DummyComm(), DummyComm() @@ -159,7 +158,7 @@ def _import_mesh_matrices(suite): if solver_suite is None and _desired_solver in ["pyamgx", None]: try: - if _parallelComm.Nproc > 1: + if _Nproc > 1: raise SerialSolverError('pyamgx') from fipy.solvers.pyamgx import * _mesh_matrices = _import_mesh_matrices(suite="Scipy") @@ -179,6 +178,8 @@ def _import_mesh_matrices(suite): # don't unpack until here in order to keep code above more succinct _RowMeshMatrix, _ColMeshMatrix, _MeshMatrix = _mesh_matrices +INDEX_TYPE = _MeshMatrix.INDEX_TYPE + from fipy.tests.doctestPlus import register_skipper register_skipper(flag='PYSPARSE_SOLVER', @@ -186,11 +187,36 @@ def _import_mesh_matrices(suite): why="the Pysparse solvers are not being used.", skipWarning=True) +register_skipper(flag='PETSC_SOLVER', + test=lambda: solver_suite == 'petsc', + why="the PETSc solvers are not being used.", + skipWarning=True) + register_skipper(flag='NOT_PYAMGX_SOLVER', test=lambda: solver_suite != 'pyamgx', why="the PyAMGX solver is being used.", skipWarning=True) +register_skipper(flag='SCIPY_SOLVER', + test=lambda: solver_suite == 'scipy', + why="the SciPy solvers are not being used.", + skipWarning=True) + +register_skipper(flag='PYAMG_SOLVER', + test=lambda: solver_suite == 'pyamg', + why="the PyAMG solvers are not being used.", + skipWarning=True) + +register_skipper(flag='PYAMGX_SOLVER', + test=lambda: solver_suite == 'pyamgx', + why="the pyamgx solvers are not being used.", + skipWarning=True) + +register_skipper(flag='TRILINOS_SOLVER', + test=lambda: (solver_suite == 'trilinos') or (solver_suite == 'no-pysparse'), + why="the Trilinos solvers are not being used.", + skipWarning=True) + register_skipper(flag='NOT_TRILINOS_SOLVER', test=lambda: solver_suite not in ['trilinos', 'no-pysparse'], why="the Trilinos solvers are being used.", diff --git a/fipy/solvers/convergence.py b/fipy/solvers/convergence.py new file mode 100644 index 0000000000..b8022c08a1 --- /dev/null +++ b/fipy/solvers/convergence.py @@ -0,0 +1,206 @@ +from __future__ import division +from __future__ import unicode_literals +from builtins import object +from builtins import str +__docformat__ = 'restructuredtext' + +from future.utils import with_metaclass + +import json +import logging +import warnings + +__all__ = ["ConvergenceBase", "Convergence", "AbsoluteToleranceConvergence", + "RelativeToleranceConvergence", "RHSZeroConvergence", + "IterationConvergence", "HappyBreakdownConvergence", + "IteratingConvergence", 
"LossOfAccuracyConvergence", + "Divergence", "IterationDivergence", "BreakdownDivergence", + "IllConditionedDivergence", "StagnatedDivergence", + "OutOfRangeDivergence", "PreconditioningDivergence", + "IllConditionedPreconditionerDivergence", "NullDivergence", + "ToleranceDivergence"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] + +class _ConvergenceMeta(type): + # We use __init__ rather than __new__ here because we want + # to modify attributes of the class *after* they have been + # created + # See http://jakevdp.github.io/blog/2012/12/01/a-primer-on-python-metaclasses/ + def __init__(cls, name, bases, dct): + if not hasattr(cls, "code_registry"): + # this is the base class, create an empty registry + # one dictionary for all element types + cls.code_registry = {} + + if not hasattr(cls, "name_registry"): + # this is the base class, create an empty registry + # one dictionary for all element types + cls.name_registry = {} + + super(_ConvergenceMeta, cls).__init__(name, bases, dct) + + if hasattr(cls, "status_code") and hasattr(cls, "suite"): + cls.code_registry[(cls.suite, cls.status_code)] = cls + + if hasattr(cls, "status_name") and hasattr(cls, "suite"): + cls.name_registry[(cls.suite, cls.status_name)] = cls + +class DivergenceWarning(UserWarning): + def __init__(self, divergence): + msg = "msg={status_name}, code={status_code}, residual={residual}".format(**divergence.info) + super(DivergenceWarning, self).__init__(msg) + +class ConvergenceBase(with_metaclass(_ConvergenceMeta, object)): + """Information about whether and why a solver converged. + + Attributes + ---------- + solver : ~fipy.solvers.solver.Solver + The linear solver that was invoked. + iterations : int + The number of linear iterations the solver performed. + criterion : str + The :ref:`CONVERGENCE` test used by the solver. + tolerance_scale : float + The multiplier applied to the tolerance in order for this solver to + satisfy `criterion`. + residual : float + The unscaled norm of the residual achieved by the solver. + status_code : int or str + The canonical return value for this type of convergence. + status_name : str + The text representation of `status_code`. + actual_code : int or str + The status value actually returned by the solver. + """ + + def __init__(self, solver, iterations, residual, criterion, actual_code=None, **kwargs): + self.solver = solver + self.iterations = iterations + self.criterion = criterion + self.residual = residual + if actual_code is None: + self.actual_code = self.status_code + else: + self.actual_code = actual_code + + vars(self).update(kwargs) + + self.log() + + @property + def info(self): + info = vars(self).copy() + info["solver"] = str(info["solver"]) + info.update(vars(self.__class__)) + return {k: v for k, v in info.items() if not k.startswith("_")} + + def log(self, level=logging.DEBUG): + logger = logging.getLogger(self.__class__.__module__ + + "." + self.__class__.__name__) + + logger.log(level, json.dumps(self.info)) + + def warn(self): + pass + + def __str__(self): + return str(self.info) + +class Convergence(ConvergenceBase): + """Information about why a solver converged. + """ + + message = "User requested convergence criteria is satisfied. Iterations: {0}. Relative error: {1}" + +class AbsoluteToleranceConvergence(Convergence): + """Absolute tolerance satisfied. + + residual < atol * scale + """ + pass + +class RelativeToleranceConvergence(Convergence): + """Relative tolerance satisfied. 
+ + residual < rtol * scale + """ + pass + +class RHSZeroConvergence(Convergence): + r""":math:`\vec{b} = 0`, so exact solution is :math:`\vec{x} = 0`. + """ + pass + +class IterationConvergence(Convergence): + """Requested iterations complete (and no residual calculated). + """ + pass + +class HappyBreakdownConvergence(Convergence): + '''"Exact" solution found and more iterations will just make things worse. + ''' + pass + +class IteratingConvergence(Convergence): + """Solve still in progress. + """ + pass + +class LossOfAccuracyConvergence(Convergence): + """Numerical loss of precision occurred. + """ + pass + +class Divergence(ConvergenceBase): + """Information about why a solver diverged. + """ + + def warn(self): + warnings.warn(DivergenceWarning(self), stacklevel=5) + +class IterationDivergence(Divergence): + """Exceeded maximum iterations. + """ + pass + +class BreakdownDivergence(Divergence): + """Method broke down. + """ + pass + +class IllConditionedDivergence(Divergence): + """Matrix was ill-conditioned. + """ + pass + +class StagnatedDivergence(Divergence): + """The method stagnated. + """ + pass + +class OutOfRangeDivergence(Divergence): + """A value became too small, too large, or invalid. + """ + pass + +class PreconditioningDivergence(Divergence): + """A problem with the preconditioner. + """ + pass + +class IllConditionedPreconditionerDivergence(PreconditioningDivergence): + """Preconditioner is ill-conditioned. + """ + pass + +class NullDivergence(Divergence): + """Breakdown when solving the Hessenberg system within GMRES. + """ + pass + +class ToleranceDivergence(Divergence): + """Residual norm increased too much. + """ + pass diff --git a/fipy/solvers/petsc/__init__.py b/fipy/solvers/petsc/__init__.py index e588a4f293..4f15934527 100644 --- a/fipy/solvers/petsc/__init__.py +++ b/fipy/solvers/petsc/__init__.py @@ -2,12 +2,15 @@ _log = logging.getLogger(__name__) -from fipy.solvers.petsc.linearLUSolver import * -from fipy.solvers.petsc.linearPCGSolver import * -from fipy.solvers.petsc.linearGMRESSolver import * -from fipy.solvers.petsc.linearBicgSolver import * -from fipy.solvers.petsc.linearCGSSolver import * -from fipy.solvers.petsc.dummySolver import * +from .linearLUSolver import * +from .linearPCGSolver import * +from .linearGMRESSolver import * +from .linearBicgSolver import * +from .linearCGSSolver import * +from .dummySolver import * +from . import petscConvergence + +from .preconditioners import * DefaultSolver = LinearGMRESSolver DefaultAsymmetricSolver = LinearGMRESSolver diff --git a/fipy/solvers/petsc/dummySolver.py b/fipy/solvers/petsc/dummySolver.py index 1f45bc9c41..c28b6da21b 100644 --- a/fipy/solvers/petsc/dummySolver.py +++ b/fipy/solvers/petsc/dummySolver.py @@ -12,4 +12,4 @@ class DummySolver(PETScSolver): """ def _solve_(self, L, x, b): - pass + return x diff --git a/fipy/solvers/petsc/linearBicgSolver.py b/fipy/solvers/petsc/linearBicgSolver.py index 3db002fa96..80c5a92b74 100644 --- a/fipy/solvers/petsc/linearBicgSolver.py +++ b/fipy/solvers/petsc/linearBicgSolver.py @@ -8,7 +8,7 @@ class LinearBicgSolver(PETScKrylovSolver): """ The `LinearBicgSolver` is an interface to the biconjugate gradient solver in - PETSc, using no preconditioner by default. + PETSc. 
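The new ``convergence`` module registers every concrete class under ``(suite, status_code)`` and ``(suite, status_name)`` via ``_ConvergenceMeta``, so a solver wrapper can translate a backend return code into a rich ``Convergence``/``Divergence`` object. A sketch of such a lookup, assuming petsc4py is installed so the PETSc suite (and its ``KSP_*`` registrations) is importable; the ``lookup`` helper is illustrative, not part of this patch::

    from petsc4py import PETSc

    from fipy.solvers.convergence import ConvergenceBase
    # imported for its side effect of registering the KSP_* classes
    from fipy.solvers.petsc import petscConvergence

    def lookup(suite, code):
        # (suite, status_code) -> class, as recorded by _ConvergenceMeta
        return ConvergenceBase.code_registry[(suite, code)]

    cls = lookup("petsc", PETSc.KSP.ConvergedReason.CONVERGED_RTOL)
    print(cls.__name__)    # KSP_RelativeToleranceConvergence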
""" solver = 'bicg' diff --git a/fipy/solvers/petsc/linearCGSSolver.py b/fipy/solvers/petsc/linearCGSSolver.py index 265a6b0dec..fbf27edcf6 100644 --- a/fipy/solvers/petsc/linearCGSSolver.py +++ b/fipy/solvers/petsc/linearCGSSolver.py @@ -8,7 +8,7 @@ class LinearCGSSolver(PETScKrylovSolver): """ The `LinearCGSSolver` is an interface to the conjugate gradient squared - solver in PETSc, using no preconditioner by default. + solver in PETSc. """ solver = 'cgs' diff --git a/fipy/solvers/petsc/linearGMRESSolver.py b/fipy/solvers/petsc/linearGMRESSolver.py index e95ba3e4e3..a0f50c7962 100644 --- a/fipy/solvers/petsc/linearGMRESSolver.py +++ b/fipy/solvers/petsc/linearGMRESSolver.py @@ -7,8 +7,7 @@ class LinearGMRESSolver(PETScKrylovSolver): """ - The `LinearGMRESSolver` is an interface to the GMRES solver in PETSc, - using no preconditioner by default. + The `LinearGMRESSolver` is an interface to the GMRES solver in PETSc. """ diff --git a/fipy/solvers/petsc/linearLUSolver.py b/fipy/solvers/petsc/linearLUSolver.py index b1cefee189..0227e7312e 100644 --- a/fipy/solvers/petsc/linearLUSolver.py +++ b/fipy/solvers/petsc/linearLUSolver.py @@ -5,8 +5,9 @@ from petsc4py import PETSc -from fipy.solvers.petsc.petscSolver import PETScSolver from fipy.tools.timer import Timer +from .petscSolver import PETScSolver +from .preconditioners.luPreconditioner import LUPreconditioner __all__ = ["LinearLUSolver"] @@ -18,22 +19,62 @@ class LinearLUSolver(PETScSolver): """ - def __init__(self, tolerance=1e-10, iterations=10, precon="lu"): + def __init__(self, tolerance="default", criterion="default", + iterations=10, precon=None): """ - :Parameters: - - `tolerance`: The required error tolerance. - - `iterations`: The maximum number of iterative steps to perform. - - `precon`: *Ignored*. - + Parameters + ---------- + tolerance : float + Required error tolerance. + criterion : {'default', 'unscaled', 'RHS', 'matrix', 'initial', 'legacy'} + Interpretation of ``tolerance``. + See :ref:`CONVERGENCE` for more information. + iterations : int + Maximum number of iterative steps to perform. 
+ precon + *ignored* """ - PETScSolver.__init__(self, tolerance=tolerance, - iterations=iterations, precon="lu") + super(LinearLUSolver, self).__init__(tolerance=tolerance, + criterion=criterion, + iterations=iterations, + precon=LUPreconditioner()) + + def _adaptLegacyTolerance(self, L, x, b): + return self._adaptInitialTolerance(L, x, b) + + def _adaptUnscaledTolerance(self, L, x, b): + return (1., None) + + def _adaptRHSTolerance(self, L, x, b): + return (self._rhsNorm(L, x, b), None) + + def _adaptMatrixTolerance(self, L, x, b): + return (self._matrixNorm(L, x, b), None) + + def _adaptInitialTolerance(self, L, x, b): + return (self._residualNorm(L, x, b), None) def _solve_(self, L, x, b): + """Solve system of equations posed for PETSc + + Parameters + ---------- + L : PETSc.Mat + Sparse matrix + x : PETSc.Vec + Solution variable as ghosted vector + b : PETSc.Vec + Right-hand side as ghosted vector + + Returns + ------- + x : PETSc.Vec + Solution variable as ghosted vector + """ ksp = PETSc.KSP() ksp.create(PETSc.COMM_WORLD) ksp.setType("preonly") - ksp.getPC().setType(self.preconditioner) + self.preconditioner._applyToSolver(solver=ksp, matrix=L) # TODO: SuperLU invoked with PCFactorSetMatSolverType(pc, MATSOLVERSUPERLU) # see: http://www.mcs.anl.gov/petsc/petsc-dev/src/ksp/ksp/examples/tutorials/ex52.c.html # PETSc.PC().setFactorSolverType("superlu") @@ -42,27 +83,32 @@ def _solve_(self, L, x, b): ksp.setOperators(L) ksp.setFromOptions() + tolerance_scale, _ = self._adaptTolerance(L, x, b) + self._log.debug("BEGIN solve") with Timer() as t: for iteration in range(self.iterations): - errorVector = L * x - b - tol = errorVector.norm() - - if iteration == 0: - tol0 = tol + residualVector, residual = self._residualVectorAndNorm(L, x, b) - if tol <= self.tolerance * tol0: + if residual <= self.tolerance * tolerance_scale: break xError = x.copy() - ksp.solve(errorVector, xError) + ksp.solve(residualVector, xError) + x -= xError self._log.debug("END solve - {} ns".format(t.elapsed)) - self._log.debug('solver: %s', ksp.type) - self._log.debug('precon: %s', ksp.getPC().type) - self._log.debug('iterations: %d / %d', iteration+1, self.iterations) - self._log.debug('residual: %s', errorVector.norm(1)) + self._setConvergence(suite="petsc", + code=PETSc.KSP.ConvergedReason.CONVERGED_ITS, + iterations=iteration+1, + residual=residual, + ksp_solver=ksp.type, + ksp_precon=ksp.getPC().type) + + self.convergence.warn() + + return x diff --git a/fipy/solvers/petsc/linearPCGSolver.py b/fipy/solvers/petsc/linearPCGSolver.py index fc467ca0e8..95ee9691b7 100644 --- a/fipy/solvers/petsc/linearPCGSolver.py +++ b/fipy/solvers/petsc/linearPCGSolver.py @@ -7,8 +7,7 @@ class LinearPCGSolver(PETScKrylovSolver): """ - The `LinearPCGSolver` is an interface to the cg solver in PETSc, - using no preconditioner by default. + The `LinearPCGSolver` is an interface to the cg solver in PETSc. 
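The rewritten ``LinearLUSolver._solve_`` above stops iterating once ``residual <= tolerance * tolerance_scale``, where the scale comes from the ``criterion`` keyword through the ``_adapt*Tolerance`` hooks (``legacy`` falls back to ``initial`` for the LU solver; ``default`` is presumably resolved in the shared solver base class, which is not part of this hunk). A NumPy sketch of what those scales amount to, using plain 2-norms purely for illustration::

    import numpy as np

    def tolerance_scale(criterion, L, x, b):
        # mirrors _adaptUnscaledTolerance, _adaptRHSTolerance,
        # _adaptMatrixTolerance, and _adaptInitialTolerance
        if criterion == "unscaled":
            return 1.0
        elif criterion == "RHS":
            return np.linalg.norm(b)
        elif criterion == "matrix":
            return np.linalg.norm(L)
        elif criterion in ("initial", "legacy"):
            return np.linalg.norm(b - L @ x)
        raise ValueError("unknown criterion: %s" % criterion)

    L = np.array([[4., 1.], [1., 3.]])
    b = np.array([1., 2.])
    x = np.zeros(2)
    tolerance = 1e-10
    converged = (np.linalg.norm(b - L @ x)
                 <= tolerance * tolerance_scale("RHS", L, x, b))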
""" diff --git a/fipy/solvers/petsc/petscConvergence.py b/fipy/solvers/petsc/petscConvergence.py new file mode 100644 index 0000000000..db00fc5f74 --- /dev/null +++ b/fipy/solvers/petsc/petscConvergence.py @@ -0,0 +1,131 @@ +from petsc4py import PETSc + +from ..convergence import (Convergence, AbsoluteToleranceConvergence, + RelativeToleranceConvergence, IterationConvergence, + IterationDivergence, HappyBreakdownConvergence, + IteratingConvergence, + Divergence, BreakdownDivergence, + IllConditionedDivergence, + PreconditioningDivergence, + IllConditionedPreconditionerDivergence, + OutOfRangeDivergence, NullDivergence, + ToleranceDivergence) + +# "The values KSP_CONVERGED_CG_NEG_CURVE, KSP_CONVERGED_CG_CONSTRAINED, and +# KSP_CONVERGED_STEP_LENGTH are returned only by the special KSPNASH, +# KSPSTCG, and KSPGLTR solvers which are used by the SNESNEWTONTR (trust +# region) solver." + +class KSP_AbsoluteToleranceConvergence(AbsoluteToleranceConvergence): + """Residual 2-norm less than abstol""" + status_code = PETSc.KSP.ConvergedReason.CONVERGED_ATOL + status_name = "KSP_CONVERGED_ATOL" + suite = "petsc" + +class KSP_RelativeToleranceConvergence(RelativeToleranceConvergence): + """Residual 2-norm decreased by a factor of rtol, from 2-norm of right + hand side. + """ + status_code = PETSc.KSP.ConvergedReason.CONVERGED_RTOL + status_name = "KSP_CONVERGED_RTOL" + suite = "petsc" + +class KSP_NormalAbsoluteToleranceConvergence(KSP_AbsoluteToleranceConvergence): + status_code = PETSc.KSP.ConvergedReason.CONVERGED_ATOL_NORMAL + status_name = "KSP_CONVERGED_ATOL_NORMAL" + +class KSP_NormalRelativeToleranceConvergence(KSP_RelativeToleranceConvergence): + status_code = PETSc.KSP.ConvergedReason.CONVERGED_RTOL_NORMAL + status_name = "KSP_CONVERGED_RTOL_NORMAL" + +class KSP_IterationConvergence(IterationConvergence): + """Used by the KSPPREONLY solver after the single iteration of the + preconditioner is applied. Also used when the KSPConvergedSkip() + convergence test routine is set in KSP. + """ + status_code = PETSc.KSP.ConvergedReason.CONVERGED_ITS + status_name = "KSP_CONVERGED_ITS" + suite = "petsc" + +class KSP_HappyBreakdownConvergence(HappyBreakdownConvergence): + status_code = PETSc.KSP.ConvergedReason.CONVERGED_HAPPY_BREAKDOWN + status_name = "KSP_CONVERGED_HAPPY_BREAKDOWN" + suite = "petsc" + +class KSP_IteratingConvergence(IteratingConvergence): + """This flag is returned if you call KSPGetConvergedReason() while the + KSPSolve() is still running. + """ + status_code = PETSc.KSP.ConvergedReason.CONVERGED_ITERATING + status_name = "KSP_CONVERGED_ITERATING" + suite = "petsc" + + +class KSP_IterationDivergence(IterationDivergence): + """Ran out of iterations before any convergence criteria was reached""" + status_code = PETSc.KSP.ConvergedReason.DIVERGED_MAX_IT + status_name = "KSP_DIVERGED_ITS" + suite = "petsc" + +class KSP_NullDivergence(NullDivergence): + status_code = PETSc.KSP.ConvergedReason.DIVERGED_NULL + status_name = "KSP_DIVERGED_NULL" + suite = "petsc" + +class KSP_ToleranceDivergence(ToleranceDivergence): + """Residual norm increased by a factor of divtol. 
+ """ + status_code = PETSc.KSP.ConvergedReason.DIVERGED_DTOL + status_name = "KSP_DIVERGED_DTOL" + suite = "petsc" + +class KSP_BreakdownDivergence(BreakdownDivergence): + """Generic breakdown in method.""" + status_code = PETSc.KSP.ConvergedReason.DIVERGED_BREAKDOWN + status_name = "KSP_DIVERGED_BREAKDOWN" + suite = "petsc" + +class KSP_BreakdownBICGDivergence(KSP_BreakdownDivergence): + """Initial residual is orthogonal to preconditioned initial residual. + Try a different preconditioner, or a different initial Level.) + """ + status_code = PETSc.KSP.ConvergedReason.DIVERGED_BREAKDOWN_BICG + status_name = "KSP_DIVERGED_BREAKDOWN_BICG" + +class KSP_IndefiniteMatrixDivergence(IllConditionedDivergence): + status_code = PETSc.KSP.ConvergedReason.DIVERGED_INDEFINITE_MAT + status_name = "KSP_DIVERGED_INDEFINITE_MAT" + suite = "petsc" + +class KSP_NonSymmetricDivergence(IllConditionedDivergence): + """It appears the operator or preconditioner is not symmetric and this + Krylov method (KSPCG, KSPMINRES, KSPCR) requires symmetry + """ + status_code = PETSc.KSP.ConvergedReason.DIVERGED_NONSYMMETRIC + status_name = "KSP_DIVERGED_NONSYMMETRIC" + suite = "petsc" + +class KSP_PreconditioningDivergence(PreconditioningDivergence): + """It was not possible to build or use the requested preconditioner. + This is usually due to a zero pivot in a factorization. It can also + result from a failure in a subpreconditioner inside a nested + preconditioner such as PCFIELDSPLIT. + """ + status_code = PETSc.KSP.ConvergedReason.DIVERGED_PCSETUP_FAILED + status_name = "KSP_DIVERGED_PC_FAILED" + suite = "petsc" + +class KSP_IndefinitePreconditionerDivergence(IllConditionedPreconditionerDivergence): + """It appears the preconditioner is indefinite (has both positive and + negative eigenvalues) and this Krylov method (KSPCG) requires it to be + positive definite. 
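Each of these classes ties a ``PETSc.KSP.ConvergedReason`` code to a FiPy convergence or divergence type, so the outcome of a solve can be recovered from the status code alone. A hypothetical sketch of that lookup, assuming the PETSc suite is importable (``registry`` and ``classify`` are illustrative names, not part of the module)::

    from petsc4py import PETSc

    from fipy.solvers.petsc.petscConvergence import (
        KSP_RelativeToleranceConvergence,
        KSP_IterationDivergence,
        KSP_NanOrInfDivergence)

    # Illustrative subset; the real mapping would cover every class above.
    registry = {cls.status_code: cls
                for cls in (KSP_RelativeToleranceConvergence,
                            KSP_IterationDivergence,
                            KSP_NanOrInfDivergence)}

    def classify(reason):
        """Return the convergence class matching a KSP.ConvergedReason value."""
        return registry.get(reason)

    assert classify(PETSc.KSP.ConvergedReason.CONVERGED_RTOL) is \
        KSP_RelativeToleranceConvergence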
+ """ + status_code = PETSc.KSP.ConvergedReason.DIVERGED_INDEFINITE_PC + status_name = "KSP_DIVERGED_INDEFINITE_PC" + suite = "petsc" + +class KSP_NanOrInfDivergence(OutOfRangeDivergence): + """Residual norm became Not-a-number or Inf likely due to 0/0.""" + status_code = PETSc.KSP.ConvergedReason.DIVERGED_NANORINF + status_name = "KSP_DIVERGED_NANORINF" + suite = "petsc" diff --git a/fipy/solvers/petsc/petscKrylovSolver.py b/fipy/solvers/petsc/petscKrylovSolver.py index 3230706a8d..224b7e4697 100644 --- a/fipy/solvers/petsc/petscKrylovSolver.py +++ b/fipy/solvers/petsc/petscKrylovSolver.py @@ -1,32 +1,17 @@ +from __future__ import unicode_literals __docformat__ = 'restructuredtext' from petsc4py import PETSc -from fipy.solvers.petsc.petscSolver import PETScSolver from fipy.tools.timer import Timer +from .petscSolver import PETScSolver +from .preconditioners.defaultPreconditioner import DefaultPreconditioner -__all__ = ["PETScKrylovSolver"] +from fipy.tools import numerix -_reason = {1: "KSP_CONVERGED_RTOL_NORMAL", - 9: "KSP_CONVERGED_ATOL_NORMAL", - 2: "KSP_CONVERGED_RTOL", - 3: "KSP_CONVERGED_ATOL", - 4: "KSP_CONVERGED_ITS", - 5: "KSP_CONVERGED_CG_NEG_CURVE", - 6: "KSP_CONVERGED_CG_CONSTRAINED", - 7: "KSP_CONVERGED_STEP_LENGTH", - 8: "KSP_CONVERGED_HAPPY_BREAKDOWN", - -2: "KSP_DIVERGED_NULL", - -3: "KSP_DIVERGED_ITS", - -4: "KSP_DIVERGED_DTOL", - -5: "KSP_DIVERGED_BREAKDOWN", - -6: "KSP_DIVERGED_BREAKDOWN_BICG", - -7: "KSP_DIVERGED_NONSYMMETRIC", - -8: "KSP_DIVERGED_INDEFINITE_PC", - -9: "KSP_DIVERGED_NANORINF", - -10: "KSP_DIVERGED_INDEFINITE_MAT", - -11: "KSP_DIVERGED_PC_FAILED", - 0: "KSP_CONVERGED_ITERATING"} +__all__ = ["PETScKrylovSolver"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] class PETScKrylovSolver(PETScSolver): @@ -35,37 +20,190 @@ class PETScKrylovSolver(PETScSolver): It provides the code to call all Krylov solvers from the PETSc package. """ - - def __init__(self, tolerance=1e-10, iterations=1000, precon=None): - """ - :Parameters: - - `tolerance`: The required error tolerance. - - `iterations`: The maximum number of iterative steps to perform. - - `precon`: Preconditioner to use (string). + DEFAULT_PRECONDITIONER = DefaultPreconditioner + + def __init__(self, tolerance="default", + absolute_tolerance=None, + divergence_tolerance=None, + criterion="default", + iterations="default", precon="default"): + """ + Parameters + ---------- + tolerance : float + Required relative error tolerance. + absolute_tolerance : float + Required absolute error tolerance. + divergence_tolerance : float + Required divergence error tolerance. + criterion : {'default', 'unscaled', 'RHS', 'matrix', 'initial', 'preconditioned', 'natural', 'legacy'} + Interpretation of ``tolerance``. + See :ref:`CONVERGENCE` for more information. + iterations : int + Maximum number of iterative steps to perform. + precon : ~fipy.solvers.petsc.preconditioners.petscPreconditioner.PETScPreconditioner, optional + Preconditioner to apply to the matrix. A value of None means + to perform an unpreconditioned solve. (default: + :class:`~fipy.solvers.petsc.preconditioners.defaultPreconditioner.DefaultPreconditioner`). 
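Taken together, the new keywords let a PETSc Krylov solver be configured in a single call. A sketch, assuming the PETSc ``LinearGMRESSolver`` is re-exported from ``fipy.solvers.petsc`` as in the other suites; the numerical values are only placeholders::

    from fipy.solvers.petsc import LinearGMRESSolver
    from fipy.solvers.petsc.preconditioners import JacobiPreconditioner

    # Relative tolerance judged against the initial residual, with an
    # absolute floor, a divergence cutoff, and Jacobi preconditioning.
    solver = LinearGMRESSolver(tolerance=1e-8,
                               absolute_tolerance=1e-50,
                               divergence_tolerance=1e5,
                               criterion="initial",
                               iterations=500,
                               precon=JacobiPreconditioner())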
""" if self.__class__ is PETScKrylovSolver: raise NotImplementedError("can't instantiate abstract base class") - - PETScSolver.__init__(self, tolerance=tolerance, - iterations=iterations, precon=precon) + + self.absolute_tolerance = absolute_tolerance + self.divergence_tolerance = divergence_tolerance + super(PETScKrylovSolver, self).__init__(tolerance=tolerance, criterion=criterion, + iterations=iterations, precon=precon) + + def _adaptLegacyTolerance(self, L, x, b): + return (1., PETSc.KSP.NormType.DEFAULT) + + def _convergenceTest(self, ksp, its, rnorm, scale): + """Replace `KSPConvergedDefault()` with custom normalization + + Modeled on `KSPConvergedDefault() + `_. + Simplisitically, converged if `rnorm <= rtol * scale`. + + It would be much nicer (and less expensive) if they would just let + you specify how to calculate rnorm0! + `KSPConvergedDefaultSetUIRNorm()` isn't exposed to petsc4py, and it + wouldn't help with "unscaled" or "matrix". + + Parameters + ---------- + ksp : ~petsc4py.PETSc.KSP + Krylov solver object. + its : int + Number of iterations performed. + rnorm : float + Norm of the residual to test. + scale : float + How to interpret magnitude of tolerance. + + Returns + ------- + bool + Whether solution is converged. + """ + + reason = PETSc.KSP.ConvergedReason.CONVERGED_ITERATING + + min_it = 0 + + if numerix.isnan(rnorm) or numerix.isinf(rnorm): + pcreason = PCReduceFailedReason + if pcreason: + reason = PETSc.KSP.ConvergedReason.DIVERGED_PCSETUP_FAILED + self._log.debug("Linear solver pcsetup fails, " + "declaring divergence") + else: + reason = PETSc.KSP.ConvergedReason.DIVERGED_NANORINF + self._log.debug("Linear solver has created a not a number (NaN) " + "as the residual norm, declaring divergence") + elif its >= min_it: + if rnorm <= max(ksp.rtol * scale, ksp.atol): + if rnorm < ksp.atol: + reason = PETSc.KSP.ConvergedReason.CONVERGED_ATOL + self._log.debug("Linear solver has converged. " + "Residual norm {rnorm:14.12e} is less " + "than absolute tolerance {atol:14.12e} " + "at iteration {its}".format(rnorm=rnorm, + atol=ksp.atol, + its=its)) + else: + reason = PETSc.KSP.ConvergedReason.CONVERGED_RTOL + self._log.debug("Linear solver has converged. " + "Residual norm {rnorm:14.12e} is less " + "than relative tolerance {rtol:14.12e} " + "times residual scale {scale:14.12e} " + "at iteration {its}".format(rnorm=rnorm, + rtol=ksp.rtol, + scale=scale, + its=its)) + elif rnorm >= ksp.dtol * scale: + reason = PETSc.KSP.ConvergedReason.DIVERGED_DTOL + self._log.debug("Linear solver is diverging. 
" + "Residual scale {scale:14.12e}, " + "current residual norm {rnorm:14.12e} " + "at iteration {its}".format(rnorm=rnorm, + scale=scale, + its=its)) + + return reason + + def _adaptUnscaledTolerance(self, L, x, b): + return (1., self._convergenceTest) + + def _adaptRHSTolerance(self, L, x, b): + return (1., PETSc.KSP.NormType.UNPRECONDITIONED) + + def _adaptMatrixTolerance(self, L, x, b): + return (self._matrixNorm(L, x, b), self._convergenceTest) + + def _adaptInitialTolerance(self, L, x, b): + return (self._residualNorm(L, x, b), self._convergenceTest) + + def _adaptPreconditionedTolerance(self, L, x, b): + return (1., PETSc.KSP.NormType.PRECONDITIONED) + + def _adaptNaturalTolerance(self, L, x, b): + return (1., PETSc.KSP.NormType.NATURAL) def _solve_(self, L, x, b): + """Solve system of equations posed for PETSc + + Parameters + ---------- + L : PETSc.Mat + Sparse matrix + x : PETSc.Vec + Solution variable as ghosted vector + b : PETSc.Vec + Right-hand side as ghosted vector + + Returns + ------- + x : PETSc.Vec + Solution variable as ghosted vector + """ ksp = PETSc.KSP() ksp.create(L.comm) ksp.setType(self.solver) + if self.criterion != "legacy": + ksp.setInitialGuessNonzero(True) + + L.assemble() + ksp.setOperators(L) + + tolerance_scale, suite_criterion = self._adaptTolerance(L, x, b) + if suite_criterion == self._convergenceTest: + ksp.setConvergenceTest(suite_criterion, + kargs=dict(scale=tolerance_scale)) + suite_criterion = PETSc.KSP.NormType.PRECONDITIONED + rtol = self.tolerance + divtol = self.divergence_tolerance + else: + rtol, divtol = (self.scale_tolerance(tol, tolerance_scale) + for tol in (self.tolerance, + self.divergence_tolerance)) + + ksp.setTolerances(rtol=rtol, + atol=self.absolute_tolerance, + divtol=divtol, + max_it=self.iterations) + ksp.setNormType(suite_criterion) self._log.debug("BEGIN precondition") with Timer() as t: - if self.preconditioner is not None: - ksp.getPC().setType(self.preconditioner) + if self.preconditioner is None: + ksp.getPC().setType("none") + else: + self.preconditioner._applyToSolver(solver=ksp, matrix=L) self._log.debug("END precondition - {} ns".format(t.elapsed)) - ksp.setTolerances(rtol=self.tolerance, max_it=self.iterations) - L.assemble() - ksp.setOperators(L) ksp.setFromOptions() self._log.debug("BEGIN solve") @@ -75,11 +213,19 @@ def _solve_(self, L, x, b): self._log.debug("END solve - {} ns".format(t.elapsed)) - self._log.debug('solver: %s', ksp.type) - self._log.debug('precon: %s', ksp.getPC().type) - self._log.debug('convergence: %s', _reason[ksp.reason]) - self._log.debug('iterations: %d / %d', ksp.its, self.iterations) - self._log.debug('norm: %s', ksp.norm) - self._log.debug('norm_type: %s', ksp.norm_type) + self._setConvergence(suite="petsc", + code=ksp.reason, + iterations=ksp.its, + tolerance_scale=tolerance_scale, + # "The residual value that is tested may be an approximation" + # https://petsc.org/release/petsc4py/reference/petsc4py.PETSc.KSP.html#petsc4py.PETSc.KSP.setConvergenceTest + residual=ksp.norm, + ksp_solver=ksp.type, + ksp_precon=ksp.getPC().type, + ksp_norm_type=ksp.norm_type) ksp.destroy() + + self.convergence.warn() + + return x diff --git a/fipy/solvers/petsc/petscSolver.py b/fipy/solvers/petsc/petscSolver.py index 7c649cc79b..a48c9827f7 100644 --- a/fipy/solvers/petsc/petscSolver.py +++ b/fipy/solvers/petsc/petscSolver.py @@ -12,17 +12,10 @@ class PETScSolver(Solver): .. attention:: This class is abstract. Always create one of its subclasses. 
""" - def __init__(self, *args, **kwargs): - if self.__class__ is PETScSolver: - raise NotImplementedError("can't instantiate abstract base class") - else: - Solver.__init__(self, *args, **kwargs) @property - def _globalMatrixAndVectors(self): - if not hasattr(self, 'globalVectors'): - globalMatrix = self.matrix - + def globalVectors(self): + if not hasattr(self, '_globalVectors'): overlappingVector = self.matrix._fipy2petscGhost(var=self.var) from fipy.variables.coupledCellVariable import _CoupledCellVariable @@ -33,40 +26,65 @@ def _globalMatrixAndVectors(self): overlappingRHSvector = self.matrix._fipy2petscGhost(var=RHSvector) - self.globalVectors = (globalMatrix, overlappingVector, overlappingRHSvector) + self._globalVectors = (overlappingVector, overlappingRHSvector) - return self.globalVectors + return self._globalVectors - def _deleteGlobalMatrixAndVectors(self): - del self.matrix - if hasattr(self, "globalVectors"): - globalMatrix, overlappingVector, overlappingRHSvector = self._globalMatrixAndVectors + def _deleteGlobalVectors(self): + if hasattr(self, "_globalVectors"): + overlappingVector, overlappingRHSvector = self.globalVectors overlappingVector.destroy() overlappingRHSvector.destroy() - del self.globalVectors + del self._globalVectors - def _solve(self): - from fipy.terms import SolutionVariableNumberError - - globalMatrix, overlappingVector, overlappingRHSvector = self._globalMatrixAndVectors + def _rhsNorm(self, L, x, b): + return b.norm(PETSc.NormType.NORM_2) + + def _matrixNorm(self, L, x, b): + L.assemble() + return L.norm(PETSc.NormType.NORM_INFINITY) + + def _residualVectorAndNorm(self, L, x, b): + residualVector = L * x - b + + return residualVector, residualVector.norm(PETSc.NormType.NORM_2) + + @property + def _Lxb(self): + """Matrix, solution vector, and right-hand side vector + + Returns + ------- + L : PETSc.Mat + Sparse matrix + x : PETSc.Vec + Solution variable as ghosted vector + b : PETSc.Vec + Right-hand side as ghosted vector + """ + x, b = self.globalVectors + L = self.matrix.matrix if ((self.matrix == 0) - or (self.matrix.matrix.sizes[0][1] != self.matrix.matrix.sizes[1][1]) - or (self.matrix.matrix.sizes[0][1] != overlappingVector.size)): + or (L.sizes[0][1] != L.sizes[1][1]) + or (L.sizes[0][1] != x.size)): + + from fipy.terms import SolutionVariableNumberError raise SolutionVariableNumberError - self._solve_(globalMatrix.matrix, - overlappingVector, - overlappingRHSvector) + return (L, x, b) - value = self.matrix._petsc2fipyGhost(vec=overlappingVector) - self.var.value = numerix.reshape(value, self.var.shape) - - self._deleteGlobalMatrixAndVectors() + def _scatterGhosts(self, x): + """Distribute ghost values (if any) across processes + """ + return self.matrix._petsc2fipyGhost(vec=x) + + def _cleanup(self): + self._deleteGlobalVectors() del self.var del self.RHSvector - + @property def _matrixClass(self): return _PETScMeshMatrix @@ -84,8 +102,8 @@ def _calcResidualVector(self, residualFn=None): return arr def _calcResidualVector_(self): - globalMatrix, overlappingVector, overlappingRHSvector = self._globalMatrixAndVectors - Lx = globalMatrix * overlappingVector + overlappingVector, overlappingRHSvector = self.globalVectors + Lx = self.matrix * overlappingVector residual = Lx - overlappingRHSvector Lx.destroy() return residual @@ -104,8 +122,8 @@ def _calcRHSNorm(self): return self.nonOverlappingRHSvector.Norm2() def __del__(self): - if hasattr(self, "globalVectors"): - globalMatrix, overlappingVector, overlappingRHSvector = 
self._globalMatrixAndVectors - del globalMatrix + if hasattr(self, "_globalVectors"): + overlappingVector, overlappingRHSvector = self.globalVectors + del self.matrix overlappingVector.destroy() overlappingRHSvector.destroy() diff --git a/fipy/solvers/petsc/preconditioners/__init__.py b/fipy/solvers/petsc/preconditioners/__init__.py new file mode 100644 index 0000000000..75226d347c --- /dev/null +++ b/fipy/solvers/petsc/preconditioners/__init__.py @@ -0,0 +1,15 @@ +from __future__ import unicode_literals +from .defaultPreconditioner import * +from .icPreconditioner import * +from .iluPreconditioner import * +from .jacobiPreconditioner import * +from .luPreconditioner import * +from .ssorPreconditioner import * + +__all__ = [] +__all__.extend(defaultPreconditioner.__all__) +__all__.extend(icPreconditioner.__all__) +__all__.extend(iluPreconditioner.__all__) +__all__.extend(jacobiPreconditioner.__all__) +__all__.extend(luPreconditioner.__all__) +__all__.extend(ssorPreconditioner.__all__) diff --git a/fipy/solvers/petsc/preconditioners/defaultPreconditioner.py b/fipy/solvers/petsc/preconditioners/defaultPreconditioner.py new file mode 100644 index 0000000000..658be5daba --- /dev/null +++ b/fipy/solvers/petsc/preconditioners/defaultPreconditioner.py @@ -0,0 +1,25 @@ +from __future__ import unicode_literals +__docformat__ = 'restructuredtext' + +from .petscPreconditioner import PETScPreconditioner + +__all__ = ["DefaultPreconditioner"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] + +class DefaultPreconditioner(PETScPreconditioner): + """Apply PETSc's default preconditioning to :class:`~fipy.solvers.petsc.petscSolver.PETScSolver`. + + "The default preconditioner for sparse matrices is PCILU or PCICC with + 0 fill on one process and block Jacobi (PCBJACOBI) with PCILU or PCICC + in parallel." [#PETSc_Default_Preconditioner]_ + + .. [#PETSc_Default_Preconditioner] https://petsc.org/main/manualpages/PC/PCCreate/#note + """ + + pctype = None + + def _applyToSolver(self, solver, matrix): + """Leave solver alone. + """ + pass diff --git a/fipy/solvers/petsc/preconditioners/icPreconditioner.py b/fipy/solvers/petsc/preconditioners/icPreconditioner.py new file mode 100644 index 0000000000..71bf5137e1 --- /dev/null +++ b/fipy/solvers/petsc/preconditioners/icPreconditioner.py @@ -0,0 +1,14 @@ +from __future__ import unicode_literals +__docformat__ = 'restructuredtext' + +from .petscPreconditioner import PETScPreconditioner + +__all__ = ["ICPreconditioner"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] + +class ICPreconditioner(PETScPreconditioner): + """Incomplete Choleski preconditioner for :class:`~fipy.solvers.petsc.petscSolver.PETScSolver`. + """ + + pctype = "icc" diff --git a/fipy/solvers/petsc/preconditioners/iluPreconditioner.py b/fipy/solvers/petsc/preconditioners/iluPreconditioner.py new file mode 100644 index 0000000000..fdd5b311d6 --- /dev/null +++ b/fipy/solvers/petsc/preconditioners/iluPreconditioner.py @@ -0,0 +1,14 @@ +from __future__ import unicode_literals +__docformat__ = 'restructuredtext' + +from .petscPreconditioner import PETScPreconditioner + +__all__ = ["ILUPreconditioner"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] + +class ILUPreconditioner(PETScPreconditioner): + """ILU preconditioner for :class:`~fipy.solvers.petsc.petscSolver.PETScSolver`. 
+ """ + +    pctype = "ilu" diff --git a/fipy/solvers/petsc/preconditioners/jacobiPreconditioner.py b/fipy/solvers/petsc/preconditioners/jacobiPreconditioner.py new file mode 100644 index 0000000000..05a34ef3f1 --- /dev/null +++ b/fipy/solvers/petsc/preconditioners/jacobiPreconditioner.py @@ -0,0 +1,14 @@ +from __future__ import unicode_literals +__docformat__ = 'restructuredtext' + +from .petscPreconditioner import PETScPreconditioner + +__all__ = ["JacobiPreconditioner"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] + +class JacobiPreconditioner(PETScPreconditioner): +    """Jacobi preconditioner for :class:`~fipy.solvers.petsc.petscSolver.PETScSolver`. +    """ + +    pctype = "jacobi" diff --git a/fipy/solvers/petsc/preconditioners/luPreconditioner.py b/fipy/solvers/petsc/preconditioners/luPreconditioner.py new file mode 100644 index 0000000000..8871419d77 --- /dev/null +++ b/fipy/solvers/petsc/preconditioners/luPreconditioner.py @@ -0,0 +1,15 @@ +from __future__ import unicode_literals +__docformat__ = 'restructuredtext' + +from .petscPreconditioner import PETScPreconditioner + +__all__ = ["LUPreconditioner"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] + +class LUPreconditioner(PETScPreconditioner): +    """LU preconditioner for :class:`~fipy.solvers.petsc.petscSolver.PETScSolver`. + +    """ + +    pctype = "lu" diff --git a/fipy/solvers/petsc/preconditioners/petscPreconditioner.py b/fipy/solvers/petsc/preconditioners/petscPreconditioner.py new file mode 100644 index 0000000000..0722dbf0bf --- /dev/null +++ b/fipy/solvers/petsc/preconditioners/petscPreconditioner.py @@ -0,0 +1,17 @@ +from __future__ import unicode_literals +__docformat__ = 'restructuredtext' + +from fipy.solvers.preconditioner import SolverModifyingPreconditioner + +__all__ = ["PETScPreconditioner"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] + +class PETScPreconditioner(SolverModifyingPreconditioner): +    """Base class for preconditioners of :class:`~fipy.solvers.petsc.petscSolver.PETScSolver`. + +    .. attention:: This class is abstract. Always create one of its subclasses. +    """ + +    def _applyToSolver(self, solver, matrix): +        solver.getPC().setType(self.pctype) diff --git a/fipy/solvers/petsc/preconditioners/ssorPreconditioner.py b/fipy/solvers/petsc/preconditioners/ssorPreconditioner.py new file mode 100644 index 0000000000..5d0888967a --- /dev/null +++ b/fipy/solvers/petsc/preconditioners/ssorPreconditioner.py @@ -0,0 +1,16 @@ +from __future__ import unicode_literals +__docformat__ = 'restructuredtext' + +from .petscPreconditioner import PETScPreconditioner + +__all__ = ["SSORPreconditioner"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] + +class SSORPreconditioner(PETScPreconditioner): +    """ +    SSOR preconditioner for :class:`~fipy.solvers.petsc.petscSolver.PETScSolver`. 
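Because ``PETScPreconditioner._applyToSolver()`` only sets the ``PCType`` on the solver's PC, choosing a preconditioner is simply a matter of passing an instance (or ``None`` for an unpreconditioned solve, or ``DefaultPreconditioner()`` to let PETSc pick). Wrapping any other PETSc ``PCType`` is a short subclass; ``"gamg"`` is a real PETSc type, but the class below is hypothetical and not part of this changeset::

    from fipy.solvers.petsc.preconditioners.petscPreconditioner import PETScPreconditioner

    class GAMGPreconditioner(PETScPreconditioner):
        """Hypothetical wrapper for PETSc's geometric-algebraic multigrid PC."""

        pctype = "gamg"

An instance would then be passed as ``precon=GAMGPreconditioner()`` to any of the PETSc Krylov solvers, exactly like the suite's own preconditioner classes.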
+ + """ + + pctype = "sor" diff --git a/fipy/solvers/preconditioner.py b/fipy/solvers/preconditioner.py new file mode 100644 index 0000000000..487b2c2a8c --- /dev/null +++ b/fipy/solvers/preconditioner.py @@ -0,0 +1,57 @@ +from __future__ import unicode_literals +from builtins import object +__docformat__ = 'restructuredtext' + +__all__ = ["Preconditioner", + "SolverModifyingPreconditioner", + "MatrixModifyingPreconditioner"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] + +class Preconditioner(object): + """Base class for solver preconditioners. + + .. attention:: This class is abstract. Always create one of its subclasses. + """ + + pass + + +class SolverModifyingPreconditioner(Preconditioner): + """Base class for preconditioners that modify a :class:`~fipy.solvers.solver.Solver`. + """ + + def _applyToSolver(self, solver, matrix): + """Modify `solver` to apply preconditioning to `matrix`. + + Parameters + ---------- + solver + The solver to modify with preconditioner. + matrix + The matrix the preconditioner applies to. + + Returns + ------- + None + """ + raise NotImplementedError + + +class MatrixModifyingPreconditioner(Preconditioner): + """Base class for preconditioners that modify a :class:`~fipy.matrices.sparseMatrix.SparseMatrix`. + """ + + def _applyToMatrix(self, matrix): + """Create a preconditioner for `matrix`. + + Returns + ------- + preconditioner : object + Preconditioning object appropriate for this solver suite. + matrix : :class:`~fipy.matrices.sparseMatrix.SparseMatrix` + Matrix, possibly restructured to facilitate applying + preconditioner. + """ + raise NotImplementedError + diff --git a/fipy/solvers/pyAMG/linearGeneralSolver.py b/fipy/solvers/pyAMG/linearGeneralSolver.py index a0c419ffe1..4adf403a9d 100644 --- a/fipy/solvers/pyAMG/linearGeneralSolver.py +++ b/fipy/solvers/pyAMG/linearGeneralSolver.py @@ -1,5 +1,5 @@ from __future__ import unicode_literals -from fipy.solvers.scipy.scipySolver import _ScipySolver +from fipy.solvers.scipy.scipySolver import ScipySolver from pyamg import solve import os from fipy.tools import numerix @@ -8,7 +8,7 @@ from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class LinearGeneralSolver(_ScipySolver): +class LinearGeneralSolver(ScipySolver): """ The `LinearGeneralSolver` is an interface to the generic PyAMG, which solves the arbitrary system Ax=b with the best out-of-the box diff --git a/fipy/solvers/pyAMG/preconditioners/smoothedAggregationPreconditioner.py b/fipy/solvers/pyAMG/preconditioners/smoothedAggregationPreconditioner.py index ff4576cad0..a4f3f55c2e 100644 --- a/fipy/solvers/pyAMG/preconditioners/smoothedAggregationPreconditioner.py +++ b/fipy/solvers/pyAMG/preconditioners/smoothedAggregationPreconditioner.py @@ -1,16 +1,22 @@ from __future__ import unicode_literals from builtins import object + from pyamg import smoothed_aggregation_solver +from ...scipy.preconditioners.scipyPreconditioner import ScipyPreconditioner + __all__ = ["SmoothedAggregationPreconditioner"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class SmoothedAggregationPreconditioner(object): - """Preconditioner based on `PyAMG smoothed_aggregation_solver `_ +class SmoothedAggregationPreconditioner(ScipyPreconditioner): + """Preconditioner based on `PyAMG smoothed_aggregation_solver`_ for :class:`~fipy.solvers.scipy.scipySolver.ScipySolver`. + + .. 
_PyAMG smoothed_aggregation_solver: https://pyamg.readthedocs.io/en/latest/generated/pyamg.aggregation.html#pyamg.aggregation.smoothed_aggregation_solver """ + def __init__(self): pass - def _applyToMatrix(self, A): - return smoothed_aggregation_solver(A).aspreconditioner(cycle='V') + def _applyToMatrix(self, matrix): + return smoothed_aggregation_solver(matrix).aspreconditioner(cycle='V'), None diff --git a/fipy/solvers/pyamgx/__init__.py b/fipy/solvers/pyamgx/__init__.py index 57c19c4ee1..27d281d23c 100644 --- a/fipy/solvers/pyamgx/__init__.py +++ b/fipy/solvers/pyamgx/__init__.py @@ -8,19 +8,23 @@ import pyamgx -from fipy.solvers.pyamgx.pyAMGXSolver import * -from fipy.solvers.pyamgx.linearCGSolver import * -from fipy.solvers.pyamgx.linearGMRESSolver import * -from fipy.solvers.pyamgx.linearFGMRESSolver import * -from fipy.solvers.pyamgx.linearBiCGStabSolver import * -from fipy.solvers.pyamgx.linearLUSolver import * -from fipy.solvers.pyamgx.aggregationAMGSolver import * -from fipy.solvers.pyamgx.classicalAMGSolver import * +from .preconditioners import * + +from .pyAMGXSolver import * +from .linearPCGSolver import * +from .linearGMRESSolver import * +from .linearFGMRESSolver import * +from .linearBiCGStabSolver import * +from .linearLUSolver import * +from .aggregationAMGSolver import * +from .classicalAMGSolver import * + +from . import pyamgxConvergence pyamgx.initialize() atexit.register(pyamgx.finalize) -DefaultSolver = LinearCGSolver +DefaultSolver = LinearPCGSolver DefaultAsymmetricSolver = LinearLUSolver DummySolver = DefaultSolver GeneralSolver = LinearLUSolver diff --git a/fipy/solvers/pyamgx/aggregationAMGSolver.py b/fipy/solvers/pyamgx/aggregationAMGSolver.py index 053f5aa30a..ad90cf4745 100644 --- a/fipy/solvers/pyamgx/aggregationAMGSolver.py +++ b/fipy/solvers/pyamgx/aggregationAMGSolver.py @@ -11,37 +11,18 @@ class AggregationAMGSolver(PyAMGXSolver): The `AggregationAMGSolver` is an interface to the aggregation AMG solver in AMGX, with a Jacobi smoother by default. """ - def __init__(self, tolerance=1e-10, iterations=2000, - precon=None, - smoother=BlockJacobiSmoother(), - **kwargs): - """ - Parameters - ---------- - tolerance : float - Required error tolerance. - iterations : int - Maximum number of iterative steps to perform. - precon : ~fipy.solvers.pyamgx.preconditioners.preconditioners.Preconditioner, optional - **kwargs - Other AMGX solver options - """ - config_dict = { - "config_version": 2, - "determinism_flag": 1, - "solver": { - "algorithm": "AGGREGATION", - "solver": "AMG", - "selector": "SIZE_2", - "monitor_residual": 1, - "max_levels": 1000, - "cycle": "V" - } + + CONFIG_DICT = { + "config_version": 2, + "determinism_flag": 1, + "solver": { + "algorithm": "AGGREGATION", + "solver": "AMG", + "selector": "SIZE_2", + "monitor_residual": 1, + "max_levels": 1000, + "cycle": "V" } - super(AggregationAMGSolver, self).__init__( - config_dict, - tolerance=tolerance, - iterations=iterations, - precon=precon, - smoother=smoother, - **kwargs) + } + + DEFAULT_SMOOTHER = BlockJacobiSmoother diff --git a/fipy/solvers/pyamgx/classicalAMGSolver.py b/fipy/solvers/pyamgx/classicalAMGSolver.py index c90500fbbc..7b0a0236df 100644 --- a/fipy/solvers/pyamgx/classicalAMGSolver.py +++ b/fipy/solvers/pyamgx/classicalAMGSolver.py @@ -11,36 +11,16 @@ class ClassicalAMGSolver(PyAMGXSolver): The `ClassicalAMGSolver` is an interface to the classical AMG solver in AMGX, with a Jacobi smoother by default. 
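With the AMGX configuration moved into class attributes, adding another solver variant becomes mostly declarative: define ``CONFIG_DICT`` and, where appropriate, ``DEFAULT_SMOOTHER`` or ``DEFAULT_PRECONDITIONER``. A hypothetical sketch, assuming the pyamgx suite is importable; the ``"IDR"`` solver string is only a placeholder that has not been checked against the AMGX reference::

    from fipy.solvers.pyamgx import PyAMGXSolver
    from fipy.solvers.pyamgx.smoothers import BlockJacobiSmoother

    class HypotheticalIDRSolver(PyAMGXSolver):
        """Sketch of a new AMGX wrapper; only the class attributes change."""

        CONFIG_DICT = {
            "config_version": 2,
            "determinism_flag": 1,
            "exception_handling": 1,
            "solver": {
                "monitor_residual": 1,
                "solver": "IDR",
            }
        }

        DEFAULT_SMOOTHER = BlockJacobiSmoother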
""" - def __init__(self, tolerance=1e-10, iterations=2000, - precon=None, - smoother=BlockJacobiSmoother(), - **kwargs): - """ - Parameters - ---------- - tolerance : float - Required error tolerance. - iterations : int - Maximum number of iterative steps to perform. - precon : ~fipy.solvers.pyamgx.preconditioners.preconditioners.Preconditioner, optional - smoother : ~fipy.solvers.pyamgx.smoothers.smoothers.Smoother, optional - **kwargs - Other AMGX solver options - """ - config_dict = { - "config_version": 2, - "determinism_flag": 1, - "solver": { - "algorithm": "CLASSICAL", - "solver": "AMG", - "monitor_residual": 1, - "max_levels": 1000, - } + + CONFIG_DICT = { + "config_version": 2, + "determinism_flag": 1, + "solver": { + "algorithm": "CLASSICAL", + "solver": "AMG", + "monitor_residual": 1, + "max_levels": 1000, } - super(ClassicalAMGSolver, self).__init__( - config_dict, - tolerance=tolerance, - iterations=iterations, - precon=precon, - smoother=smoother, - **kwargs) + } + + DEFAULT_SMOOTHER = BlockJacobiSmoother diff --git a/fipy/solvers/pyamgx/linearBiCGStabSolver.py b/fipy/solvers/pyamgx/linearBiCGStabSolver.py index 6af2a5b1df..11be2385ae 100644 --- a/fipy/solvers/pyamgx/linearBiCGStabSolver.py +++ b/fipy/solvers/pyamgx/linearBiCGStabSolver.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from fipy.solvers.pyamgx import PyAMGXSolver -from fipy.solvers.pyamgx.preconditioners import * +from fipy.solvers.pyamgx.preconditioners import JacobiPreconditioner __all__ = ["LinearBiCGStabSolver"] from future.utils import text_to_native_str @@ -12,36 +12,15 @@ class LinearBiCGStabSolver(PyAMGXSolver): AMGX, with a Jacobi preconditioner by default. """ - def __init__(self, tolerance=1e-10, iterations=2000, - precon=BlockJacobiPreconditioner(), - **kwargs): - """ - Parameters - ---------- - tolerance : float - Required error tolerance. - iterations : int - Maximum number of iterative steps to perform. - precon : ~fipy.solvers.pyamgx.preconditioners.preconditioners.Preconditioner, optional - **kwargs - Other AMGX solver options - """ - config_dict = { - "config_version": 2, - "determinism_flag": 1, - "exception_handling" : 1, - "solver": { - "convergence": "RELATIVE_INI_CORE", - "monitor_residual": 1, - "solver": "PBICGSTAB", - "preconditioner": { - "solver": "NOSOLVER" - } - } + CONFIG_DICT = { + "config_version": 2, + "determinism_flag": 1, + "exception_handling" : 1, + "solver": { + "convergence": "RELATIVE_INI_CORE", + "monitor_residual": 1, + "solver": "PBICGSTAB", } - super(LinearBiCGStabSolver, self).__init__( - config_dict, - tolerance=tolerance, - iterations=iterations, - precon=precon, - **kwargs) + } + + DEFAULT_PRECONDITIONER = JacobiPreconditioner diff --git a/fipy/solvers/pyamgx/linearCGSolver.py b/fipy/solvers/pyamgx/linearCGSolver.py deleted file mode 100644 index 05c6887bda..0000000000 --- a/fipy/solvers/pyamgx/linearCGSolver.py +++ /dev/null @@ -1,51 +0,0 @@ -from __future__ import unicode_literals -from fipy.solvers.pyamgx import PyAMGXSolver -from fipy.solvers.pyamgx.preconditioners import * - -__all__ = ["LinearCGSolver", "LinearPCGSolver"] -from future.utils import text_to_native_str -__all__ = [text_to_native_str(n) for n in __all__] - -class LinearCGSolver(PyAMGXSolver): - """ - The `LinearCGSolver` is an interface to the PCG solver in - AMGX, with no preconditioning by default. 
- """ - - def __init__(self, tolerance=1e-10, iterations=2000, precon=BlockJacobiPreconditioner(), - **kwargs): - """ - Parameters - ---------- - tolerance : float - Required error tolerance. - iterations : int - Maximum number of iterative steps to perform. - precon : ~fipy.solvers.pyamgx.preconditioners.preconditioners.Preconditioner, optional - **kwargs - Other AMGX solver options - """ - config_dict = { - "config_version": 2, - "determinism_flag": 1, - "exception_handling" : 1, - "solver": { - "convergence": "RELATIVE_INI_CORE", - "monitor_residual": 1, - "solver": "PCG", - "preconditioner": { - "solver": "NOSOLVER", - } - } - } - super(LinearCGSolver, self).__init__( - config_dict, - tolerance=tolerance, - iterations=iterations, - precon=precon, - **kwargs) - - def _canSolveAsymmetric(self): - return False - -LinearPCGSolver = LinearCGSolver diff --git a/fipy/solvers/pyamgx/linearFGMRESSolver.py b/fipy/solvers/pyamgx/linearFGMRESSolver.py index 948396a15b..51adf22bf6 100644 --- a/fipy/solvers/pyamgx/linearFGMRESSolver.py +++ b/fipy/solvers/pyamgx/linearFGMRESSolver.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from fipy.solvers.pyamgx import PyAMGXSolver -from fipy.solvers.pyamgx.preconditioners import BlockJacobiPreconditioner +from fipy.solvers.pyamgx.preconditioners import JacobiPreconditioner __all__ = ["LinearFGMRESSolver"] from future.utils import text_to_native_str @@ -12,35 +12,14 @@ class LinearFGMRESSolver(PyAMGXSolver): AMGX, with a Jacobi preconditioner by default. """ - def __init__(self, tolerance=1e-10, iterations=2000, - precon=BlockJacobiPreconditioner(), - **kwargs): - """ - Parameters - ---------- - tolerance : float - Required error tolerance. - iterations : int - Maximum number of iterative steps to perform. - precon : ~fipy.solvers.pyamgx.preconditioners.preconditioners.Preconditioner, optional - **kwargs - Other AMGX solver options - """ - config_dict = { - "config_version": 2, - "determinism_flag": 1, - "exception_handling" : 1, - "solver": { - "monitor_residual": 1, - "solver": "FGMRES", - "preconditioner": { - "solver": "NOSOLVER" - } - } + CONFIG_DICT = { + "config_version": 2, + "determinism_flag": 1, + "exception_handling" : 1, + "solver": { + "monitor_residual": 1, + "solver": "FGMRES", } - super(LinearFGMRESSolver, self).__init__( - config_dict, - tolerance=tolerance, - iterations=iterations, - precon=precon, - **kwargs) + } + + DEFAULT_PRECONDITIONER = JacobiPreconditioner diff --git a/fipy/solvers/pyamgx/linearGMRESSolver.py b/fipy/solvers/pyamgx/linearGMRESSolver.py index 123b0f23e2..78b7201702 100644 --- a/fipy/solvers/pyamgx/linearGMRESSolver.py +++ b/fipy/solvers/pyamgx/linearGMRESSolver.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from fipy.solvers.pyamgx import PyAMGXSolver -from fipy.solvers.pyamgx.preconditioners import BlockJacobiPreconditioner +from fipy.solvers.pyamgx.preconditioners import JacobiPreconditioner __all__ = ["LinearGMRESSolver"] from future.utils import text_to_native_str @@ -12,36 +12,15 @@ class LinearGMRESSolver(PyAMGXSolver): AMGX, with a Jacobi preconditioner by default. """ - def __init__(self, tolerance=1e-10, iterations=2000, - precon=BlockJacobiPreconditioner(), - **kwargs): - """ - Parameters - ---------- - tolerance : float - Required error tolerance. - iterations : int - Maximum number of iterative steps to perform. 
- precon : ~fipy.solvers.pyamgx.preconditioners.preconditioners.Preconditioner, optional - **kwargs - Other AMGX solver options - """ - config_dict = { - "config_version": 2, - "determinism_flag": 1, - "exception_handling" : 1, - "solver": { - "monitor_residual": 1, - "solver": "GMRES", - "convergence": "RELATIVE_INI_CORE", - "preconditioner": { - "solver": "NOSOLVER" - } - } + CONFIG_DICT = { + "config_version": 2, + "determinism_flag": 1, + "exception_handling" : 1, + "solver": { + "monitor_residual": 1, + "solver": "GMRES", + "convergence": "RELATIVE_INI_CORE", } - super(LinearGMRESSolver, self).__init__( - config_dict, - tolerance=tolerance, - iterations=iterations, - precon=precon, - **kwargs) + } + + DEFAULT_PRECONDITIONER = JacobiPreconditioner diff --git a/fipy/solvers/pyamgx/linearPCGSolver.py b/fipy/solvers/pyamgx/linearPCGSolver.py new file mode 100644 index 0000000000..9952ab26b1 --- /dev/null +++ b/fipy/solvers/pyamgx/linearPCGSolver.py @@ -0,0 +1,32 @@ +from __future__ import unicode_literals +from fipy.solvers.pyamgx import PyAMGXSolver +from fipy.solvers.pyamgx.preconditioners import JacobiPreconditioner + +__all__ = ["LinearPCGSolver"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] + +class LinearPCGSolver(PyAMGXSolver): + """ + The `LinearPCGSolver` is an interface to the PCG solver in + AMGX, with no preconditioning by default. + """ + + CONFIG_DICT = { + "config_version": 2, + "determinism_flag": 1, + "exception_handling" : 1, + "solver": { + "convergence": "RELATIVE_INI_CORE", + "monitor_residual": 1, + "solver": "PCG", + "preconditioner": { + "solver": "NOSOLVER", + } + } + } + + DEFAULT_PRECONDITIONER = JacobiPreconditioner + + def _canSolveAsymmetric(self): + return False diff --git a/fipy/solvers/pyamgx/preconditioners.py b/fipy/solvers/pyamgx/preconditioners.py new file mode 100644 index 0000000000..b4f132b93f --- /dev/null +++ b/fipy/solvers/pyamgx/preconditioners.py @@ -0,0 +1,102 @@ +from __future__ import unicode_literals + +from fipy.solvers.preconditioner import SolverModifyingPreconditioner + +__all__ = ["AMGPreconditioner", + "AggregationAMGPreconditioner", + "BiCGStabPreconditioner", + "CGPreconditioner", + "DILUPreconditioner", + "FGMRESPreconditioner", + "GaussSeidelPreconditioner", + "ILUPreconditioner", + "JacobiPreconditioner", + "PolynomialPreconditioner", + "PyAMGXPreconditioner"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] + +class PyAMGXPreconditioner(SolverModifyingPreconditioner): + """Interface to pyamgx_ `preconditioner configuration`_ for :class:`~fipy.solvers.pyamgx.pyAMGXSolver.PyAMGXSolver`. + + .. _pyamgx: https://pyamgx.readthedocs.io + .. _preconditioner configuration: https://pyamgx.readthedocs.io/en/latest/basic.html#config-objects + """ + + def __init__(self, **kwargs): + """ + Parameters + ---------- + **kwargs : dict, optional + Extra arguments to preconditioner: refer to `preconditioner + configuration`_ for information about possible arguments. + """ + self.config_dict = { + "solver": self.pctype, + "max_iters": 1 + } + self.config_dict.update(kwargs) + + def _applyToSolver(self, solver, matrix=None): + solver["preconditioner"] = self.config_dict.copy() + +class AMGPreconditioner(PyAMGXPreconditioner): + """Adaptive Multigrid preconditioner for :class:`~fipy.solvers.pyamgx.pyAMGXSolver.PyAMGXSolver`. 
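Each pyamgx preconditioner is just a configuration fragment: ``__init__`` collects the solver string and any extra AMGX keywords, and ``_applyToSolver()`` copies that fragment under the ``"preconditioner"`` key of the enclosing solver configuration. A small illustration of the mechanism, assuming pyamgx is importable (calling the private method directly is for demonstration only)::

    from fipy.solvers.pyamgx.preconditioners import JacobiPreconditioner

    solver_config = {"solver": "PCG", "monitor_residual": 1}

    precon = JacobiPreconditioner(max_iters=2)   # kwargs update the fragment
    precon._applyToSolver(solver_config)

    # solver_config is now:
    # {"solver": "PCG",
    #  "monitor_residual": 1,
    #  "preconditioner": {"solver": "BLOCK_JACOBI", "max_iters": 2}}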
+ """ + +    pctype = "AMG" + +class AggregationAMGPreconditioner(AMGPreconditioner): +    """Aggregation Adaptive Multigrid preconditioner for :class:`~fipy.solvers.pyamgx.pyAMGXSolver.PyAMGXSolver`. +    """ + +    def __init__(self): +        super(AggregationAMGPreconditioner, self).__init__(algorithm="AGGREGATION", +                                                           selector="SIZE_2") +class BiCGStabPreconditioner(PyAMGXPreconditioner): +    """Biconjugate Gradient Stabilized preconditioner for :class:`~fipy.solvers.pyamgx.pyAMGXSolver.PyAMGXSolver`. +    """ + +    pctype = "PCIBCGSTAB" + +class CGPreconditioner(PyAMGXPreconditioner): +    """Conjugate Gradient preconditioner for :class:`~fipy.solvers.pyamgx.pyAMGXSolver.PyAMGXSolver`. +    """ + +    pctype = "PCG" + +class DILUPreconditioner(PyAMGXPreconditioner): +    """DILU preconditioner for :class:`~fipy.solvers.pyamgx.pyAMGXSolver.PyAMGXSolver`. +    """ + +    pctype = "MULTICOLOR_DILU" + +class FGMRESPreconditioner(PyAMGXPreconditioner): +    """Flexible Generalized Minimal Residual preconditioner for :class:`~fipy.solvers.pyamgx.pyAMGXSolver.PyAMGXSolver`. +    """ + +    pctype = "FGMRES" + +class GaussSeidelPreconditioner(PyAMGXPreconditioner): +    """Gauss-Seidel preconditioner for :class:`~fipy.solvers.pyamgx.pyAMGXSolver.PyAMGXSolver`. +    """ + +    pctype = "MULTICOLOR_GS" + +class ILUPreconditioner(PyAMGXPreconditioner): +    """ILU preconditioner for :class:`~fipy.solvers.pyamgx.pyAMGXSolver.PyAMGXSolver`. +    """ + +    pctype = "MULTICOLOR_ILU" + +class JacobiPreconditioner(PyAMGXPreconditioner): +    """Block Jacobi preconditioner for :class:`~fipy.solvers.pyamgx.pyAMGXSolver.PyAMGXSolver`. +    """ + +    pctype = "BLOCK_JACOBI" + +class PolynomialPreconditioner(PyAMGXPreconditioner): +    """Polynomial preconditioner for :class:`~fipy.solvers.pyamgx.pyAMGXSolver.PyAMGXSolver`. +    """ + +    pctype = "POLYNOMIAL" diff --git a/fipy/solvers/pyamgx/preconditioners/__init__.py b/fipy/solvers/pyamgx/preconditioners/__init__.py deleted file mode 100644 index cb98003d62..0000000000 --- a/fipy/solvers/pyamgx/preconditioners/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from __future__ import unicode_literals -from fipy.solvers.pyamgx.preconditioners.preconditioners import * diff --git a/fipy/solvers/pyamgx/preconditioners/preconditioners.py b/fipy/solvers/pyamgx/preconditioners/preconditioners.py deleted file mode 100644 index 3f45aac41d..0000000000 --- a/fipy/solvers/pyamgx/preconditioners/preconditioners.py +++ /dev/null @@ -1,49 +0,0 @@ -from __future__ import unicode_literals -from builtins import object -import copy - -__all__ = ["AggregationAMGPreconditioner", -           "ClassicalAMGPreconditioner", -           "CGPreconditioner", -           "BiCGStabPreconditioner", -           "FGMRESPreconditioner", -           "BlockJacobiPreconditioner", -           "MultiColorDILUPreconditioner", -           "PolynomialPreconditioner", -           "MultiColorGSPreconditioner", -           "Preconditioner"] -from future.utils import text_to_native_str -__all__ = [text_to_native_str(n) for n in __all__] - -class Preconditioner(object): -    """Interface to pyamgx_ `preconditioner configuration`_. - -    .. _pyamgx: https://pyamgx.readthedocs.io -    .. 
_preconditioner configuration: https://pyamgx.readthedocs.io/en/latest/basic.html#config-objects - """ - - def __init__(self, preconditioner_type, **kwargs): - self.config_dict = { - "solver": preconditioner_type, - "max_iters": 1 - } - self.config_dict.update(kwargs) - def __call__(self, **kwargs): - """ - Parameters - ---------- - **kwargs - Other AMGX solver options - """ - self.config_dict.update(kwargs) - return copy.copy(self.config_dict) - -AggregationAMGPreconditioner = Preconditioner("AMG", algorithm="AGGREGATION", selector="SIZE_2") -ClassicalAMGPreconditioner = Preconditioner("AMG") -CGPreconditioner = Preconditioner("PCG") -BiCGStabPreconditioner = Preconditioner("PCIBCGSTAB") -FGMRESPreconditioner = Preconditioner("FGMRES") -BlockJacobiPreconditioner = Preconditioner("BLOCK_JACOBI") -MultiColorDILUPreconditioner = Preconditioner("MULTICOLOR_DILU") -PolynomialPreconditioner = Preconditioner("POLYNOMIAL") -MultiColorGSPreconditioner = Preconditioner("MULTICOLOR_GS") diff --git a/fipy/solvers/pyamgx/pyAMGXSolver.py b/fipy/solvers/pyamgx/pyAMGXSolver.py index e589ce8b14..4ae92abcfc 100644 --- a/fipy/solvers/pyamgx/pyAMGXSolver.py +++ b/fipy/solvers/pyamgx/pyAMGXSolver.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -import numpy -from scipy.sparse import csr_matrix + +from scipy.sparse import csr_matrix, linalg import pyamgx @@ -15,80 +15,141 @@ class PyAMGXSolver(Solver): - def __init__(self, config_dict, tolerance=1e-10, iterations=2000, - precon=None, smoother=None, **kwargs): + # AMGX configuration options + CONFIG_DICT = {} + + #: Default smoother to apply to the ??? + DEFAULT_SMOOTHER = None + + def __init__(self, tolerance="default", criterion="default", + iterations="default", precon="default", smoother="default", **kwargs): """ Parameters ---------- - config_dict : dict - AMGX configuration options tolerance : float Required error tolerance. + criterion : {'default', 'unscaled', 'RHS', 'matrix', 'initial', 'legacy'} + Interpretation of ``tolerance``. + See :ref:`CONVERGENCE` for more information. iterations : int Maximum number of iterative steps to perform. 
- precon : ~fipy.solvers.pyamgx.preconditioners.preconditioners.Preconditioner, optional - smoother : ~fipy.solvers.pyamgx.smoothers.smoothers.Smoother, optional + precon : ~fipy.solvers.pyamgx.preconditioners.PyAMGXPreconditioner, optional + smoother : ~fipy.solvers.pyamgx.smoothers.Smoother, optional **kwargs Other AMGX solver options """ + super(PyAMGXSolver, self).__init__(tolerance=tolerance, criterion=criterion, iterations=iterations) + # update solver config: - config_dict["solver"]["tolerance"] = tolerance - config_dict["solver"]["max_iters"] = iterations - if precon: - config_dict["solver"]["preconditioner"] = precon - if smoother: - config_dict["solver"]["smoother"] = smoother - config_dict["solver"].update(kwargs) + self.config_dict = self.CONFIG_DICT.copy() + + self.config_dict["solver"]["max_iters"] = self.iterations + + if self.precon is not None: + self.precon._applyToSolver(self.config_dict["solver"]) + + smoother = self.value_or_default(smoother, self.default_smoother) + if smoother is not None: + smoother._applyToSolver(self.config_dict["solver"]) + + self.config_dict["solver"].update(kwargs) # create AMGX objects: - self.cfg = pyamgx.Config().create_from_dict(config_dict) + self.cfg = pyamgx.Config().create_from_dict(self.config_dict) self.resources = pyamgx.Resources().create_simple(self.cfg) self.x_gpu = pyamgx.Vector().create(self.resources) self.b_gpu = pyamgx.Vector().create(self.resources) self.A_gpu = pyamgx.Matrix().create(self.resources) - self.solver = pyamgx.Solver().create(self.resources, self.cfg) - super(PyAMGXSolver, self).__init__(tolerance=tolerance, iterations=iterations) + @property + def default_smoother(self): + if self.DEFAULT_SMOOTHER is not None: + # instantiate DEFAULT_SMOOTHER class + return self.DEFAULT_SMOOTHER() + else: + return None + + def _destroy_AMGX(self): + # destroy AMGX objects: + # self.resources apparently doesn't need to be destroyed + if hasattr(self, "A_gpu"): + self.A_gpu.destroy() + del self.A_gpu + if hasattr(self, "b_gpu"): + self.b_gpu.destroy() + del self.b_gpu + if hasattr(self, "x_gpu"): + self.x_gpu.destroy() + del self.x_gpu + if hasattr(self, "cfg"): + self.cfg.destroy() + del self.cfg def __exit__(self, *args): - # destroy AMGX objects: - self.A_gpu.destroy() - self.b_gpu.destroy() - self.x_gpu.destroy() - self.solver.destroy() - self.resources.destroy() - self.cfg.destroy() + self._destroy_AMGX() + + def __del__(self): + self._destroy_AMGX() @property def _matrixClass(self): return _ScipyMeshMatrix - def _storeMatrix(self, var, matrix, RHSvector): - self.var = var - self.matrix = matrix - self.RHSvector = RHSvector - self.A_gpu.upload_CSR(self.matrix.matrix) - self.solver.setup(self.A_gpu) + def _rhsNorm(self, L, x, b): + return numerix.L2norm(b) + + def _matrixNorm(self, L, x, b): + return linalg.norm(L.matrix, ord=numerix.inf) + + def _adaptLegacyTolerance(self, L, x, b): + return self._adaptInitialTolerance(L, x, b) + + def _adaptUnscaledTolerance(self, L, x, b): + return (1., "ABSOLUTE") + + def _adaptRHSTolerance(self, L, x, b): + return (self._rhsNorm(L, x, b), "ABSOLUTE") + + def _adaptMatrixTolerance(self, L, x, b): + return (self._matrixNorm(L, x, b), "ABSOLUTE") + + def _adaptInitialTolerance(self, L, x, b): + return (1., "RELATIVE_INI_CORE") def _solve_(self, L, x, b): # transfer data from CPU to GPU + self._log.debug("BEGIN cpu2gpu") with Timer() as t: self.x_gpu.upload(x) self.b_gpu.upload(b) + self.A_gpu.upload_CSR(L) self._log.debug("END cpu2gpu - {elapsed} ns".format(elapsed=t.elapsed)) + 
tolerance_scale, suite_criterion = self._adaptTolerance(L, x, b) + config_dict = self.config_dict.copy() + config_dict["solver"]["monitor_residual"] = 1 + config_dict["solver"]["store_res_history"] = 1 + config_dict["solver"]["tolerance"] = self.tolerance * tolerance_scale + config_dict["solver"]["convergence"] = suite_criterion + + cfg = pyamgx.Config().create_from_dict(config_dict) + solver = pyamgx.Solver().create(self.resources, cfg) + solver.setup(self.A_gpu) + # solve system on GPU + self._log.debug("BEGIN solve") with Timer() as t: - self.solver.solve(self.b_gpu, self.x_gpu) + solver.solve(self.b_gpu, self.x_gpu) self._log.debug("END solve - {} ns".format(t.elapsed)) # download values from GPU to CPU + self._log.debug("BEGIN gpu2cpu") with Timer() as t: @@ -96,10 +157,20 @@ def _solve_(self, L, x, b): self._log.debug("END gpu2cpu - {} ns".format(t.elapsed)) - return x + if solver.iterations_number == -1: + residual = None + else: + residual = solver.get_residual() - def _solve(self): - if self.var.mesh.communicator.Nproc > 1: - raise Exception("SciPy solvers cannot be used with multiple processors") + self._setConvergence(suite="pyamgx", + code=solver.status, + iterations=solver.iterations_number, + tolerance_scale=tolerance_scale, + residual=residual) - self.var[:] = numerix.reshape(self._solve_(self.matrix, self.var.ravel(), numerix.array(self.RHSvector)), self.var.shape) + self.convergence.warn() + + solver.destroy() + cfg.destroy() + + return x diff --git a/fipy/solvers/pyamgx/pyamgxConvergence.py b/fipy/solvers/pyamgx/pyamgxConvergence.py new file mode 100644 index 0000000000..b78eb7a7e7 --- /dev/null +++ b/fipy/solvers/pyamgx/pyamgxConvergence.py @@ -0,0 +1,23 @@ +from ..convergence import (Convergence, IterationDivergence, + BreakdownDivergence) + +class pyamgx_Convergence(Convergence): + """ + """ + status_code = "success" + status_name = "AMGX_SOLVE_SUCCESS" + suite = "pyamgx" + +class pyamgx_BreakdownDivergence(BreakdownDivergence): + """ + """ + status_code = "failed" + status_name = "AMGX_SOLVE_FAILED" + suite = "pyamgx" + +class pyamgx_IterationDivergence(IterationDivergence): + """ + """ + status_code = "diverged" + status_name = "AMGX_SOLVE_DIVERGED" + suite = "pyamgx" diff --git a/fipy/solvers/pyamgx/smoothers/smoothers.py b/fipy/solvers/pyamgx/smoothers.py similarity index 87% rename from fipy/solvers/pyamgx/smoothers/smoothers.py rename to fipy/solvers/pyamgx/smoothers.py index fff51140e8..5168c68bb2 100644 --- a/fipy/solvers/pyamgx/smoothers/smoothers.py +++ b/fipy/solvers/pyamgx/smoothers.py @@ -1,6 +1,5 @@ from __future__ import unicode_literals from builtins import object -import copy __all__ = ["BlockJacobiSmoother", "MultiColorDILUSmoother", @@ -22,9 +21,9 @@ def __init__(self, smoother_type): "solver": smoother_type, "max_iters": 1 } - def __call__(self, **kwargs): - self.config_dict.update(kwargs) - return copy.copy(self.config_dict) + + def _applyToSolver(self, solver): + solver["smoother"] = self.config_dict.copy() BlockJacobiSmoother = Smoother("BLOCK_JACOBI") MultiColorDILUSmoother = Smoother("MULTICOLOR_DILU") diff --git a/fipy/solvers/pyamgx/smoothers/__init__.py b/fipy/solvers/pyamgx/smoothers/__init__.py deleted file mode 100644 index f567c0dd66..0000000000 --- a/fipy/solvers/pyamgx/smoothers/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from __future__ import unicode_literals -from fipy.solvers.pyamgx.smoothers.smoothers import * diff --git a/fipy/solvers/pysparse/__init__.py b/fipy/solvers/pysparse/__init__.py index afb94dbfa9..d6ba6ae26a 100644 --- 
a/fipy/solvers/pysparse/__init__.py +++ b/fipy/solvers/pysparse/__init__.py @@ -4,13 +4,15 @@ _log = logging.getLogger(__name__) -from fipy.solvers.pysparse.linearCGSSolver import * -from fipy.solvers.pysparse.linearPCGSolver import * -from fipy.solvers.pysparse.linearGMRESSolver import * -from fipy.solvers.pysparse.linearLUSolver import * -from fipy.solvers.pysparse.linearJORSolver import * +from .linearCGSSolver import * +from .linearPCGSolver import * +from .linearGMRESSolver import * +from .linearLUSolver import * +from .linearJORSolver import * -from fipy.solvers.pysparse.preconditioners import * +from .preconditioners import * + +from . import pysparseConvergence DefaultSolver = LinearPCGSolver DefaultAsymmetricSolver = LinearLUSolver diff --git a/fipy/solvers/pysparse/linearCGSSolver.py b/fipy/solvers/pysparse/linearCGSSolver.py index 7ff7f5d132..d908933dc8 100644 --- a/fipy/solvers/pysparse/linearCGSSolver.py +++ b/fipy/solvers/pysparse/linearCGSSolver.py @@ -2,16 +2,17 @@ __docformat__ = 'restructuredtext' import sys +import warnings from pysparse.itsolvers import krylov -from fipy.solvers.pysparse.pysparseSolver import PysparseSolver +from .linearRHSSolver import LinearRHSSolver __all__ = ["LinearCGSSolver"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class LinearCGSSolver(PysparseSolver): +class LinearCGSSolver(LinearRHSSolver): """ @@ -27,13 +28,25 @@ class LinearCGSSolver(PysparseSolver): """ - def __init__(self, precon=None, *args, **kwargs): + solveFnc = staticmethod(krylov.cgs) + + def __init__(self, tolerance="default", criterion="default", + iterations="default", precon="default"): """ + Create a `LinearCGSSolver` object. + Parameters ---------- - precon : ~fipy.solvers.pysparse.preconditioners.preconditioner.Preconditioner, optional + tolerance : float + Required error tolerance. + criterion : {'default', 'unscaled', 'RHS', 'matrix', 'initial', 'legacy'} + Interpretation of ``tolerance``. + See :ref:`CONVERGENCE` for more information. + iterations : int + Maximum number of iterative steps to perform. + precon : ~fipy.solvers.pysparse.preconditioners.pysparsePreconditioner.PysparsePreconditioner + Preconditioner to use. 
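The Pysparse wrappers now declare their Krylov routine as a ``solveFnc`` class attribute instead of assigning it inside ``__init__``, so a new wrapper reduces to a short subclass of ``LinearRHSSolver`` or ``LinearInitialSolver`` (introduced later in this changeset). A hypothetical example; it assumes ``pysparse.itsolvers.krylov`` provides ``bicgstab`` and that right-hand-side scaling is the appropriate legacy norm::

    from pysparse.itsolvers import krylov

    from fipy.solvers.pysparse.linearRHSSolver import LinearRHSSolver

    class LinearBicgstabSolver(LinearRHSSolver):
        """Hypothetical Pysparse BiCGSTAB wrapper."""

        solveFnc = staticmethod(krylov.bicgstab)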
""" - import warnings warnings.warn("The Pysparse CGS solver may return incorrect results for some matrices", UserWarning) - super(LinearCGSSolver, self).__init__(precon=precon, *args, **kwargs) - self.solveFnc = krylov.cgs + super(LinearCGSSolver, self).__init__(tolerance=tolerance, criterion=criterion, + iterations=iterations, precon=precon) diff --git a/fipy/solvers/pysparse/linearGMRESSolver.py b/fipy/solvers/pysparse/linearGMRESSolver.py index 88920c0fe7..424a4f77a1 100644 --- a/fipy/solvers/pysparse/linearGMRESSolver.py +++ b/fipy/solvers/pysparse/linearGMRESSolver.py @@ -3,14 +3,14 @@ from pysparse.itsolvers import krylov -from fipy.solvers.pysparse.preconditioners import JacobiPreconditioner -from fipy.solvers.pysparse.pysparseSolver import PysparseSolver +from .linearInitialSolver import LinearInitialSolver +from .preconditioners import JacobiPreconditioner __all__ = ["LinearGMRESSolver"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class LinearGMRESSolver(PysparseSolver): +class LinearGMRESSolver(LinearInitialSolver): """ The `LinearGMRESSolver` solves a linear system of equations using the @@ -25,11 +25,6 @@ class LinearGMRESSolver(PysparseSolver): """ - def __init__(self, precon=JacobiPreconditioner(), *args, **kwargs): - """ - Parameters - ---------- - precon : ~fipy.solvers.pysparse.preconditioners.preconditioner.Preconditioner, optional - """ - super(LinearGMRESSolver, self).__init__(precon=precon, *args, **kwargs) - self.solveFnc = krylov.gmres + solveFnc = staticmethod(krylov.gmres) + + DEFAULT_PRECONDITIONER = JacobiPreconditioner diff --git a/fipy/solvers/pysparse/linearInitialSolver.py b/fipy/solvers/pysparse/linearInitialSolver.py new file mode 100644 index 0000000000..bc03b58485 --- /dev/null +++ b/fipy/solvers/pysparse/linearInitialSolver.py @@ -0,0 +1,13 @@ +from __future__ import unicode_literals +__docformat__ = 'restructuredtext' + +from .pysparseSolver import PysparseSolver + +class LinearInitialSolver(PysparseSolver): + """Wrapper for solvers that normalize the residual by the initial value. + + .. attention:: This class is abstract. Always create one of its subclasses. + """ + + def _legacyNorm(self, L, x, b): + return self._residualNorm(L, x, b) diff --git a/fipy/solvers/pysparse/linearJORSolver.py b/fipy/solvers/pysparse/linearJORSolver.py index 26b09befd5..a9c95c60d4 100644 --- a/fipy/solvers/pysparse/linearJORSolver.py +++ b/fipy/solvers/pysparse/linearJORSolver.py @@ -19,26 +19,31 @@ class LinearJORSolver(PysparseSolver): non-symmetric coefficient matrix. """ - def __init__(self, tolerance=1e-10, iterations=1000, relaxation=1.0): + + def __init__(self, tolerance="default", criterion="default", + iterations="default", relaxation=1.0): """ - The `Solver` class should not be invoked directly. + Create a `LinearJORSolver` object. Parameters ---------- tolerance : float Required error tolerance. + criterion : {'default', 'unscaled', 'RHS', 'matrix', 'initial', 'legacy'} + Interpretation of ``tolerance``. + See :ref:`CONVERGENCE` for more information. iterations : int Maximum number of iterative steps to perform. 
relaxation : float Fraction of update to apply """ - super(LinearJORSolver, self).__init__(tolerance=tolerance, - iterations=iterations) + super(LinearJORSolver, self).__init__(tolerance=tolerance, criterion=criterion, + iterations=iterations, precon=None) self.relaxation = relaxation def _solve_(self, L, x, b): - d = L.takeDiagonal() + d = L.diagonal() D = _PysparseMatrixFromShape(size=len(d)) D.putDiagonal(d) @@ -46,17 +51,24 @@ def _solve_(self, L, x, b): tol = 1e+10 xold = x.copy() + tolerance_scale, _ = self._adaptTolerance(L, x, b) + for iteration in range(self.iterations): - if tol <= self.tolerance: - break + residual = numerix.L2norm(L * x - b) - residual = L * x - b + if residual <= self.tolerance * tolerance_scale: + break xold[:] = x x[:] = (-(LU) * x + b) / d x[:] = xold + self.relaxation * (x - xold) - tol = max(abs(residual)) + self._setConvergence(suite="pysparse", + code=0, + iterations=iteration+1, + residual=residual / tolerance_scale) + + self.convergence.warn() - print(iteration, tol) + return x diff --git a/fipy/solvers/pysparse/linearLUSolver.py b/fipy/solvers/pysparse/linearLUSolver.py index e3ebe4f403..5d4a98f392 100644 --- a/fipy/solvers/pysparse/linearLUSolver.py +++ b/fipy/solvers/pysparse/linearLUSolver.py @@ -6,7 +6,7 @@ from pysparse.direct import superlu -from fipy.solvers.pysparse.pysparseSolver import PysparseSolver +from .pysparseSolver import PysparseSolver from fipy.tools import numerix from fipy.tools.timer import Timer @@ -28,8 +28,8 @@ class LinearLUSolver(PysparseSolver): """ - def __init__(self, tolerance=1e-10, iterations=10, - maxIterations=10, precon=None): + def __init__(self, tolerance="default", criterion="default", + iterations=10, precon=None): """ Creates a `LinearLUSolver`. @@ -37,42 +37,74 @@ def __init__(self, tolerance=1e-10, iterations=10, ---------- tolerance : float Required error tolerance. + criterion : {'default', 'unscaled', 'RHS', 'matrix', 'initial', 'legacy'} + Interpretation of ``tolerance``. + See :ref:`CONVERGENCE` for more information. iterations : int Maximum number of iterative steps to perform. 
- precon : ~fipy.solvers.pysparse.preconditioners.preconditioner.Preconditioner + precon *ignored* """ + super(LinearLUSolver, self).__init__(tolerance=tolerance, criterion=criterion, + iterations=iterations, precon=None) - iterations = min(iterations, maxIterations) + def _adaptLegacyTolerance(self, L, x, b): + return self._adaptInitialTolerance(L, x, b) - super(LinearLUSolver, self).__init__(tolerance = tolerance, - iterations = iterations) + def _adaptUnscaledTolerance(self, L, x, b): + return (1., None) + + def _adaptRHSTolerance(self, L, x, b): + return (self._rhsNorm(L, x, b), None) + + def _adaptMatrixTolerance(self, L, x, b): + return (self._matrixNorm(L, x, b), None) + + def _adaptInitialTolerance(self, L, x, b): + return (self._residualNorm(L, x, b), None) def _solve_(self, L, x, b): - diag = L.takeDiagonal() - maxdiag = max(numerix.absolute(diag)) + """Solve system of equations posed for PySparse - L = L * (1 / maxdiag) - b = b * (1 / maxdiag) + Parameters + ---------- + L : ~pysparse.spmatrix.ll_mat + Sparse matrix + x : array_like + Solution vector + b : array_like + Right hand side vector + + Returns + ------- + x : ndarray + Solution vector + """ + tolerance_scale, _ = self._adaptTolerance(L, x, b) self._log.debug("BEGIN solve") with Timer() as t: - LU = superlu.factorize(L.matrix.to_csr()) - - error0 = numerix.sqrt(numerix.sum((L * x - b)**2)) + LU = superlu.factorize(L.to_csr()) for iteration in range(self.iterations): - errorVector = L * x - b + residualVector, residual = self._residualVectorAndNorm(L, x, b) - if numerix.sqrt(numerix.sum(errorVector**2)) <= self.tolerance * error0: + if residual <= self.tolerance * tolerance_scale: break xError = numerix.zeros(len(b), 'd') - LU.solve(errorVector, xError) + + LU.solve(residualVector, xError) x[:] = x - xError self._log.debug("END solve - {} ns".format(t.elapsed)) - self._log.debug('iterations: %d / %d', iteration+1, self.iterations) - self._log.debug('residual: %s', numerix.sqrt(numerix.sum(errorVector**2))) + self._setConvergence(suite="pysparse", + code=0, + iterations=iteration+1, + residual=residual) + + self.convergence.warn() + + return x diff --git a/fipy/solvers/pysparse/linearPCGSolver.py b/fipy/solvers/pysparse/linearPCGSolver.py index 8291e53668..46c8b1a57a 100644 --- a/fipy/solvers/pysparse/linearPCGSolver.py +++ b/fipy/solvers/pysparse/linearPCGSolver.py @@ -3,14 +3,14 @@ from pysparse.itsolvers import krylov -from fipy.solvers.pysparse.preconditioners import SsorPreconditioner -from fipy.solvers.pysparse.pysparseSolver import PysparseSolver +from .linearRHSSolver import LinearRHSSolver +from .preconditioners import SSORPreconditioner __all__ = ["LinearPCGSolver"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class LinearPCGSolver(PysparseSolver): +class LinearPCGSolver(LinearRHSSolver): """ The `LinearPCGSolver` solves a linear system of equations using the @@ -27,14 +27,9 @@ class LinearPCGSolver(PysparseSolver): """ - def __init__(self, precon=SsorPreconditioner(), *args, **kwargs): - """ - Parameters - ---------- - precon : ~fipy.solvers.pysparse.preconditioners.preconditioner.Preconditioner, optional - """ - super(LinearPCGSolver, self).__init__(precon=precon, *args, **kwargs) - self.solveFnc = krylov.pcg + solveFnc = staticmethod(krylov.pcg) + + DEFAULT_PRECONDITIONER = SSORPreconditioner def _canSolveAsymmetric(self): return False diff --git a/fipy/solvers/pysparse/linearRHSSolver.py b/fipy/solvers/pysparse/linearRHSSolver.py new file mode 100644 index 
0000000000..12f54fd49a --- /dev/null +++ b/fipy/solvers/pysparse/linearRHSSolver.py @@ -0,0 +1,13 @@ +from __future__ import unicode_literals +__docformat__ = 'restructuredtext' + +from .pysparseSolver import PysparseSolver + +class LinearRHSSolver(PysparseSolver): + """Wrapper for solvers that normalize the residual by the right-hand-side. + + .. attention:: This class is abstract. Always create one of its subclasses. + """ + + def _legacyNorm(self, L, x, b): + return self._rhsNorm(L, x, b) diff --git a/fipy/solvers/pysparse/preconditioners/__init__.py b/fipy/solvers/pysparse/preconditioners/__init__.py index 9aa77d6b4f..9f1e453425 100644 --- a/fipy/solvers/pysparse/preconditioners/__init__.py +++ b/fipy/solvers/pysparse/preconditioners/__init__.py @@ -1,3 +1,7 @@ from __future__ import unicode_literals -from fipy.solvers.pysparse.preconditioners.jacobiPreconditioner import * -from fipy.solvers.pysparse.preconditioners.ssorPreconditioner import * +from .jacobiPreconditioner import * +from .ssorPreconditioner import * + +__all__ = [] +__all__.extend(jacobiPreconditioner.__all__) +__all__.extend(ssorPreconditioner.__all__) diff --git a/fipy/solvers/pysparse/preconditioners/jacobiPreconditioner.py b/fipy/solvers/pysparse/preconditioners/jacobiPreconditioner.py index 764fbf29b7..efe5d0ab41 100644 --- a/fipy/solvers/pysparse/preconditioners/jacobiPreconditioner.py +++ b/fipy/solvers/pysparse/preconditioners/jacobiPreconditioner.py @@ -1,19 +1,17 @@ from __future__ import unicode_literals from pysparse.precon import precon -from fipy.solvers.pysparse.preconditioners.preconditioner import Preconditioner +from fipy.solvers.pysparse.preconditioners.pysparsePreconditioner import PysparsePreconditioner __all__ = ["JacobiPreconditioner"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class JacobiPreconditioner(Preconditioner): - """ - Jacobi preconditioner for Pysparse. - Really just a wrapper class for `pysparse.precon.jacobi`. +class JacobiPreconditioner(PysparsePreconditioner): + """Jacobi preconditioner for :class:`~fipy.solvers.pysparse.pysparseSolver.PysparseSolver`. + + Wrapper class for :func:`pysparse.precon.jacobi`. """ - def _applyToMatrix(self, A): - """ - Returns (preconditioning matrix, resulting matrix) - """ - return precon.jacobi(A), A.to_csr() + + def _applyToMatrix(self, matrix): + return precon.jacobi(matrix), matrix.to_csr() diff --git a/fipy/solvers/pysparse/preconditioners/preconditioner.py b/fipy/solvers/pysparse/preconditioners/preconditioner.py deleted file mode 100644 index e21581bfa1..0000000000 --- a/fipy/solvers/pysparse/preconditioners/preconditioner.py +++ /dev/null @@ -1,26 +0,0 @@ -from __future__ import unicode_literals -from builtins import object -__all__ = ["Preconditioner"] -from future.utils import text_to_native_str -__all__ = [text_to_native_str(n) for n in __all__] - -class Preconditioner(object): - """ - Base preconditioner class - - .. attention:: This class is abstract. Always create one of its subclasses. - """ - - def __init__(self): - """ - Create a `Preconditioner` object. - """ - if self.__class__ is Preconditioner: - raise NotImplementedError("can't instantiate abstract base class") - - def _applyToMatrix(self, matrix): - """ - Returns the function used for Pysparse - preconditioning. 
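The Pysparse ``LinearLUSolver`` above factorizes once with ``superlu`` and then loops, solving for a correction computed from the current residual. The same refinement pattern in plain SciPy (``scipy.sparse.linalg.splu`` stands in for ``pysparse.direct.superlu``; the function name and the initial-residual scaling are assumptions for illustration)::

    import numpy as np
    from scipy.sparse import csc_matrix
    from scipy.sparse.linalg import splu

    def lu_refine(L, x, b, tolerance=1e-10, iterations=10):
        """Factor once, then apply residual corrections until the residual is small."""
        lu = splu(csc_matrix(L))                 # single factorization
        scale = np.linalg.norm(L @ x - b)        # 'initial'-style scaling of the tolerance
        for iteration in range(iterations):
            residualVector = L @ x - b
            if np.linalg.norm(residualVector) <= tolerance * scale:
                break
            x = x - lu.solve(residualVector)     # subtract the correction, as above
        return x

    A = np.array([[10., 2., 0.],
                  [3., 9., 1.],
                  [0., 1., 8.]])
    b = np.array([1., 2., 3.])
    x = lu_refine(A, np.zeros(3), b)
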
- """ - raise NotImplementedError diff --git a/fipy/solvers/pysparse/preconditioners/pysparsePreconditioner.py b/fipy/solvers/pysparse/preconditioners/pysparsePreconditioner.py new file mode 100644 index 0000000000..0a1497b536 --- /dev/null +++ b/fipy/solvers/pysparse/preconditioners/pysparsePreconditioner.py @@ -0,0 +1,15 @@ +from __future__ import unicode_literals + +from fipy.solvers.preconditioner import MatrixModifyingPreconditioner + +__all__ = ["PysparsePreconditioner"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] + +class PysparsePreconditioner(MatrixModifyingPreconditioner): + """Base class for preconditioners of :class:`~fipy.solvers.pysparse.pysparseSolver.PysparseSolver`. + + .. attention:: This class is abstract. Always create one of its subclasses. + """ + + pass diff --git a/fipy/solvers/pysparse/preconditioners/ssorPreconditioner.py b/fipy/solvers/pysparse/preconditioners/ssorPreconditioner.py index 7b5c353658..9beabb8d82 100644 --- a/fipy/solvers/pysparse/preconditioners/ssorPreconditioner.py +++ b/fipy/solvers/pysparse/preconditioners/ssorPreconditioner.py @@ -1,20 +1,18 @@ from __future__ import unicode_literals from pysparse.precon import precon -from fipy.solvers.pysparse.preconditioners.preconditioner import Preconditioner +from fipy.solvers.pysparse.preconditioners.pysparsePreconditioner import PysparsePreconditioner -__all__ = ["SsorPreconditioner"] +__all__ = ["SSORPreconditioner"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class SsorPreconditioner(Preconditioner): - """ - SSOR preconditioner for Pysparse. - Really just a wrapper class for `pysparse.precon.jacobi`. +class SSORPreconditioner(PysparsePreconditioner): + """SSOR preconditioner for :class:`~fipy.solvers.pysparse.pysparseSolver.PysparseSolver`. + + Wrapper class for :func:`pysparse.precon.ssor`. """ - def _applyToMatrix(self, A): - """ - Returns (preconditioning matrix, resulting matrix) - """ - A = A.to_sss() - return precon.ssor(A), A + + def _applyToMatrix(self, matrix): + matrix = matrix.to_sss() + return precon.ssor(matrix), matrix diff --git a/fipy/solvers/pysparse/pysparseConvergence.py b/fipy/solvers/pysparse/pysparseConvergence.py new file mode 100644 index 0000000000..e5403f4ef9 --- /dev/null +++ b/fipy/solvers/pysparse/pysparseConvergence.py @@ -0,0 +1,70 @@ +from ..convergence import (Convergence, AbsoluteToleranceConvergence, + RelativeToleranceConvergence, RHSZeroConvergence, + Divergence, IterationDivergence, + PreconditioningDivergence, StagnatedDivergence, + IllConditionedDivergence, OutOfRangeDivergence) + +class Pysparse_AbsoluteToleranceConvergence(AbsoluteToleranceConvergence): + """Residual 2-norm less than abstol""" + status_code = 2 + status_name = "Pysparse_CONVERGED_ATOL" + suite = "pysparse" + +class Pysparse_RHSZeroConvergence(RHSZeroConvergence): + r""":math:`\vec{b} = 0`, so exact solution is :math:`\vec{x} = 0`. + """ + status_code = 1 + status_name = "Pysparse_CONVERGED_BZERO" + suite = "pysparse" + +class Pysparse_RelativeToleranceConvergence(RelativeToleranceConvergence): + """Residual 2-norm decreased by a factor of rtol, from 2-norm of right + hand side. 
+ """ + status_code = 0 + status_name = "Pysparse_CONVERGED_RTOL" + suite = "pysparse" + +class Pysparse_IterationDivergence(IterationDivergence): + """Ran out of iterations before any convergence criteria was reached""" + status_code = -1 + status_name = "Pysparse_DIVERGED_MAXITS" + suite = "pysparse" + +class Pysparse_IllConditionedPreconditionerDivergence(PreconditioningDivergence): + """The system involving the preconditioner was ill-conditioned. + """ + status_code = -2 + status_name = "Pysparse_DIVERGED_PC_ILL" + suite = "pysparse" + +class Pysparse_NonPosDefPreconditioningDivergence(PreconditioningDivergence): + r"""An inner product of the form + :math:`\mathbf{x}^T \mathbf{K}^{-1} \mathbf{x}` was not positive, + so the preconditioning matrix :math:`\mathbf{K}` does not appear to be + positive definite. + """ + status_code = -3 + status_name = "Pysparse_DIVERGED_PC_NONPOSDEF" + suite = "pysparse" + +class Pysparse_IllConditionedDivergence(IllConditionedDivergence): + """The matrix appears to be very ill-conditioned. + """ + status_code = -4 + status_name = "Pysparse_DIVERGED_MAT_ILL" + suite = "pysparse" + +class Pysparse_StagnatedDivergence(StagnatedDivergence): + """The method stagnated. + """ + status_code = -5 + status_name = "Pysparse_DIVERGED_STAG" + suite = "pysparse" + +class Pysparse_OutOfRangeDivergence(OutOfRangeDivergence): + """A scalar quantity became too small or too large to continue computing. + """ + status_code = -6 + status_name = "Pysparse_DIVERGED_RANGE" + suite = "pysparse" diff --git a/fipy/solvers/pysparse/pysparseSolver.py b/fipy/solvers/pysparse/pysparseSolver.py index 7132066a51..9add2701b3 100644 --- a/fipy/solvers/pysparse/pysparseSolver.py +++ b/fipy/solvers/pysparse/pysparseSolver.py @@ -1,42 +1,44 @@ from __future__ import unicode_literals __docformat__ = 'restructuredtext' -from fipy.solvers.pysparseMatrixSolver import _PysparseMatrixSolver +from ..pysparseMatrixSolver import PysparseMatrixSolver +from fipy.tools import numerix from fipy.tools.timer import Timer __all__ = ["PysparseSolver"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class PysparseSolver(_PysparseMatrixSolver): +class PysparseSolver(PysparseMatrixSolver): """ The base `pysparseSolver` class. .. attention:: This class is abstract. Always create one of its subclasses. """ - def __init__(self, *args, **kwargs): - if self.__class__ is PysparseSolver: - raise NotImplementedError("can't instantiate abstract base class") - - super(PysparseSolver, self).__init__(*args, **kwargs) def _solve_(self, L, x, b): - """ - `_solve_` is only for use by solvers which may use - preconditioning. If you are writing a solver which - doesn't use preconditioning, this must be overridden. 
+ """Solve system of equations posed for PySparse Parameters ---------- - L : ~fipy.matrices.pysparseMatrix._PysparseMeshMatrix - Matrix - x : ndarray + L : ~pysparse.spmatrix.ll_mat + Sparse matrix + x : array_like Solution vector - b : ndarray + b : array_like Right hand side vector + + Returns + ------- + x : ndarray + Solution vector """ - A = L.matrix + tolerance_scale, _ = self._adaptTolerance(L, x, b) + + # Pysparse returns the relative residual, + # which changes depending on which solver is used + legacy_norm = self._legacyNorm(L, x, b) self._log.debug("BEGIN precondition") @@ -44,43 +46,57 @@ def _solve_(self, L, x, b): if self.preconditioner is None: P = None else: - P, A = self.preconditioner._applyToMatrix(A) + P, L = self.preconditioner._applyToMatrix(L) self._log.debug("END precondition - {} ns".format(t.elapsed)) self._log.debug("BEGIN solve") with Timer() as t: - info, iter, relres = self.solveFnc(A, b, x, self.tolerance, + info, iter, relres = self.solveFnc(L, b, x, + self.tolerance * tolerance_scale, self.iterations, P) self._log.debug("END solve - {} ns".format(t.elapsed)) - self._raiseWarning(info, iter, relres) + self._setConvergence(suite="pysparse", + code=info, + iterations=iter + 1, + tolerance_scale=tolerance_scale, + residual=relres * legacy_norm) + + self.convergence.warn() + + return x - self._log.debug('iterations: %d / %d', iter, self.iterations) - if info < 0: - self._log.debug('failure: %s', self._warningList[info].__class__.__name__) - self._log.debug('relres: %s', relres) + def _rhsNorm(self, L, x, b): + return numerix.L2norm(b) - def _solve(self): + def _matrixNorm(self, L, x, b): + return L.norm('inf') - if self.var.mesh.communicator.Nproc > 1: - raise Exception("Pysparse solvers cannot be used with multiple processors") + def _residualVectorAndNorm(self, L, x, b): + y = numerix.empty((L.shape[0],)) + L.matvec(x, y) + residualVector = y - b - array = self.var.numericValue.ravel() + return residualVector, numerix.L2norm(residualVector) - from fipy.terms import SolutionVariableNumberError + def _adaptUnscaledTolerance(self, L, x, b): + factor = 1. 
/ self._legacyNorm(L, x, b) + return (factor, None) - if ((self.matrix == 0) - or (self.matrix.matrix.shape[0] != self.matrix.matrix.shape[1]) - or (self.matrix.matrix.shape[0] != len(array))): + def _adaptRHSTolerance(self, L, x, b): + factor = self._rhsNorm(L, x, b) / self._legacyNorm(L, x, b) + return (factor, None) - raise SolutionVariableNumberError + def _adaptMatrixTolerance(self, L, x, b): + factor = self._matrixNorm(L, x, b) / self._legacyNorm(L, x, b) + return (factor, None) - self._solve_(self.matrix, array, self.RHSvector) - factor = self.var.unit.factor - if factor != 1: - array /= self.var.unit.factor + def _adaptInitialTolerance(self, L, x, b): + factor = self._residualNorm(L, x, b) / self._legacyNorm(L, x, b) + return (factor, None) - self.var[:] = array.reshape(self.var.shape) + def _adaptLegacyTolerance(self, L, x, b): + return (1., None) diff --git a/fipy/solvers/pysparseMatrixSolver.py b/fipy/solvers/pysparseMatrixSolver.py index dbb4f6b0cb..046a92dfe8 100644 --- a/fipy/solvers/pysparseMatrixSolver.py +++ b/fipy/solvers/pysparseMatrixSolver.py @@ -6,7 +6,7 @@ from fipy.solvers.solver import Solver from fipy.matrices.pysparseMatrix import _PysparseMeshMatrix -class _PysparseMatrixSolver(Solver): +class PysparseMatrixSolver(Solver): """ A class consolidating methods for solver packages which use @@ -27,29 +27,3 @@ class _PysparseMatrixSolver(Solver): @property def _matrixClass(self): return _PysparseMeshMatrix - - def _solve(self): - """ - Call `_solve_` for the new value of `self.var`. - - In certain cases, `_solve_` won't return anything, e.g. - `fipy.solvers.pysparse.linearLUSolver`. In these cases, we preserve the - value of `self.var.numericValue`. - """ - - if self.var.mesh.communicator.Nproc > 1: - raise Exception("%ss cannot be used with multiple processors" \ - % self.__class__) - - array = self.var.numericValue - newArr = self._solve_(self.matrix, array, self.RHSvector) - - if newArr is not None: - array = newArr - - factor = self.var.unit.factor - - if factor != 1: - array /= self.var.unit.factor - - self.var[:] = array diff --git a/fipy/solvers/scipy/__init__.py b/fipy/solvers/scipy/__init__.py index b71f13b635..a95ec055d6 100644 --- a/fipy/solvers/scipy/__init__.py +++ b/fipy/solvers/scipy/__init__.py @@ -4,11 +4,15 @@ _log = logging.getLogger(__name__) -from fipy.solvers.scipy.linearCGSSolver import * -from fipy.solvers.scipy.linearGMRESSolver import * -from fipy.solvers.scipy.linearBicgstabSolver import * -from fipy.solvers.scipy.linearLUSolver import * -from fipy.solvers.scipy.linearPCGSolver import * +from .preconditioners import * + +from .linearCGSSolver import * +from .linearGMRESSolver import * +from .linearBicgstabSolver import * +from .linearLUSolver import * +from .linearPCGSolver import * + +from . 
import scipyConvergence DefaultSolver = LinearLUSolver DefaultAsymmetricSolver = LinearLUSolver diff --git a/fipy/solvers/scipy/linearBicgstabSolver.py b/fipy/solvers/scipy/linearBicgstabSolver.py index 2e66b1cca5..0260802984 100644 --- a/fipy/solvers/scipy/linearBicgstabSolver.py +++ b/fipy/solvers/scipy/linearBicgstabSolver.py @@ -1,30 +1,17 @@ from __future__ import unicode_literals __docformat__ = 'restructuredtext' -from fipy.solvers.scipy.scipyKrylovSolver import _ScipyKrylovSolver +from fipy.solvers.scipy.scipyKrylovSolver import ScipyKrylovSolver from scipy.sparse.linalg import bicgstab __all__ = ["LinearBicgstabSolver"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class LinearBicgstabSolver(_ScipyKrylovSolver): +class LinearBicgstabSolver(ScipyKrylovSolver): """ The `LinearBicgstabSolver` is an interface to the Bicgstab solver in Scipy, with no preconditioning by default. """ - def __init__(self, tolerance=1e-15, iterations=2000, precon=None): - """ - Parameters - ---------- - tolerance : float - Required error tolerance. - iterations : int - Maximum number of iterative steps to perform. - precon - Preconditioner to use. - """ - - super(LinearBicgstabSolver, self).__init__(tolerance=tolerance, iterations=iterations, precon=precon) - self.solveFnc = bicgstab + solveFnc = staticmethod(bicgstab) diff --git a/fipy/solvers/scipy/linearCGSSolver.py b/fipy/solvers/scipy/linearCGSSolver.py index 306d9bd9ce..1eaa80f496 100644 --- a/fipy/solvers/scipy/linearCGSSolver.py +++ b/fipy/solvers/scipy/linearCGSSolver.py @@ -3,29 +3,16 @@ from scipy.sparse.linalg import cgs -from fipy.solvers.scipy.scipyKrylovSolver import _ScipyKrylovSolver +from fipy.solvers.scipy.scipyKrylovSolver import ScipyKrylovSolver __all__ = ["LinearCGSSolver"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class LinearCGSSolver(_ScipyKrylovSolver): +class LinearCGSSolver(ScipyKrylovSolver): """ The `LinearCGSSolver` is an interface to the CGS solver in Scipy, with no preconditioning by default. """ - def __init__(self, tolerance=1e-15, iterations=2000, precon=None): - """ - Parameters - ---------- - tolerance : float - Required error tolerance. - iterations : int - Maximum number of iterative steps to perform. - precon - Preconditioner to use. - """ - - super(LinearCGSSolver, self).__init__(tolerance=tolerance, iterations=iterations, precon=precon) - self.solveFnc = cgs + solveFnc = staticmethod(cgs) diff --git a/fipy/solvers/scipy/linearGMRESSolver.py b/fipy/solvers/scipy/linearGMRESSolver.py index 0fb9320413..e63ca951ab 100644 --- a/fipy/solvers/scipy/linearGMRESSolver.py +++ b/fipy/solvers/scipy/linearGMRESSolver.py @@ -3,29 +3,16 @@ from scipy.sparse.linalg import gmres -from fipy.solvers.scipy.scipyKrylovSolver import _ScipyKrylovSolver +from fipy.solvers.scipy.scipyKrylovSolver import ScipyKrylovSolver __all__ = ["LinearGMRESSolver"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class LinearGMRESSolver(_ScipyKrylovSolver): +class LinearGMRESSolver(ScipyKrylovSolver): """ The `LinearGMRESSolver` is an interface to the GMRES solver in Scipy, with no preconditioning by default. """ - def __init__(self, tolerance=1e-15, iterations=2000, precon=None): - """ - Parameters - ---------- - tolerance : float - Required error tolerance. - iterations : int - Maximum number of iterative steps to perform. - precon - Preconditioner to use. 
- """ - - super(LinearGMRESSolver, self).__init__(tolerance=tolerance, iterations=iterations, precon=precon) - self.solveFnc = gmres + solveFnc = staticmethod(gmres) diff --git a/fipy/solvers/scipy/linearLUSolver.py b/fipy/solvers/scipy/linearLUSolver.py index cd91dc77ed..289fccdf73 100644 --- a/fipy/solvers/scipy/linearLUSolver.py +++ b/fipy/solvers/scipy/linearLUSolver.py @@ -5,7 +5,7 @@ from scipy.sparse.linalg import splu -from fipy.solvers.scipy.scipySolver import _ScipySolver +from fipy.solvers.scipy.scipySolver import ScipySolver from fipy.tools import numerix from fipy.tools.timer import Timer @@ -13,47 +13,82 @@ from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class LinearLUSolver(_ScipySolver): +class LinearLUSolver(ScipySolver): """ The `LinearLUSolver` solves a linear system of equations using LU-factorization. The `LinearLUSolver` is a wrapper class for the the Scipy `scipy.sparse.linalg.splu` module. """ - def _solve_(self, L, x, b): - diag = L.takeDiagonal() - maxdiag = max(numerix.absolute(diag)) + def _adaptLegacyTolerance(self, L, x, b): + return self._adaptInitialTolerance(L, x, b) + + def _adaptUnscaledTolerance(self, L, x, b): + return (1., None) + + def _adaptRHSTolerance(self, L, x, b): + return (self._rhsNorm(L, x, b), None) + + def _adaptMatrixTolerance(self, L, x, b): + return (self._matrixNorm(L, x, b), None) + + def _adaptInitialTolerance(self, L, x, b): + return (self._residualNorm(L, x, b), None) + def _solve_(self, L, x, b): + """Solve system of equations posed for SciPy + + Parameters + ---------- + L : ~scipy.sparse.csr_matrix + Sparse matrix + x : ndarray + Solution vector + b : ndarray + Right hand side vector + + Returns + ------- + x : ndarray + Solution vector + """ self._log.debug("BEGIN precondition") with Timer() as t: + diag = L.diagonal() + maxdiag = max(numerix.absolute(diag)) L = L * (1 / maxdiag) b = b * (1 / maxdiag) self._log.debug("END precondition - {} ns".format(t.elapsed)) + tolerance_scale, _ = self._adaptTolerance(L, x, b) + self._log.debug("BEGIN solve") with Timer() as t: - LU = splu(L.matrix.asformat("csc"), diag_pivot_thresh=1., - relax=1, - panel_size=10, - permc_spec=3) - - error0 = numerix.sqrt(numerix.sum((L * x - b)**2)) + LU = splu(L.asformat("csc"), + diag_pivot_thresh=maxdiag, + relax=1, + panel_size=10, + permc_spec=3) for iteration in range(min(self.iterations, 10)): - errorVector = L * x - b + residualVector, residual = self._residualVectorAndNorm(L, x, b) - if numerix.sqrt(numerix.sum(errorVector**2)) <= self.tolerance * error0: + if residual <= self.tolerance * tolerance_scale: break - xError = LU.solve(errorVector) + xError = LU.solve(residualVector) x[:] = x - xError self._log.debug("END solve - {} ns".format(t.elapsed)) - self._log.debug('iterations: %d / %d', iteration+1, self.iterations) - self._log.debug('residual: %s', numerix.sqrt(numerix.sum(errorVector**2))) + self._setConvergence(suite="scipy", + code=0, + iterations=iteration+1, + residual=residual) + + self.convergence.warn() return x diff --git a/fipy/solvers/scipy/linearPCGSolver.py b/fipy/solvers/scipy/linearPCGSolver.py index 7c7ad9ef86..a93711615d 100644 --- a/fipy/solvers/scipy/linearPCGSolver.py +++ b/fipy/solvers/scipy/linearPCGSolver.py @@ -3,32 +3,19 @@ from scipy.sparse.linalg import cg -from fipy.solvers.scipy.scipyKrylovSolver import _ScipyKrylovSolver +from fipy.solvers.scipy.scipyKrylovSolver import ScipyKrylovSolver __all__ = ["LinearPCGSolver"] from future.utils import text_to_native_str 
__all__ = [text_to_native_str(n) for n in __all__] -class LinearPCGSolver(_ScipyKrylovSolver): +class LinearPCGSolver(ScipyKrylovSolver): """ The `LinearPCGSolver` is an interface to the CG solver in Scipy, with no preconditioning by default. """ - def __init__(self, tolerance=1e-15, iterations=2000, precon=None): - """ - Parameters - ---------- - tolerance : float - Required error tolerance. - iterations : int - Maximum number of iterative steps to perform. - precon - Preconditioner to use. - """ - - super(LinearPCGSolver, self).__init__(tolerance=tolerance, iterations=iterations, precon=precon) - self.solveFnc = cg + solveFnc = staticmethod(cg) def _canSolveAsymmetric(self): return False diff --git a/fipy/solvers/scipy/preconditioners/__init__.py b/fipy/solvers/scipy/preconditioners/__init__.py new file mode 100644 index 0000000000..44086bb4ea --- /dev/null +++ b/fipy/solvers/scipy/preconditioners/__init__.py @@ -0,0 +1,8 @@ +from __future__ import unicode_literals + +from .iluPreconditioner import * +from .jacobiPreconditioner import * + +__all__ = [] +__all__.extend(iluPreconditioner.__all__) +__all__.extend(jacobiPreconditioner.__all__) diff --git a/fipy/solvers/scipy/preconditioners/iluPreconditioner.py b/fipy/solvers/scipy/preconditioners/iluPreconditioner.py new file mode 100644 index 0000000000..6fb01b6202 --- /dev/null +++ b/fipy/solvers/scipy/preconditioners/iluPreconditioner.py @@ -0,0 +1,23 @@ +from __future__ import unicode_literals +__docformat__ = 'restructuredtext' + +__all__ = ["ILUPreconditioner"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] + +from scipy.sparse.linalg import LinearOperator, spilu + +from .scipyPreconditioner import ScipyPreconditioner + +class ILUPreconditioner(ScipyPreconditioner): + """ILU preconditioner for :class:`~fipy.solvers.scipy.scipySolver.ScipySolver`. + + Wrapper class for :func:`scipy.sparse.linalg.spilu`. + Adapted from https://stackoverflow.com/q/46876951/2019542. + """ + + def _applyToMatrix(self, matrix): + ilu = spilu(matrix.tocsc()) + Mx = lambda x: ilu.solve(x) + + return LinearOperator(matrix.shape, Mx), matrix diff --git a/fipy/solvers/scipy/preconditioners/jacobiPreconditioner.py b/fipy/solvers/scipy/preconditioners/jacobiPreconditioner.py new file mode 100644 index 0000000000..578fc9645d --- /dev/null +++ b/fipy/solvers/scipy/preconditioners/jacobiPreconditioner.py @@ -0,0 +1,25 @@ +from __future__ import unicode_literals +__docformat__ = 'restructuredtext' + +__all__ = ["JacobiPreconditioner"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] + +from scipy.sparse import diags +from scipy.sparse.linalg import LinearOperator, spsolve + +from .scipyPreconditioner import ScipyPreconditioner + +class JacobiPreconditioner(ScipyPreconditioner): + """Jacobi preconditioner for :class:`~fipy.solvers.scipy.scipySolver.ScipySolver`. + + Wrapper class for :func:`scipy.sparse.linalg.spsolve` with `matrix` + diagonal. + Adapted from https://stackoverflow.com/q/46876951/2019542. 
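The new SciPy preconditioners expose an approximate inverse as a ``LinearOperator`` so that the Krylov routines can apply it through the ``M`` argument. The same pattern in plain SciPy, outside FiPy (the test matrix is made up, and solver defaults are left untouched to stay version-agnostic)::

    import numpy as np
    from scipy.sparse import identity, random as sparse_random
    from scipy.sparse.linalg import LinearOperator, cg, spilu

    # a small, diagonally dominant (hence SPD after symmetrizing) sparse system
    n = 200
    A = (sparse_random(n, n, density=0.01, random_state=42) * 0.1
         + identity(n) * 4.).tocsr()
    A = (A + A.T) / 2.
    b = np.ones(n)

    # ILU approximate inverse wrapped as a LinearOperator, as in ILUPreconditioner
    ilu = spilu(A.tocsc())
    M = LinearOperator(A.shape, matvec=ilu.solve)

    x, info = cg(A, b, M=M)          # info == 0 means the solve converged
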
+ """ + + def _applyToMatrix(self, matrix): + P = diags(matrix.diagonal()).tocsc() + Mx = lambda x: spsolve(P, x) + + return LinearOperator(matrix.shape, Mx), matrix diff --git a/fipy/solvers/scipy/preconditioners/scipyPreconditioner.py b/fipy/solvers/scipy/preconditioners/scipyPreconditioner.py new file mode 100644 index 0000000000..226d9f5d1f --- /dev/null +++ b/fipy/solvers/scipy/preconditioners/scipyPreconditioner.py @@ -0,0 +1,16 @@ +from __future__ import unicode_literals +__docformat__ = 'restructuredtext' + +from fipy.solvers.preconditioner import MatrixModifyingPreconditioner + +__all__ = ["ScipyPreconditioner"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] + +class ScipyPreconditioner(MatrixModifyingPreconditioner): + """Base class for preconditioners for :class:`~fipy.solvers.scipy.scipySolver.ScipySolver`. + + .. attention:: This class is abstract. Always create one of its subclasses. + """ + + pass diff --git a/fipy/solvers/scipy/scipyConvergence.py b/fipy/solvers/scipy/scipyConvergence.py new file mode 100644 index 0000000000..94f2cfb05c --- /dev/null +++ b/fipy/solvers/scipy/scipyConvergence.py @@ -0,0 +1,23 @@ +from ..convergence import (Convergence, IterationDivergence, + BreakdownDivergence) + +class SciPy_Convergence(Convergence): + """ + """ + status_code = 0 + status_name = "SCIPY_SUCCESS" + suite = "scipy" + +class SciPy_BreakdownDivergence(BreakdownDivergence): + """ + """ + status_code = -1 + status_name = "SCIPY_ILLEGAL/BREAKDOWN" + suite = "scipy" + +class SciPy_IterationDivergence(IterationDivergence): + """ + """ + status_code = +1 + status_name = "SCIPY_MAXIT" + suite = "scipy" diff --git a/fipy/solvers/scipy/scipyKrylovSolver.py b/fipy/solvers/scipy/scipyKrylovSolver.py index 7581d2a6b0..9f0d7d2f70 100644 --- a/fipy/solvers/scipy/scipyKrylovSolver.py +++ b/fipy/solvers/scipy/scipyKrylovSolver.py @@ -1,20 +1,67 @@ from __future__ import unicode_literals __docformat__ = 'restructuredtext' -__all__ = [] +__all__ = ["ScipyKrylovSolver"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] -from fipy.solvers.scipy.scipySolver import _ScipySolver +import os +import warnings + +from .scipySolver import ScipySolver +from fipy.tools import numerix from fipy.tools.timer import Timer -class _ScipyKrylovSolver(_ScipySolver): +class ScipyKrylovSolver(ScipySolver): """ The base `ScipyKrylovSolver` class. .. attention:: This class is abstract. Always create one of its subclasses. """ + def _countIterations(self, xk): + self.actualIterations += 1 + + def _adaptLegacyTolerance(self, L, x, b): + return self._adaptRHSTolerance(L, x, b) + + def _adaptUnscaledTolerance(self, L, x, b): + factor = 1. 
/ self._rhsNorm(L, x, b) + return (factor, None) + + def _adaptRHSTolerance(self, L, x, b): + return (1., None) + + def _adaptMatrixTolerance(self, L, x, b): + factor = self._matrixNorm(L, x, b) / self._rhsNorm(L, x, b) + return (factor, None) + + def _adaptInitialTolerance(self, L, x, b): + factor = self._residualNorm(L, x, b) / self._rhsNorm(L, x, b) + return (factor, None) + def _solve_(self, L, x, b): - A = L.matrix + """Solve system of equations posed for SciPy + + Parameters + ---------- + L : ~scipy.sparse.csr_matrix + Sparse matrix + x : ndarray + Solution vector + b : ndarray + Right hand side vector + + Returns + ------- + x : ndarray + Solution vector + """ + tolerance_scale, _ = self._adaptTolerance(L, x, b) + + self.actualIterations = 0 + + rtol = self.scale_tolerance(self.tolerance, tolerance_scale) self._log.debug("BEGIN precondition") @@ -22,22 +69,29 @@ def _solve_(self, L, x, b): if self.preconditioner is None: M = None else: - M = self.preconditioner._applyToMatrix(A) + M, _ = self.preconditioner._applyToMatrix(L) self._log.debug("END precondition - {} ns".format(t.elapsed)) self._log.debug("BEGIN solve") with Timer() as t: - x, info = self.solveFnc(A, b, x, - rtol=self.tolerance, + x, info = self.solveFnc(L, b, x, + tol=rtol, + atol=self.absolute_tolerance, maxiter=self.iterations, M=M, - atol=0.0) + callback=self._countIterations) self._log.debug("END solve - {} ns".format(t.elapsed)) - if info < 0: - self._log.debug('failure: %s', self._warningList[info].__class__.__name__) + self._setConvergence(suite="scipy", + code=numerix.sign(info), + actual_code=info, + iterations=self.actualIterations, + tolerance_scale=tolerance_scale, + residual=self._residualNorm(L, x, b)) + + self.convergence.warn() return x diff --git a/fipy/solvers/scipy/scipySolver.py b/fipy/solvers/scipy/scipySolver.py index d4117cc5f7..9f15d4ad5e 100644 --- a/fipy/solvers/scipy/scipySolver.py +++ b/fipy/solvers/scipy/scipySolver.py @@ -1,26 +1,59 @@ from __future__ import unicode_literals __docformat__ = 'restructuredtext' -__all__ = [] +__all__ = ["ScipySolver"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] + +from scipy.sparse import linalg from fipy.matrices.scipyMatrix import _ScipyMeshMatrix from fipy.solvers.solver import Solver from fipy.tools import numerix -class _ScipySolver(Solver): +class ScipySolver(Solver): """ The base `ScipySolver` class. .. attention:: This class is abstract. Always create one of its subclasses. """ + def __init__(self, tolerance="default", absolute_tolerance=0., + criterion="default", + iterations="default", precon="default"): + """ + Create a `Solver` object. + + Parameters + ---------- + tolerance : float + Required relative error tolerance. + absolute_tolerance : float + Required absolute error tolerance. + criterion : {'default', 'unscaled', 'RHS', 'matrix', 'initial', 'legacy', } + Interpretation of ``tolerance``. + See :ref:`CONVERGENCE` for more information. + iterations : int + Maximum number of iterative steps to perform. + precon + Preconditioner to use. Not all solver suites support + preconditioners. 
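SciPy's Krylov routines only return a status code, so the solver registers ``_countIterations`` as a callback to learn how many iterations were actually taken. The same trick in isolation (the counter class is a hypothetical stand-in)::

    import numpy as np
    from scipy.sparse import diags
    from scipy.sparse.linalg import cg

    class IterationCounter(object):
        """Count how many times the Krylov solver invokes its callback."""
        def __init__(self):
            self.count = 0

        def __call__(self, xk):
            self.count += 1

    n = 100
    A = diags([-1., 2., -1.], offsets=[-1, 0, 1], shape=(n, n), format="csr")
    b = np.ones(n)

    counter = IterationCounter()
    x, info = cg(A, b, callback=counter)
    print(counter.count, info)
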
+ """ + self.absolute_tolerance = absolute_tolerance + super(ScipySolver, self).__init__(tolerance=tolerance, criterion=criterion, + iterations=iterations, precon=precon) + @property def _matrixClass(self): return _ScipyMeshMatrix - def _solve(self): + def _rhsNorm(self, L, x, b): + return numerix.L2norm(b) + + def _matrixNorm(self, L, x, b): + return linalg.norm(L, ord=numerix.inf) - if self.var.mesh.communicator.Nproc > 1: - raise Exception("SciPy solvers cannot be used with multiple processors") + def _residualVectorAndNorm(self, L, x, b): + residualVector = L * x - b - self.var[:] = numerix.reshape(self._solve_(self.matrix, self.var.ravel(), numerix.array(self.RHSvector)), self.var.shape) + return residualVector, numerix.L2norm(residualVector) diff --git a/fipy/solvers/solver.py b/fipy/solvers/solver.py index d9df8f5f9d..e631449b37 100644 --- a/fipy/solvers/solver.py +++ b/fipy/solvers/solver.py @@ -12,19 +12,26 @@ """ from __future__ import division +from __future__ import print_function from __future__ import unicode_literals from builtins import object from builtins import str __docformat__ = 'restructuredtext' import logging +import os +import warnings from fipy.tools import numerix +from .convergence import ConvergenceBase -__all__ = ["SolverConvergenceWarning", "MaximumIterationWarning", +__all__ = ["SolverConvergenceWarning", "NormalConvergence", "MaximumIterationWarning", "PreconditionerWarning", "IllConditionedPreconditionerWarning", "PreconditionerNotPositiveDefiniteWarning", "MatrixIllConditionedWarning", - "StagnatedSolverWarning", "ScalarQuantityOutOfRangeWarning", "Solver"] + "StagnatedSolverWarning", "ScalarQuantityOutOfRangeWarning", + "IllegalInputOrBreakdownWarning", + "ParameterWarning", "BreakdownWarning", "LossOfPrecisionWarning", + "Solver"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] @@ -37,6 +44,10 @@ def __init__(self, solver, iter, relres): def __str__(self): return "%s failed. Iterations: %g. Relative error: %g" % (str(self.solver), self.iter, self.relres) +class NormalConvergence(SolverConvergenceWarning): + def __str__(self): + return "User requested convergence criteria is satisfied. Iterations: {0}. Relative error: {1}".format(self.iter, self.relres) + class MaximumIterationWarning(SolverConvergenceWarning): def __str__(self): return "Iterations: %g. Relative error: %g" % (self.iter, self.relres) @@ -64,6 +75,25 @@ class ScalarQuantityOutOfRangeWarning(SolverConvergenceWarning): def __str__(self): return "A scalar quantity became too small or too large to continue computing. Iterations: %g. Relative error: %g" % (self.iter, self.relres) +class IllegalInputOrBreakdownWarning(SolverConvergenceWarning): + def __str__(self): + return "{0} received illegal input or had a breakdown." \ + "Iterations: {1}. Relative error: {2}".format(self.solver, self.iter, self.relres) + +class ParameterWarning(SolverConvergenceWarning): + def __str__(self): + return "User requested option is not available for {0}.".format(self.solver) + +class BreakdownWarning(SolverConvergenceWarning): + def __str__(self): + return "Numerical breakdown occurred. Iterations: {0}. Relative error: {1}".format(self.iter, self.relres) + +class LossOfPrecisionWarning(SolverConvergenceWarning): + def __str__(self): + return "Numerical loss of precision occurred. Iterations: {0}. Relative error: {1}".format(self.iter, self.relres) + + + class Solver(object): """ The base `LinearXSolver` class. @@ -71,42 +101,251 @@ class Solver(object): .. 
attention:: This class is abstract. Always create one of its subclasses. """ - def __init__(self, tolerance=1e-10, iterations=1000, precon=None): + #: Default tolerance for linear solves unless `criterion="legacy"` + DEFAULT_TOLERANCE = 1e-5 + + #: Default tolerance for linear solves if `criterion="legacy"` + LEGACY_TOLERANCE = 1e-10 + + #: Default maximum number of iterative steps to perform + DEFAULT_ITERATIONS = 1000 + + #: Default preconditioner to apply to the matrix + DEFAULT_PRECONDITIONER = None + + def __init__(self, tolerance="default", criterion="default", + iterations="default", precon="default"): """ Create a `Solver` object. Parameters ---------- tolerance : float - Required error tolerance. + Required residual tolerance. + criterion : {'default', 'initial', 'unscaled', 'RHS', 'matrix', 'solution', 'preconditioned', 'natural', 'legacy'}, optional + Interpretation of ``tolerance``. + See :ref:`CONVERGENCE` for more information. iterations : int Maximum number of iterative steps to perform. - precon + precon : ~fipy.solvers.preconditioner.Preconditioner Preconditioner to use. Not all solver suites support preconditioners. """ if self.__class__ is Solver: raise NotImplementedError("can't instantiate abstract base class") - self.tolerance = tolerance - self.iterations = iterations - - self.preconditioner = precon + self.criterion = self.value_or_default(criterion, + "default", + "FIPY_DEFAULT_CRITERION") + self.tolerance = self.value_or_default(tolerance, + self.default_tolerance) + self.iterations = self.value_or_default(iterations, + self.DEFAULT_ITERATIONS) + self.preconditioner = self.value_or_default(precon, + self.default_preconditioner) self._log = logging.getLogger(self.__class__.__module__ + "." + self.__class__.__name__) + def value_or_default(self, value, default, envvar=None): + if value == "default": + if envvar is not None: + value = os.environ.get(envvar, default) + else: + value = default + return value + + @property + def default_tolerance(self): + """Default tolerance for linear solve + """ + if self.criterion == "legacy": + return self.LEGACY_TOLERANCE + else: + return self.DEFAULT_TOLERANCE + + @property + def default_preconditioner(self): + if self.DEFAULT_PRECONDITIONER is not None: + # instantiate DEFAULT_PRECONDITIONER class + return self.DEFAULT_PRECONDITIONER() + else: + return None + def _storeMatrix(self, var, matrix, RHSvector): self.var = var self.matrix = matrix self.RHSvector = RHSvector + def _scatterGhosts(self, x): + """Distribute ghost values (if any) across processes + """ + return x + + def _cleanup(self): + pass + def _solve(self): - raise NotImplementedError + """Solve system of equations posed for FiPy + + Common method invoked by :class:`~fipy.terms.term.Term`, which then + calls solver-suite-specific :math:`~fipy.solvers.solver._solve_` + methods. 
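The resolution order implemented by ``value_or_default`` is: an explicit argument wins, then (for the criterion) the ``FIPY_DEFAULT_CRITERION`` environment variable, then the class default; the default tolerance in turn depends on whether the ``legacy`` criterion was requested. A usage sketch, assuming a FiPy build that includes this patch and at least one installed solver suite::

    import os
    os.environ["FIPY_DEFAULT_CRITERION"] = "initial"   # consulted when criterion="default"

    import fipy as fp

    s1 = fp.DefaultSolver()                    # criterion "initial", from the environment
    s2 = fp.DefaultSolver(criterion="legacy")  # explicit argument wins; tolerance defaults to 1e-10
    s3 = fp.DefaultSolver(criterion="RHS", tolerance=1e-6)
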
+ """ + L, x, b = self._Lxb + + if self._log.isEnabledFor(logging.DEBUG): + s = "Lnorm: {Lnorm}, bnorm: {bnorm}, rnorm: {rnorm}" + self._log.debug(s.format(Lnorm=self._matrixNorm(L, x, b), + bnorm=self._rhsNorm(L, x, b), + rnorm=self._residualNorm(L, x, b))) + + x = self._solve_(L, x, b) + + x = self._scatterGhosts(x) + + factor = self.var.unit.factor + if factor != 1: + x /= self.var.unit.factor + + self.var.value = x.reshape(self.var.shape) + + self._cleanup() def _solve_(self, L, x, b): + """Solve system of equations posed for solver suite + + Parameters + ---------- + L : ~fipy.matrices.sparseMatrix._SparseMatrix + Sparse matrix object + x : array_like + Solution variable in form suitable for solver + b : array_like + Right-hand side vector in form suitable for solver + + Returns + ------- + ndarray + Solution vector + """ + raise NotImplementedError + + def _setConvergence(self, suite, code, iterations, residual, actual_code=None, **kwargs): + cls = ConvergenceBase.code_registry[(suite, code)] + self.convergence = cls(solver=self, + iterations=iterations, + residual=residual, + criterion=self.criterion, + actual_code=actual_code, + **kwargs) + + def _legacyNorm(self, L, x, b): + raise NotImplementedError + + def _unscaledNorm(self, L, x, b): + return 1. + + def _rhsNorm(self, L, x, b): + raise NotImplementedError + + def _matrixNorm(self, L, x, b): + raise NotImplementedError + + def _residualVectorAndNorm(self, L, x, b): + raise NotImplementedError + + def _residualNorm(self, L, x, b): + _, residual = self._residualVectorAndNorm(L, x, b) + + return residual + + def _solutionNorm(self, L, x, b): + raise NotImplementedError + + def _preconditionedNorm(self, L, x, b): + raise NotImplementedError + + def _naturalNorm(self, L, x, b): + raise NotImplementedError + + @property + def _Lxb(self): + """Matrix, solution vector, and right-hand side vector + + Returns + ------- + L : matrix + Sparse matrix in form suitable for solver + x : ndarray + Solution variable in form suitable for solver + b : ndarray + Right-hand side vector in form suitable for solver + """ + if self.var.mesh.communicator.Nproc > 1: + raise Exception(str(type(self)) + " cannot be used with multiple processors") + + L = self.matrix.matrix + x = self.var.numericValue.ravel() + b = numerix.asarray(self.RHSvector) + + if ((self.matrix == 0) + or (L.shape[0] != L.shape[1]) + or (L.shape[0] != len(x))): + + from fipy.terms import SolutionVariableNumberError + + raise SolutionVariableNumberError + + return (L, x, b) + + def _adaptLegacyTolerance(self, L, x, b): + raise NotImplementedError + + def _adaptUnscaledTolerance(self, L, x, b): + raise NotImplementedError + + def _adaptRHSTolerance(self, L, x, b): + raise NotImplementedError + + def _adaptMatrixTolerance(self, L, x, b): + raise NotImplementedError + + def _adaptInitialTolerance(self, L, x, b): + raise NotImplementedError + + def _adaptSolutionTolerance(self, L, x, b): + raise NotImplementedError + + def _adaptPreconditionedTolerance(self, L, x, b): raise NotImplementedError + def _adaptNaturalTolerance(self, L, x, b): + raise NotImplementedError + + def _adaptTolerance(self, L, x, b): + adapt = { + "legacy": self._adaptLegacyTolerance, + "unscaled": self._adaptUnscaledTolerance, + "RHS": self._adaptRHSTolerance, + "matrix": self._adaptMatrixTolerance, + "initial": self._adaptInitialTolerance, + "solution": self._adaptSolutionTolerance, + "preconditioned": self._adaptPreconditionedTolerance, + "natural": self._adaptNaturalTolerance, + "default": self._adaptRHSTolerance + 
} + + tolerance_scale, suite_criterion = adapt[self.criterion](L, x, b) + + return tolerance_scale, suite_criterion + + @staticmethod + def scale_tolerance(tol, scale): + if tol is not None: + tol *= scale + return tol + def _applyUnderRelaxation(self, underRelaxation=None): if underRelaxation is not None: self.matrix.putDiagonal(numerix.asarray(self.matrix.takeDiagonal()) / underRelaxation) @@ -129,22 +368,6 @@ def _calcResidual(self, residualFn=None): def _calcRHSNorm(self): return numerix.L2norm(self.RHSvector) - _warningList = (ScalarQuantityOutOfRangeWarning, - StagnatedSolverWarning, - MatrixIllConditionedWarning, - PreconditionerNotPositiveDefiniteWarning, - IllConditionedPreconditionerWarning, - MaximumIterationWarning) - - def _raiseWarning(self, info, iter, relres): - # info is negative, so we list in reverse order so that - # info can be used as an index from the end - - if info < 0: - # is stacklevel=5 always what's needed to get to the user's scope? - import warnings - warnings.warn(self._warningList[info](self, iter, relres), stacklevel=5) - def __repr__(self): return '%s(tolerance=%g, iterations=%g)' \ % (self.__class__.__name__, self.tolerance, self.iterations) @@ -170,5 +393,198 @@ def __enter__(self): def __exit__(self, exc_type, exc_value, traceback): pass + def __del__(self): pass + + def _test(self): + """ + >>> import fipy as fp + + For sufficiently constrained circumstances, all solver suites + should do the same thing. The following problem setup is designed + to ensure that all interpret solver criteria correctly and achieve + the "same" tolerance in the same number of iterations. + + Consider a steady-state 1D diffusion problem with a + position-dependent diffusivity and Dirichlet boundary conditions: + + .. math:: + + \begin{aligned} + \frac{\partial}{\partial x}\left[ + \left(1 + x\right) + \frac{\partial \phi}{\partial x} + \right] &= 0 + \\ + \left.\phi\right\rvert_{x=0} &= \phi_L + \\ + \left.\phi\right\rvert_{x=1} &= \phi_R + \end{aligned} + + with the analytical solution + + .. math:: + + \phi = \frac{\phi_R - \phi_L}{\ln 2} \ln\left(1 + x\right) + \phi_L + + >>> N = 100 + >>> mesh = fp.Grid1D(nx=N, Lx=1) + >>> phi = fp.CellVariable(mesh=mesh, name=r"$\phi") + >>> phiL = 1000. + >>> phiR = 2000. + >>> phi_analytical = ((((phiR - phiL)/fp.numerix.log(2.)) + ... * fp.numerix.log(1 + mesh.x)) + ... + phiL) + >>> phi_analytical.name = r"$\phi_\mathrm{analytical}$" + + >>> fp.numerix.random.seed(12345) + >>> variance = 1e-3 + >>> phi_initial = phi_analytical + fp.GaussianNoiseVariable(mesh=mesh, variance=variance) + >>> phi.value = phi_initial + >>> phi.constrain(phiL, where=mesh.facesLeft) + >>> phi.constrain(phiR, where=mesh.facesRight) + >>> D = fp.FaceVariable(mesh=mesh, value=1 + mesh.faceCenters[0]) + >>> eq = fp.DiffusionTerm(coeff=D) == 0 + + For reproducibility between suites, we select a solver with + predictable characteristics (that counts out GMRES) and no + preconditioning. + + >>> Solver = fp.LinearCGSSolver + >>> solver = Solver(precon=None) + + >>> solver = eq._prepareLinearSystem(var=phi, + ... solver=solver, + ... boundaryConditions=(), + ... dt=1.) + >>> L, x, b = solver._Lxb + + The problem parameters were chosen to give good separation between the + different convergence norms. + + The norm of the matrix is the infinity norm + + .. 
math:: + + \left\| L_{ij}\right\|_\infty &= \max_i \sum_j \left| A_ij \right| + \\ + &= \max_i \left[ + \left| -N(1 + x_i) \right| + + \left| 2N(1 + x_i) \right| + + \left| -N(1 + x_i) \right| + \right] + \\ + &= \max_i 4N(1 + x_i) + &= \mathcal{O}(8 N) + + >>> Lnorm = solver._matrixNorm(L, x, b) + >>> print(numerix.allclose(Lnorm, 8 * N, rtol=0.1)) + True + + The right-hand-side vector is zero except at the boundaries, + where the contribution is + + .. math:: + + \frac{(1 + x) \phi_{BC} A_f}{d_{AP}} &= (1 + x) \phi_{BC} 2 N + \\ + &= 2 N \phi_L = 2000 N\qquad\text{at $x = 0$} + \\ + &= 4 N \phi_R = 8000 N\qquad\text{at $x = 1$} + + Thus the :math:`L_2` norm of the right-hand-side vector is + :math:`\left\| b \right\|_2 = \math{O}(8000 N}`. + + >>> bnorm = solver._rhsNorm(L, x, b) + >>> print(numerix.allclose(bnorm, 8000 * N, rtol=0.1)) + True + + We choose the initial condition such that the initial residual will + be small. + + .. math:: + + \phi_0 &= \phi_\text{analytical} + \mathcal{O}(\sigma) + \\ + r = L \phi_0 - b + &= L \phi_\text{analytical} - b + L \mathcal{O}(\sigma) + \\ + &= L \mathcal{O}(\sigma) + \\ + \left\| r \right\|_2 &= \left\| L \mathcal{O}(\sigma) \right\|_2 + \\ + &= \sqrt{\sum_{0 \le i < N} \left[ + N(1 + x_i) \mathcal{O}(\sigma) + + 2N(1 + x_i) \mathcal{O}(\sigma) + + N(1 + x_i) \mathcal{O}(\sigma) + \right]^2} + \\ + &= 4 N \mathcal{O}(\sigma) \sqrt{\sum_{0 \le i < N} (1 + x_i)^2} + \\ + &= \text{probably $\sqrt{\pi}$ or something} + \\ + &= \mathcal{O}(4 N \sqrt{N} \sigma) + + >>> rnorm = solver._residualNorm(L, x, b) + >>> print(numerix.allclose(rnorm, 4 * N * numerix.sqrt(N * variance), + ... rtol=0.1)) + True + + Calculate the error of the initial condition (probably could be + estimated via truncation error blah blah blah). + + >>> enorm = fp.numerix.L2norm(phi - phi_analytical) / fp.numerix.L2norm(phi_analytical) + + >>> from fipy.solvers.convergence import Convergence + + Check that: + - the solution is converged, + - the solver reaches the desired residual for the + criterion, without overshooting too much. Most get close, but + "unscaled" overshoots a lot for most suites. + - the iteration count is as expected + - the error has been reduced from the initial guess + + >>> criteria = [ + ... ("unscaled", 1., 0.003, 114), + ... ("RHS", bnorm, 0.6, 2), + ... ("matrix", Lnorm, 0.6, 58), + ... ("initial", rnorm, 0.6, 110) + ... ] + >>> # criteria += ["solution"] doctest: +TRILINOS_SOLVER + >>> criteria += [ + ... ("preconditioned", bnorm, 0.6, 2), + ... ("natural", bnorm, 0.6, 6) + ... ] # doctest: +PETSC_SOLVER + >>> satisfied = [] + >>> for (criterion, target, lower_bound, iterations) in criteria: + ... phi.setValue(phi_initial) + ... with Solver(criterion=criterion, precon=None) as s: + ... res = eq.sweep(var=phi, solver=s) + ... error = (fp.numerix.L2norm(phi - phi_analytical) + ... / fp.numerix.L2norm(phi_analytical)) + ... checks = [isinstance(s.convergence, Convergence), + ... (lower_bound + ... < (s.convergence.residual + ... / (s.tolerance * target)) + ... < 1.0), + ... numerix.allclose(s.convergence.iterations, + ... iterations, + ... atol=1), + ... error < enorm] + ... 
satisfied.append(all(checks)) + >>> print(all(satisfied)) + True + + # str(fp.numerix.L2norm(phi - phi_analytical) / fp.numerix.L2norm(phi_analytical)) + + """ + pass + +def _test(): + import fipy.tests.doctestPlus + return fipy.tests.doctestPlus.testmod() + +if __name__ == "__main__": + _test() diff --git a/fipy/solvers/test.py b/fipy/solvers/test.py index 67a1be08be..8f97a99481 100755 --- a/fipy/solvers/test.py +++ b/fipy/solvers/test.py @@ -1,12 +1,13 @@ from __future__ import unicode_literals __all__ = [] -from fipy.tests.lateImportTest import _LateImportTestSuite +from fipy.tests.doctestPlus import _LateImportDocTestSuite import fipy.tests.testProgram def _suite(): - return _LateImportTestSuite(testModuleNames = (), - base = __name__) + return _LateImportDocTestSuite(docTestModuleNames = ( + 'solver', + ), base = __name__) if __name__ == '__main__': fipy.tests.testProgram.main(defaultTest='_suite') diff --git a/fipy/solvers/trilinos/__init__.py b/fipy/solvers/trilinos/__init__.py index 52e1ea5f6d..987511e421 100644 --- a/fipy/solvers/trilinos/__init__.py +++ b/fipy/solvers/trilinos/__init__.py @@ -49,15 +49,17 @@ def _dealWithTrilinosImportPathologies(): _dealWithTrilinosImportPathologies() -from fipy.solvers.trilinos.preconditioners import * +from .preconditioners import * -from fipy.solvers.trilinos.linearCGSSolver import * -from fipy.solvers.trilinos.linearPCGSolver import * -from fipy.solvers.trilinos.linearGMRESSolver import * -from fipy.solvers.trilinos.linearLUSolver import * -from fipy.solvers.trilinos.linearBicgstabSolver import * +from .linearCGSSolver import * +from .linearPCGSolver import * +from .linearGMRESSolver import * +from .linearLUSolver import * +from .linearBicgstabSolver import * -from fipy.solvers.trilinos.trilinosMLTest import * +from .trilinosMLTest import * + +from . import aztecConvergence DefaultSolver = LinearGMRESSolver DefaultAsymmetricSolver = LinearGMRESSolver diff --git a/fipy/solvers/trilinos/aztecConvergence.py b/fipy/solvers/trilinos/aztecConvergence.py new file mode 100644 index 0000000000..e9ea9de697 --- /dev/null +++ b/fipy/solvers/trilinos/aztecConvergence.py @@ -0,0 +1,51 @@ +from PyTrilinos import AztecOO + +from ..convergence import (Convergence, Divergence, + BreakdownDivergence, + IllConditionedDivergence, + IterationDivergence, + LossOfAccuracyConvergence) + +class AZ_NormalConvergence(Convergence): + """ + """ + status_code = AztecOO.AZ_normal + status_name = "AZ_normal" + suite = "trilinos" + +# Is this a convergence or divergence? +# Does it mean the same thing as OutOfRangeDivergence? +class AZ_LossOfAccuracyConvergence(LossOfAccuracyConvergence): + """Numerical loss of precision occurred. 
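Outside of the doctest, the same machinery can be exercised interactively: choose a criterion, sweep, and inspect the recorded convergence object. A sketch under the same assumptions as the doctest (a FiPy build with this patch and any installed suite; the mesh, coefficients, and variable names are illustrative)::

    import fipy as fp

    mesh = fp.Grid1D(nx=100, Lx=1.)
    phi = fp.CellVariable(mesh=mesh, value=0.)
    phi.constrain(0., where=mesh.facesLeft)
    phi.constrain(1., where=mesh.facesRight)

    eq = fp.DiffusionTerm(coeff=1.) == 0

    with fp.DefaultSolver(criterion="RHS", tolerance=1e-6) as solver:
        res = eq.sweep(var=phi, solver=solver)
        print(solver.convergence.status_name,
              solver.convergence.iterations,
              solver.convergence.residual)
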
+ """ + status_code = AztecOO.AZ_loss + status_name = "AZ_loss" + suite = "trilinos" + +class AZ_ParameterDivergence(Divergence): + """ + """ + status_code = AztecOO.AZ_param + status_name = "AZ_param" + suite = "trilinos" + +class AZ_BreakdownDivergence(BreakdownDivergence): + """ + """ + status_code = AztecOO.AZ_breakdown + status_name = "AZ_breakdown" + suite = "trilinos" + +class AZ_IllConditionedDivergence(IllConditionedDivergence): + """ + """ + status_code = AztecOO.AZ_ill_cond + status_name = "AZ_ill_cond" + suite = "trilinos" + +class AZ_IterationDivergence(IterationDivergence): + """ + """ + status_code = AztecOO.AZ_maxits + status_name = "AZ_maxits" + suite = "trilinos" diff --git a/fipy/solvers/trilinos/linearBicgstabSolver.py b/fipy/solvers/trilinos/linearBicgstabSolver.py index 9cd46f9984..33a1d8000c 100644 --- a/fipy/solvers/trilinos/linearBicgstabSolver.py +++ b/fipy/solvers/trilinos/linearBicgstabSolver.py @@ -18,16 +18,6 @@ class LinearBicgstabSolver(TrilinosAztecOOSolver): """ - def __init__(self, tolerance=1e-10, iterations=1000, precon=JacobiPreconditioner()): - """ - Parameters - ---------- - tolerance : float - Required error tolerance. - iterations : int - Maximum number of iterative steps to perform. - precon : ~fipy.solvers.trilinos.preconditioners.preconditioner.Preconditioner - """ - TrilinosAztecOOSolver.__init__(self, tolerance=tolerance, - iterations=iterations, precon=precon) - self.solver = AztecOO.AZ_bicgstab + solver = AztecOO.AZ_bicgstab + + DEFAULT_PRECONDITIONER = JacobiPreconditioner diff --git a/fipy/solvers/trilinos/linearCGSSolver.py b/fipy/solvers/trilinos/linearCGSSolver.py index 999ab06add..0dc58bcac9 100644 --- a/fipy/solvers/trilinos/linearCGSSolver.py +++ b/fipy/solvers/trilinos/linearCGSSolver.py @@ -18,16 +18,6 @@ class LinearCGSSolver(TrilinosAztecOOSolver): """ - def __init__(self, tolerance=1e-10, iterations=1000, precon=MultilevelDDPreconditioner()): - """ - Parameters - ---------- - tolerance : float - Required error tolerance. - iterations : int - Maximum number of iterative steps to perform. - precon : ~fipy.solvers.trilinos.preconditioners.preconditioner.Preconditioner - """ - TrilinosAztecOOSolver.__init__(self, tolerance=tolerance, - iterations=iterations, precon=precon) - self.solver = AztecOO.AZ_cgs + solver = AztecOO.AZ_cgs + + DEFAULT_PRECONDITIONER = MultilevelDDPreconditioner diff --git a/fipy/solvers/trilinos/linearGMRESSolver.py b/fipy/solvers/trilinos/linearGMRESSolver.py index acaa851a59..e20167180d 100644 --- a/fipy/solvers/trilinos/linearGMRESSolver.py +++ b/fipy/solvers/trilinos/linearGMRESSolver.py @@ -14,20 +14,10 @@ class LinearGMRESSolver(TrilinosAztecOOSolver): """ The `LinearGMRESSolver` is an interface to the GMRES solver in Trilinos, - using a the `MultilevelDDPreconditioner` by default. + using the `MultilevelDDPreconditioner` by default. """ - def __init__(self, tolerance=1e-10, iterations=1000, precon=MultilevelDDPreconditioner()): - """ - Parameters - ---------- - tolerance : float - Required error tolerance. - iterations : int - Maximum number of iterative steps to perform. 
- precon : ~fipy.solvers.trilinos.preconditioners.preconditioner.Preconditioner - """ - TrilinosAztecOOSolver.__init__(self, tolerance=tolerance, - iterations=iterations, precon=precon) - self.solver = AztecOO.AZ_gmres + solver = AztecOO.AZ_gmres + + DEFAULT_PRECONDITIONER = MultilevelDDPreconditioner diff --git a/fipy/solvers/trilinos/linearLUSolver.py b/fipy/solvers/trilinos/linearLUSolver.py index 86750b0f4c..4feb3e8fd9 100644 --- a/fipy/solvers/trilinos/linearLUSolver.py +++ b/fipy/solvers/trilinos/linearLUSolver.py @@ -3,6 +3,7 @@ from builtins import range __docformat__ = 'restructuredtext' +from PyTrilinos import AztecOO from PyTrilinos import Epetra from PyTrilinos import Amesos @@ -20,22 +21,24 @@ class LinearLUSolver(TrilinosSolver): """ - def __init__(self, tolerance=1e-10, iterations=10, precon=None, maxIterations=10): + def __init__(self, tolerance="default", criterion="default", precon=None, + iterations=10): """ Parameters ---------- tolerance : float Required error tolerance. + criterion : {'default', 'unscaled', 'RHS', 'matrix', 'initial', 'legacy'} + Interpretation of ``tolerance``. + See :ref:`CONVERGENCE` for more information. iterations : int Maximum number of iterative steps to perform. precon *ignored* """ - iterations = min(iterations, maxIterations) - - TrilinosSolver.__init__(self, tolerance=tolerance, - iterations=iterations, precon=None) + super(LinearLUSolver, self).__init__(tolerance=tolerance, criterion=criterion, + iterations=iterations, precon=None) if precon is not None: import warnings @@ -43,39 +46,65 @@ def __init__(self, tolerance=1e-10, iterations=10, precon=None, maxIterations=10 UserWarning, stacklevel=2) self.Factory = Amesos.Factory() + def _adaptLegacyTolerance(self, L, x, b): + return self._adaptInitialTolerance(L, x, b) + + def _adaptUnscaledTolerance(self, L, x, b): + return (1., None) + + def _adaptRHSTolerance(self, L, x, b): + return (self._rhsNorm(L, x, b), None) + + def _adaptMatrixTolerance(self, L, x, b): + return (self._matrixNorm(L, x, b), None) + + def _adaptInitialTolerance(self, L, x, b): + return (self._residualNorm(L, x, b), None) def _solve_(self, L, x, b): + """Solve system of equations posed for PyTrilinos + + Parameters + ---------- + L : Epetra.CrsMatrix + Sparse matrix + x : Epetra.Vector + Solution variable as non-ghosted vector + b : Epetra.Vector + Right-hand side as non-ghosted vector + + Returns + ------- + x : Epetra.Vector + Solution variable as non-ghosted vector + """ + + tolerance_scale, _ = self._adaptTolerance(L, x, b) self._log.debug("BEGIN solve") with Timer() as t: for iteration in range(self.iterations): - # errorVector = L*x - b - errorVector = Epetra.Vector(L.RangeMap()) - L.Multiply(False, x, errorVector) - # If A is an Epetra.Vector with map M - # and B is an Epetra.Vector with map M - # and C = A - B - # then C is an Epetra.Vector with *no map* !!!?!?! 
- errorVector -= b + residualVector, residual = self._residualVectorAndNorm(L, x, b) - tol = errorVector.Norm1() + if residual <= self.tolerance * tolerance_scale: + break - if iteration == 0: - tol0 = tol + xError = Epetra.Vector(L.RowMap()) - if (tol / tol0) <= self.tolerance: - break + Problem = Epetra.LinearProblem(L, xError, residualVector) + Solver = self.Factory.Create(text_to_native_str("Klu"), Problem) + Solver.Solve() - xError = Epetra.Vector(L.RowMap()) + x[:] = x - xError - Problem = Epetra.LinearProblem(L, xError, errorVector) - Solver = self.Factory.Create(text_to_native_str("Klu"), Problem) - Solver.Solve() + self._log.debug("END solve - {} ns".format(t.elapsed)) - x[:] = x - xError + self._setConvergence(suite="trilinos", + code=AztecOO.AZ_normal, + iterations=iteration+1, + residual=float(residual)) - self._log.debug("END solve - {} ns".format(t.elapsed)) + self.convergence.warn() - self._log.debug('iterations: %d / %d', iteration+1, self.iterations) - self._log.debug('residual: %s', errorVector.Norm2()) + return x diff --git a/fipy/solvers/trilinos/linearPCGSolver.py b/fipy/solvers/trilinos/linearPCGSolver.py index ca090c076b..46ef5ae024 100644 --- a/fipy/solvers/trilinos/linearPCGSolver.py +++ b/fipy/solvers/trilinos/linearPCGSolver.py @@ -18,19 +18,9 @@ class LinearPCGSolver(TrilinosAztecOOSolver): """ - def __init__(self, tolerance=1e-10, iterations=1000, precon=MultilevelDDPreconditioner()): - """ - Parameters - ---------- - tolerance : float - Required error tolerance. - iterations : int - Maximum number of iterative steps to perform. - precon : ~fipy.solvers.trilinos.preconditioners.preconditioner.Preconditioner - """ - TrilinosAztecOOSolver.__init__(self, tolerance=tolerance, - iterations=iterations, precon=precon) - self.solver = AztecOO.AZ_cg + solver = AztecOO.AZ_cg + + DEFAULT_PRECONDITIONER = MultilevelDDPreconditioner def _canSolveAsymmetric(self): return False diff --git a/fipy/solvers/trilinos/preconditioners/__init__.py b/fipy/solvers/trilinos/preconditioners/__init__.py index bfc112fa4a..129d479c34 100644 --- a/fipy/solvers/trilinos/preconditioners/__init__.py +++ b/fipy/solvers/trilinos/preconditioners/__init__.py @@ -1,10 +1,25 @@ from __future__ import unicode_literals -from fipy.solvers.trilinos.preconditioners.multilevelDDPreconditioner import * -from fipy.solvers.trilinos.preconditioners.multilevelSAPreconditioner import * -from fipy.solvers.trilinos.preconditioners.multilevelDDMLPreconditioner import * -from fipy.solvers.trilinos.preconditioners.multilevelNSSAPreconditioner import * -from fipy.solvers.trilinos.preconditioners.jacobiPreconditioner import * -from fipy.solvers.trilinos.preconditioners.icPreconditioner import * -from fipy.solvers.trilinos.preconditioners.domDecompPreconditioner import * -from fipy.solvers.trilinos.preconditioners.multilevelSGSPreconditioner import * -from fipy.solvers.trilinos.preconditioners.multilevelSolverSmootherPreconditioner import * + +from .domDecompPreconditioner import * +from .icPreconditioner import * +from .iluPreconditioner import * +from .jacobiPreconditioner import * +from .multilevelDDPreconditioner import * +from .multilevelDDMLPreconditioner import * +from .multilevelNSSAPreconditioner import * +from .multilevelSAPreconditioner import * +from .multilevelSGSPreconditioner import * +from .multilevelSolverSmootherPreconditioner import * + +__all__ = [] +__all__.extend(domDecompPreconditioner.__all__) +__all__.extend(icPreconditioner.__all__) +__all__.extend(iluPreconditioner.__all__) 
+__all__.extend(jacobiPreconditioner.__all__) +__all__.extend(icPreconditioner.__all__) +__all__.extend(multilevelDDPreconditioner.__all__) +__all__.extend(multilevelDDMLPreconditioner.__all__) +__all__.extend(multilevelNSSAPreconditioner.__all__) +__all__.extend(multilevelSAPreconditioner.__all__) +__all__.extend(multilevelSGSPreconditioner.__all__) +__all__.extend(multilevelSolverSmootherPreconditioner.__all__) diff --git a/fipy/solvers/trilinos/preconditioners/domDecompPreconditioner.py b/fipy/solvers/trilinos/preconditioners/domDecompPreconditioner.py index 5195794362..24feb88548 100644 --- a/fipy/solvers/trilinos/preconditioners/domDecompPreconditioner.py +++ b/fipy/solvers/trilinos/preconditioners/domDecompPreconditioner.py @@ -3,17 +3,15 @@ from PyTrilinos import AztecOO -from fipy.solvers.trilinos.preconditioners.preconditioner import Preconditioner +from .trilinosPreconditioner import TrilinosPreconditioner __all__ = ["DomDecompPreconditioner"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class DomDecompPreconditioner(Preconditioner): +class DomDecompPreconditioner(TrilinosPreconditioner): + """Domain Decomposition preconditioner for :class:`~fipy.solvers.trilinos.trilinosSolver.TrilinosSolver`. """ - Domain Decomposition preconditioner for Trilinos solvers. - """ - - def _ApplyToSolver(self, solver, matrix): + def _applyToSolver(self, solver, matrix): solver.SetAztecOption(AztecOO.AZ_precond, AztecOO.AZ_dom_decomp) diff --git a/fipy/solvers/trilinos/preconditioners/icPreconditioner.py b/fipy/solvers/trilinos/preconditioners/icPreconditioner.py index 82f4fc2faa..adf4c985d7 100644 --- a/fipy/solvers/trilinos/preconditioners/icPreconditioner.py +++ b/fipy/solvers/trilinos/preconditioners/icPreconditioner.py @@ -3,16 +3,14 @@ from PyTrilinos import IFPACK -from fipy.solvers.trilinos.preconditioners.preconditioner import Preconditioner +from .trilinosPreconditioner import TrilinosPreconditioner __all__ = ["ICPreconditioner"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class ICPreconditioner(Preconditioner): - """ - Incomplete Cholesky Preconditioner from IFPACK for Trilinos Solvers. - +class ICPreconditioner(TrilinosPreconditioner): + """Incomplete Cholesky Preconditioner from IFPACK for :class:`~fipy.solvers.trilinos.trilinosSolver.TrilinosSolver`. """ def _applyToSolver(self, solver, matrix): diff --git a/fipy/solvers/trilinos/preconditioners/iluPreconditioner.py b/fipy/solvers/trilinos/preconditioners/iluPreconditioner.py new file mode 100644 index 0000000000..3ef401ef59 --- /dev/null +++ b/fipy/solvers/trilinos/preconditioners/iluPreconditioner.py @@ -0,0 +1,18 @@ +from __future__ import unicode_literals +__docformat__ = 'restructuredtext' + +from PyTrilinos import AztecOO + +from .domDecompPreconditioner import DomDecompPreconditioner + +__all__ = ["ILUPreconditioner"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] + +class ILUPreconditioner(DomDecompPreconditioner): + """ILU Domain Decomposition preconditioner for :class:`~fipy.solvers.trilinos.trilinosSolver.TrilinosSolver`. 
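+
+    A minimal usage sketch (illustrative only; it assumes the Trilinos
+    suite is active and uses general FiPy objects that are not introduced
+    by this change)::
+
+        from fipy import CellVariable, Grid1D, DiffusionTerm
+        from fipy.solvers.trilinos import LinearGMRESSolver
+        from fipy.solvers.trilinos.preconditioners import ILUPreconditioner
+
+        mesh = Grid1D(nx=10)
+        phi = CellVariable(mesh=mesh)
+        phi.constrain(0., mesh.facesLeft)
+        phi.constrain(1., mesh.facesRight)
+
+        # the preconditioner is passed through the solver's ``precon`` argument
+        solver = LinearGMRESSolver(precon=ILUPreconditioner())
+        DiffusionTerm(coeff=1.).solve(var=phi, solver=solver)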
+ """ + + def _applyToSolver(self, solver, matrix): + super(ILUPreconditioner, self)._applyToSolver(solver, matrix) + solver.SetAztecOption(AztecOO.AZ_subdomain_solve, AztecOO.AZ_ilu) diff --git a/fipy/solvers/trilinos/preconditioners/jacobiPreconditioner.py b/fipy/solvers/trilinos/preconditioners/jacobiPreconditioner.py index b4ea2912e9..ff074a90c7 100644 --- a/fipy/solvers/trilinos/preconditioners/jacobiPreconditioner.py +++ b/fipy/solvers/trilinos/preconditioners/jacobiPreconditioner.py @@ -3,16 +3,14 @@ from PyTrilinos import AztecOO -from fipy.solvers.trilinos.preconditioners.preconditioner import Preconditioner +from .trilinosPreconditioner import TrilinosPreconditioner __all__ = ["JacobiPreconditioner"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class JacobiPreconditioner(Preconditioner): - """ - Jacobi Preconditioner for Trilinos solvers. - +class JacobiPreconditioner(TrilinosPreconditioner): + """Jacobi preconditioner for :class:`~fipy.solvers.trilinos.trilinosSolver.TrilinosSolver`. """ def _applyToSolver(self, solver, matrix): diff --git a/fipy/solvers/trilinos/preconditioners/multilevelDDMLPreconditioner.py b/fipy/solvers/trilinos/preconditioners/multilevelDDMLPreconditioner.py index b7b7a9d583..4e31696295 100644 --- a/fipy/solvers/trilinos/preconditioners/multilevelDDMLPreconditioner.py +++ b/fipy/solvers/trilinos/preconditioners/multilevelDDMLPreconditioner.py @@ -1,43 +1,43 @@ from __future__ import unicode_literals __docformat__ = 'restructuredtext' -from PyTrilinos import ML - -from fipy.solvers.trilinos.preconditioners.preconditioner import Preconditioner +from .multilevelPreconditioner import MultilevelPreconditioner __all__ = ["MultilevelDDMLPreconditioner"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class MultilevelDDMLPreconditioner(Preconditioner): - """ - Multilevel preconditioner for Trilinos solvers. 3-level algebraic domain decomposition. +class MultilevelDDMLPreconditioner(MultilevelPreconditioner): + """3-level algebraic domain decomposition multilevel preconditioner for :class:`~fipy.solvers.trilinos.trilinosSolver.TrilinosSolver`. """ - def _applyToSolver(self, solver, matrix): - if matrix.NumGlobalNonzeros() <= matrix.NumGlobalRows(): - return - - self.Prec = ML.MultiLevelPreconditioner(matrix, False) - - self.Prec.SetParameterList({text_to_native_str("output"): 0, - text_to_native_str("max levels") : 3, - text_to_native_str("prec type") : text_to_native_str("MGV"), - text_to_native_str("increasing or decreasing") : text_to_native_str("increasing"), - text_to_native_str("aggregation: type") : text_to_native_str("METIS"), - text_to_native_str("aggregation: nodes per aggregate") : 512, - text_to_native_str("aggregation: next-level aggregates per process") : 128, - text_to_native_str("aggregation: damping factor") : 4. 
/ 3., - text_to_native_str("eigen-analysis: type") : text_to_native_str("power-method"), - text_to_native_str("eigen-analysis: iterations") : 20, - text_to_native_str("smoother: sweeps") : 1, - text_to_native_str("smoother: pre or post") : text_to_native_str("both"), - text_to_native_str("smoother: type") : text_to_native_str("Aztec"), - text_to_native_str("smoother: Aztec as solver") : False, - text_to_native_str("coarse: type") : text_to_native_str("Amesos-KLU"), - text_to_native_str("coarse: max size") : 128 - }) - - self.Prec.ComputePreconditioner() - - solver.SetPrecOperator(self.Prec) + def __init__(self, levels=3): + """ + + Parameters + ---------- + levels : int + Maximum number of levels + """ + self.levels = levels + + @property + def _parameterList(self): + return { + "output": 0, + "max levels": self.levels, + "prec type": "MGV", + "increasing or decreasing": "increasing", + "aggregation: type": "METIS", + "aggregation: nodes per aggregate": 512, + "aggregation: next-level aggregates per process": 128, + "aggregation: damping factor" : 4. / 3., + "eigen-analysis: type": "power-method", + "eigen-analysis: iterations": 20, + "smoother: sweeps": 1, + "smoother: pre or post": "both", + "smoother: type": "Aztec", + "smoother: Aztec as solver": False, + "coarse: type": "Amesos-KLU", + "coarse: max size": 128 + } diff --git a/fipy/solvers/trilinos/preconditioners/multilevelDDPreconditioner.py b/fipy/solvers/trilinos/preconditioners/multilevelDDPreconditioner.py index 2dcd19e4ce..81394a5ef2 100644 --- a/fipy/solvers/trilinos/preconditioners/multilevelDDPreconditioner.py +++ b/fipy/solvers/trilinos/preconditioners/multilevelDDPreconditioner.py @@ -1,43 +1,42 @@ from __future__ import unicode_literals __docformat__ = 'restructuredtext' -from PyTrilinos import ML - -from fipy.solvers.trilinos.preconditioners.preconditioner import Preconditioner +from .multilevelPreconditioner import MultilevelPreconditioner __all__ = ["MultilevelDDPreconditioner"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class MultilevelDDPreconditioner(Preconditioner): - """ - Multilevel preconditioner for Trilinos solvers. A classical smoothed - aggregation-based 2-level domain decomposition. +class MultilevelDDPreconditioner(MultilevelPreconditioner): + """Classical smoothed aggregation-based 2-level domain decomposition preconditioner for :class:`~fipy.solvers.trilinos.trilinosSolver.TrilinosSolver`. """ - def _applyToSolver(self, solver, matrix): - if matrix.NumGlobalNonzeros() <= matrix.NumGlobalRows(): - return - - self.Prec = ML.MultiLevelPreconditioner(matrix, False) - - self.Prec.SetParameterList({text_to_native_str("output"): 0, - text_to_native_str("max levels") : 2, - text_to_native_str("prec type") : text_to_native_str("MGV"), - text_to_native_str("increasing or decreasing") : text_to_native_str("increasing"), - text_to_native_str("aggregation: type") : text_to_native_str("METIS"), - text_to_native_str("aggregation: local aggregates") : 1, - text_to_native_str("aggregation: damping factor") : 4. 
/ 3., - text_to_native_str("eigen-analysis: type") : text_to_native_str("power-method"), - text_to_native_str("eigen-analysis: iterations") : 20, - text_to_native_str("smoother: sweeps") : 1, - text_to_native_str("smoother: pre or post") : text_to_native_str("both"), - text_to_native_str("smoother: type") : text_to_native_str("Aztec"), - text_to_native_str("smoother: Aztec as solver") : False, - text_to_native_str("coarse: type") : text_to_native_str("Amesos-KLU"), - text_to_native_str("coarse: max size") : 128 - }) - - self.Prec.ComputePreconditioner() - - solver.SetPrecOperator(self.Prec) + def __init__(self, levels=2): + """ + + Parameters + ---------- + levels : int + Maximum number of levels + """ + self.levels = levels + + @property + def _parameterList(self): + return { + "output": 0, + "max levels": self.levels, + "prec type": "MGV", + "increasing or decreasing": "increasing", + "aggregation: type": "METIS", + "aggregation: local aggregates": 1, + "aggregation: damping factor" : 4. / 3., + "eigen-analysis: type": "power-method", + "eigen-analysis: iterations": 20, + "smoother: sweeps": 1, + "smoother: pre or post": "both", + "smoother: type": "Aztec", + "smoother: Aztec as solver": False, + "coarse: type": "Amesos-KLU", + "coarse: max size": 128 + } diff --git a/fipy/solvers/trilinos/preconditioners/multilevelNSSAPreconditioner.py b/fipy/solvers/trilinos/preconditioners/multilevelNSSAPreconditioner.py index 5c3bc24b44..8e7a64dc66 100644 --- a/fipy/solvers/trilinos/preconditioners/multilevelNSSAPreconditioner.py +++ b/fipy/solvers/trilinos/preconditioners/multilevelNSSAPreconditioner.py @@ -1,41 +1,33 @@ from __future__ import unicode_literals __docformat__ = 'restructuredtext' -from PyTrilinos import ML - -from fipy.solvers.trilinos.preconditioners.preconditioner import Preconditioner +from .multilevelPreconditioner import MultilevelPreconditioner __all__ = ["MultilevelNSSAPreconditioner"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class MultilevelNSSAPreconditioner(Preconditioner): - """ - Energy-based minimizing smoothed aggregation suitable for highly - convective non-symmetric fluid flow problems. - """ - def _applyToSolver(self, solver, matrix): - if matrix.NumGlobalNonzeros() <= matrix.NumGlobalRows(): - return - - self.Prec = ML.MultiLevelPreconditioner(matrix, False) - - self.Prec.SetParameterList({text_to_native_str("output"): 0, - text_to_native_str("max levels") : 10, - text_to_native_str("prec type") : text_to_native_str("MGW"), - text_to_native_str("increasing or decreasing") : text_to_native_str("increasing"), - text_to_native_str("aggregation: type") : text_to_native_str("Uncoupled-MIS"), - text_to_native_str("energy minimization: enable") : True, - text_to_native_str("eigen-analysis: type") : text_to_native_str("power-method"), - text_to_native_str("eigen-analysis: iterations") : 20, - text_to_native_str("smoother: sweeps") : 4, - text_to_native_str("smoother: damping factor") : 0.67, - text_to_native_str("smoother: pre or post") : text_to_native_str("post"), - text_to_native_str("smoother: type") : text_to_native_str("symmetric Gauss-Seidel"), - text_to_native_str("coarse: type") : text_to_native_str("Amesos-KLU"), - text_to_native_str("coarse: max size") : 256 - }) +class MultilevelNSSAPreconditioner(MultilevelPreconditioner): + """Energy-based minimizing smoothed aggregation preconditioner for :class:`~fipy.solvers.trilinos.trilinosSolver.TrilinosSolver`. 
- self.Prec.ComputePreconditioner() + Suitable for highly convective non-symmetric fluid flow problems. + """ - solver.SetPrecOperator(self.Prec) + @property + def _parameterList(self): + return { + "output": 0, + "max levels": self.levels, + "prec type": "MGW", + "increasing or decreasing": "increasing", + "aggregation: type": "Uncoupled-MIS", + "energy minimization: enable": True, + "eigen-analysis: type": "power-method", + "eigen-analysis: iterations": 20, + "smoother: sweeps": 4, + "smoother: damping factor": 0.67, + "smoother: pre or post": "post", + "smoother: type": "symmetric Gauss-Seidel", + "coarse: type": "Amesos-KLU", + "coarse: max size": 256 + } diff --git a/fipy/solvers/trilinos/preconditioners/multilevelPreconditioner.py b/fipy/solvers/trilinos/preconditioners/multilevelPreconditioner.py new file mode 100644 index 0000000000..395b5e8bc4 --- /dev/null +++ b/fipy/solvers/trilinos/preconditioners/multilevelPreconditioner.py @@ -0,0 +1,48 @@ +from __future__ import unicode_literals +__docformat__ = 'restructuredtext' + +from PyTrilinos import ML + +from .trilinosPreconditioner import TrilinosPreconditioner + +__all__ = ["MultilevelPreconditioner"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] + +class MultilevelPreconditioner(TrilinosPreconditioner): + """Base class for multilevel preconditioners for :class:`~fipy.solvers.trilinos.trilinosSolver.TrilinosSolver`. + """ + + def __init__(self, levels=10): + """ + Parameters + ---------- + levels : int + Maximum number of levels + """ + self.levels = levels + + @property + def _parameterList(self): + """Trilinos preconditioner parameters. + + Implemented as a property to avoid + `side-effects `_. + + Returns + ------- + dict + """ + raise NotImplementedError + + def _applyToSolver(self, solver, matrix): + if matrix.NumGlobalNonzeros() <= matrix.NumGlobalRows(): + return + + self.Prec = ML.MultiLevelPreconditioner(matrix, False) + + self.Prec.SetParameterList(self._parameterList) + + self.Prec.ComputePreconditioner() + + solver.SetPrecOperator(self.Prec) diff --git a/fipy/solvers/trilinos/preconditioners/multilevelSAPreconditioner.py b/fipy/solvers/trilinos/preconditioners/multilevelSAPreconditioner.py index d5442f1e01..18d6c8c2c2 100644 --- a/fipy/solvers/trilinos/preconditioners/multilevelSAPreconditioner.py +++ b/fipy/solvers/trilinos/preconditioners/multilevelSAPreconditioner.py @@ -1,47 +1,34 @@ from __future__ import unicode_literals __docformat__ = 'restructuredtext' -from PyTrilinos import ML - -from fipy.solvers.trilinos.preconditioners.preconditioner import Preconditioner +from .multilevelPreconditioner import MultilevelPreconditioner __all__ = ["MultilevelSAPreconditioner"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class MultilevelSAPreconditioner(Preconditioner): - """ - Multilevel preconditioner for Trilinos solvers suitable classical - smoothed aggregation for symmetric positive definite or nearly - symmetric positive definite systems. - """ - - def _applyToSolver(self, solver, matrix): - if matrix.NumGlobalNonzeros() <= matrix.NumGlobalRows(): - return +class MultilevelSAPreconditioner(MultilevelPreconditioner): + """Classical smoothed aggregation multilevel preconditioner for :class:`~fipy.solvers.trilinos.trilinosSolver.TrilinosSolver`. 
- self.Prec = ML.MultiLevelPreconditioner(matrix, False) - - self.Prec.SetParameterList({text_to_native_str("output"): 0, - text_to_native_str("max levels") : 10, - text_to_native_str("prec type") : text_to_native_str("MGV"), - text_to_native_str("increasing or decreasing") : text_to_native_str("increasing"), - text_to_native_str("aggregation: type") : text_to_native_str("Uncoupled-MIS"), - text_to_native_str("aggregation: damping factor") : 4. / 3., -## "energy minimization: enable" : False, -## "smoother: type" : "Aztec", -## "smoother: type" : "symmetric Gauss-Seidel", -## "eigen-analysis: type" : "power-method", - text_to_native_str("eigen-analysis: type") : text_to_native_str("cg"), - text_to_native_str("eigen-analysis: iterations") : 10, - text_to_native_str("smoother: sweeps") : 2, - text_to_native_str("smoother: damping factor") : 1.0, - text_to_native_str("smoother: pre or post") : text_to_native_str("both"), - text_to_native_str("smoother: type") : text_to_native_str("symmetric Gauss-Seidel"), - text_to_native_str("coarse: type") : text_to_native_str("Amesos-KLU"), - text_to_native_str("coarse: max size") : 128 - }) - - self.Prec.ComputePreconditioner() + Suitable for symmetric positive definite or nearly symmetric positive + definite systems. + """ - solver.SetPrecOperator(self.Prec) + @property + def _parameterList(self): + return { + "output": 0, + "max levels": self.levels, + "prec type": "MGV", + "increasing or decreasing": "increasing", + "aggregation: type": "Uncoupled-MIS", + "aggregation: damping factor" : 4. / 3., + "eigen-analysis: type": "cg", + "eigen-analysis: iterations": 10, + "smoother: sweeps": 2, + "smoother: damping factor": 1.0, + "smoother: pre or post": "both", + "smoother: type": "symmetric Gauss-Seidel", + "coarse: type": "Amesos-KLU", + "coarse: max size": 128 + } diff --git a/fipy/solvers/trilinos/preconditioners/multilevelSGSPreconditioner.py b/fipy/solvers/trilinos/preconditioners/multilevelSGSPreconditioner.py index 96e68c9922..de3c98c848 100644 --- a/fipy/solvers/trilinos/preconditioners/multilevelSGSPreconditioner.py +++ b/fipy/solvers/trilinos/preconditioners/multilevelSGSPreconditioner.py @@ -1,32 +1,20 @@ from __future__ import unicode_literals __docformat__ = 'restructuredtext' -from PyTrilinos import ML - -from fipy.solvers.trilinos.preconditioners.preconditioner import Preconditioner +from .multilevelPreconditioner import MultilevelPreconditioner __all__ = ["MultilevelSGSPreconditioner"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class MultilevelSGSPreconditioner(Preconditioner): - """ - Multilevel preconditioner for Trilinos solvers using Symmetric Gauss-Seidel smoothing. - +class MultilevelSGSPreconditioner(MultilevelPreconditioner): + """Multilevel preconditioner using Symmetric Gauss-Seidel smoothing for :class:`~fipy.solvers.trilinos.trilinosSolver.TrilinosSolver`. 
""" - def __init__(self, levels=10): - """ - Initialize the multilevel preconditioner - - - `levels`: Maximum number of levels - """ - self.levels = levels - - def _applyToSolver(self, solver, matrix): - if matrix.NumGlobalNonzeros() <= matrix.NumGlobalRows(): - return - self.Prec = ML.MultiLevelPreconditioner(matrix, False) - self.Prec.SetParameterList({text_to_native_str("output"): 0, text_to_native_str("smoother: type") : text_to_native_str("symmetric Gauss-Seidel")}) - self.Prec.ComputePreconditioner() - solver.SetPrecOperator(self.Prec) + @property + def _parameterList(self): + return { + "output": 0, + "max levels": self.levels, + "smoother: type": "symmetric Gauss-Seidel", + } diff --git a/fipy/solvers/trilinos/preconditioners/multilevelSolverSmootherPreconditioner.py b/fipy/solvers/trilinos/preconditioners/multilevelSolverSmootherPreconditioner.py index 956e7b1b66..254df0e46e 100644 --- a/fipy/solvers/trilinos/preconditioners/multilevelSolverSmootherPreconditioner.py +++ b/fipy/solvers/trilinos/preconditioners/multilevelSolverSmootherPreconditioner.py @@ -3,31 +3,21 @@ from PyTrilinos import ML -from fipy.solvers.trilinos.preconditioners.preconditioner import Preconditioner +from .multilevelPreconditioner import MultilevelPreconditioner __all__ = ["MultilevelSolverSmootherPreconditioner"] from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] -class MultilevelSolverSmootherPreconditioner(Preconditioner): +class MultilevelSolverSmootherPreconditioner(MultilevelPreconditioner): + """Multilevel preconditioner using Aztec solvers as smoothers for :class:`~fipy.solvers.trilinos.trilinosSolver.TrilinosSolver`. """ - Multilevel preconditioner for Trilinos solvers using Aztec solvers - as smoothers. - """ - def __init__(self, levels=10): - """ - Initialize the multilevel preconditioner - - - `levels`: Maximum number of levels - """ - self.levels = levels - - def _applyToSolver(self, solver, matrix): - if matrix.NumGlobalNonzeros() <= matrix.NumGlobalRows(): - return - - self.Prec = ML.MultiLevelPreconditioner(matrix, False) - self.Prec.SetParameterList({text_to_native_str("output"): 0, text_to_native_str("smoother: type") : text_to_native_str("Aztec"), text_to_native_str("smoother: Aztec as solver") : True}) - self.Prec.ComputePreconditioner() - solver.SetPrecOperator(self.Prec) + @property + def _parameterList(self): + return { + "output": 0, + "max levels": self.levels, + "smoother: type": "Aztec", + "smoother: Aztec as solver": True + } diff --git a/fipy/solvers/trilinos/preconditioners/preconditioner.py b/fipy/solvers/trilinos/preconditioners/preconditioner.py deleted file mode 100644 index 3b57be7cf4..0000000000 --- a/fipy/solvers/trilinos/preconditioners/preconditioner.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import unicode_literals -from builtins import object -__docformat__ = 'restructuredtext' - -__all__ = ["Preconditioner"] -from future.utils import text_to_native_str -__all__ = [text_to_native_str(n) for n in __all__] - -class Preconditioner(object): - """ - The base Preconditioner class. - - .. attention:: This class is abstract. Always create one of its subclasses. - """ - - def __init__(self): - """ - Create a `Preconditioner` object. 
- """ - if self.__class__ is Preconditioner: - raise NotImplementedError("can't instantiate abstract base class") - - def _applyToSolver(self, solver, matrix): - raise NotImplementedError diff --git a/fipy/solvers/trilinos/preconditioners/trilinosPreconditioner.py b/fipy/solvers/trilinos/preconditioners/trilinosPreconditioner.py new file mode 100644 index 0000000000..597765e01e --- /dev/null +++ b/fipy/solvers/trilinos/preconditioners/trilinosPreconditioner.py @@ -0,0 +1,16 @@ +from __future__ import unicode_literals +__docformat__ = 'restructuredtext' + +from fipy.solvers.preconditioner import SolverModifyingPreconditioner + +__all__ = ["TrilinosPreconditioner"] +from future.utils import text_to_native_str +__all__ = [text_to_native_str(n) for n in __all__] + +class TrilinosPreconditioner(SolverModifyingPreconditioner): + """Base class of preconditioners for :class:`~fipy.solvers.trilinos.trilinosSolver.TrilinosSolver`. + + .. attention:: This class is abstract. Always create one of its subclasses. + """ + + pass diff --git a/fipy/solvers/trilinos/trilinosAztecOOSolver.py b/fipy/solvers/trilinos/trilinosAztecOOSolver.py index 697aa60900..333c9538b1 100644 --- a/fipy/solvers/trilinos/trilinosAztecOOSolver.py +++ b/fipy/solvers/trilinos/trilinosAztecOOSolver.py @@ -3,15 +3,8 @@ from PyTrilinos import AztecOO -_reason = {AztecOO.AZ_normal : 'AztecOO.AZ_normal', - AztecOO.AZ_param : 'AztecOO.AZ_param', - AztecOO.AZ_breakdown : 'AztecOO.AZ_breakdown', - AztecOO.AZ_loss : 'AztecOO.AZ_loss', - AztecOO.AZ_ill_cond : 'AztecOO.AZ_ill_cond', - AztecOO.AZ_maxits : 'AztecOO.AZ_maxits'} - -from fipy.solvers.trilinos.trilinosSolver import TrilinosSolver -from fipy.solvers.trilinos.preconditioners.jacobiPreconditioner import JacobiPreconditioner +from .trilinosSolver import TrilinosSolver +from .preconditioners.jacobiPreconditioner import JacobiPreconditioner from fipy.tools.timer import Timer __all__ = ["TrilinosAztecOOSolver"] @@ -26,46 +19,71 @@ class TrilinosAztecOOSolver(TrilinosSolver): """ - def __init__(self, tolerance=1e-10, iterations=1000, precon=JacobiPreconditioner()): - """ + DEFAULT_PRECONDITIONER = JacobiPreconditioner + + def _adaptLegacyTolerance(self, L, x, b): + return self._adaptInitialTolerance(L, x, b) + + def _adaptUnscaledTolerance(self, L, x, b): + return (1., AztecOO.AZ_noscaled) + + def _adaptRHSTolerance(self, L, x, b): + return (1., AztecOO.AZ_rhs) + + def _adaptMatrixTolerance(self, L, x, b): + return (1., AztecOO.AZ_Anorm) + + def _adaptInitialTolerance(self, L, x, b): + return (1., AztecOO.AZ_r0) + + def _adaptSolutionTolerance(self, L, x, b): + return (1., AztecOO.AZ_sol) + + def _solve_(self, L, x, b): + """Solve system of equations posed for PyTrilinos + Parameters ---------- - tolerance : float - Required error tolerance. - iterations : int - Maximum number of iterative steps to perform. 
- precon : ~fipy.solvers.trilinos.preconditioners.preconditioner.Preconditioner + L : Epetra.CrsMatrix + Sparse matrix + x : Epetra.Vector + Solution variable as non-ghosted vector + b : Epetra.Vector + Right-hand side as non-ghosted vector + + Returns + ------- + x : Epetra.Vector + Solution variable as non-ghosted vector """ - if self.__class__ is TrilinosAztecOOSolver: - raise NotImplementedError("can't instantiate abstract base class") - TrilinosSolver.__init__(self, tolerance=tolerance, - iterations=iterations, precon=None) - self.preconditioner = precon + solver = AztecOO.AztecOO(L, x, b) + solver.SetAztecOption(AztecOO.AZ_solver, self.solver) - def _solve_(self, L, x, b): +## solver.SetAztecOption(AztecOO.AZ_kspace, 30) - Solver = AztecOO.AztecOO(L, x, b) - Solver.SetAztecOption(AztecOO.AZ_solver, self.solver) + solver.SetAztecOption(AztecOO.AZ_output, AztecOO.AZ_none) -## Solver.SetAztecOption(AztecOO.AZ_kspace, 30) + tolerance_scale, suite_criterion = self._adaptTolerance(L, x, b) - Solver.SetAztecOption(AztecOO.AZ_output, AztecOO.AZ_none) + rtol = self.scale_tolerance(self.tolerance, tolerance_scale) + + solver.SetAztecOption(AztecOO.AZ_conv, suite_criterion) self._log.debug("BEGIN precondition") with Timer() as t: - if self.preconditioner is not None: - self.preconditioner._applyToSolver(solver=Solver, matrix=L) + if self.preconditioner is None: + solver.SetAztecOption(AztecOO.AZ_precond, AztecOO.AZ_none) else: - Solver.SetAztecOption(AztecOO.AZ_precond, AztecOO.AZ_none) + self.preconditioner._applyToSolver(solver=solver, matrix=L) self._log.debug("END precondition - {} ns".format(t.elapsed)) self._log.debug("BEGIN solve") with Timer() as t: - output = Solver.Iterate(self.iterations, self.tolerance) + solver.Iterate(self.iterations, rtol) self._log.debug("END solve - {} ns".format(t.elapsed)) @@ -73,12 +91,18 @@ def _solve_(self, L, x, b): if hasattr(self.preconditioner, 'Prec'): del self.preconditioner.Prec - status = Solver.GetAztecStatus() - self._log.debug('iterations: %d / %d', status[AztecOO.AZ_its], self.iterations) - self._log.debug('failure: %s', _reason[status[AztecOO.AZ_why]]) - self._log.debug('AztecOO.AZ_r: %s', status[AztecOO.AZ_r]) - self._log.debug('AztecOO.AZ_scaled_r: %s', status[AztecOO.AZ_scaled_r]) - self._log.debug('AztecOO.AZ_solve_time: %s', status[AztecOO.AZ_solve_time]) - self._log.debug('AztecOO.AZ_Aztec_version: %s', status[AztecOO.AZ_Aztec_version]) + status = solver.GetAztecStatus() + + self._setConvergence(suite="trilinos", + code=int(status[AztecOO.AZ_why]), + iterations=int(status[AztecOO.AZ_its]), + tolerance_scale=tolerance_scale, + residual=status[AztecOO.AZ_r], + scaled_residual=status[AztecOO.AZ_scaled_r], + convergence_residual=status[AztecOO.AZ_rec_r], + solve_time=status[AztecOO.AZ_solve_time], + Aztec_version=status[AztecOO.AZ_Aztec_version]) + + self.convergence.warn() - return output + return x diff --git a/fipy/solvers/trilinos/trilinosMLTest.py b/fipy/solvers/trilinos/trilinosMLTest.py index 60ea9e89b4..d17ca13d20 100644 --- a/fipy/solvers/trilinos/trilinosMLTest.py +++ b/fipy/solvers/trilinos/trilinosMLTest.py @@ -23,7 +23,7 @@ class TrilinosMLTest(TrilinosSolver): information about what ML preconditioner settings will work best. 
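+
+    For example, a hypothetical test of a two-level configuration (the
+    option names below are taken from the ML parameter lists used by the
+    multilevel preconditioners in this package, not prescribed by this
+    class) might look like::
+
+        from fipy.solvers.trilinos.trilinosMLTest import TrilinosMLTest
+
+        solver = TrilinosMLTest(MLOptions={"max levels": 2,
+                                           "smoother: type": "symmetric Gauss-Seidel"})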
""" - def __init__(self, tolerance=1e-10, iterations=5, MLOptions={}, testUnsupported = False): + def __init__(self, tolerance="default", iterations=5, MLOptions={}, testUnsupported=False): """ For detailed information on the possible parameters for ML, see http://trilinos.sandia.gov/packages/ml/documentation.html diff --git a/fipy/solvers/trilinos/trilinosNonlinearSolver.py b/fipy/solvers/trilinos/trilinosNonlinearSolver.py index a2028a5595..f6ebc16f83 100644 --- a/fipy/solvers/trilinos/trilinosNonlinearSolver.py +++ b/fipy/solvers/trilinos/trilinosNonlinearSolver.py @@ -100,7 +100,7 @@ class _DummyJacobianSolver(TrilinosSolver): __all__ = ["TrilinosNonlinearSolver"] class TrilinosNonlinearSolver(TrilinosSolver): - def __init__(self, equation, jacobian=None, tolerance=1e-10, iterations=1000, + def __init__(self, equation, jacobian=None, tolerance="default", iterations="default", printingOptions=None, solverOptions=None, linearSolverOptions=None, lineSearchOptions=None, directionOptions=None, newtonOptions=None): TrilinosSolver.__init__(self, tolerance=tolerance, iterations=iterations, precon=None) diff --git a/fipy/solvers/trilinos/trilinosSolver.py b/fipy/solvers/trilinos/trilinosSolver.py index 864269b8bf..77e3de2033 100644 --- a/fipy/solvers/trilinos/trilinosSolver.py +++ b/fipy/solvers/trilinos/trilinosSolver.py @@ -13,11 +13,6 @@ class TrilinosSolver(Solver): .. attention:: This class is abstract. Always create one of its subclasses. """ - def __init__(self, *args, **kwargs): - if self.__class__ is TrilinosSolver: - raise NotImplementedError("can't instantiate abstract base class") - else: - Solver.__init__(self, *args, **kwargs) def _storeMatrix(self, var, matrix, RHSvector): self.var = var @@ -29,7 +24,7 @@ def _storeMatrix(self, var, matrix, RHSvector): @property def _globalMatrixAndVectors(self): - if not hasattr(self, 'globalVectors'): + if not hasattr(self, '_globalVectors'): globalMatrix = self.matrix.asTrilinosMeshMatrix() mesh = self.var.mesh @@ -60,35 +55,69 @@ def _globalMatrixAndVectors(self): overlappingVector = Epetra.Vector(globalMatrix.colMap, self.var) - self.globalVectors = (globalMatrix, nonOverlappingVector, nonOverlappingRHSvector, overlappingVector) + self._globalVectors = (globalMatrix, nonOverlappingVector, nonOverlappingRHSvector, overlappingVector) - return self.globalVectors + return self._globalVectors def _deleteGlobalMatrixAndVectors(self): self.matrix.flush() - del self.globalVectors + del self._globalVectors - def _solve(self): - from fipy.terms import SolutionVariableNumberError + def _rhsNorm(self, L, x, b): + return float(b.Norm2()) - globalMatrix, nonOverlappingVector, nonOverlappingRHSvector, overlappingVector = self._globalMatrixAndVectors + def _matrixNorm(self, L, x, b): + return L.NormInf() + + def _residualVectorAndNorm(self, L, x, b): + # residualVector = L*x - b + residualVector = Epetra.Vector(L.RangeMap()) + L.Multiply(False, x, residualVector) + # If A is an Epetra.Vector with map M + # and B is an Epetra.Vector with map M + # and C = A - B + # then C is an Epetra.Vector with *no map* !!!?!?! 
+ residualVector -= b - if not (globalMatrix.rangeMap.SameAs(globalMatrix.domainMap) - and globalMatrix.rangeMap.SameAs(nonOverlappingVector.Map())): + return residualVector, float(residualVector.Norm2()) + + @property + def _Lxb(self): + """Matrix, solution vector, and right-hand side vector + + Returns + ------- + L : Epetra.CrsMatrix + Sparse matrix + x : Epetra.Vector + Solution variable as non-ghosted vector + b : Epetra.Vector + Right-hand side as non-ghosted vector + """ + L, x, b, _ = self._globalMatrixAndVectors + + if not (L.rangeMap.SameAs(L.domainMap) + and L.rangeMap.SameAs(x.Map())): + + from fipy.terms import SolutionVariableNumberError raise SolutionVariableNumberError - self._solve_(globalMatrix.matrix, - nonOverlappingVector, - nonOverlappingRHSvector) + return (L.matrix, x, b) + + def _scatterGhosts(self, x): + """Distribute ghost values (if any) across processes + """ + globalMatrix, _, _, overlappingVector = self._globalMatrixAndVectors - overlappingVector.Import(nonOverlappingVector, + overlappingVector.Import(x, Epetra.Import(globalMatrix.colMap, globalMatrix.domainMap), Epetra.Insert) - self.var.value = numerix.reshape(numerix.array(overlappingVector), self.var.shape) + return numerix.asarray(overlappingVector) + def _cleanup(self): self._deleteGlobalMatrixAndVectors() del self.var del self.RHSvector diff --git a/fipy/terms/abstractConvectionTerm.py b/fipy/terms/abstractConvectionTerm.py index 52c933f265..2270961154 100644 --- a/fipy/terms/abstractConvectionTerm.py +++ b/fipy/terms/abstractConvectionTerm.py @@ -11,6 +11,7 @@ from fipy.terms import AbstractBaseClassError from fipy.terms import VectorCoeffError from fipy.tools import numerix +from fipy.solvers import INDEX_TYPE class _AbstractConvectionTerm(FaceTerm): """ @@ -189,7 +190,8 @@ def divergence(face_value): ) - ids = self._reshapeIDs(var, numerix.arange(mesh.numberOfCells)) + ids = self._reshapeIDs(var, numerix.arange(mesh.numberOfCells, + dtype=INDEX_TYPE)) L.addAt(numerix.array(self.constraintL).ravel(), ids.ravel(), ids.swapaxes(0, 1).ravel()) b += numerix.reshape(self.constraintB.value, ids.shape).sum(0).ravel() diff --git a/fipy/terms/abstractDiffusionTerm.py b/fipy/terms/abstractDiffusionTerm.py index b6241d9ffe..64d158da37 100644 --- a/fipy/terms/abstractDiffusionTerm.py +++ b/fipy/terms/abstractDiffusionTerm.py @@ -7,11 +7,12 @@ import os from fipy import input +from fipy.solvers import INDEX_TYPE from fipy.terms.unaryTerm import _UnaryTerm from fipy.tools import numerix from fipy.terms import TermMultiplyError from fipy.terms import AbstractBaseClassError -from fipy.variables.faceVariable import FaceVariable +from fipy.variables import Variable, FaceVariable, CellVariable class _AbstractDiffusionTerm(_UnaryTerm): @@ -19,6 +20,9 @@ def __init__(self, coeff = (1.,), var=None): if self.__class__ is _AbstractDiffusionTerm: raise AbstractBaseClassError + self.constraintL = {} + self.constraintB = {} + if type(coeff) not in (type(()), type([])): coeff = [coeff] @@ -28,11 +32,9 @@ def __init__(self, coeff = (1.,), var=None): if len(coeff) > 0: self.nthCoeff = coeff[0] - from fipy.variables.variable import Variable if not isinstance(self.nthCoeff, Variable): - self.nthCoeff = Variable(value = self.nthCoeff) + self.nthCoeff = Variable(value=self.nthCoeff) - from fipy.variables.cellVariable import CellVariable if isinstance(self.nthCoeff, CellVariable): self.nthCoeff = self.nthCoeff.arithmeticFaceValue @@ -42,7 +44,7 @@ def __init__(self, coeff = (1.,), var=None): _UnaryTerm.__init__(self, coeff=coeff, 
var=var) if self.order > 0: - self.lowerOrderDiffusionTerm = self.__class__(coeff = coeff[1:]) + self.lowerOrderDiffusionTerm = self.__class__(coeff=coeff[1:], var=var) def __mul__(self, other): if isinstance(other, (int, float)): @@ -64,7 +66,7 @@ def __neg__(self): return self.__class__(coeff=negatedCoeff, var=self.var) - def __getBoundaryConditions(self, boundaryConditions): + def _getBoundaryConditions(self, boundaryConditions): higherOrderBCs = [] lowerOrderBCs = [] @@ -77,7 +79,7 @@ def __getBoundaryConditions(self, boundaryConditions): return higherOrderBCs, lowerOrderBCs - def __getRotationTensor(self, mesh): + def _getRotationTensor(self, mesh): if not hasattr(self, 'rotationTensor'): rotationTensor = FaceVariable(mesh=mesh, rank=2) @@ -85,8 +87,9 @@ def __getRotationTensor(self, mesh): rotationTensor[:, 0] = self._getNormals(mesh) if mesh.dim == 2: - rotationTensor[:, 1] = rotationTensor[:, 0].dot((((0, 1), (-1, 0)))) - elif mesh.dim ==3: + rotationTensor[:, 1] = rotationTensor[:, 0].dot(((( 0, 1), + (-1, 0)))) + elif mesh.dim == 3: epsilon = 1e-20 div = numerix.sqrt(1 - rotationTensor[2, 0]**2) @@ -94,7 +97,9 @@ def __getRotationTensor(self, mesh): rotationTensor[0, 1] = 1 rotationTensor[:, 1] = numerix.where(flag, - rotationTensor[:, 0].dot((((0, 1, 0), (-1, 0, 0), (0, 0, 0)))) / div, + rotationTensor[:, 0].dot(((( 0, 1, 0), + (-1, 0, 0), + ( 0, 0, 0)))) / div, rotationTensor[:, 1]) @@ -108,28 +113,106 @@ def __getRotationTensor(self, mesh): return self.rotationTensor - def __calcAnisotropySource(self, coeff, mesh, var): + def _isotropicOrthogonalCoeff(self, coeff, mesh): + """Geometric coefficient for isotropic diffusion on orthogonal mesh - if not hasattr(self, 'anisotropySource'): - if len(coeff) > 1: - unconstrainedVar = var + 0 - gradients = unconstrainedVar.grad.harmonicFaceValue.dot(self.__getRotationTensor(mesh)) - from fipy.variables.addOverFacesVariable import _AddOverFacesVariable - self.anisotropySource = _AddOverFacesVariable(gradients[1:].dot(coeff[1:])) * mesh.cellVolumes + Parameters + ---------- + coeff : array_like + Diffusion coefficient. + mesh : ~fipy.meshes.mesh.Mesh + Geometry and topology. + + Returns + ------- + ~fipy.variables.faceVariable.FaceVariable + Contribution to the matrix, consisting of a rank-1 vector at + each face, composed of the normal contribution. + + Notes + ----- + .. math:: + + \left(\Gamma_0 \frac{A_f}{d_{AP}}\right)^T + """ + if coeff.shape != () and not isinstance(coeff, FaceVariable): + # increase dimension of non-scalar coefficient such that + # it projects to each cell or face it applies to + coeff = coeff[..., numerix.newaxis] + + return (coeff * FaceVariable(mesh=mesh, value=mesh._faceAreas) + / mesh._cellDistances)[numerix.newaxis, ...] + + def _anisotropicOrNonorthogonalCoeff(self, coeff, mesh, anisotropicRank): + """Geometric coefficient for anisotropic diffusion or nonorthogonal mesh + + Parameters + ---------- + coeff : array_like + Diffusion coefficient. + mesh : ~fipy.meshes.mesh.Mesh + Geometry and topology. + anisotropicRank : int + Rank of the diffusion coefficient, adjusted for the rank of + the solution variable; values greater than zero are treated + as anisotropic. + + Returns + ------- + ~fipy.variables.faceVariable.FaceVariable + Contribution to the matrix, consisting of a rank-1 vector at + each face, composed of the normal contribution and the + tangential contribution(s). + + Notes + ----- + .. math:: + + \hat{n}\cdot\Gamma_0\cdot\mathsf{R} A_f + + where :math:`\mathsf{R}` is the rotation tensor. The normal + component of the rotation tensor is scaled by the cell distances. 
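+
+        As a rough sanity check (not part of the implementation): for a
+        diagonal coefficient :math:`\Gamma_0 = \mathrm{diag}(\Gamma_x, \Gamma_y)`
+        on an orthogonal 2-D mesh, the tangential contribution vanishes on
+        every face and the normal contribution reduces to the isotropic form,
+        e.g. :math:`\Gamma_x A_f / d_{AP}` on faces whose normal is
+        :math:`\hat{x}`.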
+ """ + if anisotropicRank < 2: + coeff = coeff * numerix.identity(mesh.dim) + + if anisotropicRank > 0: + shape = numerix.getShape(coeff) + if mesh.dim != shape[0] or mesh.dim != shape[1]: + raise IndexError('diffusion coefficient tensor is not an appropriate shape for this mesh') + + faceNormals = FaceVariable(mesh=mesh, rank=1, value=mesh.faceNormals) + rotationTensor = self._getRotationTensor(mesh) + rotationTensor[:, 0] = rotationTensor[:, 0] / mesh._cellDistances + + return faceNormals.dot(coeff).dot(rotationTensor) * mesh._faceAreas def _calcGeomCoeff(self, var): + """Geometric cofficient + + Combination of diffusion coefficient and geometric factor. + + Parameters + ---------- + var : ~fipy.variables.cellVariable.CellVariable + Solution variable. + + Returns + ------- + ~fipy.variables.faceVariable.FaceVariable + Contribution to the matrix, consisting of a rank-1 vector at + each face, composed of the normal contribution and (for + nonorthogonal meshes or anisotropic diffusion) the tangential + contribution(s). + """ mesh = var.mesh if self.nthCoeff is not None: coeff = self.nthCoeff - shape = numerix.getShape(coeff) - if isinstance(coeff, FaceVariable): rank = coeff.rank else: - rank = len(shape) + rank = len(numerix.getShape(coeff)) if var.rank == 0: anisotropicRank = rank @@ -140,41 +223,17 @@ def _calcGeomCoeff(self, var): if anisotropicRank == 0 and self._treatMeshAsOrthogonal(mesh): - if coeff.shape != () and not isinstance(coeff, FaceVariable): - coeff = coeff[..., numerix.newaxis] - - tmpBop = (coeff * FaceVariable(mesh=mesh, value=mesh._faceAreas) / mesh._cellDistances)[numerix.newaxis,:] + return self._isotropicOrthogonalCoeff(coeff, mesh) else: - if anisotropicRank == 1 or anisotropicRank == 0: - coeff = coeff * numerix.identity(mesh.dim) - - if anisotropicRank > 0: - shape = numerix.getShape(coeff) - if mesh.dim != shape[0] or mesh.dim != shape[1]: - raise IndexError('diffusion coefficient tensor is not an appropriate shape for this mesh') - - faceNormals = FaceVariable(mesh=mesh, rank=1, value=mesh.faceNormals) - rotationTensor = self.__getRotationTensor(mesh) - rotationTensor[:, 0] = rotationTensor[:, 0] / mesh._cellDistances - - tmpBop = faceNormals.dot(coeff).dot(rotationTensor) * mesh._faceAreas - - return tmpBop + return self._anisotropicOrNonorthogonalCoeff(coeff, mesh, anisotropicRank) else: return None - def _getCoefficientMatrixForTests(self, SparseMatrix, var, coeff): - """ - This method was introduced because `__getCoefficientMatrix` is private, but - the tests in `DiffusionTerm` need to call it. 
- """ - return self.__getCoefficientMatrix(SparseMatrix, var, coeff) - - def __getCoefficientMatrix(self, SparseMatrix, var, coeff): + def _getCoefficientMatrix(self, SparseMatrix, var, coeff): mesh = var.mesh id1, id2 = mesh._adjacentCellIDs @@ -186,70 +245,175 @@ def __getCoefficientMatrix(self, SparseMatrix, var, coeff): id1 = self._reshapeIDs(var, id1) id2 = self._reshapeIDs(var, id2) -## print 'id1',id1 -## print 'id2',id2 - facesPerCell = mesh._facesPerCell[..., mesh._localNonOverlappingCellIDs] - coefficientMatrix = SparseMatrix(mesh=mesh, nonZerosPerRow=facesPerCell + 1) + coefficientMatrix = self._getMatrix(SparseMatrix=SparseMatrix, mesh=mesh, nonZerosPerRow=facesPerCell + 1) interiorCoeff = numerix.take(coeff, interiorFaces, axis=-1).ravel() coefficientMatrix.addAt(interiorCoeff, id1.ravel(), id1.swapaxes(0, 1).ravel()) coefficientMatrix.addAt(-interiorCoeff, id1.ravel(), id2.swapaxes(0, 1).ravel()) coefficientMatrix.addAt(-interiorCoeff, id2.ravel(), id1.swapaxes(0, 1).ravel()) coefficientMatrix.addAt(interiorCoeff, id2.ravel(), id2.swapaxes(0, 1).ravel()) -## print 'coefficientMatrix',coefficientMatrix -## raw_input('stopped') + return coefficientMatrix -## interiorCoeff = numerix.array(coeff) + def _doBCs(self, SparseMatrix, higherOrderBCs, N, M, coeffs, coefficientMatrix, boundaryB): + for boundaryCondition in higherOrderBCs: + LL, bb = boundaryCondition._buildMatrix(SparseMatrix, N, M, coeffs) + if 'FIPY_DISPLAY_MATRIX' in os.environ: + self._viewer.title = r"%s %s" % (boundaryCondition.__class__.__name__, self.__class__.__name__) + self._viewer.plot(matrix=LL, RHSvector=bb) + from fipy import input + input() + coefficientMatrix += LL + boundaryB += bb -## interiorCoeff[...,mesh.exteriorFaces.value] = 0 -## print 'interiorCoeff',interiorCoeff -## interiorCoeff = numerix.take(interiorCoeff, mesh.cellFaceIDs, axis=-1) -## ## print interiorCoeff.shape -## ## print interiorCoeff[:,:,0] -## ## print interiorCoeff[:,:,1] + return coefficientMatrix, boundaryB -## coefficientMatrix = SparseMatrix(mesh=mesh, nonZerosPerRow=mesh._facesPerCell + 1) + def _constrainValue(self, var): + """Determine value constraint contributions to matrix and RHS -## ## print 'numerix.sum(interiorCoeff, -2)',numerix.sum(interiorCoeff, -2) -## ## print numerix.sum(interiorCoeff, -2).ravel() -## ## raw_input('stopped') -## ## coefficientMatrix.addAtDiagonal(numerix.sum(interiorCoeff, -2).ravel()) -## ## print 'coefficientMatrix',coefficientMatrix + Parameters + ---------- + var : ~fipy.variables.cellVariable.CellVariable + Constrained solution variable -## del interiorCoeff + Returns + ------- + L : ~fipy.matrices.sparseMatrix.SparseMatrix + The NxN sparse matrix contribution. + b : array_like + The length-N right-hand-side vector contribution. -## interiorFaces = mesh.interiorFaceIDs -## interiorFaceCellIDs = mesh.interiorFaceCellIDs + Notes + ----- + For the variable :math:`\phi`, with its value constrained to + :math:`\phi\rvert_{\partial\Omega_V} = V` on boundary faces + :math:`\partial\Omega_V`, determines the matrix contribution -## interiorCoeff = -numerix.take(coeff, interiorFaces, axis=-1) + .. 
math:: -## print 'interiorCoeff',interiorCoeff -## raw_input('stopped') -## coefficientMatrix.addAt(interiorCoeff, interiorFaceCellIDs[0], interiorFaceCellIDs[1]) -## interiorCoeff = -numerix.take(coeff, interiorFaces, axis=-1) -## coefficientMatrix.addAt(interiorCoeff, interiorFaceCellIDs[1], interiorFaceCellIDs[0]) + \begin{align} + \mathsf{L} &= -\nabla\cdot\left(\frac{\Gamma}{d_{fP}}\hat{n}\right)_{f\in\partial\Omega_V} V_P + \\ + &\approx -\sum_{f\in\partial\Omega_V}(\frac{\Gamma}{d_{fP}}\hat{n}\cdot\hat{n})_f A_f + \end{align} - return coefficientMatrix + and the right-hand-side vector contribution - def __bcAdd(self, coefficientMatrix, boundaryB, LL, bb): - coefficientMatrix += LL - boundaryB += bb + .. math:: - def __doBCs(self, SparseMatrix, higherOrderBCs, N, M, coeffs, coefficientMatrix, boundaryB): - for boundaryCondition in higherOrderBCs: - LL, bb = boundaryCondition._buildMatrix(SparseMatrix, N, M, coeffs) - if 'FIPY_DISPLAY_MATRIX' in os.environ: - self._viewer.title = r"%s %s" % (boundaryCondition.__class__.__name__, self.__class__.__name__) - self._viewer.plot(matrix=LL, RHSvector=bb) - from fipy import input - input() - self.__bcAdd(coefficientMatrix, boundaryB, LL, bb) + \begin{align} + \mathbf{b} &= -\nabla\cdot\left(\frac{\Gamma V}{d_{fP}}\hat{n}\right)_{f\in\partial\Omega_V} V_P + \\ + &\approx -\sum_{f\in\partial\Omega_V}(\frac{\Gamma V}{d_{fP}}\hat{n}\cdot\hat{n})_f A_f + \end{align} + """ + mesh = var.mesh + normals = FaceVariable(mesh=mesh, rank=1, value=mesh._orientedFaceNormals) - return coefficientMatrix, boundaryB + if len(var.shape) == 1 and len(self.nthCoeff.shape) > 1: + normalsNthCoeff = normals.dot(self.nthCoeff) + else: + + if self.nthCoeff.shape != () and not isinstance(self.nthCoeff, FaceVariable): + coeff = self.nthCoeff[..., numerix.newaxis] + else: + coeff = self.nthCoeff + + s = (slice(0, None, None),) + (numerix.newaxis,) * (len(coeff.shape) - 1) + (slice(0, None, None),) + normalsNthCoeff = coeff[numerix.newaxis] * normals[s] + + constrainedNormalsDotCoeffOverdAP = var.arithmeticFaceValue.constraintMask * \ + normalsNthCoeff / mesh._cellDistances + + L = -constrainedNormalsDotCoeffOverdAP.divergence * mesh.cellVolumes + b = -(constrainedNormalsDotCoeffOverdAP + * var.arithmeticFaceValue).divergence * mesh.cellVolumes + + return L, b + + def _constrainGradient(self, var): + """Determine gradient constraint contributions to matrix and RHS + + Parameters + ---------- + var : ~fipy.variables.cellVariable.CellVariable + Constrained solution variable of N cells. + + Returns + ------- + L : ~fipy.matrices.sparseMatrix.SparseMatrix + The NxN sparse matrix contribution. + b : array_like + The length-N right-hand-side vector contribution + + Notes + ----- + For the variable :math:`\phi`, with its gradient constrained to + :math:`\nabla\phi\rvert_{\partial\Omega_G} = \vec{G}` on boundary + faces :math:`\partial\Omega_G`, determines the matrix contribution + + .. math:: + + \begin{align} + \mathsf{L} &= \mathsf{0} + \end{align} + + and the right-hand-side vector contribution + + .. 
math:: + + \begin{align} + \mathbf{b} &= -\nabla\cdot\left(\Gamma\vec{G}\right)_{f\in\partial\Omega_G} V_P + \\ + &\approx -\sum_{f\in\partial\Omega_G}(\Gamma\vec{G}\cdot\hat{n})_f A_f + \end{align} + """ + if len(var.shape) == 1 and len(self.nthCoeff.shape) > 1: + # var is scalar field and self.nthCoeff is vector (or tensor) + nthCoeffFaceGrad = var.faceGrad.dot(self.nthCoeff) + else: + # var is vector or tensor field or self.nthCoeff is scalar + if not (self.nthCoeff.shape == () or isinstance(self.nthCoeff, FaceVariable)): + # self.nthCoeff is not a scalar or a FaceVariable + coeff = self.nthCoeff[..., numerix.newaxis] + else: + # self.nthCoeff is a scalar or a FaceVariable + coeff = self.nthCoeff + + nthCoeffFaceGrad = coeff[numerix.newaxis] * var.faceGrad[:, numerix.newaxis] + + b = -(var.faceGrad.constraintMask + * nthCoeffFaceGrad).divergence * var.mesh.cellVolumes + + return 0, b + + + def _calcConstraints(self, var): + """Determine contributions to matrix and RHS due to constraints on `var` + + Parameters + ---------- + var : ~fipy.variables.cellVariable.CellVariable + The constrained variable + + Returns + ------- + None + """ + if (var not in self.constraintL) or (var not in self.constraintB): + LL, bb = self._constrainValue(var) + + self.constraintL[var] = LL + self.constraintB[var] = bb + + LL, bb = self._constrainGradient(var) + + self.constraintL[var] += LL + self.constraintB[var] += bb def _buildMatrix(self, var, SparseMatrix, boundaryConditions=(), dt=None, transientGeomCoeff=None, diffusionGeomCoeff=None): """ + Test to ensure that a changing coefficient influences the boundary conditions. >>> from fipy import * @@ -285,141 +449,310 @@ def _buildMatrix(self, var, SparseMatrix, boundaryConditions=(), dt=None, transi """ - var, L, b = self.__higherOrderbuildMatrix(var, SparseMatrix, boundaryConditions=boundaryConditions, dt=dt, transientGeomCoeff=transientGeomCoeff, diffusionGeomCoeff=diffusionGeomCoeff) - mesh = var.mesh + if self.order > 2: + buildFn = self._higherOrderBuildMatrix + elif self.order == 2: + buildFn = self._secondOrderBuildMatrix + elif self.order == 0: + buildFn = self._zerothOrderBuildMatrix + else: + raise ValueError("Order of diffusion coefficient must be non-zero and even, not {}".format(self.order)) - if self.order == 2: + return buildFn(var=var, + SparseMatrix=SparseMatrix, + boundaryConditions=boundaryConditions, + dt=dt, + transientGeomCoeff=transientGeomCoeff, + diffusionGeomCoeff=diffusionGeomCoeff) - if (not hasattr(self, 'constraintL')) or (not hasattr(self, 'constraintB')): + def _calcAnisotropySource(self, coeff, var): - normals = FaceVariable(mesh=mesh, rank=1, value=mesh._orientedFaceNormals) + if not hasattr(self, 'anisotropySource'): + if len(coeff) > 1: + mesh = var.mesh + unconstrainedVar = var + 0 + gradients = unconstrainedVar.grad.harmonicFaceValue.dot(self._getRotationTensor(mesh)) + from fipy.variables.addOverFacesVariable import _AddOverFacesVariable + self.anisotropySource = _AddOverFacesVariable(gradients[1:].dot(coeff[1:])) * mesh.cellVolumes + + def _calcCoeffDict(self, var): + """Matrix contributions to cells on either side of face + + Returns + ------- + dict + .. 
table:: Stencil contributions + :widths: auto + + ============== ============ + key value + ============== ============ + `cell1diag` `-geomCoeff` + `cell1offdiag` `geomCoeff` + `cell2offdiag` `geomCoeff` + `cell2diag` `-geomCoeff` + ============== ============ + + Notes + ----- + For 2nd-order diffusion, if the diffusion coefficient is + anisotropic or the mesh is nonorthogonal, also sets + :prop:`~fipy.terms.abstractDiffusionTerm._AbstractDiffusionTerm.anisotropySource`. + """ + if not hasattr(self, 'coeffDict'): - if len(var.shape) == 1 and len(self.nthCoeff.shape) > 1: - nthCoeffFaceGrad = var.faceGrad.dot(self.nthCoeff) - normalsNthCoeff = normals.dot(self.nthCoeff) - else: + coeff = self._getGeomCoeff(var) - if self.nthCoeff.shape != () and not isinstance(self.nthCoeff, FaceVariable): - coeff = self.nthCoeff[..., numerix.newaxis] - else: - coeff = self.nthCoeff + coeffDict = { + 'cell 1 diag': -coeff[0], + 'cell 1 offdiag': coeff[0] + } - nthCoeffFaceGrad = coeff[numerix.newaxis] * var.faceGrad[:, numerix.newaxis] - s = (slice(0, None, None),) + (numerix.newaxis,) * (len(coeff.shape) - 1) + (slice(0, None, None),) - normalsNthCoeff = coeff[numerix.newaxis] * normals[s] + coeffDict['cell 1 diag'].dontCacheMe() + coeffDict['cell 1 offdiag'].dontCacheMe() - self.constraintB = -(var.faceGrad.constraintMask * nthCoeffFaceGrad).divergence * mesh.cellVolumes + coeffDict['cell 2 offdiag'] = coeffDict['cell 1 offdiag'] + coeffDict['cell 2 diag'] = coeffDict['cell 1 diag'] - constrainedNormalsDotCoeffOverdAP = var.arithmeticFaceValue.constraintMask * \ - normalsNthCoeff / mesh._cellDistances + self.coeffDict = coeffDict - self.constraintB -= (constrainedNormalsDotCoeffOverdAP * var.arithmeticFaceValue).divergence * mesh.cellVolumes + if self.order == 2: + self._calcAnisotropySource(coeff, var) - ids = self._reshapeIDs(var, numerix.arange(mesh.numberOfCells)) + del coeff - self.constraintL = -constrainedNormalsDotCoeffOverdAP.divergence * mesh.cellVolumes + def _higherOrderBuildMatrix(self, var, SparseMatrix, boundaryConditions=(), dt=None, transientGeomCoeff=None, diffusionGeomCoeff=None): + """Recursively build matrix and RHS vector - ids = self._reshapeIDs(var, numerix.arange(mesh.numberOfCells)) - L.addAt(self.constraintL.ravel(), ids.ravel(), ids.swapaxes(0, 1).ravel()) - b += numerix.reshape(self.constraintB.ravel(), ids.shape).sum(-2).ravel() + Parameters + ---------- + var : ~fipy.variables.cellVariable.CellVariable + Solution variable of N cells. + SparseMatrix : class + ~fipy.matrices.sparseMatrix.SparseMatrix class to build into. + boundaryConditions : tuple of ~fipy.boundaryConditions.boundaryCondition.BoundaryCondition + Old-style (pre-contraint) boundary conditions to apply. + dt : float + Time step. + transientGeomCoeff : ~fipy.variables.cellVariable.CellVariable + Unused. + diffusionGeomCoeff : ~fipy.variables.faceVariable.FaceVariable + Unused. - return (var, L, b) + Returns + ------- + var : ~fipy.variables.cellVariable.CellVariable + Solution variable of N cells. + Why pass in `var` and then pass it back out? + L : ~fipy.matrices.sparseMatrix.SparseMatrix + The NxN sparse matrix from this and all lower-order + contributions. + b : array_like + The length-N right-hand-side vector from this and all + lower-order contributions. + + Notes + ----- + Given an :math:`O^{th}`-order diffusion term + + .. math:: + + \nabla\cdot\{\Gamma_2 \nabla [\nabla\cdot(\Gamma_4 \nabla \cdots \{ \Gamma_O \nabla \phi \})]\} + + recursively determines the matrix + + .. 
math:: + + \mathsf{L} = \mathsf{L}_O \mathsf{L}_{O-2} \frac{1}{V_P} \mathsf{I} + + and the right-hand-side vector + + .. math:: - def __higherOrderbuildMatrix(self, var, SparseMatrix, boundaryConditions=(), dt=None, transientGeomCoeff=None, diffusionGeomCoeff=None): + \mathbf{b} = \mathbf{b}_O + \mathsf{L}_O \mathbf{b}_{O-2} \frac{1}{V_P} + """ mesh = var.mesh N = mesh.numberOfCells M = mesh._maxFacesPerCell - if self.order > 2: + higherOrderBCs, lowerOrderBCs = self._getBoundaryConditions(boundaryConditions) - higherOrderBCs, lowerOrderBCs = self.__getBoundaryConditions(boundaryConditions) + (var, + lowerOrderL, + lowerOrderb) = self.lowerOrderDiffusionTerm._buildMatrix(var=var, + SparseMatrix=SparseMatrix, + boundaryConditions=lowerOrderBCs, + dt=dt, + transientGeomCoeff=transientGeomCoeff, + diffusionGeomCoeff=diffusionGeomCoeff) + del lowerOrderBCs - var, lowerOrderL, lowerOrderb = self.lowerOrderDiffusionTerm._buildMatrix(var = var, SparseMatrix=SparseMatrix, - boundaryConditions = lowerOrderBCs, - dt = dt, transientGeomCoeff=transientGeomCoeff, - diffusionGeomCoeff=diffusionGeomCoeff) - del lowerOrderBCs + lowerOrderb = lowerOrderb / mesh.cellVolumes + volMatrix = SparseMatrix(mesh=var.mesh, nonZerosPerRow=1) - lowerOrderb = lowerOrderb / mesh.cellVolumes - volMatrix = SparseMatrix(mesh=var.mesh, nonZerosPerRow=1) + volMatrix.addAtDiagonal(1. / mesh.cellVolumes) + lowerOrderL = volMatrix * lowerOrderL + del volMatrix - volMatrix.addAtDiagonal(1. / mesh.cellVolumes) - lowerOrderL = volMatrix * lowerOrderL - del volMatrix + self._calcCoeffDict(var) - if not hasattr(self, 'coeffDict'): + L = self._getCoefficientMatrix(SparseMatrix, var, self.coeffDict['cell 1 diag']) + L, b = self._doBCs(SparseMatrix, higherOrderBCs, N, M, self.coeffDict, + L, numerix.zeros(len(var.ravel()), 'd')) - coeff = self._getGeomCoeff(var)[0] - minusCoeff = -coeff + del higherOrderBCs - coeff.dontCacheMe() - minusCoeff.dontCacheMe() + b = numerix.asarray(L * lowerOrderb) + b + del lowerOrderb - self.coeffDict = { - 'cell 1 diag': minusCoeff, - 'cell 1 offdiag': coeff - } - del coeff - del minusCoeff + L = L * lowerOrderL + del lowerOrderL - self.coeffDict['cell 2 offdiag'] = self.coeffDict['cell 1 offdiag'] - self.coeffDict['cell 2 diag'] = self.coeffDict['cell 1 diag'] + return (var, L, b) + def _secondOrderBuildMatrix(self, var, SparseMatrix, boundaryConditions=(), dt=None, transientGeomCoeff=None, diffusionGeomCoeff=None): + """Build the 2nd-order matrix and RHS vector - mm = self.__getCoefficientMatrix(SparseMatrix, var, self.coeffDict['cell 1 diag']) - L, b = self.__doBCs(SparseMatrix, higherOrderBCs, N, M, self.coeffDict, - mm, numerix.zeros(len(var.ravel()), 'd')) + .. math:: - del higherOrderBCs - del mm + \nabla\cdot(\Gamma_0 \nabla \phi) - b = numerix.asarray(L * lowerOrderb) + b - del lowerOrderb + Parameters + ---------- + var : ~fipy.variables.cellVariable.CellVariable + Solution variable of N cells. + SparseMatrix : class + ~fipy.matrices.sparseMatrix.SparseMatrix class to build into. + boundaryConditions : tuple of ~fipy.boundaryConditions.boundaryCondition.BoundaryCondition + Old-style (pre-contraint) boundary conditions to apply. + dt : float + Time step. + transientGeomCoeff : ~fipy.variables.cellVariable.CellVariable + Unused. + diffusionGeomCoeff : ~fipy.variables.faceVariable.FaceVariable + Unused. - L = L * lowerOrderL - del lowerOrderL + Returns + ------- + var : ~fipy.variables.cellVariable.CellVariable + Solution variable of N cells. + Why pass in `var` and then pass it back out? 
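# Illustrative use of the recursion described above: a two-entry coefficient
# tuple gives a fourth-order term whose lowerOrderDiffusionTerm is the nested
# second-order term.  The boundary conditions shown are one plausible
# well-posed combination for a sketch, not a prescription.
from fipy import CellVariable, DiffusionTerm, Grid1D, NthOrderBoundaryCondition

mesh = Grid1D(dx=1., nx=10)
phi = CellVariable(mesh=mesh)
phi.constrain(0., where=mesh.facesLeft)
phi.constrain(1., where=mesh.facesRight)
bcs = (NthOrderBoundaryCondition(mesh.facesLeft, 0., 2),
       NthOrderBoundaryCondition(mesh.facesRight, 0., 2))
DiffusionTerm(coeff=(1., 1.)).solve(var=phi, boundaryConditions=bcs)  # order 4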
+ L : ~fipy.matrices.sparseMatrix.SparseMatrix + The NxN sparse matrix from this second-order contribution. + b : array_like + Right-hand-side vector from this second-order contribution. - elif self.order == 2: + Notes + ----- + Given a 2nd-order diffusion term + + .. math:: + + \nabla\cdot(\Gamma_0 \nabla \phi) + + determines the matrix + + .. math:: - if not hasattr(self, 'coeffDict'): + \mathsf{L} = \mathsf{L}_2 - coeff = self._getGeomCoeff(var) - minusCoeff = -coeff[0] + and the right-hand-side vector - coeff[0].dontCacheMe() - minusCoeff.dontCacheMe() + .. math:: - self.coeffDict = { - 'cell 1 diag': minusCoeff, - 'cell 1 offdiag': coeff[0] - } + \mathbf{b} = \mathbf{b}_2 + """ + mesh = var.mesh + + N = mesh.numberOfCells + M = mesh._maxFacesPerCell + + higherOrderBCs, lowerOrderBCs = self._getBoundaryConditions(boundaryConditions) + del lowerOrderBCs - self.coeffDict['cell 2 offdiag'] = self.coeffDict['cell 1 offdiag'] - self.coeffDict['cell 2 diag'] = self.coeffDict['cell 1 diag'] + self._calcCoeffDict(var) - self.__calcAnisotropySource(coeff, mesh, var) + L = self._getCoefficientMatrix(SparseMatrix, var, self.coeffDict['cell 1 diag']) + L, b = self._doBCs(SparseMatrix, higherOrderBCs, N, M, self.coeffDict, + L, numerix.zeros(len(var.ravel()), 'd')) - del coeff - del minusCoeff + del higherOrderBCs - higherOrderBCs, lowerOrderBCs = self.__getBoundaryConditions(boundaryConditions) - del lowerOrderBCs + if hasattr(self, 'anisotropySource'): + b -= self.anisotropySource - L, b = self.__doBCs(SparseMatrix, higherOrderBCs, N, M, self.coeffDict, - self.__getCoefficientMatrix(SparseMatrix, var, self.coeffDict['cell 1 diag']), numerix.zeros(len(var.ravel()), 'd')) + self._calcConstraints(var) - if hasattr(self, 'anisotropySource'): - b -= self.anisotropySource + ids = self._reshapeIDs(var, numerix.arange(mesh.numberOfCells, + dtype=INDEX_TYPE)) + L.addAt(self.constraintL[var].ravel(), + ids.ravel(), + ids.swapaxes(0, 1).ravel()) + b += numerix.reshape(self.constraintB[var].ravel(), + ids.shape).sum(-2).ravel() - del higherOrderBCs + return (var, L, b) + def _zerothOrderBuildMatrix(self, var, SparseMatrix, boundaryConditions=(), dt=None, transientGeomCoeff=None, diffusionGeomCoeff=None): + """Recursively build the 0th-order matrix and RHS vector - else: + The purpose of diffusion order 0 is to enable recursive + construction of higher-order diffusion terms. + + Parameters + ---------- + var : ~fipy.variables.cellVariable.CellVariable + Solution variable of N cells. + SparseMatrix : class + ~fipy.matrices.sparseMatrix.SparseMatrix class to build into. + boundaryConditions : tuple of ~fipy.boundaryConditions.boundaryCondition.BoundaryCondition + Unused. + dt : float + Unused. + transientGeomCoeff : ~fipy.variables.cellVariable.CellVariable + Unused. + diffusionGeomCoeff : ~fipy.variables.faceVariable.FaceVariable + Unused. + + Returns + ------- + var : ~fipy.variables.cellVariable.CellVariable + Solution variable of N cells. + Why pass in `var` and then pass it back out? + L : ~fipy.matrices.sparseMatrix.SparseMatrix + The NxN sparse matrix from this zeroth-order contribution. + b : array_like + Right-hand-side vector from this zeroth-order contribution. + + Notes + ----- + Given a 0th-order diffusion term + + .. math:: + + \phi + + determines the matrix + + .. math:: + + \mathsf{L} = V_P \mathsf{I} + + and the right-hand-side vector + + .. 
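# NumPy sketch of the zeroth-order building block documented above: a
# volume-weighted identity with a zero right-hand side, which the higher-order
# caller then multiplies by 1/V_P, leaving the identity overall.
import numpy as np

cellVolumes = np.array([0.5, 1.0, 1.0, 0.5])
L0 = np.diag(cellVolumes)                        # L = V_P I
b0 = np.zeros_like(cellVolumes)                  # b = 0
lowerOrderL = np.diag(1. / cellVolumes).dot(L0)  # the 1/V_P factor applied by the caller
assert np.allclose(lowerOrderL, np.eye(len(cellVolumes)))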
math:: + + \mathbf{b} = \mathbf{0} + """ + mesh = var.mesh + + N = mesh.numberOfCells + M = mesh._maxFacesPerCell - L = SparseMatrix(mesh=mesh, nonZerosPerRow=1) - L.addAtDiagonal(mesh.cellVolumes) - b = numerix.zeros(len(var.ravel()), 'd') + L = SparseMatrix(mesh=mesh, nonZerosPerRow=1) + L.addAtDiagonal(mesh.cellVolumes) + b = numerix.zeros(len(var.ravel()), 'd') return (var, L, b) diff --git a/fipy/terms/advectionTerm.py b/fipy/terms/advectionTerm.py index d3e0d4e185..5da8a32158 100644 --- a/fipy/terms/advectionTerm.py +++ b/fipy/terms/advectionTerm.py @@ -8,6 +8,7 @@ from fipy.tools.numerix import MA from fipy.tools import numerix +from fipy.solvers import INDEX_TYPE from fipy.terms.firstOrderAdvectionTerm import FirstOrderAdvectionTerm @@ -182,8 +183,10 @@ def _getDifferences(self, adjacentValues, cellValues, oldArray, cellToCellIDs, m adjacentNormalGradient = numerix.dot(adjacentGradient, mesh._cellNormals) adjacentUpValues = cellValues + 2 * dAP * adjacentNormalGradient - cellIDs = numerix.repeat(numerix.arange(mesh.numberOfCells)[numerix.newaxis, ...], - mesh._maxFacesPerCell, axis=0) + cellIDs = numerix.arange(mesh.numberOfCells, + dtype=INDEX_TYPE)[numerix.newaxis, ...] + cellIDs = numerix.repeat(cellIDs, + mesh._maxFacesPerCell, axis=0) cellIDs = MA.masked_array(cellIDs, mask = MA.getmask(mesh._cellToCellIDs)) cellGradient = numerix.take(oldArray.grad, cellIDs, axis=-1) cellNormalGradient = numerix.dot(cellGradient, mesh._cellNormals) diff --git a/fipy/terms/binaryTerm.py b/fipy/terms/binaryTerm.py index 085248584a..ea55822c4d 100644 --- a/fipy/terms/binaryTerm.py +++ b/fipy/terms/binaryTerm.py @@ -20,7 +20,7 @@ def _buildAndAddMatrices(self, var, SparseMatrix, boundaryConditions=(), dt=Non """ - matrix = SparseMatrix(mesh=var.mesh) + matrix = self._getMatrix(SparseMatrix=SparseMatrix, mesh=var.mesh, var=var) RHSvector = 0 for term in (self.term, self.other): @@ -40,6 +40,16 @@ def _buildAndAddMatrices(self, var, SparseMatrix, boundaryConditions=(), dt=Non return (var, matrix, RHSvector) + def _getMatrix(self, SparseMatrix, mesh, var, nonZerosPerRow=0): + if not hasattr(self, "_sparsematrix"): + self._sparsematrix = {} + + if var not in self._sparsematrix: + self._sparsematrix[var] = SparseMatrix(mesh=mesh, nonZerosPerRow=nonZerosPerRow) + else: + self._sparsematrix[var].zeroEntries() + return self._sparsematrix[var] + def _getDefaultSolver(self, var, solver, *args, **kwargs): for term in (self.term, self.other): defaultSolver = term._getDefaultSolver(var, solver, *args, **kwargs) diff --git a/fipy/terms/cellTerm.py b/fipy/terms/cellTerm.py index d746df6d5e..8d070cdb8d 100644 --- a/fipy/terms/cellTerm.py +++ b/fipy/terms/cellTerm.py @@ -2,6 +2,7 @@ from __future__ import unicode_literals __docformat__ = 'restructuredtext' +from fipy.solvers import INDEX_TYPE from fipy.terms.nonDiffusionTerm import _NonDiffusionTerm from fipy.tools import inline from fipy.tools import numerix @@ -112,16 +113,20 @@ def _buildMatrixInline_(self, L, oldArray, b, dt, coeffVectors): L.addAtDiagonal(updatePyArray) def _buildMatrixNoInline_(self, L, oldArray, b, dt, coeffVectors): - ids = self._reshapeIDs(oldArray, numerix.arange(oldArray.shape[-1])) - b += (oldArray.value[numerix.newaxis] * coeffVectors['old value']).sum(-2).ravel() / dt + ids = self._reshapeIDs(oldArray, numerix.arange(oldArray.shape[-1], + dtype=INDEX_TYPE)) + b += (oldArray.value[numerix.newaxis] + * coeffVectors['old value']).sum(-2).ravel() / dt b += coeffVectors['b vector'][numerix.newaxis].sum(-2).ravel() - 
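# Generic sketch of the caching pattern behind the _getMatrix() helpers above:
# allocate the sparse matrix once per solution variable, then recycle the same
# storage with zeroEntries() on later builds.  _MatrixCache and factory are
# illustrative names, not FiPy internals.
class _MatrixCache(object):
    def __init__(self):
        self._cache = {}

    def get(self, key, factory):
        if key not in self._cache:
            self._cache[key] = factory()    # first build: allocate
        else:
            self._cache[key].zeroEntries()  # later builds: reuse storage
        return self._cache[key]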
L.addAt(coeffVectors['new value'].ravel() / dt, ids.ravel(), ids.swapaxes(0, 1).ravel()) - L.addAt(coeffVectors['diagonal'].ravel(), ids.ravel(), ids.swapaxes(0, 1).ravel()) + L.addAt(coeffVectors['new value'].ravel() / dt, + ids.ravel(), ids.swapaxes(0, 1).ravel()) + L.addAt(coeffVectors['diagonal'].ravel(), + ids.ravel(), ids.swapaxes(0, 1).ravel()) def _buildMatrix(self, var, SparseMatrix, boundaryConditions=(), dt=None, transientGeomCoeff=None, diffusionGeomCoeff=None): b = numerix.zeros(var.shape, 'd').ravel() - L = SparseMatrix(mesh=var.mesh, nonZerosPerRow=1) + L = self._getMatrix(SparseMatrix=SparseMatrix, mesh=var.mesh, nonZerosPerRow=1) coeffVectors = self._getCoeffVectors_(var=var, transientGeomCoeff=transientGeomCoeff, diffusionGeomCoeff=diffusionGeomCoeff) diff --git a/fipy/terms/coupledBinaryTerm.py b/fipy/terms/coupledBinaryTerm.py index 8b6a46845e..a06e4748bf 100644 --- a/fipy/terms/coupledBinaryTerm.py +++ b/fipy/terms/coupledBinaryTerm.py @@ -55,6 +55,14 @@ def _verifyVar(self, var): def _buildExplcitIfOther(self): return False + def _offsetMatrix(self, SparseMatrix, equationIndex=0, varIndex=0): + from fipy.matrices.offsetSparseMatrix import OffsetSparseMatrix + return OffsetSparseMatrix(SparseMatrix=SparseMatrix, + numberOfVariables=len(self._vars), + numberOfEquations=len(self._uncoupledTerms), + equationIndex=equationIndex, + varIndex=varIndex) + def _buildAndAddMatrices(self, var, SparseMatrix, boundaryConditions=(), dt=None, transientGeomCoeff=None, diffusionGeomCoeff=None, buildExplicitIfOther=False): """Build matrices of constituent Terms and collect them @@ -62,30 +70,33 @@ def _buildAndAddMatrices(self, var, SparseMatrix, boundaryConditions=(), dt=Non """ - from fipy.matrices.offsetSparseMatrix import OffsetSparseMatrix - SparseMatrix = OffsetSparseMatrix(SparseMatrix=SparseMatrix, - numberOfVariables=len(self._vars), - numberOfEquations=len(self._uncoupledTerms)) - matrix = SparseMatrix(mesh=var.mesh) + CoupledMatrixClass = self._offsetMatrix(SparseMatrix=SparseMatrix) + matrix = self._getMatrix(SparseMatrix=CoupledMatrixClass, + mesh=var.mesh) RHSvectors = [] for equationIndex, uncoupledTerm in enumerate(self._uncoupledTerms): - SparseMatrix.equationIndex = equationIndex termRHSvector = 0 - termMatrix = SparseMatrix(mesh=var.mesh) + EqnMatrixClass = self._offsetMatrix(SparseMatrix=SparseMatrix, + equationIndex=equationIndex) + termMatrix = EqnMatrixClass(mesh=var.mesh) for varIndex, tmpVar in enumerate(var.vars): - SparseMatrix.varIndex = varIndex - - tmpVar, tmpMatrix, tmpRHSvector = uncoupledTerm._buildAndAddMatrices(tmpVar, - SparseMatrix, - boundaryConditions=(), - dt=dt, - transientGeomCoeff=uncoupledTerm._getTransientGeomCoeff(tmpVar), - diffusionGeomCoeff=uncoupledTerm._getDiffusionGeomCoeff(tmpVar), - buildExplicitIfOther=buildExplicitIfOther) + VarMatrixClass = self._offsetMatrix(SparseMatrix=SparseMatrix, + equationIndex=equationIndex, + varIndex=varIndex) + + (tmpVar, + tmpMatrix, + tmpRHSvector) = uncoupledTerm._buildAndAddMatrices(tmpVar, + VarMatrixClass, + boundaryConditions=(), + dt=dt, + transientGeomCoeff=uncoupledTerm._getTransientGeomCoeff(tmpVar), + diffusionGeomCoeff=uncoupledTerm._getDiffusionGeomCoeff(tmpVar), + buildExplicitIfOther=buildExplicitIfOther) termMatrix += tmpMatrix termRHSvector += tmpRHSvector diff --git a/fipy/terms/diffusionTerm.py b/fipy/terms/diffusionTerm.py index a9f0631c4a..4a0aabbc6b 100644 --- a/fipy/terms/diffusionTerm.py +++ b/fipy/terms/diffusionTerm.py @@ -43,7 +43,7 @@ def _test(self): >>> mesh = Grid1D(dx = 
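# Dense NumPy sketch of what the OffsetSparseMatrix indices above accomplish:
# each (equationIndex, varIndex) pair addresses one N-by-N block of the larger
# coupled matrix.  The block contents here are placeholders.
import numpy as np

N = 3                                      # cells
numberOfEquations = numberOfVariables = 2
coupled = np.zeros((numberOfEquations * N, numberOfVariables * N))

def add_block(block, equationIndex, varIndex):
    rows = slice(equationIndex * N, (equationIndex + 1) * N)
    cols = slice(varIndex * N, (varIndex + 1) * N)
    coupled[rows, cols] += block

add_block(np.eye(N), equationIndex=0, varIndex=0)       # a term acting on its own variable
add_block(2. * np.eye(N), equationIndex=0, varIndex=1)  # a cross-coupling term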
1., nx = 2) >>> term = DiffusionTerm(coeff = (1,)) >>> coeff = term._getGeomCoeff(CellVariable(mesh=mesh)) - >>> M = term._getCoefficientMatrixForTests(_MeshMatrix, CellVariable(mesh=mesh), coeff[0]) + >>> M = term._getCoefficientMatrix(_MeshMatrix, CellVariable(mesh=mesh), coeff[0]) >>> A = M.numpyArray >>> print(numerix.allclose(A, ... (( 1., -1.), @@ -65,7 +65,7 @@ def _test(self): >>> from fipy.variables.faceVariable import FaceVariable >>> term = DiffusionTerm(coeff=FaceVariable(mesh=mesh, value=1)) >>> coeff = term._getGeomCoeff(CellVariable(mesh=mesh)) - >>> M = term._getCoefficientMatrixForTests(_MeshMatrix, CellVariable(mesh=mesh), coeff[0]) + >>> M = term._getCoefficientMatrix(_MeshMatrix, CellVariable(mesh=mesh), coeff[0]) >>> A = M.numpyArray >>> print(numerix.allclose(A, (( 1., -1.), ... (-1., 1.)))) # doctest: +PROCESSOR_0 @@ -81,7 +81,7 @@ def _test(self): >>> term = DiffusionTerm(coeff=CellVariable(mesh=mesh, value=1)) >>> coeff = term._getGeomCoeff(CellVariable(mesh=mesh)) - >>> M = term._getCoefficientMatrixForTests(_MeshMatrix, CellVariable(mesh=mesh), coeff[0]) + >>> M = term._getCoefficientMatrix(_MeshMatrix, CellVariable(mesh=mesh), coeff[0]) >>> A = M.numpyArray >>> print(numerix.allclose(A, ... (( 1., -1.), @@ -99,7 +99,7 @@ def _test(self): >>> from fipy.variables.variable import Variable >>> term = DiffusionTerm(coeff = Variable(value = 1)) >>> coeff = term._getGeomCoeff(CellVariable(mesh=mesh)) - >>> M = term._getCoefficientMatrixForTests(_MeshMatrix, CellVariable(mesh=mesh), coeff[0]) + >>> M = term._getCoefficientMatrix(_MeshMatrix, CellVariable(mesh=mesh), coeff[0]) >>> A = M.numpyArray >>> print(numerix.allclose(A, ... (( 1., -1.), @@ -126,7 +126,7 @@ def _test(self): >>> var.constrain(4., mesh.facesRight) >>> term = DiffusionTerm(coeff = (1.,)) >>> coeff = term._getGeomCoeff(CellVariable(mesh=mesh)) - >>> M = term._getCoefficientMatrixForTests(_MeshMatrix, CellVariable(mesh=mesh), coeff[0]) + >>> M = term._getCoefficientMatrix(_MeshMatrix, CellVariable(mesh=mesh), coeff[0]) >>> A = M.numpyArray >>> print(numerix.allclose(A, ... (( 1., -1.), @@ -154,7 +154,7 @@ def _test(self): >>> bcRight2 = NthOrderBoundaryCondition(mesh.facesRight, 0., 2) >>> term = DiffusionTerm(coeff = (1., 1.)) >>> coeff = term._getGeomCoeff(CellVariable(mesh=mesh)) - >>> M = term._getCoefficientMatrixForTests(_MeshMatrix, CellVariable(mesh=mesh), coeff[0]) + >>> M = term._getCoefficientMatrix(_MeshMatrix, CellVariable(mesh=mesh), coeff[0]) >>> A = M.numpyArray >>> print(numerix.allclose(A, ... (( 1., -1.), @@ -183,7 +183,7 @@ def _test(self): >>> term = DiffusionTerm(coeff = (-1., 1.)) >>> coeff = term._getGeomCoeff(CellVariable(mesh=mesh)) - >>> M = term._getCoefficientMatrixForTests(_MeshMatrix, CellVariable(mesh=mesh), coeff[0]) + >>> M = term._getCoefficientMatrix(_MeshMatrix, CellVariable(mesh=mesh), coeff[0]) >>> A = M.numpyArray >>> print(numerix.allclose(A, ... ((-1., 1.), @@ -215,7 +215,7 @@ def _test(self): >>> term = DiffusionTerm(coeff = (1., 1.)) >>> coeff = term._getGeomCoeff(CellVariable(mesh=mesh)) - >>> M = term._getCoefficientMatrixForTests(_MeshMatrix, CellVariable(mesh=mesh), coeff[0]) + >>> M = term._getCoefficientMatrix(_MeshMatrix, CellVariable(mesh=mesh), coeff[0]) >>> A = M.numpyArray >>> print(numerix.allclose(A, ... 
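# The doctests above exercise the same two-cell stencil with several equivalent
# coefficient types; all of the following are accepted by DiffusionTerm (the
# values shown are the ones used in the tests).
from fipy import CellVariable, DiffusionTerm, FaceVariable, Grid1D, Variable

mesh = Grid1D(dx=1., nx=2)
equivalent = [DiffusionTerm(coeff=(1,)),
              DiffusionTerm(coeff=FaceVariable(mesh=mesh, value=1)),
              DiffusionTerm(coeff=CellVariable(mesh=mesh, value=1)),
              DiffusionTerm(coeff=Variable(value=1))]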
(( 2., -2.), diff --git a/fipy/terms/faceTerm.py b/fipy/terms/faceTerm.py index 351f92b2f8..51719b3801 100644 --- a/fipy/terms/faceTerm.py +++ b/fipy/terms/faceTerm.py @@ -141,7 +141,7 @@ def _buildMatrix(self, var, SparseMatrix, boundaryConditions=(), dt=None, transi b = numerix.zeros(var.shape, 'd').ravel() facesPerCell = mesh._facesPerCell[..., mesh._localNonOverlappingCellIDs] - L = SparseMatrix(mesh=mesh, nonZerosPerRow=facesPerCell + 1) + L = self._getMatrix(SparseMatrix=SparseMatrix, mesh=mesh, nonZerosPerRow=facesPerCell + 1) weight = self._getWeight(var, transientGeomCoeff, diffusionGeomCoeff) diff --git a/fipy/terms/firstOrderAdvectionTerm.py b/fipy/terms/firstOrderAdvectionTerm.py index 40060df268..74d929d39b 100644 --- a/fipy/terms/firstOrderAdvectionTerm.py +++ b/fipy/terms/firstOrderAdvectionTerm.py @@ -6,6 +6,7 @@ from future.utils import text_to_native_str __all__ = [text_to_native_str(n) for n in __all__] +from fipy.solvers import INDEX_TYPE from fipy.tools import numerix from fipy.tools.numerix import MA @@ -98,7 +99,8 @@ def _buildMatrix(self, var, SparseMatrix, boundaryConditions=(), dt=None, equati cellValues = numerix.repeat(oldArray[numerix.newaxis, ...], NCellFaces, axis = 0) - cellIDs = numerix.repeat(numerix.arange(NCells)[numerix.newaxis, ...], NCellFaces, axis = 0) + cellIDs = numerix.arange(NCells, dtype=INDEX_TYPE)[numerix.newaxis, ...] + cellIDs = numerix.repeat(cellIDs, NCellFaces, axis = 0) cellToCellIDs = mesh._cellToCellIDs if NCells > 0: diff --git a/fipy/terms/term.py b/fipy/terms/term.py index 6c570bec84..71636513e3 100644 --- a/fipy/terms/term.py +++ b/fipy/terms/term.py @@ -17,7 +17,8 @@ __all__ = [text_to_native_str(n) for n in __all__] class Term(object): - """ + """Base class for elements of a partial differential equation. + .. attention:: This class is abstract. Always create one of its subclasses. """ @@ -103,12 +104,20 @@ def _getMatrixClass(self, solver, var): from fipy.matrices.offsetSparseMatrix import OffsetSparseMatrix SparseMatrix = OffsetSparseMatrix(SparseMatrix=solver._matrixClass, numberOfVariables=self._vectorSize(var), - numberOfEquations=self._vectorSize(var)) + numberOfEquations=self._vectorSize(var), + equationIndex=0, varIndex=0) else: SparseMatrix = solver._matrixClass return SparseMatrix + def _getMatrix(self, SparseMatrix, mesh, nonZerosPerRow=0): + if not hasattr(self, "_sparsematrix"): + self._sparsematrix = SparseMatrix(mesh=mesh, nonZerosPerRow=nonZerosPerRow) + else: + self._sparsematrix.zeroEntries() + return self._sparsematrix + def _prepareLinearSystem(self, var, solver, boundaryConditions, dt): self._log.debug("BEGIN _prepareLinearSystem") @@ -554,6 +563,21 @@ def _test(self): ... sweep += 1 >>> x = m.cellCenters[0] >>> answer = (numerix.exp(x) - numerix.exp(-x)) / (numerix.exp(L) - numerix.exp(-L)) + + The default solver tolerance of :math:`10^{-5}` results in only + modest agreement with the analytical solution. + + >>> print(numerix.allclose(v, answer, rtol=3e-3)) + True + + Reducing the solver tolerance to :math:`10^{-8}` improves the solutio. + + >>> res = 1. + >>> sweep = 0 + >>> solver = eqn.getDefaultSolver(tolerance=1e-8) + >>> while res > 1e-8 and sweep < 100: + ... res = eqn.sweep(v, solver=solver) + ... 
sweep += 1

        >>> print(numerix.allclose(v, answer, rtol=2e-5))
        True
diff --git a/fipy/terms/transientTerm.py b/fipy/terms/transientTerm.py
index 376ee88383..5e85e51cdb 100644
--- a/fipy/terms/transientTerm.py
+++ b/fipy/terms/transientTerm.py
@@ -63,13 +63,13 @@ class TransientTerm(CellTerm):
     >>> for step in range(steps):
     ...     var.updateOld()
     ...     for sweep in range(sweeps):
-    ...         eq.solve(var, dt = dt)
+    ...         eq.solve(var, dt=dt)
 
     Compare the final result with the analytical solution.
 
     >>> from fipy.tools import numerix
-    >>> print(var.allclose(numerix.sqrt(k * dt * steps + phi0**2)))
-    1
+    >>> print(var.allclose(numerix.sqrt(k * dt * steps + phi0**2), rtol=2e-5))
+    True
 
     """
     def _getWeight(self, var, transientGeomCoeff=None, diffusionGeomCoeff=None):
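# The tolerance changes above reflect that an iterative solve is only as
# accurate as the solver tolerance, so the doctests compare against the
# analytical solution with an explicit rtol.  Sketch of requesting a tighter
# tolerance with an explicit solver class (which class is available depends on
# the installed solver suite):
from fipy.solvers import LinearPCGSolver

tightSolver = LinearPCGSolver(tolerance=1e-8, iterations=2000)
# res = eqn.sweep(v, solver=tightSolver)  # as in the sweep loop in the doctest above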