From cb4cc22b30f68c9ffc2a0e486a7876b16dd7a186 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Wed, 12 Feb 2025 16:33:10 +0100 Subject: [PATCH 01/52] Rename fwd_alg and rrule_alg in Defaults --- src/PEPSKit.jl | 25 +++++++++++++++++-------- src/utility/svd.jl | 6 +++--- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/src/PEPSKit.jl b/src/PEPSKit.jl index b4c1baa1..168ed088 100644 --- a/src/PEPSKit.jl +++ b/src/PEPSKit.jl @@ -69,14 +69,19 @@ Module containing default algorithm parameter values and arguments. - `ctmrg_maxiter=100`: Maximal number of CTMRG iterations per run - `ctmrg_miniter=4`: Minimal number of CTMRG carried out - `trscheme=FixedSpaceTruncation()`: Truncation scheme for SVDs and other decompositions -- `fwd_alg=TensorKit.SDD()`: SVD algorithm that is used in the forward pass -- `rrule_alg`: Reverse-rule for differentiating that SVD +- `svd_fwd_alg=TensorKit.SDD()`: SVD algorithm that is used in the forward pass +- `svd_rrule_alg`: Reverse-rule for differentiating that SVD ``` - rrule_alg = Arnoldi(; tol=ctmrg_tol, krylovdim=48, verbosity=-1) + svd_rrule_alg = Arnoldi(; tol=ctmrg_tol, krylovdim=48, verbosity=-1) + ``` + +- `svd_alg`: Combination of forward and reverse SVD algorithms + + ``` + svd_alg=SVDAdjoint(; fwd_alg=svd_fwd_alg, rrule_alg=svd_rrule_alg) ``` -- `svd_alg=SVDAdjoint(; fwd_alg, rrule_alg)`: Combination of `fwd_alg` and `rrule_alg` - `projector_alg_type=HalfInfiniteProjector`: Default type of projector algorithm - `projector_alg`: Algorithm to compute CTMRG projectors @@ -115,7 +120,11 @@ Module containing default algorithm parameter values and arguments. 
``` - `reuse_env=true`: If `true`, the current optimization step is initialized on the previous environment -- `optimizer=LBFGS(32; maxiter=100, gradtol=1e-4, verbosity=3)`: Default `OptimKit.OptimizerAlgorithm` for PEPS optimization +- `optimizer`: Default `OptimKit.OptimizerAlgorithm` for PEPS optimization + + ``` + optimizer=LBFGS(32; maxiter=100, gradtol=1e-4, verbosity=3) + ``` # OhMyThreads scheduler - `scheduler=Ref{Scheduler}(...)`: Multi-threading scheduler which can be accessed via `set_scheduler!` @@ -135,9 +144,9 @@ module Defaults const ctmrg_miniter = 4 const sparse = false const trscheme = FixedSpaceTruncation() - const fwd_alg = TensorKit.SDD() - const rrule_alg = Arnoldi(; tol=ctmrg_tol, krylovdim=48, verbosity=-1) - const svd_alg = SVDAdjoint(; fwd_alg, rrule_alg) + const svd_fwd_alg = TensorKit.SDD() + const svd_rrule_alg = Arnoldi(; tol=ctmrg_tol, krylovdim=48, verbosity=-1) + const svd_alg = SVDAdjoint(; fwd_alg=svd_fwd_alg, rrule_alg=svd_rrule_alg) const projector_alg_type = HalfInfiniteProjector const projector_alg = projector_alg_type(; svd_alg, trscheme, verbosity=0) const ctmrg_alg = SimultaneousCTMRG( diff --git a/src/utility/svd.jl b/src/utility/svd.jl index 321f31d6..403c0d93 100644 --- a/src/utility/svd.jl +++ b/src/utility/svd.jl @@ -10,7 +10,7 @@ using TensorKit: const CRCExt = Base.get_extension(KrylovKit, :KrylovKitChainRulesCoreExt) """ - struct SVDAdjoint(; fwd_alg=Defaults.fwd_alg, rrule_alg=Defaults.rrule_alg, + struct SVDAdjoint(; fwd_alg=Defaults.svd_fwd_alg, rrule_alg=Defaults.svd_rrule_alg, broadening=nothing) Wrapper for a SVD algorithm `fwd_alg` with a defined reverse rule `rrule_alg`. @@ -19,8 +19,8 @@ In case of degenerate singular values, one might need a `broadening` scheme whic removes the divergences from the adjoint. 
""" @kwdef struct SVDAdjoint{F,R,B} - fwd_alg::F = Defaults.fwd_alg - rrule_alg::R = Defaults.rrule_alg + fwd_alg::F = Defaults.svd_fwd_alg + rrule_alg::R = Defaults.svd_rrule_alg broadening::B = nothing end # Keep truncation algorithm separate to be able to specify CTMRG dependent information From 90e16470f7562a188c88aed5aa56c61f8085c39c Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Wed, 12 Feb 2025 16:49:54 +0100 Subject: [PATCH 02/52] Update optimizer Defaults --- src/PEPSKit.jl | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/PEPSKit.jl b/src/PEPSKit.jl index 168ed088..92d6f8aa 100644 --- a/src/PEPSKit.jl +++ b/src/PEPSKit.jl @@ -120,10 +120,14 @@ Module containing default algorithm parameter values and arguments. ``` - `reuse_env=true`: If `true`, the current optimization step is initialized on the previous environment + +- `optimizer_tol`: Gradient norm tolerance of the optimizer +- `optimizer_maxiter`: Maximal number of optimization steps +- `lbfgs_memory`: Size of limited memory representation of BFGS Hessian matrix - `optimizer`: Default `OptimKit.OptimizerAlgorithm` for PEPS optimization ``` - optimizer=LBFGS(32; maxiter=100, gradtol=1e-4, verbosity=3) + optimizer=LBFGS(lbfgs_memory; maxiter=optimizer_maxiter, gradtol=optimizer_tol, verbosity=3) ``` # OhMyThreads scheduler @@ -163,7 +167,12 @@ module Defaults const iterscheme = :fixed const gradient_alg = LinSolver(; solver=gradient_linsolver, iterscheme) const reuse_env = true - const optimizer = LBFGS(32; maxiter=100, gradtol=1e-4, verbosity=3) + const optimizer_tol = 1e-4 + const optimizer_maxiter = 100 + const lbfgs_memory = 20 + const optimizer = LBFGS( + lbfgs_memory; maxiter=optimizer_maxiter, gradtol=optimizer_tol, verbosity=3 + ) # OhMyThreads scheduler defaults const scheduler = Ref{Scheduler}() From 99604b9b6bc8a0c11c5c705147cd9cf1534584bd Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Wed, 12 Feb 2025 17:19:31 +0100 Subject: [PATCH 03/52] Update CTMRG 
Defaults --- src/PEPSKit.jl | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/PEPSKit.jl b/src/PEPSKit.jl index 92d6f8aa..ee66f1a2 100644 --- a/src/PEPSKit.jl +++ b/src/PEPSKit.jl @@ -68,6 +68,7 @@ Module containing default algorithm parameter values and arguments. - `ctmrg_tol=1e-8`: Tolerance checking singular value and norm convergence - `ctmrg_maxiter=100`: Maximal number of CTMRG iterations per run - `ctmrg_miniter=4`: Minimal number of CTMRG carried out +- `ctmrg_alg_type=SimultaneousCTMRG`: Default CTMRG algorithm variant - `trscheme=FixedSpaceTruncation()`: Truncation scheme for SVDs and other decompositions - `svd_fwd_alg=TensorKit.SDD()`: SVD algorithm that is used in the forward pass - `svd_rrule_alg`: Reverse-rule for differentiating that SVD @@ -92,7 +93,7 @@ Module containing default algorithm parameter values and arguments. - `ctmrg_alg`: Algorithm for performing CTMRG runs ``` - ctmrg_alg = SimultaneousCTMRG( + ctmrg_alg = ctmrg_alg_type( ctmrg_tol, ctmrg_maxiter, ctmrg_miniter, 2, projector_alg ) ``` @@ -146,6 +147,7 @@ module Defaults const ctmrg_tol = 1e-8 const ctmrg_maxiter = 100 const ctmrg_miniter = 4 + const ctmrg_alg_type = SimultaneousCTMRG const sparse = false const trscheme = FixedSpaceTruncation() const svd_fwd_alg = TensorKit.SDD() @@ -153,7 +155,7 @@ module Defaults const svd_alg = SVDAdjoint(; fwd_alg=svd_fwd_alg, rrule_alg=svd_rrule_alg) const projector_alg_type = HalfInfiniteProjector const projector_alg = projector_alg_type(; svd_alg, trscheme, verbosity=0) - const ctmrg_alg = SimultaneousCTMRG( + const ctmrg_alg = ctmrg_alg_type( ctmrg_tol, ctmrg_maxiter, ctmrg_miniter, 2, projector_alg ) From d20d04e90d82736b70b3708f31a4a0f161f37c93 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Wed, 12 Feb 2025 18:56:19 +0100 Subject: [PATCH 04/52] Update gradient algorithm defaults --- src/PEPSKit.jl | 28 ++++++++------- .../fixed_point_differentiation.jl | 36 +++++++++---------- 2 files changed, 33 insertions(+), 
31 deletions(-) diff --git a/src/PEPSKit.jl b/src/PEPSKit.jl index ee66f1a2..493e6083 100644 --- a/src/PEPSKit.jl +++ b/src/PEPSKit.jl @@ -99,25 +99,25 @@ Module containing default algorithm parameter values and arguments. ``` # Optimization -- `fpgrad_maxiter=30`: Maximal number of iterations for computing the CTMRG fixed-point gradient -- `fpgrad_tol=1e-6`: Convergence tolerance for the fixed-point gradient iteration -- `iterscheme=:fixed`: Scheme for differentiating one CTMRG iteration +- `gradient_alg_tol=1e-6`: Convergence tolerance for the fixed-point gradient iteration +- `gradient_alg_maxiter=30`: Maximal number of iterations for computing the CTMRG fixed-point gradient +- `gradient_alg_iterscheme=:fixed`: Scheme for differentiating one CTMRG iteration - `gradient_linsolver`: Default linear solver for the `LinSolver` gradient algorithm ``` - gradient_linsolver=KrylovKit.BiCGStab(; maxiter=fpgrad_maxiter, tol=fpgrad_tol) + gradient_linsolver=KrylovKit.BiCGStab(; maxiter=gradient_alg_maxiter, tol=gradient_alg_tol) ``` - `gradient_eigsolve`: Default eigsolver for the `EigSolver` gradient algorithm ``` - gradient_eigsolver = KrylovKit.Arnoldi(; maxiter=fpgrad_maxiter, tol=fpgrad_tol, eager=true) + gradient_eigsolver = KrylovKit.Arnoldi(; maxiter=gradient_alg_maxiter, tol=gradient_alg_tol, eager=true) ``` - `gradient_alg`: Algorithm to compute the gradient fixed-point ``` - gradient_alg = LinSolver(; solver=gradient_linsolver, iterscheme) + gradient_alg = LinSolver(; solver=gradient_linsolver, iterscheme=gradient_alg_iterscheme) ``` - `reuse_env=true`: If `true`, the current optimization step is initialized on the previous environment @@ -160,14 +160,16 @@ module Defaults ) # Optimization - const fpgrad_maxiter = 30 - const fpgrad_tol = 1e-6 - const gradient_linsolver = KrylovKit.BiCGStab(; maxiter=fpgrad_maxiter, tol=fpgrad_tol) - const gradient_eigsolver = KrylovKit.Arnoldi(; - maxiter=fpgrad_maxiter, tol=fpgrad_tol, eager=true + const gradient_alg_tol = 1e-6 
+ const gradient_alg_maxiter = 30 + const gradient_linsolver = BiCGStab(; + maxiter=gradient_alg_maxiter, tol=gradient_alg_tol ) - const iterscheme = :fixed - const gradient_alg = LinSolver(; solver=gradient_linsolver, iterscheme) + const gradient_eigsolver = Arnoldi(; + maxiter=gradient_alg_maxiter, tol=gradient_alg_tol, eager=true + ) + const gradient_alg_iterscheme = :fixed + const gradient_alg = LinSolver(; solver=gradient_linsolver, iterscheme=gradient_alg_iterscheme) const reuse_env = true const optimizer_tol = 1e-4 const optimizer_maxiter = 100 diff --git a/src/algorithms/optimization/fixed_point_differentiation.jl b/src/algorithms/optimization/fixed_point_differentiation.jl index 9ba15eeb..613fbda4 100644 --- a/src/algorithms/optimization/fixed_point_differentiation.jl +++ b/src/algorithms/optimization/fixed_point_differentiation.jl @@ -3,8 +3,8 @@ abstract type GradMode{F} end iterscheme(::GradMode{F}) where {F} = F """ - struct GeomSum(; maxiter=Defaults.fpgrad_maxiter, tol=Defaults.fpgrad_tol, - verbosity=0, iterscheme=Defaults.iterscheme) <: GradMode{iterscheme} + struct GeomSum(; tol=Defaults.gradient_alg_tol, maxiter=Defaults.gradient_alg_maxiter, + verbosity=0, iterscheme=Defaults.gradient_alg_iterscheme) <: GradMode{iterscheme} Gradient mode for CTMRG using explicit evaluation of the geometric sum. @@ -15,22 +15,22 @@ the differentiated iteration consists of a CTMRG iteration and a subsequent gaug such that `gauge_fix` will also be differentiated everytime a CTMRG derivative is computed. 
""" struct GeomSum{F} <: GradMode{F} - maxiter::Int tol::Real + maxiter::Int verbosity::Int end function GeomSum(; - maxiter=Defaults.fpgrad_maxiter, - tol=Defaults.fpgrad_tol, + tol=Defaults.gradient_alg_tol, + maxiter=Defaults.gradient_alg_maxiter, verbosity=0, - iterscheme=Defaults.iterscheme, + iterscheme=Defaults.gradient_alg_iterscheme, ) - return GeomSum{iterscheme}(maxiter, tol, verbosity) + return GeomSum{iterscheme}(tol, maxiter, verbosity) end """ - struct ManualIter(; maxiter=Defaults.fpgrad_maxiter, tol=Defaults.fpgrad_tol, - verbosity=0, iterscheme=Defaults.iterscheme) <: GradMode{iterscheme} + struct ManualIter(; tol=Defaults.gradient_alg_tol, maxiter=Defaults.gradient_alg_maxiter, + verbosity=0, iterscheme=Defaults.gradient_alg_iterscheme) <: GradMode{iterscheme} Gradient mode for CTMRG using manual iteration to solve the linear problem. @@ -41,21 +41,21 @@ the differentiated iteration consists of a CTMRG iteration and a subsequent gaug such that `gauge_fix` will also be differentiated everytime a CTMRG derivative is computed. """ struct ManualIter{F} <: GradMode{F} - maxiter::Int tol::Real + maxiter::Int verbosity::Int end function ManualIter(; - maxiter=Defaults.fpgrad_maxiter, - tol=Defaults.fpgrad_tol, + tol=Defaults.gradient_alg_tol, + maxiter=Defaults.gradient_alg_maxiter, verbosity=0, - iterscheme=Defaults.iterscheme, + iterscheme=Defaults.gradient_alg_iterscheme, ) - return ManualIter{iterscheme}(maxiter, tol, verbosity) + return ManualIter{iterscheme}(tol, maxiter, verbosity) end """ - struct LinSolver(; solver=KrylovKit.GMRES(), iterscheme=Defaults.iterscheme) <: GradMode{iterscheme} + struct LinSolver(; solver=KrylovKit.GMRES(), iterscheme=Defaults.gradient_alg_iterscheme) <: GradMode{iterscheme} Gradient mode wrapper around `KrylovKit.LinearSolver` for solving the gradient linear problem using iterative solvers. 
@@ -70,14 +70,14 @@ struct LinSolver{F} <: GradMode{F}
     solver::KrylovKit.LinearSolver
 end
 function LinSolver(;
-    solver=KrylovKit.BiCGStab(; maxiter=Defaults.fpgrad_maxiter, tol=Defaults.fpgrad_tol),
-    iterscheme=Defaults.iterscheme,
+    solver=Defaults.gradient_linsolver,
+    iterscheme=Defaults.gradient_alg_iterscheme,
 )
     return LinSolver{iterscheme}(solver)
 end
 
 """
-    struct EigSolver(; solver=KrylovKit.Arnoldi(), iterscheme=Defaults.iterscheme) <: GradMode{iterscheme}
+    struct EigSolver(; solver=Defaults.gradient_eigsolver, iterscheme=Defaults.gradient_alg_iterscheme) <: GradMode{iterscheme}
 
 Gradient mode wrapper around `KrylovKit.KrylovAlgorithm` for solving the gradient linear
 problem as an eigenvalue problem.
@@ -91,7 +91,7 @@ such that `gauge_fix` will also be differentiated everytime a CTMRG derivative i
 struct EigSolver{F} <: GradMode{F}
     solver::KrylovKit.KrylovAlgorithm
 end
-function EigSolver(; solver=Defauls.gradient_eigsolver, iterscheme=Defaults.iterscheme)
+function EigSolver(; solver=Defaults.gradient_eigsolver, iterscheme=Defaults.gradient_alg_iterscheme)
     return EigSolver{iterscheme}(solver)
 end
 
From 80961cf4aeac06e1383a1ccc6c5d9fc70450abb3 Mon Sep 17 00:00:00 2001
From: Paul Brehmer
Date: Wed, 12 Feb 2025 18:56:42 +0100
Subject: [PATCH 05/52] Add `fixedpoint_selector`

---
 .../optimization/peps_optimization.jl         | 119 +++++++++++++++++-
 1 file changed, 115 insertions(+), 4 deletions(-)

diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl
index aecb2132..cfc18067 100644
--- a/src/algorithms/optimization/peps_optimization.jl
+++ b/src/algorithms/optimization/peps_optimization.jl
@@ -43,7 +43,7 @@ function PEPSOptimize(;
 end
 
 """
-    fixedpoint(operator, peps₀::InfinitePEPS, env₀::CTMRGEnv; kwargs...)
+    fixedpoint(operator, peps₀::InfinitePEPS, env₀::CTMRGEnv; kwargs...) # TODO
     fixedpoint(operator, peps₀::InfinitePEPS, env₀::CTMRGEnv, alg::PEPSOptimize;
                finalize!=OptimKit._finalize!)
@@ -71,8 +71,8 @@ information `NamedTuple` which contains the following entries: """ function fixedpoint(operator, peps₀::InfinitePEPS, env₀::CTMRGEnv; kwargs...) throw(error("method not yet implemented")) - alg = fixedpoint_selector(; kwargs...) # TODO: implement fixedpoint_selector - return fixedpoint(operator, peps₀, env₀, alg) + alg, finalize! = fixedpoint_selector(; kwargs...) + return fixedpoint(operator, peps₀, env₀, alg; finalize!) end function fixedpoint( operator, @@ -131,7 +131,7 @@ function fixedpoint( return E, g end - info = ( + info = (; last_gradient=∂cost, fg_evaluations=numfg, costs=convergence_history[:, 1], @@ -144,6 +144,117 @@ function fixedpoint( return peps_final, env_final, cost, info end +""" + fixedpoint_selector(; + boundary_tol=Defaults.ctmrg_tol, + boundary_miniter=Defaults.ctmrg_maxiter, + boundary_maxiter=Defaults.ctmrg_miniter, + boundary_alg_type=Defaults.ctmrg_alg_type, + trscheme=Defaults.trscheme, + svd_fwd_alg=Defaults.svd_fwd_alg, + svd_rrule_alg=Defaults.svd_rrule_alg, + projector_alg_type=Defaults.projector_alg_type, + iterscheme=Defaults.gradient_alg_iterscheme, + reuse_env=Defaults.reuse_env, + gradient_alg_tol=Defaults.gradient_alg_tol, + gradient_alg_maxiter=Defaults.gradient_alg_maxiter, + gradient_alg_type=typeof(Defaults.gradient_alg), + optimizer_tol=Defaults.optimizer_tol, + optimizer_maxiter=Defaults.optimizer_maxiter, + lbfgs_memory=Defaults.lbfgs_memory, + symmetrization=nothing, + verbosity=1, + (finalize!)=OptimKit._finalize!, + ) + +Parse optimization keyword arguments onto the corresponding algorithm structs and return +a final `PEPSOptimize` to be used in `fixedpoint`. For a description of the keyword +arguments, see [`fixedpoint`](@ref). 
+""" +function fixedpoint_selector(; + boundary_tol=Defaults.ctmrg_tol, + boundary_miniter=Defaults.ctmrg_maxiter, + boundary_maxiter=Defaults.ctmrg_miniter, + boundary_alg_type=Defaults.ctmrg_alg_type, + trscheme=Defaults.trscheme, + svd_fwd_alg=Defaults.svd_fwd_alg, + svd_rrule_alg=Defaults.svd_rrule_alg, + projector_alg_type=Defaults.projector_alg_type, + iterscheme=Defaults.gradient_alg_iterscheme, + reuse_env=Defaults.reuse_env, + gradient_alg_tol=Defaults.gradient_alg_tol, + gradient_alg_maxiter=Defaults.gradient_alg_maxiter, + gradient_alg_type=typeof(Defaults.gradient_alg), + optimizer_tol=Defaults.optimizer_tol, + optimizer_maxiter=Defaults.optimizer_maxiter, + lbfgs_memory=Defaults.lbfgs_memory, + symmetrization=nothing, + verbosity=1, + (finalize!)=OptimKit._finalize!, +) + if verbosity ≤ 0 # disable output + optimizer_verbosity = -1 + boundary_verbosity = -1 + projector_verbosity = -1 + gradient_alg_verbosity = -1 + svd_rrule_verbosity = -1 + elseif verbosity == 1 # output only optimization steps and degeneracy warnings + optimizer_verbosity = 3 + boundary_verbosity = -1 + projector_verbosity = 1 + gradient_alg_verbosity = -1 + svd_rrule_verbosity = -1 + elseif verbosity == 2 # output optimization and boundary information + optimizer_verbosity = 3 + boundary_verbosity = 2 + projector_verbosity = 1 + gradient_alg_verbosity = -1 + svd_rrule_verbosity = -1 + elseif verbosity == 3 # verbose debug output + optimizer_verbosity = 3 + boundary_verbosity = 3 + projector_verbosity = 1 + gradient_alg_verbosity = 3 + svd_rrule_verbosity = 3 + end + + svd_alg = SVDAdjoint(; fwd_alg=svd_fwd_alg, rrule_alg=svd_rrule_alg) + projector_alg = projector_alg_type(svd_alg, trscheme, projector_verbosity) + boundary_alg = boundary_alg_type( + boundary_tol, boundary_maxiter, boundary_miniter, boundary_verbosity, projector_alg + ) + gradient_alg = if gradient_alg_type <: Union{GeomSum,ManIter} + gradient_alg_type(; + tol=gradient_alg_tol, + maxiter=gradient_alg_maxiter, + 
verbosity=gradient_alg_verbosity, + iterscheme, + ) + elseif gradient_alg_type <: LinSolver + solver = Defaults.gradient_linsolver.solver + @reset solver.maxiter = gradient_alg_maxiter + @reset solver.tol = gradient_alg_tol + @reset solver.verbosity = gradient_alg_verbosity + LinSolver(; solver, iterscheme) + elseif gradient_alg_type <: EigSolver + solver = Defaults.gradient_eigsolver.solver + @reset solver.maxiter = gradient_alg_maxiter + @reset solver.tol = gradient_alg_tol + @reset solver.verbosity = gradient_alg_verbosity + EigSolver(; solver, iterscheme) + end + optimizer = LBFGS( + lbfgs_memory; + gradtol=optimizer_tol, + maxiter=optimizer_maxiter, + verbosity=optimizer_verbosity, + ) + optimization_alg = PEPSOptimize(; + boundary_alg, gradient_alg, optimizer, reuse_env, symmetrization + ) + return optimization_alg, finalize! +end + # Update PEPS unit cell in non-mutating way # Note: Both x and η are InfinitePEPS during optimization function peps_retract(x, η, α) From aab50ff288617dd07277a6a7dd420096ac5ab153 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Thu, 13 Feb 2025 15:54:43 +0100 Subject: [PATCH 06/52] Add leading_boundary selector (for CTMRG) --- src/algorithms/ctmrg/ctmrg.jl | 41 ++++++++++++++++++++++++++++ src/algorithms/ctmrg/sequential.jl | 6 ++-- src/algorithms/ctmrg/simultaneous.jl | 6 ++-- 3 files changed, 47 insertions(+), 6 deletions(-) diff --git a/src/algorithms/ctmrg/ctmrg.jl b/src/algorithms/ctmrg/ctmrg.jl index 3dc5552e..6e7dd6d6 100644 --- a/src/algorithms/ctmrg/ctmrg.jl +++ b/src/algorithms/ctmrg/ctmrg.jl @@ -80,6 +80,47 @@ end @non_differentiable ctmrg_logfinish!(args...) @non_differentiable ctmrg_logcancel!(args...) 
+""" + select_leading_boundary_algorithm( + χenv::Int; + alg=SimultaneousCTMRG, + tol=Defaults.ctmrg_tol, + maxiter=Defaults.ctmrg_maxiter, + miniter=Defaults.ctmrg_miniter, + verbosity=2, + trscheme=Defaults.trscheme, + svd_alg=Defaults.svd_fwd_alg, + svd_rrule_alg=typeof(Defaults.svd_rrule_alg), + projector_alg=Defaults.projector_alg_type, + ) + +Parse optimization keyword arguments on to the corresponding algorithm structs and return +a final algorithm to be used in `fixedpoint`. For a description of the keyword arguments, +see [`leading_boundary`](@ref). +""" +function select_leading_boundary_algorithm( + χenv::Int; + alg=SimultaneousCTMRG, + tol=Defaults.ctmrg_tol, + maxiter=Defaults.ctmrg_maxiter, + miniter=Defaults.ctmrg_miniter, + verbosity=2, + trscheme=Defaults.trscheme, + svd_alg=Defaults.svd_fwd_alg, + svd_rrule_alg=typeof(Defaults.svd_rrule_alg), + projector_alg=Defaults.projector_alg_type, +) + svd_rrule_tol = boundary_tol + svd_rrule_algorithm = if svd_rrule_alg <: Union{GMRES,Arnoldi} + svd_rrule_alg(; tol=svd_rrule_tol, krylovdim=χenv + 24, verbosity) + elseif svd_rrule_alg <: BiCGStab + svd_rrule_alg(; tol=svd_rrule_tol, verbosity) + end + svd_algorithm = SVDAdjoint(; fwd_alg=svd_alg, rrule_alg=svd_rrule_algorithm) + projector_algorithm = projector_alg(svd_algorithm, trscheme, verbosity) + return alg(tol, maxiter, miniter, verbosity, projector_algorithm) +end + #= In order to compute an error measure, we compare the singular values of the current iteration with the previous one. However, when the virtual spaces change, this comparison is not directly possible. 
diff --git a/src/algorithms/ctmrg/sequential.jl b/src/algorithms/ctmrg/sequential.jl index 4d50a960..d5b8b141 100644 --- a/src/algorithms/ctmrg/sequential.jl +++ b/src/algorithms/ctmrg/sequential.jl @@ -1,8 +1,8 @@ """ SequentialCTMRG(; tol=Defaults.ctmrg_tol, maxiter=Defaults.ctmrg_maxiter, miniter=Defaults.ctmrg_miniter, verbosity=0, - projector_alg=typeof(Defaults.projector_alg), - svd_alg=SVDAdjoint(), trscheme=FixedSpaceTruncation()) + svd_alg=SVDAdjoint(), trscheme=Defaults.trscheme, + projector_alg=Defaults.projector_alg_type) CTMRG algorithm where the expansions and renormalization is performed sequentially column-wise. This is implemented as a growing and projecting step to the left, followed by @@ -21,9 +21,9 @@ function SequentialCTMRG(; maxiter=Defaults.ctmrg_maxiter, miniter=Defaults.ctmrg_miniter, verbosity=2, - projector_alg=Defaults.projector_alg_type, svd_alg=Defaults.svd_alg, trscheme=Defaults.trscheme, + projector_alg=Defaults.projector_alg_type, ) return SequentialCTMRG( tol, maxiter, miniter, verbosity, projector_alg(; svd_alg, trscheme, verbosity) diff --git a/src/algorithms/ctmrg/simultaneous.jl b/src/algorithms/ctmrg/simultaneous.jl index 79b8b6e1..555fe38a 100644 --- a/src/algorithms/ctmrg/simultaneous.jl +++ b/src/algorithms/ctmrg/simultaneous.jl @@ -1,8 +1,8 @@ """ SimultaneousCTMRG(; tol=Defaults.ctmrg_tol, maxiter=Defaults.ctmrg_maxiter, miniter=Defaults.ctmrg_miniter, verbosity=0, - projector_alg=Defaults.projector_alg, - svd_alg=SVDAdjoint(), trscheme=FixedSpaceTruncation()) + svd_alg=SVDAdjoint(), trscheme=Defaults.trscheme, + projector_alg=Defaults.projector_alg_type) CTMRG algorithm where all sides are grown and renormalized at the same time. In particular, the projectors are applied to the corners from two sides simultaneously. 
The projectors are @@ -21,9 +21,9 @@ function SimultaneousCTMRG(; maxiter=Defaults.ctmrg_maxiter, miniter=Defaults.ctmrg_miniter, verbosity=2, - projector_alg=Defaults.projector_alg_type, svd_alg=Defaults.svd_alg, trscheme=Defaults.trscheme, + projector_alg=Defaults.projector_alg_type, ) return SimultaneousCTMRG( tol, maxiter, miniter, verbosity, projector_alg(; svd_alg, trscheme, verbosity) From 3b21f40014d368cf2fb2d6d6fda9b25c1614d054 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Thu, 13 Feb 2025 15:55:12 +0100 Subject: [PATCH 07/52] Bundle kwargs in fixedpoint selector --- .../optimization/peps_optimization.jl | 199 ++++++++++-------- 1 file changed, 109 insertions(+), 90 deletions(-) diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index cfc18067..6194e6f8 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -71,7 +71,10 @@ information `NamedTuple` which contains the following entries: """ function fixedpoint(operator, peps₀::InfinitePEPS, env₀::CTMRGEnv; kwargs...) throw(error("method not yet implemented")) - alg, finalize! = fixedpoint_selector(; kwargs...) + χenv = maximum(env₀.corners) do corner # extract maximal environment dimension + return dim(space(corner, 1)) + end + alg, finalize! = select_fixedpoint_algorithm(χenv; kwargs...) return fixedpoint(operator, peps₀, env₀, alg; finalize!) 
end function fixedpoint( @@ -145,114 +148,130 @@ function fixedpoint( end """ - fixedpoint_selector(; - boundary_tol=Defaults.ctmrg_tol, - boundary_miniter=Defaults.ctmrg_maxiter, - boundary_maxiter=Defaults.ctmrg_miniter, - boundary_alg_type=Defaults.ctmrg_alg_type, - trscheme=Defaults.trscheme, - svd_fwd_alg=Defaults.svd_fwd_alg, - svd_rrule_alg=Defaults.svd_rrule_alg, - projector_alg_type=Defaults.projector_alg_type, - iterscheme=Defaults.gradient_alg_iterscheme, - reuse_env=Defaults.reuse_env, - gradient_alg_tol=Defaults.gradient_alg_tol, - gradient_alg_maxiter=Defaults.gradient_alg_maxiter, - gradient_alg_type=typeof(Defaults.gradient_alg), - optimizer_tol=Defaults.optimizer_tol, - optimizer_maxiter=Defaults.optimizer_maxiter, - lbfgs_memory=Defaults.lbfgs_memory, - symmetrization=nothing, + function select_fixedpoint_algorithm( + χenv::Int; + tol=Defaults.optimizer_tol, verbosity=1, - (finalize!)=OptimKit._finalize!, + boundary_alg, + gradient_alg, + optimization_alg, ) -Parse optimization keyword arguments onto the corresponding algorithm structs and return +Parse optimization keyword arguments on to the corresponding algorithm structs and return a final `PEPSOptimize` to be used in `fixedpoint`. For a description of the keyword arguments, see [`fixedpoint`](@ref). 
""" -function fixedpoint_selector(; - boundary_tol=Defaults.ctmrg_tol, - boundary_miniter=Defaults.ctmrg_maxiter, - boundary_maxiter=Defaults.ctmrg_miniter, - boundary_alg_type=Defaults.ctmrg_alg_type, - trscheme=Defaults.trscheme, - svd_fwd_alg=Defaults.svd_fwd_alg, - svd_rrule_alg=Defaults.svd_rrule_alg, - projector_alg_type=Defaults.projector_alg_type, - iterscheme=Defaults.gradient_alg_iterscheme, - reuse_env=Defaults.reuse_env, - gradient_alg_tol=Defaults.gradient_alg_tol, - gradient_alg_maxiter=Defaults.gradient_alg_maxiter, - gradient_alg_type=typeof(Defaults.gradient_alg), - optimizer_tol=Defaults.optimizer_tol, - optimizer_maxiter=Defaults.optimizer_maxiter, - lbfgs_memory=Defaults.lbfgs_memory, - symmetrization=nothing, - verbosity=1, - (finalize!)=OptimKit._finalize!, +function select_fixedpoint_algorithm( + χenv::Int; + tol=Defaults.optimizer_tol, # top-level tolerance + verbosity=1, # top-level verbosity + boundary_alg=nothing, + gradient_alg=nothing, + optimization_alg=nothing, ) + # top-level verbosity if verbosity ≤ 0 # disable output - optimizer_verbosity = -1 boundary_verbosity = -1 - projector_verbosity = -1 - gradient_alg_verbosity = -1 - svd_rrule_verbosity = -1 + gradient_verbosity = -1 + optimizer_verbosity = -1 elseif verbosity == 1 # output only optimization steps and degeneracy warnings - optimizer_verbosity = 3 boundary_verbosity = -1 - projector_verbosity = 1 - gradient_alg_verbosity = -1 - svd_rrule_verbosity = -1 - elseif verbosity == 2 # output optimization and boundary information + gradient_verbosity = -1 optimizer_verbosity = 3 + elseif verbosity == 2 # output optimization and boundary information boundary_verbosity = 2 - projector_verbosity = 1 - gradient_alg_verbosity = -1 - svd_rrule_verbosity = -1 - elseif verbosity == 3 # verbose debug output + gradient_verbosity = -1 optimizer_verbosity = 3 + elseif verbosity == 3 # verbose debug output boundary_verbosity = 3 - projector_verbosity = 1 - gradient_alg_verbosity = 3 - 
svd_rrule_verbosity = 3
+        gradient_verbosity = 3
+        optimizer_verbosity = 3
     end
 
-    svd_alg = SVDAdjoint(; fwd_alg=svd_fwd_alg, rrule_alg=svd_rrule_alg)
-    projector_alg = projector_alg_type(svd_alg, trscheme, projector_verbosity)
-    boundary_alg = boundary_alg_type(
-        boundary_tol, boundary_maxiter, boundary_miniter, boundary_verbosity, projector_alg
-    )
-    gradient_alg = if gradient_alg_type <: Union{GeomSum,ManIter}
-        gradient_alg_type(;
-            tol=gradient_alg_tol,
-            maxiter=gradient_alg_maxiter,
-            verbosity=gradient_alg_verbosity,
-            iterscheme,
+    # parse boundary algorithm
+    boundary_algorithm = if boundary_alg isa Union{SimultaneousCTMRG,SequentialCTMRG}
+        boundary_alg
+    elseif boundary_alg isa NamedTuple
+        boundary_kwargs = (;
+            alg=Defaults.ctmrg_alg_type,
+            tol=1e-4tol,
+            maxiter=Defaults.ctmrg_maxiter,
+            miniter=Defaults.ctmrg_miniter,
+            verbosity=boundary_verbosity,
+            trscheme=Defaults.trscheme,
+            svd_alg=Defaults.svd_fwd_alg,
+            svd_rrule_alg=typeof(Defaults.svd_rrule_alg),
+            projector_alg=Defaults.projector_alg_type,
+            boundary_alg..., # replaces all specified kwargs
         )
-    elseif gradient_alg_type <: LinSolver
-        solver = Defaults.gradient_linsolver.solver
-        @reset solver.maxiter = gradient_alg_maxiter
-        @reset solver.tol = gradient_alg_tol
-        @reset solver.verbosity = gradient_alg_verbosity
-        LinSolver(; solver, iterscheme)
-    elseif gradient_alg_type <: EigSolver
-        solver = Defaults.gradient_eigsolver.solver
-        @reset solver.maxiter = gradient_alg_maxiter
-        @reset solver.tol = gradient_alg_tol
-        @reset solver.verbosity = gradient_alg_verbosity
-        EigSolver(; solver, iterscheme)
+        select_leading_boundary_algorithm(χenv; boundary_kwargs...)
+    else
+        throw(ArgumentError("unknown boundary algorithm: $boundary_alg"))
     end
-    optimizer = LBFGS(
-        lbfgs_memory;
-        gradtol=optimizer_tol,
-        maxiter=optimizer_maxiter,
-        verbosity=optimizer_verbosity,
-    )
-    optimization_alg = PEPSOptimize(;
-        boundary_alg, gradient_alg, optimizer, reuse_env, symmetrization
-    )
-    return optimization_alg, finalize!
+
+    # parse fixed-point gradient algorithm
+    gradient_algorithm = if gradient_alg isa GradMode
+        gradient_alg
+    elseif gradient_alg isa NamedTuple
+        gradient_kwargs = (;
+            tol=1e-2tol,
+            maxiter=Defaults.gradient_alg_maxiter,
+            alg=typeof(Defaults.gradient_alg),
+            verbosity=gradient_verbosity,
+            iterscheme=Defaults.gradient_alg_iterscheme,
+            gradient_alg..., # replaces all specified kwargs
+        )
+        if gradient_kwargs.alg <: Union{GeomSum,ManualIter}
+            gradient_kwargs.alg(;
+                tol=gradient_kwargs.tol,
+                maxiter=gradient_kwargs.maxiter,
+                verbosity=gradient_kwargs.verbosity,
+                iterscheme=gradient_kwargs.iterscheme,
+            )
+        elseif gradient_kwargs.alg <: LinSolver
+            solver = Defaults.gradient_linsolver
+            @reset solver.maxiter = gradient_kwargs.maxiter
+            @reset solver.tol = gradient_kwargs.tol
+            @reset solver.verbosity = gradient_kwargs.verbosity
+            LinSolver(; solver, iterscheme=gradient_kwargs.iterscheme)
+        elseif gradient_kwargs.alg <: EigSolver
+            solver = Defaults.gradient_eigsolver
+            @reset solver.maxiter = gradient_kwargs.maxiter
+            @reset solver.tol = gradient_kwargs.tol
+            @reset solver.verbosity = gradient_kwargs.verbosity
+            EigSolver(; solver, iterscheme=gradient_kwargs.iterscheme)
+        end
+    else
+        throw(ArgumentError("unknown gradient algorithm: $gradient_alg"))
+    end
+
+    # construct final PEPSOptimize optimization algorithm
+    optimization_algorithm = if optimization_alg isa PEPSOptimize
+        optimization_alg
+    elseif optimization_alg isa NamedTuple
+        optimization_kwargs = (;
+            tol=tol,
+            maxiter=Defaults.optimizer_maxiter,
+            lbfgs_memory=Defaults.lbfgs_memory,
+            reuse_env=Defaults.reuse_env,
+            
symmetrization=nothing, + (finalize!)=OptimKit._finalize!, + optimization_alg..., # replaces all specified kwargs + ) + optimizer = LBFGS( + lbfgs_memory; + gradtol=optimization_kwargs.tol, + maxiter=optimization_kwargs.maxiter, + verbosity=optimizer_verbosity, + ) + PEPSOptimize( + boundary_algorithm, gradient_algorithm, optimizer, reuse_env, symmetrization + ) + else + throw(ArgumentError("unknown optimization algorithm: $optimization_alg")) + end + + return optimization_algorithm, finalize! end # Update PEPS unit cell in non-mutating way From a5606ed4a99575fbf91428b201f08f938fa130a3 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Thu, 13 Feb 2025 17:35:47 +0100 Subject: [PATCH 08/52] Make kwarg-based leading_boundary and fixedpoint runnable --- src/PEPSKit.jl | 6 +-- src/algorithms/ctmrg/ctmrg.jl | 34 +++++++++---- .../optimization/peps_optimization.jl | 48 +++++++++---------- 3 files changed, 50 insertions(+), 38 deletions(-) diff --git a/src/PEPSKit.jl b/src/PEPSKit.jl index 493e6083..8aaff1e0 100644 --- a/src/PEPSKit.jl +++ b/src/PEPSKit.jl @@ -1,9 +1,8 @@ module PEPSKit using LinearAlgebra, Statistics, Base.Threads, Base.Iterators, Printf -using Base: @kwdef using Compat -using Accessors: @set +using Accessors: @set, @reset using VectorInterface using TensorKit, KrylovKit, MPSKit, OptimKit, TensorOperations using ChainRulesCore, Zygote @@ -151,7 +150,8 @@ module Defaults const sparse = false const trscheme = FixedSpaceTruncation() const svd_fwd_alg = TensorKit.SDD() - const svd_rrule_alg = Arnoldi(; tol=ctmrg_tol, krylovdim=48, verbosity=-1) + const svd_rrule_type = Arnoldi + const svd_rrule_alg = svd_rrule_type(; tol=ctmrg_tol, krylovdim=48, verbosity=-1) const svd_alg = SVDAdjoint(; fwd_alg=svd_fwd_alg, rrule_alg=svd_rrule_alg) const projector_alg_type = HalfInfiniteProjector const projector_alg = projector_alg_type(; svd_alg, trscheme, verbosity=0) diff --git a/src/algorithms/ctmrg/ctmrg.jl b/src/algorithms/ctmrg/ctmrg.jl index 6e7dd6d6..b8ace385 100644 
--- a/src/algorithms/ctmrg/ctmrg.jl +++ b/src/algorithms/ctmrg/ctmrg.jl @@ -14,7 +14,9 @@ Perform a single CTMRG iteration in which all directions are being grown and ren function ctmrg_iteration(state, env, alg::CTMRGAlgorithm) end """ - MPSKit.leading_boundary([envinit], state, alg::CTMRGAlgorithm) + MPSKit.leading_boundary([env₀], state; kwargs...) + # expert version: + MPSKit.leading_boundary([env₀], state, alg::CTMRGAlgorithm) Contract `state` using CTMRG and return the CTM environment. Per default, a random initial environment is used. @@ -27,19 +29,31 @@ Different levels of output information are printed depending on `alg.verbosity`, suppresses all output, `1` only prints warnings, `2` gives information at the start and end, and `3` prints information every iteration. """ -function MPSKit.leading_boundary(state, alg::CTMRGAlgorithm) +function MPSKit.leading_boundary(state::InfiniteSquareNetwork; kwargs...) + return MPSKit.leading_boundary( + CTMRGEnv(state, oneunit(spacetype(state))), state; kwargs... + ) +end +function MPSKit.leading_boundary(env₀, state::InfiniteSquareNetwork; kwargs...) + χenv = maximum(env₀.corners) do corner # extract maximal environment dimension + return dim(space(corner, 1)) + end + alg = select_leading_boundary_algorithm(χenv; kwargs...) 
+ return MPSKit.leading_boundary(env₀, state, alg) +end +function MPSKit.leading_boundary(state::InfiniteSquareNetwork, alg::CTMRGAlgorithm) return MPSKit.leading_boundary(CTMRGEnv(state, oneunit(spacetype(state))), state, alg) end -function MPSKit.leading_boundary(envinit, state, alg::CTMRGAlgorithm) - CS = map(x -> tsvd(x)[2], envinit.corners) - TS = map(x -> tsvd(x)[2], envinit.edges) +function MPSKit.leading_boundary(env₀, state::InfiniteSquareNetwork, alg::CTMRGAlgorithm) + CS = map(x -> tsvd(x)[2], env₀.corners) + TS = map(x -> tsvd(x)[2], env₀.edges) η = one(real(scalartype(state))) - env = deepcopy(envinit) + env = deepcopy(env₀) log = ignore_derivatives(() -> MPSKit.IterLog("CTMRG")) return LoggingExtras.withlevel(; alg.verbosity) do - ctmrg_loginit!(log, η, state, envinit) + ctmrg_loginit!(log, η, state, env₀) local info for iter in 1:(alg.maxiter) env, info = ctmrg_iteration(state, env, alg) # Grow and renormalize in all 4 directions @@ -107,12 +121,12 @@ function select_leading_boundary_algorithm( verbosity=2, trscheme=Defaults.trscheme, svd_alg=Defaults.svd_fwd_alg, - svd_rrule_alg=typeof(Defaults.svd_rrule_alg), + svd_rrule_alg=Defaults.svd_rrule_type, + svd_rrule_tol=1e1tol, projector_alg=Defaults.projector_alg_type, ) - svd_rrule_tol = boundary_tol svd_rrule_algorithm = if svd_rrule_alg <: Union{GMRES,Arnoldi} - svd_rrule_alg(; tol=svd_rrule_tol, krylovdim=χenv + 24, verbosity) + svd_rrule_alg(; tol=svd_rrule_tol, krylovdim=χenv + 24, verbosity=verbosity - 2) elseif svd_rrule_alg <: BiCGStab svd_rrule_alg(; tol=svd_rrule_tol, verbosity) end diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index 6194e6f8..37d7a944 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -43,7 +43,8 @@ function PEPSOptimize(; end """ - fixedpoint(operator, peps₀::InfinitePEPS, env₀::CTMRGEnv; kwargs...) 
# TODO + fixedpoint(operator, peps₀::InfinitePEPS, env₀::CTMRGEnv; kwargs...) + # expert version: fixedpoint(operator, peps₀::InfinitePEPS, env₀::CTMRGEnv, alg::PEPSOptimize; finalize!=OptimKit._finalize!) @@ -70,7 +71,6 @@ information `NamedTuple` which contains the following entries: - `times`: history of times each optimization step took """ function fixedpoint(operator, peps₀::InfinitePEPS, env₀::CTMRGEnv; kwargs...) - throw(error("method not yet implemented")) χenv = maximum(env₀.corners) do corner # extract maximal environment dimension return dim(space(corner, 1)) end @@ -164,10 +164,11 @@ arguments, see [`fixedpoint`](@ref). function select_fixedpoint_algorithm( χenv::Int; tol=Defaults.optimizer_tol, # top-level tolerance - verbosity=1, # top-level verbosity - boundary_alg=nothing, - gradient_alg=nothing, - optimization_alg=nothing, + verbosity=2, # top-level verbosity + boundary_alg=(;), + gradient_alg=(;), + optimization_alg=(;), + (finalize!)=OptimKit._finalize!, ) # top-level verbosity if verbosity ≤ 0 # disable output @@ -189,22 +190,16 @@ function select_fixedpoint_algorithm( end # parse boundary algorithm - boundary_algorithm = if boundary_alg isa Union{SimultaneousCTMRG,SequentialCTMRG} + boundary_algorithm = if boundary_alg isa CTMRGAlgorithm boundary_alg elseif boundary_alg isa NamedTuple - boundary_kwargs = (; - alg=Defaults.ctmrg_alg_type, + select_leading_boundary_algorithm( + χenv; tol=1e-4tol, - maxiter=Defaults.ctmrg_miniter, - miniter=Defaults.ctmrg_maxiter, verbosity=boundary_verbosity, - trscheme=Defaults.trscheme, - svd_alg=Defaults.svd_fwd_alg, - svd_rrule_alg=typeof(Defaults.svd_rrule_alg), - projector_alg=Defaults.projector_alg_type, - boundary_alg..., # replaces all specified kwargs + svd_rrule_tol=1e-3tol, + boundary_alg..., ) - select_leading_boundary_algorithm(χenv; boundary_kwargs...) 
else throw(ArgumentError("unknown boundary algorithm: $boundary_alg")) end @@ -221,7 +216,7 @@ function select_fixedpoint_algorithm( iterscheme=Defaults.gradient_alg_iterscheme, gradient_alg..., # replaces all specified kwargs ) - if gradient_kwargs.alg <: Union{GeomSum,ManIter} + if gradient_kwargs.alg <: Union{GeomSum,ManualIter} gradient_alg_type(; tol=gradient_kwargs.tol, maxiter=gradient_kwargs.maxiter, @@ -229,13 +224,13 @@ function select_fixedpoint_algorithm( iterscheme=gradient_kwargs.iterscheme, ) elseif gradient_kwargs.alg <: LinSolver - solver = Defaults.gradient_linsolver.solver + solver = Defaults.gradient_linsolver @reset solver.maxiter = gradient_kwargs.maxiter @reset solver.tol = gradient_kwargs.tol @reset solver.verbosity = gradient_kwargs.verbosity LinSolver(; solver, iterscheme=gradient_kwargs.iterscheme) elseif gradient_kwargs.alg <: EigSolver - solver = Defaults.gradient_eigsolver.solver + solver = Defaults.gradient_eigsolver @reset solver.maxiter = gradient_kwargs.maxiter @reset solver.tol = gradient_kwargs.tol @reset solver.verbosity = gradient_kwargs.verbosity @@ -246,26 +241,29 @@ function select_fixedpoint_algorithm( end # construct final PEPSOptimize optimization algorithm - optimization_algorithm = if optimizer_alg isa OptimKit.OptimizationAlgorithm + optimization_algorithm = if optimization_alg isa OptimKit.OptimizationAlgorithm optimization_alg - elseif optimization_algorithm isa NamedTuple + elseif optimization_alg isa NamedTuple optimization_kwargs = (; tol=tol, maxiter=Defaults.optimizer_maxiter, lbfgs_memory=Defaults.lbfgs_memory, reuse_env=Defaults.reuse_env, symmetrization=nothing, - (finalize!)=OptimKit._finalize!, optimization_alg..., # replaces all specified kwargs ) optimizer = LBFGS( - lbfgs_memory; + optimization_kwargs.lbfgs_memory; gradtol=optimization_kwargs.tol, maxiter=optimization_kwargs.maxiter, verbosity=optimizer_verbosity, ) PEPSOptimize( - boundary_algorithm, gradient_algorithm, optimizer, reuse_env, 
symmetrization + boundary_algorithm, + gradient_algorithm, + optimizer, + optimization_kwargs.reuse_env, + optimization_kwargs.symmetrization, ) else throw(ArgumentError("unknown optimization algorithm: $optimization_alg")) From ef99d4c9c2e2e146d3fd7ad7969fc8c54a854bf4 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Thu, 13 Feb 2025 18:19:23 +0100 Subject: [PATCH 09/52] Add docstrings --- src/PEPSKit.jl | 9 ++++-- src/algorithms/ctmrg/ctmrg.jl | 29 ++++++++++++++----- .../fixed_point_differentiation.jl | 5 +--- .../optimization/peps_optimization.jl | 28 +++++++++++++----- 4 files changed, 49 insertions(+), 22 deletions(-) diff --git a/src/PEPSKit.jl b/src/PEPSKit.jl index 8aaff1e0..161d2d9b 100644 --- a/src/PEPSKit.jl +++ b/src/PEPSKit.jl @@ -70,10 +70,11 @@ Module containing default algorithm parameter values and arguments. - `ctmrg_alg_type=SimultaneousCTMRG`: Default CTMRG algorithm variant - `trscheme=FixedSpaceTruncation()`: Truncation scheme for SVDs and other decompositions - `svd_fwd_alg=TensorKit.SDD()`: SVD algorithm that is used in the forward pass -- `svd_rrule_alg`: Reverse-rule for differentiating that SVD +- `svd_rrule_type = Arnoldi`: Default solver type for SVD reverse-rule algorithm +- `svd_rrule_alg`: Reverse-rule algorithm for differentiating a SVD ``` - svd_rrule_alg = Arnoldi(; tol=ctmrg_tol, krylovdim=48, verbosity=-1) + svd_rrule_alg = svd_rrule_type(; tol=ctmrg_tol, krylovdim=48, verbosity=-1) ``` - `svd_alg`: Combination of forward and reverse SVD algorithms @@ -169,7 +170,9 @@ module Defaults maxiter=gradient_alg_maxiter, tol=gradient_alg_tol, eager=true ) const gradient_alg_iterscheme = :fixed - const gradient_alg = LinSolver(; solver=gradient_linsolver, iterscheme=gradient_alg_iterscheme) + const gradient_alg = LinSolver(; + solver=gradient_linsolver, iterscheme=gradient_alg_iterscheme + ) const reuse_env = true const optimizer_tol = 1e-4 const optimizer_maxiter = 100 diff --git a/src/algorithms/ctmrg/ctmrg.jl 
b/src/algorithms/ctmrg/ctmrg.jl index b8ace385..7ad2858a 100644 --- a/src/algorithms/ctmrg/ctmrg.jl +++ b/src/algorithms/ctmrg/ctmrg.jl @@ -21,13 +21,25 @@ function ctmrg_iteration(state, env, alg::CTMRGAlgorithm) end Contract `state` using CTMRG and return the CTM environment. Per default, a random initial environment is used. -Each CTMRG run is converged up to `alg.tol` where the singular value convergence -of the corners and edges is checked. The maximal and minimal number of CTMRG -iterations is set with `alg.maxiter` and `alg.miniter`. - -Different levels of output information are printed depending on `alg.verbosity`, where `0` -suppresses all output, `1` only prints warnings, `2` gives information at the start and -end, and `3` prints information every iteration. +The algorithm can be supplied via the keyword arguments or directly as an `CTMRGAlgorithm` +struct. The following keyword arguments are supported: +- `alg=SimultaneousCTMRG`: Variant of the CTMRG algorithm; can be any `CTMRGAlgorithm` type +- `tol=Defaults.ctmrg_tol`: Tolerance checking singular value and norm convergence +- `maxiter=Defaults.ctmrg_maxiter`: Maximal number of CTMRG iterations per run +- `miniter=Defaults.ctmrg_miniter`: Minimal number of CTMRG carried out +- `verbosity=2`: Determines different levels of output information, where `0` suppresses + all output, `1` only prints warnings, `2` gives information at the start and end, and + `3` prints information every iteration +- `trscheme=Defaults.trscheme`: SVD truncation scheme during projector computation; can be + any `TruncationScheme` supported by the provided SVD algorithm +- `svd_alg=Defaults.svd_fwd_alg`: SVD algorithm used for computing projectors +- `svd_rrule_alg=Defaults.svd_rrule_alg_type`: Algorithm for differentiating SVDs; currently + supported through KrylovKit where `GMRES`, `BiCGStab` and `Arnoldi` are supported (only + relevant if `leading_boundary` is differentiated) +- `svd_rrule_tol=1e1tol`: Convergence tolerance 
for SVD reverse-rule algorithm (only + relevant if `leading_boundary` is differentiated) +- `projector_alg=Defaults.projector_alg_type`: Projector algorithm type, where any + `ProjectorAlgorithm` can be used """ function MPSKit.leading_boundary(state::InfiniteSquareNetwork; kwargs...) return MPSKit.leading_boundary( @@ -104,7 +116,8 @@ end verbosity=2, trscheme=Defaults.trscheme, svd_alg=Defaults.svd_fwd_alg, - svd_rrule_alg=typeof(Defaults.svd_rrule_alg), + svd_rrule_alg=Defaults.svd_rrule_type, + svd_rrule_tol=1e1tol, projector_alg=Defaults.projector_alg_type, ) diff --git a/src/algorithms/optimization/fixed_point_differentiation.jl b/src/algorithms/optimization/fixed_point_differentiation.jl index 613fbda4..792c4095 100644 --- a/src/algorithms/optimization/fixed_point_differentiation.jl +++ b/src/algorithms/optimization/fixed_point_differentiation.jl @@ -69,10 +69,7 @@ such that `gauge_fix` will also be differentiated everytime a CTMRG derivative i struct LinSolver{F} <: GradMode{F} solver::KrylovKit.LinearSolver end -function LinSolver(; - solver=Defaults.gradient_linsolver, - iterscheme=Defaults.iterscheme, -) +function LinSolver(; solver=Defaults.gradient_linsolver, iterscheme=Defaults.iterscheme) return LinSolver{iterscheme}(solver) end diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index 37d7a944..b974cfc4 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -1,6 +1,7 @@ """ - PEPSOptimize{G}(; boundary_alg=Defaults.ctmrg_alg, optimizer::OptimKit.OptimizationAlgorithm=Defaults.optimizer - reuse_env::Bool=true, gradient_alg::G=Defaults.gradient_alg) + PEPSOptimize{G}(; boundary_alg=Defaults.ctmrg_alg, gradient_alg::G=Defaults.gradient_alg + optimizer::OptimKit.OptimizationAlgorithm=Defaults.optimizer + reuse_env::Bool=true, symmetrization::Union{Nothing,SymmetrizationStyle}=nothing) Algorithm struct that represent PEPS 
ground-state optimization using AD. Set the algorithm to contract the infinite PEPS in `boundary_alg`; @@ -9,6 +10,9 @@ based on the CTMRG gradient and updates the PEPS parameters. In this optimizatio the CTMRG runs can be started on the converged environments of the previous optimizer step by setting `reuse_env` to true. Otherwise a random environment is used at each step. The CTMRG gradient itself is computed using the `gradient_alg` algorithm. +The `symmetrization` field accepts `nothing` or a `SymmetrizationStyle`, in which case the +PEPS and PEPS gradient are symmetrized after each optimization iteration. Note that this +requires an initial symmmetric PEPS and environment to converge properly. """ struct PEPSOptimize{G} boundary_alg::CTMRGAlgorithm @@ -49,8 +53,8 @@ end finalize!=OptimKit._finalize!) Find the fixed point of `operator` (i.e. the ground state) starting from `peps₀` according -to the optimization parameters supplied in `alg`. The initial environment `env₀` serves as -an initial guess for the first CTMRG run. By default, a random initial environment is used. +to the supplied optimization parameters. The initial environment `env₀` serves as an +initial guess for the first CTMRG run. By default, a random initial environment is used. The `finalize!` kwarg can be used to insert a function call after each optimization step by utilizing the `finalize!` kwarg of `OptimKit.optimize`. @@ -59,6 +63,15 @@ The `symmetrization` kwarg accepts `nothing` or a `SymmetrizationStyle`, in whic PEPS and PEPS gradient are symmetrized after each optimization iteration. Note that this requires a symmmetric `peps₀` and `env₀` to converge properly. +The optimization parameters can be supplied via the keyword arguments or directly as an +`PEPSOptimize` struct. 
The following keyword arguments are supported: +- `tol=Defaults.optimizer_tol`: TODO +- `verbosity=1`: TODO +- `boundary_alg=(; ...)`: TODO +- `gradient_alg=(; ...)`: TODO +- `optimization_alg=(; ...)`: TODO +- `(finalize!)=OptimKit._finalize!`: TODO + The function returns the final PEPS, CTMRG environment and cost value, as well as an information `NamedTuple` which contains the following entries: - `last_gradient`: last gradient of the cost function @@ -152,9 +165,10 @@ end χenv::Int; tol=Defaults.optimizer_tol, verbosity=1, - boundary_alg, - gradient_alg, - optimization_alg, + boundary_alg=(;), + gradient_alg=(;), + optimization_alg=(;), + (finalize!)=OptimKit._finalize!, ) Parse optimization keyword arguments on to the corresponding algorithm structs and return From 46baa3c02a42c1891c0af24db4801dacc92457dd Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Fri, 14 Feb 2025 11:26:55 +0100 Subject: [PATCH 10/52] Add more docstrings --- src/PEPSKit.jl | 10 ++-- src/algorithms/ctmrg/ctmrg.jl | 9 ++-- .../optimization/peps_optimization.jl | 51 +++++++++++++------ 3 files changed, 46 insertions(+), 24 deletions(-) diff --git a/src/PEPSKit.jl b/src/PEPSKit.jl index 161d2d9b..64471ef2 100644 --- a/src/PEPSKit.jl +++ b/src/PEPSKit.jl @@ -120,11 +120,11 @@ Module containing default algorithm parameter values and arguments. 
gradient_alg = LinSolver(; solver=gradient_linsolver, iterscheme=gradient_alg_iterscheme) ``` -- `reuse_env=true`: If `true`, the current optimization step is initialized on the previous environment - -- `optimizer_tol`: Gradient norm tolerance of the optimizer -- `optimizer_maxiter`: Maximal number of optimization steps -- `lbfgs_memory`: Size of limited memory representation of BFGS Hessian matrix +- `reuse_env=true`: If `true`, the current optimization step is initialized on the previous + environment, otherwise a random environment is used +- `optimizer_tol=1e-4`: Gradient norm tolerance of the optimizer +- `optimizer_maxiter=100`: Maximal number of optimization steps +- `lbfgs_memory=20`: Size of limited memory representation of BFGS Hessian matrix - `optimizer`: Default `OptimKit.OptimizerAlgorithm` for PEPS optimization ``` diff --git a/src/algorithms/ctmrg/ctmrg.jl b/src/algorithms/ctmrg/ctmrg.jl index 7ad2858a..7df280dc 100644 --- a/src/algorithms/ctmrg/ctmrg.jl +++ b/src/algorithms/ctmrg/ctmrg.jl @@ -24,12 +24,13 @@ initial environment is used. The algorithm can be supplied via the keyword arguments or directly as an `CTMRGAlgorithm` struct. 
The following keyword arguments are supported: - `alg=SimultaneousCTMRG`: Variant of the CTMRG algorithm; can be any `CTMRGAlgorithm` type -- `tol=Defaults.ctmrg_tol`: Tolerance checking singular value and norm convergence +- `tol=Defaults.ctmrg_tol`: Tolerance checking singular value and norm convergence; also + sets related tolerances to sensible defaults unless they are explicitly specified - `maxiter=Defaults.ctmrg_maxiter`: Maximal number of CTMRG iterations per run - `miniter=Defaults.ctmrg_miniter`: Minimal number of CTMRG carried out -- `verbosity=2`: Determines different levels of output information, where `0` suppresses - all output, `1` only prints warnings, `2` gives information at the start and end, and - `3` prints information every iteration +- `verbosity=2`: Overall output information verbosity level, where `0` suppresses + all output, `1` only prints warnings, `2` gives information at the start and end, + `3` prints information every iteration, and `4` gives extra debug information - `trscheme=Defaults.trscheme`: SVD truncation scheme during projector computation; can be any `TruncationScheme` supported by the provided SVD algorithm - `svd_alg=Defaults.svd_fwd_alg`: SVD algorithm used for computing projectors diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index b974cfc4..d283ccb6 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -56,21 +56,42 @@ Find the fixed point of `operator` (i.e. the ground state) starting from `peps to the supplied optimization parameters. The initial environment `env₀` serves as an initial guess for the first CTMRG run. By default, a random initial environment is used. -The `finalize!` kwarg can be used to insert a function call after each optimization step -by utilizing the `finalize!` kwarg of `OptimKit.optimize`. 
-The function maps `(peps, env), f, g = finalize!((peps, env), f, g, numiter)`. -The `symmetrization` kwarg accepts `nothing` or a `SymmetrizationStyle`, in which case the -PEPS and PEPS gradient are symmetrized after each optimization iteration. Note that this -requires a symmmetric `peps₀` and `env₀` to converge properly. - -The optimization parameters can be supplied via the keyword arguments or directly as an +The optimization parameters can be supplied via the keyword arguments or directly as a `PEPSOptimize` struct. The following keyword arguments are supported: -- `tol=Defaults.optimizer_tol`: TODO -- `verbosity=1`: TODO -- `boundary_alg=(; ...)`: TODO -- `gradient_alg=(; ...)`: TODO -- `optimization_alg=(; ...)`: TODO -- `(finalize!)=OptimKit._finalize!`: TODO +- `tol=Defaults.optimizer_tol`: Overall tolerance for gradient norm convergence of the + optimizer; sets related tolerance such as the boundary and boundary-gradient tolerances + to sensible defaults unless they are explictly specified +- `verbosity=1`: Overall output information verbosity level, where `0` suppresses + all output, `1` only prints the optimizer output and warnings, `2` additionally prints + boundary information, and `3` prints all information including AD debug outputs +- `boundary_alg`: Boundary algorithm either specified as a `NamedTuple` of keyword + arguments or directly as a `CTMRGAlgorithm`; see [`leading_boundary`](@ref) for a + description of all possible keyword arguments +- `gradient_alg`: Algorithm for computing the boundary fixed-point gradient + specified either as a `NamedTuple` of keyword arguments or directly as a `GradMode`. 
+ The supported keyword arguments are: + - `tol=1e-2tol`: Convergence tolerance for the fixed-point gradient iteration + - `maxiter=Defaults.gradient_alg_maxiter`: Maximal number of gradient problem iterations + - `alg=typeof(Defaults.gradient_alg)`: Gradient algorithm type, can be any `GradMode` type + - `verbosity=gradient_verbosity`: Gradient output verbosity, ≤0 by default to disable too + verbose printing; should only be enabled for debug purposes + - `iterscheme=Defaults.gradient_alg_iterscheme`: CTMRG iteration scheme determining mode + of differentiation; can be `:fixed` (SVD with fixed gauge) or `:diffgauge` (differentiate + gauge-fixing routine) +- `optimization_alg`: PEPS optimization algorithm, specified either as a `NamedTuple` of + keyword arguments or directly as a `PEPSOptimize`. By default, `OptimKit.LBFGS` is used + in combination with a `HagerZhangLineSearch`. Possible keyword arguments are: + - `tol=tol`: Gradient norm tolerance of the optimizer + - `maxiter=Defaults.optimizer_maxiter`: Maximal number of optimization steps + - `lbfgs_memory=Defaults.lbfgs_memory`: Size of limited memory representation of BFGS + Hessian matrix + - `reuse_env=Defaults.reuse_env`: If `true`, the current optimization step is initialized + on the previous environment, otherwise a random environment is used + - `symmetrization=nothing`: Accepts `nothing` or a `SymmetrizationStyle`, in which case + the PEPS and PEPS gradient are symmetrized after each optimization iteration +- `(finalize!)=OptimKit._finalize!`: Inserts a `finalize!` function call after each + optimization step by utilizing the `finalize!` kwarg of `OptimKit.optimize`. + The function maps `(peps, env), f, g = finalize!((peps, env), f, g, numiter)`. 
The function returns the final PEPS, CTMRG environment and cost value, as well as an information `NamedTuple` which contains the following entries: @@ -255,7 +276,7 @@ function select_fixedpoint_algorithm( end # construct final PEPSOptimize optimization algorithm - optimization_algorithm = if optimization_alg isa OptimKit.OptimizationAlgorithm + optimization_algorithm = if optimization_alg isa PEPSOptimize optimization_alg elseif optimization_alg isa NamedTuple optimization_kwargs = (; From e9c35d64d08116e2f620b41b3c2970c8476b8d35 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Fri, 14 Feb 2025 15:34:45 +0100 Subject: [PATCH 11/52] Use kwarg-based methods in tests --- .../fixed_point_differentiation.jl | 8 +++++-- test/boundarymps/vumps.jl | 8 ++----- test/ctmrg/flavors.jl | 18 ++++++--------- test/ctmrg/gaugefix.jl | 2 +- test/ctmrg/partition_function.jl | 7 +++--- test/heisenberg.jl | 22 ++++++++----------- test/j1j2_model.jl | 18 +++++++-------- test/pwave.jl | 8 ++----- test/tf_ising.jl | 11 ++++------ 9 files changed, 43 insertions(+), 59 deletions(-) diff --git a/src/algorithms/optimization/fixed_point_differentiation.jl b/src/algorithms/optimization/fixed_point_differentiation.jl index 792c4095..f98cf5fd 100644 --- a/src/algorithms/optimization/fixed_point_differentiation.jl +++ b/src/algorithms/optimization/fixed_point_differentiation.jl @@ -69,7 +69,9 @@ such that `gauge_fix` will also be differentiated everytime a CTMRG derivative i struct LinSolver{F} <: GradMode{F} solver::KrylovKit.LinearSolver end -function LinSolver(; solver=Defaults.gradient_linsolver, iterscheme=Defaults.iterscheme) +function LinSolver(; + solver=Defaults.gradient_linsolver, iterscheme=Defaults.gradient_alg_iterscheme +) return LinSolver{iterscheme}(solver) end @@ -88,7 +90,9 @@ such that `gauge_fix` will also be differentiated everytime a CTMRG derivative i struct EigSolver{F} <: GradMode{F} solver::KrylovKit.KrylovAlgorithm end -function EigSolver(; 
solver=Defaults.gradient_eigsolver, iterscheme=Defaults.iterscheme) +function EigSolver(; + solver=Defaults.gradient_eigsolver, iterscheme=Defaults.gradient_alg_iterscheme +) return EigSolver{iterscheme}(solver) end diff --git a/test/boundarymps/vumps.jl b/test/boundarymps/vumps.jl index 8b816ad6..676b32df 100644 --- a/test/boundarymps/vumps.jl +++ b/test/boundarymps/vumps.jl @@ -17,9 +17,7 @@ const vumps_alg = VUMPS(; alg_eigsolve=MPSKit.Defaults.alg_eigsolve(; ishermitia mps, env, ϵ = leading_boundary(mps, T, vumps_alg) N = abs(sum(expectation_value(mps, T))) - ctm, = leading_boundary( - CTMRGEnv(psi, ComplexSpace(20)), psi, SimultaneousCTMRG(; verbosity=1) - ) + ctm, = leading_boundary(CTMRGEnv(psi, ComplexSpace(20)), psi) N´ = abs(norm(psi, ctm)) @test N ≈ N´ atol = 1e-3 @@ -33,9 +31,7 @@ end mps, env, ϵ = leading_boundary(mps, T, vumps_alg) N = abs(prod(expectation_value(mps, T))) - ctm, = leading_boundary( - CTMRGEnv(psi, ComplexSpace(20)), psi, SimultaneousCTMRG(; verbosity=1) - ) + ctm, = leading_boundary(CTMRGEnv(psi, ComplexSpace(20)), psi) N´ = abs(norm(psi, ctm)) @test N ≈ N´ rtol = 1e-2 diff --git a/test/ctmrg/flavors.jl b/test/ctmrg/flavors.jl index b3d935a5..75a08c02 100644 --- a/test/ctmrg/flavors.jl +++ b/test/ctmrg/flavors.jl @@ -7,8 +7,6 @@ using PEPSKit # initialize parameters χbond = 2 χenv = 16 -ctm_alg_sequential = SequentialCTMRG() -ctm_alg_simultaneous = SimultaneousCTMRG() unitcells = [(1, 1), (3, 4)] projector_algs = [HalfInfiniteProjector, FullInfiniteProjector] @@ -20,10 +18,10 @@ projector_algs = [HalfInfiniteProjector, FullInfiniteProjector] Random.seed!(32350283290358) psi = InfinitePEPS(2, χbond; unitcell) env_sequential, = leading_boundary( - CTMRGEnv(psi, ComplexSpace(χenv)), psi, ctm_alg_sequential + CTMRGEnv(psi, ComplexSpace(χenv)), psi; alg=SequentialCTMRG, projector_alg ) env_simultaneous, = leading_boundary( - CTMRGEnv(psi, ComplexSpace(χenv)), psi, ctm_alg_simultaneous + CTMRGEnv(psi, ComplexSpace(χenv)), psi; 
alg=SimultaneousCTMRG, projector_alg ) # compare norms @@ -56,19 +54,17 @@ projector_algs = [HalfInfiniteProjector, FullInfiniteProjector] end # test fixedspace actually fixes space -@testset "Fixedspace truncation using $ctmrg_alg and $projector_alg" for ( - ctmrg_alg, projector_alg -) in Iterators.product( +@testset "Fixedspace truncation using $alg and $projector_alg" for (alg, projector_alg) in + Iterators.product( [SequentialCTMRG, SimultaneousCTMRG], projector_algs ) - ctm_alg = ctmrg_alg(; - tol=1e-6, maxiter=1, verbosity=0, trscheme=FixedSpaceTruncation(), projector_alg - ) Ds = fill(2, 3, 3) χs = [16 17 18; 15 20 21; 14 19 22] psi = InfinitePEPS(Ds, Ds, Ds) env = CTMRGEnv(psi, rand(10:20, 3, 3), rand(10:20, 3, 3)) - env2, = leading_boundary(env, psi, ctm_alg) + env2, = leading_boundary( + env, psi; alg, maxiter=1, trscheme=FixedSpaceTruncation(), projector_alg + ) # check that the space is fixed @test all(space.(env.corners) .== space.(env2.corners)) diff --git a/test/ctmrg/gaugefix.jl b/test/ctmrg/gaugefix.jl index 52231498..897b9cdc 100644 --- a/test/ctmrg/gaugefix.jl +++ b/test/ctmrg/gaugefix.jl @@ -20,7 +20,7 @@ function _pre_converge_env( Random.seed!(seed) # Seed RNG to make random environment consistent psi = InfinitePEPS(rand, T, physical_space, peps_space; unitcell) env₀ = CTMRGEnv(psi, ctm_space) - env_conv, = leading_boundary(env₀, psi, SequentialCTMRG(; tol)) + env_conv, = leading_boundary(env₀, psi; alg=SequentialCTMRG, tol) return env_conv, psi end diff --git a/test/ctmrg/partition_function.jl b/test/ctmrg/partition_function.jl index 13a20ffb..4d12ce5e 100644 --- a/test/ctmrg/partition_function.jl +++ b/test/ctmrg/partition_function.jl @@ -92,13 +92,12 @@ env0 = CTMRGEnv(Z, χenv) ctm_styles = [SequentialCTMRG, SimultaneousCTMRG] projector_algs = [HalfInfiniteProjector, FullInfiniteProjector] -@testset "Classical Ising partition function using $ctm_style with $projector_alg" for ( - ctm_style, projector_alg +@testset "Classical Ising partition 
function using $alg with $projector_alg" for ( + alg, projector_alg ) in Iterators.product( ctm_styles, projector_algs ) - ctm_alg = ctm_style(; maxiter=150, projector_alg) - env, = leading_boundary(env0, Z, ctm_alg) + env, = leading_boundary(env0, Z; alg, maxiter=150, projector_alg) # check observables λ = PEPSKit.value(Z, env) diff --git a/test/heisenberg.jl b/test/heisenberg.jl index 24ea65ae..16e1f071 100644 --- a/test/heisenberg.jl +++ b/test/heisenberg.jl @@ -9,10 +9,7 @@ using OptimKit # initialize parameters Dbond = 2 χenv = 16 -ctm_alg = SimultaneousCTMRG() -opt_alg = PEPSOptimize(; - boundary_alg=ctm_alg, optimizer=LBFGS(4; gradtol=1e-3, verbosity=3) -) +gradtol = 1e-3 # compare against Juraj Hasik's data: # https://github.com/jurajHasik/j1j2_ipeps_states/blob/main/single-site_pg-C4v-A1/j20.0/state_1s_A1_j20.0_D2_chi_opt48.dat E_ref = -0.6602310934799577 @@ -22,10 +19,10 @@ E_ref = -0.6602310934799577 Random.seed!(123) H = heisenberg_XYZ(InfiniteSquare()) peps₀ = InfinitePEPS(2, Dbond) - env₀, = leading_boundary(CTMRGEnv(peps₀, ComplexSpace(χenv)), peps₀, ctm_alg) + env₀, = leading_boundary(CTMRGEnv(peps₀, ComplexSpace(χenv)), peps₀) # optimize energy and compute correlation lengths - peps, env, E, = fixedpoint(H, peps₀, env₀, opt_alg) + peps, env, E, = fixedpoint(H, peps₀, env₀; tol=gradtol) ξ_h, ξ_v, = correlation_length(peps, env) @test E ≈ E_ref atol = 1e-2 @@ -38,10 +35,10 @@ end unitcell = (1, 2) H = heisenberg_XYZ(InfiniteSquare(unitcell...)) peps₀ = InfinitePEPS(2, Dbond; unitcell) - env₀, = leading_boundary(CTMRGEnv(peps₀, ComplexSpace(χenv)), peps₀, ctm_alg) + env₀, = leading_boundary(CTMRGEnv(peps₀, ComplexSpace(χenv)), peps₀) # optimize energy and compute correlation lengths - peps, env, E, = fixedpoint(H, peps₀, env₀, opt_alg) + peps, env, E, = fixedpoint(H, peps₀, env₀; tol=gradtol) ξ_h, ξ_v, = correlation_length(peps, env) @test E ≈ 2 * E_ref atol = 1e-2 @@ -80,8 +77,7 @@ end # absorb weight into site tensors and CTMRG peps = 
InfinitePEPS(wpeps) - env₀ = CTMRGEnv(rand, Float64, peps, Espace) - env, = leading_boundary(env₀, peps, SimultaneousCTMRG()) + env, = leading_boundary(CTMRGEnv(rand, Float64, peps, Espace), peps) # measure physical quantities e_site = cost_function(peps, env, ham) / (N1 * N2) @@ -90,9 +86,9 @@ end @test isapprox(e_site, -0.6594; atol=1e-3) # continue with auto differentiation - svd_alg_gmres = SVDAdjoint(; rrule_alg=GMRES(; tol=1e-5)) - opt_alg_gmres = @set opt_alg.boundary_alg.projector_alg.svd_alg = svd_alg_gmres - peps_final, env_final, E_final, = fixedpoint(ham, peps, env, opt_alg_gmres) # sensitivity warnings and degeneracies due to SU(2)? + peps_final, env_final, E_final, = fixedpoint( + ham, peps, env; boundary_alg=(; svd_rrule_alg=GMRES, svd_rrule_tol=1e-5) + ) # sensitivity warnings and degeneracies due to SU(2)? ξ_h, ξ_v, = correlation_length(peps_final, env_final) e_site2 = E_final / (N1 * N2) @info "Auto diff energy = $e_site2" diff --git a/test/j1j2_model.jl b/test/j1j2_model.jl index 391f81a2..45588a0c 100644 --- a/test/j1j2_model.jl +++ b/test/j1j2_model.jl @@ -8,23 +8,23 @@ using OptimKit # initialize parameters χbond = 2 χenv = 12 -ctm_alg = SimultaneousCTMRG() -opt_alg = PEPSOptimize(; - boundary_alg=ctm_alg, - optimizer=LBFGS(4; gradtol=1e-3, verbosity=3), - gradient_alg=LinSolver(; iterscheme=:diffgauge), - symmetrization=RotateReflect(), -) # initialize states Random.seed!(91283219347) H = j1_j2(InfiniteSquare(); J2=0.25) peps₀ = product_peps(2, χbond; noise_amp=1e-1) peps₀ = symmetrize!(peps₀, RotateReflect()) -env₀, = leading_boundary(CTMRGEnv(peps₀, ComplexSpace(χenv)), peps₀, ctm_alg); +env₀, = leading_boundary(CTMRGEnv(peps₀, ComplexSpace(χenv)), peps₀) # find fixedpoint -peps, env, E, = fixedpoint(H, peps₀, env₀, opt_alg) +peps, env, E, = fixedpoint( + H, + peps₀, + env₀; + tol=1e-3, + gradient_alg=(; iterscheme=:diffgauge), + optimization_alg=(; symmetrization=RotateReflect()), +) ξ_h, ξ_v, = correlation_length(peps, env) # compare 
against Juraj Hasik's data: diff --git a/test/pwave.jl b/test/pwave.jl index 00a65391..d2c70833 100644 --- a/test/pwave.jl +++ b/test/pwave.jl @@ -10,10 +10,6 @@ unitcell = (2, 2) H = pwave_superconductor(InfiniteSquare(unitcell...)) Dbond = 2 χenv = 16 -ctm_alg = SimultaneousCTMRG() -opt_alg = PEPSOptimize(; - boundary_alg=ctm_alg, optimizer=LBFGS(4; maxiter=10, gradtol=1e-3, verbosity=3) -) # initialize states Pspace = Vect[FermionParity](0 => 1, 1 => 1) @@ -21,10 +17,10 @@ Vspace = Vect[FermionParity](0 => Dbond ÷ 2, 1 => Dbond ÷ 2) Envspace = Vect[FermionParity](0 => χenv ÷ 2, 1 => χenv ÷ 2) Random.seed!(91283219347) peps₀ = InfinitePEPS(Pspace, Vspace, Vspace; unitcell) -env₀, = leading_boundary(CTMRGEnv(peps₀, Envspace), peps₀, ctm_alg); +env₀, = leading_boundary(CTMRGEnv(peps₀, Envspace), peps₀) # find fixedpoint -_, _, E, = fixedpoint(H, peps₀, env₀, opt_alg) +_, _, E, = fixedpoint(H, peps₀, env₀; tol=1e-3, optimization_alg=(; maxiter=10)) # comparison with Gaussian PEPS minimum at D=2 on 1000x1000 square lattice with aPBC @test E / prod(size(peps₀)) ≈ -2.6241 atol = 5e-2 diff --git a/test/tf_ising.jl b/test/tf_ising.jl index f4ad251c..fa3b730c 100644 --- a/test/tf_ising.jl +++ b/test/tf_ising.jl @@ -19,19 +19,16 @@ mˣ = 0.91 # initialize parameters χbond = 2 χenv = 16 -ctm_alg = SimultaneousCTMRG() -opt_alg = PEPSOptimize(; - boundary_alg=ctm_alg, optimizer=LBFGS(4; gradtol=1e-3, verbosity=3) -) +gradtol = 1e-3 # initialize states H = transverse_field_ising(InfiniteSquare(); g) Random.seed!(2928528935) peps₀ = InfinitePEPS(2, χbond) -env₀, = leading_boundary(CTMRGEnv(peps₀, ComplexSpace(χenv)), peps₀, ctm_alg) +env₀, = leading_boundary(CTMRGEnv(peps₀, ComplexSpace(χenv)), peps₀) # find fixedpoint -peps, env, E, = fixedpoint(H, peps₀, env₀, opt_alg) +peps, env, E, = fixedpoint(H, peps₀, env₀; tol=gradtol) ξ_h, ξ_v, = correlation_length(peps, env) # compute magnetization @@ -45,7 +42,7 @@ magn = expectation_value(peps, M, env) # find fixedpoint in polarized 
phase and compute correlations lengths H_polar = transverse_field_ising(InfiniteSquare(); g=4.5) -peps_polar, env_polar, = fixedpoint(H_polar, peps₀, env₀, opt_alg) +peps_polar, env_polar, = fixedpoint(H_polar, peps₀, env₀; tol=gradtol) ξ_h_polar, ξ_v_polar, = correlation_length(peps_polar, env_polar) @test ξ_h_polar < ξ_h @test ξ_v_polar < ξ_v From 7fb7cfe9ebd431b3aed91893e5dfe6a88739deab Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Mon, 17 Feb 2025 11:28:27 +0100 Subject: [PATCH 12/52] Format leading_boundary and fixedpoint docstrings --- src/algorithms/ctmrg/ctmrg.jl | 10 ++++++++++ src/algorithms/optimization/peps_optimization.jl | 7 +++++++ 2 files changed, 17 insertions(+) diff --git a/src/algorithms/ctmrg/ctmrg.jl b/src/algorithms/ctmrg/ctmrg.jl index 7df280dc..191f12da 100644 --- a/src/algorithms/ctmrg/ctmrg.jl +++ b/src/algorithms/ctmrg/ctmrg.jl @@ -23,22 +23,32 @@ initial environment is used. The algorithm can be supplied via the keyword arguments or directly as an `CTMRGAlgorithm` struct. 
The following keyword arguments are supported: + - `alg=SimultaneousCTMRG`: Variant of the CTMRG algorithm; can be any `CTMRGAlgorithm` type + - `tol=Defaults.ctmrg_tol`: Tolerance checking singular value and norm convergence; also sets related tolerances to sensible defaults unless they are explicitly specified + - `maxiter=Defaults.ctmrg_maxiter`: Maximal number of CTMRG iterations per run + - `miniter=Defaults.ctmrg_miniter`: Minimal number of CTMRG carried out + - `verbosity=2`: Overall output information verbosity level, where `0` suppresses all output, `1` only prints warnings, `2` gives information at the start and end, `3` prints information every iteration, and `4` gives extra debug information + - `trscheme=Defaults.trscheme`: SVD truncation scheme during projector computation; can be any `TruncationScheme` supported by the provided SVD algorithm + - `svd_alg=Defaults.svd_fwd_alg`: SVD algorithm used for computing projectors + - `svd_rrule_alg=Defaults.svd_rrule_alg_type`: Algorithm for differentiating SVDs; currently supported through KrylovKit where `GMRES`, `BiCGStab` and `Arnoldi` are supported (only relevant if `leading_boundary` is differentiated) + - `svd_rrule_tol=1e1tol`: Convergence tolerance for SVD reverse-rule algorithm (only relevant if `leading_boundary` is differentiated) + - `projector_alg=Defaults.projector_alg_type`: Projector algorithm type, where any `ProjectorAlgorithm` can be used """ diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index d283ccb6..e6435988 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -58,15 +58,19 @@ initial guess for the first CTMRG run. By default, a random initial environment The optimization parameters can be supplied via the keyword arguments or directly as a `PEPSOptimize` struct. 
The following keyword arguments are supported: + - `tol=Defaults.optimizer_tol`: Overall tolerance for gradient norm convergence of the optimizer; sets related tolerance such as the boundary and boundary-gradient tolerances to sensible defaults unless they are explictly specified + - `verbosity=1`: Overall output information verbosity level, where `0` suppresses all output, `1` only prints the optimizer output and warnings, `2` additionally prints boundary information, and `3` prints all information including AD debug outputs + - `boundary_alg`: Boundary algorithm either specified as a `NamedTuple` of keyword arguments or directly as a `CTMRGAlgorithm`; see [`leading_boundary`](@ref) for a description of all possible keyword arguments + - `gradient_alg`: Algorithm for computing the boundary fixed-point gradient specified either as a `NamedTuple` of keyword arguments or directly as a `GradMode`. The supported keyword arguments are: @@ -78,6 +82,7 @@ The optimization parameters can be supplied via the keyword arguments or directl - `iterscheme=Defaults.gradient_alg_iterscheme`: CTMRG iteration scheme determining mode of differentiation; can be `:fixed` (SVD with fixed gauge) or `:diffgauge` (differentiate gauge-fixing routine) + - `optimization_alg`: PEPS optimization algorithm, specified either as a `NamedTuple` of keyword arguments or directly as a `PEPSOptimize`. By default, `OptimKit.LBFGS` is used in combination with a `HagerZhangLineSearch`. Possible keyword arguments are: @@ -89,12 +94,14 @@ The optimization parameters can be supplied via the keyword arguments or directl on the previous environment, otherwise a random environment is used - `symmetrization=nothing`: Accepts `nothing` or a `SymmetrizationStyle`, in which case the PEPS and PEPS gradient are symmetrized after each optimization iteration + - `(finalize!)=OptimKit._finalize!`: Inserts a `finalize!` function call after each optimization step by utilizing the `finalize!` kwarg of `OptimKit.optimize`. 
The function maps `(peps, env), f, g = finalize!((peps, env), f, g, numiter)`. The function returns the final PEPS, CTMRG environment and cost value, as well as an information `NamedTuple` which contains the following entries: + - `last_gradient`: last gradient of the cost function - `fg_evaluations`: number of evaluations of the cost and gradient function - `costs`: history of cost values From 4a61855dfd08172a3fde8f019a85c5f10993a1aa Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Mon, 17 Feb 2025 11:41:46 +0100 Subject: [PATCH 13/52] Implement select_algorithm scheme --- src/algorithms/ctmrg/ctmrg.jl | 24 +++++++++++-------- .../optimization/peps_optimization.jl | 23 +++++++++--------- 2 files changed, 26 insertions(+), 21 deletions(-) diff --git a/src/algorithms/ctmrg/ctmrg.jl b/src/algorithms/ctmrg/ctmrg.jl index 191f12da..acbf7ece 100644 --- a/src/algorithms/ctmrg/ctmrg.jl +++ b/src/algorithms/ctmrg/ctmrg.jl @@ -58,10 +58,7 @@ function MPSKit.leading_boundary(state::InfiniteSquareNetwork; kwargs...) ) end function MPSKit.leading_boundary(env₀, state::InfiniteSquareNetwork; kwargs...) - χenv = maximum(env₀.corners) do corner # extract maximal environment dimension - return dim(space(corner, 1)) - end - alg = select_leading_boundary_algorithm(χenv; kwargs...) + alg = select_leading_boundary_algorithm(leading_boundary, env₀; kwargs...) return MPSKit.leading_boundary(env₀, state, alg) end function MPSKit.leading_boundary(state::InfiniteSquareNetwork, alg::CTMRGAlgorithm) @@ -118,8 +115,9 @@ end @non_differentiable ctmrg_logcancel!(args...) """ - select_leading_boundary_algorithm( - χenv::Int; + select_algorithm( + ::typeof(leading_boundary), + env₀::CTMRGEnv; alg=SimultaneousCTMRG, tol=Defaults.ctmrg_tol, maxiter=Defaults.ctmrg_maxiter, @@ -132,12 +130,13 @@ end projector_alg=Defaults.projector_alg_type, ) -Parse optimization keyword arguments on to the corresponding algorithm structs and return -a final algorithm to be used in `fixedpoint`. 
For a description of the keyword arguments, +Parse CTMRG keyword arguments on to the corresponding algorithm structs and return a final +algorithm to be used in `leading_boundary`. For a description of the keyword arguments, see [`leading_boundary`](@ref). """ -function select_leading_boundary_algorithm( - χenv::Int; +function select_algorithm( + ::typeof(leading_boundary), + env₀::CTMRGEnv; alg=SimultaneousCTMRG, tol=Defaults.ctmrg_tol, maxiter=Defaults.ctmrg_maxiter, @@ -149,6 +148,11 @@ function select_leading_boundary_algorithm( svd_rrule_tol=1e1tol, projector_alg=Defaults.projector_alg_type, ) + # extract maximal environment dimenions + χenv = maximum(env₀.corners) do corner + return dim(space(corner, 1)) + end + svd_rrule_algorithm = if svd_rrule_alg <: Union{GMRES,Arnoldi} svd_rrule_alg(; tol=svd_rrule_tol, krylovdim=χenv + 24, verbosity=verbosity - 2) elseif svd_rrule_alg <: BiCGStab diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index e6435988..a1e24cb7 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -112,10 +112,7 @@ information `NamedTuple` which contains the following entries: - `times`: history of times each optimization step took """ function fixedpoint(operator, peps₀::InfinitePEPS, env₀::CTMRGEnv; kwargs...) - χenv = maximum(env₀.corners) do corner # extract maximal environment dimension - return dim(space(corner, 1)) - end - alg, finalize! = select_fixedpoint_algorithm(χenv; kwargs...) + alg, finalize! = select_algorithm(fixedpoint, env₀; kwargs...) return fixedpoint(operator, peps₀, env₀, alg; finalize!) 
end function fixedpoint( @@ -189,10 +186,11 @@ function fixedpoint( end """ - function select_fixedpoint_algorithm( - χenv::Int; + function select_algorithm( + ::typeof(fixedpoint), + env₀::CTMRGEnv; tol=Defaults.optimizer_tol, - verbosity=1, + verbosity=2, boundary_alg=(;), gradient_alg=(;), optimization_alg=(;), @@ -203,8 +201,9 @@ Parse optimization keyword arguments on to the corresponding algorithm structs a a final `PEPSOptimize` to be used in `fixedpoint`. For a description of the keyword arguments, see [`fixedpoint`](@ref). """ -function select_fixedpoint_algorithm( - χenv::Int; +function select_algorithm( + ::typeof(fixedpoint), + env₀::CTMRGEnv; tol=Defaults.optimizer_tol, # top-level tolerance verbosity=2, # top-level verbosity boundary_alg=(;), @@ -212,6 +211,7 @@ function select_fixedpoint_algorithm( optimization_alg=(;), (finalize!)=OptimKit._finalize!, ) + # top-level verbosity if verbosity ≤ 0 # disable output boundary_verbosity = -1 @@ -235,8 +235,9 @@ function select_fixedpoint_algorithm( boundary_algorithm = if boundary_alg isa CTMRGAlgorithm boundary_alg elseif boundary_alg isa NamedTuple - select_leading_boundary_algorithm( - χenv; + select_algorithm( + leading_boundary, + env₀; tol=1e-4tol, verbosity=boundary_verbosity, svd_rrule_tol=1e-3tol, From f6e00f71b4d4bdc3290c3b4db14bab333a88a3c8 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Mon, 17 Feb 2025 11:47:02 +0100 Subject: [PATCH 14/52] Fix leading_boundary --- src/algorithms/ctmrg/ctmrg.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/algorithms/ctmrg/ctmrg.jl b/src/algorithms/ctmrg/ctmrg.jl index acbf7ece..cdb92a6e 100644 --- a/src/algorithms/ctmrg/ctmrg.jl +++ b/src/algorithms/ctmrg/ctmrg.jl @@ -58,7 +58,7 @@ function MPSKit.leading_boundary(state::InfiniteSquareNetwork; kwargs...) ) end function MPSKit.leading_boundary(env₀, state::InfiniteSquareNetwork; kwargs...) - alg = select_leading_boundary_algorithm(leading_boundary, env₀; kwargs...) 
+ alg = select_algorithm(leading_boundary, env₀; kwargs...) return MPSKit.leading_boundary(env₀, state, alg) end function MPSKit.leading_boundary(state::InfiniteSquareNetwork, alg::CTMRGAlgorithm) From 1a7cd3694d75e78a3fa2beab9697801b4239ea51 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Mon, 17 Feb 2025 12:14:39 +0100 Subject: [PATCH 15/52] Fix Heisenberg SU teset --- test/heisenberg.jl | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/test/heisenberg.jl b/test/heisenberg.jl index 16e1f071..6552fb19 100644 --- a/test/heisenberg.jl +++ b/test/heisenberg.jl @@ -87,7 +87,11 @@ end # continue with auto differentiation peps_final, env_final, E_final, = fixedpoint( - ham, peps, env; boundary_alg=(; svd_rrule_alg=GMRES, svd_rrule_tol=1e-5) + ham, + peps, + env; + tol=gradtol, + boundary_alg=(; svd_rrule_alg=GMRES, svd_rrule_tol=1e-5), ) # sensitivity warnings and degeneracies due to SU(2)? ξ_h, ξ_v, = correlation_length(peps_final, env_final) e_site2 = E_final / (N1 * N2) From c6380186bb879a98742331ab5977c2d2107cd240 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Mon, 17 Feb 2025 15:30:46 +0100 Subject: [PATCH 16/52] Make selector compatible with svd_rrule_alg=nothing and improve alg warnings --- src/algorithms/ctmrg/ctmrg.jl | 4 ++- .../optimization/peps_optimization.jl | 30 ++++++++++++++----- src/utility/svd.jl | 23 +++++++++++--- 3 files changed, 45 insertions(+), 12 deletions(-) diff --git a/src/algorithms/ctmrg/ctmrg.jl b/src/algorithms/ctmrg/ctmrg.jl index cdb92a6e..a1792fb5 100644 --- a/src/algorithms/ctmrg/ctmrg.jl +++ b/src/algorithms/ctmrg/ctmrg.jl @@ -153,7 +153,9 @@ function select_algorithm( return dim(space(corner, 1)) end - svd_rrule_algorithm = if svd_rrule_alg <: Union{GMRES,Arnoldi} + svd_rrule_algorithm = if isnothing(svd_rrule_alg) + nothing + elseif svd_rrule_alg <: Union{GMRES,Arnoldi} svd_rrule_alg(; tol=svd_rrule_tol, krylovdim=χenv + 24, verbosity=verbosity - 2) elseif svd_rrule_alg <: BiCGStab svd_rrule_alg(; 
tol=svd_rrule_tol, verbosity) diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index a1e24cb7..1c05bc42 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -30,7 +30,10 @@ struct PEPSOptimize{G} ) where {G} if gradient_alg isa GradMode if boundary_alg isa SequentialCTMRG && iterscheme(gradient_alg) === :fixed - throw(ArgumentError(":sequential and :fixed are not compatible")) + msg = ":fixed was converted to :diffgauge since SequentialCTMRG does not \ + support :fixed differentiation mode due to sequential application of \ + SVDs; select SimultaneousCTMRG instead to use :fixed mode" + throw(ArgumentError(msg)) end end return new{G}(boundary_alg, gradient_alg, optimizer, reuse_env, symmetrization) @@ -131,12 +134,25 @@ function fixedpoint( finalize! = (x, f, g, numiter) -> fin!(symm_finalize!(x, f, g, numiter)..., numiter) end - # check realness compatibility - if scalartype(env₀) <: Real && iterscheme(alg.gradient_alg) == :fixed - env₀ = complex(env₀) - @warn "the provided real environment was converted to a complex environment since \ - :fixed mode generally produces complex gauges; use :diffgauge mode instead to work \ - with purely real environments" + # :fixed mode compatibility + if !isnothing(alg.gradient_alg) && iterscheme(alg.gradient_alg) == :fixed + if scalartype(env₀) <: Real # incompatible with real environments + env₀ = complex(env₀) + @warn "the provided real environment was converted to a complex environment + since :fixed mode generally produces complex gauges; use :diffgauge mode \ + instead to work with purely real environments" + end + if isnothing(alg.boundary_alg.projector_alg.svd_alg.rrule_alg) # incompatible with TensorKit SVD rrule + G = Base.typename(typeof(alg.gradient_alg)).wrapper # simple type without iterscheme parameter + gradient_alg = G{:diffgauge}( + (getproperty(alg.gradient_alg, f) for f in 
fieldnames(G))... + ) + @reset alg.gradient_alg = gradient_alg + @warn ":fixed was converted to :diffgauge since :fixed mode and \ + rrule_alg=nothing are incompatible - nothing uses the TensorKit \ + reverse-rule requiring access to the untruncated SVD which FixedSVD does not \ + have; select GMRES, BiCGStab or Arnoldi instead to use :fixed mode" + end end # initialize info collection vectors diff --git a/src/utility/svd.jl b/src/utility/svd.jl index 403c0d93..940d81ec 100644 --- a/src/utility/svd.jl +++ b/src/utility/svd.jl @@ -18,11 +18,26 @@ If `isnothing(rrule_alg)`, Zygote differentiates the forward call automatically. In case of degenerate singular values, one might need a `broadening` scheme which removes the divergences from the adjoint. """ -@kwdef struct SVDAdjoint{F,R,B} - fwd_alg::F = Defaults.svd_fwd_alg - rrule_alg::R = Defaults.svd_rrule_alg - broadening::B = nothing +struct SVDAdjoint{F,R,B} + fwd_alg::F + rrule_alg::R + broadening::B + + # Inner constructor to prohibit illegal setting combinations + function SVDAdjoint(fwd_alg::F, rrule_alg::R, broadening::B) where {F,R,B} + if fwd_alg isa FixedSVD && isnothing(rrule_alg) + throw( + ArgumentError("FixedSVD and nothing (TensorKit rrule) are not compatible") + ) + end + return new{F,R,B}(fwd_alg, rrule_alg, broadening) + end end # Keep truncation algorithm separate to be able to specify CTMRG dependent information +function SVDAdjoint(; + fwd_alg=Defaults.svd_fwd_alg, rrule_alg=Defaults.svd_rrule_alg, broadening=nothing +) + return SVDAdjoint(fwd_alg, rrule_alg, broadening) +end """ PEPSKit.tsvd(t, alg; trunc=notrunc(), p=2) From 4fd41cb8c39e6a544c03df2e83704d82b8753f69 Mon Sep 17 00:00:00 2001 From: leburgel Date: Fri, 21 Feb 2025 10:36:34 +0100 Subject: [PATCH 17/52] Properly merge... 
--- src/algorithms/ctmrg/ctmrg.jl | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/src/algorithms/ctmrg/ctmrg.jl b/src/algorithms/ctmrg/ctmrg.jl index ba0a3f58..509531ac 100644 --- a/src/algorithms/ctmrg/ctmrg.jl +++ b/src/algorithms/ctmrg/ctmrg.jl @@ -7,19 +7,18 @@ for contracting infinite PEPS. abstract type CTMRGAlgorithm end """ - ctmrg_iteration(state, env, alg::CTMRGAlgorithm) -> env′, info + ctmrg_iteration(network, env, alg::CTMRGAlgorithm) -> env′, info Perform a single CTMRG iteration in which all directions are being grown and renormalized. """ -function ctmrg_iteration(state, env, alg::CTMRGAlgorithm) end +function ctmrg_iteration(network, env, alg::CTMRGAlgorithm) end """ - MPSKit.leading_boundary([env₀], state; kwargs...) + MPSKit.leading_boundary(env₀, network; kwargs...) # expert version: - MPSKit.leading_boundary([env₀], state, alg::CTMRGAlgorithm) + MPSKit.leading_boundary(env₀, network, alg::CTMRGAlgorithm) -Contract `state` using CTMRG and return the CTM environment. Per default, a random -initial environment is used. +Contract `network` using CTMRG and return the CTM environment. The algorithm can be supplied via the keyword arguments or directly as an `CTMRGAlgorithm` struct. The following keyword arguments are supported: @@ -52,22 +51,22 @@ struct. The following keyword arguments are supported: - `projector_alg=Defaults.projector_alg_type`: Projector algorithm type, where any `ProjectorAlgorithm` can be used """ -function MPSKit.leading_boundary(env₀::CTMRGEnv, state::InfiniteSquareNetwork; kwargs...) +function MPSKit.leading_boundary(env₀::CTMRGEnv, network::InfiniteSquareNetwork; kwargs...) alg = select_algorithm(leading_boundary, env₀; kwargs...) 
- return MPSKit.leading_boundary(env₀, state, alg) + return MPSKit.leading_boundary(env₀, network, alg) end function MPSKit.leading_boundary( - env₀::CTMRGEnv, state::InfiniteSquareNetwork, alg::CTMRGAlgorithm + env₀::CTMRGEnv, network::InfiniteSquareNetwork, alg::CTMRGAlgorithm ) CS = map(x -> tsvd(x)[2], env₀.corners) TS = map(x -> tsvd(x)[2], env₀.edges) - η = one(real(scalartype(state))) + η = one(real(scalartype(network))) env = deepcopy(env₀) log = ignore_derivatives(() -> MPSKit.IterLog("CTMRG")) return LoggingExtras.withlevel(; alg.verbosity) do - ctmrg_loginit!(log, η, state, env₀) + ctmrg_loginit!(log, η, network, env₀) local info for iter in 1:(alg.maxiter) env, info = ctmrg_iteration(network, env, alg) # Grow and renormalize in all 4 directions From 202e70af2878ca514ccf56e59a47674ebcb22d9b Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Wed, 26 Feb 2025 17:01:15 +0100 Subject: [PATCH 18/52] Update src/algorithms/optimization/peps_optimization.jl Co-authored-by: Lander Burgelman <39218680+leburgel@users.noreply.github.com> --- src/algorithms/optimization/peps_optimization.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index 1c05bc42..0b0b9f59 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -276,7 +276,7 @@ function select_algorithm( gradient_alg..., # replaces all specified kwargs ) if gradient_kwargs.alg <: Union{GeomSum,ManualIter} - gradient_alg_type(; + gradient_kwargs.alg(; tol=gradient_kwargs.tol, maxiter=gradient_kwargs.maxiter, verbosity=gradient_kwargs.verbosity, From 347dc1019f96401913cfdc7c26e899c3ed77124e Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Wed, 26 Feb 2025 17:01:41 +0100 Subject: [PATCH 19/52] Update src/PEPSKit.jl Co-authored-by: Lander Burgelman <39218680+leburgel@users.noreply.github.com> --- src/PEPSKit.jl | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/src/PEPSKit.jl b/src/PEPSKit.jl index a61587d1..fe5b9d79 100644 --- a/src/PEPSKit.jl +++ b/src/PEPSKit.jl @@ -109,7 +109,7 @@ Module containing default algorithm parameter values and arguments. gradient_linsolver=KrylovKit.BiCGStab(; maxiter=gradient_alg_maxiter, tol=gradient_alg_tol) ``` -- `gradient_eigsolve`: Default eigsolver for the `EigSolver` gradient algorithm +- `gradient_eigsolver`: Default eigsolver for the `EigSolver` gradient algorithm ``` gradient_eigsolver = KrylovKit.Arnoldi(; maxiter=gradient_alg_maxiter, tol=gradient_alg_tol, eager=true) From bc3fe024b8fbd787285b3c75c081c5c548307241 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Wed, 26 Feb 2025 17:19:13 +0100 Subject: [PATCH 20/52] Apply most suggestions --- src/PEPSKit.jl | 9 ++++++--- src/algorithms/ctmrg/ctmrg.jl | 6 +++--- src/algorithms/optimization/peps_optimization.jl | 6 +++--- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/src/PEPSKit.jl b/src/PEPSKit.jl index a61587d1..92bed6dd 100644 --- a/src/PEPSKit.jl +++ b/src/PEPSKit.jl @@ -75,7 +75,7 @@ Module containing default algorithm parameter values and arguments. 
- `svd_rrule_alg`: Reverse-rule algorithm for differentiating a SVD ``` - svd_rrule_alg = svd_rrule_type(; tol=ctmrg_tol, krylovdim=48, verbosity=-1) + svd_rrule_alg = svd_rrule_type(; tol=ctmrg_tol, eager=true, krylovdim=48, verbosity=-1) ``` - `svd_alg`: Combination of forward and reverse SVD algorithms @@ -153,7 +153,9 @@ module Defaults const trscheme = FixedSpaceTruncation() const svd_fwd_alg = TensorKit.SDD() const svd_rrule_type = Arnoldi - const svd_rrule_alg = svd_rrule_type(; tol=ctmrg_tol, krylovdim=48, verbosity=-1) + const svd_rrule_alg = svd_rrule_type(; + tol=ctmrg_tol, eager=true, krylovdim=48, verbosity=-1 + ) const svd_alg = SVDAdjoint(; fwd_alg=svd_fwd_alg, rrule_alg=svd_rrule_alg) const projector_alg_type = HalfInfiniteProjector const projector_alg = projector_alg_type(; svd_alg, trscheme, verbosity=0) @@ -171,7 +173,8 @@ module Defaults maxiter=gradient_alg_maxiter, tol=gradient_alg_tol, eager=true ) const gradient_alg_iterscheme = :fixed - const gradient_alg = LinSolver(; + const gradient_alg_type = LinSolver + const gradient_alg = gradient_alg_type(; solver=gradient_linsolver, iterscheme=gradient_alg_iterscheme ) const reuse_env = true diff --git a/src/algorithms/ctmrg/ctmrg.jl b/src/algorithms/ctmrg/ctmrg.jl index 509531ac..f8dc7c63 100644 --- a/src/algorithms/ctmrg/ctmrg.jl +++ b/src/algorithms/ctmrg/ctmrg.jl @@ -23,7 +23,7 @@ Contract `network` using CTMRG and return the CTM environment. The algorithm can be supplied via the keyword arguments or directly as an `CTMRGAlgorithm` struct. 
The following keyword arguments are supported: -- `alg=SimultaneousCTMRG`: Variant of the CTMRG algorithm; can be any `CTMRGAlgorithm` type +- `alg=Defaults.ctmrg_alg_type`: Variant of the CTMRG algorithm; can be any `CTMRGAlgorithm` type - `tol=Defaults.ctmrg_tol`: Tolerance checking singular value and norm convergence; also sets related tolerances to sensible defaults unless they are explicitly specified @@ -112,7 +112,7 @@ end select_algorithm( ::typeof(leading_boundary), env₀::CTMRGEnv; - alg=SimultaneousCTMRG, + alg=Defaults.ctmrg_alg_type, tol=Defaults.ctmrg_tol, maxiter=Defaults.ctmrg_maxiter, miniter=Defaults.ctmrg_miniter, @@ -131,7 +131,7 @@ see [`leading_boundary`](@ref). function select_algorithm( ::typeof(leading_boundary), env₀::CTMRGEnv; - alg=SimultaneousCTMRG, + alg=Defaults.ctmrg_alg_type, tol=Defaults.ctmrg_tol, maxiter=Defaults.ctmrg_maxiter, miniter=Defaults.ctmrg_miniter, diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index 1c05bc42..8337f726 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -79,7 +79,7 @@ The optimization parameters can be supplied via the keyword arguments or directl The supported keyword arguments are: - `tol=1e-2tol`: Convergence tolerance for the fixed-point gradient iteration - `maxiter=Defaults.gradient_alg_maxiter`: Maximal number of gradient problem iterations - - `alg=typeof(Defaults.gradient_alg)`: Gradient algorithm type, can be any `GradMode` type + - `alg=Defaults.gradient_alg_type`: Gradient algorithm type, can be any `GradMode` type - `verbosity=gradient_verbosity`: Gradient output verbosity, ≤0 by default to disable too verbose printing; should only be enabled for debug purposes - `iterscheme=Defaults.gradient_alg_iterscheme`: CTMRG iteration scheme determining mode @@ -235,7 +235,7 @@ function select_algorithm( optimizer_verbosity = -1 elseif verbosity == 1 # output only 
optimization steps and degeneracy warnings boundary_verbosity = -1 - gradient_verbosity = -1 + gradient_verbosity = 1 optimizer_verbosity = 3 elseif verbosity == 2 # output optimization and boundary information boundary_verbosity = 2 @@ -270,7 +270,7 @@ function select_algorithm( gradient_kwargs = (; tol=1e-2tol, maxiter=Defaults.gradient_alg_maxiter, - alg=typeof(Defaults.gradient_alg), + alg=Defaults.gradient_alg_type, verbosity=gradient_verbosity, iterscheme=Defaults.gradient_alg_iterscheme, gradient_alg..., # replaces all specified kwargs From ec2c675befe84a49876cc7febdbf46c418a62d82 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Wed, 26 Feb 2025 18:46:48 +0100 Subject: [PATCH 21/52] Apply more suggestions --- src/PEPSKit.jl | 4 +++- src/algorithms/ctmrg/ctmrg.jl | 10 +++++----- src/algorithms/ctmrg/sequential.jl | 2 +- src/algorithms/ctmrg/simultaneous.jl | 2 +- src/algorithms/optimization/peps_optimization.jl | 5 +++-- 5 files changed, 13 insertions(+), 10 deletions(-) diff --git a/src/PEPSKit.jl b/src/PEPSKit.jl index f69d4eb0..d196cde1 100644 --- a/src/PEPSKit.jl +++ b/src/PEPSKit.jl @@ -69,6 +69,7 @@ Module containing default algorithm parameter values and arguments. 
- `ctmrg_maxiter=100`: Maximal number of CTMRG iterations per run - `ctmrg_miniter=4`: Minimal number of CTMRG carried out - `ctmrg_alg_type=SimultaneousCTMRG`: Default CTMRG algorithm variant +- `ctmrg_verbosity=2`: CTMRG output information verbosity - `trscheme=FixedSpaceTruncation()`: Truncation scheme for SVDs and other decompositions - `svd_fwd_alg=TensorKit.SDD()`: SVD algorithm that is used in the forward pass - `svd_rrule_type = Arnoldi`: Default solver type for SVD reverse-rule algorithm @@ -149,6 +150,7 @@ module Defaults const ctmrg_maxiter = 100 const ctmrg_miniter = 4 const ctmrg_alg_type = SimultaneousCTMRG + const ctmrg_verbosity = 2 const sparse = false const trscheme = FixedSpaceTruncation() const svd_fwd_alg = TensorKit.SDD() @@ -160,7 +162,7 @@ module Defaults const projector_alg_type = HalfInfiniteProjector const projector_alg = projector_alg_type(; svd_alg, trscheme, verbosity=0) const ctmrg_alg = ctmrg_alg_type( - ctmrg_tol, ctmrg_maxiter, ctmrg_miniter, 2, projector_alg + ctmrg_tol, ctmrg_maxiter, ctmrg_miniter, ctmrg_verbosity, projector_alg ) # Optimization diff --git a/src/algorithms/ctmrg/ctmrg.jl b/src/algorithms/ctmrg/ctmrg.jl index f8dc7c63..c25bf47b 100644 --- a/src/algorithms/ctmrg/ctmrg.jl +++ b/src/algorithms/ctmrg/ctmrg.jl @@ -32,9 +32,9 @@ struct. 
The following keyword arguments are supported: - `miniter=Defaults.ctmrg_miniter`: Minimal number of CTMRG carried out -- `verbosity=2`: Overall output information verbosity level, where `0` suppresses - all output, `1` only prints warnings, `2` gives information at the start and end, - `3` prints information every iteration, and `4` gives extra debug information +- `verbosity=Defaults.ctmrg_verbosity`: Overall output information verbosity level, where + `0` suppresses all output, `1` only prints warnings, `2` gives information at the start + and end, `3` prints information every iteration, and `4` gives extra debug information - `trscheme=Defaults.trscheme`: SVD truncation scheme during projector computation; can be any `TruncationScheme` supported by the provided SVD algorithm @@ -116,7 +116,7 @@ end tol=Defaults.ctmrg_tol, maxiter=Defaults.ctmrg_maxiter, miniter=Defaults.ctmrg_miniter, - verbosity=2, + verbosity=Defaults.ctmrg_verbosity, trscheme=Defaults.trscheme, svd_alg=Defaults.svd_fwd_alg, svd_rrule_alg=Defaults.svd_rrule_type, @@ -135,7 +135,7 @@ function select_algorithm( tol=Defaults.ctmrg_tol, maxiter=Defaults.ctmrg_maxiter, miniter=Defaults.ctmrg_miniter, - verbosity=2, + verbosity=Defaults.ctmrg_verbosity, trscheme=Defaults.trscheme, svd_alg=Defaults.svd_fwd_alg, svd_rrule_alg=Defaults.svd_rrule_type, diff --git a/src/algorithms/ctmrg/sequential.jl b/src/algorithms/ctmrg/sequential.jl index 4d1760ce..338cd884 100644 --- a/src/algorithms/ctmrg/sequential.jl +++ b/src/algorithms/ctmrg/sequential.jl @@ -20,7 +20,7 @@ function SequentialCTMRG(; tol=Defaults.ctmrg_tol, maxiter=Defaults.ctmrg_maxiter, miniter=Defaults.ctmrg_miniter, - verbosity=2, + verbosity=Defaults.ctmrg_verbosity, svd_alg=Defaults.svd_alg, trscheme=Defaults.trscheme, projector_alg=Defaults.projector_alg_type, diff --git a/src/algorithms/ctmrg/simultaneous.jl b/src/algorithms/ctmrg/simultaneous.jl index 606157b7..fc17f202 100644 --- a/src/algorithms/ctmrg/simultaneous.jl +++ 
b/src/algorithms/ctmrg/simultaneous.jl @@ -20,7 +20,7 @@ function SimultaneousCTMRG(; tol=Defaults.ctmrg_tol, maxiter=Defaults.ctmrg_maxiter, miniter=Defaults.ctmrg_miniter, - verbosity=2, + verbosity=Defaults.ctmrg_verbosity, svd_alg=Defaults.svd_alg, trscheme=Defaults.trscheme, projector_alg=Defaults.projector_alg_type, diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index cfce7955..b64dea8d 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -138,9 +138,10 @@ function fixedpoint( if !isnothing(alg.gradient_alg) && iterscheme(alg.gradient_alg) == :fixed if scalartype(env₀) <: Real # incompatible with real environments env₀ = complex(env₀) - @warn "the provided real environment was converted to a complex environment + @warn "the provided real environment was converted to a complex environment \ since :fixed mode generally produces complex gauges; use :diffgauge mode \ - instead to work with purely real environments" + instead by passing gradient_alg=(; iterscheme=:diffgauge) to the fixedpoint \ + keyword arguments to work with purely real environments" end if isnothing(alg.boundary_alg.projector_alg.svd_alg.rrule_alg) # incompatible with TensorKit SVD rrule G = Base.typename(typeof(alg.gradient_alg)).wrapper # simple type without iterscheme parameter From e7e9a506809855745f5f95f1c8f27f699b62d5cd Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Wed, 26 Feb 2025 18:57:19 +0100 Subject: [PATCH 22/52] Set eager=false in svd_rrule_alg again --- src/PEPSKit.jl | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/PEPSKit.jl b/src/PEPSKit.jl index d196cde1..48b60f82 100644 --- a/src/PEPSKit.jl +++ b/src/PEPSKit.jl @@ -76,7 +76,7 @@ Module containing default algorithm parameter values and arguments. 
- `svd_rrule_alg`: Reverse-rule algorithm for differentiating a SVD ``` - svd_rrule_alg = svd_rrule_type(; tol=ctmrg_tol, eager=true, krylovdim=48, verbosity=-1) + svd_rrule_alg = svd_rrule_type(; tol=ctmrg_tol, krylovdim=48, verbosity=-1) ``` - `svd_alg`: Combination of forward and reverse SVD algorithms @@ -155,9 +155,7 @@ module Defaults const trscheme = FixedSpaceTruncation() const svd_fwd_alg = TensorKit.SDD() const svd_rrule_type = Arnoldi - const svd_rrule_alg = svd_rrule_type(; - tol=ctmrg_tol, eager=true, krylovdim=48, verbosity=-1 - ) + const svd_rrule_alg = svd_rrule_type(; tol=ctmrg_tol, krylovdim=48, verbosity=-1) const svd_alg = SVDAdjoint(; fwd_alg=svd_fwd_alg, rrule_alg=svd_rrule_alg) const projector_alg_type = HalfInfiniteProjector const projector_alg = projector_alg_type(; svd_alg, trscheme, verbosity=0) From 2fd2333212582af36e5469c51611969719c39e40 Mon Sep 17 00:00:00 2001 From: Lukas Devos Date: Thu, 27 Feb 2025 11:53:00 -0500 Subject: [PATCH 23/52] example `select_algorithm` docstring --- src/algorithms/ctmrg/ctmrg.jl | 51 ++++++++++++++++++++++------------- 1 file changed, 33 insertions(+), 18 deletions(-) diff --git a/src/algorithms/ctmrg/ctmrg.jl b/src/algorithms/ctmrg/ctmrg.jl index c25bf47b..ae838369 100644 --- a/src/algorithms/ctmrg/ctmrg.jl +++ b/src/algorithms/ctmrg/ctmrg.jl @@ -108,25 +108,40 @@ end @non_differentiable ctmrg_logfinish!(args...) @non_differentiable ctmrg_logcancel!(args...) +# TODO: support reasonable kwargs for `trscheme::Union{TruncationScheme,NamedTuple}`? +# TODO: bit strange to have `svd_rrule_alg` and `svd_rrule_tol`. Merge this into a single `svd_rrule_alg::Union{Symbol,Algorithm,NamedTuple}`? 
+# TODO: interpolate defaults """ - select_algorithm( - ::typeof(leading_boundary), - env₀::CTMRGEnv; - alg=Defaults.ctmrg_alg_type, - tol=Defaults.ctmrg_tol, - maxiter=Defaults.ctmrg_maxiter, - miniter=Defaults.ctmrg_miniter, - verbosity=Defaults.ctmrg_verbosity, - trscheme=Defaults.trscheme, - svd_alg=Defaults.svd_fwd_alg, - svd_rrule_alg=Defaults.svd_rrule_type, - svd_rrule_tol=1e1tol, - projector_alg=Defaults.projector_alg_type, - ) - -Parse CTMRG keyword arguments on to the corresponding algorithm structs and return a final -algorithm to be used in `leading_boundary`. For a description of the keyword arguments, -see [`leading_boundary`](@ref). + select_algorithm(::typeof(leading_boundary), env₀::CTMRGEnv; kwargs...) -> CTMRGAlgorithm + +Parse and standardize CTMRG keyword arguments, and bundle them into a `CTMRGAlgorithm` struct, +which is passed on to [`leading_boundary`](@ref). + +## Keyword arguments + +### CTMRG iterations + +* `tol::Real`: Stopping criterium for the CTMRG iterations. This is the norm convergence, as well as the distance in singular values of the corners. +* `miniter::Int`: Minimal number of CTMRG iterations. +* `maxiter::Int`: Maximal number of CTMRG iterations. +* `verbosity::Int`: Output verbosity level, should be one of the following: + 0. Suppress all output + 1. Only print warnings + 2. Initialization and convergence info + 3. Iteration info + 4. Debug info +* `alg::Union{Symbol,Algorithm}`: Variant of the CTMRG algorithm. See also [`CTMRGAlgorithm`](@ref). + +### Projector algorithm + +* `trscheme::TruncationScheme`: Truncation scheme for the projector computation, which controls the resulting virtual spaces. +* `svd_alg`: SVD algorithm for computing projectors. See also [`PEPSKit.tsvd`](@ref). +* `projector_alg::Union{Symbol,Algorithm}`: Variant of the projector algorithm. See also [`ProjectorAlgorithm`](@ref). + +### Differentiation settings + +* `svd_rrule_alg::Union{Symbol,Algorithm}`: Algorithm used for differentiating SVDs. 
+* `svd_rrule_tol::Real` Convergence tolerance for SVD `rrule` """ function select_algorithm( ::typeof(leading_boundary), From 46a6724f57ba962f9d0f166de3cc58b33ae32127 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Mon, 3 Mar 2025 14:41:27 +0100 Subject: [PATCH 24/52] Interpolate docstrings and update `fixedpoint` and `leading_boundary` docstrings --- src/algorithms/ctmrg/ctmrg.jl | 76 ++++--------- src/algorithms/ctmrg/projectors.jl | 8 +- src/algorithms/ctmrg/sequential.jl | 8 +- src/algorithms/ctmrg/simultaneous.jl | 8 +- .../fixed_point_differentiation.jl | 12 +- .../optimization/peps_optimization.jl | 105 +++++++++--------- src/utility/svd.jl | 2 +- 7 files changed, 97 insertions(+), 122 deletions(-) diff --git a/src/algorithms/ctmrg/ctmrg.jl b/src/algorithms/ctmrg/ctmrg.jl index ae838369..db8602a5 100644 --- a/src/algorithms/ctmrg/ctmrg.jl +++ b/src/algorithms/ctmrg/ctmrg.jl @@ -18,38 +18,34 @@ function ctmrg_iteration(network, env, alg::CTMRGAlgorithm) end # expert version: MPSKit.leading_boundary(env₀, network, alg::CTMRGAlgorithm) -Contract `network` using CTMRG and return the CTM environment. +Contract `network` using CTMRG and return the CTM environment. The algorithm can be +supplied via the keyword arguments or directly as an [`CTMRGAlgorithm`](@ref) struct. -The algorithm can be supplied via the keyword arguments or directly as an `CTMRGAlgorithm` -struct. 
The following keyword arguments are supported: - -- `alg=Defaults.ctmrg_alg_type`: Variant of the CTMRG algorithm; can be any `CTMRGAlgorithm` type - -- `tol=Defaults.ctmrg_tol`: Tolerance checking singular value and norm convergence; also - sets related tolerances to sensible defaults unless they are explicitly specified - -- `maxiter=Defaults.ctmrg_maxiter`: Maximal number of CTMRG iterations per run - -- `miniter=Defaults.ctmrg_miniter`: Minimal number of CTMRG carried out +## Keyword arguments -- `verbosity=Defaults.ctmrg_verbosity`: Overall output information verbosity level, where - `0` suppresses all output, `1` only prints warnings, `2` gives information at the start - and end, `3` prints information every iteration, and `4` gives extra debug information +### CTMRG iterations -- `trscheme=Defaults.trscheme`: SVD truncation scheme during projector computation; can be - any `TruncationScheme` supported by the provided SVD algorithm +* `tol::Real=$(Defaults.ctmrg_tol)`: Stopping criterium for the CTMRG iterations. This is the norm convergence, as well as the distance in singular values of the corners and edges. +* `miniter::Int=$(Defaults.ctmrg_miniter)`: Minimal number of CTMRG iterations. +* `maxiter::Int=$(Defaults.ctmrg_maxiter)`: Maximal number of CTMRG iterations. +* `verbosity::Int=$(Defaults.ctmrg_verbosity)`: Output verbosity level, should be one of the following: + 0. Suppress all output + 1. Only print warnings + 2. Initialization and convergence info + 3. Iteration info + 4. Debug info +* `alg::Union{Symbol,<:CTMRGAlgorithm}=$(Defaults.ctmrg_alg_type)`: Variant of the CTMRG algorithm. See also [`CTMRGAlgorithm`](@ref). 
-- `svd_alg=Defaults.svd_fwd_alg`: SVD algorithm used for computing projectors +### Projector algorithm -- `svd_rrule_alg=Defaults.svd_rrule_alg_type`: Algorithm for differentiating SVDs; currently - supported through KrylovKit where `GMRES`, `BiCGStab` and `Arnoldi` are supported (only - relevant if `leading_boundary` is differentiated) +* `trscheme::TruncationScheme=$(Defaults.trscheme)`: Truncation scheme for the projector computation, which controls the resulting virtual spaces. +* `svd_alg=Defaults.svd_fwd_alg`: SVD algorithm for computing projectors. See also [`PEPSKit.tsvd`](@ref). +* `projector_alg::Union{Symbol,<:ProjectorAlgorithm}=$(Defaults.projector_alg_type)`: Variant of the projector algorithm. See also [`ProjectorAlgorithm`](@ref). -- `svd_rrule_tol=1e1tol`: Convergence tolerance for SVD reverse-rule algorithm (only - relevant if `leading_boundary` is differentiated) +### Differentiation settings -- `projector_alg=Defaults.projector_alg_type`: Projector algorithm type, where any - `ProjectorAlgorithm` can be used +* `svd_rrule_alg::Union{Symbol,Algorithm}=$(Defaults.svd_rrule_alg_type)`: Algorithm used for differentiating SVDs. +* `svd_rrule_tol::Real=1e1tol`: Convergence tolerance for SVD `rrule` """ function MPSKit.leading_boundary(env₀::CTMRGEnv, network::InfiniteSquareNetwork; kwargs...) alg = select_algorithm(leading_boundary, env₀; kwargs...) @@ -110,38 +106,12 @@ end # TODO: support reasonable kwargs for `trscheme::Union{TruncationScheme,NamedTuple}`? # TODO: bit strange to have `svd_rrule_alg` and `svd_rrule_tol`. Merge this into a single `svd_rrule_alg::Union{Symbol,Algorithm,NamedTuple}`? -# TODO: interpolate defaults """ select_algorithm(::typeof(leading_boundary), env₀::CTMRGEnv; kwargs...) -> CTMRGAlgorithm Parse and standardize CTMRG keyword arguments, and bundle them into a `CTMRGAlgorithm` struct, -which is passed on to [`leading_boundary`](@ref). 
- -## Keyword arguments - -### CTMRG iterations - -* `tol::Real`: Stopping criterium for the CTMRG iterations. This is the norm convergence, as well as the distance in singular values of the corners. -* `miniter::Int`: Minimal number of CTMRG iterations. -* `maxiter::Int`: Maximal number of CTMRG iterations. -* `verbosity::Int`: Output verbosity level, should be one of the following: - 0. Suppress all output - 1. Only print warnings - 2. Initialization and convergence info - 3. Iteration info - 4. Debug info -* `alg::Union{Symbol,Algorithm}`: Variant of the CTMRG algorithm. See also [`CTMRGAlgorithm`](@ref). - -### Projector algorithm - -* `trscheme::TruncationScheme`: Truncation scheme for the projector computation, which controls the resulting virtual spaces. -* `svd_alg`: SVD algorithm for computing projectors. See also [`PEPSKit.tsvd`](@ref). -* `projector_alg::Union{Symbol,Algorithm}`: Variant of the projector algorithm. See also [`ProjectorAlgorithm`](@ref). - -### Differentiation settings - -* `svd_rrule_alg::Union{Symbol,Algorithm}`: Algorithm used for differentiating SVDs. -* `svd_rrule_tol::Real` Convergence tolerance for SVD `rrule` +which is passed on to [`leading_boundary`](@ref). See [`leading_boundary`](@ref) for a +description of all keyword arguments. """ function select_algorithm( ::typeof(leading_boundary), diff --git a/src/algorithms/ctmrg/projectors.jl b/src/algorithms/ctmrg/projectors.jl index 61965f68..a79b3dbe 100644 --- a/src/algorithms/ctmrg/projectors.jl +++ b/src/algorithms/ctmrg/projectors.jl @@ -33,8 +33,8 @@ function truncation_scheme(alg::ProjectorAlgorithm, edge) end """ - struct HalfInfiniteProjector{S,T}(; svd_alg=Defaults.svd_alg, - trscheme=Defaults.trscheme, verbosity=0) + struct HalfInfiniteProjector{S,T}(; svd_alg=$(Defaults.svd_alg), + trscheme=$(Defaults.trscheme), verbosity=0) Projector algorithm implementing projectors from SVDing the half-infinite CTMRG environment. 
""" @@ -45,8 +45,8 @@ Projector algorithm implementing projectors from SVDing the half-infinite CTMRG end """ - struct FullInfiniteProjector{S,T}(; svd_alg=Defaults.svd_alg, - trscheme=Defaults.trscheme, verbosity=0) + struct FullInfiniteProjector{S,T}(; svd_alg=$(Defaults.svd_alg), + trscheme=$(Defaults.trscheme), verbosity=0) Projector algorithm implementing projectors from SVDing the full 4x4 CTMRG environment. """ diff --git a/src/algorithms/ctmrg/sequential.jl b/src/algorithms/ctmrg/sequential.jl index 338cd884..1cbee69e 100644 --- a/src/algorithms/ctmrg/sequential.jl +++ b/src/algorithms/ctmrg/sequential.jl @@ -1,8 +1,8 @@ """ - SequentialCTMRG(; tol=Defaults.ctmrg_tol, maxiter=Defaults.ctmrg_maxiter, - miniter=Defaults.ctmrg_miniter, verbosity=0, - svd_alg=SVDAdjoint(), trscheme=Defaults.trscheme, - projector_alg=Defaults.projector_alg_type) + SequentialCTMRG(; tol=$(Defaults.ctmrg_tol), maxiter=$(Defaults.ctmrg_maxiter), + miniter=$(Defaults.ctmrg_miniter), verbosity=$(Defaults.ctmrg_verbosity), + svd_alg=$(Defaults.svd_alg), trscheme=$(Defaults.trscheme), + projector_alg=$(Defaults.projector_alg_type)) CTMRG algorithm where the expansions and renormalization is performed sequentially column-wise. 
This is implemented as a growing and projecting step to the left, followed by diff --git a/src/algorithms/ctmrg/simultaneous.jl b/src/algorithms/ctmrg/simultaneous.jl index fc17f202..c09a1ab9 100644 --- a/src/algorithms/ctmrg/simultaneous.jl +++ b/src/algorithms/ctmrg/simultaneous.jl @@ -1,8 +1,8 @@ """ - SimultaneousCTMRG(; tol=Defaults.ctmrg_tol, maxiter=Defaults.ctmrg_maxiter, - miniter=Defaults.ctmrg_miniter, verbosity=0, - svd_alg=SVDAdjoint(), trscheme=Defaults.trscheme, - projector_alg=Defaults.projector_alg_type) + SimultaneousCTMRG(; tol=$(Defaults.ctmrg_tol), maxiter=$(Defaults.ctmrg_maxiter), + miniter=$(Defaults.ctmrg_miniter), verbosity=$(Defaults.ctmrg_verbosity), + svd_alg=$(Defaults.svd_alg), trscheme=$(Defaults.trscheme), + projector_alg=$(Defaults.projector_alg_type)) CTMRG algorithm where all sides are grown and renormalized at the same time. In particular, the projectors are applied to the corners from two sides simultaneously. The projectors are diff --git a/src/algorithms/optimization/fixed_point_differentiation.jl b/src/algorithms/optimization/fixed_point_differentiation.jl index 4da49c74..870d8678 100644 --- a/src/algorithms/optimization/fixed_point_differentiation.jl +++ b/src/algorithms/optimization/fixed_point_differentiation.jl @@ -3,8 +3,8 @@ abstract type GradMode{F} end iterscheme(::GradMode{F}) where {F} = F """ - struct GeomSum(; tol=Defaults.gradient_alg_tol, maxiter=Defaults.gradient_alg_maxiter, - verbosity=0, iterscheme=Defaults.gradient_alg_iterscheme) <: GradMode{iterscheme} + struct GeomSum(; tol=$(Defaults.gradient_alg_tol), maxiter=$(Defaults.gradient_alg_maxiter), + verbosity=0, iterscheme=$(Defaults.gradient_alg_iterscheme)) <: GradMode{iterscheme} Gradient mode for CTMRG using explicit evaluation of the geometric sum. 
@@ -29,8 +29,8 @@ function GeomSum(; end """ - struct ManualIter(; tol=Defaults.gradient_alg_tol, maxiter=Defaults.gradient_alg_maxiter, - verbosity=0, iterscheme=Defaults.gradient_alg_iterscheme) <: GradMode{iterscheme} + struct ManualIter(; tol=$(Defaults.gradient_alg_tol), maxiter=$(Defaults.gradient_alg_maxiter), + verbosity=0, iterscheme=$(Defaults.gradient_alg_iterscheme)) <: GradMode{iterscheme} Gradient mode for CTMRG using manual iteration to solve the linear problem. @@ -55,7 +55,7 @@ function ManualIter(; end """ - struct LinSolver(; solver=KrylovKit.GMRES(), iterscheme=Defaults.gradient_alg_iterscheme) <: GradMode{iterscheme} + struct LinSolver(; solver=$(Defaults.gradient_linsolver), iterscheme=$(Defaults.gradient_alg_iterscheme)) <: GradMode{iterscheme} Gradient mode wrapper around `KrylovKit.LinearSolver` for solving the gradient linear problem using iterative solvers. @@ -76,7 +76,7 @@ function LinSolver(; end """ - struct EigSolver(; solver=Defaults.gradient_eigsolver, iterscheme=Defaults.gradient_alg_iterscheme) <: GradMode{iterscheme} + struct EigSolver(; solver=$(Defaults.gradient_eigsolver), iterscheme=$(Defaults.gradient_alg_iterscheme)) <: GradMode{iterscheme} Gradient mode wrapper around `KrylovKit.KrylovAlgorithm` for solving the gradient linear problem as an eigenvalue problem. 
diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index b64dea8d..6686b3dc 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -1,7 +1,7 @@ """ - PEPSOptimize{G}(; boundary_alg=Defaults.ctmrg_alg, gradient_alg::G=Defaults.gradient_alg - optimizer::OptimKit.OptimizationAlgorithm=Defaults.optimizer - reuse_env::Bool=true, symmetrization::Union{Nothing,SymmetrizationStyle}=nothing) + PEPSOptimize{G}(; boundary_alg=$(Defaults.ctmrg_alg), gradient_alg::G=$(Defaults.gradient_alg), + optimizer::OptimKit.OptimizationAlgorithm=$(Defaults.optimizer) + reuse_env::Bool=$(Defaults.reuse_env), symmetrization::Union{Nothing,SymmetrizationStyle}=nothing) Algorithm struct that represent PEPS ground-state optimization using AD. Set the algorithm to contract the infinite PEPS in `boundary_alg`; @@ -62,57 +62,62 @@ initial guess for the first CTMRG run. By default, a random initial environment The optimization parameters can be supplied via the keyword arguments or directly as a `PEPSOptimize` struct. 
The following keyword arguments are supported: -- `tol=Defaults.optimizer_tol`: Overall tolerance for gradient norm convergence of the - optimizer; sets related tolerance such as the boundary and boundary-gradient tolerances - to sensible defaults unless they are explictly specified - -- `verbosity=1`: Overall output information verbosity level, where `0` suppresses - all output, `1` only prints the optimizer output and warnings, `2` additionally prints - boundary information, and `3` prints all information including AD debug outputs - -- `boundary_alg`: Boundary algorithm either specified as a `NamedTuple` of keyword - arguments or directly as a `CTMRGAlgorithm`; see [`leading_boundary`](@ref) for a - description of all possible keyword arguments - -- `gradient_alg`: Algorithm for computing the boundary fixed-point gradient - specified either as a `NamedTuple` of keyword arguments or directly as a `GradMode`. - The supported keyword arguments are: - - `tol=1e-2tol`: Convergence tolerance for the fixed-point gradient iteration - - `maxiter=Defaults.gradient_alg_maxiter`: Maximal number of gradient problem iterations - - `alg=Defaults.gradient_alg_type`: Gradient algorithm type, can be any `GradMode` type - - `verbosity=gradient_verbosity`: Gradient output verbosity, ≤0 by default to disable too - verbose printing; should only be enabled for debug purposes - - `iterscheme=Defaults.gradient_alg_iterscheme`: CTMRG iteration scheme determining mode - of differentiation; can be `:fixed` (SVD with fixed gauge) or `:diffgauge` (differentiate - gauge-fixing routine) - -- `optimization_alg`: PEPS optimization algorithm, specified either as a `NamedTuple` of - keyword arguments or directly as a `PEPSOptimize`. By default, `OptimKit.LBFGS` is used - in combination with a `HagerZhangLineSearch`. 
Possible keyword arguments are: - - `tol=tol`: Gradient norm tolerance of the optimizer - - `maxiter=Defaults.optimizer_maxiter`: Maximal number of optimization steps - - `lbfgs_memory=Defaults.lbfgs_memory`: Size of limited memory representation of BFGS - Hessian matrix - - `reuse_env=Defaults.reuse_env`: If `true`, the current optimization step is initialized - on the previous environment, otherwise a random environment is used - - `symmetrization=nothing`: Accepts `nothing` or a `SymmetrizationStyle`, in which case - the PEPS and PEPS gradient are symmetrized after each optimization iteration - -- `(finalize!)=OptimKit._finalize!`: Inserts a `finalize!` function call after each - optimization step by utilizing the `finalize!` kwarg of `OptimKit.optimize`. - The function maps `(peps, env), f, g = finalize!((peps, env), f, g, numiter)`. +## Keyword arguments + +### Global settings + +* `tol::Real=$(Defaults.optimizer_tol)`: Overall tolerance for gradient norm convergence of the optimizer. Sets related tolerance such as the boundary and boundary-gradient tolerances to sensible defaults unless they are explictly specified. +* `verbosity::Int=1`: Overall output information verbosity level, should be one of the following: + 0. Suppress all output + 1. Optimizer output and warnings + 2. Additionally print boundary information + 3. All information including AD debug outputs +* `(finalize!)=OptimKit._finalize!`: Inserts a `finalize!` function call after each optimization step by utilizing the `finalize!` kwarg of `OptimKit.optimize`. The function maps `(peps, env), f, g = finalize!((peps, env), f, g, numiter)`. + +### Boundary algorithm + +Supply boundary algorithm parameters via `boundary_alg::Union{NamedTuple,<:CTMRGAlgorithm}` +using either a `NamedTuple` of keyword arguments or a `CTMRGAlgorithm` directly. +See [`leading_boundary`](@ref) for a description of all possible keyword arguments. 
+ +### Gradient algorithm + +Supply gradient algorithm parameters via `gradient_alg::Union{NamedTuple,<:GradMode}` using +either a `NamedTuple` of keyword arguments or a `GradMode` struct directly. The supported +keyword arguments are: + +* `tol=1e-2tol`: Convergence tolerance for the fixed-point gradient iteration. +* `maxiter=$(Defaults.gradient_alg_maxiter)`: Maximal number of gradient problem iterations. +* `alg=$(Defaults.gradient_alg_type)`: Gradient algorithm type, can be any `GradMode` type. +* `verbosity`: Gradient output verbosity, ≤0 by default to disable too verbose printing. Should only be >0 for debug purposes. +* `iterscheme=$(Defaults.gradient_alg_iterscheme)`: CTMRG iteration scheme determining mode of differentiation. This can be `:fixed` (SVD with fixed gauge) or `:diffgauge` (differentiate gauge-fixing routine). + +### PEPS optimization settings + +Supply the optimization algorithm via `optimization_alg::Union{NamedTuple,<:PEPSOptimize}` +using either a `NamedTuple` of keyword arguments or a `PEPSOptimize` directly. By default, +`OptimKit.LBFGS` is used in combination with a `HagerZhangLineSearch`. The supported +keyword arguments are: + +* `tol=tol`: Gradient norm tolerance of the optimizer. +* `maxiter=$(Defaults.optimizer_maxiter)`: Maximal number of optimization steps. +* `lbfgs_memory=$(Defaults.lbfgs_memory)`: Size of limited memory representation of BFGS Hessian matrix. +* `reuse_env=$(Defaults.reuse_env)`: If `true`, the current optimization step is initialized on the previous environment, otherwise a random environment is used. +* `symmetrization=nothing`: Accepts `nothing` or a `SymmetrizationStyle`, in which case the PEPS and PEPS gradient are symmetrized after each optimization iteration. 
+ +## Return values The function returns the final PEPS, CTMRG environment and cost value, as well as an information `NamedTuple` which contains the following entries: -- `last_gradient`: last gradient of the cost function -- `fg_evaluations`: number of evaluations of the cost and gradient function -- `costs`: history of cost values -- `gradnorms`: history of gradient norms -- `truncation_errors`: history of truncation errors of the boundary algorithm -- `condition_numbers`: history of condition numbers of the CTMRG environments -- `gradnorms_unitcell`: history of gradient norms for each respective unit cell entry -- `times`: history of times each optimization step took +* `last_gradient`: Last gradient of the cost function. +* `fg_evaluations`: Number of evaluations of the cost and gradient function. +* `costs`: History of cost values. +* `gradnorms`: History of gradient norms. +* `truncation_errors`: History of maximal truncation errors of the boundary algorithm. +* `condition_numbers`: History of maximal condition numbers of the CTMRG environments. +* `gradnorms_unitcell`: History of gradient norms for each respective unit cell entry. +* `times`: History of optimization step execution times. """ function fixedpoint(operator, peps₀::InfinitePEPS, env₀::CTMRGEnv; kwargs...) alg, finalize! = select_algorithm(fixedpoint, env₀; kwargs...) diff --git a/src/utility/svd.jl b/src/utility/svd.jl index 940d81ec..9565b210 100644 --- a/src/utility/svd.jl +++ b/src/utility/svd.jl @@ -10,7 +10,7 @@ using TensorKit: const CRCExt = Base.get_extension(KrylovKit, :KrylovKitChainRulesCoreExt) """ - struct SVDAdjoint(; fwd_alg=Defaults.svd_fwd_alg, rrule_alg=Defaults.svd_rrule_alg, + struct SVDAdjoint(; fwd_alg=$(Defaults.svd_fwd_alg), rrule_alg=$(Defaults.svd_rrule_alg), broadening=nothing) Wrapper for a SVD algorithm `fwd_alg` with a defined reverse rule `rrule_alg`. 
From 9d56f115c0a754d3d38ccd918e385c60b1df5e5d Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Mon, 3 Mar 2025 15:17:35 +0100 Subject: [PATCH 25/52] Move out Defaults and make types concrete --- src/Defaults.jl | 188 ++++++++++++++++++++++++++++++++++++++++++++++++ src/PEPSKit.jl | 172 +------------------------------------------- 2 files changed, 190 insertions(+), 170 deletions(-) create mode 100644 src/Defaults.jl diff --git a/src/Defaults.jl b/src/Defaults.jl new file mode 100644 index 00000000..07baa077 --- /dev/null +++ b/src/Defaults.jl @@ -0,0 +1,188 @@ +""" + module Defaults + +Module containing default algorithm parameter values and arguments. + +# CTMRG +- `ctmrg_tol=1e-8`: Tolerance checking singular value and norm convergence +- `ctmrg_maxiter=100`: Maximal number of CTMRG iterations per run +- `ctmrg_miniter=4`: Minimal number of CTMRG carried out +- `ctmrg_alg_type=SimultaneousCTMRG`: Default CTMRG algorithm variant +- `ctmrg_verbosity=2`: CTMRG output information verbosity +- `trscheme=FixedSpaceTruncation()`: Truncation scheme for SVDs and other decompositions +- `svd_fwd_alg=TensorKit.SDD()`: SVD algorithm that is used in the forward pass +- `svd_rrule_type = Arnoldi`: Default solver type for SVD reverse-rule algorithm +- `svd_rrule_alg`: Reverse-rule algorithm for differentiating a SVD + + ``` + svd_rrule_alg = svd_rrule_type(; tol=ctmrg_tol, krylovdim=48, verbosity=-1) + ``` + +- `svd_alg`: Combination of forward and reverse SVD algorithms + + ``` + svd_alg=SVDAdjoint(; fwd_alg=svd_fwd_alg, rrule_alg=svd_rrule_alg) + ``` + +- `projector_alg_type=HalfInfiniteProjector`: Default type of projector algorithm +- `projector_alg`: Algorithm to compute CTMRG projectors + + ``` + projector_alg = projector_alg_type(; svd_alg, trscheme, verbosity=0) + ``` + +- `ctmrg_alg`: Algorithm for performing CTMRG runs + + ``` + ctmrg_alg = ctmrg_alg_type( + ctmrg_tol, ctmrg_maxiter, ctmrg_miniter, 2, projector_alg + ) + ``` + +# Optimization +- 
`gradient_alg_tol=1e-6`: Convergence tolerance for the fixed-point gradient iteration +- `gradient_alg_maxiter=30`: Maximal number of iterations for computing the CTMRG fixed-point gradient +- `gradient_alg_iterscheme=:fixed`: Scheme for differentiating one CTMRG iteration +- `gradient_linsolver`: Default linear solver for the `LinSolver` gradient algorithm + + ``` + gradient_linsolver=KrylovKit.BiCGStab(; maxiter=gradient_alg_maxiter, tol=gradient_alg_tol) + ``` + +- `gradient_eigsolver`: Default eigsolver for the `EigSolver` gradient algorithm + + ``` + gradient_eigsolver = KrylovKit.Arnoldi(; maxiter=gradient_alg_maxiter, tol=gradient_alg_tol, eager=true) + ``` + +- `gradient_alg`: Algorithm to compute the gradient fixed-point + + ``` + gradient_alg = LinSolver(; solver=gradient_linsolver, iterscheme=gradient_alg_iterscheme) + ``` + +- `reuse_env=true`: If `true`, the current optimization step is initialized on the previous + environment, otherwise a random environment is used +- `optimizer_tol=1e-4`: Gradient norm tolerance of the optimizer +- `optimizer_maxiter=100`: Maximal number of optimization steps +- `lbfgs_memory=20`: Size of limited memory representation of BFGS Hessian matrix +- `optimizer`: Default `OptimKit.OptimizerAlgorithm` for PEPS optimization + + ``` + optimizer=LBFGS(lbfgs_memory; maxiter=optimizer_maxiter, gradtol=optimizer_tol, verbosity=3) + ``` + +# OhMyThreads scheduler +- `scheduler=Ref{Scheduler}(...)`: Multi-threading scheduler which can be accessed via `set_scheduler!` +""" +module Defaults + using TensorKit, KrylovKit, OptimKit, OhMyThreads + using PEPSKit: + LinSolver, + FixedSpaceTruncation, + SVDAdjoint, + HalfInfiniteProjector, + SimultaneousCTMRG + + # CTMRG + const ctmrg_tol = 1e-8 + const ctmrg_maxiter = 100 + const ctmrg_miniter = 4 + const ctmrg_alg = :simultaneous # ∈ {:simultaneous, :sequential} + const ctmrg_verbosity = 2 + const sparse = false # TODO: implement sparse CTMRG + + # SVD forward & reverse + const trscheme = 
FixedSpaceTruncation() + # const trscheme = FixedSpaceTruncation() + const svd_fwd_alg = :sdd # ∈ {:sdd, :svd, :iterative} + # const svd_fwd_alg = TensorKit.SDD() + const svd_rrule_alg = :arnoldi # ∈ {:gmres, :bicgstab, :arnoldi} + const svd_rrule_krylovdim_factor = 1.3 + const svd_rrule_verbosity = -1 + # const svd_rrule_alg = svd_rrule_type(; tol=ctmrg_tol, krylovdim=48, verbosity=-1) + # const svd_alg = SVDAdjoint(; fwd_alg=svd_fwd_alg, rrule_alg=svd_rrule_alg) + + # Projector + const projector_alg = :halfinfinite # ∈ {:halfinfinite, :fullinfinite} + # const projector_alg_type = HalfInfiniteProjector + const projector_verbosity = 0 + # const projector_alg = projector_alg_type(; svd_alg, trscheme, verbosity=0) + # const ctmrg_alg = ctmrg_alg_type( + # ctmrg_tol, ctmrg_maxiter, ctmrg_miniter, ctmrg_verbosity, projector_alg + # ) + + # Fixed-point gradient + const gradient_alg_tol = 1e-6 + const gradient_alg_maxiter = 30 + const gradient_linsolver = :bicgstab # ∈ {:gmres, :bicgstab} + # const gradient_linsolver = BiCGStab(; + # maxiter=gradient_alg_maxiter, tol=gradient_alg_tol + # ) + const gradient_eigsolver = :arnoldi + const gradient_eigsolver_eager = true + # const gradient_eigsolver = Arnoldi(; + # maxiter=gradient_alg_maxiter, tol=gradient_alg_tol, eager=true + # ) + const gradient_alg_iterscheme = :fixed # ∈ {:fixed, :diffgauge} + const gradient_alg_type = :linsolver # ∈ {:geomsum, :manualiter, :linsolver, :eigsolver} + # const gradient_alg_type = LinSolver + # const gradient_alg = gradient_alg_type(; + # solver=gradient_linsolver, iterscheme=gradient_alg_iterscheme + # ) + + # Optimization + const reuse_env = true + const optimizer_tol = 1e-4 + const optimizer_maxiter = 100 + const lbfgs_memory = 20 + const optimizer_verbosity = 3 + const optimizer_alg = :lbfgs + # const optimizer = LBFGS( + # lbfgs_memory; maxiter=optimizer_maxiter, gradtol=optimizer_tol, verbosity=3 + # ) + + # OhMyThreads scheduler defaults + const scheduler = Ref{Scheduler}() + """ + 
set_scheduler!([scheduler]; kwargs...) + + Set `OhMyThreads` multi-threading scheduler parameters. + + The function either accepts a `scheduler` as an `OhMyThreads.Scheduler` or + as a symbol where the corresponding parameters are specificed as keyword arguments. + For instance, a static scheduler that uses four tasks with chunking enabled + can be set via + ``` + set_scheduler!(StaticScheduler(; ntasks=4, chunking=true)) + ``` + or equivalently with + ``` + set_scheduler!(:static; ntasks=4, chunking=true) + ``` + For a detailed description of all schedulers and their keyword arguments consult the + [`OhMyThreads` documentation](https://juliafolds2.github.io/OhMyThreads.jl/stable/refs/api/#Schedulers). + + If no `scheduler` is passed and only kwargs are provided, the `DynamicScheduler` + constructor is used with the provided kwargs. + + To reset the scheduler to its default value, one calls `set_scheduler!` without passing + arguments which then uses the default `DynamicScheduler()`. If the number of used threads is + just one it falls back to `SerialScheduler()`. + """ + function set_scheduler!(sc=OhMyThreads.Implementation.NotGiven(); kwargs...) + if isempty(kwargs) && sc isa OhMyThreads.Implementation.NotGiven + scheduler[] = Threads.nthreads() == 1 ? SerialScheduler() : DynamicScheduler() + else + scheduler[] = OhMyThreads.Implementation._scheduler_from_userinput( + sc; kwargs... + ) + end + return nothing + end + export set_scheduler! 
+ + function __init__() + return set_scheduler!() + end +end diff --git a/src/PEPSKit.jl b/src/PEPSKit.jl index 48b60f82..0a3cbecb 100644 --- a/src/PEPSKit.jl +++ b/src/PEPSKit.jl @@ -12,6 +12,8 @@ using MPSKitModels using FiniteDifferences using OhMyThreads: tmap +include("Defaults.jl") # Include first to allow for docstring interpolation with Defaults values + include("utility/util.jl") include("utility/diffable_threads.jl") include("utility/svd.jl") @@ -59,176 +61,6 @@ include("utility/symmetrization.jl") include("algorithms/optimization/fixed_point_differentiation.jl") include("algorithms/optimization/peps_optimization.jl") -""" - module Defaults - -Module containing default algorithm parameter values and arguments. - -# CTMRG -- `ctmrg_tol=1e-8`: Tolerance checking singular value and norm convergence -- `ctmrg_maxiter=100`: Maximal number of CTMRG iterations per run -- `ctmrg_miniter=4`: Minimal number of CTMRG carried out -- `ctmrg_alg_type=SimultaneousCTMRG`: Default CTMRG algorithm variant -- `ctmrg_verbosity=2`: CTMRG output information verbosity -- `trscheme=FixedSpaceTruncation()`: Truncation scheme for SVDs and other decompositions -- `svd_fwd_alg=TensorKit.SDD()`: SVD algorithm that is used in the forward pass -- `svd_rrule_type = Arnoldi`: Default solver type for SVD reverse-rule algorithm -- `svd_rrule_alg`: Reverse-rule algorithm for differentiating a SVD - - ``` - svd_rrule_alg = svd_rrule_type(; tol=ctmrg_tol, krylovdim=48, verbosity=-1) - ``` - -- `svd_alg`: Combination of forward and reverse SVD algorithms - - ``` - svd_alg=SVDAdjoint(; fwd_alg=svd_fwd_alg, rrule_alg=svd_rrule_alg) - ``` - -- `projector_alg_type=HalfInfiniteProjector`: Default type of projector algorithm -- `projector_alg`: Algorithm to compute CTMRG projectors - - ``` - projector_alg = projector_alg_type(; svd_alg, trscheme, verbosity=0) - ``` - -- `ctmrg_alg`: Algorithm for performing CTMRG runs - - ``` - ctmrg_alg = ctmrg_alg_type( - ctmrg_tol, ctmrg_maxiter, ctmrg_miniter, 
2, projector_alg - ) - ``` - -# Optimization -- `gradient_alg_tol=1e-6`: Convergence tolerance for the fixed-point gradient iteration -- `gradient_alg_maxiter=30`: Maximal number of iterations for computing the CTMRG fixed-point gradient -- `gradient_alg_iterscheme=:fixed`: Scheme for differentiating one CTMRG iteration -- `gradient_linsolver`: Default linear solver for the `LinSolver` gradient algorithm - - ``` - gradient_linsolver=KrylovKit.BiCGStab(; maxiter=gradient_alg_maxiter, tol=gradient_alg_tol) - ``` - -- `gradient_eigsolver`: Default eigsolver for the `EigSolver` gradient algorithm - - ``` - gradient_eigsolver = KrylovKit.Arnoldi(; maxiter=gradient_alg_maxiter, tol=gradient_alg_tol, eager=true) - ``` - -- `gradient_alg`: Algorithm to compute the gradient fixed-point - - ``` - gradient_alg = LinSolver(; solver=gradient_linsolver, iterscheme=gradient_alg_iterscheme) - ``` - -- `reuse_env=true`: If `true`, the current optimization step is initialized on the previous - environment, otherwise a random environment is used -- `optimizer_tol=1e-4`: Gradient norm tolerance of the optimizer -- `optimizer_maxiter=100`: Maximal number of optimization steps -- `lbfgs_memory=20`: Size of limited memory representation of BFGS Hessian matrix -- `optimizer`: Default `OptimKit.OptimizerAlgorithm` for PEPS optimization - - ``` - optimizer=LBFGS(lbfgs_memory; maxiter=optimizer_maxiter, gradtol=optimizer_tol, verbosity=3) - ``` - -# OhMyThreads scheduler -- `scheduler=Ref{Scheduler}(...)`: Multi-threading scheduler which can be accessed via `set_scheduler!` -""" -module Defaults - using TensorKit, KrylovKit, OptimKit, OhMyThreads - using PEPSKit: - LinSolver, - FixedSpaceTruncation, - SVDAdjoint, - HalfInfiniteProjector, - SimultaneousCTMRG - - # CTMRG - const ctmrg_tol = 1e-8 - const ctmrg_maxiter = 100 - const ctmrg_miniter = 4 - const ctmrg_alg_type = SimultaneousCTMRG - const ctmrg_verbosity = 2 - const sparse = false - const trscheme = FixedSpaceTruncation() - const 
svd_fwd_alg = TensorKit.SDD() - const svd_rrule_type = Arnoldi - const svd_rrule_alg = svd_rrule_type(; tol=ctmrg_tol, krylovdim=48, verbosity=-1) - const svd_alg = SVDAdjoint(; fwd_alg=svd_fwd_alg, rrule_alg=svd_rrule_alg) - const projector_alg_type = HalfInfiniteProjector - const projector_alg = projector_alg_type(; svd_alg, trscheme, verbosity=0) - const ctmrg_alg = ctmrg_alg_type( - ctmrg_tol, ctmrg_maxiter, ctmrg_miniter, ctmrg_verbosity, projector_alg - ) - - # Optimization - const gradient_alg_tol = 1e-6 - const gradient_alg_maxiter = 30 - const gradient_linsolver = BiCGStab(; - maxiter=gradient_alg_maxiter, tol=gradient_alg_tol - ) - const gradient_eigsolver = Arnoldi(; - maxiter=gradient_alg_maxiter, tol=gradient_alg_tol, eager=true - ) - const gradient_alg_iterscheme = :fixed - const gradient_alg_type = LinSolver - const gradient_alg = gradient_alg_type(; - solver=gradient_linsolver, iterscheme=gradient_alg_iterscheme - ) - const reuse_env = true - const optimizer_tol = 1e-4 - const optimizer_maxiter = 100 - const lbfgs_memory = 20 - const optimizer = LBFGS( - lbfgs_memory; maxiter=optimizer_maxiter, gradtol=optimizer_tol, verbosity=3 - ) - - # OhMyThreads scheduler defaults - const scheduler = Ref{Scheduler}() - """ - set_scheduler!([scheduler]; kwargs...) - - Set `OhMyThreads` multi-threading scheduler parameters. - - The function either accepts a `scheduler` as an `OhMyThreads.Scheduler` or - as a symbol where the corresponding parameters are specificed as keyword arguments. - For instance, a static scheduler that uses four tasks with chunking enabled - can be set via - ``` - set_scheduler!(StaticScheduler(; ntasks=4, chunking=true)) - ``` - or equivalently with - ``` - set_scheduler!(:static; ntasks=4, chunking=true) - ``` - For a detailed description of all schedulers and their keyword arguments consult the - [`OhMyThreads` documentation](https://juliafolds2.github.io/OhMyThreads.jl/stable/refs/api/#Schedulers). 
- - If no `scheduler` is passed and only kwargs are provided, the `DynamicScheduler` - constructor is used with the provided kwargs. - - To reset the scheduler to its default value, one calls `set_scheduler!` without passing - arguments which then uses the default `DynamicScheduler()`. If the number of used threads is - just one it falls back to `SerialScheduler()`. - """ - function set_scheduler!(sc=OhMyThreads.Implementation.NotGiven(); kwargs...) - if isempty(kwargs) && sc isa OhMyThreads.Implementation.NotGiven - scheduler[] = Threads.nthreads() == 1 ? SerialScheduler() : DynamicScheduler() - else - scheduler[] = OhMyThreads.Implementation._scheduler_from_userinput( - sc; kwargs... - ) - end - return nothing - end - export set_scheduler! - - function __init__() - return set_scheduler!() - end -end using .Defaults: set_scheduler! export set_scheduler! From 8aa6ac6798bad71f3c7577387591b53aa1d5b8d1 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Mon, 3 Mar 2025 18:03:41 +0100 Subject: [PATCH 26/52] Only use concrete types in Defaults, use select_algorithm to map symbols to algorithms --- src/Defaults.jl | 37 ++-------------- src/algorithms/ctmrg/ctmrg.jl | 3 +- src/algorithms/ctmrg/projectors.jl | 4 ++ .../fixed_point_differentiation.jl | 24 +++++++++++ .../optimization/peps_optimization.jl | 35 ++------------- src/utility/svd.jl | 43 +++++++++++++++++++ 6 files changed, 80 insertions(+), 66 deletions(-) diff --git a/src/Defaults.jl b/src/Defaults.jl index 07baa077..9e11ada6 100644 --- a/src/Defaults.jl +++ b/src/Defaults.jl @@ -76,14 +76,6 @@ Module containing default algorithm parameter values and arguments. 
- `scheduler=Ref{Scheduler}(...)`: Multi-threading scheduler which can be accessed via `set_scheduler!` """ module Defaults - using TensorKit, KrylovKit, OptimKit, OhMyThreads - using PEPSKit: - LinSolver, - FixedSpaceTruncation, - SVDAdjoint, - HalfInfiniteProjector, - SimultaneousCTMRG - # CTMRG const ctmrg_tol = 1e-8 const ctmrg_maxiter = 100 @@ -93,43 +85,24 @@ module Defaults const sparse = false # TODO: implement sparse CTMRG # SVD forward & reverse - const trscheme = FixedSpaceTruncation() - # const trscheme = FixedSpaceTruncation() + const trscheme = :fixedspace const svd_fwd_alg = :sdd # ∈ {:sdd, :svd, :iterative} - # const svd_fwd_alg = TensorKit.SDD() const svd_rrule_alg = :arnoldi # ∈ {:gmres, :bicgstab, :arnoldi} - const svd_rrule_krylovdim_factor = 1.3 const svd_rrule_verbosity = -1 - # const svd_rrule_alg = svd_rrule_type(; tol=ctmrg_tol, krylovdim=48, verbosity=-1) - # const svd_alg = SVDAdjoint(; fwd_alg=svd_fwd_alg, rrule_alg=svd_rrule_alg) + const krylovdim_factor = 1.4 # Projector const projector_alg = :halfinfinite # ∈ {:halfinfinite, :fullinfinite} - # const projector_alg_type = HalfInfiniteProjector const projector_verbosity = 0 - # const projector_alg = projector_alg_type(; svd_alg, trscheme, verbosity=0) - # const ctmrg_alg = ctmrg_alg_type( - # ctmrg_tol, ctmrg_maxiter, ctmrg_miniter, ctmrg_verbosity, projector_alg - # ) # Fixed-point gradient const gradient_alg_tol = 1e-6 const gradient_alg_maxiter = 30 const gradient_linsolver = :bicgstab # ∈ {:gmres, :bicgstab} - # const gradient_linsolver = BiCGStab(; - # maxiter=gradient_alg_maxiter, tol=gradient_alg_tol - # ) const gradient_eigsolver = :arnoldi const gradient_eigsolver_eager = true - # const gradient_eigsolver = Arnoldi(; - # maxiter=gradient_alg_maxiter, tol=gradient_alg_tol, eager=true - # ) const gradient_alg_iterscheme = :fixed # ∈ {:fixed, :diffgauge} - const gradient_alg_type = :linsolver # ∈ {:geomsum, :manualiter, :linsolver, :eigsolver} - # const gradient_alg_type = 
LinSolver - # const gradient_alg = gradient_alg_type(; - # solver=gradient_linsolver, iterscheme=gradient_alg_iterscheme - # ) + const gradient_alg = :linsolver # ∈ {:geomsum, :manualiter, :linsolver, :eigsolver} # Optimization const reuse_env = true @@ -137,10 +110,6 @@ module Defaults const optimizer_maxiter = 100 const lbfgs_memory = 20 const optimizer_verbosity = 3 - const optimizer_alg = :lbfgs - # const optimizer = LBFGS( - # lbfgs_memory; maxiter=optimizer_maxiter, gradtol=optimizer_tol, verbosity=3 - # ) # OhMyThreads scheduler defaults const scheduler = Ref{Scheduler}() diff --git a/src/algorithms/ctmrg/ctmrg.jl b/src/algorithms/ctmrg/ctmrg.jl index db8602a5..0fe4aaa1 100644 --- a/src/algorithms/ctmrg/ctmrg.jl +++ b/src/algorithms/ctmrg/ctmrg.jl @@ -131,11 +131,12 @@ function select_algorithm( χenv = maximum(env₀.corners) do corner return dim(space(corner, 1)) end + krylovdim = round(Int, Defaults.krylovdim_factor * χenv) svd_rrule_algorithm = if isnothing(svd_rrule_alg) nothing elseif svd_rrule_alg <: Union{GMRES,Arnoldi} - svd_rrule_alg(; tol=svd_rrule_tol, krylovdim=χenv + 24, verbosity=verbosity - 2) + svd_rrule_alg(; tol=svd_rrule_tol, krylovdim, verbosity=verbosity - 2) elseif svd_rrule_alg <: BiCGStab svd_rrule_alg(; tol=svd_rrule_tol, verbosity) end diff --git a/src/algorithms/ctmrg/projectors.jl b/src/algorithms/ctmrg/projectors.jl index a79b3dbe..6673b9fd 100644 --- a/src/algorithms/ctmrg/projectors.jl +++ b/src/algorithms/ctmrg/projectors.jl @@ -56,6 +56,10 @@ Projector algorithm implementing projectors from SVDing the full 4x4 CTMRG envir verbosity::Int = 0 end +function select_algorithm(::Type{ProjectorAlgorithm}; alg::Union{Symbol,<:ProjectorAlgorithm}, svd_alg, trscheme, verbosity) + # TODO +end + # TODO: add `LinearAlgebra.cond` to TensorKit # Compute condition number smax / smin for diagonal singular value TensorMap function _condition_number(S::AbstractTensorMap) diff --git a/src/algorithms/optimization/fixed_point_differentiation.jl 
b/src/algorithms/optimization/fixed_point_differentiation.jl index 870d8678..bc948753 100644 --- a/src/algorithms/optimization/fixed_point_differentiation.jl +++ b/src/algorithms/optimization/fixed_point_differentiation.jl @@ -96,6 +96,30 @@ function EigSolver(; return EigSolver{iterscheme}(solver) end +function select_algorithm(::Type{GradMode}; alg::Union{Symbol,GradMode{F}}, kwargs...) where {F} + # TODO + # if alg <: Union{GeomSum,ManualIter} + # gradient_kwargs.alg(; + # tol=gradient_kwargs.tol, + # maxiter=gradient_kwargs.maxiter, + # verbosity=gradient_kwargs.verbosity, + # iterscheme=gradient_kwargs.iterscheme, + # ) + # elseif gradient_kwargs.alg <: LinSolver + # solver = Defaults.gradient_linsolver + # @reset solver.maxiter = gradient_kwargs.maxiter + # @reset solver.tol = gradient_kwargs.tol + # @reset solver.verbosity = gradient_kwargs.verbosity + # LinSolver(; solver, iterscheme=gradient_kwargs.iterscheme) + # elseif gradient_kwargs.alg <: EigSolver + # solver = Defaults.gradient_eigsolver + # @reset solver.maxiter = gradient_kwargs.maxiter + # @reset solver.tol = gradient_kwargs.tol + # @reset solver.verbosity = gradient_kwargs.verbosity + # EigSolver(; solver, iterscheme=gradient_kwargs.iterscheme) + # end +end + #= Evaluating the gradient of the cost function for CTMRG: - The gradient of the cost function for CTMRG can be computed using automatic differentiation (AD) or explicit evaluation of the geometric sum. 
diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index 6686b3dc..b1683840 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -208,16 +208,7 @@ function fixedpoint( end """ - function select_algorithm( - ::typeof(fixedpoint), - env₀::CTMRGEnv; - tol=Defaults.optimizer_tol, - verbosity=2, - boundary_alg=(;), - gradient_alg=(;), - optimization_alg=(;), - (finalize!)=OptimKit._finalize!, - ) + function select_algorithm(::typeof(fixedpoint), env₀::CTMRGEnv; kwargs...) Parse optimization keyword arguments on to the corresponding algorithm structs and return a final `PEPSOptimize` to be used in `fixedpoint`. For a description of the keyword @@ -274,33 +265,14 @@ function select_algorithm( gradient_alg elseif gradient_alg isa NamedTuple gradient_kwargs = (; + alg=Defaults.gradient_alg_type, tol=1e-2tol, maxiter=Defaults.gradient_alg_maxiter, - alg=Defaults.gradient_alg_type, verbosity=gradient_verbosity, iterscheme=Defaults.gradient_alg_iterscheme, gradient_alg..., # replaces all specified kwargs ) - if gradient_kwargs.alg <: Union{GeomSum,ManualIter} - gradient_kwargs.alg(; - tol=gradient_kwargs.tol, - maxiter=gradient_kwargs.maxiter, - verbosity=gradient_kwargs.verbosity, - iterscheme=gradient_kwargs.iterscheme, - ) - elseif gradient_kwargs.alg <: LinSolver - solver = Defaults.gradient_linsolver - @reset solver.maxiter = gradient_kwargs.maxiter - @reset solver.tol = gradient_kwargs.tol - @reset solver.verbosity = gradient_kwargs.verbosity - LinSolver(; solver, iterscheme=gradient_kwargs.iterscheme) - elseif gradient_kwargs.alg <: EigSolver - solver = Defaults.gradient_eigsolver - @reset solver.maxiter = gradient_kwargs.maxiter - @reset solver.tol = gradient_kwargs.tol - @reset solver.verbosity = gradient_kwargs.verbosity - EigSolver(; solver, iterscheme=gradient_kwargs.iterscheme) - end + select_algorithm(GradMode; gradient_kwargs...) 
else throw(ArgumentError("unknown gradient algorithm: $gradient_alg")) end @@ -310,6 +282,7 @@ function select_algorithm( optimization_alg elseif optimization_alg isa NamedTuple optimization_kwargs = (; + alg=Defaults.optimizer_alg, tol=tol, maxiter=Defaults.optimizer_maxiter, lbfgs_memory=Defaults.lbfgs_memory, diff --git a/src/utility/svd.jl b/src/utility/svd.jl index 9565b210..1129e70d 100644 --- a/src/utility/svd.jl +++ b/src/utility/svd.jl @@ -39,6 +39,49 @@ function SVDAdjoint(; return SVDAdjoint(fwd_alg, rrule_alg, broadening) end +const svd_fwd_symbols = Dict( + :sdd => TensorKit.SDD, :svd => TensorKit.SVD, :iterative => IterSVD +) +const truncation_scheme_symbols = Dict( + :fixedspace => FixedSpaceTruncation, + :notrunc => TensorKit.NoTruncation, + :truncerr => TensorKit.TruncationError, + :truncspace => TensorKit.TruncationSpace, + :truncbelow => TensorKit.TruncationCutoff, +) +const svd_rrule_symbols = Dict(:gmres => GMRES, :bicgstab => BiCGStab, :arnoldi => Arnoldi) + +function select_algorithm( + ::Type{SVDAdjoint}; fwd_alg=(;), rrule_alg=(;), broadening=nothing +) + fwd_algorithm = if fwd_alg isa NamedTuple + fwd_kwargs = (; alg=Defaults.svd_fwd_alg, fwd_alg...) + alg = + fwd_kwargs.alg isa Symbol ? svd_fwd_symbols[fwd_kwargs.alg] : fwd_kwargs.alg + alg(fwd_kwargs...) + else + fwd_alg + end + + rrule_algorithm = if rrule_alg isa NamedTuple + rrule_kwargs = (; + alg=Defaults.svd_rrule_alg, + verbosity=Defaults.svd_rrule_verbosity, + rrule_alg..., + ) + alg = if rrule_kwargs.alg isa Symbol + svd_rrule_symbols[rrule_kwargs.alg] + else + rrule_kwargs.alg + end + alg(rrule_kwargs...) 
+ else + rrule_alg + end + + return SVDAdjoint(fwd_algorithm, rrule_algorithm, broadening) +end + """ PEPSKit.tsvd(t, alg; trunc=notrunc(), p=2) From 09dfa59e1b9c3b587ea306b106930551aadb05c3 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Tue, 4 Mar 2025 10:24:03 +0100 Subject: [PATCH 27/52] Rename gradient algorithm defaults --- src/Defaults.jl | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/Defaults.jl b/src/Defaults.jl index 9e11ada6..cce20f8f 100644 --- a/src/Defaults.jl +++ b/src/Defaults.jl @@ -40,25 +40,25 @@ Module containing default algorithm parameter values and arguments. ``` # Optimization -- `gradient_alg_tol=1e-6`: Convergence tolerance for the fixed-point gradient iteration -- `gradient_alg_maxiter=30`: Maximal number of iterations for computing the CTMRG fixed-point gradient -- `gradient_alg_iterscheme=:fixed`: Scheme for differentiating one CTMRG iteration +- `gradient_tol=1e-6`: Convergence tolerance for the fixed-point gradient iteration +- `gradient_maxiter=30`: Maximal number of iterations for computing the CTMRG fixed-point gradient +- `gradient_iterscheme=:fixed`: Scheme for differentiating one CTMRG iteration - `gradient_linsolver`: Default linear solver for the `LinSolver` gradient algorithm ``` - gradient_linsolver=KrylovKit.BiCGStab(; maxiter=gradient_alg_maxiter, tol=gradient_alg_tol) + gradient_linsolver=KrylovKit.BiCGStab(; maxiter=gradient_maxiter, tol=gradient_tol) ``` - `gradient_eigsolver`: Default eigsolver for the `EigSolver` gradient algorithm ``` - gradient_eigsolver = KrylovKit.Arnoldi(; maxiter=gradient_alg_maxiter, tol=gradient_alg_tol, eager=true) + gradient_eigsolver = KrylovKit.Arnoldi(; maxiter=gradient_maxiter, tol=gradient_tol, eager=true) ``` - `gradient_alg`: Algorithm to compute the gradient fixed-point ``` - gradient_alg = LinSolver(; solver=gradient_linsolver, iterscheme=gradient_alg_iterscheme) + gradient_alg = LinSolver(; solver=gradient_linsolver, 
iterscheme=gradient_iterscheme) ``` - `reuse_env=true`: If `true`, the current optimization step is initialized on the previous @@ -96,12 +96,13 @@ module Defaults const projector_verbosity = 0 # Fixed-point gradient - const gradient_alg_tol = 1e-6 - const gradient_alg_maxiter = 30 + const gradient_tol = 1e-6 + const gradient_maxiter = 30 + const gradient_verbosity = -1 const gradient_linsolver = :bicgstab # ∈ {:gmres, :bicgstab} const gradient_eigsolver = :arnoldi const gradient_eigsolver_eager = true - const gradient_alg_iterscheme = :fixed # ∈ {:fixed, :diffgauge} + const gradient_iterscheme = :fixed # ∈ {:fixed, :diffgauge} const gradient_alg = :linsolver # ∈ {:geomsum, :manualiter, :linsolver, :eigsolver} # Optimization From 5634a34610fa418c2a5ca33996281089a4246fa6 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Tue, 4 Mar 2025 17:09:07 +0100 Subject: [PATCH 28/52] Add remaining select_algorithm methods and algorithm Symbols --- src/Defaults.jl | 152 +++++++++--------- src/PEPSKit.jl | 1 - src/algorithms/ctmrg/ctmrg.jl | 67 ++++---- src/algorithms/ctmrg/projectors.jl | 57 ++++++- .../fixed_point_differentiation.jl | 116 ++++++++----- .../optimization/peps_optimization.jl | 16 +- src/utility/svd.jl | 27 ++-- 7 files changed, 267 insertions(+), 169 deletions(-) diff --git a/src/Defaults.jl b/src/Defaults.jl index cce20f8f..3f334bdf 100644 --- a/src/Defaults.jl +++ b/src/Defaults.jl @@ -76,83 +76,81 @@ Module containing default algorithm parameter values and arguments. 
- `scheduler=Ref{Scheduler}(...)`: Multi-threading scheduler which can be accessed via `set_scheduler!` """ module Defaults - # CTMRG - const ctmrg_tol = 1e-8 - const ctmrg_maxiter = 100 - const ctmrg_miniter = 4 - const ctmrg_alg = :simultaneous # ∈ {:simultaneous, :sequential} - const ctmrg_verbosity = 2 - const sparse = false # TODO: implement sparse CTMRG - - # SVD forward & reverse - const trscheme = :fixedspace - const svd_fwd_alg = :sdd # ∈ {:sdd, :svd, :iterative} - const svd_rrule_alg = :arnoldi # ∈ {:gmres, :bicgstab, :arnoldi} - const svd_rrule_verbosity = -1 - const krylovdim_factor = 1.4 - - # Projector - const projector_alg = :halfinfinite # ∈ {:halfinfinite, :fullinfinite} - const projector_verbosity = 0 - - # Fixed-point gradient - const gradient_tol = 1e-6 - const gradient_maxiter = 30 - const gradient_verbosity = -1 - const gradient_linsolver = :bicgstab # ∈ {:gmres, :bicgstab} - const gradient_eigsolver = :arnoldi - const gradient_eigsolver_eager = true - const gradient_iterscheme = :fixed # ∈ {:fixed, :diffgauge} - const gradient_alg = :linsolver # ∈ {:geomsum, :manualiter, :linsolver, :eigsolver} - - # Optimization - const reuse_env = true - const optimizer_tol = 1e-4 - const optimizer_maxiter = 100 - const lbfgs_memory = 20 - const optimizer_verbosity = 3 - - # OhMyThreads scheduler defaults - const scheduler = Ref{Scheduler}() - """ - set_scheduler!([scheduler]; kwargs...) - - Set `OhMyThreads` multi-threading scheduler parameters. - - The function either accepts a `scheduler` as an `OhMyThreads.Scheduler` or - as a symbol where the corresponding parameters are specificed as keyword arguments. 
- For instance, a static scheduler that uses four tasks with chunking enabled - can be set via - ``` - set_scheduler!(StaticScheduler(; ntasks=4, chunking=true)) - ``` - or equivalently with - ``` - set_scheduler!(:static; ntasks=4, chunking=true) - ``` - For a detailed description of all schedulers and their keyword arguments consult the - [`OhMyThreads` documentation](https://juliafolds2.github.io/OhMyThreads.jl/stable/refs/api/#Schedulers). - - If no `scheduler` is passed and only kwargs are provided, the `DynamicScheduler` - constructor is used with the provided kwargs. - - To reset the scheduler to its default value, one calls `set_scheduler!` without passing - arguments which then uses the default `DynamicScheduler()`. If the number of used threads is - just one it falls back to `SerialScheduler()`. - """ - function set_scheduler!(sc=OhMyThreads.Implementation.NotGiven(); kwargs...) - if isempty(kwargs) && sc isa OhMyThreads.Implementation.NotGiven - scheduler[] = Threads.nthreads() == 1 ? SerialScheduler() : DynamicScheduler() - else - scheduler[] = OhMyThreads.Implementation._scheduler_from_userinput( - sc; kwargs... - ) - end - return nothing - end - export set_scheduler! 
+# CTMRG +const ctmrg_tol = 1e-8 +const ctmrg_maxiter = 100 +const ctmrg_miniter = 4 +const ctmrg_alg = :simultaneous # ∈ {:simultaneous, :sequential} +const ctmrg_verbosity = 2 +const sparse = false # TODO: implement sparse CTMRG + +# SVD forward & reverse +const trscheme = :fixedspace # ∈ {:fixedspace, :notrunc, :truncerr, :truncspace, :truncbelow} +const svd_fwd_alg = :sdd # ∈ {:sdd, :svd, :iterative} +const svd_rrule_alg = :arnoldi # ∈ {:gmres, :bicgstab, :arnoldi} +const svd_rrule_verbosity = -1 +const krylovdim_factor = 1.4 + +# Projector +const projector_alg = :halfinfinite # ∈ {:halfinfinite, :fullinfinite} +const projector_verbosity = 0 + +# Fixed-point gradient +const gradient_tol = 1e-6 +const gradient_maxiter = 30 +const gradient_verbosity = -1 +const gradient_linsolver = :bicgstab # ∈ {:gmres, :bicgstab} +const gradient_eigsolver = :arnoldi +const gradient_eigsolver_eager = true +const gradient_iterscheme = :fixed # ∈ {:fixed, :diffgauge} +const gradient_alg = :linsolver # ∈ {:geomsum, :manualiter, :linsolver, :eigsolver} - function __init__() - return set_scheduler!() +# Optimization +const reuse_env = true +const optimizer_tol = 1e-4 +const optimizer_maxiter = 100 +const lbfgs_memory = 20 +const optimizer_verbosity = 3 + +# OhMyThreads scheduler defaults +const scheduler = Ref{Scheduler}() +""" + set_scheduler!([scheduler]; kwargs...) + +Set `OhMyThreads` multi-threading scheduler parameters. + +The function either accepts a `scheduler` as an `OhMyThreads.Scheduler` or +as a symbol where the corresponding parameters are specificed as keyword arguments. 
+For instance, a static scheduler that uses four tasks with chunking enabled +can be set via +``` +set_scheduler!(StaticScheduler(; ntasks=4, chunking=true)) +``` +or equivalently with +``` +set_scheduler!(:static; ntasks=4, chunking=true) +``` +For a detailed description of all schedulers and their keyword arguments consult the +[`OhMyThreads` documentation](https://juliafolds2.github.io/OhMyThreads.jl/stable/refs/api/#Schedulers). + +If no `scheduler` is passed and only kwargs are provided, the `DynamicScheduler` +constructor is used with the provided kwargs. + +To reset the scheduler to its default value, one calls `set_scheduler!` without passing +arguments which then uses the default `DynamicScheduler()`. If the number of used threads is +just one it falls back to `SerialScheduler()`. +""" +function set_scheduler!(sc=OhMyThreads.Implementation.NotGiven(); kwargs...) + if isempty(kwargs) && sc isa OhMyThreads.Implementation.NotGiven + scheduler[] = Threads.nthreads() == 1 ? SerialScheduler() : DynamicScheduler() + else + scheduler[] = OhMyThreads.Implementation._scheduler_from_userinput(sc; kwargs...) end + return nothing +end +export set_scheduler! + +function __init__() + return set_scheduler!() +end end diff --git a/src/PEPSKit.jl b/src/PEPSKit.jl index 0a3cbecb..7c4cc12b 100644 --- a/src/PEPSKit.jl +++ b/src/PEPSKit.jl @@ -61,7 +61,6 @@ include("utility/symmetrization.jl") include("algorithms/optimization/fixed_point_differentiation.jl") include("algorithms/optimization/peps_optimization.jl") - using .Defaults: set_scheduler! export set_scheduler! export SVDAdjoint, IterSVD diff --git a/src/algorithms/ctmrg/ctmrg.jl b/src/algorithms/ctmrg/ctmrg.jl index 0fe4aaa1..c0c002d6 100644 --- a/src/algorithms/ctmrg/ctmrg.jl +++ b/src/algorithms/ctmrg/ctmrg.jl @@ -34,18 +34,13 @@ supplied via the keyword arguments or directly as an [`CTMRGAlgorithm`](@ref) st 2. Initialization and convergence info 3. Iteration info 4. 
Debug info -* `alg::Union{Symbol,<:CTMRGAlgorithm}=$(Defaults.ctmrg_alg_type)`: Variant of the CTMRG algorithm. See also [`CTMRGAlgorithm`](@ref). +* `alg::Union{Symbol,Type{CTMRGAlgorithm}}=$(Defaults.ctmrg_alg)`: Variant of the CTMRG algorithm. See also [`CTMRGAlgorithm`](@ref). ### Projector algorithm -* `trscheme::TruncationScheme=$(Defaults.trscheme)`: Truncation scheme for the projector computation, which controls the resulting virtual spaces. -* `svd_alg=Defaults.svd_fwd_alg`: SVD algorithm for computing projectors. See also [`PEPSKit.tsvd`](@ref). -* `projector_alg::Union{Symbol,<:ProjectorAlgorithm}=$(Defaults.projector_alg_type)`: Variant of the projector algorithm. See also [`ProjectorAlgorithm`](@ref). - -### Differentiation settings - -* `svd_rrule_alg::Union{Symbol,Algorithm}=$(Defaults.svd_rrule_alg_type)`: Algorithm used for differentiating SVDs. -* `svd_rrule_tol::Real=1e1tol`: Convergence tolerance for SVD `rrule` +* `trscheme::Union{TruncationScheme,NamedTuple}=(; alg=$(Defaults.trscheme))`: Truncation scheme for the projector computation, which controls the resulting virtual spaces. +* `svd_alg::Union{<:SVDAdjoint,NamedTuple}`: SVD algorithm for computing projectors. See also [`PEPSKit.tsvd`](@ref). +* `projector_alg::Union{Symbol,Type{ProjectorAlgorithm}}=$(Defaults.projector_alg)`: Variant of the projector algorithm. See also [`ProjectorAlgorithm`](@ref). """ function MPSKit.leading_boundary(env₀::CTMRGEnv, network::InfiniteSquareNetwork; kwargs...) alg = select_algorithm(leading_boundary, env₀; kwargs...) @@ -104,8 +99,11 @@ end @non_differentiable ctmrg_logfinish!(args...) @non_differentiable ctmrg_logcancel!(args...) -# TODO: support reasonable kwargs for `trscheme::Union{TruncationScheme,NamedTuple}`? -# TODO: bit strange to have `svd_rrule_alg` and `svd_rrule_tol`. Merge this into a single `svd_rrule_alg::Union{Symbol,Algorithm,NamedTuple}`? 
+# Available CTMRG algorithms as Symbols +const ctmrg_symbols = Dict( + :simultaneous => SimultaneousCTMRG, :sequential => SequentialCTMRG +) + """ select_algorithm(::typeof(leading_boundary), env₀::CTMRGEnv; kwargs...) -> CTMRGAlgorithm @@ -116,33 +114,48 @@ description of all keyword arguments. function select_algorithm( ::typeof(leading_boundary), env₀::CTMRGEnv; - alg=Defaults.ctmrg_alg_type, + alg=Defaults.ctmrg_alg, tol=Defaults.ctmrg_tol, maxiter=Defaults.ctmrg_maxiter, miniter=Defaults.ctmrg_miniter, verbosity=Defaults.ctmrg_verbosity, - trscheme=Defaults.trscheme, - svd_alg=Defaults.svd_fwd_alg, - svd_rrule_alg=Defaults.svd_rrule_type, - svd_rrule_tol=1e1tol, - projector_alg=Defaults.projector_alg_type, + trscheme=(; alg=Defaults.trscheme), + svd_alg=(;), + projector_alg=Defaults.projector_alg, # only allows for Symbol/Type{ProjectorAlgorithm} to expose projector kwargs ) - # extract maximal environment dimenions + # extract maximal environment dimensions χenv = maximum(env₀.corners) do corner return dim(space(corner, 1)) end krylovdim = round(Int, Defaults.krylovdim_factor * χenv) - svd_rrule_algorithm = if isnothing(svd_rrule_alg) - nothing - elseif svd_rrule_alg <: Union{GMRES,Arnoldi} - svd_rrule_alg(; tol=svd_rrule_tol, krylovdim, verbosity=verbosity - 2) - elseif svd_rrule_alg <: BiCGStab - svd_rrule_alg(; tol=svd_rrule_tol, verbosity) + # replace symbol with projector alg type + alg_type = if alg isa Symbol + projector_symbols[alg] + else + alg end - svd_algorithm = SVDAdjoint(; fwd_alg=svd_alg, rrule_alg=svd_rrule_algorithm) - projector_algorithm = projector_alg(svd_algorithm, trscheme, verbosity) - return alg(tol, maxiter, miniter, verbosity, projector_algorithm) + + # parse SVD forward & rrule algorithm + svd_algorithm = if svd_alg isa SVDAdjoint + svd_alg + elseif svd_alg isa NamedTuple + alg′ = select_algorithm( + SVDAdjoint; rrule_alg=(; tol=1e1tol, verbosity=verbosity - 2), svd_alg... 
+ ) + if typeof(alg′.rrule_alg) <: Union{<:GMRES,<:Arnoldi} + @reset alg′.rrule_alg.krylovdim = krylovdim + end + else + throw(ArgumentError("unknown SVD algorithm: $svd_alg")) + end + + # parse CTMRG projector algorithm + projector_algorithm = select_algorithm( + ProjectorAlgorithm; alg=projector_alg, svd_alg, trscheme, verbosity + ) + + return alg_type(tol, maxiter, miniter, verbosity, projector_algorithm) end #= diff --git a/src/algorithms/ctmrg/projectors.jl b/src/algorithms/ctmrg/projectors.jl index 6673b9fd..9277455f 100644 --- a/src/algorithms/ctmrg/projectors.jl +++ b/src/algorithms/ctmrg/projectors.jl @@ -56,8 +56,61 @@ Projector algorithm implementing projectors from SVDing the full 4x4 CTMRG envir verbosity::Int = 0 end -function select_algorithm(::Type{ProjectorAlgorithm}; alg::Union{Symbol,<:ProjectorAlgorithm}, svd_alg, trscheme, verbosity) - # TODO +# Available truncation schemes as Symbols +const truncation_scheme_symbols = Dict( + :fixedspace => FixedSpaceTruncation, + :notrunc => TensorKit.NoTruncation, + :truncerr => TensorKit.TruncationError, + :truncspace => TensorKit.TruncationSpace, + :truncbelow => TensorKit.TruncationCutoff, +) + +function select_algorithm( + ::Type{TensorKit.TruncationScheme}; alg=Defaults.trscheme, kwargs... +) + alg_type = alg isa Symbol ? truncation_scheme_symbols[alg] : alg # replace Symbol with TruncationScheme type + args = map(k -> last(kwargs[k]), keys(kwargs)) # extract only values of supplied kwargs (empty Tuple, if kwargs is empty) + return alg_type(args...) 
+end + +# Available projector algorithms as Symbols +const projector_symbols = Dict( + :halfinfinite => HalfInfiniteProjector, :fullinfinite => FullInfiniteProjector +) + +function select_algorithm( + ::Type{ProjectorAlgorithm}; + alg=Defaults.projector_alg, + svd_alg=(;), + trscheme=(;), + verbosity=Defaults.projector_verbosity, +) + # replace symbol with projector alg type + alg_type = if alg isa Symbol + projector_symbols[alg] + else + alg + end + + # parse SVD forward & rrule algorithm + svd_algorithm = if svd_alg isa SVDAdjoint + svd_alg + elseif svd_alg isa NamedTuple + select_algorithm(SVDAdjoint; svd_alg...) + else + throw(ArgumentError("unknown SVD algorithm: $svd_alg")) + end + + # parse truncation scheme + truncation_scheme = if trscheme isa TruncationScheme + trscheme + elseif trscheme isa NamedTuple + select_algorithm(TruncationScheme; trscheme...) + else + throw(ArgumentError("unknown truncation scheme: $trscheme")) + end + + return alg_type(svd_algorithm, truncation_scheme, verbosity) end # TODO: add `LinearAlgebra.cond` to TensorKit diff --git a/src/algorithms/optimization/fixed_point_differentiation.jl b/src/algorithms/optimization/fixed_point_differentiation.jl index bc948753..f89b3f25 100644 --- a/src/algorithms/optimization/fixed_point_differentiation.jl +++ b/src/algorithms/optimization/fixed_point_differentiation.jl @@ -3,8 +3,8 @@ abstract type GradMode{F} end iterscheme(::GradMode{F}) where {F} = F """ - struct GeomSum(; tol=$(Defaults.gradient_alg_tol), maxiter=$(Defaults.gradient_alg_maxiter), - verbosity=0, iterscheme=$(Defaults.gradient_alg_iterscheme)) <: GradMode{iterscheme} + struct GeomSum(; tol=$(Defaults.gradient_tol), maxiter=$(Defaults.gradient_maxiter), + verbosity=0, iterscheme=$(Defaults.gradient_iterscheme)) <: GradMode{iterscheme} Gradient mode for CTMRG using explicit evaluation of the geometric sum. 
@@ -20,17 +20,17 @@ struct GeomSum{F} <: GradMode{F} verbosity::Int end function GeomSum(; - tol=Defaults.gradient_alg_tol, - maxiter=Defaults.gradient_alg_maxiter, + tol=Defaults.gradient_tol, + maxiter=Defaults.gradient_maxiter, verbosity=0, - iterscheme=Defaults.gradient_alg_iterscheme, + iterscheme=Defaults.gradient_iterscheme, ) return GeomSum{iterscheme}(tol, maxiter, verbosity) end """ - struct ManualIter(; tol=$(Defaults.gradient_alg_tol), maxiter=$(Defaults.gradient_alg_maxiter), - verbosity=0, iterscheme=$(Defaults.gradient_alg_iterscheme)) <: GradMode{iterscheme} + struct ManualIter(; tol=$(Defaults.gradient_tol), maxiter=$(Defaults.gradient_maxiter), + verbosity=0, iterscheme=$(Defaults.gradient_iterscheme)) <: GradMode{iterscheme} Gradient mode for CTMRG using manual iteration to solve the linear problem. @@ -46,16 +46,16 @@ struct ManualIter{F} <: GradMode{F} verbosity::Int end function ManualIter(; - tol=Defaults.gradient_alg_tol, - maxiter=Defaults.gradient_alg_maxiter, + tol=Defaults.gradient_tol, + maxiter=Defaults.gradient_maxiter, verbosity=0, - iterscheme=Defaults.gradient_alg_iterscheme, + iterscheme=Defaults.gradient_iterscheme, ) return ManualIter{iterscheme}(tol, maxiter, verbosity) end """ - struct LinSolver(; solver=$(Defaults.gradient_linsolver), iterscheme=$(Defaults.gradient_alg_iterscheme)) <: GradMode{iterscheme} + struct LinSolver(; solver=$(Defaults.gradient_linsolver), iterscheme=$(Defaults.gradient_iterscheme)) <: GradMode{iterscheme} Gradient mode wrapper around `KrylovKit.LinearSolver` for solving the gradient linear problem using iterative solvers. 
@@ -70,13 +70,13 @@ struct LinSolver{F} <: GradMode{F} solver::KrylovKit.LinearSolver end function LinSolver(; - solver=Defaults.gradient_linsolver, iterscheme=Defaults.gradient_alg_iterscheme + solver=Defaults.gradient_linsolver, iterscheme=Defaults.gradient_iterscheme ) return LinSolver{iterscheme}(solver) end """ - struct EigSolver(; solver=$(Defaults.gradient_eigsolver), iterscheme=$(Defaults.gradient_alg_iterscheme)) <: GradMode{iterscheme} + struct EigSolver(; solver=$(Defaults.gradient_eigsolver), iterscheme=$(Defaults.gradient_iterscheme)) <: GradMode{iterscheme} Gradient mode wrapper around `KrylovKit.KrylovAlgorithm` for solving the gradient linear problem as an eigenvalue problem. @@ -91,33 +91,77 @@ struct EigSolver{F} <: GradMode{F} solver::KrylovKit.KrylovAlgorithm end function EigSolver(; - solver=Defaults.gradient_eigsolver, iterscheme=Defaults.gradient_alg_iterscheme + solver=Defaults.gradient_eigsolver, iterscheme=Defaults.gradient_iterscheme ) return EigSolver{iterscheme}(solver) end -function select_algorithm(::Type{GradMode}; alg::Union{Symbol,GradMode{F}}, kwargs...) 
where {F} - # TODO - # if alg <: Union{GeomSum,ManualIter} - # gradient_kwargs.alg(; - # tol=gradient_kwargs.tol, - # maxiter=gradient_kwargs.maxiter, - # verbosity=gradient_kwargs.verbosity, - # iterscheme=gradient_kwargs.iterscheme, - # ) - # elseif gradient_kwargs.alg <: LinSolver - # solver = Defaults.gradient_linsolver - # @reset solver.maxiter = gradient_kwargs.maxiter - # @reset solver.tol = gradient_kwargs.tol - # @reset solver.verbosity = gradient_kwargs.verbosity - # LinSolver(; solver, iterscheme=gradient_kwargs.iterscheme) - # elseif gradient_kwargs.alg <: EigSolver - # solver = Defaults.gradient_eigsolver - # @reset solver.maxiter = gradient_kwargs.maxiter - # @reset solver.tol = gradient_kwargs.tol - # @reset solver.verbosity = gradient_kwargs.verbosity - # EigSolver(; solver, iterscheme=gradient_kwargs.iterscheme) - # end +# Available GradMode algorithms as Symbols +const gradmode_symbols = Dict( + :geomsum => GeomSum, + :manualiter => ManualIter, + :linsolver => LinSolver, + :eigsolver => EigSolver, +) +# Available LinSolver and EigSolver solver algorithms as Symbols +const linsolver_solver_symbols = Dict(:gmres => GMRES, :bicgstab => BiCGStab) +const eigsolver_solver_symbols = Dict(:arnoldi => Arnoldi) + +function select_algorithm( + ::Type{GradMode}; + alg=Defaults.gradient_alg, + tol=Defaults.gradient_tol, + maxiter=Defaults.gradient_maxiter, + verbosity=Defaults.gradient_verbosity, + iterscheme=Defaults.gradient_iterscheme, + solver_alg=(;), +) + # replace symbol with GradMode alg type + alg_type = if alg isa Symbol + gradmode_symbols[alg] + else + alg + end + + # parse GradMode algorithm + gradient_algorithm = if alg_type <: Union{GeomSum,ManualIter} + alg_type(; tol, maxiter, verbosity, iterscheme) + elseif alg_type <: Union{<:LinSolver,<:EigSolver} + solver = if solver_alg isa NamedTuple # determine linear/eigen solver algorithm + solver_kwargs = (; + alg=Defaults.gradient_solver, tol, maxiter, verbosity, solver_alg... 
+ ) + + solver_type = if alg <: LinSolver # replace symbol with solver alg type + if solver_kwargs.alg isa Symbol + linsolver_solver_symbols[solver_kwargs.alg] + else + solver_kwargs.alg + end + elseif alg <: EigSolver + if solver_kwargs.alg isa Symbol + eigsolver_solver_symbols[solver_kwargs.alg] + else + solver_kwargs.alg + end + solver_kwargs = (; # use default eager for EigSolver + eager=Defaults.gradient_eigsolver_eager, + solver_kwargs..., + ) + end + + solver_kwargs = Base.structdiff(solver_kwargs, (; alg)) # remove `alg` keyword argument + solver_type(; solver_kwargs...) + else + solver_alg + end + + alg_type(; solver, iterscheme) + else + throw(ArgumentError("unknown gradient algorithm: $alg")) + end + + return gradient_algorithm end #= diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index b1683840..bfaeab83 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -87,10 +87,10 @@ either a `NamedTuple` of keyword arguments or a `GradMode` struct directly. The keyword arguments are: * `tol=1e-2tol`: Convergence tolerance for the fixed-point gradient iteration. -* `maxiter=$(Defaults.gradient_alg_maxiter)`: Maximal number of gradient problem iterations. +* `maxiter=$(Defaults.gradient_maxiter)`: Maximal number of gradient problem iterations. * `alg=$(Defaults.gradient_alg_type)`: Gradient algorithm type, can be any `GradMode` type. * `verbosity`: Gradient output verbosity, ≤0 by default to disable too verbose printing. Should only be >0 for debug purposes. -* `iterscheme=$(Defaults.gradient_alg_iterscheme)`: CTMRG iteration scheme determining mode of differentiation. This can be `:fixed` (SVD with fixed gauge) or `:diffgauge` (differentiate gauge-fixing routine). +* `iterscheme=$(Defaults.gradient_iterscheme)`: CTMRG iteration scheme determining mode of differentiation. 
This can be `:fixed` (SVD with fixed gauge) or `:diffgauge` (differentiate gauge-fixing routine). ### PEPS optimization settings @@ -208,7 +208,7 @@ function fixedpoint( end """ - function select_algorithm(::typeof(fixedpoint), env₀::CTMRGEnv; kwargs...) + select_algorithm(::typeof(fixedpoint), env₀::CTMRGEnv; kwargs...) Parse optimization keyword arguments on to the corresponding algorithm structs and return a final `PEPSOptimize` to be used in `fixedpoint`. For a description of the keyword @@ -253,7 +253,7 @@ function select_algorithm( env₀; tol=1e-4tol, verbosity=boundary_verbosity, - svd_rrule_tol=1e-3tol, + svd_alg=(; rrule_alg=(; tol=1e-3tol)), boundary_alg..., ) else @@ -264,14 +264,6 @@ function select_algorithm( gradient_algorithm = if gradient_alg isa GradMode gradient_alg elseif gradient_alg isa NamedTuple - gradient_kwargs = (; - alg=Defaults.gradient_alg_type, - tol=1e-2tol, - maxiter=Defaults.gradient_alg_maxiter, - verbosity=gradient_verbosity, - iterscheme=Defaults.gradient_alg_iterscheme, - gradient_alg..., # replaces all specified kwargs - ) select_algorithm(GradMode; gradient_kwargs...) 
else throw(ArgumentError("unknown gradient algorithm: $gradient_alg")) diff --git a/src/utility/svd.jl b/src/utility/svd.jl index 1129e70d..cae13ac9 100644 --- a/src/utility/svd.jl +++ b/src/utility/svd.jl @@ -39,42 +39,41 @@ function SVDAdjoint(; return SVDAdjoint(fwd_alg, rrule_alg, broadening) end +# Available forward & reverse-rule SVD algorithms as Symbols const svd_fwd_symbols = Dict( :sdd => TensorKit.SDD, :svd => TensorKit.SVD, :iterative => IterSVD ) -const truncation_scheme_symbols = Dict( - :fixedspace => FixedSpaceTruncation, - :notrunc => TensorKit.NoTruncation, - :truncerr => TensorKit.TruncationError, - :truncspace => TensorKit.TruncationSpace, - :truncbelow => TensorKit.TruncationCutoff, -) const svd_rrule_symbols = Dict(:gmres => GMRES, :bicgstab => BiCGStab, :arnoldi => Arnoldi) function select_algorithm( ::Type{SVDAdjoint}; fwd_alg=(;), rrule_alg=(;), broadening=nothing ) + # parse forward SVD algorithm fwd_algorithm = if fwd_alg isa NamedTuple - fwd_kwargs = (; alg=Defaults.svd_fwd_alg, fwd_alg...) - alg = - fwd_kwargs.alg isa Symbol ? svd_fwd_symbols[fwd_kwargs.alg] : fwd_kwargs.alg - alg(fwd_kwargs...) + fwd_kwargs = (; alg=Defaults.svd_fwd_alg, fwd_alg...) # overwrite with specified kwargs + fwd_type = if fwd_kwargs.alg isa Symbol # replace symbol with alg type + svd_fwd_symbols[fwd_kwargs.alg] + else + fwd_kwargs.alg + end + fwd_type(fwd_kwargs...) else fwd_alg end + # parse reverse-rule SVD algorithm rrule_algorithm = if rrule_alg isa NamedTuple rrule_kwargs = (; alg=Defaults.svd_rrule_alg, verbosity=Defaults.svd_rrule_verbosity, rrule_alg..., - ) - alg = if rrule_kwargs.alg isa Symbol + ) # overwrite with specified kwargs + rrule_type = if rrule_kwargs.alg isa Symbol # replace symbol with alg type svd_rrule_symbols[rrule_kwargs.alg] else rrule_kwargs.alg end - alg(rrule_kwargs...) + rrule_type(rrule_kwargs...) 
else rrule_alg end From c4326ec5559425bf4730e8851e6926ad891d196f Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Tue, 4 Mar 2025 18:03:17 +0100 Subject: [PATCH 29/52] Rearrange files and update kwarg constructors --- src/Defaults.jl | 28 ++ src/PEPSKit.jl | 1 + src/algorithms/ctmrg/ctmrg.jl | 59 ---- src/algorithms/ctmrg/projectors.jl | 71 +---- src/algorithms/ctmrg/sequential.jl | 10 +- src/algorithms/ctmrg/simultaneous.jl | 10 +- .../fixed_point_differentiation.jl | 78 +---- .../optimization/peps_optimization.jl | 95 ------ src/algorithms/select_algorithm.jl | 284 ++++++++++++++++++ src/utility/svd.jl | 48 +-- 10 files changed, 339 insertions(+), 345 deletions(-) create mode 100644 src/algorithms/select_algorithm.jl diff --git a/src/Defaults.jl b/src/Defaults.jl index 3f334bdf..b285d690 100644 --- a/src/Defaults.jl +++ b/src/Defaults.jl @@ -76,6 +76,7 @@ Module containing default algorithm parameter values and arguments. - `scheduler=Ref{Scheduler}(...)`: Multi-threading scheduler which can be accessed via `set_scheduler!` """ module Defaults +using OhMyThreads # CTMRG const ctmrg_tol = 1e-8 const ctmrg_maxiter = 100 @@ -154,3 +155,30 @@ function __init__() return set_scheduler!() end end + +# Available algorithms as Symbols to replace algorithm struct types +const svd_fwd_symbols = Dict( + :sdd => TensorKit.SDD, :svd => TensorKit.SVD, :iterative => IterSVD +) +const svd_rrule_symbols = Dict(:gmres => GMRES, :bicgstab => BiCGStab, :arnoldi => Arnoldi) +const truncation_scheme_symbols = Dict( + :fixedspace => FixedSpaceTruncation, + :notrunc => TensorKit.NoTruncation, + :truncerr => TensorKit.TruncationError, + :truncspace => TensorKit.TruncationSpace, + :truncbelow => TensorKit.TruncationCutoff, +) +const gradmode_symbols = Dict( + :geomsum => GeomSum, + :manualiter => ManualIter, + :linsolver => LinSolver, + :eigsolver => EigSolver, +) +const linsolver_solver_symbols = Dict(:gmres => GMRES, :bicgstab => BiCGStab) +const eigsolver_solver_symbols = Dict(:arnoldi 
=> Arnoldi) +const projector_symbols = Dict( + :halfinfinite => HalfInfiniteProjector, :fullinfinite => FullInfiniteProjector +) +const ctmrg_symbols = Dict( + :simultaneous => SimultaneousCTMRG, :sequential => SequentialCTMRG +) diff --git a/src/PEPSKit.jl b/src/PEPSKit.jl index 7c4cc12b..60f998ba 100644 --- a/src/PEPSKit.jl +++ b/src/PEPSKit.jl @@ -55,6 +55,7 @@ include("algorithms/time_evolution/evoltools.jl") include("algorithms/time_evolution/simpleupdate.jl") include("algorithms/toolbox.jl") +include("algorithms/select_algorithm.jl") include("utility/symmetrization.jl") diff --git a/src/algorithms/ctmrg/ctmrg.jl b/src/algorithms/ctmrg/ctmrg.jl index c0c002d6..2ab0aff7 100644 --- a/src/algorithms/ctmrg/ctmrg.jl +++ b/src/algorithms/ctmrg/ctmrg.jl @@ -99,65 +99,6 @@ end @non_differentiable ctmrg_logfinish!(args...) @non_differentiable ctmrg_logcancel!(args...) -# Available CTMRG algorithms as Symbols -const ctmrg_symbols = Dict( - :simultaneous => SimultaneousCTMRG, :sequential => SequentialCTMRG -) - -""" - select_algorithm(::typeof(leading_boundary), env₀::CTMRGEnv; kwargs...) -> CTMRGAlgorithm - -Parse and standardize CTMRG keyword arguments, and bundle them into a `CTMRGAlgorithm` struct, -which is passed on to [`leading_boundary`](@ref). See [`leading_boundary`](@ref) for a -description of all keyword arguments. 
-""" -function select_algorithm( - ::typeof(leading_boundary), - env₀::CTMRGEnv; - alg=Defaults.ctmrg_alg, - tol=Defaults.ctmrg_tol, - maxiter=Defaults.ctmrg_maxiter, - miniter=Defaults.ctmrg_miniter, - verbosity=Defaults.ctmrg_verbosity, - trscheme=(; alg=Defaults.trscheme), - svd_alg=(;), - projector_alg=Defaults.projector_alg, # only allows for Symbol/Type{ProjectorAlgorithm} to expose projector kwargs -) - # extract maximal environment dimensions - χenv = maximum(env₀.corners) do corner - return dim(space(corner, 1)) - end - krylovdim = round(Int, Defaults.krylovdim_factor * χenv) - - # replace symbol with projector alg type - alg_type = if alg isa Symbol - projector_symbols[alg] - else - alg - end - - # parse SVD forward & rrule algorithm - svd_algorithm = if svd_alg isa SVDAdjoint - svd_alg - elseif svd_alg isa NamedTuple - alg′ = select_algorithm( - SVDAdjoint; rrule_alg=(; tol=1e1tol, verbosity=verbosity - 2), svd_alg... - ) - if typeof(alg′.rrule_alg) <: Union{<:GMRES,<:Arnoldi} - @reset alg′.rrule_alg.krylovdim = krylovdim - end - else - throw(ArgumentError("unknown SVD algorithm: $svd_alg")) - end - - # parse CTMRG projector algorithm - projector_algorithm = select_algorithm( - ProjectorAlgorithm; alg=projector_alg, svd_alg, trscheme, verbosity - ) - - return alg_type(tol, maxiter, miniter, verbosity, projector_algorithm) -end - #= In order to compute an error measure, we compare the singular values of the current iteration with the previous one. However, when the virtual spaces change, this comparison is not directly possible. 
diff --git a/src/algorithms/ctmrg/projectors.jl b/src/algorithms/ctmrg/projectors.jl index 9277455f..4a7a8aa1 100644 --- a/src/algorithms/ctmrg/projectors.jl +++ b/src/algorithms/ctmrg/projectors.jl @@ -33,86 +33,27 @@ function truncation_scheme(alg::ProjectorAlgorithm, edge) end """ - struct HalfInfiniteProjector{S,T}(; svd_alg=$(Defaults.svd_alg), - trscheme=$(Defaults.trscheme), verbosity=0) + struct HalfInfiniteProjector{S,T}(; svd_alg=SVDAdjoint(), trscheme=$(truncation_scheme_symbols[Defaults.trscheme]), verbosity=0) Projector algorithm implementing projectors from SVDing the half-infinite CTMRG environment. """ @kwdef struct HalfInfiniteProjector{S<:SVDAdjoint,T} <: ProjectorAlgorithm - svd_alg::S = Defaults.svd_alg - trscheme::T = Defaults.trscheme + svd_alg::S = SVDAdjoint() + trscheme::T = truncation_scheme_symbols[Defaults.trscheme] verbosity::Int = 0 end """ - struct FullInfiniteProjector{S,T}(; svd_alg=$(Defaults.svd_alg), - trscheme=$(Defaults.trscheme), verbosity=0) + struct FullInfiniteProjector{S,T}(; svd_alg=SVDAdjoint(), trscheme=$(truncation_scheme_symbols[Defaults.trscheme]), verbosity=0) Projector algorithm implementing projectors from SVDing the full 4x4 CTMRG environment. """ @kwdef struct FullInfiniteProjector{S<:SVDAdjoint,T} <: ProjectorAlgorithm - svd_alg::S = Defaults.svd_alg - trscheme::T = Defaults.trscheme + svd_alg::S = SVDAdjoint() + trscheme::T = truncation_scheme_symbols[Defaults.trscheme] verbosity::Int = 0 end -# Available truncation schemes as Symbols -const truncation_scheme_symbols = Dict( - :fixedspace => FixedSpaceTruncation, - :notrunc => TensorKit.NoTruncation, - :truncerr => TensorKit.TruncationError, - :truncspace => TensorKit.TruncationSpace, - :truncbelow => TensorKit.TruncationCutoff, -) - -function select_algorithm( - ::Type{TensorKit.TruncationScheme}; alg=Defaults.trscheme, kwargs... -) - alg_type = alg isa Symbol ? 
truncation_scheme_symbols[alg] : alg # replace Symbol with TruncationScheme type - args = map(k -> last(kwargs[k]), keys(kwargs)) # extract only values of supplied kwargs (empty Tuple, if kwargs is empty) - return alg_type(args...) -end - -# Available projector algorithms as Symbols -const projector_symbols = Dict( - :halfinfinite => HalfInfiniteProjector, :fullinfinite => FullInfiniteProjector -) - -function select_algorithm( - ::Type{ProjectorAlgorithm}; - alg=Defaults.projector_alg, - svd_alg=(;), - trscheme=(;), - verbosity=Defaults.projector_verbosity, -) - # replace symbol with projector alg type - alg_type = if alg isa Symbol - projector_symbols[alg] - else - alg - end - - # parse SVD forward & rrule algorithm - svd_algorithm = if svd_alg isa SVDAdjoint - svd_alg - elseif svd_alg isa NamedTuple - select_algorithm(SVDAdjoint; svd_alg...) - else - throw(ArgumentError("unknown SVD algorithm: $svd_alg")) - end - - # parse truncation scheme - truncation_scheme = if trscheme isa TruncationScheme - trscheme - elseif trscheme isa NamedTuple - select_algorithm(TruncationScheme; trscheme...) 
- else - throw(ArgumentError("unknown truncation scheme: $trscheme")) - end - - return alg_type(svd_algorithm, truncation_scheme, verbosity) -end - # TODO: add `LinearAlgebra.cond` to TensorKit # Compute condition number smax / smin for diagonal singular value TensorMap function _condition_number(S::AbstractTensorMap) diff --git a/src/algorithms/ctmrg/sequential.jl b/src/algorithms/ctmrg/sequential.jl index 1cbee69e..866c3f1a 100644 --- a/src/algorithms/ctmrg/sequential.jl +++ b/src/algorithms/ctmrg/sequential.jl @@ -1,8 +1,8 @@ """ SequentialCTMRG(; tol=$(Defaults.ctmrg_tol), maxiter=$(Defaults.ctmrg_maxiter), miniter=$(Defaults.ctmrg_miniter), verbosity=$(Defaults.ctmrg_verbosity), - svd_alg=$(Defaults.svd_alg), trscheme=$(Defaults.trscheme), - projector_alg=$(Defaults.projector_alg_type)) + svd_alg=SVDAdjoint(), trscheme=truncation_scheme_symbols[Defaults.trscheme], + projector_alg=projector_symbols[Defaults.projector_alg]) CTMRG algorithm where the expansions and renormalization is performed sequentially column-wise. 
This is implemented as a growing and projecting step to the left, followed by @@ -21,9 +21,9 @@ function SequentialCTMRG(; maxiter=Defaults.ctmrg_maxiter, miniter=Defaults.ctmrg_miniter, verbosity=Defaults.ctmrg_verbosity, - svd_alg=Defaults.svd_alg, - trscheme=Defaults.trscheme, - projector_alg=Defaults.projector_alg_type, + svd_alg=SVDAdjoint(), + trscheme=truncation_scheme_symbols[Defaults.trscheme], + projector_alg=projector_symbols[Defaults.projector_alg], ) return SequentialCTMRG( tol, maxiter, miniter, verbosity, projector_alg(; svd_alg, trscheme, verbosity) diff --git a/src/algorithms/ctmrg/simultaneous.jl b/src/algorithms/ctmrg/simultaneous.jl index c09a1ab9..3d2a442c 100644 --- a/src/algorithms/ctmrg/simultaneous.jl +++ b/src/algorithms/ctmrg/simultaneous.jl @@ -1,8 +1,8 @@ """ SimultaneousCTMRG(; tol=$(Defaults.ctmrg_tol), maxiter=$(Defaults.ctmrg_maxiter), miniter=$(Defaults.ctmrg_miniter), verbosity=$(Defaults.ctmrg_verbosity), - svd_alg=$(Defaults.svd_alg), trscheme=$(Defaults.trscheme), - projector_alg=$(Defaults.projector_alg_type)) + svd_alg=SVDAdjoint(), trscheme=truncation_scheme_symbols[Defaults.trscheme], + projector_alg=projector_symbols[Defaults.projector_alg]) CTMRG algorithm where all sides are grown and renormalized at the same time. In particular, the projectors are applied to the corners from two sides simultaneously. 
The projectors are @@ -21,9 +21,9 @@ function SimultaneousCTMRG(; maxiter=Defaults.ctmrg_maxiter, miniter=Defaults.ctmrg_miniter, verbosity=Defaults.ctmrg_verbosity, - svd_alg=Defaults.svd_alg, - trscheme=Defaults.trscheme, - projector_alg=Defaults.projector_alg_type, + svd_alg=SVDAdjoint(), + trscheme=truncation_scheme_symbols[Defaults.trscheme], + projector_alg=projector_symbols[Defaults.projector_alg], ) return SimultaneousCTMRG( tol, maxiter, miniter, verbosity, projector_alg(; svd_alg, trscheme, verbosity) diff --git a/src/algorithms/optimization/fixed_point_differentiation.jl b/src/algorithms/optimization/fixed_point_differentiation.jl index f89b3f25..217d1c36 100644 --- a/src/algorithms/optimization/fixed_point_differentiation.jl +++ b/src/algorithms/optimization/fixed_point_differentiation.jl @@ -55,7 +55,7 @@ function ManualIter(; end """ - struct LinSolver(; solver=$(Defaults.gradient_linsolver), iterscheme=$(Defaults.gradient_iterscheme)) <: GradMode{iterscheme} + struct LinSolver(; solver=$(linsolver_solver_symbols[Defaults.gradient_linsolver]), iterscheme=$(Defaults.gradient_iterscheme)) <: GradMode{iterscheme} Gradient mode wrapper around `KrylovKit.LinearSolver` for solving the gradient linear problem using iterative solvers. 
@@ -70,13 +70,14 @@ struct LinSolver{F} <: GradMode{F} solver::KrylovKit.LinearSolver end function LinSolver(; - solver=Defaults.gradient_linsolver, iterscheme=Defaults.gradient_iterscheme + solver=linsolver_solver_symbols[Defaults.gradient_linsolver], + iterscheme=Defaults.gradient_iterscheme, ) return LinSolver{iterscheme}(solver) end """ - struct EigSolver(; solver=$(Defaults.gradient_eigsolver), iterscheme=$(Defaults.gradient_iterscheme)) <: GradMode{iterscheme} + struct EigSolver(; solver=$(eigsolver_solver_symbols[Defaults.gradient_eigsolver]), iterscheme=$(Defaults.gradient_iterscheme)) <: GradMode{iterscheme} Gradient mode wrapper around `KrylovKit.KrylovAlgorithm` for solving the gradient linear problem as an eigenvalue problem. @@ -91,77 +92,10 @@ struct EigSolver{F} <: GradMode{F} solver::KrylovKit.KrylovAlgorithm end function EigSolver(; - solver=Defaults.gradient_eigsolver, iterscheme=Defaults.gradient_iterscheme -) - return EigSolver{iterscheme}(solver) -end - -# Available GradMode algorithms as Symbols -const gradmode_symbols = Dict( - :geomsum => GeomSum, - :manualiter => ManualIter, - :linsolver => LinSolver, - :eigsolver => EigSolver, -) -# Available LinSolver and EigSolver solver algorithms as Symbols -const linsolver_solver_symbols = Dict(:gmres => GMRES, :bicgstab => BiCGStab) -const eigsolver_solver_symbols = Dict(:arnoldi => Arnoldi) - -function select_algorithm( - ::Type{GradMode}; - alg=Defaults.gradient_alg, - tol=Defaults.gradient_tol, - maxiter=Defaults.gradient_maxiter, - verbosity=Defaults.gradient_verbosity, + solver=eigsolver_solver_symbols[Defaults.gradient_eigsolver], iterscheme=Defaults.gradient_iterscheme, - solver_alg=(;), ) - # replace symbol with GradMode alg type - alg_type = if alg isa Symbol - gradmode_symbols[alg] - else - alg - end - - # parse GradMode algorithm - gradient_algorithm = if alg_type <: Union{GeomSum,ManualIter} - alg_type(; tol, maxiter, verbosity, iterscheme) - elseif alg_type <: 
Union{<:LinSolver,<:EigSolver} - solver = if solver_alg isa NamedTuple # determine linear/eigen solver algorithm - solver_kwargs = (; - alg=Defaults.gradient_solver, tol, maxiter, verbosity, solver_alg... - ) - - solver_type = if alg <: LinSolver # replace symbol with solver alg type - if solver_kwargs.alg isa Symbol - linsolver_solver_symbols[solver_kwargs.alg] - else - solver_kwargs.alg - end - elseif alg <: EigSolver - if solver_kwargs.alg isa Symbol - eigsolver_solver_symbols[solver_kwargs.alg] - else - solver_kwargs.alg - end - solver_kwargs = (; # use default eager for EigSolver - eager=Defaults.gradient_eigsolver_eager, - solver_kwargs..., - ) - end - - solver_kwargs = Base.structdiff(solver_kwargs, (; alg)) # remove `alg` keyword argument - solver_type(; solver_kwargs...) - else - solver_alg - end - - alg_type(; solver, iterscheme) - else - throw(ArgumentError("unknown gradient algorithm: $alg")) - end - - return gradient_algorithm + return EigSolver{iterscheme}(solver) end #= diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index bfaeab83..0deeea5d 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -207,101 +207,6 @@ function fixedpoint( return peps_final, env_final, cost, info end -""" - select_algorithm(::typeof(fixedpoint), env₀::CTMRGEnv; kwargs...) - -Parse optimization keyword arguments on to the corresponding algorithm structs and return -a final `PEPSOptimize` to be used in `fixedpoint`. For a description of the keyword -arguments, see [`fixedpoint`](@ref). 
-""" -function select_algorithm( - ::typeof(fixedpoint), - env₀::CTMRGEnv; - tol=Defaults.optimizer_tol, # top-level tolerance - verbosity=2, # top-level verbosity - boundary_alg=(;), - gradient_alg=(;), - optimization_alg=(;), - (finalize!)=OptimKit._finalize!, -) - - # top-level verbosity - if verbosity ≤ 0 # disable output - boundary_verbosity = -1 - gradient_verbosity = -1 - optimizer_verbosity = -1 - elseif verbosity == 1 # output only optimization steps and degeneracy warnings - boundary_verbosity = -1 - gradient_verbosity = 1 - optimizer_verbosity = 3 - elseif verbosity == 2 # output optimization and boundary information - boundary_verbosity = 2 - gradient_verbosity = -1 - optimizer_verbosity = 3 - elseif verbosity == 3 # verbose debug output - boundary_verbosity = 3 - gradient_verbosity = 3 - optimizer_verbosity = 3 - end - - # parse boundary algorithm - boundary_algorithm = if boundary_alg isa CTMRGAlgorithm - boundary_alg - elseif boundary_alg isa NamedTuple - select_algorithm( - leading_boundary, - env₀; - tol=1e-4tol, - verbosity=boundary_verbosity, - svd_alg=(; rrule_alg=(; tol=1e-3tol)), - boundary_alg..., - ) - else - throw(ArgumentError("unknown boundary algorithm: $boundary_alg")) - end - - # parse fixed-point gradient algorithm - gradient_algorithm = if gradient_alg isa GradMode - gradient_alg - elseif gradient_alg isa NamedTuple - select_algorithm(GradMode; gradient_kwargs...) 
- else - throw(ArgumentError("unknown gradient algorithm: $gradient_alg")) - end - - # construct final PEPSOptimize optimization algorithm - optimization_algorithm = if optimization_alg isa PEPSOptimize - optimization_alg - elseif optimization_alg isa NamedTuple - optimization_kwargs = (; - alg=Defaults.optimizer_alg, - tol=tol, - maxiter=Defaults.optimizer_maxiter, - lbfgs_memory=Defaults.lbfgs_memory, - reuse_env=Defaults.reuse_env, - symmetrization=nothing, - optimization_alg..., # replaces all specified kwargs - ) - optimizer = LBFGS( - optimization_kwargs.lbfgs_memory; - gradtol=optimization_kwargs.tol, - maxiter=optimization_kwargs.maxiter, - verbosity=optimizer_verbosity, - ) - PEPSOptimize( - boundary_algorithm, - gradient_algorithm, - optimizer, - optimization_kwargs.reuse_env, - optimization_kwargs.symmetrization, - ) - else - throw(ArgumentError("unknown optimization algorithm: $optimization_alg")) - end - - return optimization_algorithm, finalize! -end - # Update PEPS unit cell in non-mutating way # Note: Both x and η are InfinitePEPS during optimization function peps_retract(x, η, α) diff --git a/src/algorithms/select_algorithm.jl b/src/algorithms/select_algorithm.jl new file mode 100644 index 00000000..31d3d1d2 --- /dev/null +++ b/src/algorithms/select_algorithm.jl @@ -0,0 +1,284 @@ +""" + select_algorithm(::typeof(fixedpoint), env₀::CTMRGEnv; kwargs...) + +Parse optimization keyword arguments on to the corresponding algorithm structs and return +a final `PEPSOptimize` to be used in `fixedpoint`. For a description of the keyword +arguments, see [`fixedpoint`](@ref). 
+"""
+function select_algorithm(
+    ::typeof(fixedpoint),
+    env₀::CTMRGEnv;
+    tol=Defaults.optimizer_tol, # top-level tolerance
+    verbosity=2, # top-level verbosity
+    boundary_alg=(;),
+    gradient_alg=(;),
+    optimization_alg=(;),
+    (finalize!)=OptimKit._finalize!,
+)
+
+    # top-level verbosity
+    if verbosity ≤ 0 # disable output
+        boundary_verbosity = -1
+        gradient_verbosity = -1
+        optimizer_verbosity = -1
+    elseif verbosity == 1 # output only optimization steps and degeneracy warnings
+        boundary_verbosity = -1
+        gradient_verbosity = 1
+        optimizer_verbosity = 3
+    elseif verbosity == 2 # output optimization and boundary information
+        boundary_verbosity = 2
+        gradient_verbosity = -1
+        optimizer_verbosity = 3
+    elseif verbosity == 3 # verbose debug output
+        boundary_verbosity = 3
+        gradient_verbosity = 3
+        optimizer_verbosity = 3
+    end
+
+    # parse boundary algorithm
+    boundary_algorithm = if boundary_alg isa CTMRGAlgorithm
+        boundary_alg
+    elseif boundary_alg isa NamedTuple
+        select_algorithm(
+            leading_boundary,
+            env₀;
+            tol=1e-4tol,
+            verbosity=boundary_verbosity,
+            svd_alg=(; rrule_alg=(; tol=1e-3tol)),
+            boundary_alg...,
+        )
+    else
+        throw(ArgumentError("unknown boundary algorithm: $boundary_alg"))
+    end
+
+    # parse fixed-point gradient algorithm
+    gradient_algorithm = if gradient_alg isa GradMode
+        gradient_alg
+    elseif gradient_alg isa NamedTuple
+        select_algorithm(GradMode; tol=1e-2tol, verbosity=gradient_verbosity, gradient_alg...)
+ else + throw(ArgumentError("unknown gradient algorithm: $gradient_alg")) + end + + # construct final PEPSOptimize optimization algorithm + optimization_algorithm = if optimization_alg isa PEPSOptimize + optimization_alg + elseif optimization_alg isa NamedTuple + optimization_kwargs = (; + alg=Defaults.optimizer_alg, + tol=tol, + maxiter=Defaults.optimizer_maxiter, + lbfgs_memory=Defaults.lbfgs_memory, + reuse_env=Defaults.reuse_env, + symmetrization=nothing, + optimization_alg..., # replaces all specified kwargs + ) + optimizer = LBFGS( + optimization_kwargs.lbfgs_memory; + gradtol=optimization_kwargs.tol, + maxiter=optimization_kwargs.maxiter, + verbosity=optimizer_verbosity, + ) + PEPSOptimize( + boundary_algorithm, + gradient_algorithm, + optimizer, + optimization_kwargs.reuse_env, + optimization_kwargs.symmetrization, + ) + else + throw(ArgumentError("unknown optimization algorithm: $optimization_alg")) + end + + return optimization_algorithm, finalize! +end + +""" + select_algorithm(::typeof(leading_boundary), env₀::CTMRGEnv; kwargs...) -> CTMRGAlgorithm + +Parse and standardize CTMRG keyword arguments, and bundle them into a `CTMRGAlgorithm` struct, +which is passed on to [`leading_boundary`](@ref). See [`leading_boundary`](@ref) for a +description of all keyword arguments. 
+"""
+function select_algorithm(
+    ::typeof(leading_boundary),
+    env₀::CTMRGEnv;
+    alg=Defaults.ctmrg_alg,
+    tol=Defaults.ctmrg_tol,
+    maxiter=Defaults.ctmrg_maxiter,
+    miniter=Defaults.ctmrg_miniter,
+    verbosity=Defaults.ctmrg_verbosity,
+    trscheme=(; alg=Defaults.trscheme),
+    svd_alg=(;),
+    projector_alg=Defaults.projector_alg, # only allows for Symbol/Type{ProjectorAlgorithm} to expose projector kwargs
+)
+    # extract maximal environment dimensions
+    χenv = maximum(env₀.corners) do corner
+        return dim(space(corner, 1))
+    end
+    krylovdim = round(Int, Defaults.krylovdim_factor * χenv)
+
+    # replace symbol with CTMRG alg type
+    alg_type = if alg isa Symbol
+        ctmrg_symbols[alg]
+    else
+        alg
+    end
+
+    # parse SVD forward & rrule algorithm
+    svd_algorithm = if svd_alg isa SVDAdjoint
+        svd_alg
+    elseif svd_alg isa NamedTuple
+        alg′ = select_algorithm(
+            SVDAdjoint; rrule_alg=(; tol=1e1tol, verbosity=verbosity - 2), svd_alg...
+        )
+        (alg′.rrule_alg isa Union{GMRES,Arnoldi}) &&
+            (@reset alg′.rrule_alg.krylovdim = krylovdim)
+        alg′
+    else
+        throw(ArgumentError("unknown SVD algorithm: $svd_alg"))
+    end
+
+    # parse CTMRG projector algorithm
+    projector_algorithm = select_algorithm(
+        ProjectorAlgorithm; alg=projector_alg, svd_alg=svd_algorithm, trscheme, verbosity
+    )
+
+    return alg_type(tol, maxiter, miniter, verbosity, projector_algorithm)
+end
+
+function select_algorithm(
+    ::Type{ProjectorAlgorithm};
+    alg=Defaults.projector_alg,
+    svd_alg=(;),
+    trscheme=(;),
+    verbosity=Defaults.projector_verbosity,
+)
+    # replace symbol with projector alg type
+    alg_type = if alg isa Symbol
+        projector_symbols[alg]
+    else
+        alg
+    end
+
+    # parse SVD forward & rrule algorithm
+    svd_algorithm = if svd_alg isa SVDAdjoint
+        svd_alg
+    elseif svd_alg isa NamedTuple
+        select_algorithm(SVDAdjoint; svd_alg...)
+ else + throw(ArgumentError("unknown SVD algorithm: $svd_alg")) + end + + # parse truncation scheme + truncation_scheme = if trscheme isa TruncationScheme + trscheme + elseif trscheme isa NamedTuple + select_algorithm(TruncationScheme; trscheme...) + else + throw(ArgumentError("unknown truncation scheme: $trscheme")) + end + + return alg_type(svd_algorithm, truncation_scheme, verbosity) +end + +function select_algorithm( + ::Type{GradMode}; + alg=Defaults.gradient_alg, + tol=Defaults.gradient_tol, + maxiter=Defaults.gradient_maxiter, + verbosity=Defaults.gradient_verbosity, + iterscheme=Defaults.gradient_iterscheme, + solver_alg=(;), +) + # replace symbol with GradMode alg type + alg_type = if alg isa Symbol + gradmode_symbols[alg] + else + alg + end + + # parse GradMode algorithm + gradient_algorithm = if alg_type <: Union{GeomSum,ManualIter} + alg_type(; tol, maxiter, verbosity, iterscheme) + elseif alg_type <: Union{<:LinSolver,<:EigSolver} + solver = if solver_alg isa NamedTuple # determine linear/eigen solver algorithm + solver_kwargs = (; + alg=Defaults.gradient_solver, tol, maxiter, verbosity, solver_alg... + ) + + solver_type = if alg <: LinSolver # replace symbol with solver alg type + if solver_kwargs.alg isa Symbol + linsolver_solver_symbols[solver_kwargs.alg] + else + solver_kwargs.alg + end + elseif alg <: EigSolver + if solver_kwargs.alg isa Symbol + eigsolver_solver_symbols[solver_kwargs.alg] + else + solver_kwargs.alg + end + solver_kwargs = (; # use default eager for EigSolver + eager=Defaults.gradient_eigsolver_eager, + solver_kwargs..., + ) + end + + solver_kwargs = Base.structdiff(solver_kwargs, (; alg)) # remove `alg` keyword argument + solver_type(; solver_kwargs...) + else + solver_alg + end + + alg_type(; solver, iterscheme) + else + throw(ArgumentError("unknown gradient algorithm: $alg")) + end + + return gradient_algorithm +end + +function select_algorithm( + ::Type{TensorKit.TruncationScheme}; alg=Defaults.trscheme, kwargs... 
+) + alg_type = alg isa Symbol ? truncation_scheme_symbols[alg] : alg # replace Symbol with TruncationScheme type + args = map(k -> last(kwargs[k]), keys(kwargs)) # extract only values of supplied kwargs (empty Tuple, if kwargs is empty) + return alg_type(args...) +end + +function select_algorithm( + ::Type{SVDAdjoint}; fwd_alg=(;), rrule_alg=(;), broadening=nothing +) + # parse forward SVD algorithm + fwd_algorithm = if fwd_alg isa NamedTuple + fwd_kwargs = (; alg=Defaults.svd_fwd_alg, fwd_alg...) # overwrite with specified kwargs + fwd_type = if fwd_kwargs.alg isa Symbol # replace symbol with alg type + svd_fwd_symbols[fwd_kwargs.alg] + else + fwd_kwargs.alg + end + fwd_type(fwd_kwargs...) + else + fwd_alg + end + + # parse reverse-rule SVD algorithm + rrule_algorithm = if rrule_alg isa NamedTuple + rrule_kwargs = (; + alg=Defaults.svd_rrule_alg, + verbosity=Defaults.svd_rrule_verbosity, + rrule_alg..., + ) # overwrite with specified kwargs + rrule_type = if rrule_kwargs.alg isa Symbol # replace symbol with alg type + svd_rrule_symbols[rrule_kwargs.alg] + else + rrule_kwargs.alg + end + rrule_type(rrule_kwargs...) + else + rrule_alg + end + + return SVDAdjoint(fwd_algorithm, rrule_algorithm, broadening) +end diff --git a/src/utility/svd.jl b/src/utility/svd.jl index cae13ac9..f6cb5d23 100644 --- a/src/utility/svd.jl +++ b/src/utility/svd.jl @@ -10,7 +10,7 @@ using TensorKit: const CRCExt = Base.get_extension(KrylovKit, :KrylovKitChainRulesCoreExt) """ - struct SVDAdjoint(; fwd_alg=$(Defaults.svd_fwd_alg), rrule_alg=$(Defaults.svd_rrule_alg), + struct SVDAdjoint(; fwd_alg=$(svd_fwd_symbols[Defaults.svd_fwd_alg]), rrule_alg=$(svd_rrule_symbols[Defaults.svd_rrule_alg]), broadening=nothing) Wrapper for a SVD algorithm `fwd_alg` with a defined reverse rule `rrule_alg`. 
@@ -34,53 +34,13 @@ struct SVDAdjoint{F,R,B} end end # Keep truncation algorithm separate to be able to specify CTMRG dependent information function SVDAdjoint(; - fwd_alg=Defaults.svd_fwd_alg, rrule_alg=Defaults.svd_rrule_alg, broadening=nothing + fwd_alg=svd_fwd_symbols[Defaults.svd_fwd_alg], + rrule_alg=svd_rrule_symbols[Defaults.svd_rrule_alg], + broadening=nothing, ) return SVDAdjoint(fwd_alg, rrule_alg, broadening) end -# Available forward & reverse-rule SVD algorithms as Symbols -const svd_fwd_symbols = Dict( - :sdd => TensorKit.SDD, :svd => TensorKit.SVD, :iterative => IterSVD -) -const svd_rrule_symbols = Dict(:gmres => GMRES, :bicgstab => BiCGStab, :arnoldi => Arnoldi) - -function select_algorithm( - ::Type{SVDAdjoint}; fwd_alg=(;), rrule_alg=(;), broadening=nothing -) - # parse forward SVD algorithm - fwd_algorithm = if fwd_alg isa NamedTuple - fwd_kwargs = (; alg=Defaults.svd_fwd_alg, fwd_alg...) # overwrite with specified kwargs - fwd_type = if fwd_kwargs.alg isa Symbol # replace symbol with alg type - svd_fwd_symbols[fwd_kwargs.alg] - else - fwd_kwargs.alg - end - fwd_type(fwd_kwargs...) - else - fwd_alg - end - - # parse reverse-rule SVD algorithm - rrule_algorithm = if rrule_alg isa NamedTuple - rrule_kwargs = (; - alg=Defaults.svd_rrule_alg, - verbosity=Defaults.svd_rrule_verbosity, - rrule_alg..., - ) # overwrite with specified kwargs - rrule_type = if rrule_kwargs.alg isa Symbol # replace symbol with alg type - svd_rrule_symbols[rrule_kwargs.alg] - else - rrule_kwargs.alg - end - rrule_type(rrule_kwargs...) 
- else - rrule_alg - end - - return SVDAdjoint(fwd_algorithm, rrule_algorithm, broadening) -end - """ PEPSKit.tsvd(t, alg; trunc=notrunc(), p=2) From 105a939caceb24e40611f2d9f3375c2e377f5bc5 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Wed, 5 Mar 2025 10:30:53 +0100 Subject: [PATCH 30/52] Remove Symbol Dicts and make kwarg constructors use select_algorithm --- src/Defaults.jl | 27 ------- src/algorithms/ctmrg/projectors.jl | 26 +++--- src/algorithms/ctmrg/sequential.jl | 31 +++---- src/algorithms/ctmrg/simultaneous.jl | 31 +++---- .../fixed_point_differentiation.jl | 36 ++------- src/algorithms/select_algorithm.jl | 81 +++++++++++++++++-- src/algorithms/time_evolution/simpleupdate.jl | 1 + src/utility/svd.jl | 10 +-- 8 files changed, 130 insertions(+), 113 deletions(-) diff --git a/src/Defaults.jl b/src/Defaults.jl index b285d690..d6eb0052 100644 --- a/src/Defaults.jl +++ b/src/Defaults.jl @@ -155,30 +155,3 @@ function __init__() return set_scheduler!() end end - -# Available algorithms as Symbols to replace algorithm struct types -const svd_fwd_symbols = Dict( - :sdd => TensorKit.SDD, :svd => TensorKit.SVD, :iterative => IterSVD -) -const svd_rrule_symbols = Dict(:gmres => GMRES, :bicgstab => BiCGStab, :arnoldi => Arnoldi) -const truncation_scheme_symbols = Dict( - :fixedspace => FixedSpaceTruncation, - :notrunc => TensorKit.NoTruncation, - :truncerr => TensorKit.TruncationError, - :truncspace => TensorKit.TruncationSpace, - :truncbelow => TensorKit.TruncationCutoff, -) -const gradmode_symbols = Dict( - :geomsum => GeomSum, - :manualiter => ManualIter, - :linsolver => LinSolver, - :eigsolver => EigSolver, -) -const linsolver_solver_symbols = Dict(:gmres => GMRES, :bicgstab => BiCGStab) -const eigsolver_solver_symbols = Dict(:arnoldi => Arnoldi) -const projector_symbols = Dict( - :halfinfinite => HalfInfiniteProjector, :fullinfinite => FullInfiniteProjector -) -const ctmrg_symbols = Dict( - :simultaneous => SimultaneousCTMRG, :sequential => SequentialCTMRG -) 
diff --git a/src/algorithms/ctmrg/projectors.jl b/src/algorithms/ctmrg/projectors.jl index 4a7a8aa1..85f6d467 100644 --- a/src/algorithms/ctmrg/projectors.jl +++ b/src/algorithms/ctmrg/projectors.jl @@ -33,25 +33,31 @@ function truncation_scheme(alg::ProjectorAlgorithm, edge) end """ - struct HalfInfiniteProjector{S,T}(; svd_alg=SVDAdjoint(), trscheme=$(truncation_scheme_symbols[Defaults.trscheme]), verbosity=0) + struct HalfInfiniteProjector{S,T}(; svd_alg=TODO, trscheme=TODO, verbosity=0) Projector algorithm implementing projectors from SVDing the half-infinite CTMRG environment. """ -@kwdef struct HalfInfiniteProjector{S<:SVDAdjoint,T} <: ProjectorAlgorithm - svd_alg::S = SVDAdjoint() - trscheme::T = truncation_scheme_symbols[Defaults.trscheme] - verbosity::Int = 0 +struct HalfInfiniteProjector{S<:SVDAdjoint,T} <: ProjectorAlgorithm + svd_alg::S + trscheme::T + verbosity::Int +end +function HalfInfiniteProjector(; kwargs...) + return select_algorithm(ProjectorAlgorithm; alg=:halfinfinite, kwargs...) end """ - struct FullInfiniteProjector{S,T}(; svd_alg=SVDAdjoint(), trscheme=$(truncation_scheme_symbols[Defaults.trscheme]), verbosity=0) + struct FullInfiniteProjector{S,T}(; svd_alg=TODO, trscheme=TODO, verbosity=0) Projector algorithm implementing projectors from SVDing the full 4x4 CTMRG environment. """ -@kwdef struct FullInfiniteProjector{S<:SVDAdjoint,T} <: ProjectorAlgorithm - svd_alg::S = SVDAdjoint() - trscheme::T = truncation_scheme_symbols[Defaults.trscheme] - verbosity::Int = 0 +struct FullInfiniteProjector{S<:SVDAdjoint,T} <: ProjectorAlgorithm + svd_alg::S + trscheme::T + verbosity::Int +end +function FullInfiniteProjector(; kwargs...) + return select_algorithm(ProjectorAlgorithm; alg=:fullinfinite, kwargs...) 
end # TODO: add `LinearAlgebra.cond` to TensorKit diff --git a/src/algorithms/ctmrg/sequential.jl b/src/algorithms/ctmrg/sequential.jl index 866c3f1a..d923f5a9 100644 --- a/src/algorithms/ctmrg/sequential.jl +++ b/src/algorithms/ctmrg/sequential.jl @@ -1,8 +1,8 @@ """ SequentialCTMRG(; tol=$(Defaults.ctmrg_tol), maxiter=$(Defaults.ctmrg_maxiter), miniter=$(Defaults.ctmrg_miniter), verbosity=$(Defaults.ctmrg_verbosity), - svd_alg=SVDAdjoint(), trscheme=truncation_scheme_symbols[Defaults.trscheme], - projector_alg=projector_symbols[Defaults.projector_alg]) + svd_alg=TODO, trscheme=TODO, + projector_alg=TODO) CTMRG algorithm where the expansions and renormalization is performed sequentially column-wise. This is implemented as a growing and projecting step to the left, followed by @@ -16,19 +16,20 @@ struct SequentialCTMRG <: CTMRGAlgorithm verbosity::Int projector_alg::ProjectorAlgorithm end -function SequentialCTMRG(; - tol=Defaults.ctmrg_tol, - maxiter=Defaults.ctmrg_maxiter, - miniter=Defaults.ctmrg_miniter, - verbosity=Defaults.ctmrg_verbosity, - svd_alg=SVDAdjoint(), - trscheme=truncation_scheme_symbols[Defaults.trscheme], - projector_alg=projector_symbols[Defaults.projector_alg], -) - return SequentialCTMRG( - tol, maxiter, miniter, verbosity, projector_alg(; svd_alg, trscheme, verbosity) - ) -end +SequentialCTMRG(; kwargs...) = select_algorithm(CTMRGAlgorithm; alg=:simultaneous, kwargs...) 
+# function SequentialCTMRG(; +# tol=Defaults.ctmrg_tol, +# maxiter=Defaults.ctmrg_maxiter, +# miniter=Defaults.ctmrg_miniter, +# verbosity=Defaults.ctmrg_verbosity, +# svd_alg=SVDAdjoint(), +# trscheme=truncation_scheme_symbols[Defaults.trscheme], +# projector_alg=projector_symbols[Defaults.projector_alg], +# ) +# return SequentialCTMRG( +# tol, maxiter, miniter, verbosity, projector_alg(; svd_alg, trscheme, verbosity) +# ) +# end """ ctmrg_leftmove(col::Int, network, env::CTMRGEnv, alg::SequentialCTMRG) diff --git a/src/algorithms/ctmrg/simultaneous.jl b/src/algorithms/ctmrg/simultaneous.jl index 3d2a442c..b4a7b399 100644 --- a/src/algorithms/ctmrg/simultaneous.jl +++ b/src/algorithms/ctmrg/simultaneous.jl @@ -1,8 +1,8 @@ """ SimultaneousCTMRG(; tol=$(Defaults.ctmrg_tol), maxiter=$(Defaults.ctmrg_maxiter), miniter=$(Defaults.ctmrg_miniter), verbosity=$(Defaults.ctmrg_verbosity), - svd_alg=SVDAdjoint(), trscheme=truncation_scheme_symbols[Defaults.trscheme], - projector_alg=projector_symbols[Defaults.projector_alg]) + svd_alg=TODO, trscheme=TODO, + projector_alg=TODO) CTMRG algorithm where all sides are grown and renormalized at the same time. In particular, the projectors are applied to the corners from two sides simultaneously. The projectors are @@ -16,19 +16,20 @@ struct SimultaneousCTMRG <: CTMRGAlgorithm verbosity::Int projector_alg::ProjectorAlgorithm end -function SimultaneousCTMRG(; - tol=Defaults.ctmrg_tol, - maxiter=Defaults.ctmrg_maxiter, - miniter=Defaults.ctmrg_miniter, - verbosity=Defaults.ctmrg_verbosity, - svd_alg=SVDAdjoint(), - trscheme=truncation_scheme_symbols[Defaults.trscheme], - projector_alg=projector_symbols[Defaults.projector_alg], -) - return SimultaneousCTMRG( - tol, maxiter, miniter, verbosity, projector_alg(; svd_alg, trscheme, verbosity) - ) -end +SimultaneousCTMRG(; kwargs...) = select_algorithm(CTMRGAlgorithm; alg=:simultaneous, kwargs...) 
+# function SimultaneousCTMRG(; +# tol=Defaults.ctmrg_tol, +# maxiter=Defaults.ctmrg_maxiter, +# miniter=Defaults.ctmrg_miniter, +# verbosity=Defaults.ctmrg_verbosity, +# svd_alg=SVDAdjoint(), +# trscheme=truncation_scheme_symbols[Defaults.trscheme], +# projector_alg=projector_symbols[Defaults.projector_alg], +# ) +# return SimultaneousCTMRG( +# tol, maxiter, miniter, verbosity, projector_alg(; svd_alg, trscheme, verbosity) +# ) +# end function ctmrg_iteration(network, env::CTMRGEnv, alg::SimultaneousCTMRG) enlarged_corners = dtmap(eachcoordinate(network, 1:4)) do idx diff --git a/src/algorithms/optimization/fixed_point_differentiation.jl b/src/algorithms/optimization/fixed_point_differentiation.jl index 217d1c36..b9f2705c 100644 --- a/src/algorithms/optimization/fixed_point_differentiation.jl +++ b/src/algorithms/optimization/fixed_point_differentiation.jl @@ -19,14 +19,7 @@ struct GeomSum{F} <: GradMode{F} maxiter::Int verbosity::Int end -function GeomSum(; - tol=Defaults.gradient_tol, - maxiter=Defaults.gradient_maxiter, - verbosity=0, - iterscheme=Defaults.gradient_iterscheme, -) - return GeomSum{iterscheme}(tol, maxiter, verbosity) -end +GeomSum(; kwargs...) = select_algorithm(GradMode; alg=:GeomSum, kwargs...) """ struct ManualIter(; tol=$(Defaults.gradient_tol), maxiter=$(Defaults.gradient_maxiter), @@ -45,17 +38,10 @@ struct ManualIter{F} <: GradMode{F} maxiter::Int verbosity::Int end -function ManualIter(; - tol=Defaults.gradient_tol, - maxiter=Defaults.gradient_maxiter, - verbosity=0, - iterscheme=Defaults.gradient_iterscheme, -) - return ManualIter{iterscheme}(tol, maxiter, verbosity) -end +ManualIter(; kwargs...) = select_algorithm(GradMode; alg=:manualiter, kwargs...) 
""" - struct LinSolver(; solver=$(linsolver_solver_symbols[Defaults.gradient_linsolver]), iterscheme=$(Defaults.gradient_iterscheme)) <: GradMode{iterscheme} + struct LinSolver(; solver=TODO, iterscheme=$(Defaults.gradient_iterscheme)) <: GradMode{iterscheme} Gradient mode wrapper around `KrylovKit.LinearSolver` for solving the gradient linear problem using iterative solvers. @@ -69,15 +55,10 @@ such that `gauge_fix` will also be differentiated everytime a CTMRG derivative i struct LinSolver{F} <: GradMode{F} solver::KrylovKit.LinearSolver end -function LinSolver(; - solver=linsolver_solver_symbols[Defaults.gradient_linsolver], - iterscheme=Defaults.gradient_iterscheme, -) - return LinSolver{iterscheme}(solver) -end +LinSolver(; kwargs...) = select_algorithm(GradMode; alg=:linsolver, kwargs...) """ - struct EigSolver(; solver=$(eigsolver_solver_symbols[Defaults.gradient_eigsolver]), iterscheme=$(Defaults.gradient_iterscheme)) <: GradMode{iterscheme} + struct EigSolver(; solver=TODO, iterscheme=$(Defaults.gradient_iterscheme)) <: GradMode{iterscheme} Gradient mode wrapper around `KrylovKit.KrylovAlgorithm` for solving the gradient linear problem as an eigenvalue problem. @@ -91,12 +72,7 @@ such that `gauge_fix` will also be differentiated everytime a CTMRG derivative i struct EigSolver{F} <: GradMode{F} solver::KrylovKit.KrylovAlgorithm end -function EigSolver(; - solver=eigsolver_solver_symbols[Defaults.gradient_eigsolver], - iterscheme=Defaults.gradient_iterscheme, -) - return EigSolver{iterscheme}(solver) -end +EigSolver(; kwargs...) = select_algorithm(GradMode; alg=:eigsolver, kwargs...) 
#= Evaluating the gradient of the cost function for CTMRG: diff --git a/src/algorithms/select_algorithm.jl b/src/algorithms/select_algorithm.jl index 31d3d1d2..2d255a1d 100644 --- a/src/algorithms/select_algorithm.jl +++ b/src/algorithms/select_algorithm.jl @@ -120,7 +120,13 @@ function select_algorithm( # replace symbol with projector alg type alg_type = if alg isa Symbol - projector_symbols[alg] + if alg == :simultaneous + SimultaneousCTMRG + elseif alg == :sequential + SequentialCTMRG + else + throw(ArgumentError("unknown CTMRG algorithm: $alg")) + end else alg end @@ -156,7 +162,13 @@ function select_algorithm( ) # replace symbol with projector alg type alg_type = if alg isa Symbol - projector_symbols[alg] + if alg == :halfinfinite + HalfInfiniteProjector + elseif alg == :fullinfinite + FullInfiniteProjector + else + throw(ArgumentError("unknown projector algorithm: $alg")) + end else alg end @@ -193,7 +205,17 @@ function select_algorithm( ) # replace symbol with GradMode alg type alg_type = if alg isa Symbol - gradmode_symbols[alg] + if alg == :geomsum + GeomSum + elseif alg == :manualiter + ManualIter + elseif alg == :linsolver + LinSolver + elseif alg == :eigsolver + EigSolver + else + throw(ArgumentError("unknown GradMode algorithm: $alg")) + end else alg end @@ -209,13 +231,23 @@ function select_algorithm( solver_type = if alg <: LinSolver # replace symbol with solver alg type if solver_kwargs.alg isa Symbol - linsolver_solver_symbols[solver_kwargs.alg] + if solver_kwargs.alg == :gmres + GMRES + elseif solver_kwargs.alg == :bicgstab + BiCGStab + else + throw(ArgumentError("unknown LinSolver solver: $(solver_kwargs.alg)")) + end else solver_kwargs.alg end elseif alg <: EigSolver if solver_kwargs.alg isa Symbol - eigsolver_solver_symbols[solver_kwargs.alg] + if solver_kwargs.alg == :arnoldi + Arnoldi + else + throw(ArgumentError("unknown EigSolver solver: $(solver_kwargs.alg)")) + end else solver_kwargs.alg end @@ -242,7 +274,24 @@ end function 
select_algorithm( ::Type{TensorKit.TruncationScheme}; alg=Defaults.trscheme, kwargs... ) - alg_type = alg isa Symbol ? truncation_scheme_symbols[alg] : alg # replace Symbol with TruncationScheme type + alg_type = if alg isa Symbol # replace Symbol with TruncationScheme type + if alg == :fixedspace + FixedSpaceTruncation + elseif alg == :notrunc + TensorKit.NoTruncation + elseif alg == :truncerr + TensorKit.TruncationError + elseif alg == :truncspace + TensorKit.TruncationSpace + elseif alg == :truncbelow + TensorKit.TruncationCutoff + else + throw(ArgumentError("unknown truncation scheme: $alg")) + end + else + alg + end + args = map(k -> last(kwargs[k]), keys(kwargs)) # extract only values of supplied kwargs (empty Tuple, if kwargs is empty) return alg_type(args...) end @@ -254,7 +303,15 @@ function select_algorithm( fwd_algorithm = if fwd_alg isa NamedTuple fwd_kwargs = (; alg=Defaults.svd_fwd_alg, fwd_alg...) # overwrite with specified kwargs fwd_type = if fwd_kwargs.alg isa Symbol # replace symbol with alg type - svd_fwd_symbols[fwd_kwargs.alg] + if fwd_kwargs.alg == :sdd + TensorKit.SDD + elseif fwd_kwargs.alg == :svd + TensorKit.SVD + elseif fwd_kwargs.alg == :iterative + IterSVD + else + throw(ArgumentError("unknown forward algorithm: $(fwd_kwargs.alg)")) + end else fwd_kwargs.alg end @@ -271,7 +328,15 @@ function select_algorithm( rrule_alg..., ) # overwrite with specified kwargs rrule_type = if rrule_kwargs.alg isa Symbol # replace symbol with alg type - svd_rrule_symbols[rrule_kwargs.alg] + if rrule_kwargs.alg == :gmres + GMRES + elseif rrule_kwargs.alg == :bicgstab + BiCGStab + elseif rrule_kwargs.alg == :arnoldi + Arnoldi + else + throw(ArgumentError("unknown rrule algorithm: $(rrule_kwargs.alg)")) + end else rrule_kwargs.alg end diff --git a/src/algorithms/time_evolution/simpleupdate.jl b/src/algorithms/time_evolution/simpleupdate.jl index 06de206f..8659546c 100644 --- a/src/algorithms/time_evolution/simpleupdate.jl +++ 
b/src/algorithms/time_evolution/simpleupdate.jl @@ -10,6 +10,7 @@ struct SimpleUpdate maxiter::Int trscheme::TensorKit.TruncationScheme end +# TODO: add kwarg constructor and SU Defaults function truncation_scheme(alg::SimpleUpdate, v::ElementarySpace) if alg.trscheme isa FixedSpaceTruncation diff --git a/src/utility/svd.jl b/src/utility/svd.jl index f6cb5d23..d94187c9 100644 --- a/src/utility/svd.jl +++ b/src/utility/svd.jl @@ -10,7 +10,7 @@ using TensorKit: const CRCExt = Base.get_extension(KrylovKit, :KrylovKitChainRulesCoreExt) """ - struct SVDAdjoint(; fwd_alg=$(svd_fwd_symbols[Defaults.svd_fwd_alg]), rrule_alg=$(svd_rrule_symbols[Defaults.svd_rrule_alg]), + struct SVDAdjoint(; fwd_alg=TODO, rrule_alg=TODO, broadening=nothing) Wrapper for a SVD algorithm `fwd_alg` with a defined reverse rule `rrule_alg`. @@ -33,13 +33,7 @@ struct SVDAdjoint{F,R,B} return new{F,R,B}(fwd_alg, rrule_alg, broadening) end end # Keep truncation algorithm separate to be able to specify CTMRG dependent information -function SVDAdjoint(; - fwd_alg=svd_fwd_symbols[Defaults.svd_fwd_alg], - rrule_alg=svd_rrule_symbols[Defaults.svd_rrule_alg], - broadening=nothing, -) - return SVDAdjoint(fwd_alg, rrule_alg, broadening) -end +SVDAdjoint(; kwargs...) = select_algorithm(SVDAdjoint; kwargs...) 
""" PEPSKit.tsvd(t, alg; trunc=notrunc(), p=2) From 8f86cb7b49652f7887083bc0ba44fed13d8b5cb1 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Wed, 5 Mar 2025 11:51:18 +0100 Subject: [PATCH 31/52] Add CTMRGAlgorithm, PEPSOptimize and OptimizationAlgorithm select_algorithms and new docstring --- src/Defaults.jl | 3 +- .../optimization/peps_optimization.jl | 2 +- src/algorithms/select_algorithm.jl | 178 ++++++++++++------ 3 files changed, 119 insertions(+), 64 deletions(-) diff --git a/src/Defaults.jl b/src/Defaults.jl index d6eb0052..c5457be1 100644 --- a/src/Defaults.jl +++ b/src/Defaults.jl @@ -110,8 +110,9 @@ const gradient_alg = :linsolver # ∈ {:geomsum, :manualiter, :linsolver, :eigso const reuse_env = true const optimizer_tol = 1e-4 const optimizer_maxiter = 100 -const lbfgs_memory = 20 const optimizer_verbosity = 3 +const optimizer_alg = :lbfgs +const lbfgs_memory = 20 # OhMyThreads scheduler defaults const scheduler = Ref{Scheduler}() diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index 0deeea5d..14d72e53 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -119,7 +119,7 @@ information `NamedTuple` which contains the following entries: * `gradnorms_unitcell`: History of gradient norms for each respective unit cell entry. * `times`: History of optimization step execution times. """ -function fixedpoint(operator, peps₀::InfinitePEPS, env₀::CTMRGEnv; kwargs...) +function fixedpoint(operator, peps₀::InfinitePEPS, env₀::CTMRGEnv; (finalize!)=OptimKit._finalize!, kwargs...) alg, finalize! = select_algorithm(fixedpoint, env₀; kwargs...) return fixedpoint(operator, peps₀, env₀, alg; finalize!) 
end diff --git a/src/algorithms/select_algorithm.jl b/src/algorithms/select_algorithm.jl index 2d255a1d..2f7f6818 100644 --- a/src/algorithms/select_algorithm.jl +++ b/src/algorithms/select_algorithm.jl @@ -1,19 +1,25 @@ """ - select_algorithm(::typeof(fixedpoint), env₀::CTMRGEnv; kwargs...) + select_algorithm(func_or_alg, args...; kwargs...) -> Algorithm -Parse optimization keyword arguments on to the corresponding algorithm structs and return -a final `PEPSOptimize` to be used in `fixedpoint`. For a description of the keyword -arguments, see [`fixedpoint`](@ref). +Parse arguments and keyword arguments to the algorithm struct corresponding to +`func_or_alg` and return an algorithm instance. To that end, we use a general interface +where all keyword arguments that can be algorithm themselves can be specified using + +* `alg::Algorithm`: an instance of the algorithm struct or +* `(; alg::Union{Symbol,AlgorithmType}, alg_kwargs...)`: a `NamedTuple` where the algorithm is specified by a symbol or the type of the algorithm struct, and the algorithm keyword arguments + +A full description of the keyword argument can be found in the respective function or +algorithm struct docstrings. """ +function select_algorithm end + function select_algorithm( ::typeof(fixedpoint), env₀::CTMRGEnv; tol=Defaults.optimizer_tol, # top-level tolerance verbosity=2, # top-level verbosity boundary_alg=(;), - gradient_alg=(;), - optimization_alg=(;), - (finalize!)=OptimKit._finalize!, + kwargs..., ) # top-level verbosity @@ -51,6 +57,27 @@ function select_algorithm( throw(ArgumentError("unknown boundary algorithm: $boundary_alg")) end + return select_algorithm(PEPSOptimize; boundary_alg=boundary_algorithm, kwargs...) 
+end + +function select_algorithm( + ::Type{PEPSOptimize}, + env₀::CTMRGEnv; + boundary_alg=(;), + gradient_alg=(;), + optimizer_alg=(;), + reuse_env=Defaults.reuse_env, + symmetrization=nothing, +) + # parse boundary algorithm + boundary_algorithm = if boundary_alg isa CTMRGAlgorithm + boundary_alg + elseif boundary_alg isa NamedTuple + select_algorithm(leading_boundary, env₀; boundary_alg...) + else + throw(ArgumentError("unknown boundary algorithm: $boundary_alg")) + end + # parse fixed-point gradient algorithm gradient_algorithm = if gradient_alg isa GradMode gradient_alg @@ -61,50 +88,97 @@ function select_algorithm( end # construct final PEPSOptimize optimization algorithm - optimization_algorithm = if optimization_alg isa PEPSOptimize + optimizer_algorithm = if optimizer_alg isa OptimKit.OptimizationAlgorithm optimization_alg - elseif optimization_alg isa NamedTuple - optimization_kwargs = (; - alg=Defaults.optimizer_alg, - tol=tol, - maxiter=Defaults.optimizer_maxiter, - lbfgs_memory=Defaults.lbfgs_memory, - reuse_env=Defaults.reuse_env, - symmetrization=nothing, - optimization_alg..., # replaces all specified kwargs - ) - optimizer = LBFGS( - optimization_kwargs.lbfgs_memory; - gradtol=optimization_kwargs.tol, - maxiter=optimization_kwargs.maxiter, - verbosity=optimizer_verbosity, - ) - PEPSOptimize( - boundary_algorithm, - gradient_algorithm, - optimizer, - optimization_kwargs.reuse_env, - optimization_kwargs.symmetrization, - ) + elseif optimizer_alg isa NamedTuple + select_algorithm(OptimKit.OptimizationAlgorithm; optimizer_alg...) else - throw(ArgumentError("unknown optimization algorithm: $optimization_alg")) + throw(ArgumentError("unknown optimization algorithm: $optimizer_alg")) end - return optimization_algorithm, finalize! + return PEPSOptimize( + boundary_algorithm, + gradient_algorithm, + optimizer_algorithm, + reuse_env, + symmetrization, + ) end -""" - select_algorithm(::typeof(leading_boundary), env₀::CTMRGEnv; kwargs...) 
-> CTMRGAlgorithm +function select_algorithm(::Type{OptimKit.OptimizationAlgorithm}; + alg=Defaults.optimizer_alg, + tol=Defaults.optimizer_tol, + maxiter=Defaults.optimizer_maxiter, + verbosity=Defaults.optimizer_verbosity, + lbfgs_memory=Defaults.lbfgs_memory, + # TODO: add linesearch, ... to kwargs and defaults? +) + # replace symbol with projector alg type + alg_type = if alg isa Symbol + if alg == :gradientdescent + GradientDescent + elseif alg == :conjugategradient + ConjugateGradient + elseif alg == :lbfgs + (; kwargs...) -> LBFGS(lbfgs_memory; kwargs...) + else + throw(ArgumentError("unknown optimizer algorithm: $alg")) + end + else + alg + end + + optimizer = alg_type(; + gradtol=tol, + maxiter, + verbosity, + ) + PEPSOptimize( + boundary_algorithm, + gradient_algorithm, + optimizer, + optimization_kwargs.reuse_env, + optimization_kwargs.symmetrization, + ) +end -Parse and standardize CTMRG keyword arguments, and bundle them into a `CTMRGAlgorithm` struct, -which is passed on to [`leading_boundary`](@ref). See [`leading_boundary`](@ref) for a -description of all keyword arguments. -""" function select_algorithm( ::typeof(leading_boundary), env₀::CTMRGEnv; alg=Defaults.ctmrg_alg, tol=Defaults.ctmrg_tol, + verbosity=Defaults.ctmrg_verbosity, + svd_alg=(;), + kwargs..., +) + # adjust SVD rrule settings to CTMRG tolerance, verbosity and environment dimension + svd_algorithm = if svd_alg isa SVDAdjoint + svd_alg + elseif svd_alg isa NamedTuple + alg′ = select_algorithm( + SVDAdjoint; rrule_alg=(; tol=1e1tol, verbosity=verbosity - 2), svd_alg... 
+ ) + if typeof(alg′.rrule_alg) <: Union{<:GMRES,<:Arnoldi} + # extract maximal environment dimensions + χenv = maximum(env₀.corners) do corner + return dim(space(corner, 1)) + end + krylovdim = round(Int, Defaults.krylovdim_factor * χenv) + @reset alg′.rrule_alg.krylovdim = krylovdim + end + else + throw(ArgumentError("unknown SVD algorithm: $svd_alg")) + end + + return select_algorithm( + CTMRGAlgorithm; alg, tol, verbosity, svd_alg=svd_algorithm, kwargs... + ) +end + +function select_algorithm( + ::Type{CTMRGAlgorithm}; + alg=Defaults.ctmrg_alg, + tol=Defaults.ctmrg_tol, maxiter=Defaults.ctmrg_maxiter, miniter=Defaults.ctmrg_miniter, verbosity=Defaults.ctmrg_verbosity, @@ -112,12 +186,6 @@ function select_algorithm( svd_alg=(;), projector_alg=Defaults.projector_alg, # only allows for Symbol/Type{ProjectorAlgorithm} to expose projector kwargs ) - # extract maximal environment dimensions - χenv = maximum(env₀.corners) do corner - return dim(space(corner, 1)) - end - krylovdim = round(Int, Defaults.krylovdim_factor * χenv) - # replace symbol with projector alg type alg_type = if alg isa Symbol if alg == :simultaneous @@ -131,23 +199,9 @@ function select_algorithm( alg end - # parse SVD forward & rrule algorithm - svd_algorithm = if svd_alg isa SVDAdjoint - svd_alg - elseif svd_alg isa NamedTuple - alg′ = select_algorithm( - SVDAdjoint; rrule_alg=(; tol=1e1tol, verbosity=verbosity - 2), svd_alg... 
- ) - if typeof(alg′.rrule_alg) <: Union{<:GMRES,<:Arnoldi} - @reset alg′.rrule_alg.krylovdim = krylovdim - end - else - throw(ArgumentError("unknown SVD algorithm: $svd_alg")) - end - # parse CTMRG projector algorithm projector_algorithm = select_algorithm( - ProjectorAlgorithm; alg=projector_alg, svd_alg=svd_algorithm, trscheme, verbosity + ProjectorAlgorithm; alg=projector_alg, svd_alg, trscheme, verbosity ) return alg_type(tol, maxiter, miniter, verbosity, projector_algorithm) @@ -222,7 +276,7 @@ function select_algorithm( # parse GradMode algorithm gradient_algorithm = if alg_type <: Union{GeomSum,ManualIter} - alg_type(; tol, maxiter, verbosity, iterscheme) + alg_type{iterscheme}(tol, maxiter, verbosity) elseif alg_type <: Union{<:LinSolver,<:EigSolver} solver = if solver_alg isa NamedTuple # determine linear/eigen solver algorithm solver_kwargs = (; @@ -263,7 +317,7 @@ function select_algorithm( solver_alg end - alg_type(; solver, iterscheme) + alg_type{iterscheme}(solver) else throw(ArgumentError("unknown gradient algorithm: $alg")) end From c5277a63546eef04ef6be3758e62fe88e6f9aae8 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Wed, 5 Mar 2025 12:06:21 +0100 Subject: [PATCH 32/52] Fix fixedpoint select_algorithm --- src/algorithms/select_algorithm.jl | 63 ++++++++++++++++++++++++------ 1 file changed, 51 insertions(+), 12 deletions(-) diff --git a/src/algorithms/select_algorithm.jl b/src/algorithms/select_algorithm.jl index 2f7f6818..0afd4bd0 100644 --- a/src/algorithms/select_algorithm.jl +++ b/src/algorithms/select_algorithm.jl @@ -21,7 +21,6 @@ function select_algorithm( boundary_alg=(;), kwargs..., ) - # top-level verbosity if verbosity ≤ 0 # disable output boundary_verbosity = -1 @@ -41,23 +40,66 @@ function select_algorithm( optimizer_verbosity = 3 end - # parse boundary algorithm + # adjust CTMRG tols and verbosity boundary_algorithm = if boundary_alg isa CTMRGAlgorithm boundary_alg elseif boundary_alg isa NamedTuple + svd_alg = if 
haskey(boundary_alg, :svd_alg) + if boundary_alg.svd_alg isa SVDAdjoint + boundary_alg.svd_alg + elseif boundary_alg.svd_alg isa NamedTuple + select_algorithm(SVDAdjoint; rrule_alg=(; tol=1e-3tol), svd_alg...) + else + throw(ArgumentError("unknown SVD algorithm: $(boundary_alg.svd_alg)")) + end + else + (; rrule_alg(; tol=1e-3tol)) + end + select_algorithm( leading_boundary, env₀; tol=1e-4tol, verbosity=boundary_verbosity, - svd_alg=(; rrule_alg=(; tol=1e-3tol)), + svd_alg, boundary_alg..., ) else throw(ArgumentError("unknown boundary algorithm: $boundary_alg")) end - return select_algorithm(PEPSOptimize; boundary_alg=boundary_algorithm, kwargs...) + # adjust gradient verbosity + gradient_algorithm = if gradient_alg isa GradMode + gradient_alg + elseif gradient_alg isa NamedTuple + select_algorithm( + GradMode; tol=1e-2tol, verbosity=gradient_verbosity, gradient_kwargs... + ) + else + throw(ArgumentError("unknown gradient algorithm: $gradient_alg")) + end + + # adjust optimizer tol and verbosity + optimizer_algorithm = if optimizer_alg isa OptimKit.OptimizationAlgorithm + optimization_alg + elseif optimizer_alg isa NamedTuple + select_algorithm( + OptimKit.OptimizationAlgorithm; + tol, + verbosity=optimizer_verbosity, + optimizer_alg..., + ) + else + throw(ArgumentError("unknown optimization algorithm: $optimizer_alg")) + end + + return select_algorithm( + PEPSOptimize; + boundary_alg=boundary_algorithm, + gradient_alg=gradient_algorithm, + optimizer_alg=optimizer_algorithm, + kwargs..., + ) end function select_algorithm( @@ -87,7 +129,7 @@ function select_algorithm( throw(ArgumentError("unknown gradient algorithm: $gradient_alg")) end - # construct final PEPSOptimize optimization algorithm + # parse optimizer algorithm optimizer_algorithm = if optimizer_alg isa OptimKit.OptimizationAlgorithm optimization_alg elseif optimizer_alg isa NamedTuple @@ -105,7 +147,8 @@ function select_algorithm( ) end -function select_algorithm(::Type{OptimKit.OptimizationAlgorithm}; 
+function select_algorithm( + ::Type{OptimKit.OptimizationAlgorithm}; alg=Defaults.optimizer_alg, tol=Defaults.optimizer_tol, maxiter=Defaults.optimizer_maxiter, @@ -128,12 +171,8 @@ function select_algorithm(::Type{OptimKit.OptimizationAlgorithm}; alg end - optimizer = alg_type(; - gradtol=tol, - maxiter, - verbosity, - ) - PEPSOptimize( + optimizer = alg_type(; gradtol=tol, maxiter, verbosity) + return PEPSOptimize( boundary_algorithm, gradient_algorithm, optimizer, From e74f77fbb40991f59649fbe702d601dbad4285ee Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Wed, 5 Mar 2025 14:22:07 +0100 Subject: [PATCH 33/52] Fix IterSVD docstring --- src/utility/svd.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/utility/svd.jl b/src/utility/svd.jl index d94187c9..6d030318 100644 --- a/src/utility/svd.jl +++ b/src/utility/svd.jl @@ -67,7 +67,7 @@ function TensorKit._tsvd!(t, alg::FixedSVD, ::NoTruncation, ::Real=2) end """ - struct IterSVD(; alg=KrylovKit.GKL(), fallback_threshold = Inf) + struct IterSVD(; alg=KrylovKit.GKL(), fallback_threshold = Inf, start_vector=random_start_vector) Iterative SVD solver based on KrylovKit's GKL algorithm, adapted to (symmetric) tensors. The number of targeted singular values is set via the `TruncationSpace` in `ProjectorAlg`. 
From 5e94397a651972f76918cd3421d0a1cdb01a4791 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Wed, 5 Mar 2025 15:13:40 +0100 Subject: [PATCH 34/52] Make runnable --- src/PEPSKit.jl | 3 +- .../optimization/peps_optimization.jl | 24 ++- src/algorithms/select_algorithm.jl | 184 +++++++----------- 3 files changed, 83 insertions(+), 128 deletions(-) diff --git a/src/PEPSKit.jl b/src/PEPSKit.jl index 60f998ba..8d011e43 100644 --- a/src/PEPSKit.jl +++ b/src/PEPSKit.jl @@ -55,13 +55,14 @@ include("algorithms/time_evolution/evoltools.jl") include("algorithms/time_evolution/simpleupdate.jl") include("algorithms/toolbox.jl") -include("algorithms/select_algorithm.jl") include("utility/symmetrization.jl") include("algorithms/optimization/fixed_point_differentiation.jl") include("algorithms/optimization/peps_optimization.jl") +include("algorithms/select_algorithm.jl") + using .Defaults: set_scheduler! export set_scheduler! export SVDAdjoint, IterSVD diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index 14d72e53..a1a435be 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -1,6 +1,6 @@ """ PEPSOptimize{G}(; boundary_alg=$(Defaults.ctmrg_alg), gradient_alg::G=$(Defaults.gradient_alg), - optimizer::OptimKit.OptimizationAlgorithm=$(Defaults.optimizer) + optimizer::OptimKit.OptimizationAlgorithm=$(Defaults.optimizer_alg) reuse_env::Bool=$(Defaults.reuse_env), symmetrization::Union{Nothing,SymmetrizationStyle}=nothing) Algorithm struct that represent PEPS ground-state optimization using AD. 
@@ -39,15 +39,7 @@ struct PEPSOptimize{G} return new{G}(boundary_alg, gradient_alg, optimizer, reuse_env, symmetrization) end end -function PEPSOptimize(; - boundary_alg=Defaults.ctmrg_alg, - gradient_alg=Defaults.gradient_alg, - optimizer=Defaults.optimizer, - reuse_env=Defaults.reuse_env, - symmetrization=nothing, -) - return PEPSOptimize(boundary_alg, gradient_alg, optimizer, reuse_env, symmetrization) -end +PEPSOptimize(; kwargs...) = select_algorithm(PEPSOptimize; kwargs...) """ fixedpoint(operator, peps₀::InfinitePEPS, env₀::CTMRGEnv; kwargs...) @@ -88,7 +80,7 @@ keyword arguments are: * `tol=1e-2tol`: Convergence tolerance for the fixed-point gradient iteration. * `maxiter=$(Defaults.gradient_maxiter)`: Maximal number of gradient problem iterations. -* `alg=$(Defaults.gradient_alg_type)`: Gradient algorithm type, can be any `GradMode` type. +* `alg=$(Defaults.gradient_alg)`: Gradient algorithm type, can be any `GradMode` type. * `verbosity`: Gradient output verbosity, ≤0 by default to disable too verbose printing. Should only be >0 for debug purposes. * `iterscheme=$(Defaults.gradient_iterscheme)`: CTMRG iteration scheme determining mode of differentiation. This can be `:fixed` (SVD with fixed gauge) or `:diffgauge` (differentiate gauge-fixing routine). @@ -119,8 +111,14 @@ information `NamedTuple` which contains the following entries: * `gradnorms_unitcell`: History of gradient norms for each respective unit cell entry. * `times`: History of optimization step execution times. """ -function fixedpoint(operator, peps₀::InfinitePEPS, env₀::CTMRGEnv; (finalize!)=OptimKit._finalize!, kwargs...) - alg, finalize! = select_algorithm(fixedpoint, env₀; kwargs...) +function fixedpoint( + operator, + peps₀::InfinitePEPS, + env₀::CTMRGEnv; + (finalize!)=OptimKit._finalize!, + kwargs..., +) + alg = select_algorithm(fixedpoint, env₀; kwargs...) return fixedpoint(operator, peps₀, env₀, alg; finalize!) 
end function fixedpoint( diff --git a/src/algorithms/select_algorithm.jl b/src/algorithms/select_algorithm.jl index 0afd4bd0..a665a318 100644 --- a/src/algorithms/select_algorithm.jl +++ b/src/algorithms/select_algorithm.jl @@ -1,3 +1,13 @@ +function _select_alg_or_namedtuple(alg, alg_type, selects...; extra_kwargs...) + if alg isa alg_type + return alg + elseif alg isa NamedTuple + return select_algorithm(selects...; extra_kwargs..., alg...) + else + throw(ArgumentError("unknown algorithm: $alg")) + end +end + """ select_algorithm(func_or_alg, args...; kwargs...) -> Algorithm @@ -19,6 +29,8 @@ function select_algorithm( tol=Defaults.optimizer_tol, # top-level tolerance verbosity=2, # top-level verbosity boundary_alg=(;), + gradient_alg=(;), + optimizer_alg=(;), kwargs..., ) # top-level verbosity @@ -41,60 +53,37 @@ function select_algorithm( end # adjust CTMRG tols and verbosity - boundary_algorithm = if boundary_alg isa CTMRGAlgorithm - boundary_alg - elseif boundary_alg isa NamedTuple - svd_alg = if haskey(boundary_alg, :svd_alg) - if boundary_alg.svd_alg isa SVDAdjoint - boundary_alg.svd_alg - elseif boundary_alg.svd_alg isa NamedTuple - select_algorithm(SVDAdjoint; rrule_alg=(; tol=1e-3tol), svd_alg...) 
- else - throw(ArgumentError("unknown SVD algorithm: $(boundary_alg.svd_alg)")) - end - else - (; rrule_alg(; tol=1e-3tol)) - end - select_algorithm( - leading_boundary, - env₀; - tol=1e-4tol, - verbosity=boundary_verbosity, - svd_alg, - boundary_alg..., - ) - else - throw(ArgumentError("unknown boundary algorithm: $boundary_alg")) - end + boundary_algorithm = _select_alg_or_namedtuple( + boundary_alg, + CTMRGAlgorithm, + leading_boundary, + env₀; + tol=1e-4tol, + verbosity=boundary_verbosity, + ) + @reset boundary_algorithm.projector_alg.svd_alg.rrule_alg.tol = 1e-3tol # use @reset for nested algs # adjust gradient verbosity - gradient_algorithm = if gradient_alg isa GradMode - gradient_alg - elseif gradient_alg isa NamedTuple - select_algorithm( - GradMode; tol=1e-2tol, verbosity=gradient_verbosity, gradient_kwargs... - ) - else - throw(ArgumentError("unknown gradient algorithm: $gradient_alg")) - end + gradient_algorithm = _select_alg_or_namedtuple( + gradient_alg, GradMode, GradMode; tol=1e-2tol, verbosity=gradient_verbosity + ) # adjust optimizer tol and verbosity - optimizer_algorithm = if optimizer_alg isa OptimKit.OptimizationAlgorithm - optimization_alg - elseif optimizer_alg isa NamedTuple - select_algorithm( - OptimKit.OptimizationAlgorithm; - tol, - verbosity=optimizer_verbosity, - optimizer_alg..., - ) - else - throw(ArgumentError("unknown optimization algorithm: $optimizer_alg")) - end + optimizer_algorithm = _select_alg_or_namedtuple( + optimizer_alg, + OptimKit.OptimizationAlgorithm, + OptimKit.OptimizationAlgorithm; + tol, + verbosity=optimizer_verbosity, + ) + @show boundary_algorithm + @show gradient_algorithm + @show optimizer_algorithm return select_algorithm( - PEPSOptimize; + PEPSOptimize, + env₀; boundary_alg=boundary_algorithm, gradient_alg=gradient_algorithm, optimizer_alg=optimizer_algorithm, @@ -112,31 +101,17 @@ function select_algorithm( symmetrization=nothing, ) # parse boundary algorithm - boundary_algorithm = if boundary_alg isa 
CTMRGAlgorithm - boundary_alg - elseif boundary_alg isa NamedTuple - select_algorithm(leading_boundary, env₀; boundary_alg...) - else - throw(ArgumentError("unknown boundary algorithm: $boundary_alg")) - end + boundary_algorithm = _select_alg_or_namedtuple( + boundary_alg, CTMRGAlgorithm, leading_boundary, env₀ + ) # parse fixed-point gradient algorithm - gradient_algorithm = if gradient_alg isa GradMode - gradient_alg - elseif gradient_alg isa NamedTuple - select_algorithm(GradMode; gradient_kwargs...) - else - throw(ArgumentError("unknown gradient algorithm: $gradient_alg")) - end + gradient_algorithm = _select_alg_or_namedtuple(gradient_alg, GradMode, GradMode) # parse optimizer algorithm - optimizer_algorithm = if optimizer_alg isa OptimKit.OptimizationAlgorithm - optimization_alg - elseif optimizer_alg isa NamedTuple - select_algorithm(OptimKit.OptimizationAlgorithm; optimizer_alg...) - else - throw(ArgumentError("unknown optimization algorithm: $optimizer_alg")) - end + optimizer_algorithm = _select_alg_or_namedtuple( + optimizer_alg, OptimKit.OptimizationAlgorithm, OptimKit.OptimizationAlgorithm + ) return PEPSOptimize( boundary_algorithm, @@ -171,14 +146,7 @@ function select_algorithm( alg end - optimizer = alg_type(; gradtol=tol, maxiter, verbosity) - return PEPSOptimize( - boundary_algorithm, - gradient_algorithm, - optimizer, - optimization_kwargs.reuse_env, - optimization_kwargs.symmetrization, - ) + return alg_type(; gradtol=tol, maxiter, verbosity) end function select_algorithm( @@ -191,23 +159,17 @@ function select_algorithm( kwargs..., ) # adjust SVD rrule settings to CTMRG tolerance, verbosity and environment dimension - svd_algorithm = if svd_alg isa SVDAdjoint - svd_alg - elseif svd_alg isa NamedTuple - alg′ = select_algorithm( - SVDAdjoint; rrule_alg=(; tol=1e1tol, verbosity=verbosity - 2), svd_alg... 
- ) - if typeof(alg′.rrule_alg) <: Union{<:GMRES,<:Arnoldi} - # extract maximal environment dimensions - χenv = maximum(env₀.corners) do corner - return dim(space(corner, 1)) - end - krylovdim = round(Int, Defaults.krylovdim_factor * χenv) - @reset alg′.rrule_alg.krylovdim = krylovdim + if svd_alg isa NamedTuple && + haskey(svd_alg, :rrule_alg) && + svd_alg.rrule_alg isa NamedTuple + χenv = maximum(env₀.corners) do corner + return dim(space(corner, 1)) end - else - throw(ArgumentError("unknown SVD algorithm: $svd_alg")) + krylovdim = round(Int, Defaults.krylovdim_factor * χenv) + rrule_alg = (; tol=1e1tol, verbosity=verbosity - 2, krylovdim, svd_alg.rrule_alg...) + svd_alg = (; rrule_alg, svd_alg...) end + svd_algorithm = _select_alg_or_namedtuple(svd_alg, SVDAdjoint, SVDAdjoint) return select_algorithm( CTMRGAlgorithm; alg, tol, verbosity, svd_alg=svd_algorithm, kwargs... @@ -267,22 +229,12 @@ function select_algorithm( end # parse SVD forward & rrule algorithm - svd_algorithm = if svd_alg isa SVDAdjoint - svd_alg - elseif svd_alg isa NamedTuple - select_algorithm(SVDAdjoint; svd_alg...) - else - throw(ArgumentError("unknown SVD algorithm: $svd_alg")) - end + svd_algorithm = _select_alg_or_namedtuple(svd_alg, SVDAdjoint, SVDAdjoint) # parse truncation scheme - truncation_scheme = if trscheme isa TruncationScheme - trscheme - elseif trscheme isa NamedTuple - select_algorithm(TruncationScheme; trscheme...) - else - throw(ArgumentError("unknown truncation scheme: $trscheme")) - end + truncation_scheme = _select_alg_or_namedtuple( + trscheme, TruncationScheme, TruncationScheme + ) return alg_type(svd_algorithm, truncation_scheme, verbosity) end @@ -318,11 +270,10 @@ function select_algorithm( alg_type{iterscheme}(tol, maxiter, verbosity) elseif alg_type <: Union{<:LinSolver,<:EigSolver} solver = if solver_alg isa NamedTuple # determine linear/eigen solver algorithm - solver_kwargs = (; - alg=Defaults.gradient_solver, tol, maxiter, verbosity, solver_alg... 
- ) + solver_kwargs = (; tol, maxiter, verbosity, solver_alg...) - solver_type = if alg <: LinSolver # replace symbol with solver alg type + solver_type = if alg_type <: LinSolver # replace symbol with solver alg type + solver_kwargs = (; alg=Defaults.gradient_linsolver, solver_kwargs...) if solver_kwargs.alg isa Symbol if solver_kwargs.alg == :gmres GMRES @@ -334,7 +285,8 @@ function select_algorithm( else solver_kwargs.alg end - elseif alg <: EigSolver + elseif alg_type <: EigSolver + solver_kwargs = (; alg=Defaults.gradient_eigsolver, solver_kwargs...) if solver_kwargs.alg isa Symbol if solver_kwargs.alg == :arnoldi Arnoldi @@ -350,7 +302,7 @@ function select_algorithm( ) end - solver_kwargs = Base.structdiff(solver_kwargs, (; alg)) # remove `alg` keyword argument + solver_kwargs = Base.structdiff(solver_kwargs, (; alg=nothing)) # remove `alg` keyword argument solver_type(; solver_kwargs...) else solver_alg @@ -401,14 +353,17 @@ function select_algorithm( elseif fwd_kwargs.alg == :svd TensorKit.SVD elseif fwd_kwargs.alg == :iterative - IterSVD + # circumvent alg keyword in IterSVD constructor + (; tol=1e-14, krylovdim=25, kwargs...) -> + IterSVD(; alg=GKL(; tol, krylovdim), kwargs...) else throw(ArgumentError("unknown forward algorithm: $(fwd_kwargs.alg)")) end else fwd_kwargs.alg end - fwd_type(fwd_kwargs...) + fwd_kwargs = Base.structdiff(fwd_kwargs, (; alg=nothing)) # remove `alg` keyword argument + fwd_type(; fwd_kwargs...) else fwd_alg end @@ -433,7 +388,8 @@ function select_algorithm( else rrule_kwargs.alg end - rrule_type(rrule_kwargs...) + rrule_kwargs = Base.structdiff(rrule_kwargs, (; alg=nothing)) # remove `alg` keyword argument + rrule_type(; rrule_kwargs...) 
else rrule_alg end From 48c92c0468323eedaba4c38271d33cb62407bcd9 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Wed, 5 Mar 2025 15:15:20 +0100 Subject: [PATCH 35/52] Fix formatting --- src/algorithms/ctmrg/sequential.jl | 4 +++- src/algorithms/ctmrg/simultaneous.jl | 4 +++- src/algorithms/select_algorithm.jl | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/algorithms/ctmrg/sequential.jl b/src/algorithms/ctmrg/sequential.jl index d923f5a9..a45a190b 100644 --- a/src/algorithms/ctmrg/sequential.jl +++ b/src/algorithms/ctmrg/sequential.jl @@ -16,7 +16,9 @@ struct SequentialCTMRG <: CTMRGAlgorithm verbosity::Int projector_alg::ProjectorAlgorithm end -SequentialCTMRG(; kwargs...) = select_algorithm(CTMRGAlgorithm; alg=:simultaneous, kwargs...) +function SequentialCTMRG(; kwargs...) + return select_algorithm(CTMRGAlgorithm; alg=:simultaneous, kwargs...) +end # function SequentialCTMRG(; # tol=Defaults.ctmrg_tol, # maxiter=Defaults.ctmrg_maxiter, diff --git a/src/algorithms/ctmrg/simultaneous.jl b/src/algorithms/ctmrg/simultaneous.jl index b4a7b399..134d2759 100644 --- a/src/algorithms/ctmrg/simultaneous.jl +++ b/src/algorithms/ctmrg/simultaneous.jl @@ -16,7 +16,9 @@ struct SimultaneousCTMRG <: CTMRGAlgorithm verbosity::Int projector_alg::ProjectorAlgorithm end -SimultaneousCTMRG(; kwargs...) = select_algorithm(CTMRGAlgorithm; alg=:simultaneous, kwargs...) +function SimultaneousCTMRG(; kwargs...) + return select_algorithm(CTMRGAlgorithm; alg=:simultaneous, kwargs...) 
+end # function SimultaneousCTMRG(; # tol=Defaults.ctmrg_tol, # maxiter=Defaults.ctmrg_maxiter, diff --git a/src/algorithms/select_algorithm.jl b/src/algorithms/select_algorithm.jl index a665a318..1e29827f 100644 --- a/src/algorithms/select_algorithm.jl +++ b/src/algorithms/select_algorithm.jl @@ -79,7 +79,7 @@ function select_algorithm( ) @show boundary_algorithm @show gradient_algorithm - @show optimizer_algorithm + @show optimizer_algorithm return select_algorithm( PEPSOptimize, From 75d1fab1b8da2a75dbe060e216c28def4132e3ad Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Wed, 5 Mar 2025 15:38:36 +0100 Subject: [PATCH 36/52] Update fixedpoint docstring --- .../optimization/peps_optimization.jl | 27 +++++++++++++------ src/algorithms/select_algorithm.jl | 3 --- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index a1a435be..aba8111f 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -56,7 +56,7 @@ The optimization parameters can be supplied via the keyword arguments or directl ## Keyword arguments -### Global settings +### General settings * `tol::Real=$(Defaults.optimizer_tol)`: Overall tolerance for gradient norm convergence of the optimizer. Sets related tolerance such as the boundary and boundary-gradient tolerances to sensible defaults unless they are explictly specified. * `verbosity::Int=1`: Overall output information verbosity level, should be one of the following: @@ -64,6 +64,8 @@ The optimization parameters can be supplied via the keyword arguments or directl 1. Optimizer output and warnings 2. Additionally print boundary information 3. All information including AD debug outputs +* `reuse_env=$(Defaults.reuse_env)`: If `true`, the current optimization step is initialized on the previous environment, otherwise a random environment is used. 
+* `symmetrization=nothing`: Accepts `nothing` or a `SymmetrizationStyle`, in which case the PEPS and PEPS gradient are symmetrized after each optimization iteration. * `(finalize!)=OptimKit._finalize!`: Inserts a `finalize!` function call after each optimization step by utilizing the `finalize!` kwarg of `OptimKit.optimize`. The function maps `(peps, env), f, g = finalize!((peps, env), f, g, numiter)`. ### Boundary algorithm @@ -80,22 +82,31 @@ keyword arguments are: * `tol=1e-2tol`: Convergence tolerance for the fixed-point gradient iteration. * `maxiter=$(Defaults.gradient_maxiter)`: Maximal number of gradient problem iterations. -* `alg=$(Defaults.gradient_alg)`: Gradient algorithm type, can be any `GradMode` type. +* `alg=$(Defaults.gradient_alg)`: Gradient algorithm type, can be any `GradMode` type or the corresponding symbol: + - `:geomsum`: Compute gradient directly from the geometric sum, see [`GeomSum`](@ref) + - `:manualiter`: Iterate gradient geometric sum manually, see ['ManualIter'](@ref) + - `:linsolver`: Solve fixed-point gradient linear problem using iterative solver, see ['LinSolver'](@ref) + - `:eigsolver`: Determine gradient via eigenvalue formulation of its Sylvester equation, see [`EigSolver`](@ref) * `verbosity`: Gradient output verbosity, ≤0 by default to disable too verbose printing. Should only be >0 for debug purposes. -* `iterscheme=$(Defaults.gradient_iterscheme)`: CTMRG iteration scheme determining mode of differentiation. This can be `:fixed` (SVD with fixed gauge) or `:diffgauge` (differentiate gauge-fixing routine). +* `iterscheme=$(Defaults.gradient_iterscheme)`: CTMRG iteration scheme determining mode of differentiation. 
This can be: + - `:fixed`: Use SVD with fixed gauge in for reverse pass + - `:diffgauge`: Differentiate gauge-fixing routine in addition to CTMRG iteration -### PEPS optimization settings +### Optimizer settings -Supply the optimization algorithm via `optimization_alg::Union{NamedTuple,<:PEPSOptimize}` -using either a `NamedTuple` of keyword arguments or a `PEPSOptimize` directly. By default, +Supply the optimizer algorithm via `optimization_alg::Union{NamedTuple,<:OptimKit.OptimizationAlgorithm}` +using either a `NamedTuple` of keyword arguments or a `OptimKit.OptimizationAlgorithm` directly. By default, `OptimKit.LBFGS` is used in combination with a `HagerZhangLineSearch`. The supported keyword arguments are: +* `alg=$(Defaults.optimizer_alg)`: Optimizer algorithm, can be any `OptimKit.OptimizationAlgorithm` type or the corresponding symbol: + - `gradientdescent`: Gradient descent algorithm, see the [OptimKit README](https://github.com/Jutho/OptimKit.jl) + - `conjugategradient`: Conjugate gradient algorithm, see the [OptimKit README](https://github.com/Jutho/OptimKit.jl) + - `lbfgs`: L-BFGS algorithm, see the [OptimKit README](https://github.com/Jutho/OptimKit.jl) * `tol=tol`: Gradient norm tolerance of the optimizer. * `maxiter=$(Defaults.optimizer_maxiter)`: Maximal number of optimization steps. +* `verbosity=$(Defaults.optimizer_verbosity)`: Optimizer output verbosity. * `lbfgs_memory=$(Defaults.lbfgs_memory)`: Size of limited memory representation of BFGS Hessian matrix. -* `reuse_env=$(Defaults.reuse_env)`: If `true`, the current optimization step is initialized on the previous environment, otherwise a random environment is used. -* `symmetrization=nothing`: Accepts `nothing` or a `SymmetrizationStyle`, in which case the PEPS and PEPS gradient are symmetrized after each optimization iteration. 
## Return values diff --git a/src/algorithms/select_algorithm.jl b/src/algorithms/select_algorithm.jl index 1e29827f..948bf9e5 100644 --- a/src/algorithms/select_algorithm.jl +++ b/src/algorithms/select_algorithm.jl @@ -77,9 +77,6 @@ function select_algorithm( tol, verbosity=optimizer_verbosity, ) - @show boundary_algorithm - @show gradient_algorithm - @show optimizer_algorithm return select_algorithm( PEPSOptimize, From adf66e925fbb64fc00760e59f17c3439b749c31e Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Thu, 6 Mar 2025 11:06:55 +0100 Subject: [PATCH 37/52] Rename solver to solver_alg in LinSolver and EigSolver --- examples/heisenberg.jl | 2 +- src/Defaults.jl | 2 +- .../fixed_point_differentiation.jl | 20 +++++++++---------- test/ctmrg/gradients.jl | 14 +++++++------ 4 files changed, 20 insertions(+), 18 deletions(-) diff --git a/examples/heisenberg.jl b/examples/heisenberg.jl index d54a7eb3..56540f50 100644 --- a/examples/heisenberg.jl +++ b/examples/heisenberg.jl @@ -15,7 +15,7 @@ ctm_alg = SimultaneousCTMRG(; tol=1e-10, verbosity=2) opt_alg = PEPSOptimize(; boundary_alg=ctm_alg, optimizer=LBFGS(4; maxiter=100, gradtol=1e-4, verbosity=3), - gradient_alg=LinSolver(; solver=GMRES(; tol=1e-6, maxiter=100)), + gradient_alg=LinSolver(; solver_alg=GMRES(; tol=1e-6, maxiter=100)), reuse_env=true, ) diff --git a/src/Defaults.jl b/src/Defaults.jl index c5457be1..9c91725b 100644 --- a/src/Defaults.jl +++ b/src/Defaults.jl @@ -58,7 +58,7 @@ Module containing default algorithm parameter values and arguments. 
- `gradient_alg`: Algorithm to compute the gradient fixed-point ``` - gradient_alg = LinSolver(; solver=gradient_linsolver, iterscheme=gradient_iterscheme) + gradient_alg = LinSolver(; solver_alg=gradient_linsolver, iterscheme=gradient_iterscheme) ``` - `reuse_env=true`: If `true`, the current optimization step is initialized on the previous diff --git a/src/algorithms/optimization/fixed_point_differentiation.jl b/src/algorithms/optimization/fixed_point_differentiation.jl index b9f2705c..76e5cff1 100644 --- a/src/algorithms/optimization/fixed_point_differentiation.jl +++ b/src/algorithms/optimization/fixed_point_differentiation.jl @@ -19,7 +19,7 @@ struct GeomSum{F} <: GradMode{F} maxiter::Int verbosity::Int end -GeomSum(; kwargs...) = select_algorithm(GradMode; alg=:GeomSum, kwargs...) +GeomSum(; kwargs...) = select_algorithm(GradMode; alg=:geomsum, kwargs...) """ struct ManualIter(; tol=$(Defaults.gradient_tol), maxiter=$(Defaults.gradient_maxiter), @@ -41,7 +41,7 @@ end ManualIter(; kwargs...) = select_algorithm(GradMode; alg=:manualiter, kwargs...) """ - struct LinSolver(; solver=TODO, iterscheme=$(Defaults.gradient_iterscheme)) <: GradMode{iterscheme} + struct LinSolver(; solver_alg=TODO, iterscheme=$(Defaults.gradient_iterscheme)) <: GradMode{iterscheme} Gradient mode wrapper around `KrylovKit.LinearSolver` for solving the gradient linear problem using iterative solvers. @@ -53,12 +53,12 @@ the differentiated iteration consists of a CTMRG iteration and a subsequent gaug such that `gauge_fix` will also be differentiated everytime a CTMRG derivative is computed. """ struct LinSolver{F} <: GradMode{F} - solver::KrylovKit.LinearSolver + solver_alg::KrylovKit.LinearSolver end LinSolver(; kwargs...) = select_algorithm(GradMode; alg=:linsolver, kwargs...) 
""" - struct EigSolver(; solver=TODO, iterscheme=$(Defaults.gradient_iterscheme)) <: GradMode{iterscheme} + struct EigSolver(; solver_alg=TODO, iterscheme=$(Defaults.gradient_iterscheme)) <: GradMode{iterscheme} Gradient mode wrapper around `KrylovKit.KrylovAlgorithm` for solving the gradient linear problem as an eigenvalue problem. @@ -70,7 +70,7 @@ the differentiated iteration consists of a CTMRG iteration and a subsequent gaug such that `gauge_fix` will also be differentiated everytime a CTMRG derivative is computed. """ struct EigSolver{F} <: GradMode{F} - solver::KrylovKit.KrylovAlgorithm + solver_alg::KrylovKit.KrylovAlgorithm end EigSolver(; kwargs...) = select_algorithm(GradMode; alg=:eigsolver, kwargs...) @@ -221,8 +221,8 @@ function fpgrad(∂F∂x, ∂f∂x, ∂f∂A, y₀, alg::ManualIter) end function fpgrad(∂F∂x, ∂f∂x, ∂f∂A, y₀, alg::LinSolver) - y, info = reallinsolve(∂f∂x, ∂F∂x, y₀, alg.solver, 1, -1) - if alg.solver.verbosity > 0 && info.converged != 1 + y, info = reallinsolve(∂f∂x, ∂F∂x, y₀, alg.solver_alg, 1, -1) + if alg.solver_alg.verbosity > 0 && info.converged != 1 @warn("gradient fixed-point iteration reached maximal number of iterations:", info) end @@ -235,11 +235,11 @@ function fpgrad(∂F∂x, ∂f∂x, ∂f∂A, x₀, alg::EigSolver) return (y + X[2] * ∂F∂x, X[2]) end X₀ = (x₀, one(scalartype(x₀))) - vals, vecs, info = realeigsolve(f, X₀, 1, :LM, alg.solver) - if alg.solver.verbosity > 0 && info.converged < 1 + vals, vecs, info = realeigsolve(f, X₀, 1, :LM, alg.solver_alg) + if alg.solver_alg.verbosity > 0 && info.converged < 1 @warn("gradient fixed-point iteration reached maximal number of iterations:", info) end - if norm(vecs[1][2]) < 1e-2 * alg.solver.tol + if norm(vecs[1][2]) < 1e-2 * alg.solver_alg.tol @warn "Fixed-point gradient computation using Arnoldi failed: auxiliary component should be finite but was $(vecs[1][2]). Possibly the Jacobian does not have a unique eigenvalue 1." 
end y = scale(vecs[1][1], 1 / vecs[1][2]) diff --git a/test/ctmrg/gradients.jl b/test/ctmrg/gradients.jl index 3e808ea1..cce77090 100644 --- a/test/ctmrg/gradients.jl +++ b/test/ctmrg/gradients.jl @@ -31,20 +31,22 @@ gradmodes = [ GeomSum(; tol=gradtol, iterscheme=:diffgauge), ManualIter(; tol=gradtol, iterscheme=:fixed), ManualIter(; tol=gradtol, iterscheme=:diffgauge), - LinSolver(; solver=KrylovKit.BiCGStab(; tol=gradtol), iterscheme=:fixed), - LinSolver(; solver=KrylovKit.BiCGStab(; tol=gradtol), iterscheme=:diffgauge), - EigSolver(; solver=KrylovKit.Arnoldi(; tol=gradtol, eager=true), iterscheme=:fixed), + LinSolver(; solver_alg=KrylovKit.BiCGStab(; tol=gradtol), iterscheme=:fixed), + LinSolver(; solver_alg=KrylovKit.BiCGStab(; tol=gradtol), iterscheme=:diffgauge), EigSolver(; - solver=KrylovKit.Arnoldi(; tol=gradtol, eager=true), iterscheme=:diffgauge + solver_alg=KrylovKit.Arnoldi(; tol=gradtol, eager=true), iterscheme=:fixed + ), + EigSolver(; + solver_alg=KrylovKit.Arnoldi(; tol=gradtol, eager=true), iterscheme=:diffgauge ), ], [ # Only use :diffgauge due to high gauge-sensitivity (perhaps due to small χenv?) 
nothing, GeomSum(; tol=gradtol, iterscheme=:diffgauge), ManualIter(; tol=gradtol, iterscheme=:diffgauge), - LinSolver(; solver=KrylovKit.BiCGStab(; tol=gradtol), iterscheme=:diffgauge), + LinSolver(; solver_alg=KrylovKit.BiCGStab(; tol=gradtol), iterscheme=:diffgauge), EigSolver(; - solver=KrylovKit.Arnoldi(; tol=gradtol, eager=true), iterscheme=:diffgauge + solver_alg=KrylovKit.Arnoldi(; tol=gradtol, eager=true), iterscheme=:diffgauge ), ], ] From c492f1377fbea9c0e510321ccc38063648c47f62 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Thu, 6 Mar 2025 12:09:52 +0100 Subject: [PATCH 38/52] Fix gradients test --- src/Defaults.jl | 2 ++ src/algorithms/ctmrg/sequential.jl | 15 +-------------- src/algorithms/ctmrg/simultaneous.jl | 13 ------------- src/algorithms/select_algorithm.jl | 8 +++++++- test/ctmrg/gradients.jl | 18 ++++++------------ test/heisenberg.jl | 2 +- 6 files changed, 17 insertions(+), 41 deletions(-) diff --git a/src/Defaults.jl b/src/Defaults.jl index 9c91725b..44549434 100644 --- a/src/Defaults.jl +++ b/src/Defaults.jl @@ -88,6 +88,8 @@ const sparse = false # TODO: implement sparse CTMRG # SVD forward & reverse const trscheme = :fixedspace # ∈ {:fixedspace, :notrunc, :truncerr, :truncspace, :truncbelow} const svd_fwd_alg = :sdd # ∈ {:sdd, :svd, :iterative} +const svd_rrule_tol = ctmrg_tol +const svd_rrule_min_krylovdim = 32 const svd_rrule_alg = :arnoldi # ∈ {:gmres, :bicgstab, :arnoldi} const svd_rrule_verbosity = -1 const krylovdim_factor = 1.4 diff --git a/src/algorithms/ctmrg/sequential.jl b/src/algorithms/ctmrg/sequential.jl index a45a190b..034933a9 100644 --- a/src/algorithms/ctmrg/sequential.jl +++ b/src/algorithms/ctmrg/sequential.jl @@ -17,21 +17,8 @@ struct SequentialCTMRG <: CTMRGAlgorithm projector_alg::ProjectorAlgorithm end function SequentialCTMRG(; kwargs...) - return select_algorithm(CTMRGAlgorithm; alg=:simultaneous, kwargs...) + return select_algorithm(CTMRGAlgorithm; alg=:sequential, kwargs...) 
end -# function SequentialCTMRG(; -# tol=Defaults.ctmrg_tol, -# maxiter=Defaults.ctmrg_maxiter, -# miniter=Defaults.ctmrg_miniter, -# verbosity=Defaults.ctmrg_verbosity, -# svd_alg=SVDAdjoint(), -# trscheme=truncation_scheme_symbols[Defaults.trscheme], -# projector_alg=projector_symbols[Defaults.projector_alg], -# ) -# return SequentialCTMRG( -# tol, maxiter, miniter, verbosity, projector_alg(; svd_alg, trscheme, verbosity) -# ) -# end """ ctmrg_leftmove(col::Int, network, env::CTMRGEnv, alg::SequentialCTMRG) diff --git a/src/algorithms/ctmrg/simultaneous.jl b/src/algorithms/ctmrg/simultaneous.jl index 134d2759..41c94f2f 100644 --- a/src/algorithms/ctmrg/simultaneous.jl +++ b/src/algorithms/ctmrg/simultaneous.jl @@ -19,19 +19,6 @@ end function SimultaneousCTMRG(; kwargs...) return select_algorithm(CTMRGAlgorithm; alg=:simultaneous, kwargs...) end -# function SimultaneousCTMRG(; -# tol=Defaults.ctmrg_tol, -# maxiter=Defaults.ctmrg_maxiter, -# miniter=Defaults.ctmrg_miniter, -# verbosity=Defaults.ctmrg_verbosity, -# svd_alg=SVDAdjoint(), -# trscheme=truncation_scheme_symbols[Defaults.trscheme], -# projector_alg=projector_symbols[Defaults.projector_alg], -# ) -# return SimultaneousCTMRG( -# tol, maxiter, miniter, verbosity, projector_alg(; svd_alg, trscheme, verbosity) -# ) -# end function ctmrg_iteration(network, env::CTMRGEnv, alg::SimultaneousCTMRG) enlarged_corners = dtmap(eachcoordinate(network, 1:4)) do idx diff --git a/src/algorithms/select_algorithm.jl b/src/algorithms/select_algorithm.jl index 948bf9e5..2aee917a 100644 --- a/src/algorithms/select_algorithm.jl +++ b/src/algorithms/select_algorithm.jl @@ -162,7 +162,9 @@ function select_algorithm( χenv = maximum(env₀.corners) do corner return dim(space(corner, 1)) end - krylovdim = round(Int, Defaults.krylovdim_factor * χenv) + krylovdim = max( + Defaults.svd_rrule_min_krylovdim, round(Int, Defaults.krylovdim_factor * χenv) + ) rrule_alg = (; tol=1e1tol, verbosity=verbosity - 2, krylovdim, svd_alg.rrule_alg...) 
svd_alg = (; rrule_alg, svd_alg...) end @@ -369,6 +371,8 @@ function select_algorithm( rrule_algorithm = if rrule_alg isa NamedTuple rrule_kwargs = (; alg=Defaults.svd_rrule_alg, + tol=Defaults.svd_rrule_tol, + krylovdim=Defaults.svd_rrule_min_krylovdim, verbosity=Defaults.svd_rrule_verbosity, rrule_alg..., ) # overwrite with specified kwargs @@ -386,6 +390,8 @@ function select_algorithm( rrule_kwargs.alg end rrule_kwargs = Base.structdiff(rrule_kwargs, (; alg=nothing)) # remove `alg` keyword argument + rrule_type <: BiCGStab && + (rrule_kwargs = Base.structdiff(rrule_kwargs, (; krylovdim=nothing))) # BiCGStab doens't take `krylovdim` rrule_type(; rrule_kwargs...) else rrule_alg diff --git a/test/ctmrg/gradients.jl b/test/ctmrg/gradients.jl index cce77090..56c9f579 100644 --- a/test/ctmrg/gradients.jl +++ b/test/ctmrg/gradients.jl @@ -31,23 +31,17 @@ gradmodes = [ GeomSum(; tol=gradtol, iterscheme=:diffgauge), ManualIter(; tol=gradtol, iterscheme=:fixed), ManualIter(; tol=gradtol, iterscheme=:diffgauge), - LinSolver(; solver_alg=KrylovKit.BiCGStab(; tol=gradtol), iterscheme=:fixed), - LinSolver(; solver_alg=KrylovKit.BiCGStab(; tol=gradtol), iterscheme=:diffgauge), - EigSolver(; - solver_alg=KrylovKit.Arnoldi(; tol=gradtol, eager=true), iterscheme=:fixed - ), - EigSolver(; - solver_alg=KrylovKit.Arnoldi(; tol=gradtol, eager=true), iterscheme=:diffgauge - ), + LinSolver(; solver_alg=BiCGStab(; tol=gradtol), iterscheme=:fixed), + LinSolver(; solver_alg=BiCGStab(; tol=gradtol), iterscheme=:diffgauge), + EigSolver(; solver_alg=Arnoldi(; tol=gradtol, eager=true), iterscheme=:fixed), + EigSolver(; solver_alg=Arnoldi(; tol=gradtol, eager=true), iterscheme=:diffgauge), ], [ # Only use :diffgauge due to high gauge-sensitivity (perhaps due to small χenv?) 
nothing, GeomSum(; tol=gradtol, iterscheme=:diffgauge), ManualIter(; tol=gradtol, iterscheme=:diffgauge), - LinSolver(; solver_alg=KrylovKit.BiCGStab(; tol=gradtol), iterscheme=:diffgauge), - EigSolver(; - solver_alg=KrylovKit.Arnoldi(; tol=gradtol, eager=true), iterscheme=:diffgauge - ), + LinSolver(; solver_alg=BiCGStab(; tol=gradtol), iterscheme=:diffgauge), + EigSolver(; solver_alg=Arnoldi(; tol=gradtol, eager=true), iterscheme=:diffgauge), ], ] steps = -0.01:0.005:0.01 diff --git a/test/heisenberg.jl b/test/heisenberg.jl index 6552fb19..3fdf8a60 100644 --- a/test/heisenberg.jl +++ b/test/heisenberg.jl @@ -91,7 +91,7 @@ end peps, env; tol=gradtol, - boundary_alg=(; svd_rrule_alg=GMRES, svd_rrule_tol=1e-5), + boundary_alg=(; svd_alg=(; rrule_alg=(; alg=:gmres, tol=1e-5))), ) # sensitivity warnings and degeneracies due to SU(2)? ξ_h, ξ_v, = correlation_length(peps_final, env_final) e_site2 = E_final / (N1 * N2) From 1bb64e94f948d14cb0c92e52011573f08de63778 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Thu, 6 Mar 2025 14:11:35 +0100 Subject: [PATCH 39/52] Fix more tests --- src/Defaults.jl | 2 +- src/algorithms/optimization/peps_optimization.jl | 2 +- test/ctmrg/jacobian_real_linear.jl | 6 +++--- test/j1j2_model.jl | 2 +- test/pwave.jl | 2 +- test/utility/svd_wrapper.jl | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/Defaults.jl b/src/Defaults.jl index 44549434..5896b97c 100644 --- a/src/Defaults.jl +++ b/src/Defaults.jl @@ -89,7 +89,7 @@ const sparse = false # TODO: implement sparse CTMRG const trscheme = :fixedspace # ∈ {:fixedspace, :notrunc, :truncerr, :truncspace, :truncbelow} const svd_fwd_alg = :sdd # ∈ {:sdd, :svd, :iterative} const svd_rrule_tol = ctmrg_tol -const svd_rrule_min_krylovdim = 32 +const svd_rrule_min_krylovdim = 48 const svd_rrule_alg = :arnoldi # ∈ {:gmres, :bicgstab, :arnoldi} const svd_rrule_verbosity = -1 const krylovdim_factor = 1.4 diff --git a/src/algorithms/optimization/peps_optimization.jl 
b/src/algorithms/optimization/peps_optimization.jl index aba8111f..167dc1ee 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -94,7 +94,7 @@ keyword arguments are: ### Optimizer settings -Supply the optimizer algorithm via `optimization_alg::Union{NamedTuple,<:OptimKit.OptimizationAlgorithm}` +Supply the optimizer algorithm via `optimizer_alg::Union{NamedTuple,<:OptimKit.OptimizationAlgorithm}` using either a `NamedTuple` of keyword arguments or a `OptimKit.OptimizationAlgorithm` directly. By default, `OptimKit.LBFGS` is used in combination with a `HagerZhangLineSearch`. The supported keyword arguments are: diff --git a/test/ctmrg/jacobian_real_linear.jl b/test/ctmrg/jacobian_real_linear.jl index 1f51791a..68d34835 100644 --- a/test/ctmrg/jacobian_real_linear.jl +++ b/test/ctmrg/jacobian_real_linear.jl @@ -6,9 +6,9 @@ using TensorKit, KrylovKit, PEPSKit using PEPSKit: ctmrg_iteration, gauge_fix, fix_relative_phases, fix_global_phases algs = [ - (:fixed, SimultaneousCTMRG(; projector_alg=HalfInfiniteProjector)), - (:diffgauge, SequentialCTMRG(; projector_alg=HalfInfiniteProjector)), - (:diffgauge, SimultaneousCTMRG(; projector_alg=HalfInfiniteProjector)), + (:fixed, SimultaneousCTMRG(; projector_alg=:halfinfinite)), + (:diffgauge, SequentialCTMRG(; projector_alg=:halfinfinite)), + (:diffgauge, SimultaneousCTMRG(; projector_alg=:halfinfinite)), # TODO: FullInfiniteProjector errors since even real_err_∂A, real_err_∂x are finite? 
# (:fixed, SimultaneousCTMRG(; projector_alg=FullInfiniteProjector)), # (:diffgauge, SequentialCTMRG(; projector_alg=FullInfiniteProjector)), diff --git a/test/j1j2_model.jl b/test/j1j2_model.jl index 45588a0c..84b87ada 100644 --- a/test/j1j2_model.jl +++ b/test/j1j2_model.jl @@ -23,7 +23,7 @@ peps, env, E, = fixedpoint( env₀; tol=1e-3, gradient_alg=(; iterscheme=:diffgauge), - optimization_alg=(; symmetrization=RotateReflect()), + symmetrization=RotateReflect() ) ξ_h, ξ_v, = correlation_length(peps, env) diff --git a/test/pwave.jl b/test/pwave.jl index d2c70833..e0745605 100644 --- a/test/pwave.jl +++ b/test/pwave.jl @@ -20,7 +20,7 @@ peps₀ = InfinitePEPS(Pspace, Vspace, Vspace; unitcell) env₀, = leading_boundary(CTMRGEnv(peps₀, Envspace), peps₀) # find fixedpoint -_, _, E, = fixedpoint(H, peps₀, env₀; tol=1e-3, optimization_alg=(; maxiter=10)) +_, _, E, = fixedpoint(H, peps₀, env₀; tol=1e-3, optimizer_alg=(; maxiter=10)) # comparison with Gaussian PEPS minimum at D=2 on 1000x1000 square lattice with aPBC @test E / prod(size(peps₀)) ≈ -2.6241 atol = 5e-2 diff --git a/test/utility/svd_wrapper.jl b/test/utility/svd_wrapper.jl index 61af7940..e923a706 100644 --- a/test/utility/svd_wrapper.jl +++ b/test/utility/svd_wrapper.jl @@ -25,7 +25,7 @@ r = randn(dtype, ℂ^m, ℂ^n) R = randn(space(r)) full_alg = SVDAdjoint(; rrule_alg=nothing) -iter_alg = SVDAdjoint(; fwd_alg=IterSVD()) +iter_alg = SVDAdjoint(; fwd_alg=(; alg=:iterative)) @testset "Non-truncacted SVD" begin l_fullsvd, g_fullsvd = withgradient(A -> lossfun(A, full_alg, R), r) From e1cf0816f152abca42880b54e0e6ccd244c553e1 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Thu, 6 Mar 2025 16:27:57 +0100 Subject: [PATCH 40/52] Update docstrings --- src/Defaults.jl | 93 +++++++------------ src/algorithms/ctmrg/ctmrg.jl | 14 ++- src/algorithms/ctmrg/projectors.jl | 32 ++++++- .../fixed_point_differentiation.jl | 73 +++++++++------ .../optimization/peps_optimization.jl | 35 +++---- src/algorithms/select_algorithm.jl | 7 
+- src/utility/svd.jl | 15 ++- test/j1j2_model.jl | 2 +- 8 files changed, 156 insertions(+), 115 deletions(-) diff --git a/src/Defaults.jl b/src/Defaults.jl index 5896b97c..7295aa9d 100644 --- a/src/Defaults.jl +++ b/src/Defaults.jl @@ -4,76 +4,49 @@ Module containing default algorithm parameter values and arguments. # CTMRG -- `ctmrg_tol=1e-8`: Tolerance checking singular value and norm convergence -- `ctmrg_maxiter=100`: Maximal number of CTMRG iterations per run -- `ctmrg_miniter=4`: Minimal number of CTMRG carried out -- `ctmrg_alg_type=SimultaneousCTMRG`: Default CTMRG algorithm variant -- `ctmrg_verbosity=2`: CTMRG output information verbosity -- `trscheme=FixedSpaceTruncation()`: Truncation scheme for SVDs and other decompositions -- `svd_fwd_alg=TensorKit.SDD()`: SVD algorithm that is used in the forward pass -- `svd_rrule_type = Arnoldi`: Default solver type for SVD reverse-rule algorithm -- `svd_rrule_alg`: Reverse-rule algorithm for differentiating a SVD - ``` - svd_rrule_alg = svd_rrule_type(; tol=ctmrg_tol, krylovdim=48, verbosity=-1) - ``` +- `ctmrg_tol=$(Defaults.ctmrg_tol)`: Tolerance checking singular value and norm convergence. +- `ctmrg_maxiter=$(Defaults.ctmrg_maxiter)`: Maximal number of CTMRG iterations per run. +- `ctmrg_miniter=$(Defaults.ctmrg_miniter)`: Minimal number of CTMRG carried out. +- `ctmrg_alg=:$(Defaults.ctmrg_alg)`: Default CTMRG algorithm variant. +- `ctmrg_verbosity=$(Defaults.ctmrg_verbosity)`: CTMRG output information verbosity -- `svd_alg`: Combination of forward and reverse SVD algorithms +# SVD forward & reverse - ``` - svd_alg=SVDAdjoint(; fwd_alg=svd_fwd_alg, rrule_alg=svd_rrule_alg) - ``` +- `trscheme=:$(Defaults.trscheme)`: Truncation scheme for SVDs and other decompositions. +- `svd_fwd_alg=:$(Defaults.svd_fwd_alg)`: SVD algorithm that is used in the forward pass. +- `svd_rrule_tol=$(Defaults.svd_rrule_tol)`: Accuracy of SVD reverse-rule. 
+- `svd_rrule_min_krylovdim=$(Defaults.svd_rrule_min_krylovdim)`: Minimal Krylov dimension of the reverse-rule algorithm (if it is a Krylov algorithm). +- `svd_rrule_verbosity=$(Defaults.svd_rrule_verbosity)`: SVD gradient output verbosity. +- `svd_rrule_alg=:$(Defaults.svd_rrule_alg)`: Reverse-rule algorithm for the SVD gradient. -- `projector_alg_type=HalfInfiniteProjector`: Default type of projector algorithm -- `projector_alg`: Algorithm to compute CTMRG projectors +# Projectors - ``` - projector_alg = projector_alg_type(; svd_alg, trscheme, verbosity=0) - ``` +- `projector_alg=:$(Defaults.projector_alg)`: Default variant of the CTMRG projector algorithm. +- `projector_verbosity=$(Defaults.projector_verbosity)`: Projector output information verbosity. -- `ctmrg_alg`: Algorithm for performing CTMRG runs +# Fixed-point gradient - ``` - ctmrg_alg = ctmrg_alg_type( - ctmrg_tol, ctmrg_maxiter, ctmrg_miniter, 2, projector_alg - ) - ``` +- `gradient_tol=$(Defaults.gradient_tol)`: Convergence tolerance for the fixed-point gradient iteration. +- `gradient_maxiter=$(Defaults.gradient_maxiter)`: Maximal number of iterations for computing the CTMRG fixed-point gradient. +- `gradient_verbosity=$(Defaults.gradient_verbosity)`: Gradient output information verbosity. +- `gradient_linsolver=:$(Defaults.gradient_linsolver)`: Default linear solver for the `LinSolver` gradient algorithm. +- `gradient_eigsolver=:$(Defaults.gradient_eigsolver)`: Default eigensolver for the `EigSolver` gradient algorithm. +- `gradient_eigsolver_eager=$(Defaults.gradient_eigsolver_eager)`: Enables `EigSolver` algorithm to finish before the full Krylov dimension is reached. +- `gradient_iterscheme=:$(Defaults.gradient_iterscheme)`: Scheme for differentiating one CTMRG iteration. +- `gradient_alg=:$(Defaults.gradient_alg)`: Algorithm variant for computing the gradient fixed-point. 
# Optimization -- `gradient_tol=1e-6`: Convergence tolerance for the fixed-point gradient iteration -- `gradient_maxiter=30`: Maximal number of iterations for computing the CTMRG fixed-point gradient -- `gradient_iterscheme=:fixed`: Scheme for differentiating one CTMRG iteration -- `gradient_linsolver`: Default linear solver for the `LinSolver` gradient algorithm - - ``` - gradient_linsolver=KrylovKit.BiCGStab(; maxiter=gradient_maxiter, tol=gradient_tol) - ``` - -- `gradient_eigsolver`: Default eigsolver for the `EigSolver` gradient algorithm - - ``` - gradient_eigsolver = KrylovKit.Arnoldi(; maxiter=gradient_maxiter, tol=gradient_tol, eager=true) - ``` -- `gradient_alg`: Algorithm to compute the gradient fixed-point - - ``` - gradient_alg = LinSolver(; solver_alg=gradient_linsolver, iterscheme=gradient_iterscheme) - ``` - -- `reuse_env=true`: If `true`, the current optimization step is initialized on the previous - environment, otherwise a random environment is used -- `optimizer_tol=1e-4`: Gradient norm tolerance of the optimizer -- `optimizer_maxiter=100`: Maximal number of optimization steps -- `lbfgs_memory=20`: Size of limited memory representation of BFGS Hessian matrix -- `optimizer`: Default `OptimKit.OptimizerAlgorithm` for PEPS optimization - - ``` - optimizer=LBFGS(lbfgs_memory; maxiter=optimizer_maxiter, gradtol=optimizer_tol, verbosity=3) - ``` +- `reuse_env=$(Defaults.reuse_env)`: If `true`, the current optimization step is initialized on the previous environment, otherwise a random environment is used. +- `optimizer_tol=$(Defaults.optimizer_tol)`: Gradient norm tolerance of the optimizer. +- `optimizer_maxiter=$(Defaults.optimizer_maxiter)`: Maximal number of optimization steps. +- `optimizer_verbosity=$(Defaults.optimizer_verbosity)`: Optimizer output information verbosity. +- `optimizer_alg=:$(Defaults.optimizer_alg)`: Default `OptimKit.OptimizerAlgorithm` for PEPS optimization. 
+- `lbfgs_memory=$(Defaults.lbfgs_memory)`: Size of limited memory representation of BFGS Hessian matrix. # OhMyThreads scheduler -- `scheduler=Ref{Scheduler}(...)`: Multi-threading scheduler which can be accessed via `set_scheduler!` +- `scheduler=Ref{Scheduler}(...)`: Multi-threading scheduler which can be accessed via `set_scheduler!`. """ module Defaults using OhMyThreads @@ -90,11 +63,11 @@ const trscheme = :fixedspace # ∈ {:fixedspace, :notrunc, :truncerr, :truncspac const svd_fwd_alg = :sdd # ∈ {:sdd, :svd, :iterative} const svd_rrule_tol = ctmrg_tol const svd_rrule_min_krylovdim = 48 -const svd_rrule_alg = :arnoldi # ∈ {:gmres, :bicgstab, :arnoldi} const svd_rrule_verbosity = -1 +const svd_rrule_alg = :arnoldi # ∈ {:gmres, :bicgstab, :arnoldi} const krylovdim_factor = 1.4 -# Projector +# Projectors const projector_alg = :halfinfinite # ∈ {:halfinfinite, :fullinfinite} const projector_verbosity = 0 diff --git a/src/algorithms/ctmrg/ctmrg.jl b/src/algorithms/ctmrg/ctmrg.jl index 2ab0aff7..be2a71a0 100644 --- a/src/algorithms/ctmrg/ctmrg.jl +++ b/src/algorithms/ctmrg/ctmrg.jl @@ -34,13 +34,19 @@ supplied via the keyword arguments or directly as an [`CTMRGAlgorithm`](@ref) st 2. Initialization and convergence info 3. Iteration info 4. Debug info -* `alg::Union{Symbol,Type{CTMRGAlgorithm}}=$(Defaults.ctmrg_alg)`: Variant of the CTMRG algorithm. See also [`CTMRGAlgorithm`](@ref). +* `alg::Union{Symbol,Type{CTMRGAlgorithm}}=:$(Defaults.ctmrg_alg)`: Variant of the CTMRG algorithm. See also [`CTMRGAlgorithm`](@ref). ### Projector algorithm -* `trscheme::Union{TruncationScheme,NamedTuple}=(; alg=$(Defaults.trscheme))`: Truncation scheme for the projector computation, which controls the resulting virtual spaces. -* `svd_alg::Union{<:SVDAdjoint,NamedTuple}`: SVD algorithm for computing projectors. See also [`PEPSKit.tsvd`](@ref). -* `projector_alg::Union{Symbol,Type{ProjectorAlgorithm}}=$(Defaults.projector_alg)`: Variant of the projector algorithm. 
See also [`ProjectorAlgorithm`](@ref). +* `trscheme::Union{TruncationScheme,NamedTuple}=(; alg=:$(Defaults.trscheme))`: Truncation scheme for the projector computation, which controls the resulting virtual spaces. Here, `alg` can be any `TensorKit.TruncationScheme` type or one of the following symbols: + - `:fixedspace`: Keep virtual spaces fixed during projection + - `:notrunc`: No singular values are truncated and the performed SVDs are exact + - `:truncerr`: Additionally supply error threshold `η`; truncate to the maximal virtual dimension of `η` + - `:truncdim`: Additionally supply truncation dimension `η`; truncate such that the 2-norm of the truncated values is smaller than `η` + - `:truncspace`: Additionally supply truncation space `η`; truncate according to the supplied vector space + - `:truncbelow`: Additionally supply singular value cutoff `η`; truncate such that every retained singular value is larger than `η` +* `svd_alg::Union{<:SVDAdjoint,NamedTuple}`: SVD algorithm for computing projectors. See also [`SVDAdjoint`](@ref). By default, a reverse-rule tolerance of `tol=1e1tol` where the `krylovdim` is adapted to the `env₀` environment dimension. +* `projector_alg::Union{Symbol,Type{ProjectorAlgorithm}}=:$(Defaults.projector_alg)`: Variant of the projector algorithm. See also [`ProjectorAlgorithm`](@ref). """ function MPSKit.leading_boundary(env₀::CTMRGEnv, network::InfiniteSquareNetwork; kwargs...) alg = select_algorithm(leading_boundary, env₀; kwargs...) diff --git a/src/algorithms/ctmrg/projectors.jl b/src/algorithms/ctmrg/projectors.jl index 85f6d467..a8d34c66 100644 --- a/src/algorithms/ctmrg/projectors.jl +++ b/src/algorithms/ctmrg/projectors.jl @@ -33,9 +33,23 @@ function truncation_scheme(alg::ProjectorAlgorithm, edge) end """ - struct HalfInfiniteProjector{S,T}(; svd_alg=TODO, trscheme=TODO, verbosity=0) + struct HalfInfiniteProjector{S,T} Projector algorithm implementing projectors from SVDing the half-infinite CTMRG environment. 
+ +## Keyword arguments + +* `svd_alg::Union{<:SVDAdjoint,NamedTuple}=SVDAdjoint()`: SVD algorithm including the reverse rule. See ['SVDAdjoint'](@ref). +* `trscheme::Union{TruncationScheme,NamedTuple}=(; alg=:$(Defaults.trscheme))`: Truncation scheme for the projector computation, which controls the resulting virtual spaces. Here, `alg` can be any `TensorKit.TruncationScheme` type or one of the following symbols: + - `:fixedspace`: Keep virtual spaces fixed during projection + - `:notrunc`: No singular values are truncated and the performed SVDs are exact + - `:truncerr`: Additionally supply error threshold `η`; truncate to the maximal virtual dimension of `η` + - `:truncdim`: Additionally supply truncation dimension `η`; truncate such that the 2-norm of the truncated values is smaller than `η` + - `:truncspace`: Additionally supply truncation space `η`; truncate according to the supplied vector space + - `:truncbelow`: Additionally supply singular value cutoff `η`; truncate such that every retained singular value is larger than `η` +* `verbosity::Int=$(Defaults.projector_verbosity)`: Projector output verbosity which can be: + 0. Suppress output information + 1. Print singular value degeneracy warnings """ struct HalfInfiniteProjector{S<:SVDAdjoint,T} <: ProjectorAlgorithm svd_alg::S @@ -47,9 +61,23 @@ function HalfInfiniteProjector(; kwargs...) end """ - struct FullInfiniteProjector{S,T}(; svd_alg=TODO, trscheme=TODO, verbosity=0) + struct FullInfiniteProjector{S,T} Projector algorithm implementing projectors from SVDing the full 4x4 CTMRG environment. + +## Keyword arguments + +* `svd_alg::Union{<:SVDAdjoint,NamedTuple}=SVDAdjoint()`: SVD algorithm including the reverse rule. See ['SVDAdjoint'](@ref). +* `trscheme::Union{TruncationScheme,NamedTuple}=(; alg=:$(Defaults.trscheme))`: Truncation scheme for the projector computation, which controls the resulting virtual spaces. 
Here, `alg` can be any `TensorKit.TruncationScheme` type or one of the following symbols: + - `:fixedspace`: Keep virtual spaces fixed during projection + - `:notrunc`: No singular values are truncated and the performed SVDs are exact + - `:truncerr`: Additionally supply error threshold `η`; truncate to the maximal virtual dimension of `η` + - `:truncdim`: Additionally supply truncation dimension `η`; truncate such that the 2-norm of the truncated values is smaller than `η` + - `:truncspace`: Additionally supply truncation space `η`; truncate according to the supplied vector space + - `:truncbelow`: Additionally supply singular value cutoff `η`; truncate such that every retained singular value is larger than `η` +* `verbosity::Int=$(Defaults.projector_verbosity)`: Projector output verbosity which can be: + 0. Suppress output information + 1. Print singular value degeneracy warnings """ struct FullInfiniteProjector{S<:SVDAdjoint,T} <: ProjectorAlgorithm svd_alg::S diff --git a/src/algorithms/optimization/fixed_point_differentiation.jl b/src/algorithms/optimization/fixed_point_differentiation.jl index 76e5cff1..e288d840 100644 --- a/src/algorithms/optimization/fixed_point_differentiation.jl +++ b/src/algorithms/optimization/fixed_point_differentiation.jl @@ -3,16 +3,21 @@ abstract type GradMode{F} end iterscheme(::GradMode{F}) where {F} = F """ - struct GeomSum(; tol=$(Defaults.gradient_tol), maxiter=$(Defaults.gradient_maxiter), - verbosity=0, iterscheme=$(Defaults.gradient_iterscheme)) <: GradMode{iterscheme} + struct GeomSum <: GradMode{iterscheme} Gradient mode for CTMRG using explicit evaluation of the geometric sum. -With `iterscheme` the style of CTMRG iteration which is being differentiated can be chosen. -If set to `:fixed`, the differentiated CTMRG iteration is assumed to have a pre-computed -SVD of the environments with a fixed set of gauges. 
Alternatively, if set to `:diffgauge`, -the differentiated iteration consists of a CTMRG iteration and a subsequent gauge fixing step, -such that `gauge_fix` will also be differentiated everytime a CTMRG derivative is computed. +## Keyword arguments + +* `tol::Real=$(Defaults.gradient_tol)`: Convergence tolerance for the difference of norms of two consecutive summands in the geometric sum. +* `maxiter::Int=$(Defaults.gradient_maxiter)`: Maximal number of gradient iterations. +* `verbosity::Int=$(Defaults.gradient_verbosity)`: Output information verbosity that can be one of the following: + 0. Suppress output information + 1. Print convergence warnings + 2. Information at each gradient iteration +* `iterscheme::Symbol=:$(Defaults.gradient_iterscheme)`: Style of CTMRG iteration which is being differentiated, which can be: + - `:fixed`: the differentiated CTMRG iteration uses a pre-computed SVD with a fixed set of gauges + - `:diffgauge`: the differentiated iteration consists of a CTMRG iteration and a subsequent gauge-fixing step such that the gauge-fixing procedure is differentiated as well """ struct GeomSum{F} <: GradMode{F} tol::Real @@ -22,16 +27,21 @@ end GeomSum(; kwargs...) = select_algorithm(GradMode; alg=:geomsum, kwargs...) """ - struct ManualIter(; tol=$(Defaults.gradient_tol), maxiter=$(Defaults.gradient_maxiter), - verbosity=0, iterscheme=$(Defaults.gradient_iterscheme)) <: GradMode{iterscheme} + struct ManualIter <: GradMode{iterscheme} Gradient mode for CTMRG using manual iteration to solve the linear problem. -With `iterscheme` the style of CTMRG iteration which is being differentiated can be chosen. -If set to `:fixed`, the differentiated CTMRG iteration is assumed to have a pre-computed -SVD of the environments with a fixed set of gauges. 
Alternatively, if set to `:diffgauge`, -the differentiated iteration consists of a CTMRG iteration and a subsequent gauge fixing step, -such that `gauge_fix` will also be differentiated everytime a CTMRG derivative is computed. +## Keyword arguments + +* `tol::Real=$(Defaults.gradient_tol)`: Convergence tolerance for the norm difference of two consecutive `dx` contributions. +* `maxiter::Int=$(Defaults.gradient_maxiter)`: Maximal number of gradient iterations. +* `verbosity::Int=$(Defaults.gradient_verbosity)`: Output information verbosity that can be one of the following: + 0. Suppress output information + 1. Print convergence warnings + 2. Information at each gradient iteration +* `iterscheme::Symbol=:$(Defaults.gradient_iterscheme)`: Style of CTMRG iteration which is being differentiated, which can be: + - `:fixed`: the differentiated CTMRG iteration uses a pre-computed SVD with a fixed set of gauges + - `:diffgauge`: the differentiated iteration consists of a CTMRG iteration and a subsequent gauge-fixing step such that the gauge-fixing procedure is differentiated as well """ struct ManualIter{F} <: GradMode{F} tol::Real @@ -41,16 +51,22 @@ end ManualIter(; kwargs...) = select_algorithm(GradMode; alg=:manualiter, kwargs...) """ - struct LinSolver(; solver_alg=TODO, iterscheme=$(Defaults.gradient_iterscheme)) <: GradMode{iterscheme} + struct LinSolver <: GradMode{iterscheme} Gradient mode wrapper around `KrylovKit.LinearSolver` for solving the gradient linear problem using iterative solvers. -With `iterscheme` the style of CTMRG iteration which is being differentiated can be chosen. -If set to `:fixed`, the differentiated CTMRG iteration is assumed to have a pre-computed -SVD of the environments with a fixed set of gauges. Alternatively, if set to `:diffgauge`, -the differentiated iteration consists of a CTMRG iteration and a subsequent gauge fixing step, -such that `gauge_fix` will also be differentiated everytime a CTMRG derivative is computed. 
+## Keyword arguments + +* `tol::Real=$(Defaults.gradient_tol)`: Convergence tolerance of the linear solver. +* `maxiter::Int=$(Defaults.gradient_maxiter)`: Maximal number of solver iterations. +* `verbosity::Int=$(Defaults.gradient_verbosity)`: Output information verbosity of the linear solver. +* `iterscheme::Symbol=:$(Defaults.gradient_iterscheme)`: Style of CTMRG iteration which is being differentiated, which can be: + - `:fixed`: the differentiated CTMRG iteration uses a pre-computed SVD with a fixed set of gauges + - `:diffgauge`: the differentiated iteration consists of a CTMRG iteration and a subsequent gauge-fixing step such that the gauge-fixing procedure is differentiated as well +* `solver_alg::Union{KrylovKit.LinearSolver,NamedTuple}=(; alg=:$(Defaults.gradient_linsolver)`: Linear solver algorithm which, if supplied directly as a `KrylovKit.LinearSolver` overrides the above specified `tol`, `maxiter` and `verbosity`. Alternatively, it can be supplied via a `NamedTuple` where `alg` can be a `KrylovKit.LinearSolver` type or the corresponding symbol: + - `:gmres`: GMRES iterative linear solver, see the [KrylovKit docs](https://jutho.github.io/KrylovKit.jl/stable/man/algorithms/#KrylovKit.GMRES) for details + - `:bicgstab`: BiCGStab iterative linear solver, see the [KrylovKit docs](https://jutho.github.io/KrylovKit.jl/stable/man/algorithms/#KrylovKit.BiCGStab) for details """ struct LinSolver{F} <: GradMode{F} solver_alg::KrylovKit.LinearSolver @@ -58,16 +74,21 @@ end LinSolver(; kwargs...) = select_algorithm(GradMode; alg=:linsolver, kwargs...) """ - struct EigSolver(; solver_alg=TODO, iterscheme=$(Defaults.gradient_iterscheme)) <: GradMode{iterscheme} + struct EigSolver <: GradMode{iterscheme} Gradient mode wrapper around `KrylovKit.KrylovAlgorithm` for solving the gradient linear problem as an eigenvalue problem. -With `iterscheme` the style of CTMRG iteration which is being differentiated can be chosen. 
-If set to `:fixed`, the differentiated CTMRG iteration is assumed to have a pre-computed -SVD of the environments with a fixed set of gauges. Alternatively, if set to `:diffgauge`, -the differentiated iteration consists of a CTMRG iteration and a subsequent gauge fixing step, -such that `gauge_fix` will also be differentiated everytime a CTMRG derivative is computed. +## Keyword arguments + +* `tol::Real=$(Defaults.gradient_tol)`: Convergence tolerance of the linear solver. +* `maxiter::Int=$(Defaults.gradient_maxiter)`: Maximal number of solver iterations. +* `verbosity::Int=$(Defaults.gradient_verbosity)`: Output information verbosity of the linear solver. +* `iterscheme::Symbol=:$(Defaults.gradient_iterscheme)`: Style of CTMRG iteration which is being differentiated, which can be: + - `:fixed`: the differentiated CTMRG iteration uses a pre-computed SVD with a fixed set of gauges + - `:diffgauge`: the differentiated iteration consists of a CTMRG iteration and a subsequent gauge-fixing step such that the gauge-fixing procedure is differentiated as well +* `solver_alg::Union{KrylovKit.KrylovAlgorithm,NamedTuple}=(; alg=:$(Defaults.gradient_eigsolver)`: Linear solver algorithm which, if supplied directly as a `KrylovKit.KrylovAlgorithm` overrides the above specified `tol`, `maxiter` and `verbosity`. 
Alternatively, it can be supplied via a `NamedTuple` where `alg` can be a `KrylovKit.KrylovAlgorithm` type or the corresponding symbol: + - `:arnoldi`: Arnoldi Krylov algorithm, see the [KrylovKit docs](https://jutho.github.io/KrylovKit.jl/stable/man/algorithms/#KrylovKit.Arnoldi) for details """ struct EigSolver{F} <: GradMode{F} solver_alg::KrylovKit.KrylovAlgorithm diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index 167dc1ee..ff366caa 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -64,8 +64,8 @@ The optimization parameters can be supplied via the keyword arguments or directl 1. Optimizer output and warnings 2. Additionally print boundary information 3. All information including AD debug outputs -* `reuse_env=$(Defaults.reuse_env)`: If `true`, the current optimization step is initialized on the previous environment, otherwise a random environment is used. -* `symmetrization=nothing`: Accepts `nothing` or a `SymmetrizationStyle`, in which case the PEPS and PEPS gradient are symmetrized after each optimization iteration. +* `reuse_env::Bool=$(Defaults.reuse_env)`: If `true`, the current optimization step is initialized on the previous environment, otherwise a random environment is used. +* `symmetrization::Union{Nothing,SymmetrizationStyle}=nothing`: Accepts `nothing` or a `SymmetrizationStyle`, in which case the PEPS and PEPS gradient are symmetrized after each optimization iteration. * `(finalize!)=OptimKit._finalize!`: Inserts a `finalize!` function call after each optimization step by utilizing the `finalize!` kwarg of `OptimKit.optimize`. The function maps `(peps, env), f, g = finalize!((peps, env), f, g, numiter)`. 
### Boundary algorithm @@ -73,6 +73,7 @@ The optimization parameters can be supplied via the keyword arguments or directl Supply boundary algorithm parameters via `boundary_alg::Union{NamedTuple,<:CTMRGAlgorithm}` using either a `NamedTuple` of keyword arguments or a `CTMRGAlgorithm` directly. See [`leading_boundary`](@ref) for a description of all possible keyword arguments. +By default, a CTMRG tolerance of `tol=1e-4tol` and is used. ### Gradient algorithm @@ -80,17 +81,17 @@ Supply gradient algorithm parameters via `gradient_alg::Union{NamedTuple,<:GradM either a `NamedTuple` of keyword arguments or a `GradMode` struct directly. The supported keyword arguments are: -* `tol=1e-2tol`: Convergence tolerance for the fixed-point gradient iteration. -* `maxiter=$(Defaults.gradient_maxiter)`: Maximal number of gradient problem iterations. -* `alg=$(Defaults.gradient_alg)`: Gradient algorithm type, can be any `GradMode` type or the corresponding symbol: +* `tol::Real=1e-2tol`: Convergence tolerance for the fixed-point gradient iteration. +* `maxiter::Int=$(Defaults.gradient_maxiter)`: Maximal number of gradient problem iterations. +* `alg::Union{Symbol,Type{GradMode}}=:$(Defaults.gradient_alg)`: Gradient algorithm type, can be any `GradMode` type or the corresponding symbol: - `:geomsum`: Compute gradient directly from the geometric sum, see [`GeomSum`](@ref) - `:manualiter`: Iterate gradient geometric sum manually, see ['ManualIter'](@ref) - `:linsolver`: Solve fixed-point gradient linear problem using iterative solver, see ['LinSolver'](@ref) - `:eigsolver`: Determine gradient via eigenvalue formulation of its Sylvester equation, see [`EigSolver`](@ref) -* `verbosity`: Gradient output verbosity, ≤0 by default to disable too verbose printing. Should only be >0 for debug purposes. -* `iterscheme=$(Defaults.gradient_iterscheme)`: CTMRG iteration scheme determining mode of differentiation. 
This can be: - - `:fixed`: Use SVD with fixed gauge in for reverse pass - - `:diffgauge`: Differentiate gauge-fixing routine in addition to CTMRG iteration +* `verbosity::Int`: Gradient output verbosity, ≤0 by default to disable too verbose printing. Should only be >0 for debug purposes. +* `iterscheme::Symbol=:$(Defaults.gradient_iterscheme)`: CTMRG iteration scheme determining mode of differentiation. This can be: + - `:fixed`: the differentiated CTMRG iteration uses a pre-computed SVD with a fixed set of gauges + - `:diffgauge`: the differentiated iteration consists of a CTMRG iteration and a subsequent gauge-fixing step such that the gauge-fixing procedure is differentiated as well ### Optimizer settings @@ -99,14 +100,14 @@ using either a `NamedTuple` of keyword arguments or a `OptimKit.OptimizationAlgo `OptimKit.LBFGS` is used in combination with a `HagerZhangLineSearch`. The supported keyword arguments are: -* `alg=$(Defaults.optimizer_alg)`: Optimizer algorithm, can be any `OptimKit.OptimizationAlgorithm` type or the corresponding symbol: - - `gradientdescent`: Gradient descent algorithm, see the [OptimKit README](https://github.com/Jutho/OptimKit.jl) - - `conjugategradient`: Conjugate gradient algorithm, see the [OptimKit README](https://github.com/Jutho/OptimKit.jl) - - `lbfgs`: L-BFGS algorithm, see the [OptimKit README](https://github.com/Jutho/OptimKit.jl) -* `tol=tol`: Gradient norm tolerance of the optimizer. -* `maxiter=$(Defaults.optimizer_maxiter)`: Maximal number of optimization steps. -* `verbosity=$(Defaults.optimizer_verbosity)`: Optimizer output verbosity. -* `lbfgs_memory=$(Defaults.lbfgs_memory)`: Size of limited memory representation of BFGS Hessian matrix. 
+* `alg::Union{Symbol,Type{OptimKit.OptimizationAlgorithm}}=:$(Defaults.optimizer_alg)`: Optimizer algorithm, can be any `OptimKit.OptimizationAlgorithm` type or the corresponding symbol: + - `:gradientdescent`: Gradient descent algorithm, see the [OptimKit README](https://github.com/Jutho/OptimKit.jl) + - `:conjugategradient`: Conjugate gradient algorithm, see the [OptimKit README](https://github.com/Jutho/OptimKit.jl) + - `:lbfgs`: L-BFGS algorithm, see the [OptimKit README](https://github.com/Jutho/OptimKit.jl) +* `tol::Real=tol`: Gradient norm tolerance of the optimizer. +* `maxiter::Int=$(Defaults.optimizer_maxiter)`: Maximal number of optimization steps. +* `verbosity::Int=$(Defaults.optimizer_verbosity)`: Optimizer output verbosity. +* `lbfgs_memory::Int=$(Defaults.lbfgs_memory)`: Size of limited memory representation of BFGS Hessian matrix. ## Return values diff --git a/src/algorithms/select_algorithm.jl b/src/algorithms/select_algorithm.jl index 2aee917a..828ef27c 100644 --- a/src/algorithms/select_algorithm.jl +++ b/src/algorithms/select_algorithm.jl @@ -316,7 +316,7 @@ function select_algorithm( end function select_algorithm( - ::Type{TensorKit.TruncationScheme}; alg=Defaults.trscheme, kwargs... + ::Type{TensorKit.TruncationScheme}; alg=Defaults.trscheme, η=nothing ) alg_type = if alg isa Symbol # replace Symbol with TruncationScheme type if alg == :fixedspace @@ -325,6 +325,8 @@ function select_algorithm( TensorKit.NoTruncation elseif alg == :truncerr TensorKit.TruncationError + elseif alg == :truncdim + TensorKit.TruncationDimension elseif alg == :truncspace TensorKit.TruncationSpace elseif alg == :truncbelow @@ -336,8 +338,7 @@ function select_algorithm( alg end - args = map(k -> last(kwargs[k]), keys(kwargs)) # extract only values of supplied kwargs (empty Tuple, if kwargs is empty) - return alg_type(args...) + return isnothing(arg) ? 
alg_type() : alg_type(η) end function select_algorithm( diff --git a/src/utility/svd.jl b/src/utility/svd.jl index 6d030318..f8b2f081 100644 --- a/src/utility/svd.jl +++ b/src/utility/svd.jl @@ -10,13 +10,24 @@ using TensorKit: const CRCExt = Base.get_extension(KrylovKit, :KrylovKitChainRulesCoreExt) """ - struct SVDAdjoint(; fwd_alg=TODO, rrule_alg=TODO, - broadening=nothing) + struct SVDAdjoint Wrapper for a SVD algorithm `fwd_alg` with a defined reverse rule `rrule_alg`. If `isnothing(rrule_alg)`, Zygote differentiates the forward call automatically. In case of degenerate singular values, one might need a `broadening` scheme which removes the divergences from the adjoint. + +## Keyword arguments + +* `fwd_alg::Union{Algorithm,NamedTuple}=(; alg=Defaults.svd_fwd_alg)`: SVD algorithm of the forward pass which can either be passed as an `Algorithm` instance or a `NamedTuple` where `alg` is an `Algorithm` type or the corresponding `Symbol`: + - `:sdd`: TensorKit's wrapper for LAPACK's `_gesdd` + - `:svd`: TensorKit's wrapper for LAPACK's `_gesvd` + - `:iterative`: Iterative SVD only computing the specifed number of singular values and vectors, see ['IterSVD'](@ref) +* `rrule_alg::Union{Algorithm,NamedTuple}=(; alg=Defaults.svd_rrule_alg)`: Reverse-rule algorithm for differentiating the SVD. 
Can be supplied by an `Algorithm` instance directly or as a `NamedTuple` where `alg` is an `Algorithm` type or the corresponding `Symbol`: + - `:gmres`: GMRES iterative linear solver, see the [KrylovKit docs](https://jutho.github.io/KrylovKit.jl/stable/man/algorithms/#KrylovKit.GMRES) for details + - `:bicgstab`: BiCGStab iterative linear solver, see the [KrylovKit docs](https://jutho.github.io/KrylovKit.jl/stable/man/algorithms/#KrylovKit.BiCGStab) for details + - `:arnoldi`: Arnoldi Krylov algorithm, see the [KrylovKit docs](https://jutho.github.io/KrylovKit.jl/stable/man/algorithms/#KrylovKit.Arnoldi) for details +* `broadening=nothing`: Broadening of singular value differences to stabilize the SVD gradient. Currently not implemented. """ struct SVDAdjoint{F,R,B} fwd_alg::F diff --git a/test/j1j2_model.jl b/test/j1j2_model.jl index 84b87ada..bd886bb1 100644 --- a/test/j1j2_model.jl +++ b/test/j1j2_model.jl @@ -23,7 +23,7 @@ peps, env, E, = fixedpoint( env₀; tol=1e-3, gradient_alg=(; iterscheme=:diffgauge), - symmetrization=RotateReflect() + symmetrization=RotateReflect(), ) ξ_h, ξ_v, = correlation_length(peps, env) From 52af6826605a53308dcea18591cdc955d72265e7 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Thu, 6 Mar 2025 16:33:08 +0100 Subject: [PATCH 41/52] Fix typo in TruncationScheme select_algorithm --- src/algorithms/select_algorithm.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/algorithms/select_algorithm.jl b/src/algorithms/select_algorithm.jl index 828ef27c..0f5c2b53 100644 --- a/src/algorithms/select_algorithm.jl +++ b/src/algorithms/select_algorithm.jl @@ -338,7 +338,7 @@ function select_algorithm( alg end - return isnothing(arg) ? alg_type() : alg_type(η) + return isnothing(η) ? 
alg_type() : alg_type(η) end function select_algorithm( From cd0b7814896db2591b5d95fc2ae81fbf9a3d7cbe Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Thu, 6 Mar 2025 17:42:30 +0100 Subject: [PATCH 42/52] Hopefully stabilize SU Heisenberg test on Windows --- test/heisenberg.jl | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/test/heisenberg.jl b/test/heisenberg.jl index 3fdf8a60..d9b9a97a 100644 --- a/test/heisenberg.jl +++ b/test/heisenberg.jl @@ -47,12 +47,13 @@ end @testset "Simple update into AD optimization" begin # random initialization of 2x2 iPEPS with weights and CTMRGEnv (using real numbers) - Random.seed!(234829) + Random.seed!(100) N1, N2 = 2, 2 Pspace = ℂ^2 Vspace = ℂ^Dbond Espace = ℂ^χenv wpeps = InfiniteWeightPEPS(rand, Float64, Pspace, Vspace; unitcell=(N1, N2)) + ctmrg_tol = 1e-6 # normalize vertex tensors for ind in CartesianIndices(wpeps.vertices) @@ -64,7 +65,7 @@ end ham = LocalOperator(ham.lattice, Tuple(ind => real(op) for (ind, op) in ham.terms)...) # simple update - dts = [1e-2, 1e-3, 4e-4, 1e-4] + dts = [1e-2, 1e-3, 1e-3, 1e-4] tols = [1e-7, 1e-8, 1e-8, 1e-8] maxiter = 5000 for (n, (dt, tol)) in enumerate(zip(dts, tols)) @@ -77,7 +78,7 @@ end # absorb weight into site tensors and CTMRG peps = InfinitePEPS(wpeps) - env, = leading_boundary(CTMRGEnv(rand, Float64, peps, Espace), peps) + env, = leading_boundary(CTMRGEnv(rand, Float64, peps, Espace), peps; tol=ctmrg_tol) # measure physical quantities e_site = cost_function(peps, env, ham) / (N1 * N2) @@ -91,7 +92,7 @@ end peps, env; tol=gradtol, - boundary_alg=(; svd_alg=(; rrule_alg=(; alg=:gmres, tol=1e-5))), + boundary_alg=(; maxiter=150, svd_alg=(; rrule_alg=(; alg=:gmres, tol=1e-5))), ) # sensitivity warnings and degeneracies due to SU(2)? 
ξ_h, ξ_v, = correlation_length(peps_final, env_final) e_site2 = E_final / (N1 * N2) From feef4a619346ba3ecbb403b2a2f0b492e46fad49 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Fri, 7 Mar 2025 12:22:55 +0100 Subject: [PATCH 43/52] Rename optimizer to optimizer_alg --- .../optimization/peps_optimization.jl | 37 +++++++++---------- 1 file changed, 17 insertions(+), 20 deletions(-) diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index ff366caa..eb6fdb38 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -1,30 +1,25 @@ """ - PEPSOptimize{G}(; boundary_alg=$(Defaults.ctmrg_alg), gradient_alg::G=$(Defaults.gradient_alg), - optimizer::OptimKit.OptimizationAlgorithm=$(Defaults.optimizer_alg) - reuse_env::Bool=$(Defaults.reuse_env), symmetrization::Union{Nothing,SymmetrizationStyle}=nothing) - -Algorithm struct that represent PEPS ground-state optimization using AD. -Set the algorithm to contract the infinite PEPS in `boundary_alg`; -currently only `CTMRGAlgorithm`s are supported. The `optimizer` computes the gradient directions -based on the CTMRG gradient and updates the PEPS parameters. In this optimization, -the CTMRG runs can be started on the converged environments of the previous optimizer -step by setting `reuse_env` to true. Otherwise a random environment is used at each -step. The CTMRG gradient itself is computed using the `gradient_alg` algorithm. -The `symmetrization` field accepts `nothing` or a `SymmetrizationStyle`, in which case the -PEPS and PEPS gradient are symmetrized after each optimization iteration. Note that this -requires an initial symmmetric PEPS and environment to converge properly. + struct PEPSOptimize{G} + +Algorithm struct for PEPS ground-state optimization using AD. 
+ +## Keyword arguments + +TODO + + """ struct PEPSOptimize{G} boundary_alg::CTMRGAlgorithm gradient_alg::G - optimizer::OptimKit.OptimizationAlgorithm + optimizer_alg::OptimKit.OptimizationAlgorithm reuse_env::Bool symmetrization::Union{Nothing,SymmetrizationStyle} function PEPSOptimize( # Inner constructor to prohibit illegal setting combinations boundary_alg::CTMRGAlgorithm, gradient_alg::G, - optimizer, + optimizer_alg, reuse_env, symmetrization, ) where {G} @@ -36,7 +31,7 @@ struct PEPSOptimize{G} throw(ArgumentError(msg)) end end - return new{G}(boundary_alg, gradient_alg, optimizer, reuse_env, symmetrization) + return new{G}(boundary_alg, gradient_alg, optimizer_alg, reuse_env, symmetrization) end end PEPSOptimize(; kwargs...) = select_algorithm(PEPSOptimize; kwargs...) @@ -77,8 +72,10 @@ By default, a CTMRG tolerance of `tol=1e-4tol` and is used. ### Gradient algorithm -Supply gradient algorithm parameters via `gradient_alg::Union{NamedTuple,<:GradMode}` using -either a `NamedTuple` of keyword arguments or a `GradMode` struct directly. The supported +Supply gradient algorithm parameters via `gradient_alg::Union{NamedTuple,Nothing,<:GradMode}` +using either a `NamedTuple` of keyword arguments, `nothing`, or a `GradMode` struct directly. +Pass `nothing` to fully differentiate the CTMRG run, meaning that all iterations will be +taken into account, instead of differentiating the fixed point. The supported `NamedTuple` keyword arguments are: * `tol::Real=1e-2tol`: Convergence tolerance for the fixed-point gradient iteration. @@ -180,7 +177,7 @@ function fixedpoint( # optimize operator cost function (peps_final, env_final), cost, ∂cost, numfg, convergence_history = optimize( - (peps₀, env₀), alg.optimizer; retract, inner=real_inner, finalize! + (peps₀, env₀), alg.optimizer_alg; retract, inner=real_inner, finalize! 
) do (peps, env) start_time = time_ns() E, gs = withgradient(peps) do ψ From efe4319a0cd0c232054ab22c8be5da1aff37f1b4 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Fri, 7 Mar 2025 12:30:34 +0100 Subject: [PATCH 44/52] Update more docstrings --- README.md | 2 +- docs/src/index.md | 2 +- examples/heisenberg.jl | 2 +- src/algorithms/ctmrg/projectors.jl | 2 +- src/algorithms/optimization/peps_optimization.jl | 10 ++++++---- 5 files changed, 10 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index fee1a4f5..8f22e28d 100644 --- a/README.md +++ b/README.md @@ -47,7 +47,7 @@ chi = 20 ctm_alg = SimultaneousCTMRG(; tol=1e-10, trscheme=truncdim(chi)) opt_alg = PEPSOptimize(; boundary_alg=ctm_alg, - optimizer=LBFGS(4; maxiter=100, gradtol=1e-4, verbosity=3), + optimizer_alg=LBFGS(4; maxiter=100, gradtol=1e-4, verbosity=3), gradient_alg=LinSolver(), reuse_env=true, ) diff --git a/docs/src/index.md b/docs/src/index.md index dd1d66e5..e6a6face 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -29,7 +29,7 @@ chi = 20 ctm_alg = SimultaneousCTMRG(; tol=1e-10, trscheme=truncdim(chi)) opt_alg = PEPSOptimize(; boundary_alg=ctm_alg, - optimizer=LBFGS(4; maxiter=100, gradtol=1e-4, verbosity=3), + optimizer_alg=LBFGS(4; maxiter=100, gradtol=1e-4, verbosity=3), gradient_alg=LinSolver(), reuse_env=true, ) diff --git a/examples/heisenberg.jl b/examples/heisenberg.jl index 56540f50..04394feb 100644 --- a/examples/heisenberg.jl +++ b/examples/heisenberg.jl @@ -14,7 +14,7 @@ H = heisenberg_XYZ(InfiniteSquare(); Jx=-1, Jy=1, Jz=-1) ctm_alg = SimultaneousCTMRG(; tol=1e-10, verbosity=2) opt_alg = PEPSOptimize(; boundary_alg=ctm_alg, - optimizer=LBFGS(4; maxiter=100, gradtol=1e-4, verbosity=3), + optimizer_alg=LBFGS(4; maxiter=100, gradtol=1e-4, verbosity=3), gradient_alg=LinSolver(; solver_alg=GMRES(; tol=1e-6, maxiter=100)), reuse_env=true, ) diff --git a/src/algorithms/ctmrg/projectors.jl b/src/algorithms/ctmrg/projectors.jl index a8d34c66..8f9751d2 100644 --- 
a/src/algorithms/ctmrg/projectors.jl +++ b/src/algorithms/ctmrg/projectors.jl @@ -1,5 +1,5 @@ """ - FixedSpaceTruncation <: TensorKit.TruncationScheme + struct FixedSpaceTruncation <: TensorKit.TruncationScheme CTMRG specific truncation scheme for `tsvd` which keeps the bond space on which the SVD is performed fixed. Since different environment directions and unit cell entries might diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index eb6fdb38..2e82371a 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -1,13 +1,15 @@ """ struct PEPSOptimize{G} -Algorithm struct for PEPS ground-state optimization using AD. +Algorithm struct for PEPS ground-state optimization using AD. See [`fixedpoint`](@ref) for details. ## Keyword arguments -TODO - - +* `boundary_alg::Union{NamedTuple,<:CTMRGAlgorithm}`: Supply boundary algorithm parameters using either a `NamedTuple` of keyword arguments or a `CTMRGAlgorithm` directly. See [`leading_boundary`](@ref) for a description of all possible keyword arguments. +* `gradient_alg::Union{NamedTuple,Nothing,<:GradMode}`: Supply gradient algorithm parameters using either a `NamedTuple` of keyword arguments, `nothing`, or a `GradMode` directly. See [`fixedpoint`](@ref) for a description of all possible keyword arguments. +* `optimizer_alg::Union{NamedTuple,<:OptimKit.OptimizationAlgorithm}`: Supply optimizer algorithm parameters using either a `NamedTuple` of keyword arguments, or a `OptimKit.OptimizationAlgorithm` directly. See [`fixedpoint`](@ref) for a description of all possible keyword arguments. +* `reuse_env::Bool=$(Defaults.reuse_env)`: If `true`, the current optimization step is initialized on the previous environment, otherwise a random environment is used. 
+* `symmetrization::Union{Nothing,SymmetrizationStyle}=nothing`: Accepts `nothing` or a `SymmetrizationStyle`, in which case the PEPS and PEPS gradient are symmetrized after each optimization iteration. """ struct PEPSOptimize{G} boundary_alg::CTMRGAlgorithm From 1becbb48fc6a498bce7c8d60a08c4acdb94c10d6 Mon Sep 17 00:00:00 2001 From: Lukas Devos Date: Fri, 7 Mar 2025 07:05:21 -0500 Subject: [PATCH 45/52] Defaults markdown formatting updates --- src/Defaults.jl | 76 ++++++++++++++++++++++++++----------------------- 1 file changed, 41 insertions(+), 35 deletions(-) diff --git a/src/Defaults.jl b/src/Defaults.jl index 7295aa9d..d384f573 100644 --- a/src/Defaults.jl +++ b/src/Defaults.jl @@ -3,53 +3,58 @@ Module containing default algorithm parameter values and arguments. -# CTMRG +## CTMRG -- `ctmrg_tol=$(Defaults.ctmrg_tol)`: Tolerance checking singular value and norm convergence. -- `ctmrg_maxiter=$(Defaults.ctmrg_maxiter)`: Maximal number of CTMRG iterations per run. -- `ctmrg_miniter=$(Defaults.ctmrg_miniter)`: Minimal number of CTMRG carried out. -- `ctmrg_alg=:$(Defaults.ctmrg_alg)`: Default CTMRG algorithm variant. -- `ctmrg_verbosity=$(Defaults.ctmrg_verbosity)`: CTMRG output information verbosity +- `ctmrg_tol=$(Defaults.ctmrg_tol)` : Tolerance checking singular value and norm convergence. +- `ctmrg_maxiter=$(Defaults.ctmrg_maxiter)` : Maximal number of CTMRG iterations per run. +- `ctmrg_miniter=$(Defaults.ctmrg_miniter)` : Minimal number of CTMRG carried out. +- `ctmrg_alg=:$(Defaults.ctmrg_alg)` : Default CTMRG algorithm variant. +- `ctmrg_verbosity=$(Defaults.ctmrg_verbosity)` : CTMRG output information verbosity -# SVD forward & reverse +## SVD forward & reverse -- `trscheme=:$(Defaults.trscheme)`: Truncation scheme for SVDs and other decompositions. -- `svd_fwd_alg=:$(Defaults.svd_fwd_alg)`: SVD algorithm that is used in the forward pass. -- `svd_rrule_tol=$(Defaults.svd_rrule_tol)`: Accuracy of SVD reverse-rule. 
-- `svd_rrule_min_krylovdim=$(Defaults.svd_rrule_min_krylovdim)`: Minimal Krylov dimension of the reverse-rule algorithm (if it is a Krylov algorithm). -- `svd_rrule_verbosity=$(Defaults.svd_rrule_verbosity)`: SVD gradient output verbosity. -- `svd_rrule_alg=:$(Defaults.svd_rrule_alg)`: Reverse-rule algorithm for the SVD gradient. +- `trscheme=:$(Defaults.trscheme)` : Truncation scheme for SVDs and other decompositions. +- `svd_fwd_alg=:$(Defaults.svd_fwd_alg)` : SVD algorithm that is used in the forward pass. +- `svd_rrule_tol=$(Defaults.svd_rrule_tol)` : Accuracy of SVD reverse-rule. +- `svd_rrule_min_krylovdim=$(Defaults.svd_rrule_min_krylovdim)` : Minimal Krylov dimension of the reverse-rule algorithm (if it is a Krylov algorithm). +- `svd_rrule_verbosity=$(Defaults.svd_rrule_verbosity)` : SVD gradient output verbosity. +- `svd_rrule_alg=:$(Defaults.svd_rrule_alg)` : Reverse-rule algorithm for the SVD gradient. -# Projectors +## Projectors -- `projector_alg=:$(Defaults.projector_alg)`: Default variant of the CTMRG projector algorithm. -- `projector_verbosity=$(Defaults.projector_verbosity)`: Projector output information verbosity. +- `projector_alg=:$(Defaults.projector_alg)` : Default variant of the CTMRG projector algorithm. +- `projector_verbosity=$(Defaults.projector_verbosity)` : Projector output information verbosity. -# Fixed-point gradient +## Fixed-point gradient -- `gradient_tol=$(Defaults.gradient_tol)`: Convergence tolerance for the fixed-point gradient iteration. -- `gradient_maxiter=$(Defaults.gradient_maxiter)`: Maximal number of iterations for computing the CTMRG fixed-point gradient. -- `gradient_verbosity=$(Defaults.gradient_verbosity)`: Gradient output information verbosity. -- `gradient_linsolver=:$(Defaults.gradient_linsolver)`: Default linear solver for the `LinSolver` gradient algorithm. -- `gradient_eigsolver=:$(Defaults.gradient_eigsolver)`: Default eigensolver for the `EigSolver` gradient algorithm. 
-- `gradient_eigsolver_eager=$(Defaults.gradient_eigsolver_eager)`: Enables `EigSolver` algorithm to finish before the full Krylov dimension is reached. -- `gradient_iterscheme=:$(Defaults.gradient_iterscheme)`: Scheme for differentiating one CTMRG iteration. -- `gradient_alg=:$(Defaults.gradient_alg)`: Algorithm variant for computing the gradient fixed-point. +- `gradient_tol=$(Defaults.gradient_tol)` : Convergence tolerance for the fixed-point gradient iteration. +- `gradient_maxiter=$(Defaults.gradient_maxiter)` : Maximal number of iterations for computing the CTMRG fixed-point gradient. +- `gradient_verbosity=$(Defaults.gradient_verbosity)` : Gradient output information verbosity. +- `gradient_linsolver=:$(Defaults.gradient_linsolver)` : Default linear solver for the `LinSolver` gradient algorithm. +- `gradient_eigsolver=:$(Defaults.gradient_eigsolver)` : Default eigensolver for the `EigSolver` gradient algorithm. +- `gradient_eigsolver_eager=$(Defaults.gradient_eigsolver_eager)` : Enables `EigSolver` algorithm to finish before the full Krylov dimension is reached. +- `gradient_iterscheme=:$(Defaults.gradient_iterscheme)` : Scheme for differentiating one CTMRG iteration. +- `gradient_alg=:$(Defaults.gradient_alg)` : Algorithm variant for computing the gradient fixed-point. -# Optimization +## Optimization + +- `reuse_env=$(Defaults.reuse_env)` : If `true`, the current optimization step is initialized on the previous environment, otherwise a random environment is used. +- `optimizer_tol=$(Defaults.optimizer_tol)` : Gradient norm tolerance of the optimizer. +- `optimizer_maxiter=$(Defaults.optimizer_maxiter)` : Maximal number of optimization steps. +- `optimizer_verbosity=$(Defaults.optimizer_verbosity)` : Optimizer output information verbosity. +- `optimizer_alg=:$(Defaults.optimizer_alg)` : Default `OptimKit.OptimizerAlgorithm` for PEPS optimization. +- `lbfgs_memory=$(Defaults.lbfgs_memory)` : Size of limited memory representation of BFGS Hessian matrix. 
-- `reuse_env=$(Defaults.reuse_env)`: If `true`, the current optimization step is initialized on the previous environment, otherwise a random environment is used. -- `optimizer_tol=$(Defaults.optimizer_tol)`: Gradient norm tolerance of the optimizer. -- `optimizer_maxiter=$(Defaults.optimizer_maxiter)`: Maximal number of optimization steps. -- `optimizer_verbosity=$(Defaults.optimizer_verbosity)`: Optimizer output information verbosity. -- `optimizer_alg=:$(Defaults.optimizer_alg)`: Default `OptimKit.OptimizerAlgorithm` for PEPS optimization. -- `lbfgs_memory=$(Defaults.lbfgs_memory)`: Size of limited memory representation of BFGS Hessian matrix. +## OhMyThreads scheduler -# OhMyThreads scheduler -- `scheduler=Ref{Scheduler}(...)`: Multi-threading scheduler which can be accessed via `set_scheduler!`. +- `scheduler=Ref{Scheduler}(...)` : Multi-threading scheduler which can be accessed via `set_scheduler!`. """ module Defaults + +export set_scheduler! + using OhMyThreads + # CTMRG const ctmrg_tol = 1e-8 const ctmrg_maxiter = 100 @@ -91,6 +96,7 @@ const lbfgs_memory = 20 # OhMyThreads scheduler defaults const scheduler = Ref{Scheduler}() + """ set_scheduler!([scheduler]; kwargs...) @@ -125,9 +131,9 @@ function set_scheduler!(sc=OhMyThreads.Implementation.NotGiven(); kwargs...) end return nothing end -export set_scheduler! 
function __init__() return set_scheduler!() end + end From 05d871301639cdcf4d6d251ae3ece31108f7ae80 Mon Sep 17 00:00:00 2001 From: Lukas Devos Date: Fri, 7 Mar 2025 07:21:05 -0500 Subject: [PATCH 46/52] Consistently import `leading_boundary` --- src/PEPSKit.jl | 2 +- src/algorithms/ctmrg/ctmrg.jl | 14 +++++++------- .../optimization/fixed_point_differentiation.jl | 2 +- src/operators/transfermatrix.jl | 6 +++--- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/PEPSKit.jl b/src/PEPSKit.jl index 565ccdcf..eefef3ff 100644 --- a/src/PEPSKit.jl +++ b/src/PEPSKit.jl @@ -7,7 +7,7 @@ using VectorInterface using TensorKit, KrylovKit, MPSKit, OptimKit, TensorOperations using ChainRulesCore, Zygote using LoggingExtras -using MPSKit: loginit!, logiter!, logfinish!, logcancel! +import MPSKit: leading_boundary, loginit!, logiter!, logfinish!, logcancel! using MPSKitModels using FiniteDifferences using OhMyThreads: tmap diff --git a/src/algorithms/ctmrg/ctmrg.jl b/src/algorithms/ctmrg/ctmrg.jl index be2a71a0..a72585e8 100644 --- a/src/algorithms/ctmrg/ctmrg.jl +++ b/src/algorithms/ctmrg/ctmrg.jl @@ -14,9 +14,9 @@ Perform a single CTMRG iteration in which all directions are being grown and ren function ctmrg_iteration(network, env, alg::CTMRGAlgorithm) end """ - MPSKit.leading_boundary(env₀, network; kwargs...) + leading_boundary(env₀, network; kwargs...) # expert version: - MPSKit.leading_boundary(env₀, network, alg::CTMRGAlgorithm) + leading_boundary(env₀, network, alg::CTMRGAlgorithm) Contract `network` using CTMRG and return the CTM environment. The algorithm can be supplied via the keyword arguments or directly as an [`CTMRGAlgorithm`](@ref) struct. @@ -48,11 +48,11 @@ supplied via the keyword arguments or directly as an [`CTMRGAlgorithm`](@ref) st * `svd_alg::Union{<:SVDAdjoint,NamedTuple}`: SVD algorithm for computing projectors. See also [`SVDAdjoint`](@ref). 
By default, a reverse-rule tolerance of `tol=1e1tol` where the `krylovdim` is adapted to the `env₀` environment dimension. * `projector_alg::Union{Symbol,Type{ProjectorAlgorithm}}=:$(Defaults.projector_alg)`: Variant of the projector algorithm. See also [`ProjectorAlgorithm`](@ref). """ -function MPSKit.leading_boundary(env₀::CTMRGEnv, network::InfiniteSquareNetwork; kwargs...) +function leading_boundary(env₀::CTMRGEnv, network::InfiniteSquareNetwork; kwargs...) alg = select_algorithm(leading_boundary, env₀; kwargs...) - return MPSKit.leading_boundary(env₀, network, alg) + return leading_boundary(env₀, network, alg) end -function MPSKit.leading_boundary( +function leading_boundary( env₀::CTMRGEnv, network::InfiniteSquareNetwork, alg::CTMRGAlgorithm ) CS = map(x -> tsvd(x)[2], env₀.corners) @@ -82,8 +82,8 @@ function MPSKit.leading_boundary( return env, info end end -function MPSKit.leading_boundary(env₀::CTMRGEnv, state, args...; kwargs...) - return MPSKit.leading_boundary(env₀, InfiniteSquareNetwork(state), args...; kwargs...) +function leading_boundary(env₀::CTMRGEnv, state, args...; kwargs...) + return leading_boundary(env₀, InfiniteSquareNetwork(state), args...; kwargs...) end # custom CTMRG logging diff --git a/src/algorithms/optimization/fixed_point_differentiation.jl b/src/algorithms/optimization/fixed_point_differentiation.jl index e288d840..45c127a3 100644 --- a/src/algorithms/optimization/fixed_point_differentiation.jl +++ b/src/algorithms/optimization/fixed_point_differentiation.jl @@ -105,7 +105,7 @@ Evaluating the gradient of the cost function for CTMRG: function _rrule( gradmode::GradMode{:diffgauge}, config::RuleConfig, - ::typeof(MPSKit.leading_boundary), + ::typeof(leading_boundary), envinit, state, alg::CTMRGAlgorithm, diff --git a/src/operators/transfermatrix.jl b/src/operators/transfermatrix.jl index b4873977..4cabc9ba 100644 --- a/src/operators/transfermatrix.jl +++ b/src/operators/transfermatrix.jl @@ -183,13 +183,13 @@ the unit cell. 
""" MPSKit.expectation_value(st, op) @doc """ - MPSKit.leading_boundary( + leading_boundary( st::InfiniteMPS, op::Union{InfiniteTransferPEPS,InfiniteTransferPEPO}, alg, [env] ) - MPSKit.leading_boundary( + leading_boundary( st::MPSMulitline, op::Union{MultilineTransferPEPS,MultilineTransferPEPO}, alg, [env] ) Approximate the leading boundary MPS eigenvector for the transfer operator `op` using `st` as initial guess. -""" MPSKit.leading_boundary(st, op, alg) +""" leading_boundary(st, op, alg) From c7b7d586322bec46ebbde787101838fe36347e40 Mon Sep 17 00:00:00 2001 From: Lukas Devos Date: Fri, 7 Mar 2025 07:38:21 -0500 Subject: [PATCH 47/52] Replace if-else with `IdDict` for improved extensibility --- src/algorithms/select_algorithm.jl | 92 ++++++++++++++---------------- 1 file changed, 44 insertions(+), 48 deletions(-) diff --git a/src/algorithms/select_algorithm.jl b/src/algorithms/select_algorithm.jl index 0f5c2b53..c0b641b4 100644 --- a/src/algorithms/select_algorithm.jl +++ b/src/algorithms/select_algorithm.jl @@ -119,6 +119,11 @@ function select_algorithm( ) end +const OPTIMIZATION_SYMBOLS = IdDict{Symbol,Type{<:OptimKit.OptimizationAlgorithm}}( + :gradientdescent => GradientDescent, + :conjugategradient => ConjugateGradient, + :lbfgs => LBFGS, +) function select_algorithm( ::Type{OptimKit.OptimizationAlgorithm}; alg=Defaults.optimizer_alg, @@ -128,22 +133,21 @@ function select_algorithm( lbfgs_memory=Defaults.lbfgs_memory, # TODO: add linesearch, ... to kwargs and defaults? ) - # replace symbol with projector alg type + # replace symbol with optimizer alg type alg_type = if alg isa Symbol - if alg == :gradientdescent - GradientDescent - elseif alg == :conjugategradient - ConjugateGradient - elseif alg == :lbfgs - (; kwargs...) -> LBFGS(lbfgs_memory; kwargs...) 
- else + haskey(OPTIMIZATION_SYMBOLS, alg) || throw(ArgumentError("unknown optimizer algorithm: $alg")) - end + OPTIMIZATION_SYMBOLS[alg] else alg end - return alg_type(; gradtol=tol, maxiter, verbosity) + # instantiate algorithm + return if alg_type <: LBFGS + alg_type(lbfgs_memory; gradtol=tol, maxiter, verbosity) + else + alg_type(; gradtol=tol, maxiter, verbosity) + end end function select_algorithm( @@ -175,6 +179,9 @@ function select_algorithm( ) end +const CTMRG_SYMBOLS = IdDict{Symbol,Type{<:CTMRGAlgorithm}}( + :simultaneous => SimultaneousCTMRG, :sequential => SequentialCTMRG +) function select_algorithm( ::Type{CTMRGAlgorithm}; alg=Defaults.ctmrg_alg, @@ -188,13 +195,8 @@ function select_algorithm( ) # replace symbol with projector alg type alg_type = if alg isa Symbol - if alg == :simultaneous - SimultaneousCTMRG - elseif alg == :sequential - SequentialCTMRG - else - throw(ArgumentError("unknown CTMRG algorithm: $alg")) - end + haskey(CTMRG_SYMBOLS, alg) || throw(ArgumentError("unknown CTMRG algorithm: $alg")) + CTMRG_SYMBOLS[alg] else alg end @@ -207,6 +209,9 @@ function select_algorithm( return alg_type(tol, maxiter, miniter, verbosity, projector_algorithm) end +const PROJECTOR_SYMBOLS = IdDict{Symbol,Type{<:ProjectorAlgorithm}}( + :halfinfinite => HalfInfiniteProjector, :fullinfinite => FullInfiniteProjector +) function select_algorithm( ::Type{ProjectorAlgorithm}; alg=Defaults.projector_alg, @@ -216,13 +221,9 @@ function select_algorithm( ) # replace symbol with projector alg type alg_type = if alg isa Symbol - if alg == :halfinfinite - HalfInfiniteProjector - elseif alg == :fullinfinite - FullInfiniteProjector - else + haskey(PROJECTOR_SYMBOLS, alg) || throw(ArgumentError("unknown projector algorithm: $alg")) - end + PROJECTOR_SYMBOLS[alg] else alg end @@ -238,6 +239,12 @@ function select_algorithm( return alg_type(svd_algorithm, truncation_scheme, verbosity) end +const GRADIENT_MODE_SYMBOLS = IdDict{Symbol,Type{<:GradMode}}( + :geomsum => GeomSum, 
+ :manualiter => ManualIter, + :linsolver => LinSolver, + :eigsolver => EigSolver, +) function select_algorithm( ::Type{GradMode}; alg=Defaults.gradient_alg, @@ -249,17 +256,9 @@ function select_algorithm( ) # replace symbol with GradMode alg type alg_type = if alg isa Symbol - if alg == :geomsum - GeomSum - elseif alg == :manualiter - ManualIter - elseif alg == :linsolver - LinSolver - elseif alg == :eigsolver - EigSolver - else + haskey(GRADIENT_MODE_SYMBOLS, alg) || throw(ArgumentError("unknown GradMode algorithm: $alg")) - end + GRADIENT_MODE_SYMBOLS[alg] else alg end @@ -315,25 +314,22 @@ function select_algorithm( return gradient_algorithm end +const TRUNCATION_SCHEME_SYMBOLS = IdDict{Symbol,Type{<:TruncationScheme}}( + :fixedspace => FixedSpaceTruncation, + :notrunc => TensorKit.NoTruncation, + :truncerr => TensorKit.TruncationError, + :truncdim => TensorKit.TruncationDimension, + :truncspace => TensorKit.TruncationSpace, + :truncbelow => TensorKit.TruncationCutoff, +) function select_algorithm( ::Type{TensorKit.TruncationScheme}; alg=Defaults.trscheme, η=nothing ) - alg_type = if alg isa Symbol # replace Symbol with TruncationScheme type - if alg == :fixedspace - FixedSpaceTruncation - elseif alg == :notrunc - TensorKit.NoTruncation - elseif alg == :truncerr - TensorKit.TruncationError - elseif alg == :truncdim - TensorKit.TruncationDimension - elseif alg == :truncspace - TensorKit.TruncationSpace - elseif alg == :truncbelow - TensorKit.TruncationCutoff - else + # replace Symbol with TruncationScheme type + alg_type = if alg isa Symbol + haskey(TRUNCATION_SCHEME_SYMBOLS, alg) || throw(ArgumentError("unknown truncation scheme: $alg")) - end + TRUNCATION_SCHEME_SYMBOLS[alg] else alg end From 59ea7e4b2df5e16798ac3fa6fb93597f92710ace Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Fri, 7 Mar 2025 16:50:24 +0100 Subject: [PATCH 48/52] Remove rrule `@reset` in select_algorithm(fixedpoint; ...) 
--- src/algorithms/select_algorithm.jl | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/algorithms/select_algorithm.jl b/src/algorithms/select_algorithm.jl index c0b641b4..ac1e26e2 100644 --- a/src/algorithms/select_algorithm.jl +++ b/src/algorithms/select_algorithm.jl @@ -53,7 +53,6 @@ function select_algorithm( end # adjust CTMRG tols and verbosity - boundary_algorithm = _select_alg_or_namedtuple( boundary_alg, CTMRGAlgorithm, @@ -62,7 +61,6 @@ function select_algorithm( tol=1e-4tol, verbosity=boundary_verbosity, ) - @reset boundary_algorithm.projector_alg.svd_alg.rrule_alg.tol = 1e-3tol # use @reset for nested algs # adjust gradient verbosity gradient_algorithm = _select_alg_or_namedtuple( From 73922b67ab49a89d03fd18cc6ef4c072314eb993 Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Fri, 7 Mar 2025 17:10:38 +0100 Subject: [PATCH 49/52] Remove ::Type{Algorithm} syntax from select_algorithm --- src/algorithms/select_algorithm.jl | 133 +++++++++++------------------ 1 file changed, 48 insertions(+), 85 deletions(-) diff --git a/src/algorithms/select_algorithm.jl b/src/algorithms/select_algorithm.jl index ac1e26e2..6a0c3833 100644 --- a/src/algorithms/select_algorithm.jl +++ b/src/algorithms/select_algorithm.jl @@ -16,7 +16,7 @@ Parse arguments and keyword arguments to the algorithm struct corresponding to where all keyword arguments that can be algorithm themselves can be specified using * `alg::Algorithm`: an instance of the algorithm struct or -* `(; alg::Union{Symbol,AlgorithmType}, alg_kwargs...)`: a `NamedTuple` where the algorithm is specified by a symbol or the type of the algorithm struct, and the algorithm keyword arguments +* `(; alg::Symbol, alg_kwargs...)`: a `NamedTuple` where the algorithm is specified by a `Symbol` and the algorithm keyword arguments A full description of the keyword argument can be found in the respective function or algorithm struct docstrings. @@ -132,13 +132,9 @@ function select_algorithm( # TODO: add linesearch, ... 
to kwargs and defaults? ) # replace symbol with optimizer alg type - alg_type = if alg isa Symbol - haskey(OPTIMIZATION_SYMBOLS, alg) || - throw(ArgumentError("unknown optimizer algorithm: $alg")) - OPTIMIZATION_SYMBOLS[alg] - else - alg - end + haskey(OPTIMIZATION_SYMBOLS, alg) || + throw(ArgumentError("unknown optimizer algorithm: $alg")) + alg_type = OPTIMIZATION_SYMBOLS[alg] # instantiate algorithm return if alg_type <: LBFGS @@ -192,12 +188,8 @@ function select_algorithm( projector_alg=Defaults.projector_alg, # only allows for Symbol/Type{ProjectorAlgorithm} to expose projector kwargs ) # replace symbol with projector alg type - alg_type = if alg isa Symbol - haskey(CTMRG_SYMBOLS, alg) || throw(ArgumentError("unknown CTMRG algorithm: $alg")) - CTMRG_SYMBOLS[alg] - else - alg - end + haskey(CTMRG_SYMBOLS, alg) || throw(ArgumentError("unknown CTMRG algorithm: $alg")) + alg_type = CTMRG_SYMBOLS[alg] # parse CTMRG projector algorithm projector_algorithm = select_algorithm( @@ -218,13 +210,9 @@ function select_algorithm( verbosity=Defaults.projector_verbosity, ) # replace symbol with projector alg type - alg_type = if alg isa Symbol - haskey(PROJECTOR_SYMBOLS, alg) || - throw(ArgumentError("unknown projector algorithm: $alg")) - PROJECTOR_SYMBOLS[alg] - else - alg - end + haskey(PROJECTOR_SYMBOLS, alg) || + throw(ArgumentError("unknown projector algorithm: $alg")) + alg_type = PROJECTOR_SYMBOLS[alg] # parse SVD forward & rrule algorithm svd_algorithm = _select_alg_or_namedtuple(svd_alg, SVDAdjoint, SVDAdjoint) @@ -243,6 +231,12 @@ const GRADIENT_MODE_SYMBOLS = IdDict{Symbol,Type{<:GradMode}}( :linsolver => LinSolver, :eigsolver => EigSolver, ) +const LINSOLVER_SOLVER_SYMBOLS = IdDict{Symbol,Type{<:KrylovKit.LinearSolver}}( + :gmres => GMRES, :bicgstab => BiCGStab +) +const EIGSOLVER_SOLVER_SYMBOLS = IdDict{Symbol,Type{<:KrylovKit.KrylovAlgorithm}}( + :arnoldi => Arnoldi +) function select_algorithm( ::Type{GradMode}; alg=Defaults.gradient_alg, @@ -253,13 +247,9 @@ 
function select_algorithm( solver_alg=(;), ) # replace symbol with GradMode alg type - alg_type = if alg isa Symbol - haskey(GRADIENT_MODE_SYMBOLS, alg) || - throw(ArgumentError("unknown GradMode algorithm: $alg")) - GRADIENT_MODE_SYMBOLS[alg] - else - alg - end + haskey(GRADIENT_MODE_SYMBOLS, alg) || + throw(ArgumentError("unknown GradMode algorithm: $alg")) + alg_type = GRADIENT_MODE_SYMBOLS[alg] # parse GradMode algorithm gradient_algorithm = if alg_type <: Union{GeomSum,ManualIter} @@ -270,32 +260,20 @@ function select_algorithm( solver_type = if alg_type <: LinSolver # replace symbol with solver alg type solver_kwargs = (; alg=Defaults.gradient_linsolver, solver_kwargs...) - if solver_kwargs.alg isa Symbol - if solver_kwargs.alg == :gmres - GMRES - elseif solver_kwargs.alg == :bicgstab - BiCGStab - else - throw(ArgumentError("unknown LinSolver solver: $(solver_kwargs.alg)")) - end - else - solver_kwargs.alg - end + haskey(LINSOLVER_SOLVER_SYMBOLS, solver_kwargs.alg) || throw( + ArgumentError("unknown LinSolver solver: $(solver_kwargs.alg)"), + ) + LINSOLVER_SOLVER_SYMBOLS[solver_kwargs.alg] elseif alg_type <: EigSolver - solver_kwargs = (; alg=Defaults.gradient_eigsolver, solver_kwargs...) 
- if solver_kwargs.alg isa Symbol - if solver_kwargs.alg == :arnoldi - Arnoldi - else - throw(ArgumentError("unknown EigSolver solver: $(solver_kwargs.alg)")) - end - else - solver_kwargs.alg - end - solver_kwargs = (; # use default eager for EigSolver + solver_kwargs = (; + alg=Defaults.gradient_eigsolver, eager=Defaults.gradient_eigsolver_eager, solver_kwargs..., ) + haskey(EIGSOLVER_SOLVER_SYMBOLS, solver_kwargs.alg) || throw( + ArgumentError("unknown EigSolver solver: $(solver_kwargs.alg)"), + ) + EIGSOLVER_SOLVER_SYMBOLS[solver_kwargs.alg] end solver_kwargs = Base.structdiff(solver_kwargs, (; alg=nothing)) # remove `alg` keyword argument @@ -324,38 +302,32 @@ function select_algorithm( ::Type{TensorKit.TruncationScheme}; alg=Defaults.trscheme, η=nothing ) # replace Symbol with TruncationScheme type - alg_type = if alg isa Symbol - haskey(TRUNCATION_SCHEME_SYMBOLS, alg) || - throw(ArgumentError("unknown truncation scheme: $alg")) - TRUNCATION_SCHEME_SYMBOLS[alg] - else - alg - end + haskey(TRUNCATION_SCHEME_SYMBOLS, alg) || + throw(ArgumentError("unknown truncation scheme: $alg")) + alg_type = TRUNCATION_SCHEME_SYMBOLS[alg] return isnothing(η) ? alg_type() : alg_type(η) end +const SVD_FWD_SYMBOLS = IdDict{Symbol,<:Any}( + :sdd => TensorKit.SDD, + :svd => TensorKit.SVD, + :iterative => + (; tol=1e-14, krylovdim=25, kwargs...) -> + IterSVD(; alg=GKL(; tol, krylovdim), kwargs...), +) +const SVD_RRULE_SYMBOLS = IdDict{Symbol,Type{<:Any}}( + :gmres => GMRES, :bicgstab => BiCGStab, :arnoldi => Arnoldi +) function select_algorithm( ::Type{SVDAdjoint}; fwd_alg=(;), rrule_alg=(;), broadening=nothing ) # parse forward SVD algorithm fwd_algorithm = if fwd_alg isa NamedTuple fwd_kwargs = (; alg=Defaults.svd_fwd_alg, fwd_alg...) 
# overwrite with specified kwargs - fwd_type = if fwd_kwargs.alg isa Symbol # replace symbol with alg type - if fwd_kwargs.alg == :sdd - TensorKit.SDD - elseif fwd_kwargs.alg == :svd - TensorKit.SVD - elseif fwd_kwargs.alg == :iterative - # circumvent alg keyword in IterSVD constructor - (; tol=1e-14, krylovdim=25, kwargs...) -> - IterSVD(; alg=GKL(; tol, krylovdim), kwargs...) - else - throw(ArgumentError("unknown forward algorithm: $(fwd_kwargs.alg)")) - end - else - fwd_kwargs.alg - end + haskey(SVD_FWD_SYMBOLS, fwd_kwargs.alg) || + throw(ArgumentError("unknown forward algorithm: $(fwd_kwargs.alg)")) + fwd_type = SVD_FWD_SYMBOLS[fwd_kwargs.alg] fwd_kwargs = Base.structdiff(fwd_kwargs, (; alg=nothing)) # remove `alg` keyword argument fwd_type(; fwd_kwargs...) else @@ -371,19 +343,10 @@ function select_algorithm( verbosity=Defaults.svd_rrule_verbosity, rrule_alg..., ) # overwrite with specified kwargs - rrule_type = if rrule_kwargs.alg isa Symbol # replace symbol with alg type - if rrule_kwargs.alg == :gmres - GMRES - elseif rrule_kwargs.alg == :bicgstab - BiCGStab - elseif rrule_kwargs.alg == :arnoldi - Arnoldi - else - throw(ArgumentError("unknown rrule algorithm: $(rrule_kwargs.alg)")) - end - else - rrule_kwargs.alg - end + + haskey(SVD_RRULE_SYMBOLS, rrule_kwargs.alg) || + throw(ArgumentError("unknown rrule algorithm: $(rrule_kwargs.alg)")) + rrule_type = SVD_RRULE_SYMBOLS[rrule_kwargs.alg] rrule_kwargs = Base.structdiff(rrule_kwargs, (; alg=nothing)) # remove `alg` keyword argument rrule_type <: BiCGStab && (rrule_kwargs = Base.structdiff(rrule_kwargs, (; krylovdim=nothing))) # BiCGStab doens't take `krylovdim` From 63486562e4b3b931cd2a9ed983a5df76f7b9a83a Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Fri, 7 Mar 2025 17:25:35 +0100 Subject: [PATCH 50/52] Adapt docstrings --- src/algorithms/ctmrg/ctmrg.jl | 6 +++--- src/algorithms/ctmrg/projectors.jl | 4 ++-- src/algorithms/ctmrg/sequential.jl | 15 +++++++++++-- src/algorithms/ctmrg/simultaneous.jl | 21 
++++++++++++------- .../fixed_point_differentiation.jl | 4 ++-- .../optimization/peps_optimization.jl | 4 ++-- src/utility/svd.jl | 4 ++-- 7 files changed, 38 insertions(+), 20 deletions(-) diff --git a/src/algorithms/ctmrg/ctmrg.jl b/src/algorithms/ctmrg/ctmrg.jl index a72585e8..14b6f519 100644 --- a/src/algorithms/ctmrg/ctmrg.jl +++ b/src/algorithms/ctmrg/ctmrg.jl @@ -34,11 +34,11 @@ supplied via the keyword arguments or directly as an [`CTMRGAlgorithm`](@ref) st 2. Initialization and convergence info 3. Iteration info 4. Debug info -* `alg::Union{Symbol,Type{CTMRGAlgorithm}}=:$(Defaults.ctmrg_alg)`: Variant of the CTMRG algorithm. See also [`CTMRGAlgorithm`](@ref). +* `alg::Symbol=:$(Defaults.ctmrg_alg)`: Variant of the CTMRG algorithm. See also [`CTMRGAlgorithm`](@ref). ### Projector algorithm -* `trscheme::Union{TruncationScheme,NamedTuple}=(; alg=:$(Defaults.trscheme))`: Truncation scheme for the projector computation, which controls the resulting virtual spaces. Here, `alg` can be any `TensorKit.TruncationScheme` type or one of the following symbols: +* `trscheme::Union{TruncationScheme,NamedTuple}=(; alg::Symbol=:$(Defaults.trscheme))`: Truncation scheme for the projector computation, which controls the resulting virtual spaces. Here, `alg` can be one of the following: - `:fixedspace`: Keep virtual spaces fixed during projection - `:notrunc`: No singular values are truncated and the performed SVDs are exact - `:truncerr`: Additionally supply error threshold `η`; truncate to the maximal virtual dimension of `η` @@ -46,7 +46,7 @@ supplied via the keyword arguments or directly as an [`CTMRGAlgorithm`](@ref) st - `:truncspace`: Additionally supply truncation space `η`; truncate according to the supplied vector space - `:truncbelow`: Additionally supply singular value cutoff `η`; truncate such that every retained singular value is larger than `η` * `svd_alg::Union{<:SVDAdjoint,NamedTuple}`: SVD algorithm for computing projectors. See also [`SVDAdjoint`](@ref). 
By default, a reverse-rule tolerance of `tol=1e1tol` where the `krylovdim` is adapted to the `env₀` environment dimension. -* `projector_alg::Union{Symbol,Type{ProjectorAlgorithm}}=:$(Defaults.projector_alg)`: Variant of the projector algorithm. See also [`ProjectorAlgorithm`](@ref). +* `projector_alg::Symbol=:$(Defaults.projector_alg)`: Variant of the projector algorithm. See also [`ProjectorAlgorithm`](@ref). """ function leading_boundary(env₀::CTMRGEnv, network::InfiniteSquareNetwork; kwargs...) alg = select_algorithm(leading_boundary, env₀; kwargs...) diff --git a/src/algorithms/ctmrg/projectors.jl b/src/algorithms/ctmrg/projectors.jl index 8f9751d2..130748e8 100644 --- a/src/algorithms/ctmrg/projectors.jl +++ b/src/algorithms/ctmrg/projectors.jl @@ -40,7 +40,7 @@ Projector algorithm implementing projectors from SVDing the half-infinite CTMRG ## Keyword arguments * `svd_alg::Union{<:SVDAdjoint,NamedTuple}=SVDAdjoint()`: SVD algorithm including the reverse rule. See ['SVDAdjoint'](@ref). -* `trscheme::Union{TruncationScheme,NamedTuple}=(; alg=:$(Defaults.trscheme))`: Truncation scheme for the projector computation, which controls the resulting virtual spaces. Here, `alg` can be any `TensorKit.TruncationScheme` type or one of the following symbols: +* `trscheme::Union{TruncationScheme,NamedTuple}=(; alg::Symbol=:$(Defaults.trscheme))`: Truncation scheme for the projector computation, which controls the resulting virtual spaces. Here, `alg` can be one of the following: - `:fixedspace`: Keep virtual spaces fixed during projection - `:notrunc`: No singular values are truncated and the performed SVDs are exact - `:truncerr`: Additionally supply error threshold `η`; truncate to the maximal virtual dimension of `η` @@ -68,7 +68,7 @@ Projector algorithm implementing projectors from SVDing the full 4x4 CTMRG envir ## Keyword arguments * `svd_alg::Union{<:SVDAdjoint,NamedTuple}=SVDAdjoint()`: SVD algorithm including the reverse rule. See ['SVDAdjoint'](@ref). 
-* `trscheme::Union{TruncationScheme,NamedTuple}=(; alg=:$(Defaults.trscheme))`: Truncation scheme for the projector computation, which controls the resulting virtual spaces. Here, `alg` can be any `TensorKit.TruncationScheme` type or one of the following symbols: +* `trscheme::Union{TruncationScheme,NamedTuple}=(; alg::Symbol=:$(Defaults.trscheme))`: Truncation scheme for the projector computation, which controls the resulting virtual spaces. Here, `alg` can be one of the following: - `:fixedspace`: Keep virtual spaces fixed during projection - `:notrunc`: No singular values are truncated and the performed SVDs are exact - `:truncerr`: Additionally supply error threshold `η`; truncate to the maximal virtual dimension of `η` diff --git a/src/algorithms/ctmrg/sequential.jl b/src/algorithms/ctmrg/sequential.jl index 034933a9..8b02d9c2 100644 --- a/src/algorithms/ctmrg/sequential.jl +++ b/src/algorithms/ctmrg/sequential.jl @@ -6,8 +6,19 @@ CTMRG algorithm where the expansions and renormalization is performed sequentially column-wise. This is implemented as a growing and projecting step to the left, followed by -a clockwise rotation (performed four times). The projectors are computed using -`projector_alg` from `svd_alg` SVDs where the truncation scheme is set via `trscheme`. +a clockwise rotation (performed four times). + +## Keyword arguments + +For a full description, see [`leading_boundary`](@ref). 
The supported keywords are: + +* `tol::Real=$(Defaults.ctmrg_tol)` +* `maxiter::Int=$(Defaults.ctmrg_maxiter)` +* `miniter::Int=$(Defaults.ctmrg_miniter)` +* `verbosity::Int=$(Defaults.ctmrg_verbosity)` +* `trscheme::Union{TruncationScheme,NamedTuple}=(; alg::Symbol=:$(Defaults.trscheme))` +* `svd_alg::Union{<:SVDAdjoint,NamedTuple}` +* `projector_alg::Symbol=:$(Defaults.projector_alg)` """ struct SequentialCTMRG <: CTMRGAlgorithm tol::Float64 diff --git a/src/algorithms/ctmrg/simultaneous.jl b/src/algorithms/ctmrg/simultaneous.jl index 41c94f2f..04ecf56b 100644 --- a/src/algorithms/ctmrg/simultaneous.jl +++ b/src/algorithms/ctmrg/simultaneous.jl @@ -1,13 +1,20 @@ """ - SimultaneousCTMRG(; tol=$(Defaults.ctmrg_tol), maxiter=$(Defaults.ctmrg_maxiter), - miniter=$(Defaults.ctmrg_miniter), verbosity=$(Defaults.ctmrg_verbosity), - svd_alg=TODO, trscheme=TODO, - projector_alg=TODO) + struct SimultaneousCTMRG CTMRG algorithm where all sides are grown and renormalized at the same time. In particular, -the projectors are applied to the corners from two sides simultaneously. The projectors are -computed using `projector_alg` from `svd_alg` SVDs where the truncation scheme is set via -`trscheme`. +the projectors are applied to the corners from two sides simultaneously. + +## Keyword arguments + +For a full description, see [`leading_boundary`](@ref). 
The supported keywords are: + +* `tol::Real=$(Defaults.ctmrg_tol)` +* `maxiter::Int=$(Defaults.ctmrg_maxiter)` +* `miniter::Int=$(Defaults.ctmrg_miniter)` +* `verbosity::Int=$(Defaults.ctmrg_verbosity)` +* `trscheme::Union{TruncationScheme,NamedTuple}=(; alg::Symbol=:$(Defaults.trscheme))` +* `svd_alg::Union{<:SVDAdjoint,NamedTuple}` +* `projector_alg::Symbol=:$(Defaults.projector_alg)` """ struct SimultaneousCTMRG <: CTMRGAlgorithm tol::Float64 diff --git a/src/algorithms/optimization/fixed_point_differentiation.jl b/src/algorithms/optimization/fixed_point_differentiation.jl index 45c127a3..a4856f6e 100644 --- a/src/algorithms/optimization/fixed_point_differentiation.jl +++ b/src/algorithms/optimization/fixed_point_differentiation.jl @@ -64,7 +64,7 @@ problem using iterative solvers. * `iterscheme::Symbol=:$(Defaults.gradient_iterscheme)`: Style of CTMRG iteration which is being differentiated, which can be: - `:fixed`: the differentiated CTMRG iteration uses a pre-computed SVD with a fixed set of gauges - `:diffgauge`: the differentiated iteration consists of a CTMRG iteration and a subsequent gauge-fixing step such that the gauge-fixing procedure is differentiated as well -* `solver_alg::Union{KrylovKit.LinearSolver,NamedTuple}=(; alg=:$(Defaults.gradient_linsolver)`: Linear solver algorithm which, if supplied directly as a `KrylovKit.LinearSolver` overrides the above specified `tol`, `maxiter` and `verbosity`. Alternatively, it can be supplied via a `NamedTuple` where `alg` can be a `KrylovKit.LinearSolver` type or the corresponding symbol: +* `solver_alg::Union{KrylovKit.LinearSolver,NamedTuple}=(; alg::Symbol=:$(Defaults.gradient_linsolver)`: Linear solver algorithm which, if supplied directly as a `KrylovKit.LinearSolver` overrides the above specified `tol`, `maxiter` and `verbosity`. 
Alternatively, it can be supplied via a `NamedTuple` where `alg` can be one of the following: - `:gmres`: GMRES iterative linear solver, see the [KrylovKit docs](https://jutho.github.io/KrylovKit.jl/stable/man/algorithms/#KrylovKit.GMRES) for details - `:bicgstab`: BiCGStab iterative linear solver, see the [KrylovKit docs](https://jutho.github.io/KrylovKit.jl/stable/man/algorithms/#KrylovKit.BiCGStab) for details """ @@ -87,7 +87,7 @@ problem as an eigenvalue problem. * `iterscheme::Symbol=:$(Defaults.gradient_iterscheme)`: Style of CTMRG iteration which is being differentiated, which can be: - `:fixed`: the differentiated CTMRG iteration uses a pre-computed SVD with a fixed set of gauges - `:diffgauge`: the differentiated iteration consists of a CTMRG iteration and a subsequent gauge-fixing step such that the gauge-fixing procedure is differentiated as well -* `solver_alg::Union{KrylovKit.KrylovAlgorithm,NamedTuple}=(; alg=:$(Defaults.gradient_eigsolver)`: Linear solver algorithm which, if supplied directly as a `KrylovKit.KrylovAlgorithm` overrides the above specified `tol`, `maxiter` and `verbosity`. Alternatively, it can be supplied via a `NamedTuple` where `alg` can be a `KrylovKit.KrylovAlgorithm` type or the corresponding symbol: +* `solver_alg::Union{KrylovKit.KrylovAlgorithm,NamedTuple}=(; alg=:$(Defaults.gradient_eigsolver)`: Eigen solver algorithm which, if supplied directly as a `KrylovKit.KrylovAlgorithm` overrides the above specified `tol`, `maxiter` and `verbosity`. 
Alternatively, it can be supplied via a `NamedTuple` where `alg` can be one of the following: - `:arnoldi`: Arnoldi Krylov algorithm, see the [KrylovKit docs](https://jutho.github.io/KrylovKit.jl/stable/man/algorithms/#KrylovKit.Arnoldi) for details """ struct EigSolver{F} <: GradMode{F} diff --git a/src/algorithms/optimization/peps_optimization.jl b/src/algorithms/optimization/peps_optimization.jl index 2e82371a..c6cf63cb 100644 --- a/src/algorithms/optimization/peps_optimization.jl +++ b/src/algorithms/optimization/peps_optimization.jl @@ -82,7 +82,7 @@ keyword arguments are: * `tol::Real=1e-2tol`: Convergence tolerance for the fixed-point gradient iteration. * `maxiter::Int=$(Defaults.gradient_maxiter)`: Maximal number of gradient problem iterations. -* `alg::Union{Symbol,Type{GradMode}}=:$(Defaults.gradient_alg)`: Gradient algorithm type, can be any `GradMode` type or the corresponding symbol: +* `alg::Symbol=:$(Defaults.gradient_alg)`: Gradient algorithm variant, can be one of the following: - `:geomsum`: Compute gradient directly from the geometric sum, see [`GeomSum`](@ref) - `:manualiter`: Iterate gradient geometric sum manually, see ['ManualIter'](@ref) - `:linsolver`: Solve fixed-point gradient linear problem using iterative solver, see ['LinSolver'](@ref) @@ -99,7 +99,7 @@ using either a `NamedTuple` of keyword arguments or a `OptimKit.OptimizationAlgo `OptimKit.LBFGS` is used in combination with a `HagerZhangLineSearch`. 
The supported keyword arguments are: -* `alg::Union{Symbol,Type{OptimKit.OptimizationAlgorithm}}=:$(Defaults.optimizer_alg)`: Optimizer algorithm, can be any `OptimKit.OptimizationAlgorithm` type or the corresponding symbol: +* `alg::Symbol=:$(Defaults.optimizer_alg)`: Optimizer algorithm, can be one of the following: - `:gradientdescent`: Gradient descent algorithm, see the [OptimKit README](https://github.com/Jutho/OptimKit.jl) - `:conjugategradient`: Conjugate gradient algorithm, see the [OptimKit README](https://github.com/Jutho/OptimKit.jl) - `:lbfgs`: L-BFGS algorithm, see the [OptimKit README](https://github.com/Jutho/OptimKit.jl) diff --git a/src/utility/svd.jl b/src/utility/svd.jl index f8b2f081..40840841 100644 --- a/src/utility/svd.jl +++ b/src/utility/svd.jl @@ -19,11 +19,11 @@ removes the divergences from the adjoint. ## Keyword arguments -* `fwd_alg::Union{Algorithm,NamedTuple}=(; alg=Defaults.svd_fwd_alg)`: SVD algorithm of the forward pass which can either be passed as an `Algorithm` instance or a `NamedTuple` where `alg` is an `Algorithm` type or the corresponding `Symbol`: +* `fwd_alg::Union{Algorithm,NamedTuple}=(; alg::Symbol=Defaults.svd_fwd_alg)`: SVD algorithm of the forward pass which can either be passed as an `Algorithm` instance or a `NamedTuple` where `alg` is one of the following: - `:sdd`: TensorKit's wrapper for LAPACK's `_gesdd` - `:svd`: TensorKit's wrapper for LAPACK's `_gesvd` - `:iterative`: Iterative SVD only computing the specifed number of singular values and vectors, see ['IterSVD'](@ref) -* `rrule_alg::Union{Algorithm,NamedTuple}=(; alg=Defaults.svd_rrule_alg)`: Reverse-rule algorithm for differentiating the SVD. Can be supplied by an `Algorithm` instance directly or as a `NamedTuple` where `alg` is an `Algorithm` type or the corresponding `Symbol`: +* `rrule_alg::Union{Algorithm,NamedTuple}=(; alg::Symbol=Defaults.svd_rrule_alg)`: Reverse-rule algorithm for differentiating the SVD. 
Can be supplied by an `Algorithm` instance directly or as a `NamedTuple` where `alg` is one of the following: - `:gmres`: GMRES iterative linear solver, see the [KrylovKit docs](https://jutho.github.io/KrylovKit.jl/stable/man/algorithms/#KrylovKit.GMRES) for details - `:bicgstab`: BiCGStab iterative linear solver, see the [KrylovKit docs](https://jutho.github.io/KrylovKit.jl/stable/man/algorithms/#KrylovKit.BiCGStab) for details - `:arnoldi`: Arnoldi Krylov algorithm, see the [KrylovKit docs](https://jutho.github.io/KrylovKit.jl/stable/man/algorithms/#KrylovKit.Arnoldi) for details From 78f35e719fb0d928fe89b68d9c6f3483e6ca875d Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Fri, 7 Mar 2025 17:50:56 +0100 Subject: [PATCH 51/52] Adapt tests --- src/algorithms/select_algorithm.jl | 2 +- test/ctmrg/fixed_iterscheme.jl | 2 +- test/ctmrg/gaugefix.jl | 4 ++-- test/ctmrg/gradients.jl | 6 +++--- test/ctmrg/partition_function.jl | 4 ++-- test/ctmrg/unitcell.jl | 8 ++++---- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/src/algorithms/select_algorithm.jl b/src/algorithms/select_algorithm.jl index 6a0c3833..d5d99393 100644 --- a/src/algorithms/select_algorithm.jl +++ b/src/algorithms/select_algorithm.jl @@ -309,7 +309,7 @@ function select_algorithm( return isnothing(η) ? 
alg_type() : alg_type(η) end -const SVD_FWD_SYMBOLS = IdDict{Symbol,<:Any}( +const SVD_FWD_SYMBOLS = IdDict{Symbol,Any}( :sdd => TensorKit.SDD, :svd => TensorKit.SVD, :iterative => diff --git a/test/ctmrg/fixed_iterscheme.jl b/test/ctmrg/fixed_iterscheme.jl index 75e2e585..c7b36296 100644 --- a/test/ctmrg/fixed_iterscheme.jl +++ b/test/ctmrg/fixed_iterscheme.jl @@ -15,7 +15,7 @@ using PEPSKit: χbond = 2 χenv = 16 svd_algs = [SVDAdjoint(; fwd_alg=TensorKit.SDD()), SVDAdjoint(; fwd_alg=IterSVD())] -projector_algs = [HalfInfiniteProjector] #, FullInfiniteProjector] +projector_algs = [:halfinfinite] #, :fullinfinite] unitcells = [(1, 1), (3, 4)] atol = 1e-5 diff --git a/test/ctmrg/gaugefix.jl b/test/ctmrg/gaugefix.jl index 125c8760..775428c4 100644 --- a/test/ctmrg/gaugefix.jl +++ b/test/ctmrg/gaugefix.jl @@ -9,7 +9,7 @@ spacetypes = [ComplexSpace, Z2Space] scalartypes = [Float64, ComplexF64] unitcells = [(1, 1), (2, 2), (3, 2)] ctmrg_algs = [SequentialCTMRG, SimultaneousCTMRG] -projector_algs = [HalfInfiniteProjector, FullInfiniteProjector] +projector_algs = [:halfinfinite, :fullinfinite] tol = 1e-6 # large tol due to χ=6 χ = 6 atol = 1e-4 @@ -20,7 +20,7 @@ function _pre_converge_env( Random.seed!(seed) # Seed RNG to make random environment consistent psi = InfinitePEPS(rand, T, physical_space, peps_space; unitcell) env₀ = CTMRGEnv(psi, ctm_space) - env_conv, = leading_boundary(env₀, psi; alg=SequentialCTMRG, tol) + env_conv, = leading_boundary(env₀, psi; alg=:sequential, tol) return env_conv, psi end diff --git a/test/ctmrg/gradients.jl b/test/ctmrg/gradients.jl index 56c9f579..a14955c3 100644 --- a/test/ctmrg/gradients.jl +++ b/test/ctmrg/gradients.jl @@ -19,10 +19,10 @@ names = ["Heisenberg", "p-wave superconductor"] gradtol = 1e-4 ctmrg_algs = [ [ - SimultaneousCTMRG(; verbosity=0, projector_alg=HalfInfiniteProjector), - SimultaneousCTMRG(; verbosity=0, projector_alg=FullInfiniteProjector), + SimultaneousCTMRG(; verbosity=0, projector_alg=:halfinfinite), + 
SimultaneousCTMRG(; verbosity=0, projector_alg=:fullinfinite), ], - [SequentialCTMRG(; verbosity=0, projector_alg=HalfInfiniteProjector)], + [SequentialCTMRG(; verbosity=0, projector_alg=:halfinfinite)], ] gradmodes = [ [ diff --git a/test/ctmrg/partition_function.jl b/test/ctmrg/partition_function.jl index 9e5714e6..18f70ee9 100644 --- a/test/ctmrg/partition_function.jl +++ b/test/ctmrg/partition_function.jl @@ -89,8 +89,8 @@ Random.seed!(81812781143) env0 = CTMRGEnv(Z, χenv) # cover all different flavors -ctm_styles = [SequentialCTMRG, SimultaneousCTMRG] -projector_algs = [HalfInfiniteProjector, FullInfiniteProjector] +ctm_styles = [:sequential, :simultaneous] +projector_algs = [:halfinfinite, :fullinfinite] @testset "Classical Ising partition function using $alg with $projector_alg" for ( alg, projector_alg diff --git a/test/ctmrg/unitcell.jl b/test/ctmrg/unitcell.jl index b72ded9a..3b399afa 100644 --- a/test/ctmrg/unitcell.jl +++ b/test/ctmrg/unitcell.jl @@ -8,10 +8,10 @@ using TensorKit Random.seed!(91283219347) stype = ComplexF64 ctm_algs = [ - SequentialCTMRG(; projector_alg=HalfInfiniteProjector), - SequentialCTMRG(; projector_alg=FullInfiniteProjector), - SimultaneousCTMRG(; projector_alg=HalfInfiniteProjector), - SimultaneousCTMRG(; projector_alg=FullInfiniteProjector), + SequentialCTMRG(; projector_alg=:halfinfinite), + SequentialCTMRG(; projector_alg=:fullinfinite), + SimultaneousCTMRG(; projector_alg=:halfinfinite), + SimultaneousCTMRG(; projector_alg=:fullinfinite), ] function test_unitcell( From c06c20b437390199d0ad2f2d105b00fd71adbccf Mon Sep 17 00:00:00 2001 From: Paul Brehmer Date: Fri, 7 Mar 2025 18:36:15 +0100 Subject: [PATCH 52/52] Fix flavors.jl test --- test/ctmrg/flavors.jl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/ctmrg/flavors.jl b/test/ctmrg/flavors.jl index 75a08c02..590f4303 100644 --- a/test/ctmrg/flavors.jl +++ b/test/ctmrg/flavors.jl @@ -8,7 +8,7 @@ using PEPSKit χbond = 2 χenv = 16 unitcells = 
[(1, 1), (3, 4)] -projector_algs = [HalfInfiniteProjector, FullInfiniteProjector] +projector_algs = [:halfinfinite, :fullinfinite] @testset "$(unitcell) unit cell with $projector_alg" for (unitcell, projector_alg) in Iterators.product( @@ -18,10 +18,10 @@ projector_algs = [HalfInfiniteProjector, FullInfiniteProjector] Random.seed!(32350283290358) psi = InfinitePEPS(2, χbond; unitcell) env_sequential, = leading_boundary( - CTMRGEnv(psi, ComplexSpace(χenv)), psi; alg=SequentialCTMRG, projector_alg + CTMRGEnv(psi, ComplexSpace(χenv)), psi; alg=:sequential, projector_alg ) env_simultaneous, = leading_boundary( - CTMRGEnv(psi, ComplexSpace(χenv)), psi; alg=SimultaneousCTMRG, projector_alg + CTMRGEnv(psi, ComplexSpace(χenv)), psi; alg=:simultaneous, projector_alg ) # compare norms @@ -56,7 +56,7 @@ end # test fixedspace actually fixes space @testset "Fixedspace truncation using $alg and $projector_alg" for (alg, projector_alg) in Iterators.product( - [SequentialCTMRG, SimultaneousCTMRG], projector_algs + [:sequential, :simultaneous], projector_algs ) Ds = fill(2, 3, 3) χs = [16 17 18; 15 20 21; 14 19 22]