diff --git a/Project.toml b/Project.toml
index 99acc64..7fb5af7 100644
--- a/Project.toml
+++ b/Project.toml
@@ -6,16 +6,12 @@ version = "0.1.0"
 [deps]
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 LoggingExtras = "e6f89c97-d47a-5376-807f-9c37f3926c36"
-OptimKit = "77e91f04-9b3b-57a6-a776-40b61faaebe0"
 Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
 TensorKit = "07d1fe3e-3e46-537d-9eac-e9e13d0d4cec"
-Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"

 [compat]
 LoggingExtras = "1.1.0"
-OptimKit = "0.3.1"
 TensorKit = "0.14"
-Zygote = "0.7.3"

 [extras]
 QuadGK = "1fd47b50-473d-5c70-9696-f719f8f3bcdc"
diff --git a/src/TRGKit.jl b/src/TRGKit.jl
index 8699d3f..2d9eb14 100644
--- a/src/TRGKit.jl
+++ b/src/TRGKit.jl
@@ -1,7 +1,6 @@
 module TRGKit
 using TensorKit, LinearAlgebra
 using LoggingExtras, Printf
-using Zygote, OptimKit

 # stop criteria
 include("utility/stopping.jl")
@@ -16,7 +15,6 @@ include("schemes/hotrg.jl")
 include("schemes/gilt.jl")
 include("schemes/gilt-tnr.jl")
 include("schemes/atrg.jl")
-include("schemes/slooptnr.jl")

 export TRGScheme
 export TRG, trg_convcrit
@@ -24,7 +22,6 @@ export BTRG, btrg_convcrit
 export HOTRG, hotrg_convcrit
 export ATRG, atrg_convcrit
 export GILTTNR, gilttnr_convcrit
-export SLoopTNR

 export run!
diff --git a/src/schemes/slooptnr.jl b/src/schemes/slooptnr.jl
deleted file mode 100644
index cdb16b7..0000000
--- a/src/schemes/slooptnr.jl
+++ /dev/null
@@ -1,69 +0,0 @@
-# TODO: rewrite SLoopTNR contractions to work with symmetric tensors
-mutable struct SLoopTNR <: TRGScheme
-    T::TensorMap
-
-    optimization_algorithm::OptimKit.OptimizationAlgorithm
-    finalize!::Function
-    function SLoopTNR(T::TensorMap;
-                      optimization_algorithm=LBFGS(8; verbosity=1, maxiter=500,
-                                                   gradtol=1e-4), finalize=finalize!)
-        @assert scalartype(T) <: Real "SLoopTNR only supports real-valued TensorMaps"
-        return new(T, optimization_algorithm, finalize)
-    end
-end
-
-function step!(scheme::SLoopTNR, trunc::TensorKit.TruncationScheme)
-    f(A) = _SLoopTNR_cost(permute(scheme.T, ((1, 2), (4, 3))), A) # Another convention was used when implementing SLoopTNR
-
-    function fg(f, A)
-        f, g = Zygote.withgradient(f, A)
-        return f, g[1]
-    end
-
-    Zygote.refresh()
-
-    U, S, _ = tsvd(permute(scheme.T, ((1, 2), (4, 3))); trunc=trunc)
-    S₀ = U * sqrt(S)
-    if norm(imag(S)) > 1e-12
-        @error "S is not real"
-    end
-    S_opt, _, _, _, _ = optimize(A -> fg(f, A), S₀, scheme.optimization_algorithm)
-
-    @tensor scheme.T[-1 -2; -4 -3] := S_opt[1 2 -3] * S_opt[1 4 -1] * S_opt[3 4 -2] *
-                                      S_opt[3 2 -4]
-end
-
-function ψAψA(T::AbstractTensorMap)
-    @tensor M[-1 -2 -3 -4] := T[1 -2 2 -4] * conj(T[1 -1 2 -3])
-    @tensor MM[-1 -2 -3 -4] := M[-1 -2 1 2] * M[-3 -4 1 2]
-    return @tensor MM[1 2 3 4] * MM[1 2 3 4]
-end
-
-function ψAψB(T::AbstractTensorMap, S::AbstractTensorMap)
-    @tensor M[-1 -2 -3 -4] := T[1 -2 2 -4] * conj(S[1 -1 3]) * conj(S[2 -3 3])
-    @tensor MM[-1 -2 -3 -4] := M[-1 -2 1 2] * M[-3 -4 1 2]
-    @tensor result = MM[1 2 3 4] * MM[1 2 3 4]
-    if norm(imag(result)) > 1e-12
-        @error "We only support real tensors"
-    end
-    return result
-end
-
-function ψBψB(S::AbstractTensorMap)
-    @tensor M[-1 -2 -3 -4] := S[1 -1 3] * conj(S[1 -2 4]) * S[2 -3 3] * conj(S[2 -4 4])
-    @tensor MM[-1 -2 -3 -4] := M[-1 -2 1 2] * M[-3 -4 1 2]
-    return @tensor MM[1 2 3 4] * MM[1 2 3 4] # This seems very bad for complex numbers
-end
-
-function _SLoopTNR_cost(T::AbstractTensorMap, S::AbstractTensorMap)
-    return ψAψA(T) - 2 * real(ψAψB(T, S)) + ψBψB(S)
-end
-
-slooptnr_convcrit(steps::Int, data) = abs(log(data[end]) * 2.0^(-steps))
-
-function Base.show(io::IO, scheme::SLoopTNR)
-    println(io, "SLoopTNR - Symmetric Loop TNR")
-    println(io, " * T: $(summary(scheme.T))")
-    return println(io,
-                   " * Optimization algorithm: $(summary(scheme.optimization_algorithm))")
-end
diff --git a/src/utility/finalize.jl b/src/utility/finalize.jl
index ed2075e..22b7306 100644
--- a/src/utility/finalize.jl
+++ b/src/utility/finalize.jl
@@ -1,4 +1,4 @@
-const simple_scheme = Union{TRG,GILTTNR,SLoopTNR}
+const simple_scheme = Union{TRG,GILTTNR}
 const turning_scheme = Union{HOTRG,ATRG}

 # 1x1 unitcell finalize
diff --git a/test/ising.jl b/test/ising.jl
index 91a31a0..07c75fa 100644
--- a/test/ising.jl
+++ b/test/ising.jl
@@ -85,19 +85,3 @@ end
     relerror = abs((fs - f_onsager) / f_onsager)
     @test relerror < 2e-6
 end
-
-# SLoopTNR
-@testset "SLoopTNR - Ising Model" begin
-    scheme = SLoopTNR(classical_ising(Ising_βc)) # SLoopTNR is not compatible yet with symmetric tensors
-    data = run!(scheme, truncdim(8), maxiter(25))
-
-    lnz = 0
-    for (i, d) in enumerate(data)
-        lnz += log(d) * 2.0^(1 - i)
-    end
-
-    fs = lnz * -1 / Ising_βc
-
-    relerror = abs((fs - f_onsager) / f_onsager)
-    @test relerror < 1e-5
-end