Implement spatially symmetric LoopTNR (#7)
* add Zygote and OptimKit as dependencies

* implement SLoopTNR

* add default `LBFGS` parameters

* add `SLoopTNR` to the spaces testset

* remove `SLoopTNR` from spaces test again

* add `SLoopTNR` to Ising test

* add comment to `SLoopTNR` implementation

* formatting

 Co-authored-by: darts <[email protected]>
VictorVanthilt authored Jan 24, 2025
1 parent 5f6b372 commit dc6d972
Showing 5 changed files with 93 additions and 1 deletion.
4 changes: 4 additions & 0 deletions Project.toml
@@ -6,12 +6,16 @@ version = "0.1.0"
[deps]
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
LoggingExtras = "e6f89c97-d47a-5376-807f-9c37f3926c36"
OptimKit = "77e91f04-9b3b-57a6-a776-40b61faaebe0"
Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
TensorKit = "07d1fe3e-3e46-537d-9eac-e9e13d0d4cec"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"

[compat]
LoggingExtras = "1.1.0"
OptimKit = "0.3.1"
TensorKit = "0.14"
Zygote = "0.7.3"

[extras]
QuadGK = "1fd47b50-473d-5c70-9696-f719f8f3bcdc"
3 changes: 3 additions & 0 deletions src/TRGKit.jl
@@ -1,6 +1,7 @@
module TRGKit
using TensorKit, LinearAlgebra
using LoggingExtras, Printf
using Zygote, OptimKit

# stop criteria
include("utility/stopping.jl")
@@ -15,13 +16,15 @@ include("schemes/hotrg.jl")
include("schemes/gilt.jl")
include("schemes/gilt-tnr.jl")
include("schemes/atrg.jl")
include("schemes/slooptnr.jl")

export TRGScheme
export TRG, trg_convcrit
export BTRG, btrg_convcrit
export HOTRG, hotrg_convcrit
export ATRG, atrg_convcrit
export GILTTNR, gilttnr_convcrit
export SLoopTNR

export run!

69 changes: 69 additions & 0 deletions src/schemes/slooptnr.jl
@@ -0,0 +1,69 @@
# TODO: rewrite SLoopTNR contractions to work with symmetric tensors
mutable struct SLoopTNR <: TRGScheme
T::TensorMap

optimization_algorithm::OptimKit.OptimizationAlgorithm
finalize!::Function
function SLoopTNR(T::TensorMap;
optimization_algorithm=LBFGS(8; verbosity=1, maxiter=500,
gradtol=1e-4), finalize=finalize!)
@assert scalartype(T) <: Real "SLoopTNR only supports real-valued TensorMaps"
return new(T, optimization_algorithm, finalize)
end
end

function step!(scheme::SLoopTNR, trunc::TensorKit.TruncationScheme)
f(A) = _SLoopTNR_cost(permute(scheme.T, ((1, 2), (4, 3))), A) # Another convention was used when implementing SLoopTNR

function fg(f, A)
f, g = Zygote.withgradient(f, A)
return f, g[1]
end

Zygote.refresh()

U, S, _ = tsvd(permute(scheme.T, ((1, 2), (4, 3))); trunc=trunc)
S₀ = U * sqrt(S)
if norm(imag(S)) > 1e-12
@error "S is not real"
end
S_opt, _, _, _, _ = optimize(A -> fg(f, A), S₀, scheme.optimization_algorithm)

@tensor scheme.T[-1 -2; -4 -3] := S_opt[1 2 -3] * S_opt[1 4 -1] * S_opt[3 4 -2] *
S_opt[3 2 -4]
end

function ψAψA(T::AbstractTensorMap)
@tensor M[-1 -2 -3 -4] := T[1 -2 2 -4] * conj(T[1 -1 2 -3])
@tensor MM[-1 -2 -3 -4] := M[-1 -2 1 2] * M[-3 -4 1 2]
return @tensor MM[1 2 3 4] * MM[1 2 3 4]
end

function ψAψB(T::AbstractTensorMap, S::AbstractTensorMap)
@tensor M[-1 -2 -3 -4] := T[1 -2 2 -4] * conj(S[1 -1 3]) * conj(S[2 -3 3])
@tensor MM[-1 -2 -3 -4] := M[-1 -2 1 2] * M[-3 -4 1 2]
@tensor result = MM[1 2 3 4] * MM[1 2 3 4]
if norm(imag(result)) > 1e-12
@error "We only support real tensors"
end
return result
end

function ψBψB(S::AbstractTensorMap)
@tensor M[-1 -2 -3 -4] := S[1 -1 3] * conj(S[1 -2 4]) * S[2 -3 3] * conj(S[2 -4 4])
@tensor MM[-1 -2 -3 -4] := M[-1 -2 1 2] * M[-3 -4 1 2]
return @tensor MM[1 2 3 4] * MM[1 2 3 4] # This seems very bad for complex numbers
end

function _SLoopTNR_cost(T::AbstractTensorMap, S::AbstractTensorMap)
return ψAψA(T) - 2 * real(ψAψB(T, S)) + ψBψB(S)
end

slooptnr_convcrit(steps::Int, data) = abs(log(data[end]) * 2.0^(-steps))

function Base.show(io::IO, scheme::SLoopTNR)
println(io, "SLoopTNR - Symmetric Loop TNR")
println(io, " * T: $(summary(scheme.T))")
return println(io,
" * Optimization algorithm: $(summary(scheme.optimization_algorithm))")
end
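
For context: `step!` builds an initial `S₀` from a truncated SVD of `T` and then minimizes `_SLoopTNR_cost` with the chosen OptimKit algorithm. The cost is the squared distance ‖|ψ_A⟩ − |ψ_B⟩‖² = ⟨ψ_A|ψ_A⟩ − 2 Re⟨ψ_A|ψ_B⟩ + ⟨ψ_B|ψ_B⟩ between the loop of four `T` tensors and the loop built from the optimized `S` tensors, which is exactly the sum of the three contractions above. A minimal usage sketch follows; it is not part of the commit and assumes the `classical_ising` helper and `Ising_βc` constant from the test suite shown further down.

```julia
# Hypothetical usage sketch (assumes `classical_ising` and `Ising_βc` from
# the test helpers; not part of this commit).
using TRGKit, TensorKit, OptimKit

T = classical_ising(Ising_βc)  # real-valued Ising partition-function tensor

# Same settings as the constructor's default optimizer; pass your own to tune it.
alg = LBFGS(8; verbosity=1, maxiter=500, gradtol=1e-4)
scheme = SLoopTNR(T; optimization_algorithm=alg)

# 25 coarse-graining steps at bond dimension 8; `data` collects one
# normalization factor per step.
data = run!(scheme, truncdim(8), maxiter(25))
```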
2 changes: 1 addition & 1 deletion src/utility/finalize.jl
@@ -1,4 +1,4 @@
const simple_scheme = Union{TRG,GILTTNR}
const simple_scheme = Union{TRG,GILTTNR,SLoopTNR}
const turning_scheme = Union{HOTRG,ATRG}

# 1x1 unitcell finalize
16 changes: 16 additions & 0 deletions test/ising.jl
@@ -85,3 +85,19 @@ end
relerror = abs((fs - f_onsager) / f_onsager)
@test relerror < 2e-6
end

# SLoopTNR
@testset "SLoopTNR - Ising Model" begin
scheme = SLoopTNR(classical_ising(Ising_βc)) # SLoopTNR is not compatible yet with symmetric tensors
data = run!(scheme, truncdim(8), maxiter(25))

lnz = 0
for (i, d) in enumerate(data)
lnz += log(d) * 2.0^(1 - i)
end

fs = -lnz / Ising_βc

relerror = abs((fs - f_onsager) / f_onsager)
@test relerror < 1e-5
end
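
For reference, the accumulation in this test is the usual TRG bookkeeping: successive normalization factors contribute with geometrically halving weight to the log-partition-function per site, so

```latex
\ln Z = \sum_{i \geq 1} 2^{\,1-i} \, \ln d_i,
\qquad
f = -\frac{\ln Z}{\beta_c},
```

which is then compared against Onsager's exact `f_onsager` with a relative-error tolerance of 1e-5.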
