Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Improve test stability #152

Merged
merged 10 commits into from
Jun 14, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 19 additions & 14 deletions src/algorithms/grassmann.jl
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,9 @@ function TensorKit.rmul!(a::Grassmann.GrassmannTangent, b::AbstractTensorMap)
Base.setfield!(a, :V, nothing)
return a
end
function Base.:/(a::Grassmann.GrassmannTangent, b::AbstractTensorMap)
return Grassmann.GrassmannTangent(a.W, a.Z / b)
end

# preconditioned gradient
struct PrecGrad{A,B}
Expand All @@ -33,7 +36,8 @@ end
function PrecGrad(v::Grassmann.GrassmannTangent)
return PrecGrad(v, v, isometry(storagetype(v.Z), domain(v.Z), domain(v.Z)))
end
PrecGrad(v::Grassmann.GrassmannTangent, rho) = PrecGrad(rmul!(copy(v), inv(rho)), v, rho)
PrecGrad(v::Grassmann.GrassmannTangent, rho) = PrecGrad(v / rho, v, rho)

Grassmann.base(g::PrecGrad) = Grassmann.base(g.Pg)

function inner(g1::PrecGrad, g2::PrecGrad, rho=one(g1.rho))
Expand Down Expand Up @@ -72,8 +76,9 @@ function ManifoldPoint(state::Union{InfiniteMPS,FiniteMPS}, envs)
g = Grassmann.project.(al_d, state.AL)

Rhoreg = Vector{eltype(state.CR)}(undef, length(state))
δmin = sqrt(eps(real(scalartype(state))))
for i in 1:length(state)
Rhoreg[i] = regularize(state.CR[i], norm(g[i]) / 10)
Rhoreg[i] = regularize(state.CR[i], max(norm(g[i]) / 10, δmin))
end

return ManifoldPoint(state, envs, g, Rhoreg)
Expand All @@ -85,16 +90,16 @@ function ManifoldPoint(state::MPSMultiline, envs)
g = [Grassmann.project(d, a) for (d, a) in zip(ac_d, state.AL)]

f = expectation_value(state, envs)
fi = imag.(f)
fr = real.(f)

sum(fi) > MPSKit.Defaults.tol && @warn "mpo is not hermitian $fi"
sum(imag(f)) > MPSKit.Defaults.tol && @warn "MPO might not be Hermitian $f"

g = -2 * g ./ abs.(fr)
# actual costfunction is F = -log(sum(f)^2) => ∂F = -2 * g / |sum(f)|
# TODO: check if elementwise thing is correct?
g .*= (-2 ./ abs.(real(f)))

Rhoreg = similar(state.CR)
δmin = sqrt(eps(real(scalartype(state))))
for (i, cg) in enumerate(g)
Rhoreg[i] = regularize(state.CR[i], norm(cg) / 10)
Rhoreg[i] = regularize(state.CR[i], max(norm(cg) / 10, δmin))
end

return ManifoldPoint(state, envs, g, Rhoreg)
Expand All @@ -112,21 +117,21 @@ function fg(x::ManifoldPoint{T}) where {T<:Union{InfiniteMPS,FiniteMPS}}
g_prec[i] = PrecGrad(rmul!(copy(x.g[i]), x.state.CR[i]'), x.Rhoreg[i])
end

f = real(sum(expectation_value(x.state, x.envs)))
f = sum(expectation_value(x.state, x.envs))
isapprox(imag(f), 0; atol=eps(abs(f))^(3 / 4)) || @warn "MPO might not be Hermitian: $f"

return f, g_prec
return real(f), g_prec
end
function fg(x::ManifoldPoint{<:MPSMultiline})
# the gradient I want to return is the preconditioned gradient!
g_prec = map(enumerate(x.g)) do (i, cg)
return PrecGrad(rmul!(copy(cg), x.state.CR[i]'), x.Rhoreg[i])
end

f = expectation_value(x.state, x.envs)
fi = imag.(f)
fr = real.(f)
f = sum(expectation_value(x.state, x.envs))
isapprox(imag(f), 0; atol=eps(abs(f))^(3 / 4)) || @warn "MPO might not be Hermitian: $f"

return -log(sum(fr)^2), g_prec[:]
return -log(sum(real(f))^2), g_prec[:]
end

"""
Expand Down
14 changes: 7 additions & 7 deletions src/utility/defaults.jl
Original file line number Diff line number Diff line change
Expand Up @@ -16,16 +16,16 @@ const VERBOSE_ITER = 3
const VERBOSE_ALL = 4

const eltype = ComplexF64
const maxiter = 100
const tolgauge = 1e-14
const tol = 1e-12
const maxiter = 200
const tolgauge = 1e-13
const tol = 1e-10
const verbosity = VERBOSE_ITER
const dynamic_tols = true
const tol_min = 1e-14
const tol_max = 1e-5
const eigs_tolfactor = 1e-5
const gauge_tolfactor = 1e-8
const envs_tolfactor = 1e-5
const tol_max = 1e-4
const eigs_tolfactor = 1e-3
const gauge_tolfactor = 1e-6
const envs_tolfactor = 1e-4

_finalize(iter, state, opp, envs) = (state, envs)

Expand Down
68 changes: 30 additions & 38 deletions test/algorithms.jl
Original file line number Diff line number Diff line change
Expand Up @@ -157,122 +157,114 @@ end

@testset "LazySum FiniteMPS groundstate" verbose = true begin
tol = 1e-8
D = 20
D = 15
atol = 1e-2

local_operators = [TensorMap(randn, ComplexF64, ℙ^2 ⊗ ℙ^2 ← ℙ^2 ⊗ ℙ^2) for i in 1:3]
local_operators .+= adjoint.(local_operators)
# test using XXZ model, Δ > 1 is gapped
local_operators = [S_xx(), S_yy(), 0.7 * S_zz()]
mpo_hamiltonians = MPOHamiltonian.(local_operators)

H_lazy = LazySum(mpo_hamiltonians)
H = sum(H_lazy)

@testset "DMRG" begin
ψ₀ = FiniteMPS(randn, ComplexF64, 10, ℙ^2, ℙ^D)
ψ₀ = FiniteMPS(randn, ComplexF64, 10, ℂ^3, ℂ^D)
ψ₀, = find_groundstate(ψ₀, H; tol, verbosity=1)

@testset "DMRG" begin
# test logging passes
ψ, envs, δ = find_groundstate(ψ₀, H_lazy,
DMRG(; tol, verbosity=5, maxiter=1))

# compare states
alg = DMRG(; tol, verbosity=1)
ψ_lazy, envs, δ = find_groundstate(ψ₀, H_lazy, alg)
ψ, = find_groundstate(ψ₀, H, alg)

@test abs(dot(ψ, ψ_lazy)) ≈ 1
@test abs(dot(ψ, ψ_lazy)) ≈ 1 atol = atol
end

@testset "DMRG2" begin
ψ₀ = FiniteMPS(randn, ComplexF64, 10, ℙ^2, ℙ^D)

# test logging passes
trscheme = truncdim(12)
ψ, envs, δ = find_groundstate(ψ₀, H_lazy,
DMRG2(; tol, verbosity=5, maxiter=1, trscheme))

# compare states
alg = DMRG2(; tol, verbosity=1, trscheme)
ψ_lazy, envs, δ = find_groundstate(ψ₀, H_lazy, alg)
ψ, = find_groundstate(ψ₀, H, alg)
ψ_lazy, envs, δ = find_groundstate(ψ₀, H_lazy, alg)

@test abs(dot(ψ, ψ_lazy)) ≈ 1
@test abs(dot(ψ, ψ_lazy)) ≈ 1 atol = atol
end

@testset "GradientGrassmann" begin
ψ₀ = FiniteMPS(randn, ComplexF64, 10, ℙ^2, ℙ^D)

# test logging passes
ψ, envs, δ = find_groundstate(ψ₀, H_lazy,
GradientGrassmann(; tol, verbosity=5, maxiter=2))

# compare states
alg = GradientGrassmann(; tol, verbosity=1)
ψ_lazy, envs, δ = find_groundstate(ψ₀, H_lazy, alg)
ψ, = find_groundstate(ψ₀, H, alg)
ψ_lazy, envs, δ = find_groundstate(ψ₀, H_lazy, alg)

@test abs(dot(ψ, ψ_lazy)) ≈ 1
@test abs(dot(ψ, ψ_lazy)) ≈ 1 atol = atol
end
end

@testset "LazySum InfiniteMPS groundstate" verbose = true begin
tol = 1e-8
D = 6
D = 15
atol = 1e-2

local_operators = [TensorMap(randn, ComplexF64, ℙ^2 ⊗ ℙ^2 ← ℙ^2 ⊗ ℙ^2) for i in 1:3]
local_operators .+= adjoint.(local_operators)
# test using XXZ model, Δ > 1 is gapped
local_operators = [S_xx(), S_yy(), (0.7) * S_zz()]
mpo_hamiltonians = MPOHamiltonian.(local_operators)

H_lazy = LazySum(mpo_hamiltonians)
H = sum(H_lazy)

@testset "VUMPS" begin
ψ₀ = InfiniteMPS(ℙ^2, ℙ^D)
ψ₀ = InfiniteMPS(ℂ^3, ℂ^D)
ψ₀, = find_groundstate(ψ₀, H; tol, verbosity=1)

@testset "VUMPS" begin
# test logging passes
ψ, envs, δ = find_groundstate(ψ₀, H_lazy, VUMPS(; tol, verbosity=5, maxiter=2))

# compare states
alg = VUMPS(; tol, verbosity=1)
alg = VUMPS(; tol, verbosity=2)
ψ_lazy, envs, δ = find_groundstate(ψ₀, H_lazy, alg)
ψ, = find_groundstate(ψ₀, H, alg)

@test abs(dot(ψ, ψ_lazy)) ≈ 1
@test abs(dot(ψ, ψ_lazy)) ≈ 1 atol = atol
end

@testset "IDMRG1" begin
ψ₀ = InfiniteMPS(ℙ^2, ℙ^D)

# test logging passes
ψ, envs, δ = find_groundstate(ψ₀, H_lazy, IDMRG1(; tol, verbosity=5, maxiter=2))

# compare states
alg = IDMRG1(; tol, verbosity=1)
alg = IDMRG1(; tol, verbosity=2)
ψ_lazy, envs, δ = find_groundstate(ψ₀, H_lazy, alg)
ψ, = find_groundstate(ψ₀, H, alg)

@test abs(dot(ψ, ψ_lazy)) ≈ 1
@test abs(dot(ψ, ψ_lazy)) ≈ 1 atol = atol
end

@testset "IDMRG2" begin
ψ₀ = repeat(InfiniteMPS(ℙ^2, ℙ^D), 2)
ψ₀ = repeat(ψ₀, 2)
H_lazy′ = repeat(H_lazy, 2)
H′ = repeat(H, 2)

trscheme = truncdim(12)
trscheme = truncdim(D)
# test logging passes
ψ, envs, δ = find_groundstate(ψ₀, H_lazy′,
ψ, envs, δ = find_groundstate(ψ₀, H_lazy′,
IDMRG2(; tol, verbosity=5, maxiter=2, trscheme))

# compare states
alg = IDMRG2(; tol, verbosity=1, trscheme)
ψ_lazy, envs, δ = find_groundstate(ψ₀, H_lazy′, alg)
ψ, = find_groundstate(ψ₀, H′, alg)
alg = IDMRG2(; tol, verbosity=2, trscheme)
ψ_lazy, envs, δ = find_groundstate(ψ₀′, H_lazy′, alg)

@test abs(dot(ψ, ψ_lazy)) ≈ 1
@test abs(dot(ψ₀′, ψ_lazy)) ≈ 1 atol = atol
end

@testset "GradientGrassmann" begin
ψ₀ = InfiniteMPS(ℙ^2, ℙ^D)

# test logging passes
ψ, envs, δ = find_groundstate(ψ₀, H_lazy,
GradientGrassmann(; tol, verbosity=5, maxiter=2))
Expand All @@ -282,7 +274,7 @@ end
ψ_lazy, envs, δ = find_groundstate(ψ₀, H_lazy, alg)
ψ, = find_groundstate(ψ₀, H, alg)

@test abs(dot(ψ, ψ_lazy)) ≈ 1
@test abs(dot(ψ, ψ_lazy)) ≈ 1 atol = atol
end
end

Expand Down
32 changes: 32 additions & 0 deletions test/setup.jl
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ using TensorKit: PlanarTrivial, ℙ
using LinearAlgebra: Diagonal

# exports
export S_xx, S_yy, S_zz
export force_planar
export transverse_field_ising, heisenberg_XXX, bilinear_biquadratic_model
export classical_ising, finite_classical_ising, sixvertex
Expand All @@ -35,6 +36,37 @@ force_planar(mpo::DenseMPO) = DenseMPO(force_planar.(mpo.opp))
# Toy models
# ----------------------------

function S_xx(::Type{Trivial}=Trivial, ::Type{T}=ComplexF64; spin=1) where {T<:Number}
X = if spin == 1 // 2
TensorMap(T[0 1; 1 0], ℂ^2 ← ℂ^2)
elseif spin == 1
TensorMap(T[0 1 0; 1 0 1; 0 1 0], ℂ^3 ← ℂ^3) / sqrt(2)
else
throw(ArgumentError("spin $spin not supported"))
end
return X ⊗ X
end
function S_yy(::Type{Trivial}=Trivial, ::Type{T}=ComplexF64; spin=1) where {T<:Number}
Y = if spin == 1 / 2
TensorMap(T[0 -im; im 0], ℂ^2 ← ℂ^2)
elseif spin == 1
TensorMap(T[0 -im 0; im 0 -im; 0 im 0], ℂ^3 ← ℂ^3) / sqrt(2)
else
throw(ArgumentError("spin $spin not supported"))
end
return Y ⊗ Y
end
function S_zz(::Type{Trivial}=Trivial, ::Type{T}=ComplexF64; spin=1) where {T<:Number}
Z = if spin == 1 // 2
TensorMap(T[1 0; 0 -1], ℂ^2 ← ℂ^2)
elseif spin == 1
TensorMap(T[1 0 0; 0 0 0; 0 0 -1], ℂ^3 ← ℂ^3)
else
throw(ArgumentError("spin $spin not supported"))
end
return Z ⊗ Z
end

function transverse_field_ising(; g=1.0)
X = TensorMap(ComplexF64[0 1; 1 0], ℂ^2 ← ℂ^2)
Z = TensorMap(ComplexF64[1 0; 0 -1], ℂ^2 ← ℂ^2)
Expand Down
Loading