Add a wrapper for Krylov.jl and the block-GMRES implementation in ExaPF.jl #73

Open · wants to merge 13 commits into base: master
6 changes: 5 additions & 1 deletion Project.toml
@@ -6,6 +6,8 @@ version = "0.3.3"
[deps]
ExaPF = "0cf0e50c-a82e-488f-ac7e-41ffdff1b8aa"
KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
Krylov = "ba0b0d4f-ebba-5204-a429-3ac8c609bfb7"
KrylovPreconditioners = "45d422c2-293f-44ce-8315-2cb988662dec"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
MadNLP = "2621e9c9-9eb4-46b1-8089-e8c72242dfb6"
MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"
@@ -21,8 +23,10 @@ CUSOLVERRF = "a8cc9031-bad2-4722-94f5-40deabb4245c"
ArgosCUDAExt = ["CUDA", "CUSOLVERRF"]

[compat]
ExaPF = "~0.9.3"
ExaPF = "0.11"
KernelAbstractions = "0.9"
Krylov = "0.9.5"
KrylovPreconditioners = "0.2.1"
MadNLP = "0.7"
MathOptInterface = "1"
NLPModels = "0.19, 0.20"
10 changes: 10 additions & 0 deletions benchmark/Project.toml
@@ -0,0 +1,10 @@
[deps]
Argos = "ef244971-cf80-42b0-9762-2c2c832df5d5"
ArgosCUDA = "8946db8d-321b-4174-84ad-48e2f9b69c56"
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
ExaPF = "0cf0e50c-a82e-488f-ac7e-41ffdff1b8aa"
KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
MadNLP = "2621e9c9-9eb4-46b1-8089-e8c72242dfb6"
MadNLPGPU = "d72a61cc-809d-412f-99be-fd81f4b8a598"
MadNLPHSL = "7fb6135f-58fe-4112-84ca-653cf5be0c77"
NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
4 changes: 2 additions & 2 deletions scripts/kkt/benchmark_kkt.jl
@@ -22,8 +22,8 @@ end
cases = [
"case118.m",
"case1354pegase.m",
"case2869pegase.m",
"case9241pegase.m",
# "case2869pegase.m",
# "case9241pegase.m",
]


5 changes: 5 additions & 0 deletions src/Argos.jl
@@ -12,10 +12,15 @@ const PS = PowerSystem
import MathOptInterface
const MOI = MathOptInterface

import Krylov
import KrylovPreconditioners
const KP = KrylovPreconditioners

using NLPModels
using MadNLP

include("utils.jl")
include("krylov.jl")
include("reduction.jl")
include("Evaluators/Evaluators.jl")

26 changes: 15 additions & 11 deletions src/KKT/reduced_newton.jl
@@ -1,3 +1,4 @@
abstract type AbstractSchurKKTSystem{T, VT, MT} <: MadNLP.AbstractReducedKKTSystem{T, VT, MT, MadNLP.ExactHessian{T, VT}} end

"""
BieglerKKTSystem{T, VI, VT, MT, SMT} <: MadNLP.AbstractReducedKKTSystem{T, VT, MT}
@@ -40,7 +41,7 @@ in the reduction algorithm.
[PSSMA2022] Pacaud, François, Sungho Shin, Michel Schanen, Daniel Adrian Maldonado, and Mihai Anitescu. "Condensed interior-point methods: porting reduced-space approaches on GPU hardware." arXiv preprint arXiv:2203.11875 (2022).

"""
struct BieglerKKTSystem{T, VI, VT, MT, SMT} <: MadNLP.AbstractReducedKKTSystem{T, VT, MT, MadNLP.ExactHessian{T, VT}}
struct BieglerKKTSystem{T, VI, VT, MT, SMT, LA} <: AbstractSchurKKTSystem{T, VT, MT}
K::HJDJ{VI,VT,SMT}
Wref::SMT
W::SMT
@@ -61,8 +62,7 @@ struct BieglerKKTSystem{T, VI, VT, MT, SMT} <: MadNLP.AbstractReducedKKTSystem{T
# Reduced KKT system
aug_com::MT
reduction::AbstractReduction
# Pivot
G_fac::LinearAlgebra.Factorization
linear_solver::LA
# Buffers
_wxu1::VT
_wxu2::VT
@@ -85,7 +85,11 @@ struct BieglerKKTSystem{T, VI, VT, MT, SMT} <: MadNLP.AbstractReducedKKTSystem{T
etc::Dict{Symbol,Any}
end

function BieglerKKTSystem{T, VI, VT, MT}(nlp::OPFModel, ind_cons=MadNLP.get_index_constraints(nlp); max_batches=256) where {T, VI, VT, MT}
function BieglerKKTSystem{T, VI, VT, MT}(
nlp::OPFModel,
ind_cons=MadNLP.get_index_constraints(nlp);
max_batches=256,
) where {T, VI, VT, MT}
n_slack = length(ind_cons.ind_ineq)
n = NLPModels.get_nvar(nlp)
m = NLPModels.get_ncon(nlp)
@@ -128,8 +132,8 @@ function BieglerKKTSystem{T, VI, VT, MT}(nlp::OPFModel, ind_cons=MadNLP.get_inde
du_diag = VT(undef, m) ; fill!(du_diag, zero(T))

nbatches = min(max_batches, nu)
linear_solver = LS.DirectSolver(Gx; nrhs=nbatches)
Gxi = linear_solver.factorization
# Instantiate the Krylov wrapper.
Gxi = KrylovWrapper{T, VT, MT}(Gx, nx, nbatches)
S = ImplicitSensitivity(Gxi, Gu)
reduction = if nbatches > 1
BatchReduction(evaluator.model, S, nx, nu, nbatches)
@@ -166,7 +170,7 @@ function BieglerKKTSystem{T, VI, VT, MT}(nlp::OPFModel, ind_cons=MadNLP.get_inde
# Buffers
etc = Dict{Symbol, Any}(:reduction_time=>0.0, :cond=>Float64[])

return BieglerKKTSystem{T, VI, VT, MT, SMT}(
return BieglerKKTSystem{T, VI, VT, MT, SMT, typeof(Gxi)}(
K, Wref, W, J, A, Gx, Gu, mapA, mapGx, mapGu,
h_V, j_V,
pr_diag, du_diag,
@@ -300,8 +304,8 @@ function MadNLP.compress_jacobian!(kkt::BieglerKKTSystem)
copy_index!(nonzeros(kkt.Gu), Jv, kkt.mapGu)
copy_index!(nonzeros(kkt.A), Jv, kkt.mapA)

Gxi = kkt.G_fac
lu!(Gxi, kkt.Gx)
# Update the block-Jacobi preconditioner
KP.update!(kkt.linear_solver.preconditioner, kkt.Gx)

fixed!(nonzeros(kkt.Gu), kkt.ind_Gu_fixed, 0.0)
fixed!(nonzeros(kkt.A), kkt.ind_A_fixed, 0.0)
@@ -380,7 +384,7 @@ function MadNLP.solve_refine_wrapper!(
khu = view(kh, nx+1:nx+nu)

# Gₓ⁻¹
Gxi = kkt.G_fac
Gxi = kkt.linear_solver
Gx = kkt.Gx
Gu = kkt.Gu
K = kkt.K
@@ -435,7 +439,7 @@ function MadNLP.solve_refine_wrapper!(
mul!(kh, K, dxu) # Kₓₓ dₓ + Kₓᵤ dᵤ
axpy!(-1.0, khx, dλ) # tₓ - Kₓₓ dₓ + Kₓᵤ dᵤ

# TODO: SEGFAULT
# TODO: SEGFAULT with CUDA 12.*
ldiv!(Gxi', dλ) # dₗ = Gₓ⁻ᵀ(tₓ - Kₓₓ dₓ + Kₓᵤ dᵤ)

# (2) Extract Condensed
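Note on the change above: the direct factorization `G_fac` (previously refreshed with `lu!` at every Jacobian update) is replaced by the iterative `linear_solver` field, whose block-Jacobi preconditioner is the only object rebuilt per iteration; the batched reduction can then apply Gₓ⁻¹ to several columns of Gᵤ at once through the block-GMRES path. A minimal sketch of that flow, using only calls that appear in this diff (`kkt` and `rhs` are assumed placeholders):

# Per-iteration update, sketched:
KP.update!(kkt.linear_solver.preconditioner, kkt.Gx)   # replaces lu!(Gxi, kkt.Gx)
# Forward and adjoint solves with the state Jacobian now run GMRES / block-GMRES:
ldiv!(kkt.linear_solver, rhs)    # rhs ← Gₓ⁻¹ rhs (vector or matrix right-hand side)
ldiv!(kkt.linear_solver', rhs)   # rhs ← Gₓ⁻ᵀ rhs, as in solve_refine_wrapper!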
47 changes: 47 additions & 0 deletions src/krylov.jl
@@ -0,0 +1,47 @@
using KernelAbstractions

# Wraps the state Jacobian Gx together with two preconditioned Krylov solvers:
# an ExaPF block-GMRES solver for matrix (multiple) right-hand sides and a
# Krylov.jl GMRES solver for vector (single) right-hand sides, sharing one
# block-Jacobi preconditioner from KrylovPreconditioners.jl.
struct KrylovWrapper{T, SMT, K1, K2, P}
Gx::SMT
solver1::K1        # ExaPF.BlockGmresSolver (multiple right-hand sides)
solver2::K2        # Krylov.GmresSolver (single right-hand side)
preconditioner::P  # KP.BlockJacobiPreconditioner
end

# Build a wrapper for the n-by-n Jacobian Gx, sized for up to p simultaneous
# right-hand sides; the block-Jacobi preconditioner uses 32 partitions.
function KrylovWrapper{T, VT, MT}(Gx, n, p) where {T, VT, MT}
solver1 = ExaPF.BlockGmresSolver(n, n, p, 1, VT, MT)
solver2 = Krylov.GmresSolver(n, n, 1, VT)
device = KernelAbstractions.get_backend(solver1.X)
preconditioner = KP.BlockJacobiPreconditioner(Gx, 32, device)
return KrylovWrapper{T, typeof(Gx), typeof(solver1), typeof(solver2), typeof(preconditioner)}(Gx, solver1, solver2, preconditioner)
end

Base.size(kw::KrylovWrapper) = size(kw.Gx)
Base.size(kw::KrylovWrapper, dim::Integer) = size(kw.Gx, dim)
# Lazy-adjoint
LinearAlgebra.adjoint(kw::KrylovWrapper{T}) where T = LinearAlgebra.Adjoint{T, typeof(kw)}(kw)

function LinearAlgebra.ldiv!(kw::KrylovWrapper{T}, x::AbstractVector{T}) where T
Krylov.solve!(kw.solver2, kw.Gx, x; N=kw.preconditioner, verbose=1, atol=1e-8, rtol=0.0)
copyto!(x, kw.solver2.x)
return x
end

function LinearAlgebra.ldiv!(akw::LinearAlgebra.Adjoint{T, KW}, x::AbstractVector{T}) where {T, KW<:KrylovWrapper{T}}
kw = parent(akw)
Krylov.solve!(kw.solver2, kw.Gx', x; N=kw.preconditioner, verbose=1, atol=1e-8, rtol=0.0)
copyto!(x, kw.solver2.x)
return x
end

function LinearAlgebra.ldiv!(kw::KrylovWrapper{T}, X::AbstractMatrix{T}) where T
ExaPF.block_gmres!(kw.solver1, kw.Gx, X; N=kw.preconditioner, verbose=1, atol=1e-8, rtol=0.0)
copyto!(X, kw.solver1.X)
return X
end

function LinearAlgebra.ldiv!(akw::LinearAlgebra.Adjoint{T, KW}, X::AbstractMatrix{T}) where {T, KW<:KrylovWrapper{T}}
kw = parent(akw)
ExaPF.block_gmres!(kw.solver1, kw.Gx', X; N=kw.preconditioner, verbose=1, atol=1e-8, rtol=0.0)
copyto!(X, kw.solver1.X)
return X
end
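For reference, a minimal usage sketch of the new wrapper (not part of the diff). It assumes `Gx` is the nx-by-nx state Jacobian and `nbatches` the number of batched right-hand sides, exactly as set up in the `BieglerKKTSystem` constructor above:

using LinearAlgebra
Gxi = KrylovWrapper{Float64, Vector{Float64}, Matrix{Float64}}(Gx, nx, nbatches)

b = rand(nx)                # single right-hand side -> GMRES path
ldiv!(Gxi, b)               # overwrites b with Gx \ b
ldiv!(Gxi', b)              # adjoint solve: overwrites b with Gx' \ b

B = rand(nx, nbatches)      # multiple right-hand sides -> block-GMRES path
ldiv!(Gxi, B)               # overwrites B with Gx \ B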
86 changes: 86 additions & 0 deletions test/kkt.jl
@@ -0,0 +1,86 @@

using Test
using LazyArtifacts
using LinearAlgebra

using MadNLP
using NLPModels
using Argos

const INSTANCES_DIR = joinpath(artifact"ExaData", "ExaData")

function test_biegler_kkt(casename)
datafile = joinpath(INSTANCES_DIR, casename)
nlp = Argos.FullSpaceEvaluator(datafile)
opf = Argos.OPFModel(nlp)

# Init OPF structure
x0 = NLPModels.get_x0(opf)
nnzj = NLPModels.get_nnzj(opf)
jac = zeros(nnzj)
NLPModels.jac_coord!(opf, x0, jac)

# Problem's dimensions
nx = nlp.nx
nu = nlp.nu
n, m = NLPModels.get_nvar(opf), NLPModels.get_ncon(opf)
ind_cons = MadNLP.get_index_constraints(opf)
n_ineq = length(ind_cons.ind_ineq)

# Build reference KKT system
T = Float64
VI = Vector{Int}
VT = Vector{Float64}
MT = Matrix{Float64}

# Build KKT system
KKT = Argos.BieglerKKTSystem{T, VI, VT, MT}
kkt = KKT(opf, ind_cons)
MadNLP.initialize!(kkt)
MadNLP.build_kkt!(kkt)

# Test sizes are matching.
@test size(kkt.Wref) == (n, n)
@test length(kkt.pr_diag) == n + n_ineq
@test size(kkt.Gx) == (nx, nx)
@test size(kkt.Gu) == (nx, nu)
@test size(kkt.A) == (n_ineq, nx + nu)
@test size(kkt.aug_com) == (nu, nu)

@test kkt.Wref === nlp.hess.H

# Test with MadNLP
madnlp_options = Dict{Symbol, Any}(
:lapack_algorithm=>MadNLP.CHOLESKY,
:linear_solver=>LapackCPUSolver,
)
opt_ipm, opt_linear, logger = MadNLP.load_options(; madnlp_options...)
solver = MadNLP.MadNLPSolver{T, KKT}(opf, opt_ipm, opt_linear; logger=logger)

@test isa(solver.kkt, KKT)
# Test building KKT within MadNLP.
MadNLP.initialize!(solver.kkt)
MadNLP.build_kkt!(solver.kkt)

# Load non-trivial values in KKT system.
MadNLP.eval_jac_wrapper!(solver, solver.kkt, solver.x)
MadNLP.eval_grad_f_wrapper!(solver, solver.f, solver.x)
MadNLP.set_initial_rhs!(solver, solver.kkt)

# Solve KKT system.
MadNLP.factorize_wrapper!(solver)
MadNLP.solve_refine_wrapper!(solver, solver.d, solver.p)

# Test solution is correct
p_ref = solver.p.values[1:n+n_ineq+m]
d = solver.d.values[1:n+n_ineq+m]
p = zeros(length(d))
mul!(p, solver.kkt, d)
@test isapprox(p, p_ref)
return
end

@testset "BieglerKKTSystem $case" for case in ["case9.m", "case30.m", "case118.m", "case300.m", "case2869pegase.m", "case1354pegase.m"]
test_biegler_kkt(case)
end
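A minimal way to exercise just this file, assuming an environment where Argos, MadNLP, NLPModels, LazyArtifacts and the ExaData artifact are already available (e.g. the package's test environment):

julia> include("test/kkt.jl")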
