Skip to content

Commit

Permalink
Define common problem combos
Browse files Browse the repository at this point in the history
  • Loading branch information
dpo committed May 27, 2024
1 parent 698f055 commit bc6a282
Show file tree
Hide file tree
Showing 13 changed files with 119 additions and 23 deletions.
3 changes: 2 additions & 1 deletion Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,8 @@ DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa"
MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458"
ProximalOperators = "a725b495-10eb-56fe-b38b-717eba820537"
QuadraticModels = "f468eda6-eac5-11e8-05a5-ff9e497bcd19"
ShiftedProximalOperators = "d4fd37fa-580c-4e43-9b30-361c21aae263"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[targets]
test = ["ADNLPModels", "DifferentialEquations", "MLDatasets", "ProximalOperators", "QuadraticModels", "Test"]
test = ["ADNLPModels", "DifferentialEquations", "MLDatasets", "ProximalOperators", "QuadraticModels", "ShiftedProximalOperators", "Test"]
11 changes: 11 additions & 0 deletions src/RegularizedProblems.jl
Original file line number Diff line number Diff line change
Expand Up @@ -18,15 +18,26 @@ function __init__()
include("testset_bpdn.jl")
include("testset_lrcomp.jl")
include("testset_matrand.jl")
include("testset_nnmf.jl")
end
@require ShiftedProximalOperators = "d4fd37fa-580c-4e43-9b30-361c21aae263" begin
include("testset_group_lasso.jl")
end
@require ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a" begin
@require DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" begin
include("fh_model.jl")
@require ProximalOperators = "a725b495-10eb-56fe-b38b-717eba820537" begin
include("testset_fh.jl")
end
end
end
@require MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458" begin
include("nonlin_svm_model.jl")
@require ProximalOperators = "a725b495-10eb-56fe-b38b-717eba820537" begin
@require ShiftedProximalOperators = "d4fd37fa-580c-4e43-9b30-361c21aae263" begin
include("testset_svm.jl")
end
end
end
@require QuadraticModels = "f468eda6-eac5-11e8-05a5-ff9e497bcd19" begin
include("qp_rand_model.jl")
Expand Down
4 changes: 2 additions & 2 deletions src/bpdn_model.jl
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,8 @@ bpdn_data(compound::Int = 1, args...; bounds::Bool = false) =
bpdn_data(200 * compound, 512 * compound, 10 * compound, args...; bounds = bounds)

"""
model, nls_model, sol = bpdn_model(args...)
model, nls_model, sol = bpdn_model(compound = 1, args...)
model, nls_model, sol = bpdn_model(args...; kwargs...)
model, nls_model, sol = bpdn_model(compound = 1, args...; kwargs...)
Return an instance of an `NLPModel` and an instance of an `NLSModel` representing
the same basis-pursuit denoise problem, i.e., the under-determined linear
Expand Down
8 changes: 4 additions & 4 deletions src/qp_rand_model.jl
Original file line number Diff line number Diff line change
Expand Up @@ -2,17 +2,17 @@ export qp_rand_model
using .QuadraticModels

"""
model = qp_rand_model(n; dens = 1.0e-4, convex = false)
model = qp_rand_model(n = 100_000; dens = 1.0e-4, convex = false)
Return an instance of a `QuadraticModel` representing
min cᵀx + ½ xᵀHx s.t. l ≤ x ≤ u,
with H = A + A' or H = A * A' (see the `convex` keyword argument) where A is a random square matrix with density `dens`, `l = -e - tₗ` and `u = e + tᵤ` where `e` is the vector of ones, and `tₗ` and `tᵤ` are sampled from a uniform distribution between 0 and 1.
with H = A + A' or H = A * A' (see the `convex` keyword argument) where A is a random square matrix with density `dens`, `l = -e - tₗ` and `u = e + tᵤ` where `e` is the vector of ones, and `tₗ` and `tᵤ` are sampled from a uniform distribution between 0 and 1.
## Arguments
* `n :: Int`: size of the problem,
* `n :: Int`: size of the problem (default: `100_000`).
## Keyword arguments
Expand All @@ -23,7 +23,7 @@ with H = A + A' or H = A * A' (see the `convex` keyword argument) where A is a r
An instance of a `QuadraticModel`.
"""
function qp_rand_model(n::Int; dens::R = 1.0e-4, convex::Bool = false) where {R <: Real}
function qp_rand_model(n::Int = 100_000; dens::R = 1.0e-4, convex::Bool = false) where {R <: Real}
@assert 0 < dens 1
A = sprandn(R, n, n, dens)
H = convex ? (A * A') : (A + A')
Expand Down
12 changes: 6 additions & 6 deletions src/testset_bpdn.jl
Original file line number Diff line number Diff line change
Expand Up @@ -2,21 +2,21 @@
export setup_bpdn_l0, setup_bpdn_l1, setup_bpdn_B0

function setup_bpdn_l0(args...; kwargs...)
model, nls_model, _ = bpdn_model(args...)
model, nls_model, _ = bpdn_model(args...; kwargs...)
λ = norm(grad(model, zeros(model.meta.nvar)), Inf) / 10
h = NormL0(λ)
h = ProximalOperators.NormL0(λ)
return RegularizedNLPModel(model, h), RegularizedNLSModel(nls_model, h)
end

function setup_bpdn_l1(args...; kwargs...)
model, nls_model, _ = bpdn_model(args...)
model, nls_model, _ = bpdn_model(args...; kwargs...)
λ = norm(grad(model, zeros(model.meta.nvar)), Inf) / 10
h = NormL1(λ)
h = ProximalOperators.NormL1(λ)
return RegularizedNLPModel(model, h), RegularizedNLSModel(nls_model, h)
end

function setup_bpdn_B0(compound = 1, args...; kwargs...)
model, nls_model, _ = bpdn_model(compound, args...)
h = IndBallL0(10 * compound)
model, nls_model, _ = bpdn_model(compound, args...; kwargs...)
h = ProximalOperators.IndBallL0(10 * compound)
return RegularizedNLPModel(model, h), RegularizedNLSModel(nls_model, h)
end
14 changes: 14 additions & 0 deletions src/testset_fh.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
# Predefine a set of common problem instances.
export setup_fh_l0, setup_fh_l1

function setup_fh_l0(; kwargs...)
  # Build the Fitzhugh-Nagumo NLP/NLS pair and attach an L0-norm regularizer
  # with weight 1.0. Keyword arguments are forwarded to `fh_model`.
  nlp, nls, _ = fh_model(; kwargs...)
  reg = ProximalOperators.NormL0(1.0)
  rnlp = RegularizedNLPModel(nlp, reg)
  rnls = RegularizedNLSModel(nls, reg)
  return rnlp, rnls
end

function setup_fh_l1(; kwargs...)
  # Build the Fitzhugh-Nagumo NLP/NLS pair and attach an L1-norm regularizer
  # with weight 10.0. Keyword arguments are forwarded to `fh_model`.
  nlp, nls, _ = fh_model(; kwargs...)
  reg = ProximalOperators.NormL1(10.0)
  rnlp = RegularizedNLPModel(nlp, reg)
  rnls = RegularizedNLSModel(nls, reg)
  return rnlp, rnls
end
3 changes: 1 addition & 2 deletions src/testset_group_lasso.jl
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@ function setup_group_lasso_l12(args...; kwargs...)
model, nls_model, ng, _, idx = group_lasso_model(; kwargs...)
idx = [idx[i, :] for i = 1:ng]
λ = 0.2 * ones(ng)
h = GroupNormL2(λ, idx)
h = ShiftedProximalOperators.GroupNormL2(λ, idx)
return RegularizedNLPModel(model, h), RegularizedNLSModel(nls_model, h)
end

8 changes: 4 additions & 4 deletions src/testset_lrcomp.jl
Original file line number Diff line number Diff line change
Expand Up @@ -2,15 +2,15 @@
export setup_lrcomp_rank, setup_lrcomp_nuclear

function setup_lrcomp_rank(args...; kwargs...)
model, nls_model, _ = lrcomp_model(args...; kwargs...)
model, nls_model, _ = lrcomp_model(; kwargs...)
λ = 0.1
h = Rank(λ)
h = ProximalOperators.Rank(λ)
return RegularizedNLPModel(model, h), RegularizedNLSModel(nls_model, h)
end

function setup_lrcomp_nuclear(args...; kwargs...)
model, nls_model, _ = lrcomp_model(args...; kwargs...)
model, nls_model, _ = lrcomp_model(; kwargs...)
λ = 0.1
h = NuclearNorm(λ)
h = ProximalOperators.NuclearNorm(λ)
return RegularizedNLPModel(model, h), RegularizedNLSModel(nls_model, h)
end
8 changes: 4 additions & 4 deletions src/testset_matrand.jl
Original file line number Diff line number Diff line change
Expand Up @@ -5,27 +5,27 @@ export setup_mit_completion_rank, setup_mit_completion_nuclear
function setup_random_completion_rank(args...; kwargs...)
model, nls_model, _ = random_matrix_completion_model(; kwargs...)
λ = 0.1
h = Rank(λ)
h = ProximalOperators.Rank(λ)
return RegularizedNLPModel(model, h), RegularizedNLSModel(nls_model, h)
end

function setup_random_completion_nuclear(args...; kwargs...)
model, nls_model, _ = random_matrix_completion_model(; kwargs...)
λ = 0.1
h = NuclearNorm(λ)
h = ProximalOperators.NuclearNorm(λ)
return RegularizedNLPModel(model, h), RegularizedNLSModel(nls_model, h)
end

function setup_mit_completion_rank(args...; kwargs...)
model, nls_model, _ = MIT_matrix_completion_model()
λ = 0.1
h = Rank(λ)
h = ProximalOperators.Rank(λ)
return RegularizedNLPModel(model, h), RegularizedNLSModel(nls_model, h)
end

function setup_mit_completion_nuclear(args...; kwargs...)
model, nls_model, _ = MIT_matrix_completion_model()
λ = 0.1
h = NuclearNorm(λ)
h = ProximalOperators.NuclearNorm(λ)
return RegularizedNLPModel(model, h), RegularizedNLSModel(nls_model, h)
end
16 changes: 16 additions & 0 deletions src/testset_nnmf.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# Predefine a set of common problem instances.
export setup_nnmf_l0, setup_nnmf_l1

function setup_nnmf_l0(args...; kwargs...)
  # Build an NNMF instance with an L0-norm regularizer on the `selected` variables.
  # Fix: forward `kwargs...` to `nnmf_model` — they were previously accepted by
  # this function but silently dropped, unlike the other setup_* constructors.
  # (Forwarding an empty kwargs set is harmless if `nnmf_model` takes none —
  # NOTE(review): confirm `nnmf_model` accepts keyword arguments.)
  model, nls_model, _, selected = nnmf_model(args...; kwargs...)
  # Scale λ from the gradient magnitude at a random nonnegative point.
  λ = norm(grad(model, rand(model.meta.nvar)), Inf) / 200
  h = ProximalOperators.NormL0(λ)
  return RegularizedNLPModel(model, h, selected), RegularizedNLSModel(nls_model, h, selected)
end

function setup_nnmf_l1(args...; kwargs...)
  # Build an NNMF instance with an L1-norm regularizer on the `selected` variables.
  # Fix: forward `kwargs...` to `nnmf_model` — they were previously accepted by
  # this function but silently dropped, unlike the other setup_* constructors.
  # (Forwarding an empty kwargs set is harmless if `nnmf_model` takes none —
  # NOTE(review): confirm `nnmf_model` accepts keyword arguments.)
  model, nls_model, _, selected = nnmf_model(args...; kwargs...)
  # Scale λ from the gradient magnitude at a random nonnegative point.
  λ = norm(grad(model, rand(model.meta.nvar)), Inf) / 100_000
  h = ProximalOperators.NormL1(λ)
  return RegularizedNLPModel(model, h, selected), RegularizedNLSModel(nls_model, h, selected)
end
9 changes: 9 additions & 0 deletions src/testset_qp_rand.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# Predefine a set of common problem instances.
export setup_qp_rand_l1

function setup_qp_rand_l1(args...; kwargs...)
  # Build a random bound-constrained QP with an L1-norm regularizer.
  # Fix: `qp_rand_model` returns a single `QuadraticModel` (see its docstring:
  # `model = qp_rand_model(n; ...)`), so the previous three-way destructuring
  # `model, nls_model, _ = qp_rand_model(...)` could not succeed. A QP has no
  # least-squares counterpart, so only a regularized NLP is constructed here.
  model = qp_rand_model(args...; kwargs...)
  λ = 0.1
  h = ProximalOperators.NormL1(λ)
  return RegularizedNLPModel(model, h)
end
39 changes: 39 additions & 0 deletions src/testset_svm.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
# Predefine a set of common problem instances.
export setup_svm_train_lhalf,
setup_svm_test_lhalf, setup_svm_train_l0, setup_svm_test_l0, setup_svm_train_l1, setup_svm_test_l1

function setup_svm_train_lhalf(args...; kwargs...)
  # SVM training problem with a RootNormLhalf (L^{1/2}) regularizer, weight 0.1.
  # Fix: forward `kwargs...` to `svm_train_model` — previously accepted but
  # silently discarded. (Harmless if `svm_train_model` takes no kwargs —
  # NOTE(review): confirm its keyword interface.)
  model, nls_model, _ = svm_train_model(args...; kwargs...)
  h = ShiftedProximalOperators.RootNormLhalf(0.1)
  return RegularizedNLPModel(model, h), RegularizedNLSModel(nls_model, h)
end

function setup_svm_test_lhalf(args...; kwargs...)
  # SVM test problem with a RootNormLhalf (L^{1/2}) regularizer, weight 0.1.
  # Fix: forward `kwargs...` to `svm_test_model` — previously accepted but
  # silently discarded. (Harmless if `svm_test_model` takes no kwargs —
  # NOTE(review): confirm its keyword interface.)
  model, nls_model, _ = svm_test_model(args...; kwargs...)
  h = ShiftedProximalOperators.RootNormLhalf(0.1)
  return RegularizedNLPModel(model, h), RegularizedNLSModel(nls_model, h)
end

function setup_svm_train_l0(args...; kwargs...)
  # SVM training problem with an L0-norm regularizer, weight 0.1.
  # Fix: forward `kwargs...` to `svm_train_model` — previously accepted but
  # silently discarded. (Harmless if `svm_train_model` takes no kwargs —
  # NOTE(review): confirm its keyword interface.)
  model, nls_model, _ = svm_train_model(args...; kwargs...)
  h = ProximalOperators.NormL0(0.1)
  return RegularizedNLPModel(model, h), RegularizedNLSModel(nls_model, h)
end

function setup_svm_test_l0(args...; kwargs...)
  # SVM test problem with an L0-norm regularizer, weight 0.1.
  # Fix: forward `kwargs...` to `svm_test_model` — previously accepted but
  # silently discarded. (Harmless if `svm_test_model` takes no kwargs —
  # NOTE(review): confirm its keyword interface.)
  model, nls_model, _ = svm_test_model(args...; kwargs...)
  h = ProximalOperators.NormL0(0.1)
  return RegularizedNLPModel(model, h), RegularizedNLSModel(nls_model, h)
end

function setup_svm_train_l1(args...; kwargs...)
  # SVM training problem with an L1-norm regularizer, weight 0.1.
  # Fix: forward `kwargs...` to `svm_train_model` — previously accepted but
  # silently discarded. (Harmless if `svm_train_model` takes no kwargs —
  # NOTE(review): confirm its keyword interface.)
  model, nls_model, _ = svm_train_model(args...; kwargs...)
  h = ProximalOperators.NormL1(0.1)
  return RegularizedNLPModel(model, h), RegularizedNLSModel(nls_model, h)
end

function setup_svm_test_l1(args...; kwargs...)
  # SVM test problem with an L1-norm regularizer, weight 0.1.
  # Fix: forward `kwargs...` to `svm_test_model` — previously accepted but
  # silently discarded. (Harmless if `svm_test_model` takes no kwargs —
  # NOTE(review): confirm its keyword interface.)
  model, nls_model, _ = svm_test_model(args...; kwargs...)
  h = ProximalOperators.NormL1(0.1)
  return RegularizedNLPModel(model, h), RegularizedNLSModel(nls_model, h)
end
7 changes: 7 additions & 0 deletions test/rmodel_tests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -16,3 +16,10 @@ using ProximalOperators
obj(nls_model, nls_model.meta.x0)
@test neval_obj(rlsmodel) == neval_obj(nls_model)
end

@testset "Problem combos" begin
# Test that we can at least instantiate the models
rnlp, rnls = setup_bpdn_l0()
@test isa(rnlp, RegularizedNLPModel)
@test isa(rnls, RegularizedNLSModel)
end

0 comments on commit bc6a282

Please sign in to comment.