Use NLPModelsTest instead of ADNLPModels #84

Merged: 1 commit, Jun 24, 2024
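The pattern of the change: every ad hoc ADNLPModels problem in the test suite is replaced by a predefined problem from NLPModelsTest (HS10, HS14, BROWNDEN, BNDROSENBROCK). A minimal sketch of how such problems are used, assuming NLPModelsTest exports these constructors and that they implement the standard NLPModels API:

```julia
# Sketch only: BROWNDEN and HS10 are test problems shipped with NLPModelsTest;
# the printed values are illustrative and not part of this PR.
using NLPModels, NLPModelsTest

nlp_unc = BROWNDEN()    # unconstrained test problem
nlp_con = HS10()        # test problem with one constraint

x0 = nlp_con.meta.x0    # starting point from the problem metadata
fx = obj(nlp_con, x0)   # the NLPModels API works directly on NLPModelsTest problems
cx = cons(nlp_con, x0)
println("f(x0) = ", fx, ", c(x0) = ", cx)
```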
4 changes: 2 additions & 2 deletions Project.toml
@@ -12,10 +12,10 @@ NLPModels = "0.15, 0.16, 0.17, 0.18, 0.19, 0.20, 0.21"
 julia = "^1.6"
 
 [extras]
-ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 Logging = "56ddb016-857b-54e1-b83d-db4d58db5568"
+NLPModelsTest = "7998695d-6960-4d3a-85c4-e1bceb8cd856"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 
 [targets]
-test = ["ADNLPModels", "LinearAlgebra", "Logging", "Test"]
+test = ["LinearAlgebra", "Logging", "NLPModelsTest", "Test"]
4 changes: 2 additions & 2 deletions test/runtests.jl
@@ -2,14 +2,14 @@
 using SolverCore
 
 # Auxiliary packages
-using ADNLPModels, NLPModels
+using NLPModels, NLPModelsTest
 
 # stdlib
 using LinearAlgebra, Logging, Test
 
 if VERSION ≥ v"1.6"
   @testset "Test allocations of solver specific" begin
-    nlp = ADNLPModel(x -> sum(x), ones(2))
+    nlp = BROWNDEN()
     stats = GenericExecutionStats(nlp) # stats = GenericExecutionStats(nlp, solver_specific = Dict{Symbol, Bool}())
     function fake_solver(stats)
       set_solver_specific!(stats, :test, true)
3 changes: 1 addition & 2 deletions test/test_callback.jl
@@ -1,6 +1,5 @@
 @testset "test callback" begin
-  nlp =
-    ADNLPModel(x -> dot(x, x) / 2, ones(2), x -> [sum(x .^ 3) - 1], [0.0], [0.0], name = "linquad")
+  nlp = HS10()
   callback(nlp, solver, stats) = begin
     if stats.iter ≥ 3
       set_status!(stats, :user)
9 changes: 2 additions & 7 deletions test/test_logging.jl
@@ -1,9 +1,5 @@
 function test_logging()
-  nlps = [ADNLPModel(x -> sum(x .^ k), ones(2k), name = "Sum of power $k") for k = 2:4]
-  push!(
-    nlps,
-    ADNLPModel(x -> dot(x, x), ones(2), x -> [sum(x) - 1], [0.0], [0.0], name = "linquad"),
-  )
+  nlp = HS10()
 
   @info "Testing logger"
   log_header([:col_float, :col_int, :col_symbol, :col_string], [Float64, Int, Symbol, String])
@@ -12,8 +8,7 @@ function test_logging()
 
   with_logger(ConsoleLogger()) do
     @info "Testing dummy solver with logger"
-    SolverCore.dummy_solver(nlps[1], max_eval = 20)
-    reset!.(nlps)
+    SolverCore.dummy_solver(nlp, max_eval = 20)
   end
 end
 
4 changes: 2 additions & 2 deletions test/test_restart.jl
@@ -1,5 +1,5 @@
 @testset "test restart" begin
-  nlp = ADNLPModel(x -> dot(x, x) / 2, ones(2), x -> [sum(x .^ 3) - 1], [0.0], [0.0])
+  nlp = HS10()
   solver = SolverCore.DummySolver(nlp)
   stats = GenericExecutionStats(nlp)
   solve!(solver, nlp, stats, verbose = false)
@@ -10,7 +10,7 @@
   solve!(solver, nlp, stats, verbose = false)
   @test stats.status == :first_order
   # Try with a new problem of the same size
-  nlp = ADNLPModel(x -> dot(x, x) / 2, ones(2), x -> [sum(x .^ 3)], [0.0], [0.0])
+  nlp = HS10()
   reset!(solver, nlp)
   solve!(solver, nlp, stats, verbose = false)
   @test stats.status == :first_order
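Note that both constructions in the restart test now yield the same HS10 problem, whereas the original test built two slightly different ADNLPModel instances; the test only needs a second problem of the same dimensions to exercise reset!(solver, nlp).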
29 changes: 7 additions & 22 deletions test/test_stats.jl
@@ -1,6 +1,6 @@
 function test_stats()
   show_statuses()
-  nlp = ADNLPModel(x -> dot(x, x), zeros(2))
+  nlp = HS10()
   stats = GenericExecutionStats(nlp)
   set_status!(stats, :first_order)
   set_objective!(stats, 1.0)
@@ -36,7 +36,7 @@ function test_stats()
 
   @testset "Testing inference" begin
     for T in (Float16, Float32, Float64, BigFloat)
-      nlp = ADNLPModel(x -> dot(x, x), ones(T, 2))
+      nlp = BROWNDEN(T)
 
       stats = GenericExecutionStats(nlp)
       set_status!(stats, :first_order)
@@ -46,7 +46,7 @@ function test_stats()
       @test typeof(stats.dual_feas) == T
       @test typeof(stats.primal_feas) == T
 
-      nlp = ADNLPModel(x -> dot(x, x), ones(T, 2), x -> [sum(x) - 1], T[0], T[0])
+      nlp = HS14(T)
 
       stats = GenericExecutionStats(nlp)
       set_status!(stats, :first_order)
@@ -78,7 +78,7 @@ function test_stats()
 
   @testset "Testing Dummy Solver with multi-precision" begin
     for T in (Float16, Float32, Float64, BigFloat)
-      nlp = ADNLPModel(x -> dot(x, x), ones(T, 2))
+      nlp = HS10(T)
       solver = SolverCore.DummySolver(nlp)
 
       stats = with_logger(NullLogger()) do
@@ -92,21 +92,6 @@
       @test eltype(stats.multipliers_L) == T
       @test eltype(stats.multipliers_U) == T
 
-      nlp = ADNLPModel(x -> dot(x, x), ones(T, 2), x -> [sum(x) - 1], T[0], T[0])
-      solver = SolverCore.DummySolver(nlp)
-      stats = GenericExecutionStats(nlp)
-
-      with_logger(NullLogger()) do
-        solve!(solver, nlp, stats)
-      end
-      @test typeof(stats.objective) == T
-      @test typeof(stats.dual_feas) == T
-      @test typeof(stats.primal_feas) == T
-      @test eltype(stats.solution) == T
-      @test eltype(stats.multipliers) == T
-      @test eltype(stats.multipliers_L) == T
-      @test eltype(stats.multipliers_U) == T
-
       stats = GenericExecutionStats{T, Vector{T}, Vector{T}, Any}()
       reset!(stats, nlp)
       with_logger(NullLogger()) do
@@ -124,7 +109,7 @@
 
   @testset "Test stats setters" begin
     T = Float64
-    nlp = ADNLPModel(x -> dot(x, x), ones(T, 2), x -> [sum(x) - 1], T[0], T[0])
+    nlp = HS14(T)
     stats = GenericExecutionStats(nlp)
     fields = (
       "status",
@@ -173,7 +158,7 @@ end
 test_stats()
 
 @testset "Test get_status" begin
-  nlp = ADNLPModel(x -> sum(x), ones(2))
+  nlp = BROWNDEN()
   @test get_status(nlp, optimal = true) == :first_order
   @test get_status(nlp, small_residual = true) == :small_residual
   @test get_status(nlp, infeasible = true) == :infeasible
@@ -191,7 +176,7 @@ test_stats()
 end
 
 @testset "Test get_status for NLS" begin
-  nlp = ADNLSModel(x -> x, ones(2), 2)
+  nlp = BNDROSENBROCK()
   @test get_status(nlp, optimal = true) == :first_order
   @test get_status(nlp, small_residual = true) == :small_residual
   @test get_status(nlp, infeasible = true) == :infeasible
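The inference and multi-precision testsets rely on the NLPModelsTest constructors accepting an element type, as in BROWNDEN(T), HS14(T) and HS10(T) above. A rough sketch of that usage, assuming these constructors build the problem data in precision T (the printed values are illustrative only):

```julia
using NLPModels, NLPModelsTest

for T in (Float16, Float32, Float64, BigFloat)
  nlp = HS14(T)           # constrained test problem built in precision T (assumed constructor)
  x0 = nlp.meta.x0        # starting point should carry the requested element type
  println(T, ": eltype(x0) = ", eltype(x0), ", obj(x0) = ", obj(nlp, x0))
end
```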