diff --git a/src/R2_alg.jl b/src/R2_alg.jl
index e7098f8b..da20a32f 100644
--- a/src/R2_alg.jl
+++ b/src/R2_alg.jl
@@ -132,7 +132,7 @@ For advanced usage, first define a solver "R2Solver" to preallocate the memory u
     solver = R2Solver(reg_nlp)
     solve!(solver, reg_nlp)
 
-    stats = GenericExecutionStats(reg_nlp.model)
+    stats = RegularizedExecutionStats(reg_nlp)
     solver = R2Solver(reg_nlp)
     solve!(solver, reg_nlp, stats)
 
@@ -239,7 +239,7 @@ function R2(
     :status => stats.status,
     :fk => stats.solver_specific[:smooth_obj],
     :hk => stats.solver_specific[:nonsmooth_obj],
-    :ξ => stats.solver_specific[:xi],
+    :ξ => stats.dual_feas,
     :elapsed_time => stats.elapsed_time,
   )
   return stats.solution, stats.iter, outdict
@@ -292,7 +292,7 @@ function R2(reg_nlp::AbstractRegularizedNLPModel; kwargs...)
   kwargs_dict = Dict(kwargs...)
   max_iter = pop!(kwargs_dict, :max_iter, 10000)
   solver = R2Solver(reg_nlp, max_iter = max_iter)
-  stats = GenericExecutionStats(reg_nlp.model)
+  stats = GenericExecutionStats(reg_nlp.model) # TODO: change this to `stats = RegularizedExecutionStats(reg_nlp)` when FHist etc. is ruled out.
   cb = pop!(
     kwargs_dict,
     :callback,
@@ -416,7 +416,6 @@ function SolverCore.solve!(
   (ξ < 0 && sqrt_ξ_νInv > neg_tol) &&
     error("R2: prox-gradient step should produce a decrease but ξ = $(ξ)")
 
-  set_solver_specific!(stats, :xi, sqrt_ξ_νInv)
   set_status!(
     stats,
     get_status(
@@ -501,7 +500,6 @@ function SolverCore.solve!(
     (ξ < 0 && sqrt_ξ_νInv > neg_tol) &&
       error("R2: prox-gradient step should produce a decrease but ξ = $(ξ)")
 
-    set_solver_specific!(stats, :xi, sqrt_ξ_νInv)
     set_status!(
       stats,
       get_status(
@@ -540,6 +538,7 @@ function SolverCore.solve!(
   end
 
   set_solution!(stats, xk)
+  set_residuals!(stats, zero(eltype(xk)), sqrt_ξ_νInv)
   return stats
 end
 
diff --git a/src/TRDH_alg.jl b/src/TRDH_alg.jl
index cfe4b3df..57e83155 100644
--- a/src/TRDH_alg.jl
+++ b/src/TRDH_alg.jl
@@ -67,12 +67,12 @@ function TRDH(
     u_bound = nlp.meta.uvar,
     kwargs...,
   )
-  ξ = outdict[:ξ]
+  sqrt_ξ_νInv = outdict[:sqrt_ξ_νInv]
   stats = GenericExecutionStats(nlp)
   set_status!(stats, outdict[:status])
   set_solution!(stats, xk)
   set_objective!(stats, outdict[:fk] + outdict[:hk])
-  set_residuals!(stats, zero(eltype(xk)), ξ)
+  set_residuals!(stats, zero(eltype(xk)), sqrt_ξ_νInv)
   set_iter!(stats, k)
   set_time!(stats, outdict[:elapsed_time])
   set_solver_specific!(stats, :Fhist, outdict[:Fhist])
@@ -362,7 +362,7 @@ function TRDH(
     :status => status,
     :fk => fk,
     :hk => hk,
-    :ξ => sqrt_ξ_νInv,
+    :sqrt_ξ_νInv => sqrt_ξ_νInv,
     :elapsed_time => elapsed_time,
   )
 
diff --git a/src/utils.jl b/src/utils.jl
index b250dcb6..d1b2c028 100644
--- a/src/utils.jl
+++ b/src/utils.jl
@@ -1,3 +1,7 @@
+export RegularizedExecutionStats
+
+import SolverCore.GenericExecutionStats
+
 # use Arpack to obtain largest eigenvalue in magnitude with a minimum of robustness
 function LinearAlgebra.opnorm(B; kwargs...)
   _, s, _ = tsvd(B)
@@ -20,3 +24,18 @@ ShiftedProximalOperators.iprox!(
 
 LinearAlgebra.diag(op::AbstractDiagonalQuasiNewtonOperator) = copy(op.d)
 LinearAlgebra.diag(op::SpectralGradient{T}) where {T} = zeros(T, op.nrow) .* op.d[1]
+
+"""
+    GenericExecutionStats(reg_nlp :: AbstractRegularizedNLPModel{T, V})
+
+Construct a GenericExecutionStats object from an AbstractRegularizedNLPModel.
+More specifically, construct a GenericExecutionStats on the NLPModel of reg_nlp and add two solver_specific entries, namely :smooth_obj and :nonsmooth_obj.
+This is useful for reducing the number of allocations when calling solve!(..., reg_nlp, stats) and should be used by default.
+Warning: This should *not* be used when adding other solver_specific entries that do not have the current scalar type.
+"""
+function RegularizedExecutionStats(reg_nlp :: AbstractRegularizedNLPModel{T, V}) where{T, V}
+  stats = GenericExecutionStats(reg_nlp.model, solver_specific = Dict{Symbol, T}())
+  set_solver_specific!(stats, :smooth_obj, T(Inf))
+  set_solver_specific!(stats, :nonsmooth_obj, T(Inf))
+  return stats
+end
\ No newline at end of file
diff --git a/test/test_allocs.jl b/test/test_allocs.jl
index 62fbd64e..3eb218eb 100644
--- a/test/test_allocs.jl
+++ b/test/test_allocs.jl
@@ -41,7 +41,7 @@ end
   for solver ∈ (:R2Solver,)
     reg_nlp = RegularizedNLPModel(bpdn, h)
     solver = eval(solver)(reg_nlp)
-    stats = GenericExecutionStats(bpdn, solver_specific = Dict{Symbol, Float64}())
+    stats = RegularizedExecutionStats(reg_nlp)
     @test @wrappedallocs(solve!(solver, reg_nlp, stats)) == 0
   end
 end
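
For reference, a minimal usage sketch of the preallocated-stats workflow this patch introduces. It is not part of the patch; `model` and `h` are placeholder names for an NLPModel and a regularizer, playing the roles of `bpdn` and `h` in the test above:

# Sketch only: `model` and `h` stand in for an NLPModel and a nonsmooth regularizer.
reg_nlp = RegularizedNLPModel(model, h)
solver = R2Solver(reg_nlp)
stats = RegularizedExecutionStats(reg_nlp)  # preallocates :smooth_obj and :nonsmooth_obj with scalar type T
solve!(solver, reg_nlp, stats)              # in-place solve; expected not to allocate (see test_allocs.jl)
stats.solver_specific[:smooth_obj] + stats.solver_specific[:nonsmooth_obj]  # final objective f(x) + h(x)
stats.dual_feas                             # stationarity measure sqrt_ξ_νInv, stored via set_residuals! instead of :xi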