From 02e0c930722f45bd522972a150431c9f8520ab02 Mon Sep 17 00:00:00 2001
From: tmigot
Date: Thu, 24 Nov 2022 09:04:04 -0500
Subject: [PATCH] Update docs (#125)

---
 docs/Project.toml               |  8 +++
 docs/make.jl                    |  2 +-
 docs/src/gradient-lbfgs.md      | 88 ++++++++++++++++++++-------------
 test/examples/gradient-lbfgs.jl |  2 +-
 4 files changed, 64 insertions(+), 36 deletions(-)

diff --git a/docs/Project.toml b/docs/Project.toml
index b5aa6c15..6c396b21 100644
--- a/docs/Project.toml
+++ b/docs/Project.toml
@@ -1,4 +1,12 @@
 [deps]
+ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a"
 Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
+Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
+Stopping = "c4fe5a9e-e7fb-5c3d-89d5-7f405ab2214f"
+
+[compat]
+ADNLPModels = "0.4"
+Documenter = "0.27"
+NLPModels = "0.19"

diff --git a/docs/make.jl b/docs/make.jl
index 4c306ef4..6f04c68a 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -45,5 +45,5 @@ makedocs(
 # Documenter can also automatically deploy documentation to gh-pages.
 # See "Hosting Documentation" and deploydocs() in the Documenter manual
 # for more information.
-deploydocs(repo = "github.com/SolverStoppingJulia/Stopping.jl")
+deploydocs(repo = "github.com/SolverStoppingJulia/Stopping.jl", push_preview = true)
 #https://juliadocs.github.io/Documenter.jl/stable/man/hosting/ ?

diff --git a/docs/src/gradient-lbfgs.md b/docs/src/gradient-lbfgs.md
index dd8ddb4b..74861cc9 100644
--- a/docs/src/gradient-lbfgs.md
+++ b/docs/src/gradient-lbfgs.md
@@ -1,7 +1,7 @@
 ## Mixed-algorithms: a ListofStates tutorial
 
-We illustrate here the use of `ListofStates` in dealing with a warm start
-procedure. The full code of this tutorial can be found [here](https://github.com/SolverStoppingJulia/Stopping.jl/blob/master/test/examples/gradient-lbfgs.jl).
+We illustrate here the use of `ListofStates` in dealing with a warm start procedure.
+The full code of this tutorial can be found [here](https://github.com/SolverStoppingJulia/Stopping.jl/blob/master/test/examples/gradient-lbfgs.jl).
 
 `ListofStates` is designed to store the history of the iteration process.
 In this tutorial, we compare the resolution of a convex unconstrained problem with 3 variants:
@@ -9,13 +9,14 @@ In this tutorial, we compare the resolution of a convex unconstrained problem wi
 - an inverse-BFGS method
 - a mix of 5 steps of steepest descent and then switching to a BFGS initialized with the 5 previous steps.
 
-```
-using Stopping, NLPModels, LinearAlgebra, Test, Printf
+```@example ex1
+using Stopping, ADNLPModels, NLPModels, LinearAlgebra, Printf
 ```
 
-First, we introduce our two implementations that both uses an backtracking Armijo linesearch.
+First, we define a steepest descent method and a BFGS quasi-Newton method, both using an elementary backtracking Armijo linesearch.
 
-```
+```@example ex1
 import Stopping.armijo
 function armijo(xk, dk, fk, slope, f)
   t = 1.0
   fk_new = f(xk + t * dk)
   while fk_new > fk + 1.0e-4 * t * slope
     t /= 1.5
     fk_new = f(xk + t * dk)
   end
@@ -27,7 +28,7 @@
   return t, fk_new
 end
 
-function steepest_descent(stp :: NLPStopping)
+function steepest_descent(stp::NLPStopping)
 
   xk = stp.current_state.x
   fk, gk = objgrad(stp.pb, xk)
@@ -50,16 +51,13 @@
   return stp
 end
 
-function bfgs_quasi_newton_armijo(stp :: NLPStopping; Hk = nothing)
+function bfgs_quasi_newton_armijo(stp::NLPStopping; Hk = I)
 
   xk = stp.current_state.x
   fk, gk = objgrad(stp.pb, xk)
   gm = gk
   dk, t = similar(gk), 1.
-  if isnothing(Hk)
-    Hk = I #start from identity matrix
-  end
 
   OK = update_and_start!(stp, fx = fk, gx = gk)
@@ -89,47 +87,70 @@
     OK = update_and_stop!(stp, x = xk, fx = fk, gx = gk)
     @printf "%2d %7.1e %7.1e %7.1e %7.1e\n" stp.meta.nb_of_stop fk norm(stp.current_state.current_score) t slope
   end
-  stp.stopping_user_struct = Dict( :Hk => Hk)
+  stp.stopping_user_struct = Dict(:Hk => Hk)
   return stp
 end
 ```
 
 We consider the following convex unconstrained problem model using `ADNLPModels.jl` and define a related `NLPStopping`.
 
-```
-fH(x) = (x[2]+x[1].^2-11).^2+(x[1]+x[2].^2-7).^2
+```@example ex1
+fH(x) = (x[2] + x[1] .^ 2 - 11) .^ 2 + (x[1] + x[2] .^ 2 - 7) .^ 2
 nlp = ADNLPModel(fH, [10., 20.])
 
-stp = NLPStopping(nlp, optimality_check = unconstrained_check,
-                  atol = 1e-6, rtol = 0.0, max_iter = 100)
+stp = NLPStopping(
+  nlp,
+  optimality_check = unconstrained_check,
+  atol = 1e-6,
+  rtol = 0.0,
+  max_iter = 100,
+)
 ```
 
-```
+Our first elementary runs use the steepest descent method and the quasi-Newton method separately to solve the problem.
+
+## Steepest descent
+
+```@example ex1
 reinit!(stp, rstate = true, x = nlp.meta.x0)
 steepest_descent(stp)
-@test status(stp) == :Optimal
-@test stp.listofstates == VoidListofStates()
+(status(stp), elapsed_time(stp), get_list_of_states(stp), neval_obj(nlp), neval_grad(nlp))
+```
 
-@show elapsed_time(stp)
-@show nlp.counters
+## BFGS quasi-Newton
 
+```@example ex1
 reinit!(stp, rstate = true, x = nlp.meta.x0, rcounters = true)
 bfgs_quasi_newton_armijo(stp)
-@test status(stp) == :Optimal
-@test stp.listofstates == VoidListofStates()
+(status(stp), elapsed_time(stp), get_list_of_states(stp), neval_obj(nlp), neval_grad(nlp))
+```
 
-@show elapsed_time(stp)
-@show nlp.counters
+## Mix of Algorithms
 
+```@example ex1
 NLPModels.reset!(nlp)
-stp_warm = NLPStopping(nlp, optimality_check = unconstrained_check,
-                       atol = 1e-6, rtol = 0.0, max_iter = 5,
-                       n_listofstates = 5) #shortcut for list = ListofStates(5, Val{NLPAtX{Float64,Array{Float64,1},Array{Float64,2}}}()))
+stp_warm = NLPStopping(
+  nlp,
+  optimality_check = unconstrained_check,
+  atol = 1e-6,
+  rtol = 0.0,
+  max_iter = 5,
+  n_listofstates = 5, # shortcut for list = ListofStates(5, Val{NLPAtX{Float64,Array{Float64,1},Array{Float64,2}}}())
+)
+get_list_of_states(stp_warm)
+```
+
+```@example ex1
 steepest_descent(stp_warm)
-@test status(stp_warm) == :IterationLimit
-@test length(stp_warm.listofstates) == 5
+status(stp_warm) # :IterationLimit
+```
+
+```@example ex1
+length(get_list_of_states(stp_warm)) # 5
+```
 
+```@example ex1
 Hwarm = I
 for i=2:5
   sk = stp_warm.listofstates.list[i][1].x - stp_warm.listofstates.list[i-1][1].x
   yk = stp_warm.listofstates.list[i][1].gx - stp_warm.listofstates.list[i-1][1].gx
   ρk = 1 / dot(yk, sk)
   if ρk > 0.0
     global Hwarm = (I - ρk * sk * yk') * Hwarm * (I - ρk * yk * sk') + ρk * sk * sk'
   end
 end
+```
 
+```@example ex1
 reinit!(stp_warm)
 stp_warm.meta.max_iter = 100
 bfgs_quasi_newton_armijo(stp_warm, Hk = Hwarm)
-status(stp_warm)
-
-@show elapsed_time(stp_warm)
-@show nlp.counters
-```
\ No newline at end of file
+(status(stp_warm), elapsed_time(stp_warm), get_list_of_states(stp_warm), neval_obj(nlp), neval_grad(nlp))
+```

diff --git a/test/examples/gradient-lbfgs.jl b/test/examples/gradient-lbfgs.jl
index 347e7364..793b9445 100644
--- a/test/examples/gradient-lbfgs.jl
+++ b/test/examples/gradient-lbfgs.jl
@@ -14,7 +14,7 @@
 #the history (using the strength of the ListofStates).
 # ###############################################################################
 
-using Stopping, NLPModels, LinearAlgebra, Test, Printf
+using Stopping, NLPModels, LinearAlgebra, Printf
 
 import Stopping.armijo
 
 function armijo(xk, dk, fk, slope, f)
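
---

Note: to make the warm-start step easier to follow, below is a minimal, self-contained sketch of the inverse-BFGS update used in the `Hwarm` loop of the patched tutorial, `H ← (I - ρ s yᵀ) H (I - ρ y sᵀ) + ρ s sᵀ` with `ρ = 1 / (yᵀ s)`. It is illustrative only: it does not use Stopping.jl, and `bfgs_demo`, `armijo_step`, and the quadratic data `A`, `b` are hypothetical names introduced for this sketch, not part of the package.

```julia
using LinearAlgebra

# A small strictly convex quadratic, for illustration only:
# f(x) = 1/2 xᵀAx - bᵀx, so ∇f(x) = Ax - b and the minimizer solves Ax = b.
A = [4.0 1.0; 1.0 3.0]
b = [1.0, 2.0]
f(x) = 0.5 * dot(x, A * x) - dot(b, x)
grad(x) = A * x - b

# Backtracking Armijo linesearch, mirroring the tutorial's `armijo`.
function armijo_step(x, d, fx, slope)
  t = 1.0
  while f(x + t * d) > fx + 1.0e-4 * t * slope
    t /= 1.5
  end
  return t
end

# Inverse-BFGS iteration: H approximates the INVERSE Hessian, so the search
# direction is simply -H * g (no linear solve), and each pair (sk, yk) is
# folded in with the same update formula as the warm-start loop above.
function bfgs_demo(x0; maxiter = 50, tol = 1.0e-10)
  x = copy(x0)
  H = Matrix(1.0I, length(x0), length(x0)) # identity start, as in the tutorial
  g = grad(x)
  for _ in 1:maxiter
    d = -H * g
    t = armijo_step(x, d, f(x), dot(g, d))
    xnew = x + t * d
    gnew = grad(xnew)
    sk, yk = xnew - x, gnew - g
    if dot(sk, yk) > 0.0 # curvature condition; skip the update otherwise
      ρk = 1 / dot(sk, yk)
      H = (I - ρk * sk * yk') * H * (I - ρk * yk * sk') + ρk * sk * sk'
    end
    x, g = xnew, gnew
    norm(g) ≤ tol && break
  end
  return x
end

xsol = bfgs_demo([10.0, 20.0])
norm(xsol - A \ b) # ≈ 0: the iterate matches the direct solve
```

On a strictly convex quadratic, `dot(sk, yk) > 0` always holds, but for general problems the curvature condition can fail, which is why the tutorial's warm-start loop guards its update with `if ρk > 0.0`.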