diff --git a/previews/PR249/.documenter-siteinfo.json b/previews/PR249/.documenter-siteinfo.json index b13f688..afa563e 100644 --- a/previews/PR249/.documenter-siteinfo.json +++ b/previews/PR249/.documenter-siteinfo.json @@ -1 +1 @@ -{"documenter":{"julia_version":"1.10.1","generation_timestamp":"2024-02-18T11:26:09","documenter_version":"1.2.1"}} \ No newline at end of file +{"documenter":{"julia_version":"1.10.1","generation_timestamp":"2024-02-26T17:52:05","documenter_version":"1.2.1"}} \ No newline at end of file diff --git a/previews/PR249/assets/Manifest.toml b/previews/PR249/assets/Manifest.toml index bbcbcdc..99d2846 100644 --- a/previews/PR249/assets/Manifest.toml +++ b/previews/PR249/assets/Manifest.toml @@ -2,7 +2,7 @@ julia_version = "1.10.1" manifest_format = "2.0" -project_hash = "adfa74819f53813ac8a6bfd6d95095d6b0655cea" +project_hash = "de0d6e45a22e6a4510dc57906c315a0b297eefd6" [[deps.ADTypes]] git-tree-sha1 = "41c37aa88889c171f1300ceac1313c06e891d245" @@ -14,12 +14,6 @@ git-tree-sha1 = "574baf8110975760d391c710b6341da1afa48d8c" uuid = "a4c015fc-c6ff-483c-b24f-f7ea428134e9" version = "0.0.1" -[[deps.ASL_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "6252039f98492252f9e47c312c8ffda0e3b9e78d" -uuid = "ae81ac8f-d209-56e5-92de-9978fef736f9" -version = "0.1.3+0" - [[deps.AbstractFFTs]] deps = ["LinearAlgebra"] git-tree-sha1 = "d92ad398961a3ed262d8bf04a1a2b8340f915fef" @@ -32,15 +26,15 @@ weakdeps = ["ChainRulesCore", "Test"] AbstractFFTsTestExt = "Test" [[deps.AbstractTrees]] -git-tree-sha1 = "faa260e4cb5aba097a73fab382dd4b5819d8ec8c" +git-tree-sha1 = "2d9c9a55f9c93e8887ad391fbae72f8ef55e1177" uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" -version = "0.4.4" +version = "0.4.5" [[deps.Adapt]] deps = ["LinearAlgebra", "Requires"] -git-tree-sha1 = "cde29ddf7e5726c9fb511f340244ea3481267608" +git-tree-sha1 = "0fb305e0253fd4e833d486914367a2ee2c2e78d0" uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" -version = "3.7.2" +version = "4.0.1" weakdeps = ["StaticArrays"] [deps.Adapt.extensions] @@ -144,9 +138,9 @@ weakdeps = ["HTTP", "Sockets"] [[deps.BoundaryValueDiffEq]] deps = ["ADTypes", "Adapt", "ArrayInterface", "BandedMatrices", "ConcreteStructs", "DiffEqBase", "FastAlmostBandedMatrices", "ForwardDiff", "LinearAlgebra", "LinearSolve", "NonlinearSolve", "PreallocationTools", "PrecompileTools", "Preferences", "RecursiveArrayTools", "Reexport", "SciMLBase", "Setfield", "SparseArrays", "SparseDiffTools", "Tricks", "TruncatedStacktraces", "UnPack"] -git-tree-sha1 = "dd234c9a030350d5ff4c45761d6cad0cfb358cb9" +git-tree-sha1 = "3ff968887be48760b0e9e8650c2d05c96cdea9d8" uuid = "764a87c0-6b3e-53db-9096-fe964310641d" -version = "5.6.0" +version = "5.6.3" [deps.BoundaryValueDiffEq.extensions] BoundaryValueDiffEqODEInterfaceExt = "ODEInterface" @@ -209,9 +203,9 @@ version = "1.63.0" [[deps.ChainRulesCore]] deps = ["Compat", "LinearAlgebra"] -git-tree-sha1 = "ad25e7d21ce10e01de973cdc68ad0f850a953c52" +git-tree-sha1 = "aef70bb349b20aa81a82a19704c3ef339d4ee494" uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" -version = "1.21.1" +version = "1.22.1" weakdeps = ["SparseArrays"] [deps.ChainRulesCore.extensions] @@ -287,9 +281,9 @@ version = "0.3.0" [[deps.Compat]] deps = ["TOML", "UUIDs"] -git-tree-sha1 = "75bd5b6fc5089df449b5d35fa501c846c9b6549b" +git-tree-sha1 = "c955881e3c981181362ae4088b35995446298b80" uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" -version = "4.12.0" +version = "4.14.0" weakdeps = ["Dates", "LinearAlgebra"] [deps.Compat.extensions] @@ -356,9 +350,9 @@ version 
= "1.16.0" [[deps.DataStructures]] deps = ["Compat", "InteractiveUtils", "OrderedCollections"] -git-tree-sha1 = "ac67408d9ddf207de5cfa9a97e114352430f01ed" +git-tree-sha1 = "1fb174f0d48fe7d142e1109a10636bc1d14f5ac2" uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" -version = "0.18.16" +version = "0.18.17" [[deps.DataValueInterfaces]] git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6" @@ -371,9 +365,9 @@ uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" [[deps.DelayDiffEq]] deps = ["ArrayInterface", "DataStructures", "DiffEqBase", "LinearAlgebra", "Logging", "OrdinaryDiffEq", "Printf", "RecursiveArrayTools", "Reexport", "SciMLBase", "SimpleNonlinearSolve", "SimpleUnPack"] -git-tree-sha1 = "6725c56e3e3d563e37d8fd5e6c5eb66ac19321fd" +git-tree-sha1 = "10c892dd12113644bd6c7fae520691f8a78032dc" uuid = "bcd4f6db-9728-5f36-b5f7-82caef46ccdb" -version = "5.46.0" +version = "5.47.0" [[deps.DelimitedFiles]] deps = ["Mmap"] @@ -395,9 +389,9 @@ version = "0.1.0+0" [[deps.DiffEqBase]] deps = ["ArrayInterface", "DataStructures", "DocStringExtensions", "EnumX", "EnzymeCore", "FastBroadcast", "ForwardDiff", "FunctionWrappers", "FunctionWrappersWrappers", "LinearAlgebra", "Logging", "Markdown", "MuladdMacro", "Parameters", "PreallocationTools", "PrecompileTools", "Printf", "RecursiveArrayTools", "Reexport", "SciMLBase", "SciMLOperators", "Setfield", "SparseArrays", "Static", "StaticArraysCore", "Statistics", "Tricks", "TruncatedStacktraces"] -git-tree-sha1 = "3089c8295ab6d7c728cd6929121c1b4567457306" +git-tree-sha1 = "2ad3a2dcd5f28f535aa884d199cc2f0a9d335729" uuid = "2b5f629d-d688-5b77-993f-72d75c75574e" -version = "6.147.0" +version = "6.147.1" [deps.DiffEqBase.extensions] DiffEqBaseChainRulesCoreExt = "ChainRulesCore" @@ -543,9 +537,9 @@ version = "1.0.4" [[deps.Enzyme]] deps = ["CEnum", "EnzymeCore", "Enzyme_jll", "GPUCompiler", "LLVM", "Libdl", "LinearAlgebra", "ObjectFile", "Preferences", "Printf", "Random"] -git-tree-sha1 = "090a6f1996d357329bea66f0b1c7621b64d70bb0" +git-tree-sha1 = "16b3a7980cdbf6f036ab46f7e9131afb273f1231" uuid = "7da242da-08ed-463a-9acd-ee780be4f1d9" -version = "0.11.16" +version = "0.11.17" weakdeps = ["SpecialFunctions"] [deps.Enzyme.extensions] @@ -736,15 +730,15 @@ version = "3.3.9+0" [[deps.GPUArrays]] deps = ["Adapt", "GPUArraysCore", "LLVM", "LinearAlgebra", "Printf", "Random", "Reexport", "Serialization", "Statistics"] -git-tree-sha1 = "85d7fb51afb3def5dcb85ad31c3707795c8bccc1" +git-tree-sha1 = "47e4686ec18a9620850bad110b79966132f14283" uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7" -version = "9.1.0" +version = "10.0.2" [[deps.GPUArraysCore]] deps = ["Adapt"] -git-tree-sha1 = "2d6ca471a6c7b536127afccfa7564b5b39227fe0" +git-tree-sha1 = "ec632f177c0d990e64d955ccc1b8c04c485a0950" uuid = "46192b85-c4d5-4398-a991-12ede77f4527" -version = "0.1.5" +version = "0.1.6" [[deps.GPUCompiler]] deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "Scratch", "TimerOutputs", "UUIDs"] @@ -778,15 +772,15 @@ version = "0.21.0+0" [[deps.Git]] deps = ["Git_jll"] -git-tree-sha1 = "51764e6c2e84c37055e846c516e9015b4a291c7d" +git-tree-sha1 = "04eff47b1354d702c3a85e8ab23d539bb7d5957e" uuid = "d7ba0133-e1db-5d97-8f8c-041e4b3a1eb2" -version = "1.3.0" +version = "1.3.1" [[deps.Git_jll]] deps = ["Artifacts", "Expat_jll", "JLLWrappers", "LibCURL_jll", "Libdl", "Libiconv_jll", "OpenSSL_jll", "PCRE2_jll", "Zlib_jll"] -git-tree-sha1 = "b30c473c97fcc1e1e44fab8f3e88fd1b89c9e9d1" +git-tree-sha1 = "99f27817475017260f1ff24c771a5efef5765e34" uuid = "f8c6e375-362e-5223-8a59-34ff63f689eb" -version = 
"2.43.0+0" +version = "2.44.0+0" [[deps.Glib_jll]] deps = ["Artifacts", "Gettext_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Libiconv_jll", "Libmount_jll", "PCRE2_jll", "Zlib_jll"] @@ -883,12 +877,6 @@ weakdeps = ["Random", "RecipesBase", "Statistics"] IntervalSetsRecipesBaseExt = "RecipesBase" IntervalSetsStatisticsExt = "Statistics" -[[deps.Ipopt_jll]] -deps = ["ASL_jll", "Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "MUMPS_seq_jll", "OpenBLAS32_jll", "Pkg"] -git-tree-sha1 = "e3e202237d93f18856b6ff1016166b0f172a49a8" -uuid = "9cc047cb-c261-5740-88fc-0cf96f7bdcc7" -version = "300.1400.400+0" - [[deps.IrrationalConstants]] git-tree-sha1 = "630b497eafcc20001bba38a4651b327dcfc491d2" uuid = "92d709cd-6900-40b7-9082-c6be49f344b6" @@ -919,15 +907,15 @@ version = "0.21.4" [[deps.JpegTurbo_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] -git-tree-sha1 = "60b1194df0a3298f460063de985eae7b01bc011a" +git-tree-sha1 = "3336abae9a713d2210bb57ab484b1e065edd7d23" uuid = "aacddb02-875f-59d6-b918-886e6ef4fbf8" -version = "3.0.1+0" +version = "3.0.2+0" [[deps.JuliaFormatter]] deps = ["CSTParser", "Combinatorics", "CommonMark", "DataStructures", "Glob", "Pkg", "PrecompileTools", "Tokenize"] -git-tree-sha1 = "40f1eb7837b7b75d07c062790942406b9a010725" +git-tree-sha1 = "fde717f9e3fe6ffb0336bcc2142cfa71dd9df1f5" uuid = "98e50ef6-434e-11e9-1051-2b60c6c9e899" -version = "1.0.49" +version = "1.0.50" [[deps.JumpProcesses]] deps = ["ArrayInterface", "DataStructures", "DiffEqBase", "DocStringExtensions", "FunctionWrappers", "Graphs", "LinearAlgebra", "Markdown", "PoissonRandom", "Random", "RandomNumbers", "RecursiveArrayTools", "Reexport", "SciMLBase", "StaticArrays", "UnPack"] @@ -941,15 +929,15 @@ weakdeps = ["FastBroadcast"] [[deps.KLU]] deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse_jll"] -git-tree-sha1 = "884c2968c2e8e7e6bf5956af88cb46aa745c854b" +git-tree-sha1 = "01bb1d420d959b9f668e252a5732444a61ff25de" uuid = "ef3ab10e-7fda-4108-b977-705223b18434" -version = "0.4.1" +version = "0.5.0" [[deps.KernelAbstractions]] deps = ["Adapt", "Atomix", "InteractiveUtils", "LinearAlgebra", "MacroTools", "PrecompileTools", "Requires", "SparseArrays", "StaticArrays", "UUIDs", "UnsafeAtomics", "UnsafeAtomicsLLVM"] -git-tree-sha1 = "4e0cb2f5aad44dcfdc91088e85dee4ecb22c791c" +git-tree-sha1 = "c7753cc3febe006708ce6798482004241f7d890b" uuid = "63c18a36-062a-441e-b654-da1e3ab1ce7c" -version = "0.9.16" +version = "0.9.17" weakdeps = ["EnzymeCore"] [deps.KernelAbstractions.extensions] @@ -975,9 +963,9 @@ version = "3.0.0+1" [[deps.LLVM]] deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Preferences", "Printf", "Requires", "Unicode"] -git-tree-sha1 = "9e70165cca7459d25406367f0c55e517a9a7bfe7" +git-tree-sha1 = "ddab4d40513bce53c8e3157825e245224f74fae7" uuid = "929cbde3-209d-540e-8aea-75f648917ca0" -version = "6.5.0" +version = "6.6.0" [deps.LLVM.extensions] BFloat16sExt = "BFloat16s" @@ -987,9 +975,9 @@ version = "6.5.0" [[deps.LLVMExtra_jll]] deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"] -git-tree-sha1 = "114e3a48f13d4c18ddd7fd6a00107b4b96f60f9c" +git-tree-sha1 = "88b916503aac4fb7f701bb625cd84ca5dd1677bc" uuid = "dad2f222-ce93-54a1-a47d-0025e8a3acab" -version = "0.0.28+0" +version = "0.0.29+0" [[deps.LLVMOpenMP_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] @@ -1168,10 +1156,10 @@ deps = ["Libdl", "OpenBLAS_jll", "libblastrampoline_jll"] uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" [[deps.LinearSolve]] -deps = ["ArrayInterface", "ConcreteStructs", "DocStringExtensions", "EnumX", 
"FastLapackInterface", "GPUArraysCore", "InteractiveUtils", "KLU", "Krylov", "Libdl", "LinearAlgebra", "MKL_jll", "PrecompileTools", "Preferences", "RecursiveFactorization", "Reexport", "SciMLBase", "SciMLOperators", "Setfield", "SparseArrays", "Sparspak", "StaticArraysCore", "UnPack"] -git-tree-sha1 = "6f8e084deabe3189416c4e505b1c53e1b590cae8" +deps = ["ArrayInterface", "ChainRulesCore", "ConcreteStructs", "DocStringExtensions", "EnumX", "FastLapackInterface", "GPUArraysCore", "InteractiveUtils", "KLU", "Krylov", "Libdl", "LinearAlgebra", "MKL_jll", "Markdown", "PrecompileTools", "Preferences", "RecursiveFactorization", "Reexport", "SciMLBase", "SciMLOperators", "Setfield", "SparseArrays", "Sparspak", "StaticArraysCore", "UnPack"] +git-tree-sha1 = "40e698d20063b1a8dbbe40b259a24f7a71f53298" uuid = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae" -version = "2.22.1" +version = "2.25.0" [deps.LinearSolve.extensions] LinearSolveBandedMatricesExt = "BandedMatrices" @@ -1238,12 +1226,6 @@ weakdeps = ["ChainRulesCore", "ForwardDiff", "SpecialFunctions"] ForwardDiffExt = ["ChainRulesCore", "ForwardDiff"] SpecialFunctionsExt = "SpecialFunctions" -[[deps.METIS_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "1fd0a97409e418b78c53fac671cf4622efdf0f21" -uuid = "d00139f3-1899-568f-a2f0-47f597d42d70" -version = "5.1.2+0" - [[deps.MKL_jll]] deps = ["Artifacts", "IntelOpenMP_jll", "JLLWrappers", "LazyArtifacts", "Libdl"] git-tree-sha1 = "72dc3cf284559eb8f53aa593fe62cb33f83ed0c0" @@ -1255,12 +1237,6 @@ git-tree-sha1 = "bc38dff0548128765760c79eb7388a4b37fae2c8" uuid = "d8e11817-5142-5d16-987a-aa16d5891078" version = "0.4.17" -[[deps.MUMPS_seq_jll]] -deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "METIS_jll", "OpenBLAS32_jll", "Pkg"] -git-tree-sha1 = "29de2841fa5aefe615dea179fcde48bb87b58f57" -uuid = "d7ed1dd3-d0ae-5e8e-bfb4-87a502085b8d" -version = "5.4.1+0" - [[deps.MacroTools]] deps = ["Markdown", "Random"] git-tree-sha1 = "2fa9ee3e63fd3a4f7a9a4f4744a52f4856de82df" @@ -1284,9 +1260,9 @@ version = "0.1.2" [[deps.MathOptInterface]] deps = ["BenchmarkTools", "CodecBzip2", "CodecZlib", "DataStructures", "ForwardDiff", "JSON", "LinearAlgebra", "MutableArithmetics", "NaNMath", "OrderedCollections", "PrecompileTools", "Printf", "SparseArrays", "SpecialFunctions", "Test", "Unicode"] -git-tree-sha1 = "569a003f93d7c64068d3afaab908d21f67a22cd5" +git-tree-sha1 = "e8b98c868029d007102dc5f98986c81f33b0ec37" uuid = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" -version = "1.25.3" +version = "1.26.0" [[deps.MatrixFactorizations]] deps = ["ArrayLayouts", "LinearAlgebra", "Printf", "Random"] @@ -1418,9 +1394,9 @@ version = "1.2.0" [[deps.NonlinearSolve]] deps = ["ADTypes", "ArrayInterface", "ConcreteStructs", "DiffEqBase", "FastBroadcast", "FastClosures", "FiniteDiff", "ForwardDiff", "LazyArrays", "LineSearches", "LinearAlgebra", "LinearSolve", "MaybeInplace", "PrecompileTools", "Preferences", "Printf", "RecursiveArrayTools", "Reexport", "SciMLBase", "SimpleNonlinearSolve", "SparseArrays", "SparseDiffTools", "StaticArraysCore", "TimerOutputs"] -git-tree-sha1 = "b377521f4810057a99b0fa8cb7a1311c6cb1c8cd" +git-tree-sha1 = "f409959245f04c8004bd3711915d71c93b2043f7" uuid = "8913a72c-1f9b-4ce2-8d82-65094dcecaec" -version = "3.5.6" +version = "3.7.3" [deps.NonlinearSolve.extensions] NonlinearSolveBandedMatricesExt = "BandedMatrices" @@ -1428,6 +1404,7 @@ version = "3.5.6" NonlinearSolveFixedPointAccelerationExt = "FixedPointAcceleration" NonlinearSolveLeastSquaresOptimExt = 
"LeastSquaresOptim" NonlinearSolveMINPACKExt = "MINPACK" + NonlinearSolveNLSolversExt = "NLSolvers" NonlinearSolveNLsolveExt = "NLsolve" NonlinearSolveSIAMFANLEquationsExt = "SIAMFANLEquations" NonlinearSolveSpeedMappingExt = "SpeedMapping" @@ -1440,6 +1417,7 @@ version = "3.5.6" FixedPointAcceleration = "817d07cb-a79a-5c30-9a31-890123675176" LeastSquaresOptim = "0fc2ff8b-aaa3-5acd-a817-1944a5e08891" MINPACK = "4854310b-de5a-5eb6-a2a5-c1dee2bd17f9" + NLSolvers = "337daf1e-9722-11e9-073e-8b9effe078ba" NLsolve = "2774e3e8-f4cf-5e23-947b-6d7e65073b56" SIAMFANLEquations = "084e46ad-d928-497d-ad5e-07fa361a48c4" SpeedMapping = "f1835b91-879b-4a3f-a438-e4baacf14412" @@ -1467,12 +1445,6 @@ git-tree-sha1 = "887579a3eb005446d514ab7aeac5d1d027658b8f" uuid = "e7412a2a-1a6e-54c0-be00-318e2571c051" version = "1.3.5+1" -[[deps.OpenBLAS32_jll]] -deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl"] -git-tree-sha1 = "6065c4cff8fee6c6770b277af45d5082baacdba1" -uuid = "656ef2d0-ae68-5445-9ca0-591084a874a2" -version = "0.3.24+0" - [[deps.OpenBLAS_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"] uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" @@ -1537,10 +1509,10 @@ uuid = "3e6eede4-6085-4f62-9a71-46d9bc1eb92b" version = "0.2.1" [[deps.OptimizationMOI]] -deps = ["Ipopt_jll", "MathOptInterface", "ModelingToolkit", "Optimization", "Reexport", "SparseArrays", "SymbolicIndexingInterface", "Symbolics"] -git-tree-sha1 = "570e686c7a076ce2439eafb6d75499697a9dbfdf" +deps = ["MathOptInterface", "ModelingToolkit", "Optimization", "Reexport", "SparseArrays", "SymbolicIndexingInterface", "Symbolics"] +git-tree-sha1 = "75c381f438afd5c474f460442af6738d6d597b76" uuid = "fd9f6733-72f4-499f-8506-86b2bdd0dea1" -version = "0.3.4" +version = "0.4.0" [[deps.OptimizationNLopt]] deps = ["NLopt", "Optimization", "Reexport"] @@ -1567,9 +1539,9 @@ version = "1.6.3" [[deps.OrdinaryDiffEq]] deps = ["ADTypes", "Adapt", "ArrayInterface", "DataStructures", "DiffEqBase", "DocStringExtensions", "ExponentialUtilities", "FastBroadcast", "FastClosures", "FillArrays", "FiniteDiff", "ForwardDiff", "FunctionWrappersWrappers", "IfElse", "InteractiveUtils", "LineSearches", "LinearAlgebra", "LinearSolve", "Logging", "MacroTools", "MuladdMacro", "NonlinearSolve", "Polyester", "PreallocationTools", "PrecompileTools", "Preferences", "RecursiveArrayTools", "Reexport", "SciMLBase", "SciMLOperators", "SimpleNonlinearSolve", "SimpleUnPack", "SparseArrays", "SparseDiffTools", "StaticArrayInterface", "StaticArrays", "TruncatedStacktraces"] -git-tree-sha1 = "ed171bfea6156d6458007b19790a22f4754bd501" +git-tree-sha1 = "d6ec73f3066cf4269f762f07cec2af93c8ef4798" uuid = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" -version = "6.71.0" +version = "6.72.0" [[deps.PCRE2_jll]] deps = ["Artifacts", "Libdl"] @@ -1742,9 +1714,9 @@ uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" [[deps.Random123]] deps = ["Random", "RandomNumbers"] -git-tree-sha1 = "c860e84651f58ce240dd79e5d9e055d55234c35a" +git-tree-sha1 = "4743b43e5a9c4a2ede372de7061eed81795b12e7" uuid = "74087812-796a-5b5d-8853-05524746bad3" -version = "1.6.2" +version = "1.7.0" [[deps.RandomNumbers]] deps = ["Random", "Requires"] @@ -1772,9 +1744,9 @@ version = "0.6.12" [[deps.RecursiveArrayTools]] deps = ["Adapt", "ArrayInterface", "DocStringExtensions", "GPUArraysCore", "IteratorInterfaceExtensions", "LinearAlgebra", "RecipesBase", "SparseArrays", "StaticArraysCore", "Statistics", "SymbolicIndexingInterface", "Tables"] -git-tree-sha1 = "09c906ce9fa905d40e0706cdb62422422091c22f" 
+git-tree-sha1 = "1bbc4bb050165cc57ca2876cd53cc23395948650" uuid = "731186ca-8d62-57ce-b412-fbd966d074cd" -version = "3.8.1" +version = "3.10.0" [deps.RecursiveArrayTools.extensions] RecursiveArrayToolsFastBroadcastExt = "FastBroadcast" @@ -1870,9 +1842,9 @@ version = "0.6.42" [[deps.SciMLBase]] deps = ["ADTypes", "ArrayInterface", "CommonSolve", "ConstructionBase", "Distributed", "DocStringExtensions", "EnumX", "FillArrays", "FunctionWrappersWrappers", "IteratorInterfaceExtensions", "LinearAlgebra", "Logging", "Markdown", "PrecompileTools", "Preferences", "Printf", "RecipesBase", "RecursiveArrayTools", "Reexport", "RuntimeGeneratedFunctions", "SciMLOperators", "StaticArraysCore", "Statistics", "SymbolicIndexingInterface", "Tables", "TruncatedStacktraces"] -git-tree-sha1 = "33e40003f4ef424e8a8700e0a3a189c6ece2af27" +git-tree-sha1 = "375256db2d99fc730d2d134cca17939324d284d1" uuid = "0bca4576-84f4-4d90-8ffe-ffa030f20462" -version = "2.26.1" +version = "2.28.0" [deps.SciMLBase.extensions] SciMLBaseChainRulesCoreExt = "ChainRulesCore" @@ -1901,9 +1873,9 @@ version = "0.3.7" [[deps.SciMLSensitivity]] deps = ["ADTypes", "Adapt", "ArrayInterface", "ChainRulesCore", "DiffEqBase", "DiffEqCallbacks", "DiffEqNoiseProcess", "Distributions", "EllipsisNotation", "Enzyme", "FiniteDiff", "ForwardDiff", "FunctionProperties", "FunctionWrappersWrappers", "Functors", "GPUArraysCore", "LinearAlgebra", "LinearSolve", "Markdown", "OrdinaryDiffEq", "Parameters", "PreallocationTools", "QuadGK", "Random", "RandomNumbers", "RecursiveArrayTools", "Reexport", "ReverseDiff", "SciMLBase", "SciMLOperators", "SparseDiffTools", "StaticArrays", "StaticArraysCore", "Statistics", "StochasticDiffEq", "Tracker", "TruncatedStacktraces", "Zygote"] -git-tree-sha1 = "9f536ead920f79bd0ce2ac60839c48e4a7f6eb19" +git-tree-sha1 = "458d859240b6176c4610e79d5b75d40afa141f00" uuid = "1ed8b502-d754-442c-8d5d-10ac956f44a1" -version = "7.55.0" +version = "7.56.0" [[deps.Scratch]] deps = ["Dates"] @@ -2042,9 +2014,9 @@ weakdeps = ["OffsetArrays", "StaticArrays"] [[deps.StaticArrays]] deps = ["LinearAlgebra", "PrecompileTools", "Random", "StaticArraysCore"] -git-tree-sha1 = "7b0e9c14c624e435076d19aea1e5cbdec2b9ca37" +git-tree-sha1 = "bf074c045d3d5ffd956fa0a461da38a44685d6b2" uuid = "90137ffa-7385-5640-81b9-e52037218182" -version = "1.9.2" +version = "1.9.3" weakdeps = ["ChainRulesCore", "Statistics"] [deps.StaticArrays.extensions] @@ -2095,9 +2067,9 @@ version = "2.0.1" [[deps.StochasticDiffEq]] deps = ["Adapt", "ArrayInterface", "DataStructures", "DiffEqBase", "DiffEqNoiseProcess", "DocStringExtensions", "FiniteDiff", "ForwardDiff", "JumpProcesses", "LevyArea", "LinearAlgebra", "Logging", "MuladdMacro", "NLsolve", "OrdinaryDiffEq", "Random", "RandomNumbers", "RecursiveArrayTools", "Reexport", "SciMLBase", "SciMLOperators", "SparseArrays", "SparseDiffTools", "StaticArrays", "UnPack"] -git-tree-sha1 = "f5eb6f4794a2a56d6b9d3dcdb9d6cb217a2ac660" +git-tree-sha1 = "97e5d0b7e5ec2e68eec6626af97c59e9f6b6c3d0" uuid = "789caeaf-c7a9-5a7d-9973-96adeb23e2a0" -version = "6.65.0" +version = "6.65.1" [[deps.StrideArraysCore]] deps = ["ArrayInterface", "CloseOpenIntervals", "IfElse", "LayoutPointers", "ManualMemory", "SIMDTypes", "Static", "StaticArrayInterface", "ThreadingUtilities"] @@ -2106,10 +2078,17 @@ uuid = "7792a7ef-975c-4747-a70f-980b88e8d1da" version = "0.5.2" [[deps.StructArrays]] -deps = ["Adapt", "ConstructionBase", "DataAPI", "GPUArraysCore", "StaticArraysCore", "Tables"] -git-tree-sha1 = "1b0b1205a56dc288b71b1961d48e351520702e24" +deps = 
["ConstructionBase", "DataAPI", "Tables"] +git-tree-sha1 = "f4dc295e983502292c4c3f951dbb4e985e35b3be" uuid = "09ab397b-f2b6-538f-b94a-2f83cf4a842a" -version = "0.6.17" +version = "0.6.18" +weakdeps = ["Adapt", "GPUArraysCore", "SparseArrays", "StaticArrays"] + + [deps.StructArrays.extensions] + StructArraysAdaptExt = "Adapt" + StructArraysGPUArraysCoreExt = "GPUArraysCore" + StructArraysSparseArraysExt = "SparseArrays" + StructArraysStaticArraysExt = "StaticArrays" [[deps.StructIO]] deps = ["Test"] @@ -2139,9 +2118,9 @@ uuid = "fb77eaff-e24c-56d4-86b1-d163f2edb164" version = "5.2.2+0" [[deps.SymbolicIndexingInterface]] -git-tree-sha1 = "dc7186d456f9ff2bef0cb754a59758920f0b2382" +git-tree-sha1 = "251bb311585143931a306175c3b7ced220300578" uuid = "2efcf032-c050-4f8e-a9bb-153293bab1f5" -version = "0.3.6" +version = "0.3.8" [[deps.SymbolicUtils]] deps = ["AbstractTrees", "Bijections", "ChainRulesCore", "Combinatorics", "ConstructionBase", "DataStructures", "DocStringExtensions", "DynamicPolynomials", "IfElse", "LabelledArrays", "LinearAlgebra", "MultivariatePolynomials", "NaNMath", "Setfield", "SparseArrays", "SpecialFunctions", "StaticArrays", "SymbolicIndexingInterface", "TimerOutputs", "Unityper"] @@ -2151,9 +2130,9 @@ version = "1.5.0" [[deps.Symbolics]] deps = ["ArrayInterface", "Bijections", "ConstructionBase", "DataStructures", "DiffRules", "Distributions", "DocStringExtensions", "DomainSets", "DynamicPolynomials", "ForwardDiff", "IfElse", "LaTeXStrings", "LambertW", "Latexify", "Libdl", "LinearAlgebra", "LogExpFunctions", "MacroTools", "Markdown", "NaNMath", "PrecompileTools", "RecipesBase", "Reexport", "Requires", "RuntimeGeneratedFunctions", "SciMLBase", "Setfield", "SparseArrays", "SpecialFunctions", "StaticArrays", "SymbolicIndexingInterface", "SymbolicUtils"] -git-tree-sha1 = "a5d599a4a8a0671cf9944d8d43520670990f437a" +git-tree-sha1 = "3e1e42a51142f0ee982bca641b1827cf73c84c89" uuid = "0c5d862f-8b57-4792-8d23-62f2024744c7" -version = "5.18.0" +version = "5.22.0" [deps.Symbolics.extensions] SymbolicsGroebnerExt = "Groebner" @@ -2353,9 +2332,9 @@ version = "1.31.0+0" [[deps.XML2_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Libiconv_jll", "Zlib_jll"] -git-tree-sha1 = "801cbe47eae69adc50f36c3caec4758d2650741b" +git-tree-sha1 = "07e470dabc5a6a4254ffebc29a1b3fc01464e105" uuid = "02c8fc9c-b97f-50b9-bbe4-9be30ff0a78a" -version = "2.12.2+0" +version = "2.12.5+0" [[deps.XSLT_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgcrypt_jll", "Libgpg_error_jll", "Libiconv_jll", "Pkg", "XML2_jll", "Zlib_jll"] @@ -2365,9 +2344,9 @@ version = "1.1.34+0" [[deps.XZ_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] -git-tree-sha1 = "ac88fb95ae6447c8dda6a5503f3bafd496ae8632" +git-tree-sha1 = "37195dcb94a5970397ad425b95a9a26d0befce3a" uuid = "ffd25f8a-64ca-5728-b0f7-c24cf3aae800" -version = "5.4.6+0" +version = "5.6.0+0" [[deps.Xorg_libICE_jll]] deps = ["Libdl", "Pkg"] diff --git a/previews/PR249/assets/Project.toml b/previews/PR249/assets/Project.toml index 39769f1..0733e33 100644 --- a/previews/PR249/assets/Project.toml +++ b/previews/PR249/assets/Project.toml @@ -30,7 +30,7 @@ NLopt = "0.6, 1" Optim = "1" Optimization = "3" OptimizationBBO = "0.1, 0.2" -OptimizationMOI = "0.1, 0.2, 0.3" +OptimizationMOI = "0.1, 0.2, 0.3, 0.4" OptimizationNLopt = "0.1, 0.2" OptimizationOptimJL = "0.1, 0.2" Plots = "1" diff --git a/previews/PR249/getting_started/5b809c38.svg b/previews/PR249/getting_started/20472bab.svg similarity index 94% rename from previews/PR249/getting_started/5b809c38.svg rename to 
previews/PR249/getting_started/20472bab.svg index 890f47e..726774b 100644 --- a/previews/PR249/getting_started/5b809c38.svg +++ b/previews/PR249/getting_started/20472bab.svg @@ -1,58 +1,58 @@ [SVG plot markup omitted] diff --git a/previews/PR249/getting_started/3e4bacb9.svg b/previews/PR249/getting_started/3e4bacb9.svg new file mode 100644 index 0000000..8ec3a68 --- /dev/null +++ b/previews/PR249/getting_started/3e4bacb9.svg @@ -0,0 +1,40 @@ [SVG plot markup omitted] diff --git a/previews/PR249/getting_started/5eb57303.svg b/previews/PR249/getting_started/5eb57303.svg deleted file mode 100644 index c49de67..0000000 --- a/previews/PR249/getting_started/5eb57303.svg +++ /dev/null @@ -1,40 +0,0 @@ [SVG plot markup omitted] diff --git a/previews/PR249/getting_started/b0b154be.svg b/previews/PR249/getting_started/b0b154be.svg deleted file mode 100644 index b6e0631..0000000 --- a/previews/PR249/getting_started/b0b154be.svg +++ /dev/null @@ -1,58 +0,0 @@ [SVG plot markup omitted] diff --git a/previews/PR249/getting_started/c3dfece5.svg b/previews/PR249/getting_started/c3dfece5.svg new file mode 100644 index 0000000..39d134d --- /dev/null +++ b/previews/PR249/getting_started/c3dfece5.svg @@ -0,0 +1,58 @@ [SVG plot markup omitted] diff --git a/previews/PR249/getting_started/index.html b/previews/PR249/getting_started/index.html index 775f5a3..3b68d36 100644 --- a/previews/PR249/getting_started/index.html +++ b/previews/PR249/getting_started/index.html @@ -19,27 +19,27 @@ using RecursiveArrayTools # for VectorOfArray randomized = VectorOfArray([(sol(t[i]) + 0.01randn(2)) for i in 1:length(t)]) data = convert(Array, randomized)
2×200 Matrix{Float64}:
- 0.983837  1.02189   1.06586   1.10061   …  0.967578  1.00051  1.03221
- 1.00859   0.902989  0.823045  0.757726     1.10479   1.00488  0.91817

Here, we used VectorOfArray from RecursiveArrayTools.jl to turn the result of an ODE into a matrix.

If we plot the solution with the parameter at a=1.42, we get the following:

newprob = remake(prob, p = [1.42])
+ 1.0227  1.02318  1.08149   1.09258   …  0.976921  1.01103  1.03312
+ 1.0062  0.90209  0.817249  0.738111     1.0885    1.00231  0.916369

Here, we used VectorOfArray from RecursiveArrayTools.jl to turn the result of an ODE into a matrix.

If we plot the solution with the parameter at a=1.42, we get the following:

newprob = remake(prob, p = [1.42])
 newsol = solve(newprob, Tsit5())
 plot(sol)
-plot!(newsol)
Example block output

Notice that after one period, this solution begins to drift very far off: this problem is sensitive to the choice of a.

To build the objective function for Optim.jl, we simply call the build_loss_objective function:

cost_function = build_loss_objective(prob, Tsit5(), L2Loss(t, data),
+plot!(newsol)
Example block output

Notice that after one period, this solution begins to drift very far off: this problem is sensitive to the choice of a.

To build the objective function for Optim.jl, we simply call the build_loss_objective function:

cost_function = build_loss_objective(prob, Tsit5(), L2Loss(t, data),
     Optimization.AutoForwardDiff(),
-    maxiters = 10000, verbose = false)
(::SciMLBase.OptimizationFunction{true, ADTypes.AutoForwardDiff{nothing, Nothing}, DiffEqParamEstim.var"#29#30"{Nothing, typeof(DiffEqParamEstim.STANDARD_PROB_GENERATOR), Base.Pairs{Symbol, Integer, Tuple{Symbol, Symbol}, @NamedTuple{maxiters::Int64, verbose::Bool}}, SciMLBase.ODEProblem{Vector{Float64}, Tuple{Float64, Float64}, true, Vector{Float64}, SciMLBase.ODEFunction{true, SciMLBase.AutoSpecialize, typeof(Main.f), LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing, Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, @NamedTuple{}}, SciMLBase.StandardODEProblem}, OrdinaryDiffEq.Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, L2Loss{Vector{Float64}, Matrix{Float64}, Nothing, Nothing, Nothing}, Nothing, Tuple{}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED_NO_TIME), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}) (generic function with 1 method)

This objective function internally calls the ODE solver to get solutions to test against the data. The keyword arguments are passed directly to the solver. Note that we set maxiters so that the differential equation solvers error out more quickly in bad regions of the parameter space, speeding up the process. If the integrator stops early (due to divergence), those parameters are given an infinite loss, so this is a quick way to avoid bad parameters. We set verbose=false because these divergence warnings can get noisy. Optimization.AutoForwardDiff() is the choice of automatic differentiation, i.e., how the gradients are calculated. For more information on this choice, see the automatic differentiation choice API.

Note

A good rule of thumb is to use Optimization.AutoForwardDiff() for less than 100 parameters + states, and Optimization.AutoZygote() for more.

Before optimizing, let's visualize our cost function by plotting it for a range of parameter values:

vals = 0.0:0.1:10.0
+    maxiters = 10000, verbose = false)
(::SciMLBase.OptimizationFunction{true, ADTypes.AutoForwardDiff{nothing, Nothing}, DiffEqParamEstim.var"#29#30"{Nothing, typeof(DiffEqParamEstim.STANDARD_PROB_GENERATOR), Base.Pairs{Symbol, Integer, Tuple{Symbol, Symbol}, @NamedTuple{maxiters::Int64, verbose::Bool}}, SciMLBase.ODEProblem{Vector{Float64}, Tuple{Float64, Float64}, true, Vector{Float64}, SciMLBase.ODEFunction{true, SciMLBase.AutoSpecialize, typeof(Main.f), LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing, SymbolicIndexingInterface.SymbolCache{Nothing, Nothing, Nothing}, Nothing, Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, @NamedTuple{}}, SciMLBase.StandardODEProblem}, OrdinaryDiffEq.Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, L2Loss{Vector{Float64}, Matrix{Float64}, Nothing, Nothing, Nothing}, Nothing, Tuple{}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED_NO_TIME), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}) (generic function with 1 method)

This objective function internally calls the ODE solver to get solutions to test against the data. The keyword arguments are passed directly to the solver. Note that we set maxiters so that the differential equation solvers error out more quickly in bad regions of the parameter space, speeding up the process. If the integrator stops early (due to divergence), those parameters are given an infinite loss, so this is a quick way to avoid bad parameters. We set verbose=false because these divergence warnings can get noisy. Optimization.AutoForwardDiff() is the choice of automatic differentiation, i.e., how the gradients are calculated. For more information on this choice, see the automatic differentiation choice API.
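To make the early-exit behavior concrete, here is a rough sketch of what such an objective computes; the name manual_loss and the retcode check are illustrative, not the package's actual internals:

function manual_loss(p)
    _prob = remake(prob, p = p)
    sol = solve(_prob, Tsit5(); saveat = t, maxiters = 10000, verbose = false)
    # A diverging integration stops early; give those parameters an infinite loss.
    SciMLBase.successful_retcode(sol.retcode) || return Inf
    return sum(abs2, Array(sol) .- data)  # L2 loss against the data
end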

Note

A good rule of thumb is to use Optimization.AutoForwardDiff() for less than 100 parameters + states, and Optimization.AutoZygote() for more.

Before optimizing, let's visualize our cost function by plotting it for a range of parameter values:

vals = 0.0:0.1:10.0
 plot(vals, [cost_function(i) for i in vals], yscale = :log10,
     xaxis = "Parameter", yaxis = "Cost", title = "1-Parameter Cost Function",
-    lw = 3)
Example block output

Here we see that there is a very well-defined minimum in our cost function at the real parameter (because this is where the solution almost exactly fits the dataset).

Now we can use the BFGS algorithm to optimize the parameter starting at a=1.42. We do this by creating an optimization problem and solving that with BFGS():

optprob = Optimization.OptimizationProblem(cost_function, [1.42])
-optsol = solve(optprob, BFGS())
retcode: Failure
+    lw = 3)
Example block output

Here we see that there is a very well-defined minimum in our cost function at the real parameter (because this is where the solution almost exactly fits the dataset).

Now we can use the BFGS algorithm to optimize the parameter starting at a=1.42. We do this by creating an optimization problem and solving that with BFGS():

optprob = Optimization.OptimizationProblem(cost_function, [1.42])
+optsol = solve(optprob, BFGS())
retcode: Success
 u: 1-element Vector{Float64}:
- 1.5003735694001976

Now let's see how well the fit performed:

newprob = remake(prob, p = optsol.u)
+ 1.500330845435876

Now let's see how well the fit performed:

newprob = remake(prob, p = optsol.u)
 newsol = solve(newprob, Tsit5())
 plot(sol)
-plot!(newsol)
Example block output

Note that some algorithms may be sensitive to the initial condition. For more details on using Optim.jl, see the documentation for Optim.jl.

Adding Bounds Constraints

We can improve our solution by noting that the Lotka-Volterra equation requires that the parameters be positive. Thus, following the Optimization.jl documentation, we can add box constraints so the optimizer only searches between 0.0 and 3.0, which improves the efficiency of our algorithm. We pass these bounds to the optimizer via the lb and ub keyword arguments of the OptimizationProblem:

lower = [0.0]
+plot!(newsol)
Example block output

Note that some algorithms may be sensitive to the initial condition. For more details on using Optim.jl, see the documentation for Optim.jl.

Adding Bounds Constraints

We can improve our solution by noting that the Lotka-Volterra equation requires that the parameters be positive. Thus, following the Optimization.jl documentation, we can add box constraints so the optimizer only searches between 0.0 and 3.0, which improves the efficiency of our algorithm. We pass these bounds to the optimizer via the lb and ub keyword arguments of the OptimizationProblem:

lower = [0.0]
 upper = [3.0]
 optprob = Optimization.OptimizationProblem(cost_function, [1.42], lb = lower, ub = upper)
 result = solve(optprob, BFGS())
retcode: Success
 u: 1-element Vector{Float64}:
- 1.5003744811807647

Estimating Multiple Parameters Simultaneously

Lastly, we can use the same tools to estimate multiple parameters simultaneously. Let's use the Lotka-Volterra equation with all parameters free:

function f2(du, u, p, t)
+ 1.500334849730979

Estimating Multiple Parameters Simultaneously

Lastly, we can use the same tools to estimate multiple parameters simultaneously. Let's use the Lotka-Volterra equation with all parameters free:

function f2(du, u, p, t)
     du[1] = dx = p[1] * u[1] - p[2] * u[1] * u[2]
     du[2] = dy = -p[3] * u[2] + p[4] * u[1] * u[2]
 end
@@ -57,10 +57,10 @@
 optprob = Optimization.OptimizationProblem(cost_function, [1.3, 0.8, 2.8, 1.2])
 result_bfgs = solve(optprob, BFGS())
retcode: Success
 u: 4-element Vector{Float64}:
- 1.4997554523151548
- 1.0004227408198396
- 3.0003859357915856
- 1.0008823827103905

Alternative Cost Functions for Increased Robustness

The build_loss_objective with L2Loss is the most naive approach for parameter estimation. There are many others.

We can also use first differences in L2Loss by passing the kwarg differ_weight, which decides the contribution of the differencing loss to the total loss.

cost_function = build_loss_objective(prob, Tsit5(),
+ 1.5010931152675056
+ 1.0015618867593885
+ 2.997036696996743
+ 0.9992720137993332

Alternative Cost Functions for Increased Robustness

The build_loss_objective with L2Loss is the most naive approach for parameter estimation. There are many others.

We can also use first differences in L2Loss by passing the kwarg differ_weight, which decides the contribution of the differencing loss to the total loss.
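Conceptually, the combined loss is a weighted sum of the state mismatch and the first-difference mismatch; as a sketch (the exact normalization is an implementation detail of the package):

loss(p) = data_weight * Σᵢ ‖u(tᵢ) - dataᵢ‖² + differ_weight * Σᵢ ‖(u(tᵢ) - u(tᵢ₋₁)) - (dataᵢ - dataᵢ₋₁)‖²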

cost_function = build_loss_objective(prob, Tsit5(),
     L2Loss(t, data, differ_weight = 0.3,
         data_weight = 0.7),
     Optimization.AutoForwardDiff(),
@@ -68,10 +68,10 @@
 optprob = Optimization.OptimizationProblem(cost_function, [1.3, 0.8, 2.8, 1.2])
 result_bfgs = solve(optprob, BFGS())
retcode: Failure
 u: 4-element Vector{Float64}:
- 1.4995624719386118
- 1.000126610184397
- 3.0017660764437344
- 1.00102596293136

We can also use the multiple shooting method by creating a multiple_shooting_objective:

function ms_f1(du, u, p, t)
+ 1.500794894914081
+ 1.0009064979879116
+ 2.9979187681753614
+ 0.9997012155328623

We can also use the multiple shooting method by creating a multiple_shooting_objective:

function ms_f1(du, u, p, t)
     du[1] = p[1] * u[1] - p[2] * u[1] * u[2]
     du[2] = -3.0 * u[2] + u[1] * u[2]
 end
@@ -89,30 +89,30 @@
 ms_obj = multiple_shooting_objective(ms_prob, Tsit5(), L2Loss(t, data),
     Optimization.AutoForwardDiff();
     discontinuity_weight = 1.0, abstol = 1e-12,
-    reltol = 1e-12)
(::SciMLBase.OptimizationFunction{true, ADTypes.AutoForwardDiff{nothing, Nothing}, DiffEqParamEstim.var"#43#48"{Nothing, Float64, DiffEqParamEstim.var"#1#2", Base.Pairs{Symbol, Float64, Tuple{Symbol, Symbol}, @NamedTuple{abstol::Float64, reltol::Float64}}, SciMLBase.ODEProblem{Vector{Float64}, Tuple{Float64, Float64}, true, Vector{Float64}, SciMLBase.ODEFunction{true, SciMLBase.AutoSpecialize, typeof(Main.ms_f1), LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing, Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, @NamedTuple{}}, SciMLBase.StandardODEProblem}, OrdinaryDiffEq.Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, L2Loss{Vector{Float64}, Matrix{Float64}, Nothing, Nothing, Nothing}, Nothing}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED_NO_TIME), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}) (generic function with 1 method)

This creates the objective function that can be passed to an optimizer, from which we can then get the parameter values and the initial values of the short time periods, keeping in mind the indexing. Now we mix this with a global optimization method to improve robustness even more:

optprob = Optimization.OptimizationProblem(ms_obj, zeros(18), lb = first.(bound),
+    reltol = 1e-12)
(::SciMLBase.OptimizationFunction{true, ADTypes.AutoForwardDiff{nothing, Nothing}, DiffEqParamEstim.var"#43#48"{Nothing, Float64, DiffEqParamEstim.var"#1#2", Base.Pairs{Symbol, Float64, Tuple{Symbol, Symbol}, @NamedTuple{abstol::Float64, reltol::Float64}}, SciMLBase.ODEProblem{Vector{Float64}, Tuple{Float64, Float64}, true, Vector{Float64}, SciMLBase.ODEFunction{true, SciMLBase.AutoSpecialize, typeof(Main.ms_f1), LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing, SymbolicIndexingInterface.SymbolCache{Nothing, Nothing, Nothing}, Nothing, Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, @NamedTuple{}}, SciMLBase.StandardODEProblem}, OrdinaryDiffEq.Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, L2Loss{Vector{Float64}, Matrix{Float64}, Nothing, Nothing, Nothing}, Nothing}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED_NO_TIME), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}) (generic function with 1 method)

This creates the objective function that can be passed to an optimizer, from which we can then get the parameter values and the initial values of the short time periods, keeping in mind the indexing. Now we mix this with a global optimization method to improve robustness even more:

optprob = Optimization.OptimizationProblem(ms_obj, zeros(18), lb = first.(bound),
     ub = last.(bound))
 optsol_ms = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 10_000)
retcode: Failure
 u: 18-element Vector{Float64}:
- 0.911322656937296
- 1.1266787773646716
- 3.5006585319727703
- 0.2581565028106773
- 2.6729470845263212
- 4.695872235004817
- 1.3355182732930886
- 0.8057052583335481
- 4.8427140131072415
- 0.37306985699367085
- 1.2771819371346613
- 3.520691733864322
- 1.7035243914223086
- 0.4306520080126741
- 6.388765407461336
- 0.7747149171072861
- 1.498912664368806
- 1.0264052358197866
optsol_ms.u[(end - 1):end]
2-element Vector{Float64}:
- 1.498912664368806
- 1.0264052358197866

Since our model has 2 parameters, we look at the last 2 indices of the result to get our parameter values; the rest of the values are the initial values of the shorter timespans, as described in the reference section. We can also use a gradient-based optimizer with the multiple shooting objective.

optsol_ms = solve(optprob, BFGS())
+ 0.8956492828785643
+ 1.3160211458098436
+ 3.2036079521090404
+ 0.34414204633933954
+ 2.9690542283041736
+ 4.679574111928217
+ 1.114899259968644
+ 0.7867338732898105
+ 4.7282992464471
+ 0.39043062426262265
+ 1.4498868316927416
+ 3.709004168905337
+ 1.5496759773692697
+ 0.6687725955065645
+ 6.191636920691155
+ 0.824008168705664
+ 1.7032464763211117
+ 1.0632935540792416
optsol_ms.u[(end - 1):end]
2-element Vector{Float64}:
+ 1.7032464763211117
+ 1.0632935540792416

Since our model has 2 parameters, we look at the last 2 indices of the result to get our parameter values; the rest of the values are the initial values of the shorter timespans, as described in the reference section. We can also use a gradient-based optimizer with the multiple shooting objective.

optsol_ms = solve(optprob, BFGS())
 optsol_ms.u[(end - 1):end]
2-element Vector{Float64}:
  1.5000000000037266
  1.000000000000063
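For clarity, the unpacking of the 18-element result can be written out explicitly; this sketch assumes the layout described above (2 states per segment followed by the 2 parameters), and the names are illustrative:

p_est = optsol_ms.u[(end - 1):end]             # the 2 model parameters
u0s = reshape(optsol_ms.u[1:(end - 2)], 2, :)  # one column per segment's initial state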

The objective function for the Two Stage method can be created and passed to an optimizer as

two_stage_obj = two_stage_objective(ms_prob, t, data, Optimization.AutoForwardDiff())
@@ -122,4 +122,4 @@
  1.5035938533664905
  0.99257311537469
  2.8
- 1.2

The default kernel used in the method is Epanechnikov; the other available kernels are Uniform, Triangular, Quartic, Triweight, Tricube, Gaussian, Cosine, Logistic, and Sigmoid, and the kernel can be selected via the kernel keyword argument. The loss_func keyword argument can be used to pass the loss (cost) function you want to use, and passing a valid adtype argument enables automatic differentiation.

Conclusion

There are many more choices for how to improve the robustness of a parameter estimation. With all of these tools, one should likely never do the simple “solve it with p and check the L2 loss”. Instead, use these tricks to improve the loss landscape and increase the optimizers' ability to find the globally best parameters.

+ 1.2

The default kernel used in the method is Epanechnikov; the other available kernels are Uniform, Triangular, Quartic, Triweight, Tricube, Gaussian, Cosine, Logistic, and Sigmoid, and the kernel can be selected via the kernel keyword argument. The loss_func keyword argument can be used to pass the loss (cost) function you want to use, and passing a valid adtype argument enables automatic differentiation.
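As a sketch of using these keywords (reusing ms_prob, t, and data from above; the kernel choice here is arbitrary):

two_stage_obj = two_stage_objective(ms_prob, t, data,
    Optimization.AutoForwardDiff(); kernel = :Gaussian)
optprob = Optimization.OptimizationProblem(two_stage_obj, [1.3, 0.8, 2.8, 1.2])
result = solve(optprob, BFGS())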

Conclusion

There are many more choices for how to improve the robustness of a parameter estimation. With all of these tools, one should likely never do the simple “solve it with p and check the L2 loss”. Instead, use these tricks to improve the loss landscape and increase the optimizers' ability to find the globally best parameters.

diff --git a/previews/PR249/index.html b/previews/PR249/index.html index 262c0ec..5b76901 100644 --- a/previews/PR249/index.html +++ b/previews/PR249/index.html @@ -12,12 +12,12 @@ [429524aa] Optim v1.9.2 [7f7a1694] Optimization v3.22.0 [3e6eede4] OptimizationBBO v0.2.1 - [fd9f6733] OptimizationMOI v0.3.4 + [fd9f6733] OptimizationMOI v0.4.0 [4e6fcdb7] OptimizationNLopt v0.2.0 [36348300] OptimizationOptimJL v0.2.2 [91a5bcdd] Plots v1.40.1 - [731186ca] RecursiveArrayTools v3.8.1 - [1ed8b502] SciMLSensitivity v7.55.0 + [731186ca] RecursiveArrayTools v3.10.0 + [1ed8b502] SciMLSensitivity v7.56.0 [e88e6eb3] Zygote v0.6.69
and using this machine and Julia version.
Julia Version 1.10.1
 Commit 7790d6f0641 (2024-02-13 20:41 UTC)
 Build Info:
@@ -32,8 +32,8 @@
   [47edcb42] ADTypes v0.2.6
   [a4c015fc] ANSIColoredPrinters v0.0.1
   [621f4979] AbstractFFTs v1.5.0
-  [1520ce14] AbstractTrees v0.4.4
-⌅ [79e6a3ab] Adapt v3.7.2
+  [1520ce14] AbstractTrees v0.4.5
+  [79e6a3ab] Adapt v4.0.1
 ⌅ [ec485272] ArnoldiMethod v0.2.0
   [4fba245c] ArrayInterface v7.7.1
   [4c555306] ArrayLayouts v1.6.0
@@ -44,7 +44,7 @@
   [d1d4a3ce] BitFlags v0.1.8
   [62783981] BitTwiddlingConvenienceFunctions v0.1.5
   [a134a8b2] BlackBoxOptim v0.6.3
-⌃ [764a87c0] BoundaryValueDiffEq v5.6.0
+  [764a87c0] BoundaryValueDiffEq v5.6.3
   [fa961155] CEnum v0.5.0
   [2a0fbf3d] CPUSummary v0.2.4
   [a9c8d775] CPUTime v1.0.0
@@ -52,7 +52,7 @@
   [49dc2e85] Calculus v0.5.1
   [7057c7e9] Cassette v0.3.13
   [082447d4] ChainRules v1.63.0
-  [d360d2e6] ChainRulesCore v1.21.1
+  [d360d2e6] ChainRulesCore v1.22.1
   [fb6a15b2] CloseOpenIntervals v0.1.12
   [523fee87] CodecBzip2 v0.8.2
   [944b1d66] CodecZlib v0.7.4
@@ -64,7 +64,7 @@
   [a80b9123] CommonMark v0.8.12
   [38540f10] CommonSolve v0.2.4
   [bbf7d656] CommonSubexpressions v0.3.0
-  [34da2185] Compat v4.12.0
+  [34da2185] Compat v4.14.0
   [b152e2b5] CompositeTypes v0.1.3
   [2569d6c7] ConcreteStructs v0.2.3
   [f0e56b4a] ConcurrentUtilities v2.3.1
@@ -74,13 +74,13 @@
   [adafc99b] CpuId v0.3.1
   [a8cc5b0e] Crayons v4.1.1
   [9a962f9c] DataAPI v1.16.0
-  [864edb3b] DataStructures v0.18.16
+  [864edb3b] DataStructures v0.18.17
   [e2d170a0] DataValueInterfaces v1.0.0
-  [bcd4f6db] DelayDiffEq v5.46.0
+  [bcd4f6db] DelayDiffEq v5.47.0
   [8bb1440f] DelimitedFiles v1.9.1
   [39dd38d3] Dierckx v0.5.3
-  [2b5f629d] DiffEqBase v6.147.0
-  [459566f4] DiffEqCallbacks v2.37.0
+  [2b5f629d] DiffEqBase v6.147.1
+⌅ [459566f4] DiffEqCallbacks v2.37.0
   [77a26b50] DiffEqNoiseProcess v5.20.1
   [1130ab10] DiffEqParamEstim v2.2.0 `~/work/DiffEqParamEstim.jl/DiffEqParamEstim.jl`
   [163ba53b] DiffResults v1.1.0
@@ -95,7 +95,7 @@
   [7c1d4256] DynamicPolynomials v0.5.5
   [da5c29d0] EllipsisNotation v1.8.0
   [4e289a0a] EnumX v1.0.4
-  [7da242da] Enzyme v0.11.16
+  [7da242da] Enzyme v0.11.17
   [f151be2c] EnzymeCore v0.6.5
   [460bff9d] ExceptionUnwrapping v0.1.10
   [d4d017d3] ExponentialUtilities v1.26.1
@@ -114,12 +114,12 @@
   [069b7b12] FunctionWrappers v1.1.3
   [77dc65aa] FunctionWrappersWrappers v0.1.3
   [d9f16b24] Functors v0.4.7
-⌃ [0c68f7d7] GPUArrays v9.1.0
-⌅ [46192b85] GPUArraysCore v0.1.5
-  [61eb1bfa] GPUCompiler v0.25.0
+  [0c68f7d7] GPUArrays v10.0.2
+  [46192b85] GPUArraysCore v0.1.6
+⌅ [61eb1bfa] GPUCompiler v0.25.0
   [28b8d3ca] GR v0.73.2
   [c145ed77] GenericSchur v0.5.3
-  [d7ba0133] Git v1.3.0
+  [d7ba0133] Git v1.3.1
   [c27321d9] Glob v1.3.1
   [86223c79] Graphs v1.9.0
   [42e2da0e] Grisu v1.0.2
@@ -136,12 +136,12 @@
   [1019f520] JLFzf v0.1.7
   [692b3bcd] JLLWrappers v1.5.0
   [682c06a0] JSON v0.21.4
-  [98e50ef6] JuliaFormatter v1.0.49
+  [98e50ef6] JuliaFormatter v1.0.50
   [ccbc3e58] JumpProcesses v9.10.1
-⌅ [ef3ab10e] KLU v0.4.1
-  [63c18a36] KernelAbstractions v0.9.16
+  [ef3ab10e] KLU v0.5.0
+  [63c18a36] KernelAbstractions v0.9.17
   [ba0b0d4f] Krylov v0.9.5
-  [929cbde3] LLVM v6.5.0
+  [929cbde3] LLVM v6.6.0
   [b964fa9f] LaTeXStrings v1.3.1
   [2ee39098] LabelledArrays v1.15.1
   [984bce1d] LambertW v0.4.6
@@ -154,7 +154,7 @@
   [1d6d02ad] LeftChildRightSiblingTrees v0.2.0
   [2d8b4e74] LevyArea v1.0.0
   [d3d80556] LineSearches v7.2.0
-⌃ [7ed4a6bd] LinearSolve v2.22.1
+  [7ed4a6bd] LinearSolve v2.25.0
   [2ab3a3ac] LogExpFunctions v0.3.27
   [e6f89c97] LoggingExtras v1.0.3
   [bdcacae8] LoopVectorization v0.12.166
@@ -162,13 +162,13 @@
   [1914dd2f] MacroTools v0.5.13
   [d125e4d3] ManualMemory v0.1.8
   [d0879d2d] MarkdownAST v0.1.2
-  [b8f27783] MathOptInterface v1.25.3
+  [b8f27783] MathOptInterface v1.26.0
   [a3b82374] MatrixFactorizations v2.1.0
   [bb5d69b7] MaybeInplace v0.1.1
   [739be429] MbedTLS v1.1.9
   [442fdcdd] Measures v0.3.2
   [e1d29d7a] Missings v1.1.0
-  [961ee093] ModelingToolkit v8.75.0
+⌅ [961ee093] ModelingToolkit v8.75.0
   [46d2c3a1] MuladdMacro v0.2.4
   [102ac46a] MultivariatePolynomials v0.5.4
   [d8a4904e] MutableArithmetics v1.4.1
@@ -177,7 +177,7 @@
   [2774e3e8] NLsolve v4.5.1
   [872c559c] NNlib v0.9.12
   [77ba4419] NaNMath v1.0.2
-  [8913a72c] NonlinearSolve v3.5.6
+  [8913a72c] NonlinearSolve v3.7.3
   [d8793406] ObjectFile v0.4.1
   [6fe1bfb0] OffsetArrays v1.13.0
   [4d8831e6] OpenSSL v1.4.1
@@ -185,11 +185,11 @@
   [3bd65402] Optimisers v0.3.2
   [7f7a1694] Optimization v3.22.0
   [3e6eede4] OptimizationBBO v0.2.1
-  [fd9f6733] OptimizationMOI v0.3.4
+  [fd9f6733] OptimizationMOI v0.4.0
   [4e6fcdb7] OptimizationNLopt v0.2.0
   [36348300] OptimizationOptimJL v0.2.2
   [bac558e1] OrderedCollections v1.6.3
-  [1dea7af3] OrdinaryDiffEq v6.71.0
+  [1dea7af3] OrdinaryDiffEq v6.72.0
   [90014a1f] PDMats v0.11.31
   [65ce6f38] PackageExtensionCompat v1.0.2
   [d96e819e] Parameters v0.12.3
@@ -209,12 +209,12 @@
   [33c8b6b6] ProgressLogging v0.1.4
   [92933f4c] ProgressMeter v1.9.0
   [1fd47b50] QuadGK v2.9.4
-  [74087812] Random123 v1.6.2
+  [74087812] Random123 v1.7.0
   [e6cf234a] RandomNumbers v1.5.3
   [c1ae055f] RealDot v0.1.0
   [3cdcf5f2] RecipesBase v1.3.4
   [01d81517] RecipesPipeline v0.6.12
-  [731186ca] RecursiveArrayTools v3.8.1
+  [731186ca] RecursiveArrayTools v3.10.0
   [f2c3362d] RecursiveFactorization v0.2.21
   [189a3867] Reexport v1.2.2
   [2792f1a3] RegistryInstances v0.1.0
@@ -226,9 +226,9 @@
   [7e49a35a] RuntimeGeneratedFunctions v0.5.12
   [94e857df] SIMDTypes v0.1.0
   [476501e8] SLEEFPirates v0.6.42
-  [0bca4576] SciMLBase v2.26.1
+  [0bca4576] SciMLBase v2.28.0
   [c0aeaf25] SciMLOperators v0.3.7
-  [1ed8b502] SciMLSensitivity v7.55.0
+  [1ed8b502] SciMLSensitivity v7.56.0
   [6c6a2e73] Scratch v1.2.1
   [efcf1570] Setfield v1.1.1
   [992d4aef] Showoff v1.0.3
@@ -244,20 +244,20 @@
   [276daf66] SpecialFunctions v2.3.1
   [aedffcd0] Static v0.8.10
   [0d7ed370] StaticArrayInterface v1.5.0
-  [90137ffa] StaticArrays v1.9.2
+  [90137ffa] StaticArrays v1.9.3
   [1e83bf80] StaticArraysCore v1.4.2
   [82ae8749] StatsAPI v1.7.0
   [2913bbd2] StatsBase v0.34.2
   [4c63d2b9] StatsFuns v1.3.1
   [9672c7b4] SteadyStateDiffEq v2.0.1
-  [789caeaf] StochasticDiffEq v6.65.0
+  [789caeaf] StochasticDiffEq v6.65.1
   [7792a7ef] StrideArraysCore v0.5.2
-  [09ab397b] StructArrays v0.6.17
+  [09ab397b] StructArrays v0.6.18
   [53d494c1] StructIO v0.3.0
   [c3572dad] Sundials v4.24.0
-  [2efcf032] SymbolicIndexingInterface v0.3.6
+  [2efcf032] SymbolicIndexingInterface v0.3.8
   [d1185830] SymbolicUtils v1.5.0
-  [0c5d862f] Symbolics v5.18.0
+  [0c5d862f] Symbolics v5.22.0
   [3783bdb8] TableTraits v1.0.1
   [bd369af6] Tables v1.11.1
   [62fd8b95] TensorCore v0.1.1
@@ -283,7 +283,6 @@
   [19fa3120] VertexSafeGraphs v0.2.0
   [e88e6eb3] Zygote v0.6.69
   [700de1a5] ZygoteRules v0.2.5
-  [ae81ac8f] ASL_jll v0.1.3+0
   [6e34b625] Bzip2_jll v1.0.8+1
   [83423d85] Cairo_jll v1.16.1+1
   [cd4c43a9] Dierckx_jll v0.1.0+0
@@ -297,16 +296,15 @@
   [0656b61e] GLFW_jll v3.3.9+0
   [d2c73de3] GR_jll v0.73.2+0
   [78b55507] Gettext_jll v0.21.0+0
-  [f8c6e375] Git_jll v2.43.0+0
+  [f8c6e375] Git_jll v2.44.0+0
   [7746bdde] Glib_jll v2.76.5+0
   [3b182d85] Graphite2_jll v1.3.14+0
   [2e76f6c2] HarfBuzz_jll v2.8.1+1
   [1d5cc7b8] IntelOpenMP_jll v2024.0.2+0
-⌅ [9cc047cb] Ipopt_jll v300.1400.400+0
-  [aacddb02] JpegTurbo_jll v3.0.1+0
+  [aacddb02] JpegTurbo_jll v3.0.2+0
   [c1c5ebd0] LAME_jll v3.100.1+0
   [88015f11] LERC_jll v3.0.0+1
-  [dad2f222] LLVMExtra_jll v0.0.28+0
+  [dad2f222] LLVMExtra_jll v0.0.29+0
   [1d63c593] LLVMOpenMP_jll v15.0.7+0
   [dd4b983a] LZO_jll v2.10.1+0
 ⌅ [e9f186c6] Libffi_jll v3.2.2+1
@@ -317,12 +315,9 @@
   [4b2f31a3] Libmount_jll v2.35.0+0
 ⌅ [89763e89] Libtiff_jll v4.5.1+1
   [38a345b3] Libuuid_jll v2.36.0+0
-  [d00139f3] METIS_jll v5.1.2+0
   [856f044c] MKL_jll v2024.0.0+0
-⌅ [d7ed1dd3] MUMPS_seq_jll v5.4.1+0
   [079eb43e] NLopt_jll v2.7.1+0
   [e7412a2a] Ogg_jll v1.3.5+1
-⌅ [656ef2d0] OpenBLAS32_jll v0.3.24+0
   [458c3c95] OpenSSL_jll v3.0.13+0
   [efe28fd5] OpenSpecFun_jll v0.5.5+0
   [91d4177d] Opus_jll v1.3.2+0
@@ -333,9 +328,9 @@
   [a44049a8] Vulkan_Loader_jll v1.3.243+0
   [a2964d1f] Wayland_jll v1.21.0+1
   [2381bf8a] Wayland_protocols_jll v1.31.0+0
-  [02c8fc9c] XML2_jll v2.12.2+0
+  [02c8fc9c] XML2_jll v2.12.5+0
   [aed1982a] XSLT_jll v1.1.34+0
-  [ffd25f8a] XZ_jll v5.4.6+0
+  [ffd25f8a] XZ_jll v5.6.0+0
   [f67eecfb] Xorg_libICE_jll v1.0.10+1
   [c834827a] Xorg_libSM_jll v1.2.3+0
   [4f6342f7] Xorg_libX11_jll v1.8.6+0
@@ -424,4 +419,4 @@
   [8e850b90] libblastrampoline_jll v5.8.0+1
   [8e850ede] nghttp2_jll v1.52.0+1
   [3f19e933] p7zip_jll v17.4.0+2
-Info Packages marked with ⌃ and ⌅ have new versions available. Those with ⌃ may be upgradable, but those with ⌅ are restricted by compatibility constraints from upgrading. To see why use `status --outdated -m`

You can also download the manifest file and the project file.

+Info Packages marked with ⌅ have new versions available but compatibility constraints restrict them from upgrading. To see why use `status --outdated -m`

You can also download the manifest file and the project file.

diff --git a/previews/PR249/methods/collocation_loss/index.html b/previews/PR249/methods/collocation_loss/index.html index 4f98dc2..74ff25f 100644 --- a/previews/PR249/methods/collocation_loss/index.html +++ b/previews/PR249/methods/collocation_loss/index.html @@ -2,4 +2,4 @@ Two Stage method (Non-Parametric Collocation) · DiffEqParamEstim.jl

Two Stage method (Non-Parametric Collocation)

The two-stage method is a collocation method for estimating parameters without requiring repeated solving of the differential equation. It does so by determining a smoothed estimated trajectory of the data (a local quadratic polynomial fit by least squares) and then optimizing the parameters so that the derivative function, evaluated along the smoothed trajectory at the data's timepoints, matches the derivatives of the smoothed trajectory. This method is less accurate than the other methods but is much faster, and it is a good method to try first to get into the general “good parameter” region before finishing with one of the other methods. A conceptual sketch follows.
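As a rough conceptual sketch of the two stages (illustrative only; the package smooths with a kernel-weighted local quadratic fit rather than this naive finite-difference stencil):

# Stage 1 estimates derivatives of the (noisy) data; stage 2 matches du/dt = f(u, p, t).
function two_stage_sketch(f!, tpoints, data, p)
    cost = 0.0
    du = similar(data[:, 1])
    for i in 2:(length(tpoints) - 1)
        # crude central-difference derivative estimate of the data
        dudt = (data[:, i + 1] .- data[:, i - 1]) ./ (tpoints[i + 1] - tpoints[i - 1])
        f!(du, data[:, i], p, tpoints[i])  # model derivative at the data point
        cost += sum(abs2, du .- dudt)
    end
    return cost
end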

function two_stage_objective(prob::DEProblem, tpoints, data, adtype = SciMLBase.NoAD();
         kernel = :Epanechnikov,
         loss_func = L2DistLoss)
-end
+end diff --git a/previews/PR249/methods/optimization_based_methods/index.html b/previews/PR249/methods/optimization_based_methods/index.html index b555bb3..d9cb43a 100644 --- a/previews/PR249/methods/optimization_based_methods/index.html +++ b/previews/PR249/methods/optimization_based_methods/index.html @@ -29,4 +29,4 @@ build_loss_objective(prob, alg, loss, Optimization.AutoForwardDiff()) multiple_shooting_objective(prob, alg, loss, Optimization.AutoForwardDiff())

The Problem Generator Function

The argument prob_generator allows one to specify a function for generating new problems from a given parameter set. By default, this just builds a new problem which fixes the element types in a way that's autodifferentiation compatible and adds the new parameter vector p. For example, the code for this is:

prob_generator = (prob, p) -> remake(prob, u0 = convert.(eltype(p), prob.u0), p = p)

Then the new problem with these new values is returned.

One can use this to change the meaning of the parameters. For example, if one instead wanted to optimize the initial conditions for a function without parameters, you could change this to:

prob_generator = (prob, p) -> remake(prob, u0 = p)

which simply uses p as the initial condition in the initial value problem.
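Fitting initial conditions against data could then look like the following sketch (prob, t, data, and the Tsit5 solver choice are assumed from the usual setup):

# Hypothetical: the optimization vector p now plays the role of u0
obj = build_loss_objective(prob, Tsit5(), L2Loss(t, data),
    Optimization.AutoForwardDiff();
    prob_generator = (prob, p) -> remake(prob, u0 = p))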

Using the Objectives for MAP estimates

You can also add a prior option to build_loss_objective and multiple_shooting_objective that essentially turns the estimation into MAP by adding the log of the prior to the log-likelihood (the cost). The option is available as the keyword argument priors; it can take either an array of univariate distributions, one for each parameter, or a multivariate distribution.

ms_obj = multiple_shooting_objective(ms_prob, Tsit5(), L2Loss(t, data); priors = priors,
     discontinuity_weight = 1.0, abstol = 1e-12,
    reltol = 1e-12)
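Here ms_prob and priors are assumed to be defined beforehand; a minimal sketch of constructing priors with Distributions.jl might be:

using Distributions
# Hypothetical choice: independent univariate priors, one per parameter
priors = [Normal(1.5, 0.5), Normal(1.0, 0.5)]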
diff --git a/previews/PR249/methods/recommended_methods/index.html b/previews/PR249/methods/recommended_methods/index.html index 1800e6b..249fe53 100644 --- a/previews/PR249/methods/recommended_methods/index.html +++ b/previews/PR249/methods/recommended_methods/index.html @@ -1,2 +1,2 @@ Recommended Methods · DiffEqParamEstim.jl

Recommended Methods

The recommended method is to use build_loss_objective with the optimizer of your choice. It can be paired with global optimizers from packages like BlackBoxOptim.jl or NLopt.jl, which are much less prone to getting stuck in local minima than local optimization methods. It also lets the user define the cost function however they choose, as a function loss(sol), so the package can fit using any cost function on the solution, making it applicable to non-temporal data and other types of problems. Finally, build_loss_objective works for all the DEProblem types, allowing it to optimize parameters on ODEs, SDEs, DDEs, DAEs, etc.

However, this method requires repeatedly solving the differential equation. If the data is temporal, the most efficient method is the two_stage_objective, which does not require repeated solutions but is not as accurate. Use of the two_stage_objective should be followed by a post-processing step that refines the estimate using a method like build_loss_objective.

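A sketch of that workflow, with hypothetical names (prob, t, data, and p_guess come from your own setup):

using DiffEqParamEstim, OrdinaryDiffEq, Optimization, OptimizationOptimJL

# Stage 1: cheap collocation estimate to land near the "good parameter" region
obj_fast = two_stage_objective(prob, t, data, Optimization.AutoForwardDiff())
p_rough = solve(OptimizationProblem(obj_fast, p_guess), Optim.BFGS()).u

# Stage 2: refine with the full objective, which repeatedly solves the ODE
obj_full = build_loss_objective(prob, Tsit5(), L2Loss(t, data),
    Optimization.AutoForwardDiff())
p_refined = solve(OptimizationProblem(obj_full, p_rough), Optim.BFGS()).u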

diff --git a/previews/PR249/tutorials/ensemble/3ce7972a.svg b/previews/PR249/tutorials/ensemble/8852de6d.svg similarity index 98% rename from previews/PR249/tutorials/ensemble/3ce7972a.svg rename to previews/PR249/tutorials/ensemble/8852de6d.svg index 8f8c60e..e007ff1 100644
(regenerated ensemble plot SVG; markup omitted)
diff --git a/previews/PR249/tutorials/ensemble/index.html b/previews/PR249/tutorials/ensemble/index.html index 32cdc2a..0be7b94 100644 --- a/previews/PR249/tutorials/ensemble/index.html +++ b/previews/PR249/tutorials/ensemble/index.html @@ -33,7 +33,7 @@ end enprob = EnsembleProblem(prob, prob_func = prob_func)
EnsembleProblem with problem ODEProblem

We can check this does what we want by solving it:

# Check above does what we want
 sim = solve(enprob, Tsit5(), trajectories = N)
plot(sim)
Example block output

trajectories=N means “run N times”, and each time it runs the problem returned by the prob_func, which is always the same problem but with the ith initial condition.
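For reference, such a prob_func can be as simple as the following sketch (initial_conditions is a hypothetical stand-in for the pre-drawn starting states used when constructing enprob):

# Run i reuses the same ODEProblem, swapping in the ith initial condition
prob_func = (prob, i, repeat) -> remake(prob, u0 = initial_conditions[i])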

Now let's generate a dataset from that. Let's get data points at every t=0.1 using saveat, and then convert the solution into an array.

# Generate a dataset from these runs
 data_times = 0.0:0.1:10.0
 sim = solve(enprob, Tsit5(), trajectories = N, saveat = data_times)
 data = Array(sim)
2×101×10 Array{Float64, 3}:
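Here data[i, j, k] is the ith state component at the jth save point of the kth trajectory, so, for example:

data[1, 11, 3]  # first component of trajectory 3 at the 11th save point (t = 1.0)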
@@ -94,7 +94,7 @@
 sim = solve(enprob, Tsit5(), trajectories = N, saveat = data_times)
 loss(sim)
10108.69414420129

and get a non-zero loss. So, we now have our problem, our data, and our loss function… we have what we need.

Put this into build_loss_objective.

obj = build_loss_objective(enprob, Tsit5(), loss, Optimization.AutoForwardDiff(),
     trajectories = N,
    saveat = data_times)
(::SciMLBase.OptimizationFunction{true, ADTypes.AutoForwardDiff{nothing, Nothing}, DiffEqParamEstim.var"#29#30"{Nothing, typeof(DiffEqParamEstim.STANDARD_PROB_GENERATOR), Base.Pairs{Symbol, Any, Tuple{Symbol, Symbol}, @NamedTuple{trajectories::Int64, saveat::StepRangeLen{Float64, Base.TwicePrecision{Float64}, Base.TwicePrecision{Float64}, Int64}}}, SciMLBase.EnsembleProblem{SciMLBase.ODEProblem{Vector{Float64}, Tuple{Float64, Float64}, true, Vector{Float64}, SciMLBase.ODEFunction{true, SciMLBase.AutoSpecialize, typeof(Main.pf_func), LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing, SymbolicIndexingInterface.SymbolCache{Nothing, Nothing, Nothing}, Nothing, Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, @NamedTuple{}}, SciMLBase.StandardODEProblem}, typeof(Main.prob_func), typeof(SciMLBase.DEFAULT_OUTPUT_FUNC), typeof(SciMLBase.DEFAULT_REDUCTION), Nothing}, OrdinaryDiffEq.Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, typeof(Main.loss), Nothing, Tuple{}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED_NO_TIME), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}) (generic function with 1 method)

Notice that we added the kwargs for solve of the EnsembleProblem into this. They get passed to the internal solve command, so then the loss is computed on N trajectories at data_times.

Thus, we take this objective function over to any optimization package. Here, since the Lotka-Volterra equation requires positive parameters, we use Fminbox to make sure the parameters stay within the passed bounds. Starting the optimization from [1.3, 0.9], Optim recovers estimates close to the true parameters:

lower = zeros(2)
 upper = fill(2.0, 2)
 optprob = OptimizationProblem(obj, [1.3, 0.9], lb = lower, ub = upper)
 result = solve(optprob, Fminbox(BFGS()))
retcode: Success
@@ -114,4 +114,4 @@
  1.0012380201889803
result
retcode: Success
 u: 2-element Vector{Float64}:
  1.5007432843504753
 1.0012380201889803

if you suspect error is the problem. However, if you're having problems, it's most likely not the ODE solver tolerance but rather that parameter inference is a very hard optimization problem.
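Since the extra keyword arguments to build_loss_objective are forwarded to the internal solve call (as noted above), one quick check is to tighten the solver tolerances; the values here are only illustrative:

# Tighter tolerances for the objective's internal ensemble solves
obj = build_loss_objective(enprob, Tsit5(), loss, Optimization.AutoForwardDiff(),
    trajectories = N, saveat = data_times,
    abstol = 1e-10, reltol = 1e-10)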

diff --git a/previews/PR249/tutorials/generalized_likelihood/index.html b/previews/PR249/tutorials/generalized_likelihood/index.html index 4646561..0a8a33b 100644 --- a/previews/PR249/tutorials/generalized_likelihood/index.html +++ b/previews/PR249/tutorials/generalized_likelihood/index.html @@ -59,34 +59,34 @@ end aggregate_data = convert(Array, VectorOfArray([generate_data(sol, t) for i in 1:100]))
2×200×100 Array{Float64, 3}:
 [:, :, 1] =
- 1.00183   1.03507  1.0553   1.1053    …  0.981619  1.00135  1.02712
- 0.993983  0.90356  0.80675  0.721991     1.09244   1.0093   0.901177
+ 1.00848  1.03541   1.07709   1.09758   …  0.972281  0.991853  1.02219
+ 1.0007   0.889716  0.825136  0.752135     1.12096   0.992856  0.894526
 
 [:, :, 2] =
- 1.00048   1.02611   1.0678    1.08865   …  0.969866  1.00094   1.0279
- 0.988323  0.916621  0.820955  0.734125     1.09092   0.990295  0.89927
+ 0.997555  1.03587   1.05624   1.09601   …  0.986795  1.00638  1.04033
+ 0.994281  0.897937  0.825423  0.751373     1.0947    1.01385  0.903016
 
 [:, :, 3] =
- 0.990819  1.01026   1.04637   1.09822   …  0.989428  1.00207  1.03894
- 0.993753  0.895319  0.799255  0.740525     1.10049   1.00971  0.903551
+ 0.987339  1.0219    1.08947   1.09404   …  0.976175  1.01367   1.03909
+ 0.99631   0.900265  0.806729  0.742526     1.12116   0.994996  0.911344
 
 ;;; … 
 
 [:, :, 98] =
- 0.990565  1.01641   1.05974   1.10027   …  0.974042  0.998717  1.0285
- 0.996744  0.900526  0.809558  0.729908     1.08667   1.00174   0.905122
+ 1.0195   1.02749   1.06059   1.08942   …  0.985329  1.02774   1.02703
+ 1.01199  0.914641  0.817401  0.741986     1.10588   0.982941  0.899752
 
 [:, :, 99] =
- 0.999502  1.0458    1.06307  1.10875   …  0.970544  1.01544   1.029
- 0.992039  0.910526  0.83276  0.737395     1.11352   0.999896  0.928549
+ 1.00263  1.017     1.06295   1.0968    …  0.98176  0.997833  1.01964
+ 1.0061   0.908831  0.830171  0.749531     1.08743  0.998142  0.903247
 
 [:, :, 100] =
- 1.00675  1.02423   1.06984   1.1013    …  0.969086  0.99183   1.04424
- 1.00188  0.913091  0.825878  0.735741     1.11567   0.998706  0.906113

+ 0.99612   1.04308   1.07727  1.11055   …  0.975517  1.01088  1.04228
+ 0.995807  0.902986  0.82285  0.738545     1.0955    1.0003   0.903343

here, with t we measure the solution at 200 evenly spaced points. Thus, aggregate_data is a 2x200x100 matrix where aggregate_data[i,j,k] is the ith component at time j of the kth dataset. What we first want to do is get a matrix of distributions where distributions[i,j] is the likelihood distribution of component i at time j. We can do this via fit_mle on a chosen distributional form. For simplicity, we choose the Normal distribution. aggregate_data[i,j,:] is the array of points at the given component and time, and thus we find the distribution parameters which fit best at each time point via:

using Distributions
 distributions = [fit_mle(Normal, aggregate_data[i, j, :]) for i in 1:2, j in 1:200]
2×200 Matrix{Distributions.Normal{Float64}}:
- Distributions.Normal{Float64}(μ=0.999355, σ=0.00972273)  …  Distributions.Normal{Float64}(μ=1.03591, σ=0.0104808)
- Distributions.Normal{Float64}(μ=0.998381, σ=0.00950554)     Distributions.Normal{Float64}(μ=0.906563, σ=0.00943444)

+ Distributions.Normal{Float64}(μ=1.00114, σ=0.00964076)  …  Distributions.Normal{Float64}(μ=1.03343, σ=0.00945979)
+ Distributions.Normal{Float64}(μ=1.00023, σ=0.00945095)     Distributions.Normal{Float64}(μ=0.908715, σ=0.0100358)

Notice for example that we have:

distributions[1, 1]
Distributions.Normal{Float64}(μ=1.0011437936572505, σ=0.009640764333154996)

that is, it fits the distribution to have its mean just about where our original solution was, and the variance is about how much noise we added to the dataset. This is a good check to see that the distributions we are trying to fit our parameters to make sense.

Note that in this case the Normal distribution was a good choice, and often it's a nice go-to choice, but one should experiment with other choices of distributions as well. For example, a TDist can be an interesting way to incorporate robustness to outliers, since T-distributions with a low number of degrees of freedom act like Normal distributions but with heavier tails (though fit_mle does not work with a T-distribution, you can compute the means/variances and build the appropriate distribution objects yourself).
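A hypothetical sketch of that manual construction (the degrees-of-freedom value ν is a modeling choice here, not something estimated from the data):

using Distributions, Statistics
ν = 4  # small ν gives heavier tails than a Normal
distributions = [begin
        μ = mean(aggregate_data[i, j, :])
        σ = std(aggregate_data[i, j, :])
        μ + σ * TDist(ν)  # affine transform gives a location-scale T-distribution
    end
    for i in 1:2, j in 1:200]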

Once we have the matrix of distributions, we can build the objective function corresponding to that distribution fit:

obj = build_loss_objective(prob1, Tsit5(), LogLikeLoss(t, distributions),
+    maxiters = 10000, verbose = false)
(::SciMLBase.OptimizationFunction{true, SciMLBase.NoAD, DiffEqParamEstim.var"#29#30"{Nothing, typeof(DiffEqParamEstim.STANDARD_PROB_GENERATOR), Base.Pairs{Symbol, Integer, Tuple{Symbol, Symbol}, @NamedTuple{maxiters::Int64, verbose::Bool}}, SciMLBase.ODEProblem{Vector{Float64}, Tuple{Float64, Float64}, true, Vector{Float64}, SciMLBase.ODEFunction{true, SciMLBase.AutoSpecialize, Main.var"#1#2", LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing, SymbolicIndexingInterface.SymbolCache{Nothing, Nothing, Nothing}, Nothing, Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, @NamedTuple{}}, SciMLBase.StandardODEProblem}, OrdinaryDiffEq.Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, LogLikeLoss{Vector{Float64}, Matrix{Distributions.Normal{Float64}}}, Nothing, Tuple{}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED_NO_TIME), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}) (generic function with 1 method)

First, let's use the objective function to plot the likelihood landscape:

using Plots;
 plotly();
 prange = 0.5:0.1:5.0
 heatmap(prange, prange, [obj([j, i]) for i in prange, j in prange],
@@ -97,4 +97,4 @@
 optprob = OptimizationProblem(obj, [2.0, 2.0], lb = first.(bound1), ub = last.(bound1))
OptimizationProblem. In-place: true
 u0: 2-element Vector{Float64}:
  2.0
 2.0

This shows that it found the true parameters as the best fit to the likelihood.

diff --git a/previews/PR249/tutorials/global_optimization/index.html b/previews/PR249/tutorials/global_optimization/index.html index b2e3396..e340b6d 100644 --- a/previews/PR249/tutorials/global_optimization/index.html +++ b/previews/PR249/tutorials/global_optimization/index.html @@ -17,24 +17,24 @@ randomized = VectorOfArray([(sol(t[i]) + 0.01randn(2)) for i in 1:length(t)]) data = convert(Array, randomized) -obj = build_loss_objective(prob, Tsit5(), L2Loss(t, data), Optimization.AutoForwardDiff())
+obj = build_loss_objective(prob, Tsit5(), L2Loss(t, data), Optimization.AutoForwardDiff())
(::SciMLBase.OptimizationFunction{true, ADTypes.AutoForwardDiff{nothing, Nothing}, DiffEqParamEstim.var"#29#30"{Nothing, typeof(DiffEqParamEstim.STANDARD_PROB_GENERATOR), Base.Pairs{Symbol, Union{}, Tuple{}, @NamedTuple{}}, SciMLBase.ODEProblem{Vector{Float64}, Tuple{Float64, Float64}, true, Vector{Float64}, SciMLBase.ODEFunction{true, SciMLBase.AutoSpecialize, typeof(Main.f), LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing, SymbolicIndexingInterface.SymbolCache{Nothing, Nothing, Nothing}, Nothing, Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, @NamedTuple{}}, SciMLBase.StandardODEProblem}, OrdinaryDiffEq.Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, L2Loss{Vector{Float64}, Matrix{Float64}, Nothing, Nothing, Nothing}, Nothing, Tuple{}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED_NO_TIME), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}) (generic function with 1 method)

You can use the NLopt package directly, or through OptimizationNLopt, or through OptimizationMOI, which provides an interface to all MathOptInterface-compatible nonlinear solvers.

We can now use this obj as the objective function with MathProgBase solvers. For our example, we will use NLopt. To use the local derivative-free Constrained Optimization BY Linear Approximations algorithm, we can simply do:

opt = Opt(:LN_COBYLA, 1)
 optprob = Optimization.OptimizationProblem(obj, [1.3])
 res = solve(optprob, opt)
retcode: Success
 u: 1-element Vector{Float64}:
- 1.4999228854669655
+ 1.5000513662053687

For a modified evolutionary algorithm, we can use:

opt = Opt(:GN_ESCH, 1)
 lower_bounds!(opt, [0.0])
 upper_bounds!(opt, [5.0])
 xtol_rel!(opt, 1e-3)
 maxeval!(opt, 100000)
 res = solve(optprob, opt)
retcode: MaxIters
 u: 1-element Vector{Float64}:
- 1.499820226042792
+ 1.5000050023701488

We can even use things like the Improved Stochastic Ranking Evolution Strategy (and add constraints if needed). Let's use this through OptimizationMOI:

optprob = Optimization.OptimizationProblem(obj, [0.2], lb = [-1.0], ub = [5.0])
 res = solve(optprob,
     OptimizationMOI.MOI.OptimizerWithAttributes(NLopt.Optimizer,
         "algorithm" => :GN_ISRES,
         "xtol_rel" => 1e-3,
         "maxeval" => 10000))
retcode: Success
 u: 1-element Vector{Float64}:
- 1.499933798678003
+ 1.507928171897144

which is very robust to the initial condition. We can also use the NLopt interface directly, as below. The fastest result comes from the following algorithm choice:

opt = Opt(:LN_BOBYQA, 1)
 min_objective!(opt, obj)
(minf, minx, ret) = NLopt.optimize(opt, [1.3])

See the NLopt documentation for more details. And give IPOPT or MOSEK a try!
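For instance, a hypothetical IPOPT run through the same OptimizationMOI interface (assuming Ipopt.jl; IPOPT is a local gradient-based solver, so obj should be built with an AD choice as above):

using OptimizationMOI, Ipopt
optprob = Optimization.OptimizationProblem(obj, [1.3], lb = [0.0], ub = [5.0])
res = solve(optprob, Ipopt.Optimizer())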

diff --git a/previews/PR249/tutorials/stochastic_evaluations/index.html b/previews/PR249/tutorials/stochastic_evaluations/index.html index 0fb2d48..f16efd2 100644 --- a/previews/PR249/tutorials/stochastic_evaluations/index.html +++ b/previews/PR249/tutorials/stochastic_evaluations/index.html @@ -29,36 +29,36 @@
 0.03571484690132414
 0.04105769139296714
 ⋮
- 9.844773742866538
- 9.865422514840034
- 9.886410764464223
- 9.907754671488167
- 9.929470960724519
- 9.951576773210256
- 9.974089487866337
- 9.997026518625685
+ 9.844814370823165
+ 9.865464011315295
+ 9.88645316489197
+ 9.907798015732128
+ 9.929515291792404
+ 9.951622136069403
+ 9.974135920593
+ 9.997074063661291
 10.0
u: 565-element Vector{Vector{Float64}}:
 [1.0, 1.0]
- [1.006011484333905, 0.9768816913355973]
- [1.0072504359415368, 0.9723304691576193]
- [1.0086588719185485, 0.9672391149545432]
- [1.0102619932332817, 0.9615475169383896]
- [1.0120888424676189, 0.9551900122328972]
- [1.014173590460398, 0.9480949716356187]
- [1.016556314436265, 0.9401849288253421]
- [1.0192840561097274, 0.9313762966060669]
- [1.022412184300141, 0.9215795864405527]
+ [1.006011486130506, 0.9768817253647569]
+ [1.0072505482610221, 0.9723305929727436]
+ [1.008659086823637, 0.9672392796156307]
+ [1.0102620355574123, 0.9615477261965778]
+ [1.0120889141874378, 0.9551902227061063]
+ [1.014173592915749, 0.9480951944386267]
+ [1.016556224850058, 0.9401851257874725]
+ [1.0192839641628981, 0.9313764587882697]
+ [1.0224121899175915, 0.9215797209048201]
 ⋮
- [0.9579092377294298, 1.245636460272093]
- [0.9634751106391353, 1.194282354863734]
- [0.9701955946368963, 1.144401231975004]
- [0.9781034639539454, 1.0959821604652982]
- [0.9872372181495415, 1.0490149553280907]
- [0.9976414417929722, 1.0034890000324619]
- [1.009367549303591, 0.9593946458266437]
- [1.022473026273728, 0.9167224095297635]
- [1.024256193765442, 0.9113502774265059]

+ [0.9579273006591895, 1.245510468253876]
+ [0.9634961010583939, 1.1941601808801732]
+ [0.9702193066617227, 1.1442825285608862]
+ [0.9781301743019848, 1.095867127416187]
+ [0.9872669336017075, 1.048903567841663]
+ [0.9976747029035332, 1.0033810255169566]
+ [1.0094040777705433, 0.9592898926607711]
+ [1.0225130631973516, 0.9166212071729494]
+ [1.0242678595501056, 0.9113353830614552]

Now let's generate a dataset from 10,000 solutions of the SDE

using RecursiveArrayTools # for VectorOfArray
 t = collect(range(0, stop = 10, length = 200))
 function generate_data(t)
     sol = solve(prob, SRIW1())
@@ -67,49 +67,49 @@
 end
 aggregate_data = convert(Array, VectorOfArray([generate_data(t) for i in 1:10000]))
2×200×10000 Array{Float64, 3}:
 [:, :, 1] =
- 1.01399   1.02828   1.06175  1.11856   …  0.980115  1.00781  1.043
- 0.979843  0.909086  0.8141   0.724433     1.09761   1.01796  0.916162
+ 1.0029   1.03849   1.04741   1.10649   …  0.956683  1.00656  1.00774
+ 1.00224  0.905403  0.814878  0.744776     1.11134   1.01202  0.910574
 
 [:, :, 2] =
- 1.00876  1.02161  1.06501   1.10445   …  0.969157  1.01247  1.04503
- 1.01383  0.90984  0.813062  0.734901     1.10395   1.0314   0.900772
+ 0.997367  1.03365   1.05418   1.09397   …  0.983404  0.995469  1.0336
+ 0.994132  0.903197  0.824195  0.748614     1.12413   0.997972  0.902934
 
 [:, :, 3] =
- 1.00921   1.02015   1.06684   1.10653   …  0.961011  1.01063  1.02099
- 0.998827  0.899139  0.809548  0.755224     1.11371   1.01192  0.893269
+ 1.00215   1.0146    1.04616  1.09582   …  0.978272  1.00574  1.02808
+ 0.996754  0.921081  0.82474  0.743158     1.10505   1.00908  0.896299
 
 ;;; … 
 
 [:, :, 9998] =
- 0.997833  1.03447   1.06436  1.10749   …  0.961878  0.981823  1.02039
- 0.994187  0.901915  0.80838  0.753355     1.12529   1.00561   0.895804
+ 1.03254   1.03561   1.06905   1.10554   …  0.973602  0.990007  1.01806
+ 0.982134  0.909016  0.824261  0.747808     1.12665   0.989489  0.899049
 
 [:, :, 9999] =
- 1.00726   1.02732   1.06888  1.10341   …  0.975453  1.00981  1.01389
- 0.988531  0.897251  0.82058  0.748868     1.12051   1.00411  0.909179
+ 0.990667  1.02705   1.05198   1.09703  …  0.969819  1.00408  1.03291
+ 1.00348   0.916009  0.817525  0.73142     1.1081    1.01201  0.89782
 
 [:, :, 10000] =
- 0.998866  1.02162   1.06181   1.09262   …  0.961639  1.00406   1.02056
- 1.00327   0.908184  0.825475  0.771821     1.10566   0.999981  0.918405

+ 1.00509   1.01726   1.05922   1.09062   …  0.987088  0.996987  1.02145
+ 0.990982  0.898134  0.832817  0.748377     1.10993   1.00219   0.894549

Now let's estimate the parameters. Instead of using single runs from the SDE, we will use an EnsembleProblem. This means that it will solve the SDE N times to come up with an approximate probability distribution at each time point and use that in the likelihood estimate.

monte_prob = EnsembleProblem(prob)
EnsembleProblem with problem SDEProblem

We use Optim.jl for optimization below

obj = build_loss_objective(monte_prob, SOSRI(), L2Loss(t, aggregate_data),
     Optimization.AutoForwardDiff(),
     maxiters = 10000, verbose = false, trajectories = 1000)
 optprob = Optimization.OptimizationProblem(obj, [1.0, 0.5])
 result = solve(optprob, Optim.BFGS())
retcode: Success
 u: 2-element Vector{Float64}:
- 3.6989428672533964
- 5.8963174501164675
+ 3.689364446838662
+ 5.891726229873359

Parameter estimation in the case of SDEs with a regular L2Loss can have poor accuracy, because it only fits against the mean properties, as mentioned in First Differencing.
result.original
 * Status: success
 
  * Candidate solution
-    Final objective value:     2.523618e+03
+    Final objective value:     2.530276e+03
 
  * Found with
     Algorithm:     BFGS
 
  * Convergence measures
-    |x - x'|               = 5.40e+00 ≰ 0.0e+00
+    |x - x'|               = 5.39e+00 ≰ 0.0e+00
     |x - x'|/|x'|          = 9.15e-01 ≰ 0.0e+00
-    |f(x) - f(x')|         = 9.78e+02 ≰ 0.0e+00
-    |f(x) - f(x')|/|f(x')| = 3.88e-01 ≰ 0.0e+00
+    |f(x) - f(x')|         = 9.69e+02 ≰ 0.0e+00
+    |f(x) - f(x')|/|f(x')| = 3.83e-01 ≰ 0.0e+00
     |g(x)|                 = 0.00e+00 ≤ 1.0e-08
 
  * Work counters
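The next hunk shows the tail of a second objective that also weights first differences of the data. A sketch of how such an objective might be constructed, assuming L2Loss's differ_weight and data_weight keyword arguments (the weight values below are illustrative):

obj = build_loss_objective(monte_prob, SOSRI(),
    L2Loss(t, aggregate_data, differ_weight = 1.0, data_weight = 0.5),
    Optimization.AutoForwardDiff(),
    verbose = false, trajectories = 1000, maxiters = 1000)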
@@ -123,10 +123,10 @@
     verbose = false, trajectories = 1000, maxiters = 1000)
 optprob = Optimization.OptimizationProblem(obj, [1.0, 0.5])
 result = solve(optprob, Optim.BFGS())
result.original
- * Status: success
+ * Status: failure (line search failed)
 
  * Candidate solution
-    Final objective value:     9.221681e+02
+    Final objective value:     9.783515e-02
 
  * Found with
     Algorithm:     BFGS
@@ -134,13 +134,13 @@
  * Convergence measures
     |x - x'|               = 0.00e+00 ≤ 0.0e+00
     |x - x'|/|x'|          = 0.00e+00 ≤ 0.0e+00
-    |f(x) - f(x')|         = 2.99e-05 ≰ 0.0e+00
-    |f(x) - f(x')|/|f(x')| = 3.25e-08 ≰ 0.0e+00
-    |g(x)|                 = 1.12e-04 ≰ 1.0e-08
+    |f(x) - f(x')|         = 1.21e-07 ≰ 0.0e+00
+    |f(x) - f(x')|/|f(x')| = 1.23e-06 ≰ 0.0e+00
+    |g(x)|                 = 1.39e-03 ≰ 1.0e-08
 
  * Work counters
-    Seconds run:   1030  (vs limit Inf)
-    Iterations:    125
-    f(x) calls:    478
-    ∇f(x) calls:   478
+    Seconds run:   239  (vs limit Inf)
+    Iterations:    19
+    f(x) calls:    138
+    ∇f(x) calls:   138

Here, we see that we successfully recovered the drift parameter, and got close to the original noise parameter after searching a two-orders-of-magnitude range.

println(result.u)
- [2.7747761600418044, 3.2721518527312448]
+ [1.4998168702454413, 0.9993126743184898]