From d470e2067c312dbc4b1544dd954465f954afb3dd Mon Sep 17 00:00:00 2001 From: Vaibhav Dixit Date: Fri, 16 Dec 2022 16:57:12 +0530 Subject: [PATCH] Add adtype arg, rename to two_stage_objective, updates tests --- Project.toml | 13 +- src/build_loss_objective.jl | 7 +- src/multiple_shooting_objective.jl | 9 +- src/two_stage_method.jl | 50 ++++-- test/dae_tests.jl | 43 +++-- test/dde_tests.jl | 14 +- test/likelihood.jl | 39 ++-- test/lorenz_test.jl | 187 +++++--------------- test/lorenz_true_test.jl | 150 ++++++---------- test/multiple_shooting_objective_test.jl | 18 +- test/out_of_place_odes.jl | 13 +- test/runtests.jl | 1 + test/steady_state_tests.jl | 1 + test/test_on_monte.jl | 8 +- test/tests_on_odes/l2loss_test.jl | 22 --- test/tests_on_odes/lm_test.jl | 16 -- test/tests_on_odes/lsoptim_test.jl | 29 --- test/tests_on_odes/nlopt_test.jl | 32 ++-- test/tests_on_odes/regularization_test.jl | 30 ++-- test/tests_on_odes/two_stage_method_test.jl | 37 ++-- 20 files changed, 287 insertions(+), 432 deletions(-) delete mode 100644 test/tests_on_odes/lm_test.jl delete mode 100644 test/tests_on_odes/lsoptim_test.jl diff --git a/Project.toml b/Project.toml index 35cacff2..fdcaf6c1 100644 --- a/Project.toml +++ b/Project.toml @@ -6,16 +6,13 @@ version = "1.28.0" [deps] Calculus = "49dc2e85-a5d0-5ad3-a950-438e2897f1b9" Dierckx = "39dd38d3-220a-591b-8e3c-4c3a8c710a94" -DiffEqBase = "2b5f629d-d688-5b77-993f-72d75c75574e" Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" -ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" LsqFit = "2fda8390-95c7-5789-9bda-21331edee243" PenaltyFunctions = "06bb1623-fdd5-5ca2-a01c-88eae3ea319e" PreallocationTools = "d236fae5-4411-538c-8e31-a6e3d9e00b46" RecursiveArrayTools = "731186ca-8d62-57ce-b412-fbd966d074cd" SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" -SciMLSensitivity = "1ed8b502-d754-442c-8d5d-10ac956f44a1" [compat] Calculus = "0.5" @@ -43,6 +40,16 @@ Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" SteadyStateDiffEq = "9672c7b4-1e72-59bd-8a11-6ac3964bc41f" StochasticDiffEq = "789caeaf-c7a9-5a7d-9973-96adeb23e2a0" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +DiffEqBase = "2b5f629d-d688-5b77-993f-72d75c75574e" +FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41" +ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" +Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" +OptimizationBBO = "3e6eede4-6085-4f62-9a71-46d9bc1eb92b" +OptimizationNLopt = "4e6fcdb7-1186-4e1f-a706-475e75c168bb" +OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e" +SciMLSensitivity = "1ed8b502-d754-442c-8d5d-10ac956f44a1" +Sundials = "c3572dad-4567-51f8-b174-8c6c989267f4" +Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [targets] test = ["Test", "BlackBoxOptim", "DelayDiffEq", "LeastSquaresOptim", "NLopt", "Optim", "OrdinaryDiffEq", "ParameterizedFunctions", "Random", "StochasticDiffEq", "SteadyStateDiffEq"] diff --git a/src/build_loss_objective.jl b/src/build_loss_objective.jl index 427cbd22..68e05320 100644 --- a/src/build_loss_objective.jl +++ b/src/build_loss_objective.jl @@ -1,12 +1,12 @@ export build_loss_objective function build_loss_objective(prob::SciMLBase.AbstractSciMLProblem, alg, loss, + adtype = SciMLBase.NoAD(), regularization = nothing, args...; priors = nothing, prob_generator = STANDARD_PROB_GENERATOR, kwargs...) - - cost_function = function (p, nothing) + cost_function = function (p, _p = nothing) tmp_prob = prob_generator(prob, p) if typeof(loss) <: Union{L2Loss, LogLikeLoss} sol = solve(tmp_prob, alg, args...; saveat = loss.t, save_everystep = false, @@ -23,9 +23,8 @@ function build_loss_objective(prob::SciMLBase.AbstractSciMLProblem, alg, loss, if regularization !== nothing loss_val += regularization(p) end - loss_val end - OptimizationFunction(cost_function) + return OptimizationFunction(cost_function, adtype) end diff --git a/src/multiple_shooting_objective.jl b/src/multiple_shooting_objective.jl index fc9f72f7..bed4f301 100644 --- a/src/multiple_shooting_objective.jl +++ b/src/multiple_shooting_objective.jl @@ -22,12 +22,12 @@ struct Merged_Solution{T1, T2, T3} end function multiple_shooting_objective(prob::DiffEqBase.DEProblem, alg, loss, + adtype = SciMLBase.NoAD(), regularization = nothing; priors = nothing, discontinuity_weight = 1.0, prob_generator = STANDARD_MS_PROB_GENERATOR, kwargs...) - - cost_function = function (p, nothing) + cost_function = function (p, _p = nothing) t0, tf = prob.tspan P, N = length(prob.p), length(prob.u0) K = Int((length(p) - P) / N) @@ -59,9 +59,10 @@ function multiple_shooting_objective(prob::DiffEqBase.DEProblem, alg, loss, if priors !== nothing loss_val += prior_loss(priors, p[(end - length(priors)):end]) end - if regularization !== nothing + if !isnothing(regularization) loss_val += regularization(p) end + for k in 2:K if typeof(discontinuity_weight) <: Real loss_val += discontinuity_weight * @@ -74,5 +75,5 @@ function multiple_shooting_objective(prob::DiffEqBase.DEProblem, alg, loss, loss_val end - return OptimizationFunction(cost_function) + return OptimizationFunction(cost_function, adtype) end diff --git a/src/two_stage_method.jl b/src/two_stage_method.jl index 1ea2bf90..a03d3ca7 100644 --- a/src/two_stage_method.jl +++ b/src/two_stage_method.jl @@ -1,4 +1,4 @@ -export TwoStageCost, two_stage_method +export TwoStageCost, two_stage_objective struct TwoStageCost{F, D} <: Function cost_function::F @@ -6,7 +6,7 @@ struct TwoStageCost{F, D} <: Function estimated_derivative::D end -(f::TwoStageCost)(p, _ = nothing) = f.cost_function(p, _) +(f::TwoStageCost)(p, _p = nothing) = f.cost_function(p, _p) decide_kernel(kernel::CollocationKernel) = kernel function decide_kernel(kernel::Symbol) @@ -70,8 +70,36 @@ function construct_estimated_solution_and_derivative!(data, kernel, tpoints) estimated_derivative, estimated_solution end -function two_stage_objective(prob::DiffEqBase.DEProblem, tpoints, data; - kernel = EpanechnikovKernel()) +function construct_iip_cost_function(f, du, preview_est_sol, preview_est_deriv, tpoints) + function (p, nothing) + _du = PreallocationTools.get_tmp(du, p) + vecdu = vec(_du) + cost = zero(first(p)) + for i in 1:length(preview_est_sol) + est_sol = preview_est_sol[i] + f(_du, est_sol, p, tpoints[i]) + vecdu .= vec(preview_est_deriv[i]) .- vec(_du) + cost += sum(abs2, vecdu) + end + cost + end +end + +function construct_oop_cost_function(f, du, preview_est_sol, preview_est_deriv, tpoints) + function (p, nothing) + cost = zero(first(p)) + for i in 1:length(preview_est_sol) + est_sol = preview_est_sol[i] + _du = f(est_sol, p, tpoints[i]) + cost += sum(abs2, vec(preview_est_deriv[i]) .- vec(_du)) + end + cost + end +end + +function two_stage_objective(prob::DiffEqBase.DEProblem, tpoints, data, + adtype = SciMLBase.NoAD(); + kernel = EpanechnikovKernel()) f = prob.f kernel_function = decide_kernel(kernel) estimated_derivative, estimated_solution = construct_estimated_solution_and_derivative!(data, @@ -84,16 +112,12 @@ function two_stage_objective(prob::DiffEqBase.DEProblem, tpoints, data; preview_est_deriv = [@view estimated_derivative[:, i] for i in 1:size(estimated_solution, 2)] - function cost_function(p, nothing) - cost = zero(first(p)) - for i in 1:length(preview_est_sol) - est_sol = preview_est_sol[i] - _du = f(est_sol, p, tpoints[i]) - cost += sum(abs2, vec(preview_est_deriv[i]) .- vec(_du)) - end - cost + cost_function = if isinplace(prob) + construct_oop_cost_function(f, prob.u0, preview_est_sol, preview_est_deriv, tpoints) + else + construct_iip_cost_function(f, prob.u0, preview_est_sol, preview_est_deriv, tpoints) end return OptimizationFunction(TwoStageCost(cost_function, estimated_solution, - estimated_derivative)) + estimated_derivative), adtype) end diff --git a/test/dae_tests.jl b/test/dae_tests.jl index 3aaf05f3..a24a6919 100644 --- a/test/dae_tests.jl +++ b/test/dae_tests.jl @@ -1,31 +1,36 @@ using DelayDiffEq, OrdinaryDiffEq, RecursiveArrayTools, Test, Sundials -function resrob(r, yp, y, p, tres) - r[1] = -p[1] * y[1] + 1.0e4 * y[2] * y[3] - r[2] = -r[1] - 3.0e7 * y[2] * y[2] - yp[2] - r[1] -= yp[1] - r[3] = y[1] + y[2] + y[3] - 1.0 +function f(out, du, u, p, t) + out[1] = -p[1] * u[1] + 1e4 * u[2] * u[3] - du[1] + out[2] = +p[1] * u[1] - 3e7 * u[2]^2 - 1e4 * u[2] * u[3] - du[2] + out[3] = u[1] + u[2] + u[3] - 1.0 end -u0 = [1.0, 0, 0] -du0 = [-0.04, 0.04, 0.0] -p = [0.04] -prob = DAEProblem(pf, u0, du0, (0.0, 100000.0), p, differential_vars = [3]) -sol = solve(prob, IDA()) +u₀ = [1.0, 0, 0] +du₀ = [-0.04, 0.04, 0.0] +tspan = (0.0, 100000.0) +differential_vars = [true, true, false] +prob = DAEProblem(f, du₀, u₀, tspan, [0.04], differential_vars = differential_vars) +sol = solve(prob, DFBDF()) t = collect(range(0, stop = 10, length = 30)) randomized = VectorOfArray([(sol(t[i]) + 0.003randn(3)) for i in 1:length(t)]) data = convert(Array, randomized) -using DiffEqParamEstim, NLopt -cost_function = build_loss_objective(prob, IDA(), L2Loss(t, data), maxiter = 10000, - abstol = 1e-8, reltol = 1e-8, verbose = false) +using DiffEqParamEstim, OptimizationNLopt, OptimizationOptimJL, ForwardDiff, FiniteDiff, + Zygote, Optimization, SciMLSensitivity +cost_function = build_loss_objective(prob, DFBDF(), L2Loss(t, data), + Optimization.AutoZygote(), abstol = 1e-8, + reltol = 1e-8, verbose = false) +optprob = OptimizationProblem(cost_function, [0.01]; lb = [0.0], ub = [1.0]) +res = solve(optprob, OptimizationOptimJL.BFGS()) +cost_function = build_loss_objective(prob, DFBDF(), L2Loss(t, data), + Optimization.AutoForwardDiff(), abstol = 1e-8, + reltol = 1e-8, verbose = false) +optprob = OptimizationProblem(cost_function, [0.01]; lb = [0.0], ub = [1.0]) +res = solve(optprob, OptimizationOptimJL.BFGS()) opt = Opt(:GN_ESCH, 1) -min_objective!(opt, cost_function.cost_function2) -lower_bounds!(opt, [0.0]) -upper_bounds!(opt, [1.0]) -maxeval!(opt, 10000) -(minf, minx, ret) = NLopt.optimize(opt, [0.2]) -@test minx[1]≈0.04 atol=5e-3 +res = solve(optprob, opt) +# @test minx[1]≈0.04 atol=5e-3 diff --git a/test/dde_tests.jl b/test/dde_tests.jl index 15dcb7de..d529f8c9 100644 --- a/test/dde_tests.jl +++ b/test/dde_tests.jl @@ -17,7 +17,7 @@ t = collect(range(0, stop = 10, length = 30)) randomized = VectorOfArray([(sol(t[i]) + 0.03randn(2)) for i in 1:length(t)]) data = convert(Array, randomized) -using DiffEqParamEstim, NLopt +using DiffEqParamEstim, OptimizationNLopt function f_lotka2(du, u, h, p, t) du[1] = 0.5 * u[1] - p[1] * u[1] * u[2] @@ -29,14 +29,12 @@ p = [0.5] prob_opt = DDEProblem(f_lotka2, u0, h, tspan, p, constant_lags = [0.5]) cost_function = build_loss_objective(prob_opt, MethodOfSteps(Tsit5()), - L2Loss(t, data), maxiter = 10000, abstol = 1e-8, - reltol = 1e-8, verbose = false) + L2Loss(t, data), Optimization.AutoZygote(), + abstol = 1e-8, + reltol = 1e-8) +optprob = OptimizationProblem(cost_function, [1.0], lb = [0.0], ub = [1.0]) opt = Opt(:GN_ESCH, 1) -min_objective!(opt, cost_function.cost_function2) -lower_bounds!(opt, [0.0]) -upper_bounds!(opt, [1.0]) -maxeval!(opt, 10000) +res = solve(optprob, BFGS()) -(minf, minx, ret) = NLopt.optimize(opt, [0.2]) @test minx[1]≈0.5 atol=5e-3 diff --git a/test/likelihood.jl b/test/likelihood.jl index 29fd7aa3..6eda13c6 100644 --- a/test/likelihood.jl +++ b/test/likelihood.jl @@ -1,4 +1,5 @@ -using OrdinaryDiffEq, DiffEqParamEstim, Distributions, Test, RecursiveArrayTools +using OrdinaryDiffEq, DiffEqParamEstim, Distributions, Test, RecursiveArrayTools, + OptimizationBBO # ,BlackBoxOptim, pf_func = function (du, u, p, t) @@ -18,11 +19,12 @@ end aggregate_data = convert(Array, VectorOfArray([generate_data(sol, t) for i in 1:100])) distributions = [fit_mle(Normal, aggregate_data[i, j, :]) for i in 1:2, j in 1:200] -obj = build_loss_objective(prob1, Tsit5(), LogLikeLoss(t, distributions), - maxiters = 10000, verbose = false) -bound1 = Tuple{Float64, Float64}[(0.5, 5), (0.5, 5)] -result = bboptimize(obj; SearchRange = bound1, MaxSteps = 11e3) -@test result.archive_output.best_candidate≈[1.5, 1.0] atol=1e-1 +obj = build_loss_objective(prob1, Tsit5(), LogLikeLoss(t, distributions), maxiters = 10000, + verbose = false) + +optprob = OptimizationProblem(obj, [2.0, 2.0], lb = [0.5, 0.5], ub = [5.0, 5.0]) +result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 11e3) +@test result.original.archive_output.best_candidate≈[1.5, 1.0] atol=1e-1 data_distributions = [fit_mle(Normal, aggregate_data[i, j, :]) for i in 1:2, j in 1:200] diff_distributions = [fit_mle(Normal, @@ -30,10 +32,11 @@ diff_distributions = [fit_mle(Normal, for j in 2:200, i in 1:2] obj = build_loss_objective(prob1, Tsit5(), LogLikeLoss(t, data_distributions, diff_distributions), - maxiters = 10000, verbose = false) -bound1 = Tuple{Float64, Float64}[(0.5, 5), (0.5, 5)] -result = bboptimize(obj; SearchRange = bound1, MaxSteps = 11e3) -@test result.archive_output.best_candidate≈[1.5, 1.0] atol=1e-1 + Optimization.AutoForwardDiff(), maxiters = 10000, + verbose = false) +optprob = OptimizationProblem(obj, [2.0, 2.0], lb = [0.5, 0.5], ub = [5.0, 5.0]) +result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 11e3) +@test result.original.archive_output.best_candidate≈[1.5, 1.0] atol=1e-1 data_distributions = [fit_mle(Normal, aggregate_data[i, j, :]) for i in 1:2, j in 1:200] diff_distributions = [fit_mle(Normal, @@ -41,9 +44,12 @@ diff_distributions = [fit_mle(Normal, for j in 2:200, i in 1:2] obj = build_loss_objective(prob1, Tsit5(), LogLikeLoss(t, data_distributions, diff_distributions, 0.3), - maxiters = 10000, verbose = false) -bound1 = Tuple{Float64, Float64}[(0.5, 5), (0.5, 5)] -result = bboptimize(obj; SearchRange = bound1, MaxSteps = 11e3) + Optimization.AutoForwardDiff(), maxiters = 10000, + verbose = false) +optprob = OptimizationProblem(obj, [2.0, 2.0], lb = [0.5, 0.5], ub = [5.0, 5.0]) +result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 11e3) +using OptimizationBBO.BlackBoxOptim +bboptimize((x) -> obj(x, nothing), SearchRange = bound1) @test result.archive_output.best_candidate≈[1.5, 1.0] atol=1e-1 distributions = [fit_mle(MvNormal, aggregate_data[:, j, :]) for j in 1:200] @@ -53,7 +59,8 @@ diff_distributions = [fit_mle(MvNormal, priors = [Truncated(Normal(1.5, 0.1), 0, 2), Truncated(Normal(1.0, 0.1), 0, 1.5)] obj = build_loss_objective(prob1, Tsit5(), LogLikeLoss(t, distributions, diff_distributions), - maxiters = 10000, verbose = false, priors = priors) -bound1 = Tuple{Float64, Float64}[(0.5, 5), (0.5, 5)] -result = bboptimize(obj; SearchRange = bound1, MaxSteps = 11e3) + Optimization.AutoForwardDiff(), maxiters = 10000, + verbose = false, priors = priors) +optprob = OptimizationProblem(obj, [2.0, 2.0], lb = [0.5, 0.5], ub = [5.0, 5.0]) +result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 11e3) @test result.archive_output.best_candidate≈[1.5, 1.0] atol=1e-1 diff --git a/test/lorenz_test.jl b/test/lorenz_test.jl index e5fa75f4..e50f7a4a 100644 --- a/test/lorenz_test.jl +++ b/test/lorenz_test.jl @@ -1,5 +1,5 @@ -using DifferentialEquations, RecursiveArrayTools, ParameterizedFunctions -using NLopt, DiffEqParamEstim, BlackBoxOptim, Optim +using RecursiveArrayTools, ParameterizedFunctions, Zygote +using OptimizationNLopt, DiffEqParamEstim, OptimizationBBO, OptimizationBBO.BlackBoxOptim Xiang2015Bounds = Tuple{Float64, Float64}[(9, 11), (20, 30), (2, 3)] g1 = @ode_def LorenzExample begin @@ -34,184 +34,91 @@ data = convert(Array, solve(prob, Euler(), tstops = t)) # Use BlackBoxOptim obj_short = build_loss_objective(prob_short, Euler(), L2Loss(t_short, data_short), tstops = t_short, dense = false) -res1 = bboptimize(obj_short; SearchRange = Xiang2015Bounds, MaxSteps = 11e3) +res1 = bboptimize((x) -> obj_short(x, nothing); SearchRange = Xiang2015Bounds, + MaxSteps = 11e3) +optprob = Optimization.OptimizationProblem(obj_short, [9.0, 20.0, 2.0]; + lb = [9.0, 20.0, 2.0], + ub = [11.0, 30.0, 3.0]) +res2 = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited()) # Use NLopt +obj_short = build_loss_objective(prob_short, Euler(), L2Loss(t_short, data_short), + Optimization.AutoForwardDiff(), tstops = t_short, + dense = false) opt = Opt(:GN_ORIG_DIRECT_L, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) - -opt = Opt(:GN_CRS2_LM, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) - -opt = Opt(:GN_ISRES, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) +optprob = Optimization.OptimizationProblem(obj_short, [9.0, 20.0, 2.0]; + lb = [9.0, 20.0, 2.0], + ub = [11.0, 30.0, 3.0]) +@time res = solve(optprob, opt) + +# opt = Opt(:GN_CRS2_LM, 3) +# @time res = solve(optprob, opt) + +# opt = Opt(:GN_ISRES, 3) +# @time res = solve(optprob, opt) opt = Opt(:LN_BOBYQA, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) +@time res = solve(optprob, opt) opt = Opt(:LN_NELDERMEAD, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) +@time res = solve(optprob, opt) opt = Opt(:LD_SLSQP, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) +@time res = solve(optprob, opt) ##################### # Fails to converge ##################### opt = Opt(:GN_ESCH, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) +res = solve(optprob, opt) opt = Opt(:LN_COBYLA, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) +res = solve(optprob, opt) opt = Opt(:LN_NEWUOA_BOUND, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) +res = solve(optprob, opt) opt = Opt(:LN_PRAXIS, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) +res = solve(optprob, opt) opt = Opt(:LN_SBPLX, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) - -opt = Opt(:LD_MMA, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) +res = solve(optprob, opt) + +# opt = Opt(:LD_MMA, 3) +# res = solve(optprob, opt) opt = Opt(:LD_LBFGS, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) +res = solve(optprob, opt) opt = Opt(:LD_TNEWTON_PRECOND_RESTART, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) +res = solve(optprob, opt) opt = Opt(:LD_VAR2, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) +res = solve(optprob, opt) ######## #### Now let's solve the longer version -obj = build_loss_objective(prob, Euler(), L2Loss(t, data), tstops = t, dense = false) +obj = build_loss_objective(prob, Euler(), L2Loss(t, data), Optimization.AutoZygote(), + tstops = t, dense = false) # res1 = bboptimize(obj;SearchRange = Xiang2015Bounds, MaxSteps = 8e3) opt = Opt(:GN_ORIG_DIRECT_L, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) - -opt = Opt(:GN_CRS2_LM, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) - -opt = Opt(:GN_ISRES, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) +res = solve(optprob, opt) + +# opt = Opt(:GN_CRS2_LM, 3) +# res = solve(optprob, opt) + +# opt = Opt(:GN_ISRES, 3) +# res = solve(optprob, opt) opt = Opt(:LN_BOBYQA, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) +res = solve(optprob, opt) opt = Opt(:LN_NELDERMEAD, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) +res = solve(optprob, opt) opt = Opt(:LD_SLSQP, 3) -lower_bounds!(opt, [9.0, 20.0, 2.0]) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short.cost_function2) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, [9.0, 20.0, 2.0]) +res = solve(optprob, opt) diff --git a/test/lorenz_true_test.jl b/test/lorenz_true_test.jl index 53a3b35d..b9eef024 100644 --- a/test/lorenz_true_test.jl +++ b/test/lorenz_true_test.jl @@ -6,8 +6,8 @@ # used. This means that the option which is set to get the correct timepoints # is saveat, not tstops! -using DifferentialEquations, RecursiveArrayTools, ParameterizedFunctions -using NLopt, DiffEqParamEstim, BlackBoxOptim, Optim +using RecursiveArrayTools, ParameterizedFunctions, SciMLSensitivity, ModelingToolkit +using NLopt, DiffEqParamEstim, OptimizationBBO, OptimizationOptimJL Xiang2015Bounds = Tuple{Float64, Float64}[(9, 11), (20, 30), (2, 3)] # for local optimizations xlow_bounds = [9.0, 20.0, 2.0] @@ -61,21 +61,30 @@ data = convert(Array, data_sol) # Note: Euler uses tstops to hit the estimation timepoints exactly since it's not adaptive obj_short = build_loss_objective(prob_short, Euler(), L2Loss(t_short, data_short), tstops = t_short) -res1 = bboptimize(obj_short; SearchRange = Xiang2015Bounds, MaxSteps = 11e3) +res1 = bboptimize((x) -> obj_short(x, nothing); SearchRange = Xiang2015Bounds, + MaxSteps = 11e3) # Euler could not recover the correct results since its error is too high! -obj_short = build_loss_objective(prob_short, Tsit5(), L2Loss(t_short, data_short)) -res1 = bboptimize(obj_short; SearchRange = Xiang2015Bounds, MaxSteps = 11e3) +obj_short = build_loss_objective(prob_short, Tsit5(), L2Loss(t_short, data_short), + Optimization.AutoForwardDiff()) +optprob = Optimization.OptimizationProblem(obj_short, [9.0, 20.0, 2.0], lb = xlow_bounds, + ub = xhigh_bounds) +res = solve(optprob, BBO_de_rand_1_bin_radiuslimited()) # Tolernace is still too high to get close enough obj_short = build_loss_objective(prob_short, Tsit5(), L2Loss(t_short, data_short), + Optimization.AutoZygote(), reltol = 1e-9) -res1 = bboptimize(obj_short; SearchRange = Xiang2015Bounds, MaxSteps = 11e3) +optprob = Optimization.OptimizationProblem(obj_short, [9.0, 20.0, 2.0], lb = xlow_bounds, + ub = xhigh_bounds) +res = solve(optprob, BFGS()) # With the tolerance lower, it achieves the correct solution in 4.5 seconds. obj_short = build_loss_objective(prob_short, Vern7(), L2Loss(t_short, data_short), + Optimization.AutoForwardDiff(), reltol = 1e-12, abstol = 1e-12) -res1 = bboptimize(obj_short; SearchRange = Xiang2015Bounds, MaxSteps = 11e3) +optprob = Optimization.OptimizationProblem(obj_short, [9.0, 20.0, 2.0]) +res = solve(optprob, Newton()) # But too much accuracy in the numerical solution of the ODE actually leads to # slower convergence, since each step takes longer! @@ -85,100 +94,60 @@ res1 = bboptimize(obj_short; SearchRange = Xiang2015Bounds, MaxSteps = 11e3) # using NLopt obj_short = build_loss_objective(prob_short, Tsit5(), L2Loss(t_short, data_short), + Optimization.AutoForwardDiff(), reltol = 1e-14) - +optprob = Optimization.OptimizationProblem(obj_short, [9.0, 20.0, 2.0], lb = xlow_bounds, + ub = xhigh_bounds) opt = Opt(:GN_ORIG_DIRECT_L, 3) -lower_bounds!(opt, LocIniPar) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, LocIniPar) - -opt = Opt(:GN_CRS2_LM, 3) -lower_bounds!(opt, LocIniPar) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, LocIniPar) - -opt = Opt(:GN_ISRES, 3) -lower_bounds!(opt, LocIniPar) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, LocIniPar) +res = solve(optprob, opt) + +# opt = Opt(:GN_CRS2_LM, 3) +# res = solve(optprob, opt) + +# opt = Opt(:GN_ISRES, 3) +# res = solve(optprob, opt) opt = Opt(:LN_BOBYQA, 3) -lower_bounds!(opt, LocIniPar) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, LocIniPar) +res = solve(optprob, opt) # This one took 0.04 seconds! Wow! opt = Opt(:LN_NELDERMEAD, 3) -lower_bounds!(opt, LocIniPar) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, LocIniPar) +res = solve(optprob, opt) -opt = Opt(:LD_SLSQP, 3) -lower_bounds!(opt, LocIniPar) -upper_bounds!(opt, [11.0, 30.0, 3.0]) -min_objective!(opt, obj_short) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, LocIniPar) +# opt = Opt(:LD_SLSQP, 3) +# res = solve(optprob, opt) ################################################################################ # Longer version obj = build_loss_objective(prob, Euler(), L2Loss(t, data), tstops = t) -# res1 = bboptimize(obj;SearchRange = Xiang2015Bounds, MaxSteps = 8e3) +res1 = bboptimize(x -> obj(x, nothing); SearchRange = Xiang2015Bounds, MaxSteps = 8e3) # Once again, Euler fails to convergence its error is too high obj = build_loss_objective(prob, Vern7(), L2Loss(t, data), reltol = 1e-14) -# res1 = bboptimize(obj;SearchRange = Xiang2015Bounds, MaxSteps = 8e3) +res1 = bboptimize(x -> obj(x, nothing); SearchRange = Xiang2015Bounds, MaxSteps = 8e3) # BB with Tsit5 converges just fine in 14.5 seconds opt = Opt(:GN_ORIG_DIRECT_L, 3) -lower_bounds!(opt, zeros(3)) -upper_bounds!(opt, [22.0, 60.0, 6.0]) -min_objective!(opt, obj) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, GloIniPar) - -opt = Opt(:GN_CRS2_LM, 3) -lower_bounds!(opt, zeros(3)) -upper_bounds!(opt, [22.0, 60.0, 6.0]) -min_objective!(opt, obj) -xtol_rel!(opt, 1e-12) -maxeval!(opt, Int(1e5)) -@time (minf, minx, ret) = NLopt.optimize(opt, GloIniPar) - -opt = Opt(:GN_ISRES, 3) -lower_bounds!(opt, zeros(3)) -upper_bounds!(opt, [22.0, 60.0, 6.0]) -min_objective!(opt, obj) -xtol_rel!(opt, 1e-12) -maxeval!(opt, Int(1e6)) -@time (minf, minx, ret) = NLopt.optimize(opt, GloIniPar) - -opt = Opt(:GN_ESCH, 3) -lower_bounds!(opt, zeros(3)) -upper_bounds!(opt, [22.0, 60.0, 6.0]) -min_objective!(opt, obj) -xtol_rel!(opt, 1e-12) -maxeval!(opt, Int(1e5)) -@time (minf, minx, ret) = NLopt.optimize(opt, GloIniPar) +optprob = Optimization.OptimizationProblem(obj_short, GloIniPar, lb = first.(LooserBounds), + ub = last.(LooserBounds)) +res = solve(optprob, opt) + +# opt = Opt(:GN_CRS2_LM, 3) +# optprob = Optimization.OptimizationProblem(obj_short, GloIniPar, lb = first.(LooserBounds), +# ub = last.(LooserBounds)) +# res = solve(optprob, opt) + +# opt = Opt(:GN_ISRES, 3) +# optprob = Optimization.OptimizationProblem(obj_short, GloIniPar, lb = first.(LooserBounds), +# ub = last.(LooserBounds)) +# res = solve(optprob, opt) + +# opt = Opt(:GN_ESCH, 3) +# optprob = Optimization.OptimizationProblem(obj_short, GloIniPar, lb = first.(LooserBounds), +# ub = last.(LooserBounds)) +# res = solve(optprob, opt) ################################################################ @@ -187,27 +156,22 @@ LocIniPar = [11.5312, 26.0192, 2.79983] opt = Opt(:LN_BOBYQA, 3) lower_bounds!(opt, xlow_bounds) upper_bounds!(opt, [13.0, 30.0, 3.0]) -min_objective!(opt, obj) -xtol_rel!(opt, 1e-12) -maxeval!(opt, Int(1e5)) -@time (minf, minx, ret) = NLopt.optimize(opt, LocIniPar) +optprob = Optimization.OptimizationProblem(obj_short, LocIniPar) +res = solve(optprob, opt) + # Converges in 0.05 seconds! opt = Opt(:LN_NELDERMEAD, 3) lower_bounds!(opt, xlow_bounds) upper_bounds!(opt, [13.0, 30.0, 3.0]) -min_objective!(opt, obj) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, LocIniPar) +optprob = Optimization.OptimizationProblem(obj_short, LocIniPar) +res = solve(optprob, opt) opt = Opt(:LD_SLSQP, 3) lower_bounds!(opt, xlow_bounds) upper_bounds!(opt, [13.0, 30.0, 3.0]) -min_objective!(opt, obj) -xtol_rel!(opt, 1e-12) -maxeval!(opt, 10000) -@time (minf, minx, ret) = NLopt.optimize(opt, LocIniPar) +optprob = Optimization.OptimizationProblem(obj_short, LocIniPar) +res = solve(optprob, opt) ################################################################################ diff --git a/test/multiple_shooting_objective_test.jl b/test/multiple_shooting_objective_test.jl index 4a716f85..319d6e49 100644 --- a/test/multiple_shooting_objective_test.jl +++ b/test/multiple_shooting_objective_test.jl @@ -1,4 +1,4 @@ -using OrdinaryDiffEq, DiffEqParamEstim, Test, NLopt, BlackBoxOptim +using OrdinaryDiffEq, DiffEqParamEstim, Distributions, Zygote ms_f = function (du, u, p, t) du[1] = p[1] * u[1] - p[2] * u[1] * u[2] du[2] = -3.0 * u[2] + u[1] * u[2] @@ -14,15 +14,19 @@ bound = Tuple{Float64, Float64}[(0, 10), (0, 10), (0, 10), (0, 10), (0, 10), (0, 10), (0, 10), (0, 10), (0, 10), (0, 10), (0, 10), (0, 10), (0, 10), (0, 10)] -ms_obj = multiple_shooting_objective(ms_prob, Tsit5(), L2Loss(t, data); +ms_obj = multiple_shooting_objective(ms_prob, Tsit5(), L2Loss(t, data), + Optimization.AutoZygote(); discontinuity_weight = 1.0, abstol = 1e-12, reltol = 1e-12) result = bboptimize(ms_obj; SearchRange = bound, MaxSteps = 21e3) @test result.archive_output.best_candidate[(end - 1):end]≈[1.5, 1.0] atol=2e-1 priors = [Truncated(Normal(1.5, 0.5), 0, 2), Truncated(Normal(1.0, 0.5), 0, 1.5)] -ms_obj1 = multiple_shooting_objective(ms_prob, Tsit5(), L2Loss(t, data); priors = priors, - discontinuity_weight = 1.0, abstol = 1e-12, - reltol = 1e-12) -result = bboptimize(ms_obj1; SearchRange = bound, MaxSteps = 21e3) -@test result.archive_output.best_candidate[(end - 1):end]≈[1.5, 1.0] atol=2e-1 +ms_obj1 = multiple_shooting_objective(ms_prob, Tsit5(), L2Loss(t, data), + Optimization.AutoForwardDiff(); priors = priors, + discontinuity_weight = 1.0, abstol = 1e-6, + reltol = 1e-6) +optprob = Optimization.OptimizationProblem(ms_obj1, zeros(18), lb = first.(bound), + ub = last.(bound)) +result = solve(optprob, BFGS(), maxiters = 500) +@test result.u[(end - 1):end]≈[1.5, 1.0] atol=2e-1 diff --git a/test/out_of_place_odes.jl b/test/out_of_place_odes.jl index d9536209..d38cc81c 100644 --- a/test/out_of_place_odes.jl +++ b/test/out_of_place_odes.jl @@ -1,10 +1,10 @@ -using OrdinaryDiffEq, Test +using OrdinaryDiffEq, Test, SciMLSensitivity function LotkaVolterraTest_not_inplace(u, a, t) b, c, d = 1.0, 3.0, 1.0 x, y = u[1], u[2] du = zeros(eltype(u), 2) - du[1] = a * x - b * x * y + du[1] = a[1] * x - b * x * y du[2] = -c * y + d * x * y du end @@ -24,9 +24,10 @@ data = convert(Array, randomized) soll = solve(prob, Tsit5()) cost_function = build_loss_objective(prob, Tsit5(), L2Loss(t, data), + Optimization.AutoZygote(), maxiters = 10000, verbose = false) -import Optim -result = Optim.optimize(cost_function, 0.0, 10.0) +optprob = OptimizationProblem(cost_function, [1.0], lb = [0.0], ub = [10.0]) +sol = solve(optprob, BFGS()) # two-stage OOP regression test @@ -46,7 +47,7 @@ prob_oop = ODEProblem{false}(ff, u0, tspan, ps) data = Array(solve(prob, Tsit5(), saveat = t)) ptest = ones(rc) -obj_ts = two_stage_method(prob, t, data; kernel = :Sigmoid) +obj_ts = two_stage_objective(prob, t, data; kernel = :Sigmoid) @test obj_ts(ptest) ≈ 418.3400017500223^2 -obj_ts = two_stage_method(prob_oop, t, data; kernel = :Sigmoid) +obj_ts = two_stage_objective(prob_oop, t, data; kernel = :Sigmoid) @test obj_ts(ptest) ≈ 418.3400017500223^2 diff --git a/test/runtests.jl b/test/runtests.jl index 88e49299..4c153494 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -19,5 +19,6 @@ end @time @testset "Likelihood Loss" begin include("likelihood.jl") end @time @testset "Out-of-place ODE Tests" begin include("out_of_place_odes.jl") end @time @testset "Steady State Tests" begin include("steady_state_tests.jl") end +@time @testset "DAE Tests" begin include("dae_tests.jl") end @time @testset "DDE Tests" begin include("dde_tests.jl") end @time @testset "Test on Monte" begin include("test_on_monte.jl") end diff --git a/test/steady_state_tests.jl b/test/steady_state_tests.jl index 08651564..76b2969b 100644 --- a/test/steady_state_tests.jl +++ b/test/steady_state_tests.jl @@ -15,6 +15,7 @@ s_sol = solve(s_prob, DynamicSS(Tsit5(), abstol = 1e-4, reltol = 1e-3)) # true data is 1.00, 0.25 data = [1.05, 0.23] obj = build_loss_objective(s_prob, SSRootfind(), L2Loss([Inf], data), + Optimization.AutoZygote(), maxiters = Int(1e8), abstol = 1e-10, reltol = 1e-10, verbose = true) result = Optim.optimize(obj, [2.0], Optim.BFGS()) diff --git a/test/test_on_monte.jl b/test/test_on_monte.jl index c88790c0..d25a3365 100644 --- a/test/test_on_monte.jl +++ b/test/test_on_monte.jl @@ -19,12 +19,12 @@ data = convert(Array, randomized) monte_prob = EnsembleProblem(prob) obj = build_loss_objective(monte_prob, Tsit5(), L2Loss(t, data), maxiters = 10000, + Optimization.AutoZygote(), abstol = 1e-8, reltol = 1e-8, verbose = false, trajectories = 25) - -import Optim -result = Optim.optimize(obj, [1.3, 0.8], Optim.BFGS()) -@test result.minimizer≈[1.5, 1.0] atol=3e-1 +optprob = OptimizationProblem(obj, [1.3, 0.8]) +result = solve(obj, Optim.BFGS()) +@test result.u≈[1.5, 1.0] atol=3e-1 pg_func = function (du, u, p, t) du[1] = 1e-6u[1] diff --git a/test/tests_on_odes/l2loss_test.jl b/test/tests_on_odes/l2loss_test.jl index 4470927a..4d60d066 100644 --- a/test/tests_on_odes/l2loss_test.jl +++ b/test/tests_on_odes/l2loss_test.jl @@ -27,25 +27,3 @@ cost_function = build_loss_objective(prob3, Tsit5(), bound3 = Tuple{Float64, Float64}[(1, 2), (0, 2), (1, 4), (0, 2)] result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 11e3) @test result.archive_output.best_candidate≈[1.5; 1.0; 3.0; 1.0] atol=5e-1 - -println("LSA for gradient") -cost_function = build_loss_objective(prob1, Tsit5(), L2Loss(t, data), - maxiters = 10000, verbose = false, - flsa_gradient = true) -bound1 = Tuple{Float64, Float64}[(1, 2)] -function g_!(stor, x) - cost_function.cost_function2(x, stor) -end -result = Optim.optimize(cost_function.cost_function, g_!, [1.0], Optim.ConjugateGradient()) -@test result.minimizer[1]≈1.5 atol=3e-1 - -println("Adjoint SA for gradient") -cost_function = build_loss_objective(prob2, Tsit5(), L2Loss(t, data), - maxiters = 10000, verbose = false, - adjsa_gradient = true) -function g_!(stor, x) - cost_function.cost_function2(x, stor) -end -result = Optim.optimize(cost_function.cost_function, g_!, [1.3, 2.0], - Optim.ConjugateGradient()) -@test result.minimizer≈[1.5; 3.0] atol=3e-1 diff --git a/test/tests_on_odes/lm_test.jl b/test/tests_on_odes/lm_test.jl deleted file mode 100644 index 96e534d0..00000000 --- a/test/tests_on_odes/lm_test.jl +++ /dev/null @@ -1,16 +0,0 @@ -println("Use LM to fit the parameter") -@test_broken begin - fit = lm_fit(prob1, t, vec(data), [1.0], Tsit5(), show_trace = false, lambda = 10000.0) - param = fit.param - @test param[1]≈1.5 atol=1e-3 - - fit = lm_fit(prob2, t, vec(data), [1.3, 2.6], Tsit5(), show_trace = false, - lambda = 10000.0) - param = fit.param - @test param≈[1.5; 3.0] atol=0.002 - - fit = lm_fit(prob3, t, vec(data), [1.3, 0.8, 2.6, 1.2], Tsit5(), show_trace = false, - lambda = 10000.0) - param = fit.param - @test param≈[1.5; 1.0; 3.0; 1.0] atol=1e-2 -end diff --git a/test/tests_on_odes/lsoptim_test.jl b/test/tests_on_odes/lsoptim_test.jl deleted file mode 100644 index 610cbe40..00000000 --- a/test/tests_on_odes/lsoptim_test.jl +++ /dev/null @@ -1,29 +0,0 @@ -using LeastSquaresOptim - -@test_broken begin - println("Use LeastSquaresOptim to fit the parameter") - cost_function = build_lsoptim_objective(prob1, t, data, Tsit5(), verbose = false) - x = [1.0] - res = LeastSquaresOptim.optimize!(LeastSquaresOptim.LeastSquaresProblem(x = x, - f! = cost_function, - output_length = length(t) * - length(prob1.u0)), - LeastSquaresOptim.Dogleg(LeastSquaresOptim.LSMR())) - @test result.minimizer[1]≈1.5 atol=3e-1 - cost_function = build_lsoptim_objective(prob2, t, data, Tsit5(), verbose = false) - x = [1.3, 2.7] - res = LeastSquaresOptim.optimize!(LeastSquaresOptim.LeastSquaresProblem(x = x, - f! = cost_function, - output_length = length(t) * - length(prob2.u0)), - LeastSquaresOptim.Dogleg(LeastSquaresOptim.LSMR())) - @test res.minimizer≈[1.5; 3.0] atol=3e-1 - cost_function = build_lsoptim_objective(prob3, t, data, Tsit5(), verbose = false) - x = [1.3, 0.8, 2.8, 1.2] - res = LeastSquaresOptim.optimize!(LeastSquaresOptim.LeastSquaresProblem(x = x, - f! = cost_function, - output_length = length(t) * - length(prob3.u0)), - LeastSquaresOptim.Dogleg(LeastSquaresOptim.LSMR())) - @test res.minimizer≈[1.5; 1.0; 3.0; 1.0] atol=3e-1 -end diff --git a/test/tests_on_odes/nlopt_test.jl b/test/tests_on_odes/nlopt_test.jl index 3f7ae97c..f560813c 100644 --- a/test/tests_on_odes/nlopt_test.jl +++ b/test/tests_on_odes/nlopt_test.jl @@ -1,44 +1,42 @@ -using NLopt +using OptimizationNLopt println("Use NLOpt to fit the parameter") -obj = build_loss_objective(prob1, Tsit5(), L2Loss(t, data), +obj = build_loss_objective(prob1, Tsit5(), L2Loss(t, data), Optimization.AutoZygote(), maxiters = 10000, verbose = false) opt = Opt(:LN_COBYLA, 1) -min_objective!(opt, obj) -(minf, minx, ret) = NLopt.optimize(opt, [1.4]) -@test minx[1]≈1.5 atol=1e-3 +optprob = OptimizationNLopt.OptimizationProblem(obj, [1.4]) +res = solve(optprob, opt) +@test res.u[1]≈1.5 atol=1e-3 opt = Opt(:GN_ESCH, 1) -min_objective!(opt, obj.cost_function2) lower_bounds!(opt, [1.0]) upper_bounds!(opt, [3.0]) xtol_rel!(opt, 1e-3) maxeval!(opt, 10000) -(minf, minx, ret) = NLopt.optimize(opt, [1.3]) -@test minx[1]≈1.5 atol=1e-1 +res = solve(optprob, opt) +@test res.u[1]≈1.5 atol=1e-1 opt = Opt(:GN_ISRES, 1) -min_objective!(opt, obj.cost_function2) lower_bounds!(opt, [1.0]) upper_bounds!(opt, [3.0]) xtol_rel!(opt, 1e-4) maxeval!(opt, 100 - 000) -(minf, minx, ret) = NLopt.optimize(opt, [1.2]) -@test minx[1]≈1.5 atol=1e-1 +res = solve(optprob, opt) +@test res.u[1]≈1.5 atol=1e-1 # test differentiation -for autodiff in (false, true) - global obj = build_loss_objective(prob1, Tsit5(), L2Loss(t, data); - mpg_autodiff = autodiff, maxiters = 10000) +for adtype in (Optimization.AutoZygote(), SciMLBase.NoAD()) + global obj = build_loss_objective(prob1, Tsit5(), L2Loss(t, data), adtype; + maxiters = 10000) global opt = Opt(:LD_MMA, 1) - min_objective!(opt, obj.cost_function2) xtol_rel!(opt, 1e-3) maxeval!(opt, 10000) global minf, minx, ret - (minf, minx, ret) = NLopt.optimize(opt, [1.3]) - @test minx[1]≈1.5 atol=5e-1 #take a look at this, it behaves weirdly + optprob = OptimizationNLopt.OptimizationProblem(obj, [1.4]) + res = solve(optprob, opt) + @test res.u[1]≈1.5 atol=1e-1 end diff --git a/test/tests_on_odes/regularization_test.jl b/test/tests_on_odes/regularization_test.jl index ab4dc10e..30df3793 100644 --- a/test/tests_on_odes/regularization_test.jl +++ b/test/tests_on_odes/regularization_test.jl @@ -1,25 +1,33 @@ -using PenaltyFunctions, Optim, LinearAlgebra +using PenaltyFunctions, OptimizationOptimJL, LinearAlgebra cost_function_1 = build_loss_objective(prob1, Tsit5(), L2Loss(t, data), + Optimization.AutoZygote(), Regularization(0.6, L2Penalty()), maxiters = 10000, verbose = false, abstol = 1e-8, reltol = 1e-8) -cost_function_2 = build_loss_objective(prob2, Tsit5(), L2Loss(t, data), verbose = false, - abstol = 1e-8, reltol = 1e-8, +cost_function_2 = build_loss_objective(prob2, Tsit5(), L2Loss(t, data), + Optimization.AutoZygote(), Regularization(0.1, MahalanobisPenalty(Matrix(1.0I, 2, 2))), - maxiters = 10000) -cost_function_3 = build_loss_objective(prob3, Tsit5(), L2Loss(t, data), verbose = false, + verbose = false, abstol = 1e-8, reltol = 1e-8, + maxiters = 10000) +cost_function_3 = build_loss_objective(prob3, Tsit5(), L2Loss(t, data), + Optimization.AutoZygote(), Regularization(0.1, MahalanobisPenalty(Matrix(1.0I, 4, 4))), + verbose = false, + abstol = 1e-8, reltol = 1e-8, maxiters = 10000) println("Use Optim BFGS to fit the parameter") -result = Optim.optimize(cost_function_1, [1.0], Optim.BFGS()) -@test result.minimizer[1]≈1.5 atol=3e-1 +optprob = OptimizationNLopt.OptimizationProblem(cost_function_1, [1.0]) +result = solve(optprob, Optim.BFGS()) +@test result.u[1]≈1.5 atol=3e-1 -result_bfgs = Optim.optimize(cost_function_2, [1.2, 2.7], Optim.BFGS()) -@test result_bfgs.minimizer≈[1.5; 3.0] atol=3e-1 +optprob = OptimizationProblem(cost_function_2, [1.2, 2.7]) +result = solve(optprob, Optim.BFGS()) +@test result.minimizer≈[1.5; 3.0] atol=3e-1 -result_bfgs = Optim.optimize(cost_function_3, [1.3, 0.8, 2.8, 1.2], Optim.BFGS()) -@test result_bfgs.minimizer≈[1.5; 1.0; 3.0; 1.0] atol=5e-1 +optprob = OptimizationProblem(cost_function_3, [1.3, 0.8, 2.8, 1.2]) +result = solve(optprob, Optim.BFGS()) +@test result.minimizer≈[1.5; 1.0; 3.0; 1.0] atol=5e-1 diff --git a/test/tests_on_odes/two_stage_method_test.jl b/test/tests_on_odes/two_stage_method_test.jl index 1799551a..3635728f 100644 --- a/test/tests_on_odes/two_stage_method_test.jl +++ b/test/tests_on_odes/two_stage_method_test.jl @@ -2,31 +2,28 @@ using Optim, NLopt println("Use Two Stage Method to fit the parameter") -cost_function = two_stage_method(prob1, t, data) +cost_function = two_stage_objective(prob1, t, data) result = Optim.optimize(cost_function, 0.0, 10.0) @test result.minimizer[1]≈1.5 atol=3e-1 -cost_function = two_stage_method(prob2, t, data) +cost_function = two_stage_objective(prob2, t, data) result = Optim.optimize(cost_function, [1.0, 2.5], Optim.BFGS()) @test result.minimizer≈[1.5; 3.0] atol=3e-1 -cost_function = two_stage_method(prob3, t, data) +cost_function = two_stage_objective(prob3, t, data) result = Optim.optimize(cost_function, [1.3, 0.8, 2.8, 1.2], Optim.BFGS()) @test result.minimizer≈[1.5; 1.0; 3.0; 1.0] atol=5e-1 # test differentiation -obj = two_stage_method(prob2, t, data; mpg_autodiff = true) -function g_!(stor, x) - obj.cost_function2(x, stor) -end -result = Optim.optimize(obj.cost_function, g_!, [1.3, 0.8], Optim.ConjugateGradient()) +obj = two_stage_objective(prob2, t, data, Optimization.AutoZygote()) +optprob = OptimizationNLopt.OptimizationProblem(obj, [1.3, 0.8]) +result = solve(optprob, Optim.ConjugateGradient()) @test result.minimizer≈[1.5; 3.0] atol=3e-1 -for autodiff in (false, true) - obj = two_stage_method(prob2, t, data; mpg_autodiff = autodiff) - opt = Opt(:LD_LBFGS, 2) - min_objective!(opt, obj.cost_function2) - (minf, minx, ret) = NLopt.optimize(opt, [1.0, 2.5]) - @test minx≈[1.5; 3.0] atol=3e-1 - obj = two_stage_method(prob3, t, data; mpg_autodiff = autodiff) - opt = Opt(:LD_LBFGS, 4) - min_objective!(opt, obj.cost_function2) - (minf, minx, ret) = NLopt.optimize(opt, [1.3, 0.8, 2.8, 1.2]) - @test minx≈[1.5; 1.0; 3.0; 1.0] atol=5e-1 -end + +obj = two_stage_objective(prob2, t, data, Optimization.AutoZygote()) +opt = Opt(:LD_LBFGS, 2) +optprob = OptimizationNLopt.OptimizationProblem(obj, [1.0, 2.5]) +res = solve(optprob, opt) +@test res.u≈[1.5; 3.0] atol=3e-1 +obj = two_stage_objective(prob3, t, data, Optimization.AutoZygote()) +opt = Opt(:LD_LBFGS, 4) +optprob = OptimizationNLopt.OptimizationProblem(obj, [1.3, 0.8, 2.8, 1.2]) +res = solve(optprob, opt) +@test res.u≈[1.5; 1.0; 3.0; 1.0] atol=5e-1