] add NLopt BenchmarkTools ForwardDiff NLSolversBase DiffResults Flux

using NLopt, BenchmarkTools, ForwardDiff, NLSolversBase, DiffResults, Flux
using Flux.Tracker: gradient_

# compute the gradient in place, then return f(x)
function fg!(x::Vector, grad::Vector)
    if length(grad) > 0 # gradient of f(x)
        grad[1] = -2*x[1]
        grad[2] = -2*x[2]
    end
    return -(x[1]^2 + x[2]^2)
end

opt = Opt(:LD_LBFGS, 2) # 2 is the dimension of `x`
lower_bounds!(opt, [-1.0, -1.0]) # find `x` above -1.0
upper_bounds!(opt, [2.0, 2.0]) # find `x` below 2.0
min_objective!(opt, fg!) # specifies that the problem is a minimization

(minf,minx,ret) = @btime optimize($opt, [1.0, 1.0])
numevals = opt.numevals # the number of function evaluations
println("got $minf at $minx after $numevals iterations (returned $ret)")

function fg!(x::Vector, grad::Vector)
    if length(grad) > 0 # gradient of f(x)
        grad[1] = 0
        grad[2] = 0.5/sqrt(x[2])
    end
    return sqrt(x[2]) # f(x)
end

opt = Opt(:LD_SLSQP, 2) # 2 is the dimension of `x`
lower_bounds!(opt, [-Inf, 0.]) # forces `x[2]` to be non-negative
min_objective!(opt, fg!) # specifies that the problem is a minimization
xtol_rel!(opt, 1e-4) # set the relative tolerance on `x` for the convergence criterion

function constraint_f(x::Vector, a, b)
    (a*x[1] + b)^3 - x[2] # the constraint constraint_f(x) <= 0 is imposed
end

function constraint_g!(x::Vector, grad::Vector, a, b)
    grad[1] = 3a * (a*x[1] + b)^2
    grad[2] = -1
end

function constraint_fg!(x::Vector, grad::Vector, a, b)
    if length(grad) > 0 # gradient of constraint_f(x)
        constraint_g!(x, grad, a, b)
    end
    return constraint_f(x, a, b)
end

inequality_constraint!(opt, (x,g) -> constraint_fg!(x,g,2,0), 1e-8)
inequality_constraint!(opt, (x,g) -> constraint_fg!(x,g,-1,1), 1e-8)

(minf,minx,ret) = @btime optimize($opt, [1.234, 5.678])
numevals = opt.numevals # the number of function evaluations
println("got $minf at $minx after $numevals iterations (returned $ret)")
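# A possible sanity check, not part of the original example: before handing a
# hand-coded gradient to NLopt, it can be compared against ForwardDiff at an
# arbitrary test point. `x_test`, `g_manual`, and `g_ad` are illustrative names.
x_test = [1.234, 5.678]
g_manual = zeros(2)
constraint_g!(x_test, g_manual, 2, 0)
g_ad = ForwardDiff.gradient(x -> constraint_f(x, 2, 0), x_test)
@assert isapprox(g_manual, g_ad)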
function fg!(x::Vector, grad::Vector)
    if length(grad) > 0 # gradient of f(x)
        grad[1] = 0
        grad[2] = 0.5/sqrt(x[2])
    end
    return sqrt(x[2]) # f(x)
end

opt = Opt(:LD_SLSQP, 2) # 2 is the dimension of `x`
lower_bounds!(opt, [-Inf, 0.]) # forces `x[2]` to be non-negative
min_objective!(opt, fg!) # specifies that the problem is a minimization
xtol_rel!(opt, 1e-4) # set the relative tolerance on `x` for the convergence criterion

# define a vectorized constraint
function constraints_fg!(result, x, jacobian_t, a, b)
    if length(jacobian_t) > 0 # transpose of the Jacobian matrix
        jacobian_t[1,1] = 3a[1] * (a[1]*x[1] + b[1])^2
        jacobian_t[2,1] = -1
        jacobian_t[1,2] = 3a[2] * (a[2]*x[1] + b[2])^2
        jacobian_t[2,2] = -1
    end
    result[:] = [constraint_f(x,a[1],b[1]); constraint_f(x,a[2],b[2])]
end

# add the two constraints as one vectorized constraint
inequality_constraint!(opt,
    (result, x, jacobian_t) -> constraints_fg!(result, x, jacobian_t, [2; -1], [0; 1]),
    [1e-8; 1e-8])

(minf,minx,ret) = @btime optimize($opt, [1.234, 5.678])
numevals = opt.numevals # the number of function evaluations
println("got $minf at $minx after $numevals iterations (returned $ret)")

function f(x)
    return -(x[1]^2 + x[2])^2
end

# compute the gradient by forward-mode automatic differentiation
function g!(G::Vector, x::Vector)
    ForwardDiff.gradient!(G, f, x)
end

function fg!(x::Vector, grad::Vector)
    if length(grad) > 0 # gradient of f(x)
        g!(grad, x)
    end
    f(x)
end

# define the optimization problem
opt = Opt(:LD_LBFGS, 2) # 2 is the dimension of `x`
lower_bounds!(opt, [-1.0, -1.0]) # find `x` above -1.0
upper_bounds!(opt, [2.0, 2.0]) # find `x` below 2.0
min_objective!(opt, fg!) # specifies that the problem is a minimization

# solve the optimization problem
(minf,minx,ret) = @btime optimize($opt, [1.0, 1.0])
numevals = opt.numevals # the number of function evaluations
println("got $minf at $minx after $numevals iterations (returned $ret)")

function fg!(x::Vector, grad::Vector)
    result = DiffResults.GradientResult(x)        # allocate a result object
    result = ForwardDiff.gradient!(result, f, x)  # compute f(x) and its gradient in one pass
    grad[:] = DiffResults.gradient(result)        # copy the gradient into `grad`
    return DiffResults.value(result)              # return f(x)
end

# define the optimization problem
opt = Opt(:LD_LBFGS, 2) # 2 is the dimension of `x`
lower_bounds!(opt, [-1.0, -1.0]) # find `x` above -1.0
upper_bounds!(opt, [2.0, 2.0]) # find `x` below 2.0
min_objective!(opt, fg!) # specifies that the problem is a minimization

# solve the optimization problem
(minf,minx,ret) = @btime optimize($opt, [1.0, 1.0])
numevals = opt.numevals # the number of function evaluations
println("got $minf at $minx after $numevals iterations (returned $ret)")
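# A possible refinement, not part of the original example: preallocate the
# DiffResults buffer once and reuse it, so each objective call avoids creating
# a new GradientResult. `result_buf` and `fg_buffered!` are illustrative names.
const result_buf = DiffResults.GradientResult(zeros(2))
function fg_buffered!(x::Vector, grad::Vector)
    ForwardDiff.gradient!(result_buf, f, x)  # writes value and gradient into result_buf
    grad[:] = DiffResults.gradient(result_buf)
    return DiffResults.value(result_buf)
end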
function fg!(x::Vector, grad::Vector)
    val = f(x)
    grad[:] = gradient_(f, x)[1]
    return val
end

# define the optimization problem
opt = Opt(:LD_LBFGS, 2) # 2 is the dimension of `x`
lower_bounds!(opt, [-1.0, -1.0]) # find `x` above -1.0
upper_bounds!(opt, [2.0, 2.0]) # find `x` below 2.0
min_objective!(opt, fg!) # specifies that the problem is a minimization

# solve the optimization problem
(minf,minx,ret) = @btime optimize($opt, [1.0, 1.0])
numevals = opt.numevals # the number of function evaluations
println("got $minf at $minx after $numevals iterations (returned $ret)")

struct NLoptAdapter{T <: AbstractObjective} <: Function
    nlsolver_base::T
end

# implement fg!; note that the argument order is reversed relative to NLSolversBase;
# fall back to a value-only call when NLopt passes an empty gradient (derivative-free algorithms)
(adapter::NLoptAdapter)(x, df) =
    length(df) > 0 ? adapter.nlsolver_base.fdf(df, x) : adapter.nlsolver_base.f(x)
(adapter::NLoptAdapter)(result, x, jacobian_transpose) =
    adapter.nlsolver_base.fdf(result, jacobian_transpose', x)

# constructors
NLoptAdapter(f, x, autodiff = :forward) = NLoptAdapter(OnceDifferentiable(f, x, autodiff = autodiff))
NLoptAdapter(f!, x::Vector, F::Vector, autodiff = :forward) = NLoptAdapter(OnceDifferentiable(f!, x, F, autodiff = autodiff))

f_opt = NLoptAdapter(x -> -(x[1]^2 + x[2])^2, zeros(2), :forward)

# define the optimization problem
opt = Opt(:LD_LBFGS, 2) # 2 is the dimension of `x`
lower_bounds!(opt, [-1.0, -1.0]) # find `x` above -1.0
upper_bounds!(opt, [2.0, 2.0]) # find `x` below 2.0
min_objective!(opt, f_opt) # specifies that the problem is a minimization

# solve the optimization problem
(minf,minx,ret) = @btime optimize($opt, [1.0, 1.0])
numevals = opt.numevals # the number of function evaluations
println("got $minf at $minx after $numevals iterations (returned $ret)")

myfunc(x) = sqrt(x[2])
x0 = [1.234, 5.678]

function myconstraint(x, a, b)
    (a*x[1] + b)^3 - x[2]
end

# define objective and constraints, using NLoptAdapter
f_opt = NLoptAdapter(myfunc, x0)
c_1_opt = NLoptAdapter(x -> myconstraint(x,2,0), x0)
c_2_opt = NLoptAdapter(x -> myconstraint(x,-1,1), x0)

# define the optimization problem
opt = Opt(:LD_MMA, 2)
lower_bounds!(opt, [-Inf, 0.])
xtol_rel!(opt, 1e-4)
min_objective!(opt, f_opt)
inequality_constraint!(opt, c_1_opt, 1e-8)
inequality_constraint!(opt, c_2_opt, 1e-8)

# solve
(minf,minx,ret) = @btime optimize($opt, $x0)
numevals = opt.numevals # the number of function evaluations
println("got $minf at $minx after $numevals iterations (returned $ret)")

# define objective and constraints, using central finite differences instead of forward-mode AD
f_opt = NLoptAdapter(myfunc, x0, :central)
c_1_opt = NLoptAdapter(x -> myconstraint(x,2,0), x0, :central)
c_2_opt = NLoptAdapter(x -> myconstraint(x,-1,1), x0, :central)

# define the optimization problem
opt = Opt(:LD_MMA, 2)
lower_bounds!(opt, [-Inf, 0.])
xtol_rel!(opt, 1e-4)
min_objective!(opt, f_opt)
inequality_constraint!(opt, c_1_opt, 1e-8)
inequality_constraint!(opt, c_2_opt, 1e-8)

# solve
(minf,minx,ret) = @btime optimize($opt, $x0)
numevals = opt.numevals # the number of function evaluations
println("got $minf at $minx after $numevals iterations (returned $ret)")

function myconstraints!(F, x)
    F[:] = [myconstraint(x,2,0); myconstraint(x,-1,1)]
end

# define objective and vectorized constraint, using NLoptAdapter
f_opt = NLoptAdapter(myfunc, x0, :central)
c_opt = NLoptAdapter(myconstraints!, x0, zeros(2), :central) # 2 is the number of constraints

# define the optimization problem
opt = Opt(:LD_MMA, 2)
lower_bounds!(opt, [-Inf, 0.])
xtol_rel!(opt, 1e-4)
min_objective!(opt, f_opt)
inequality_constraint!(opt, c_opt, fill(1e-8, 2))

# solve
(minf,minx,ret) = @btime optimize($opt, $x0)
numevals = opt.numevals # the number of function evaluations
println("got $minf at $minx after $numevals iterations (returned $ret)")
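# An optional follow-up check, not part of the original example: confirm that
# the reported minimizer satisfies both inequality constraints, i.e. the values
# below should be <= 0 up to the 1e-8 tolerance used above.
@show myconstraint(minx, 2, 0)
@show myconstraint(minx, -1, 1)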
function myfunc(x::Vector, grad::Vector)
    return sqrt(x[1]^2 + x[2]^2)
end

opt = Opt(:LN_NELDERMEAD, 2)
lower_bounds!(opt, [0.0, 0.0])
xtol_rel!(opt, 1e-4)
min_objective!(opt, myfunc)

(minf,minx,ret) = optimize(opt, [1.234, 5.678])
numevals = opt.numevals # the number of function evaluations
println("got $minf at $minx after $numevals iterations (returned $ret)")

f_opt = NLoptAdapter(x -> sqrt(x[1]^2 + x[2]^2), x0, :central)

opt = Opt(:LN_NELDERMEAD, 2)
lower_bounds!(opt, [0.0, 0.0])
xtol_rel!(opt, 1e-4)
min_objective!(opt, f_opt)

(minf,minx,ret) = optimize(opt, [1.234, 5.678])
numevals = opt.numevals # the number of function evaluations
println("got $minf at $minx after $numevals iterations (returned $ret)")
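# An optional check, not in the original: with lower bounds at the origin, the
# minimizer of the Euclidean-norm objective is x = (0, 0), so the reported
# minimizer from the derivative-free run should sit close to the origin.
println("distance of minimizer from the origin: ", sqrt(minx[1]^2 + minx[2]^2))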