using InstantiateFromURL
activate_github("QuantEcon/QuantEconLectureAllPackages", tag = "v0.9.0") # activate the QuantEcon environment
using LinearAlgebra, Statistics, Compat # load common packages

# Forward-mode automatic differentiation with ForwardDiff.jl
using ForwardDiff

h(x) = sin(x[1]) + x[1] * x[2] + sinh(x[1] * x[2]) # multivariate function
x = [1.4 2.2]
@show ForwardDiff.gradient(h, x) # use AD, seeds from x

# Or, can use complicated functions of many variables
f(x) = sum(sin, x) + prod(tan, x) * sum(sqrt, x)
g = (x) -> ForwardDiff.gradient(f, x); # g() is now the gradient
@show g(rand(20)); # gradient at a random point
# ForwardDiff.hessian(f, rand(20)) # or the Hessian

function squareroot(x) # pretending we don't know sqrt()
    z = copy(x) # initial starting point for Newton's method
    while abs(z*z - x) > 1e-13
        z = z - (z*z - x)/(2z)
    end
    return z
end
sqrt(2.0)

using ForwardDiff
dsqrt(x) = ForwardDiff.derivative(squareroot, x) # differentiate the hand-rolled Newton iteration
dsqrt(2.0)

# Reverse-mode automatic differentiation with Flux.jl's Tracker
using Flux
using Flux.Tracker
using Flux.Tracker: update!

f(x) = 3x^2 + 2x + 1
# df/dx = 6x + 2
df(x) = Tracker.gradient(f, x)[1]
df(2) # 14.0 (tracked)

A = rand(2, 2)
f(x) = A * x
x0 = [0.1, 2.0]
f(x0)
Flux.jacobian(f, x0)

dsquareroot(x) = Tracker.gradient(squareroot, x)

# a simple linear model and squared-error loss
W = rand(2, 5)
b = rand(2)
predict(x) = W*x .+ b

function loss(x, y)
    ŷ = predict(x)
    sum((y .- ŷ).^2)
end

x, y = rand(5), rand(2) # dummy data
loss(x, y) # ~ 3

W = param(W)
b = param(b)
gs = Tracker.gradient(() -> loss(x, y), Params([W, b]))
Δ = gs[W]
# update the parameter and reset the gradient
update!(W, -0.1Δ)
loss(x, y) # ~ 2.5

# Optimization with Optim.jl
using Optim
using Optim: converged, maximum, maximizer, minimizer, iterations # some extra functions

# univariate minimization on [-2, 1]
result = optimize(x -> x^2, -2.0, 1.0)
converged(result) || error("Failed to converge in $(iterations(result)) iterations")
xmin = result.minimizer
result.minimum

# univariate maximization
f(x) = -x^2
result = maximize(f, -2.0, 1.0)
converged(result) || error("Failed to converge in $(iterations(result)) iterations")
xmax = maximizer(result)
fmax = maximum(result)

# multivariate: the Rosenbrock function
f(x) = (1.0 - x[1])^2 + 100.0 * (x[2] - x[1]^2)^2
x_iv = [0.0, 0.0]
results = optimize(f, x_iv) # i.e. optimize(f, x_iv, NelderMead())

results = optimize(f, x_iv, LBFGS())
println("minimum = $(results.minimum) with argmin = $(results.minimizer) in $(results.iterations) iterations")

# LBFGS with forward-mode automatic differentiation
f(x) = (1.0 - x[1])^2 + 100.0 * (x[2] - x[1]^2)^2
x_iv = [0.0, 0.0]
results = optimize(f, x_iv, LBFGS(), autodiff = :forward) # i.e. use ForwardDiff.jl
println("minimum = $(results.minimum) with argmin = $(results.minimizer) in $(results.iterations) iterations")
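# A minimal extra sketch (not part of the code above, the solver choice is
# illustrative only): once autodiff = :forward is enabled, Optim can also run
# a second-order method such as Newton(), which obtains the Hessian through
# ForwardDiff as well.
results = optimize(f, x_iv, Newton(), autodiff = :forward)
println("minimum = $(results.minimum) with argmin = $(results.minimizer) in $(results.iterations) iterations")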
# supplying an analytical gradient
f(x) = (1.0 - x[1])^2 + 100.0 * (x[2] - x[1]^2)^2
x_iv = [0.0, 0.0]
function g!(G, x)
    G[1] = -2.0 * (1.0 - x[1]) - 400.0 * (x[2] - x[1]^2) * x[1]
    G[2] = 200.0 * (x[2] - x[1]^2)
end
results = optimize(f, g!, x_iv, LBFGS()) # or ConjugateGradient()
println("minimum = $(results.minimum) with argmin = $(results.minimizer) in $(results.iterations) iterations")

# derivative-free and stochastic methods
f(x) = (1.0 - x[1])^2 + 100.0 * (x[2] - x[1]^2)^2
x_iv = [0.0, 0.0]
results = optimize(f, x_iv, SimulatedAnnealing()) # or ParticleSwarm() or NelderMead()

# Constrained nonlinear optimization with JuMP.jl and Ipopt
using JuMP, Ipopt

# solve
# max( x[1] + x[2] )
# st sqrt(x[1]^2 + x[2]^2) <= 1

function squareroot(x) # pretending we don't know sqrt()
    z = x # initial starting point for Newton's method
    while abs(z*z - x) > 1e-13
        z = z - (z*z - x)/(2z)
    end
    return z
end

m = Model(solver = IpoptSolver())
# need to register user-defined functions for AD
JuMP.register(m, :squareroot, 1, squareroot, autodiff = true)

@variable(m, x[1:2], start = 0.5) # start is the initial condition
@objective(m, Max, sum(x))
@NLconstraint(m, squareroot(x[1]^2 + x[2]^2) <= 1)
solve(m)

# solve
# min (1-x)^2 + 100(y-x^2)^2
# st x + y >= 10

using JuMP, Ipopt
m = Model(solver = IpoptSolver(print_level = 0)) # settings for the solver, e.g. suppress output
@variable(m, x, start = 0.0)
@variable(m, y, start = 0.0)
@NLobjective(m, Min, (1 - x)^2 + 100(y - x^2)^2)
solve(m)
println("x = ", getvalue(x), " y = ", getvalue(y))

# adding a (linear) constraint
@constraint(m, x + y == 10)
solve(m)
println("x = ", getvalue(x), " y = ", getvalue(y))

# Global heuristics with BlackBoxOptim.jl
using BlackBoxOptim

function rosenbrock2d(x)
    return (1.0 - x[1])^2 + 100.0 * (x[2] - x[1]^2)^2
end

results = bboptimize(rosenbrock2d; SearchRange = (-5.0, 5.0), NumDimensions = 2);

# Univariate root finding with Roots.jl
using Roots
f(x) = sin(4 * (x - 1/4)) + x + x^20 - 1
fzero(f, 0, 1)

# Systems of nonlinear equations with NLsolve.jl
using NLsolve

f(x) = [(x[1]+3)*(x[2]^3-7)+18
        sin(x[2]*exp(x[1])-1)] # returns an array

results = nlsolve(f, [0.1; 1.2])

results = nlsolve(f, [0.1; 1.2], autodiff = :forward)
println("converged=$(NLsolve.converged(results)) at root=$(results.zero) in $(results.iterations) iterations and $(results.f_calls) function calls")

function f!(F, x) # modifies the first argument
    F[1] = (x[1]+3)*(x[2]^3-7)+18
    F[2] = sin(x[2]*exp(x[1])-1)
end

results = nlsolve(f!, [0.1; 1.2], autodiff = :forward)
println("converged=$(NLsolve.converged(results)) at root=$(results.zero) in $(results.iterations) iterations and $(results.f_calls) function calls")

# Nonlinear least squares with LeastSquaresOptim.jl
using LeastSquaresOptim
function rosenbrock(x)
    [1 - x[1], 100 * (x[2] - x[1]^2)]
end
LeastSquaresOptim.optimize(rosenbrock, zeros(2), Dogleg())

# in-place residual function
function rosenbrock_f!(out, x)
    out[1] = 1 - x[1]
    out[2] = 100 * (x[2] - x[1]^2)
end
LeastSquaresOptim.optimize!(LeastSquaresProblem(x = zeros(2), f! = rosenbrock_f!, output_length = 2))

# if you want to use the gradient (Jacobian of the residuals)
function rosenbrock_g!(J, x)
    J[1, 1] = -1
    J[1, 2] = 0
    J[2, 1] = -200 * x[1]
    J[2, 2] = 100
end
LeastSquaresOptim.optimize!(LeastSquaresProblem(x = zeros(2), f! = rosenbrock_f!, g! = rosenbrock_g!, output_length = 2))
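# A minimal cross-check sketch (not part of the original code): the hand-coded
# Jacobian rosenbrock_g! above can be compared against forward-mode AD applied
# to the out-of-place residual function; x_check is an arbitrary test point
# chosen for illustration.
using ForwardDiff
x_check = [0.5, 0.5]
J_ad = ForwardDiff.jacobian(rosenbrock, x_check)
J_manual = zeros(2, 2)
rosenbrock_g!(J_manual, x_check)
@show J_ad ≈ J_manual # should print true if the analytical Jacobian is correct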