using JuMP
using Ipopt

"""
    datagen(N, T)

Generate data for a linear model to test optimization.

`N` is the number of individuals and `T` the number of time periods
(both are converted to `Int64`, so `datagen(1e4, 5)` is accepted).

Returns `(X, y, bAns, sigAns, n)` where `X` is the `n × 16` regressor
matrix (intercept plus 15 random columns), `y = X*bAns + error`,
`bAns` is the true coefficient vector, `sigAns` the true error
standard deviation, and `n = N*T` the number of observations.
"""
function datagen(N, T)
    # Fix the seed so the generated data set is reproducible.
    srand(1234)

    # Inputs to functions such as `ones` need to be integers.
    N = convert(Int64, N)
    T = convert(Int64, T)
    n = convert(Int64, N * T)   # total number of observations

    # Generate the Xs: an intercept column plus 15 random regressors.
    # NOTE: no `const` here — `const` is unsupported on local variables;
    # the generated data is shared with the rest of the script through
    # the return value below.
    X = cat(2, ones(N*T, 1),
            5 + 3*randn(N*T, 1), rand(N*T, 1), 2.5 + 2*randn(N*T, 1),
            15 + 3*randn(N*T, 1), .7 - .1*randn(N*T, 1),
            5 + 3*randn(N*T, 1), rand(N*T, 1), 2.5 + 2*randn(N*T, 1),
            15 + 3*randn(N*T, 1), .7 - .1*randn(N*T, 1),
            5 + 3*randn(N*T, 1), rand(N*T, 1), 2.5 + 2*randn(N*T, 1),
            15 + 3*randn(N*T, 1), .7 - .1*randn(N*T, 1));

    # Generate the betas (true X coefficients).
    bAns = [2.15; 0.10; 0.50; 0.10; 0.75; 1.2;
            0.10; 0.50; 0.10; 0.75; 1.2;
            0.10; 0.50; 0.10; 0.75; 1.2]

    # True standard deviation of the errors.
    sigAns = 0.3

    # Generate the Ys from the Xs, betas, and mean-zero error draws.
    draw = sigAns * randn(N*T, 1)
    y = X*bAns + draw

    # Return generated data so the estimation code below has access.
    return X, y, bAns, sigAns, n
end

# ---------------------------------------------------------------------------
# Generate a data set with N = 10,000 individuals observed T = 5 times.
# ---------------------------------------------------------------------------
X, y, bAns, sigAns, n = datagen(1e4, 5);

# ---------------------------------------------------------------------------
# OLS benchmark: closed-form coefficient and error-s.d. estimates.
# ---------------------------------------------------------------------------
bHat = (X'*X) \ (X'*y);
sigHat = sqrt((y - X*bHat)' * (y - X*bHat) / (n - size(X, 2)));

# Side-by-side comparison: OLS estimates vs. true values.
[round([bHat; sigHat], 3) [bAns; sigAns]]

# ---------------------------------------------------------------------------
# Maximum likelihood estimation of the same model via JuMP + Ipopt.
# ---------------------------------------------------------------------------
myMLE = Model(solver=IpoptSolver(tol=1e-6));
@defVar(myMLE, b[i=1:size(X, 2)]);
@defVar(myMLE, s >= 0.0);           # error s.d. must be non-negative
# Gaussian log-likelihood: (n/2)*log(1/(2πs²)) − SSR/(2s²).
@setNLObjective(myMLE, Max,
    (n/2)*log(1/(2*pi*s^2)) -
    sum{(y[i] - sum{X[i,k]*b[k], k=1:size(X, 2)})^2, i=1:size(X, 1)} / (2s^2));
status = solve(myMLE)
bOpt = getValue(b[:]);
sOpt = getValue(s);

# ML estimates vs. true values; attained log-likelihood.
[round([bOpt; sOpt], 3) [bAns; sigAns]]
getObjectiveValue(myMLE)

# ---------------------------------------------------------------------------
# Standard errors from the Hessian of the log-likelihood at the optimum.
# ---------------------------------------------------------------------------
this_par = myMLE.colVal
m_eval = JuMP.JuMPNLPEvaluator(myMLE);
MathProgBase.initialize(m_eval, [:ExprGraph, :Grad, :Hess])
hess_struct = MathProgBase.hesslag_structure(m_eval)
hess_vec = zeros(length(hess_struct[1]))
# Total number of constraints (all kinds) — Lagrange multipliers are zero.
numconstr = length(m_eval.m.linconstr) +
            length(m_eval.m.quadconstr) +
            length(m_eval.m.nlpdata.nlconstr)
dimension = length(myMLE.colVal)
MathProgBase.eval_hesslag(m_eval, hess_vec, this_par, 1.0, zeros(numconstr))

# The evaluator returns one triangle of the Hessian; symmetrize it
# (H + H' double-counts the diagonal, so subtract it once).
this_hess_ld = sparse(hess_struct[1], hess_struct[2], hess_vec,
                      dimension, dimension)
hOpt = this_hess_ld + this_hess_ld' - sparse(diagm(diag(this_hess_ld)));
hOpt = -full(hOpt);                 # negate since we are maximizing

# Standard errors = sqrt of the diagonal of the inverse Hessian.
seOpt = sqrt(diag(full(hOpt) \ eye(size(hOpt, 1))));

# ML estimates with their standard errors.
[round([bOpt; sOpt], 3) round(seOpt, 3)]

# ---------------------------------------------------------------------------
# Classical OLS standard errors for comparison.
# ---------------------------------------------------------------------------
seHat = sqrt((sigHat^2) .* diag((X'*X) \ eye(size(X, 2))));
[round(bHat, 3) round(seHat, 3)]