#!/usr/bin/env python # coding: utf-8 # ![MOSEK ApS](https://www.mosek.com/static/images/branding/webgraphmoseklogocolor.png ) # # *Fusion:* Object-Oriented API for Conic Optimization # # *Fusion* is an Object-Oriented API specifically designed for Conic Optimization with **MOSEK**. In version 9 of **MOSEK** *Fusion* is available for Python, C#, Java and C++. # # *Fusion* makes it easy to assemble optimization models from conic blocks without going through the nitty-gritty of converting the optimization problem into matrix form - *Fusion* takes care of that part. It makes it easy to add and remove constraints and experiment with the model, making prototyping of complex models very quick. It provides linear expressions, linear algebra operations and cones. # # This is a quick demonstration of the main capabilities of *Fusion*. More details may be found in the documentation for the respective APIs. In particular section 6 of each Fusion API manual contains a lot more modeling techniques. # # In[1]: from mosek.fusion import * import numpy as np import sys # # Problem formulation in *Fusion* # Fusion solves optimization problems of the form # $$ # \begin{array}{rll} # \mbox{minimize/maximize} & c^T x & \\ # \mbox{subject to} & A^i x + b^i & \in & K^i, & \forall i, \\ # \end{array} # $$ # # where $K^i$ are convex cones. The possible cones $K^i$ are # # * $\{0\}$ - the zero cone. This expresses simply a linear equality constraint $Ax+b=0$. # * $\mathbb{R}_+$ - positive orthant. This expresses simply a linear inequality constraint $Ax+b\geq 0$. # * $\mathcal{Q}$ - quadratic cone, $x_1\geq \sqrt{x_2^2+\cdots+x_n^2}$ where $n$ is the length of the cone. # * $\mathcal{Q_r}$ - rotated quadratic cone, $2x_1x_2\geq x_3^2+\cdots+x_n^2$, $x_1,x_2\geq 0$. # * $K_\mathrm{exp}$ - the exponential cone $x_1\geq x_2\exp(x_3/x_2)$, useful in particular in entropy ptoblems. 
# * $\mathcal{P}_\alpha$ - the three-dimensional power cone $x_1^\alpha x_2^{1-\alpha}\geq |x_3|$, where $0<\alpha<1$. # * $\mathbb{S}_+$ - the cone of positive semidefinite matrices. # # That allows for expressing linear, conic quadratic (SOCP), semidefinite, relative entropy, $p$-norm and many other types of problems. # # # Linear expressions # # Linear expressions are represented by the class ``Expression``, of which ``Variable`` (that is an optimization variable in the model) is a special case. Linear expressions are constructed in an intuitive way. For example if $A,b$ are constant data matrices then we can form $Ax+b$ as follows: # In[2]: # Fix some dimensions and constant data for the example m, n = 10, 6 A = np.random.uniform(-1.0, 1.0, [m,n]) b = np.random.uniform(-1.0, 1.0, [m]) # Construct a model with a variable of length n M = Model("example model") x = M.variable(n) # Construct Ax+b e = Expr.add(Expr.mul(A,x), b) # Check we have the right dimension print(e.getShape()) # # Conic constraints # # We can now solve the unconstrained linear regression problem, that is minimize $\|Ax+b\|_2$. This will require a new variable $t$ such that the compound vector $(t,Ax+b)$ belongs to the quadratic cone (i.e. $t\geq \|Ax+b\|_2$). The compound vector is created as a stack from existing expressions of compatible shapes. # In[3]: # Add scalar variable and conic quadratic constraint t >= \|Ax+b\|_2 t = M.variable() M.constraint(Expr.vstack(t, e), Domain.inQCone()) # Let t be the objective we minimize M.objective(ObjectiveSense.Minimize, t) # Solve and print solution M.solve() print(x.level()) # Suppose we want to further restrict $x$, say $f^Tx\geq 1$ where $f$ is a given vector. 
# In[4]: f = np.random.uniform(0.0, 1.0, [n]) # f^T dot x >= 1 M.constraint(Expr.dot(f,x), Domain.greaterThan(1.0)) M.solve() print(x.level()) # # Matrix notation # # Now suppose we want change the objective to # # $$ # \mbox{minimize}\quad \|Ax + b\|_2 + \sum_{j=1}^n \lambda_i e^{x_i} # $$ # where $\lambda$ are positive coefficients. This can be rewritten as # # $$ # \begin{array}{lll} # \mbox{minimize} & t + \lambda^T w & \\ # \mbox{subject to} & (t, Ax+b) \in \mathcal{Q}, & \\ # & (w_i, 1, x_i) \in K_\mathrm{exp}. # \end{array} # $$ # # (Indeed, the last set of constraints is just $w_i\geq e^{x_i}$). We only need to define the last set of constraints and this can be achieved in one go by stacking them in a matrix as follows: # In[5]: w = M.variable(n) M.constraint(Expr.hstack(w, Expr.constTerm(n, 1.0), x), Domain.inPExpCone()); # The horizontal stack above creates an expression of shape $n \times 3$ which looks as follows # # $$ # \left[ # \begin{array}{cc} # w_1 & 1 & x_1 \\ # w_2 & 1 & x_2 \\ # . & . & . \\ # w_n & 1 & x_n \\ # \end{array} # \right] # $$ # # and the conic constraint is just a short way of writing that *every row of that matrix belongs to the exponential cone*, which is exactly what we need. # # We can now solve it, this time with log on screen. # In[6]: lamb = np.random.uniform(0.0, 1.0, [n]) M.setLogHandler(sys.stdout) # Objective = t + lambda dot w M.objective(ObjectiveSense.Minimize, Expr.add(t, Expr.dot(lamb,w))) M.solve() print(x.level()) # # Semidefinite variables # # An $n\times n$ semidefinite variable can be defined with: # In[7]: n = 5 M = Model("semidefinite model") X = M.variable(Domain.inPSDCone(n)) print(X.getShape()) # and it can be used like any variable of this shape. For example we can solve the very simple illustrative problem of maximizing the sum of elements of $X$ subject to a fixed diagonal. 
# In[8]:

# Random fixed diagonal for the PSD variable X
diag = np.random.uniform(1.0, 2.0, n)
print(diag, "\n")

# Fixed diagonal: X_ii = diag_i
M.constraint(X.diag(), Domain.equalsTo(diag))

# Objective = sum of elements of X
M.objective(ObjectiveSense.Maximize, Expr.sum(X))

# Solve
M.solve()
# X.level() returns the solution as a flat array of length n*n; reshape using
# n (previously hard-coded as [5,5]) so the display tracks the model dimension.
print(np.reshape(X.level(), [n,n]))

# # A cheatsheet to building expressions
#
#

# In[9]:

n = 6
m = 10
a = np.random.uniform(0.0, 1.0, [n])
A = np.random.uniform(0.0, 1.0, [m,n])

M = Model('demo model')

x = M.variable('x', n, Domain.unbounded())
y = M.variable('y', n, Domain.greaterThan(0.0))
z = M.variable('z', n, Domain.inRange(-1.0, 1.0))

# Multi-dimensional variable
X = M.variable('X', [n,n])

# Binary version
e0 = Expr.add(x,1.0)            # x+1.0 (element-wise)
e1 = Expr.add(x,y)              # x+y
e2 = Expr.add(a,y)              # a+y
e3 = Expr.sub(x,y)              # x-y
e4 = Expr.add(Expr.add(x,y),z)  # x+y+z

# List version
e5 = Expr.add([x, y, z])        # x+y+z

# Multiplication
e6 = Expr.mul(7.0,x)            # 7.0*x
e7 = Expr.mulElm(a,x)           # a *. x, element wise multiplication

# Inner and outer products
e8 = Expr.dot(a,x)              # a'*x
print(e8.getShape())
e9 = Expr.outer(a,x)            # a*x' Outer product
print(e9.getShape())

# Reduction type operations
e10 = Expr.sum(x)

# Matrix multiplication
e11 = Expr.mul(A, X)            # AX
print(e11.getShape())

# Creative Commons License
This work is licensed under a Creative Commons Attribution 4.0 International License. The **MOSEK** logo and name are trademarks of Mosek ApS. The code is provided as-is. Compatibility with future releases of **MOSEK** or the `Fusion API` is not guaranteed. For more information contact our [support](mailto:support@mosek.com).