This webpage is for programmers who need examples of use of the functions of the class. The examples are designed to illustrate the syntax. They do not correspond to any meaningful model. For examples of models, visit biogeme.epfl.ch.
Important note: the functions in the module algorithms
belonged to the module optimization
in version 3.2.6 of Biogeme. Since version 3.2.7, the generic optimization algorithms are contained in the module algorithms
while the module optimization
contains functions that can be used directly for estimation by Biogeme.
import datetime
print(datetime.datetime.now())
2021-08-04 16:35:52.221727
import biogeme.version as ver
print(ver.getText())
biogeme 3.2.8 [2021-08-04] Version entirely written in Python Home page: http://biogeme.epfl.ch Submit questions to https://groups.google.com/d/forum/biogeme Michel Bierlaire, Transport and Mobility Laboratory, Ecole Polytechnique Fédérale de Lausanne (EPFL)
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import biogeme.algorithms as algo
import biogeme.biogeme as bio
import biogeme.database as db
import biogeme.exceptions as excep
import biogeme.models as models
from biogeme.expressions import Beta, Variable
Define the verbosity of Biogeme
import biogeme.messaging as msg
logger = msg.bioMessage()
logger.setSilent()
#logger.setDetailed()
#logger.setDebug()
A = np.array([[ 0.3571, -0.1030, 0.0274, -0.0459],
[-0.1030, 0.2525, 0.0736, -0.3845],
[ 0.0274, 0.0736, 0.2340, -0.2878],
[-0.0459, -0.3845, -0.2878, 0.5549]])
A
array([[ 0.3571, -0.103 , 0.0274, -0.0459], [-0.103 , 0.2525, 0.0736, -0.3845], [ 0.0274, 0.0736, 0.234 , -0.2878], [-0.0459, -0.3845, -0.2878, 0.5549]])
L, E, P = algo.schnabelEskow(A)
L
array([[ 0.7449161 , 0. , 0. , 0. ], [-0.06161768, 0.59439319, 0. , 0. ], [-0.38635223, 0.00604629, 0.46941286, 0. ], [-0.51616551, -0.22679419, -0.26511936, 0.00152451]])
The factor $L$ is such that $A + E = PLL^TP^T$. Therefore, the expression below should be the null matrix.
P @ L @ L.T @ P.T - E - A
array([[-5.55111512e-17, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 0.00000000e+00, -5.55111512e-17, 1.38777878e-17, 0.00000000e+00], [ 0.00000000e+00, 1.38777878e-17, 0.00000000e+00, 0.00000000e+00], [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00]])
A2 = np.array([[ 1890.3, -1705.6, -315.8, 3000.3],
[-1705.6, 1538.3, 284.9, -2706.6],
[ -315.8, 284.9, 52.5, -501.2],
[ 3000.3, -2706.6, -501.2, 4760.8]])
A2
array([[ 1890.3, -1705.6, -315.8, 3000.3], [-1705.6, 1538.3, 284.9, -2706.6], [ -315.8, 284.9, 52.5, -501.2], [ 3000.3, -2706.6, -501.2, 4760.8]])
L, E, P = algo.schnabelEskow(A2)
L
array([[ 6.89985507e+01, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [-7.26392069e+00, 3.19413321e-01, 0.00000000e+00, 0.00000000e+00], [-3.92269109e+01, -1.28891153e-01, 4.44731720e-01, 0.00000000e+00], [ 4.34835220e+01, 1.90522168e-01, 3.34584739e-01, 1.71484739e-03]])
The factor $L$ is such that $A + E = PLL^TP^T$. Therefore, the expression below should be the null matrix.
P @ L @ L.T @ P.T - E - A2
array([[-2.27373675e-13, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, -9.09494702e-13]])
class rosenbrock(algo.functionToMinimize):
    """Multidimensional Rosenbrock test function.

    f(x) = sum_{i=0}^{n-2} 100 (x_{i+1} - x_i^2)^2 + (1 - x_i)^2

    Provides the value, analytical gradient and analytical Hessian,
    implementing the ``algo.functionToMinimize`` interface. The function
    is purely analytical: any request involving a data batch is rejected.
    """

    def __init__(self):
        # Current iterate; must be set with setVariables() before
        # calling f / g / h.
        self.x = None

    def setVariables(self, x):
        """Store the point at which the function will be evaluated.

        :param x: array of variables.
        """
        self.x = x

    def _rejectBatch(self, batch):
        """Raise an error if a data batch is requested.

        This function is analytical, not data driven, so stochastic
        (batch) evaluation is meaningless.

        :param batch: requested batch size, or None.
        :raises excep.biogemeError: if batch is not None.
        """
        # NOTE(review): `excep` is assumed to be biogeme.exceptions,
        # imported at file level — confirm the import is present.
        if batch is not None:
            raise excep.biogemeError('This function is not '
                                     'data driven.')

    def f(self, batch=None):
        """Value of the function at the current point.

        :param batch: must be None; any other value raises an error.
        :return: value of the Rosenbrock function.
        """
        self._rejectBatch(batch)
        n = len(self.x)
        return sum(100.0 * (self.x[i + 1] - self.x[i]**2)**2
                   + (1.0 - self.x[i])**2 for i in range(n - 1))

    def g(self):
        """Analytical gradient at the current point.

        Each consecutive pair (x_i, x_{i+1}) contributes to two
        entries of the gradient.

        :return: gradient as a numpy array of length n.
        """
        n = len(self.x)
        g = np.zeros(n)
        for i in range(n - 1):
            g[i] += (-400 * self.x[i]
                     * (self.x[i + 1] - self.x[i]**2)
                     - 2 * (1 - self.x[i]))
            g[i + 1] += 200 * (self.x[i + 1] - self.x[i]**2)
        return g

    def h(self):
        """Analytical Hessian at the current point.

        The Hessian is tridiagonal: each pair (x_i, x_{i+1})
        contributes a 2x2 block.

        :return: Hessian as a numpy n x n array.
        """
        n = len(self.x)
        H = np.zeros((n, n))
        for i in range(n - 1):
            H[i][i] += -400 * self.x[i + 1] + 1200 * self.x[i]**2 + 2
            H[i + 1][i] += -400 * self.x[i]
            H[i][i + 1] += -400 * self.x[i]
            H[i + 1][i + 1] += 200
        return H

    def f_g(self, batch=None):
        """Value and gradient at the current point.

        :param batch: must be None; any other value raises an error.
        :return: tuple (f, g).
        """
        self._rejectBatch(batch)
        return self.f(), self.g()

    def f_g_h(self, batch=None):
        """Value, gradient and Hessian at the current point.

        :param batch: must be None; any other value raises an error.
        :return: tuple (f, g, H).
        """
        self._rejectBatch(batch)
        return self.f(), self.g(), self.h()

    def f_g_bhhh(self, batch=None):
        """BHHH approximation is undefined for an analytical function.

        :raises excep.biogemeError: always.
        """
        raise excep.biogemeError('This function is not data driven.')
theFunction = rosenbrock()
There are infinitely many local optima. For each integer $k$,
$$x^* = \left((-1)^{k+1}, k\pi\right)$$ is a local optimum.
class example58(algo.functionToMinimize):
    """Example 5.8 test function in two variables.

    f(x) = 0.5 x_0^2 + x_0 cos(x_1)

    Provides the value, analytical gradient and analytical Hessian,
    implementing the ``algo.functionToMinimize`` interface. The function
    is purely analytical: any request involving a data batch is rejected.
    """

    def __init__(self):
        # Current iterate; must be set with setVariables() before
        # calling f / g / h.
        self.x = None

    def setVariables(self, x):
        """Store the point at which the function will be evaluated.

        :param x: array of two variables.
        """
        self.x = x

    def _rejectBatch(self, batch):
        """Raise an error if a data batch is requested.

        This function is analytical, not data driven, so stochastic
        (batch) evaluation is meaningless.

        :param batch: requested batch size, or None.
        :raises excep.biogemeError: if batch is not None.
        """
        # NOTE(review): `excep` is assumed to be biogeme.exceptions,
        # imported at file level — confirm the import is present.
        if batch is not None:
            raise excep.biogemeError('This function is not '
                                     'data driven.')

    def f(self, batch=None):
        """Value of the function at the current point.

        :param batch: must be None; any other value raises an error.
        :return: 0.5 x_0^2 + x_0 cos(x_1).
        """
        self._rejectBatch(batch)
        return (0.5 * self.x[0] * self.x[0]
                + self.x[0] * np.cos(self.x[1]))

    def g(self):
        """Analytical gradient at the current point.

        :return: numpy array [x_0 + cos(x_1), -x_0 sin(x_1)].
        """
        return np.array([self.x[0] + np.cos(self.x[1]),
                         -self.x[0] * np.sin(self.x[1])])

    def h(self):
        """Analytical Hessian at the current point.

        :return: numpy 2x2 array
            [[1, -sin(x_1)], [-sin(x_1), -x_0 cos(x_1)]].
        """
        return np.array([[1,
                          -np.sin(self.x[1])],
                         [-np.sin(self.x[1]),
                          -self.x[0] * np.cos(self.x[1])]])

    def f_g(self, batch=None):
        """Value and gradient at the current point.

        :param batch: must be None; any other value raises an error.
        :return: tuple (f, g).
        """
        self._rejectBatch(batch)
        return self.f(), self.g()

    def f_g_h(self, batch=None):
        """Value, gradient and Hessian at the current point.

        :param batch: must be None; any other value raises an error.
        :return: tuple (f, g, H).
        """
        self._rejectBatch(batch)
        return self.f(), self.g(), self.h()

    def f_g_bhhh(self, batch=None):
        """BHHH approximation is undefined for an analytical function.

        :raises excep.biogemeError: always.
        """
        raise excep.biogemeError('This function is not data driven.')
ex58 = example58()
x = np.array([-1.5, 1.5])
theFunction.setVariables(x)
f, g = theFunction.f_g()
alpha, nfev = algo.lineSearch(theFunction, x, f, g, -g)
print(f"alpha={alpha} nfev={nfev}")
alpha=0.0009765625 nfev=12
x0 = np.array([-1.5, 1.5])
xstar, messages = algo.newtonLineSearch(theFunction, x0)
xstar
array([1., 1.])
for k, v in messages.items():
print(f'{k}:\t{v}')
Algorithm: Unconstrained Newton with line search Relative gradient: 5.571454408920588e-11 Number of iterations: 23 Number of function evaluations: 80 Number of gradient evaluations: 80 Number of hessian evaluations: 24 Cause of termination: Relative gradient = 5.6e-11 <= 6.1e-06
x0 = np.array([1, 1])
xstar, messages = algo.newtonLineSearch(ex58, x0)
xstar
array([1. , 3.14159265])
for k, v in messages.items():
print(f'{k}:\t{v}')
Algorithm: Unconstrained Newton with line search Relative gradient: 2.202540372794277e-10 Number of iterations: 5 Number of function evaluations: 28 Number of gradient evaluations: 28 Number of hessian evaluations: 6 Cause of termination: Relative gradient = 2.2e-10 <= 6.1e-06
x0 = np.array([-1.5, 1.5])
xstar, messages = algo.newtonTrustRegion(theFunction, x0)
xstar
array([0.99999995, 0.99999989])
for k, v in messages.items():
print(f'{k}:\t{v}')
Algorithm: Unconstrained Newton with trust region Relative gradient: 9.752977092603033e-10 Cause of termination: Relative gradient = 9.8e-10 <= 6.1e-06 Number of iterations: 28 Number of function evaluations: 49 Number of gradient evaluations: 22 Number of hessian evaluations: 22
x0 = np.array([1.0, 1.0])
xstar, messages = algo.newtonTrustRegion(ex58, x0)
xstar
array([-1.00000000e+00, -1.56439954e-09])
for k, v in messages.items():
print(f'{k}:\t{v}')
Algorithm: Unconstrained Newton with trust region Relative gradient: 1.5037932097369974e-09 Cause of termination: Relative gradient = 1.5e-09 <= 6.1e-06 Number of iterations: 5 Number of function evaluations: 10 Number of gradient evaluations: 6 Number of hessian evaluations: 6
x0 = np.array([-1.5, 1.5])
xstar, messages = algo.bfgsLineSearch(theFunction, x0, maxiter=10000)
xstar
array([0.99999897, 0.99999797])
for k, v in messages.items():
print(f'{k}:\t{v}')
Algorithm: Inverse BFGS with line search Relative gradient: 1.531551364614023e-07 Cause of termination: Relative gradient = 1.5e-07 <= 6.1e-06 Number of iterations: 32 Number of function evaluations: 152 Number of gradient evaluations: 33
x0 = np.array([1, 1])
xstar, messages = algo.bfgsLineSearch(ex58, x0, maxiter=10000)
xstar
array([-1.00000142e+00, 3.57636862e-06])
for k, v in messages.items():
print(f'{k}:\t{v}')
Algorithm: Inverse BFGS with line search Relative gradient: 3.4378215567736776e-06 Cause of termination: Relative gradient = 3.4e-06 <= 6.1e-06 Number of iterations: 10 Number of function evaluations: 48 Number of gradient evaluations: 11
x0 = np.array([-1.5, 1.5])
xstar, messages = algo.bfgsTrustRegion(theFunction, x0, maxiter=10000)
xstar
array([1.00000052, 1.00000078])
for k, v in messages.items():
print(f'{k}:\t{v}')
Algorithm: BFGS with trust region Relative gradient: 1.7562432530828692e-06 Cause of termination: Relative gradient = 1.8e-06 <= 6.1e-06 Number of iterations: 50 Number of function evaluations: 88 Number of gradient evaluations: 38
x0 = np.array([1, 1])
xstar, messages = algo.bfgsTrustRegion(ex58, x0, maxiter=10000)
xstar
array([-9.99999972e-01, 1.58776353e-08])
for k, v in messages.items():
print(f'{k}:\t{v}')
Algorithm: BFGS with trust region Relative gradient: 2.7200302000474536e-08 Cause of termination: Relative gradient = 2.7e-08 <= 6.1e-06 Number of iterations: 17 Number of function evaluations: 30 Number of gradient evaluations: 13
The bound constraints do not exclude the unconstrained optimum.
x0 = np.array([-1.5, 1.5])
theBounds = algo.bioBounds([(-1000, 1000), (-1000, 1000)])
xstar, messages = algo.simpleBoundsNewtonAlgorithm(theFunction,
theBounds,
x0)
xstar
array([0.99999961, 0.99999918])
for k, v in messages.items():
print(f'{k}:\t{v}')
Algorithm: Newton with trust region for simple bound constraints Proportion analytical hessian: 100.0% Relative projected gradient: 2.0646130807833174e-07 Relative change: 0.00038043149628330664 Number of iterations: 32 Number of function evaluations: 77 Number of gradient evaluations: 23 Number of hessian evaluations: 23 Cause of termination: Relative gradient = 2.1e-07 <= 6.1e-06
Here, the bound constraints do exclude the unconstrained optimum.
x0 = np.array([-1.5, 1.5])
theBounds = algo.bioBounds([(-1000, 0), (1, 1000)])
xstar, messages = algo.simpleBoundsNewtonAlgorithm(theFunction,
theBounds,
x0)
xstar
array([-0.99497475, 1. ])
for k, v in messages.items():
print(f'{k}:\t{v}')
Algorithm: Newton with trust region for simple bound constraints Proportion analytical hessian: 100.0% Relative projected gradient: 7.676708776216401e-09 Relative change: 2.0046022513153794e-05 Number of iterations: 7 Number of function evaluations: 20 Number of gradient evaluations: 7 Number of hessian evaluations: 7 Cause of termination: Relative gradient = 7.7e-09 <= 6.1e-06
The bound constraints do not exclude the unconstrained optimum.
x0 = np.array([1, 1])
theBounds = algo.bioBounds([(-1000, 1000), (-1000, 1000)])
xstar, messages = algo.simpleBoundsNewtonAlgorithm(ex58,
theBounds,
x0)
xstar
array([1. , 3.14159265])
for k, v in messages.items():
print(f'{k}:\t{v}')
Algorithm: Newton with trust region for simple bound constraints Proportion analytical hessian: 100.0% Relative projected gradient: 2.389957069442179e-11 Relative change: 2.245052812572204e-06 Number of iterations: 6 Number of function evaluations: 19 Number of gradient evaluations: 7 Number of hessian evaluations: 7 Cause of termination: Relative change = 2.25e-06 <= 1e-05
The bound constraints do exclude the unconstrained optimum.
x0 = np.array([-1, 0])
theBounds = algo.bioBounds([(-1000, 0), (1, 1000)])
xstar, messages = algo.simpleBoundsNewtonAlgorithm(ex58,
theBounds,
x0)
xstar
array([-0.54030231, 1. ])
for k, v in messages.items():
print(f'{k}:\t{v}')
Algorithm: Newton with trust region for simple bound constraints Proportion analytical hessian: 100.0% Relative projected gradient: 8.881784197001252e-16 Relative change: 0.4596976941318611 Number of iterations: 1 Number of function evaluations: 4 Number of gradient evaluations: 2 Number of hessian evaluations: 2 Cause of termination: Relative gradient = 8.9e-16 <= 6.1e-06