This notebook measures the performance of a simple `scipy.optimize` use case: minimizing the Rosenbrock function with and without a numba-JIT-compiled objective.
import numpy as np
from scipy.optimize import minimize
def rosen(x):
    """The N-dimensional Rosenbrock function.

    Generalizes the original 2-D form: for a 2-element input the result is
    identical (100*(x1 - x0**2)**2 + (1 - x0)**2), and longer inputs are no
    longer silently truncated to their first two coordinates.

    Parameters
    ----------
    x : array_like, length >= 2
        Point at which to evaluate the function.

    Returns
    -------
    float
        sum over consecutive pairs of 100*(x[i+1] - x[i]**2)**2 + (1 - x[i])**2.
        Global minimum is 0.0 at x = [1, 1, ..., 1].
    """
    x = np.asarray(x, dtype=float)
    # Vectorized sum over consecutive coordinate pairs.
    return float(np.sum(100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0
                        + (1 - x[:-1]) ** 2.0))
%%timeit
minimize(rosen, [0.0, 0.0], method="nelder-mead",
options={'xtol': 1e-8, 'disp': False})
3.85 ms ± 232 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
import datetime

# Crude wall-clock benchmark: print a timestamp, run 1000 optimizations of the
# plain-Python objective, then print a second timestamp and compare by eye.
print(datetime.datetime.now())
# NOTE(review): newer scipy spells the Nelder-Mead tolerance option 'xatol';
# 'xtol' is kept here exactly as in the original 2018-era run — confirm
# against the installed scipy version before rerunning.
for _ in range(1000):
    minimize(rosen, [0.0, 0.0], method="nelder-mead",
             options={'xtol': 1e-8, 'disp': False})
print(datetime.datetime.now())
2018-07-22 14:33:29.011554 2018-07-22 14:33:32.803683
from numba import jit
@jit
def rosen2(x):
    """The Rosenbrock function (numba-jitted variant of ``rosen``)."""
    residual = x[1] - x[0] ** 2.0
    offset = 1 - x[0]
    return 100.0 * residual ** 2.0 + offset ** 2.0
%%timeit
minimize(rosen2, [0.0, 0.0], method="nelder-mead",
options={'xtol': 1e-8, 'disp': False})
3.55 ms ± 224 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
## Hmm... JIT-compiling the objective didn't seem to help: the timing barely changed (3.55 ms vs 3.85 ms), likely because each call is dominated by scipy's Python-level Nelder-Mead iteration and per-call overhead rather than by the objective function itself.
import numba as nb
nb.__version__
'0.36.2'
import sys
sys.version
'3.6.4 |Anaconda custom (64-bit)| (default, Jan 16 2018, 12:04:33) \n[GCC 4.2.1 Compatible Clang 4.0.1 (tags/RELEASE_401/final)]'