%config InlineBackend.figure_formats = ['svg']
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [8, 4]
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import time
SciPy is a Python-based ecosystem of open-source software for mathematics, science, and engineering, see https://scipy.org/.
In particular, these are some of the core packages (not all of these are technically a part of SciPy):
In this lecture when we talk about SciPy, we will be referring to the SciPy library.
The SciPy library comes with a large set of packages:
See https://docs.scipy.org/doc/ for a complete list.
Today we will showcase a few selected packages
The scipy.special
package include many special functions, e.g.
airy
elliptic
bessel
gamma
beta
hypergeometric
parabolic cylinder
mathieu
spheroidal wave
struve
kelvin
For a complete list see the Scipy Reference Manual on special functions
The Bessel functions $j_v(z)$ are eigen functions of the radial modes of a circular drum
scipy.special.jv(v, z)
where v
is the order and z
the argument.
from scipy.special import jv
# Plot the first three orders of the Bessel function J_v on [0, 50].
x = np.linspace(0.0, 50.0, num=1000)
for order in range(3):
    plt.plot(x, jv(order, x), label=f'$j_{order}(x)$')
plt.legend(); plt.grid(); plt.xlabel('$x$');
scipy.integration
¶The scipy.integrate
package contains methods for
scipy.integrate
¶For adaptive numerical integration of function objects, use the scipy.integrate
methods:
quad(func, start, end)
, single integraldblquad(func, starts, ends)
, double integraltplquad(func, starts, ends)
, triple integralnquad(func, ranges)
, integration over multiple variables.scipy.integrate.quad
¶Lets compute the integral $$\int_{-\infty}^{\infty} \, e^{-x^2} dx = \sqrt{\pi}$$ over the whole real interval, using numerical quadrature.
# Integrate exp(-x^2) over the whole real line with adaptive quadrature
# and compare against the exact value sqrt(pi).
from scipy.integrate import quad

def func(x):
    """Gaussian integrand exp(-x**2)."""
    return np.exp(-x**2)

# Note to self: Try increasing the accuracy epsabs
I, abs_err = quad(func, -np.inf, np.inf, epsabs=1e-4)
print('Analytic:', np.sqrt(np.pi) )
print('Numeric :', I, '+/-', abs_err )
The quad
function performs adaptive integration and ensures that the result has the requested accuracy.
For numerical integration of already discretized data scipy.integrate
provides cumtrapz
, simps
and romb
Lets try out cumtrapz
# `cumtrapz` was renamed `cumulative_trapezoid` in SciPy 1.6 and the old
# alias was removed in SciPy 1.14 -- import whichever name is available
# so this cell works on both old and new SciPy versions.
try:
    from scipy.integrate import cumulative_trapezoid as cumtrapz
except ImportError:  # SciPy < 1.6
    from scipy.integrate import cumtrapz
help(cumtrapz)
scipy.integrate.cumtrapz
¶
from scipy.integrate import cumtrapz as scipy_cumtrapz
# Cumulative trapezoidal integration of f(x) = x**2 on [-2, 2], plotted
# against one particular antiderivative (see the question below about
# why the two curves are offset).
x = np.linspace(-2, 2, num=20)
y = x**2
F = scipy_cumtrapz(y, x, initial=0)

plt.plot(x, (x**3 - 1)/3, label='analytical')
plt.plot(x, F, 'o', label='numerical', alpha=0.75)
plt.grid(); plt.legend(loc='best'); plt.xlabel('$x$'); plt.ylabel('$F(x)$');
Why do these differ? How can we get the correct solution?
scipy.integrate
package also provide solvers for Ordinary Differential Equations (ODEs)solve_ivp
isIVPs are problems of the form $$\frac{d}{dt} \mathbf{y} = \mathbf{f}(t, \mathbf{y}) , \quad \mathbf{y}(t_0) = \mathbf{y}_0$$
from scipy.integrate import solve_ivp
help(solve_ivp)
The equation of motion for a damped oscillator can be written as
$$\frac{d^2}{dt^2} x + 2\xi \omega_0 \frac{d}{dt} x + \omega_0^2 x = 0$$with an angular frequency $\omega_0$ and a damping ratio $\xi$.
To be able to solve this using scipy.integrate.solve_ivp
\quad \Leftrightarrow \quad \frac{d}{dt} \left[ \begin{array}{c} v \\ x \end{array} \right] = \left[ \begin{array}{c} -2\xi \omega_0 v - \omega_0^2 x \\ v \end{array} \right] $$ where $v \equiv \frac{d}{dt} x$ and $\mathbf{y} \equiv \left[ \begin{array}{c} v \\ x \end{array} \right]$
Implement $ f(t, \mathbf{y}) \equiv \left[ \begin{array}{c} -2\xi \omega_0 v - \omega_0^2 x \\ v \end{array} \right]$
Set $\omega_0 = 1.0$ and $\xi = 0.1$ and use $x(0) = 1$ and $v(0) = \frac{d}{dt} x(0) = 0.0$
# Damped harmonic oscillator as a first-order system y = [v, x]:
#   dv/dt = -2*xi*omega0*v - omega0**2 * x,   dx/dt = v
omega0, xi = 1.0, 0.1

def f(t, y):
    """Right-hand side f(t, y) for solve_ivp; y = (v, x)."""
    v, x = y
    dv_dt = -2*xi*omega0*v - omega0**2*x
    return [dv_dt, v]

# Integrate over t in [0, 10] from v(0)=0, x(0)=1.
res = solve_ivp(
    f, y0=(0, 1), t_span=(0, 10), # Time range
    #method='RK23', # RK45 is default
    #rtol=1e-5,
) ## Note to self: Try lower order Runge-Kutta method and tune relative tolerance
def plot_res(res):
    """Plot velocity (y[0]) and position (y[1]) from a solve_ivp result."""
    times, (v, x) = res.t, res.y
    plt.plot(times, v, label='$v$')
    plt.plot(times, x, label='$x$')
    plt.legend(); plt.xlabel('t'); plt.grid();
    print(f'Number of integration points:', res.t.shape)

plot_res(res)
The stepping in the solve_ivp
result can be controlled using t_eval
.
# Ask solve_ivp for the solution on a fixed uniform 400-point grid
# via the t_eval argument.
t = np.linspace(0, 10.0, num=400)
res = solve_ivp(f, y0=(0, 1),
                t_span=(t.min(), t.max()), t_eval=t)
plot_res(res)
scipy.interpolate
¶The scipy.interpolate
module provides several methods for:
For example: For 1D spline interpolation we can use UnivariateSpline(x, y, ...)
and its methods
# Noisy samples of sin(x) on [-2*pi, 2*pi] and a fine grid for plotting.
from scipy.interpolate import UnivariateSpline
help(UnivariateSpline)

x = np.linspace(-2*np.pi, 2*np.pi, num=100)
y = np.sin(x) + np.random.normal(scale=0.1, size=x.shape)
x_fine = np.linspace(x.min(), x.max(), num=1000)

def plot_spline():
    """Scatter the noisy samples and overlay the exact sin(x) curve."""
    plt.plot(x, y, '.', label='Noisy data')
    plt.plot(x_fine, np.sin(x_fine), '--', label='Exact sol.')
    plt.legend(loc='upper right'); plt.grid();

plot_spline()

# Interpolating spline: s=0 passes through every point, k=1 gives a
# piecewise-linear (1st order) spline.
from scipy.interpolate import UnivariateSpline
spline = UnivariateSpline(x, y, s=0, k=1)
plot_spline()
plt.plot(x_fine, spline(x_fine), '-', label='spline')
plt.legend(loc='upper right');

# Smoothing spline: s=1 trades exact interpolation for smoothness.
spline = UnivariateSpline(x, y, s=1.)
plot_spline()
plt.plot(x_fine, spline(x_fine), label='spline')
plt.legend(loc='upper right');
scipy.interpolate.SmoothBivariateSpline(x, y, z, ...)
def hat(x, y):
    """Radially symmetric surface 20*sin(r)/r with r = sqrt(x^2 + y^2).

    Implemented via np.sinc (sinc(z) = sin(pi*z)/(pi*z)) so the removable
    singularity at r == 0 evaluates to the limit 20.0 instead of NaN;
    for r != 0 the value is mathematically identical to 20*sin(r)/r.
    """
    r = np.sqrt(x**2 + y**2)
    return 20.0 * np.sinc(r / np.pi)

# Random sample points in [-4*pi, 4*pi]^2 and a regular grid for
# evaluating/plotting the interpolant below.
Npoints = 200
points = 8. * np.pi * (np.random.random((Npoints, 2)) - 0.5) # Random grid points
x, y = points.T
values = hat(x, y)
tmp = np.linspace(-4*np.pi, 4*np.pi, num=100)
X, Y = np.meshgrid(tmp, tmp)
from mpl_toolkits.mplot3d import Axes3D

# 3D scatter of the random samples (plot with a marker and linewidth=0
# acts as a scatter).
plt.figure(figsize=(15,10))
ax = plt.subplot(1,1,1, projection='3d')
ax.plot(x, y, values, markersize=4, marker='.', linewidth=0)

# Fit a smoothing bivariate spline to the scattered data, then evaluate
# it on the regular grid and draw the surface over the scatter.
from scipy.interpolate import SmoothBivariateSpline
spline = SmoothBivariateSpline(x, y, values)
Z = spline(tmp, tmp)
# NOTE(review): spline(tmp, tmp)[i, j] is the value at (tmp[i], tmp[j]),
# while meshgrid's default indexing has X[i, j] = tmp[j] -- this only
# lines up here because the surface is radially symmetric; confirm (or
# transpose Z) before reusing with non-symmetric data.

plt.figure(figsize=(15,10))
ax = plt.subplot(1,1,1, projection='3d')
ax.plot(x, y, values, markersize=4, marker='.', linewidth=0)
ax.plot_surface(X, Y, Z, cmap=plt.cm.rainbow, rstride=1, cstride=1, alpha=.5, linewidth=0)
scipy.linalg
¶Summary of routines in numpy.linalg
norm(A)
, matrix or vector norm (default 2-norm)det(A)
, determinant of 2D equal dimensional arrayssolve(A, b)
, solves linear system $A \cdot \mathbf{x} = \mathbf{b}$qr(A)
, QR-factorizationinv(A)
, pinv(A)
, matrix- and pseudo-inverseeig(A)
, eigvals(A)
, eigenvalues and eigenvectors of $A$svd(A)
, Singular-value decompositionThe numpy.linalg
routines covers most of (and is implemented using!) LAPACK functionality.
SciPy provides extra functionality for:
scipy.linalg
¶Several methods for matrix factorization is available in SciPy:
Matrix factorization of a $M \times N$ matrix of the form $$A = P \cdot L \cdot U$$ where $P$ is a permutation matrix $L$ is lower- and $U$ is upper-triangular.
This is useful for repeatedly solving linear systems with different right-hand-sides $\mathbf{b}_i$ $$A \cdot \mathbf{x}_i = \mathbf{b}_i$$
(Cholesky decomposition does the same but for positive definite hermitian matrices.)
import time
from scipy.linalg import lu_factor, lu_solve

n = 50    # number of right-hand sides
N = 1000  # system size
A = np.random.random((N, N))

# Factor A once, then reuse the LU factorization for every RHS.
t = time.time()
lu, P = lu_factor(A)
for _ in range(n):
    b = np.random.random(N)
    x = lu_solve((lu, P), b)
print('LU-solve: {:4.3f}s (including factorization!)'.format(time.time() - t) )

# For comparison: a full solve (factorizing internally) for every RHS.
t = time.time()
for _ in range(n):
    b = np.random.random(N)
    x = np.linalg.solve(A, b)
print('np.solve: {:4.3f}s'.format(time.time() - t) )
Matrix factorization of the form $$A = Q R$$ where $Q$ is an orthogonal matrix (i.e. $Q \cdot Q^T = \mathbf{1}$) and $R$ is upper triangular
np.set_printoptions(precision=3)

# QR-factorize a rectangular matrix with NumPy (reduced mode by default)
# and SciPy (full mode by default) and print both results.
A = np.random.random((3,2))
Qnp, Rnp = np.linalg.qr(A)

from scipy.linalg import qr as scipy_qr
Q, R = scipy_qr(A)

print('Qnp =\n', Qnp, '\nRnp = \n', Rnp, sep='')
print('Q =\n', Q, '\nR = \n', R, sep='')
scipy.linalg.funm
¶The function $f(A)$ of a matrix $A$ is defined by the Taylor series $$f(A) = \sum_{k=0}^\infty \frac{f^{(k)}(0)}{k!} A^k \, .$$
SciPy provides a number of specialized functions for this
expm(A, q=order)
, Matrix exponent ($e^A$) using the Pade approximationlogm
, sinm
, cosm
, tanm
, etc.For an arbitrary functions there is
funm(A, func=f)
, general function exponent
A = np.random.random((3,3))
print('A=\n', A, sep='')

# Evaluate the matrix function B = J_0(A) (Bessel function of order 0
# applied to the matrix A via its Taylor series).
from scipy.linalg import funm
from scipy.special import jv # Bessel function
B = funm(A, lambda x: jv(0, x))
print('B=\n', B, sep='')
scipy.linalg
¶SciPy routines for building common special matrices:
Type | Function |
Block diagonal | block_diag |
Circulant | circulant |
Companion | companion |
Hadamard | hadamard |
Hankel | hankel |
Hilbert | hilbert |
Inverse Hilbert | invhilbert |
Leslie | leslie |
Pascal | pascal |
Toeplitz | toeplitz |
Van der Monde | vander |
For more information on these, look at the scipy.linalg
homepage.
scipy.optimize
¶scipy.optimize
¶scipy.optimize
General purpose:
minimize(func, x0, method='CG')
, for scalar (real) valued target function func
method='Nelder-Mead'
, 'Powell'
, 'CG'
, etc.leastsq(func, x0, ...)
, for vector valued functions, minimizes $|\vec{f}(\vec{x})|$Constrained minimizers (multivariate):
fmin_l_bfgs_b
, Limited memory constrained BFGS methodfmin_tnc
, Truncated Newton algorithmfmin_cobyla
, Constrained Optimization BY Linearfmin_slsqp
, Sequential Least SQuares ProgrammingGlobal minimizers:
anneal
, Simulated annealingbrute
, Brute force minimization (in limited range)Scalar function minimizers:
fminbound(func, x1, x2)
, Bounded minimizationbrent
, Brent's method (bisection + secant + inv.quad-interp)scipy.optimize.minimize
¶Minimize the Rosenbrock function: $$ f(x,y) = (1-x)^2 + 100(y-x^2)^2 $$ with a minima at $(x,y) = (1,1)$.
from scipy.optimize import minimize as scipy_minimize

def f(X):
    """Rosenbrock function; global minimum f(1, 1) = 0."""
    x, y = X
    return (1-x)**2 + 100.*(y-x**2)**2

# Default method (BFGS for unconstrained problems) from a nearby start.
result = scipy_minimize(f, x0=[0.75, 1.5])
print(f'Minimum f = {result.fun} at x = {result.x}')
# Surface plot of the Rosenbrock function with the located minimum
# marked as a large black dot.
plt.figure(figsize=(10,7))
ax = plt.subplot(1,1,1, projection='3d')

x = np.linspace(-2.0, 2.0, num=50)
y = np.linspace(-0.5, 3.0, num=50)
X,Y = np.meshgrid(x, y)
ax.plot_surface(X, Y, f((X, Y)),
    cmap=plt.cm.rainbow, rstride=1, cstride=1, alpha=.5, linewidth=0)

ax.plot3D([result.x[0]], [result.x[1]], [result.fun], 'ok', markersize=20)
ax.set_xlabel('x'); ax.set_ylabel('y'); ax.set_zlabel('f(x,y)')
ax.view_init(elev=45, azim=60)
scipy.optimize.leastsq
¶Example: Model-fitting using leastsq
leastsq(func, x0, args=())
x0
Example:
# Sinusoidal model and its residual against data, for least-squares fitting.
def model(t, A, omega, theta):
    """A * sin(omega*t + theta)."""
    return A * np.sin(omega * t + theta)

def data_model_diff(params, t, data):
    """Residual vector model(t, *params) - data minimized by leastsq."""
    return model(t, *params) - data

# Synthetic noisy data from known parameters.
t = np.linspace(0, 1, num=25)
A, omega, theta = 1/3, 2.5*np.pi, np.pi/5
data = model(t, A, omega, theta) + np.random.normal(size=t.shape, scale=0.1)

x0 = np.array([1.0, 6.0, 0.0])  # initial parameter guess
from scipy.optimize import leastsq
opt_params, flag = leastsq(data_model_diff, x0, args=(t, data))
print('Exact parameters:', np.array([A, omega, theta]))
print('Fit parameters :', opt_params)
# Overlay the noisy data, the fitted model, and the true model.
t_fine = np.linspace(0,1)
plt.plot(t, data, 'o', label='data')
plt.plot(t_fine, model(t_fine, *opt_params), label='fit')
plt.plot(t_fine, model(t_fine, A, omega, theta), label='real')
plt.legend(loc='lower left'); plt.grid();
scipy.optimize
¶For finding roots scipy.optimize
provides:
Scalar function solvers
newton
, Newton-Raphson or secant methodbisect
, Bisection method on bounded intervalbrentq
, brenth
, ridder
, other bounded solversMulti-dimensional non-linear solvers
root
. Common interface providing several methodsfsolve
, uses MINPACK's hybrd and hybrj algorithmsbroyden1
, Broyden's good methodbroyden2
, Broyden's bad methodMulti-dimensional large-scale non-linear solvers
newton_krylov
, Newton's method with Krylov approximation of inverse Jacobiananderson
, Extended Anderson mixing methodMulti-dimensional: Simple solvers
excitingmixing
, Newton with Tuned diagonal Jacobian approx.linearmixing
, Newton using scalar Jacobian approx.diagbroyden
, Newton using Broyden Jacobian approx.scipy.optimize.root
¶root(fun, x0)
, solves $\vec{f}(\vec{x}) = \vec{0}$ with initial guess $\vec{x}_0$
f = lambda x: x + np.sqrt(2) * np.cos(x)
# Find a root of f(x) = x + sqrt(2)*cos(x) starting from x0 = -5, then
# plot the function with the root marked.
from scipy.optimize import root as scipy_root
result = scipy_root(f, x0=-5.0)
print('Root at x =', result.x)

x = np.linspace(-10, 10)
plt.plot(x, f(x), label='f(x)')
plt.plot(result.x, f(result.x), 'ro', label='root')
plt.legend(loc='best'); plt.grid(True); plt.xlabel('x');
scipy.sparse
¶ndarray
datatype stores data contiguously and "dense" in memory. However, many engineering problems (e.g. FEM, CFD, etc.) can be formulated in terms of sparse matrices.
SciPy provides support for sparse representation of arrays and matrices in the scipy.sparse
package.
There are several Matrix storage formats available:
csr
csc
bsr
lil
dok
coo
dia
Advice:
lil
, supports fancy indexing (as NumPy)csr
or csc
lil
to, for example, csr
is efficient!scipy.sparse
import scipy.sparse as sparse

N = 500
A = sparse.rand(N, N, density=0.01, format='csr')  # ~1% non-zeros
loops = 400

# Sparse matrix-matrix product ('*' on a csr matrix is matmul).
t = time.time()
for _ in range(loops):
    B = A * A
print('Sparse mat-mul: {:2.4f} s'.format(time.time() - t))

# Same product after densifying ('*' on np.matrix is also matmul).
A = A.todense()
t = time.time()
for _ in range(loops):
    B = A * A
print('Dense mat-mul: {:2.4f} s'.format(time.time() - t))
scipy.sparse.linalg
¶scipy.sparse.linalg
SciPy Sparse Linear Algebra sub-packageIterative linear system solvers, for $A\mathbf{x} = \mathbf{b}$
bicg
, Biconjugate Gradientbicgstab
, Biconjugate Gradient Stabilizedcg
, Conjugate Gradientcgs
, Conjugate Gradient Squaredgmres
, Generalized Minimal Residuallgmres
, Improved GMRESgmr
, Quasi-minimal Residuallsgr
, Least-squares solverMatrix Factorizations
eigs
, eigsh
, Partial eigenvalues and eigen vectorslobpcg
, Symmetric partial eigenproblems (with preconditioning)svds
, Partial singular value decompositionsplu
, spilu
, Complete and incomplete LU-factorizationThe implicit matrix algorithms only use the matrix-vector product operation, $A \cdot \vec{x}$.
Valid matrix objects are
scipy.sparse.linalg.LinearOperator
scipy.sparse.LinearOperator
def Laplacian(f):
    """ Given a 2D array compute the Laplacian in the two dimensions.

    Interior points receive the sum of the 1D second differences along
    each axis; the one-cell boundary is left at zero.
    """
    out = np.zeros_like(f)
    interior = (slice(1, -1), slice(1, -1))
    out[interior] = (np.diff(f, n=2, axis=0)[:, 1:-1]
                     + np.diff(f, n=2, axis=1)[1:-1, :])
    return out
Observation:
Embed the Laplacian
function in a LinearOperator
object
def get_Laplacian_LinearOperator(grid_shape):
    """Wrap `Laplacian` as an implicit (N, N) scipy LinearOperator.

    grid_shape : 2-tuple -- shape of the 2D grid; N = prod(grid_shape)
    is the length of the flattened vectors the operator maps.
    """
    def matvec(vec):
        # Reshape the flat vector onto the grid, apply the stencil,
        # and flatten the result back to a vector.
        f = vec.reshape(grid_shape)
        df = Laplacian(f)
        return df.flatten()

    from scipy.sparse.linalg import LinearOperator
    N = np.prod(grid_shape)
    # `np.float` was removed in NumPy 1.20+; the builtin `float` is the
    # equivalent (float64) dtype.
    LaplacianOperator = LinearOperator((N, N), matvec=matvec, dtype=float)
    return LaplacianOperator
Solve using Conjugate Gradient implicit linear systems solver scipy.sparse.linalg.cg
# Poisson-type problem: a horizontal line of sinks in the middle of a
# 20x20 grid, solved with the implicit Laplacian operator.
source = np.zeros(shape=(20, 20))
source[10, 1:-1] = -1.0

L = get_Laplacian_LinearOperator(source.shape)
b = source.flatten()                  # RHS vec w. source terms
f0 = np.zeros_like(source).flatten()  # initial guess

# -- Solve L * f = b, using CG iterative solver
from scipy.sparse.linalg import cg as scipy_cg
f_sol, err = scipy_cg(L, b, x0=f0)
f = f_sol.reshape(source.shape)

# Show the source term and the solution side by side.
plt.figure(figsize=plt.figaspect(0.4))
plt.subplot(121)
plt.imshow(-source, interpolation='nearest')
plt.ylabel('source')
plt.subplot(122)
plt.imshow(f, interpolation='nearest')
plt.ylabel('solution');
scipy.sparse.csgraph
¶For analysing and working with
scipy.sparse.csgraph
sub-packageThe module contains routines for:
connected_components
, laplacian
breadth_first_order
, depth_first_order
, breadth_first_tree
, depth_first_tree
, minimum_spanning_tree
shortest_path(csgraph)
, dijkstra
, floyd_warshall
, bellman_ford
Common for all of these is that the
(N, N)
where N
is the number of nodes in the graph.i, j
, the value at position N[i,j]
represent the distance or weight for their connection.i, j
are not connected this is represented by an empty element in the sparse matrix.Consider the following undirected graph:
(0)
/ \
0.1 0.2
/ \
(2) (1)
This graph has three nodes, where node 0 and 1 are connected with a distance of 0.2, and nodes 0 and 2 are connected with a distance of 0.1. We can construct the dense, masked, and sparse representations as follows, keeping in mind that an undirected graph is represented by a symmetric matrix:
# Dense, masked, and sparse representations of the 3-node undirected
# graph described above (symmetric distance matrix, 0 = not connected).
G_dense = np.array([[0. , 0.2, 0.1],
                    [0.2, 0. , 0. ],
                    [0.1, 0. , 0. ]])
G_masked = np.ma.masked_values(G_dense, 0)

from scipy.sparse import csr_matrix
G_sparse = csr_matrix(G_dense)

print('Dense:\n', G_dense, sep='')
print('Masked:\n', G_masked, sep='')
print('Sparse:\n', G_sparse, sep='')
scipy.spatial
¶In many cases there is a need to construct a mesh or divide a surface into parts given a set of points.
The scipy.spatial
module provides methods for:
Delaunay(points, ...)
,Voronoi(point, ...)
,ConvexHull(points, ...)
The module provides convenient helpers to plot these in the 2D-case.
scipy.spatial
also contains routines for fast nearest neighbour lookup, and distance computations.
KDTree
takes a (N, M)
array-like object as input,N
is the number of points,M
is the spatial dimension of the problem.cKDTree
is faster C-based implementationdistance
contains methods for the calculation of distances between points in a given input set
from scipy.spatial import distance
# Pairwise Euclidean distances between four 2D points; pdist returns
# the condensed distance vector (upper triangle, row by row).
points = np.array([[0.0, 0.0], [0.1, 0.0], [0.2, 0.1], [0.0, 0.3]])
print('points =\n', points)

dists = distance.pdist(points)
print(dists)
print(' 0-1 0-2 0-3 1-2 1-3 2-3')
# Scatter the points and annotate each with its index.
for label, (px, py) in enumerate(points):
    plt.plot(px, py, 'o', markersize=10, color='Navy')
    plt.text(px + 0.005, py, str(label), ha='left')
plt.grid()
scipy.stats
¶The SciPy statistics module scipy.stats
contains:
Random distributions:
alpha | anglit | arcsine | beta | betaprime |
bradford | burr | cauchy | chi | chi2 |
cosine | dgamma | dweibull | erlang | expon |
exponpow | exponweib | f | fatiguelife | fisk |
foldcauchy | foldnorm | frechet_l | frechet_r | gamma |
gausshyper | genexpon | genextreme | gengamma | genhalflogistic |
genlogistic | genpareto | gilbrat | gompertz | gumbel_l |
gumbel_r | halfcauchy | halflogistic | halfnorm | hypsecant |
invgamma | invgauss | invnorm | invweibull | johnsonsb |
johnsonsu | ksone | kstwobign | laplace | levy |
levy_l | levy_stable | loggamma | logistic | loglaplace |
lognorm | lomax | maxwell | mielke | nakagami |
ncf | nct | ncx2 | norm | pareto |
powerlaw | powerlognorm | powernorm | rayleigh | rdist |
recipinvgauss | reciprocal | rice | semicircular | t |
triang | truncexpon | truncnorm | tukeylambda | uniform |
vonmises | wald | weibull\_max | weibull\_min | wrapcauchy |
bernoulli | binom | boltzmann | dlaplace | geom |
hypergeom | logser | nbinom | planck | poisson |
randint | skellam | zipf |
Parameters, defined in the shapes
method.
Methods implemented by all continuous distributions
rvs(N)
, draw N
Random Value Samplespdf(x)
, Probability Distribution Functioncdf(x)
, Cumulative Distribution Functionsf(x)
, Survival Function $(1-\textrm{cdf}(x))$ppf(y)
, Percent Point Function $\textrm{cdf}^{-1}(y)$isf(y)
, Inverse Survival Function $\textrm{isf}^{-1}(y)$stats(..)
, mean, variance, skew or kurtosis
moment(n)
, n
:th order moment of distributionDistribution parameter estimators
fit(data)
, Maximum likelihood estimation of distribution parameters from data
fit_loc_scale(data)
, fit only location
and scale
with fixed shape
expect(func=f)
, expected value of function wrt. the distributionscipy.stats.nct
from scipy import stats as scipy_stats

# The non-central t distribution has two shape parameters, df and nc.
print('Parameters:', scipy_stats.nct.shapes)
help(scipy_stats.nct)

df = 1.0; nc = 1.0
n = scipy_stats.nct(df, nc)  # "frozen" distribution object

# Evaluation grids: x for pdf/cdf/sf, y in [0, 1] for ppf/isf.
x = np.linspace(-10, 20, num=400)
y = np.linspace(0, 1, num=400)
# Three panels: PDF vs. a histogram of random samples, CDF/SF, and the
# inverse functions PPF/ISF.
plt.subplot(131)
plt.plot(x, n.pdf(x), label='PDF');
plt.hist(n.rvs(100000), bins=20, density=True, alpha=0.25,
         range=(np.min(x), np.max(x)), label='RVS');
plt.legend()

plt.subplot(132)
plt.plot(x, n.cdf(x), label='CDF')
plt.plot(x, n.sf(x), label='SF')
plt.legend(loc='best')

plt.subplot(133)
plt.plot(y, n.ppf(y), label='PPF')
plt.plot(y, n.isf(y), label='ISF')
plt.legend(loc='best')
plt.ylim([np.min(x), np.max(x)])

plt.subplots_adjust(bottom=.1, top=.95, left=.1, right=.95)
plt.show()
The Signal Processing Toolbox scipy.signal
Components:
From Wikipedia:
"In mathematics and, in particular, functional analysis, convolution is a mathematical operation on two functions f and g, producing a third function that is typically viewed as a modified version of one of the original functions, giving the area overlap between the two functions as a function of the amount that one of the original functions is translated."
# Apply the 5-point Laplacian stencil to a random 1000x1000 field in
# three equivalent ways and time each.
from scipy.signal import convolve, fftconvolve

f = np.random.random((1000, 1000))
stencil = np.array([[0., 1., 0.],
                    [1., -4., 1.],
                    [0., 1., 0.]])

t = time.time()
fnabla2 = convolve(f, stencil, mode='valid')  # direct/auto method
print('convolve: {:4.3f} s '.format(time.time() - t))

t = time.time()
fnabla2_fft = fftconvolve(f, stencil, mode='valid')  # FFT-based
print('FFT convolve: {:4.3f} s '.format(time.time() - t))

# Same stencil written out as shifted-array arithmetic on the interior.
t = time.time()
Nabla2f = np.zeros(np.array(f.shape) - np.array([2, 2]))
Nabla2f[:] = (f[2:, 1:-1] + f[0:-2, 1:-1] + f[1:-1, 2:]
              + f[1:-1, 0:-2] - 4 * f[1:-1, 1:-1])
print('plain NumPy: {:4.3f} s '.format(time.time() - t))

# All three agree up to floating-point round-off.
np.testing.assert_array_almost_equal(fnabla2, fnabla2_fft)
np.testing.assert_array_almost_equal(fnabla2, Nabla2f)
# `scipy.misc.ascent` moved to `scipy.datasets` when scipy.misc was
# removed in SciPy 1.12 -- fall back so this works on old and new SciPy.
try:
    from scipy.datasets import ascent
except ImportError:  # SciPy < 1.12
    from scipy.misc import ascent
import scipy.signal as signal

image = ascent()[180:360, 200:400]

# `np.float` was removed in NumPy 1.20+; the builtin `float` is the
# equivalent (float64) dtype.
laplacian = np.array([[0, 1, 0],
                      [1,-4, 1],
                      [0, 1, 0]], dtype=float)
deriv2 = signal.convolve2d(image, laplacian, mode='same',boundary='symm')

wiener = signal.wiener(image)  # adaptive noise-reduction filter

# Smoothing B-spline coefficients, then a separable second-derivative
# filter applied along each axis.
derfilt = np.array([1, -2, 1], dtype=float)
ck = signal.cspline2d(image,8.0)
deriv = signal.sepfir2d(ck, derfilt, [1]) + signal.sepfir2d(ck, [1], derfilt)

# Show the original and the three filtered versions side by side.
import matplotlib.cm as cm
plt.figure(figsize=plt.figaspect(0.25))
data_list = [image, wiener, deriv2, deriv]
title_list = ['Original', 'Wiener filter', 'Laplacian', 'B-spline sepfir']
ax = None
for idx, (data, title) in enumerate(zip(data_list, title_list)):
    plt.subplot(1, 4, idx+1)
    plt.title(title)
    plt.imshow(data, cmap=cm.gray)
    plt.xticks([]); plt.yticks([])
    plt.axis('image')
The SciPy multi-dimensional image processing module scipy.ndimage
,
contains methods for "Images" with n-dimensions, eg. MRI etc. such as
For more info see http://docs.scipy.org/doc/scipy/reference/ndimage.html
The scipy.io
module contains the extra I/O routines:
loadmat(fname)
, load MATLAB .mat
data filesavemat(fname, dict)
, save dictionary of names and arrays to MATLAB .mat
-filereadsav(fname)
, read IDL .sav
-fileswavfile.read(file)
, read .wav
sound filewavfile.write(fname, rate, data)
, write .wav
sound filearff.loadarff(file)
, Load an ARFF file standard format for WEKAnetcdf.netcdf_file(fname)
, read a NetCDF file.Schroedinger equation in 1D
$$ \left[-\frac{\hbar^2}{2m} \frac{d^2}{dx^2} + V(x) \right] \Psi(x) = E \Psi(x) $$where $V(x) = x^2$ and $\hbar/2m = 1$
$$ \frac{d^2}{dx^2} \Psi(x) \approx \frac{\Psi( x_{i+1}) - 2\Psi(x_i) + \Psi(x_{i-1})}{(\Delta x)^2} $$
N = 300
# Finite-difference Hamiltonian H = -d^2/dx^2 + x^2 on a uniform grid,
# assembled as a sparse tridiagonal matrix.  (N = 300 is set where the
# grid size is introduced above; re-assigned here so the cell is
# self-contained.)
N = 300
x = np.linspace(-4.0, 4.0, num=N)
dx = x[1] - x[0]

# Second-difference stencil with the sign flipped: H contains -d^2/dx^2.
d2_dx2 = -np.array([1., -2., 1.]) / dx**2
H_diags = np.zeros((3, N)) + d2_dx2[:, np.newaxis]
H_diags[1, :] += x**2  # Add potential on middle diagonal
H = sparse.spdiags(H_diags, [-1, 0, 1], N, N).tocsr()

# Lowest five eigenpairs (smallest magnitude) of the sparse Hamiltonian.
import scipy.sparse.linalg as splinalg
eigv, eigvec = splinalg.eigsh(H, 5, which='SM', tol=1e-4, maxiter=10*N)
# Plot the potential, the eigenvalue levels (dotted), and the scaled
# eigenfunctions offset by their eigenvalues.
plt.plot(x, x**2)
for k in range(eigvec.shape[-1]):
    plt.plot(x, eigv[k] + np.zeros_like(x), ':k')
    plt.plot(x, 8*np.ravel(eigvec[:, k]) + eigv[k])
plt.ylim([0, eigv[-1] + 2])
plt.ylabel(r'$E_n / \hbar \omega$')
plt.xlabel(r'$x$');
# scipy.misc was removed in SciPy 1.12; face() now lives in
# scipy.datasets -- fall back so this runs on old and new SciPy.
try:
    from scipy import datasets as _scipy_images
except ImportError:  # SciPy < 1.12
    from scipy import misc as _scipy_images
import matplotlib.pyplot as plt
face = _scipy_images.face()
plt.imshow(face);