Given the data points
| x   | y   |
|-----|-----|
| 0.1 | 0.3 |
| 0.3 | 0.1 |
| 1.0 | 0.2 |
| 1.4 | 0.3 |
| 2.8 | 0.7 |
| 4.5 | 0.9 |
# Solution 1.1
%pylab inline
import numpy
import pylab
import scipy.interpolate
# Data points from the table above
x = numpy.array([0.1, 0.3, 1.0, 1.4, 2.8, 4.5])
y = numpy.array([0.3, 0.1, 0.2, 0.3, 0.7, 0.9])
# Fine grid spanning the data range, for a smooth curve
xx = numpy.linspace(x.min(), x.max(), 100)
# Lagrange interpolating polynomial through all six points
lp = scipy.interpolate.lagrange(x, y)
pylab.plot(xx, lp(xx), 'g', label='Lagrange')
# Overlay the raw data on the same axes
pylab.plot(x, y, 'k*', label='Raw data')
pylab.xlabel('x')
pylab.ylabel('y')
pylab.legend(loc='best')
pylab.show()
Populating the interactive namespace from numpy and matplotlib
# Solution 1.2
# Least-squares cubic fit to the same data
poly_coeffs = numpy.polyfit(x, y, 3)
p3 = numpy.poly1d(poly_coeffs)  # convenience wrapper: p3(xx) evaluates the cubic
pylab.plot(xx, p3(xx), 'r', label='Cubic')
# Overlay the raw data on the same axes
pylab.plot(x, y, 'k*', label='Raw data')
pylab.xlabel('x')
pylab.ylabel('y')
pylab.legend(loc='best')
pylab.show()
For this exercise, consider the system of linear equations
\begin{align*} 2x - 3y + 4z &= 5 \\ x - y + 5z &= 0 \\ -x + 2y + 7z &= -2 \end{align*}
Use `numpy.dot(numpy.linalg.inv(A), b)` to verify your solution. [5 marks]
# 2.1
import numpy.linalg
# Coefficient matrix and right-hand side of the 3x3 system above
A = numpy.array([[2., -3., 4.],
                 [1., -1., 5.],
                 [-1., 2., 7.]])
b = numpy.array([5., 0., -2.])
# Guard clause: elimination/inversion requires a non-singular matrix
det_A = numpy.linalg.det(A)
if numpy.isclose(det_A, 0):
    raise ValueError("Matrix A is singular and therefore cannot be inverted.")
# 2.2
def upper_triangle(A, b):
    """Reduce the linear system A x = b to upper-triangular form, in place.

    Plain Gaussian elimination without pivoting: for each pivot row k the
    entries below A[k, k] are eliminated by subtracting a multiple of the
    pivot row.  Both A and b are overwritten; nothing is returned.

    Parameters:
        A: (n, n) float array, modified in place.
        b: (n,) float array, modified in place.

    Raises:
        ZeroDivisionError: if a (near-)zero pivot is met — without partial
            pivoting the elimination would silently fill A with inf/nan.
    """
    # Assumes A is square, n x n, matching the length of b.
    n = numpy.size(b)
    # Loop over each pivot row.
    for k in range(n - 1):
        if numpy.isclose(A[k, k], 0.0):
            raise ZeroDivisionError(
                "Zero pivot encountered; reorder the equations or use partial pivoting.")
        # Eliminate the entries below the pivot.
        for i in range(k + 1, n):
            # Compute the scaling factor before row i is overwritten.
            s = A[i, k] / A[k, k]
            # Columns before k are already zero, so update from column k on.
            A[i, k:] -= s * A[k, k:]
            b[i] -= s * b[k]
# Eliminate in place, then inspect the reduced (upper-triangular) system
upper_triangle(A, b)
print(A)
print(b)
[[ 2. -3. 4. ] [ 0. 0.5 3. ] [ 0. 0. 6. ]] [ 5. -2.5 3. ]
# 2.3
def solver(A, b):
    """Solve A x = b by Gaussian elimination followed by back substitution.

    Note: A and b are reduced to upper-triangular form in place by
    upper_triangle, so the caller's arrays are modified.
    Returns the solution vector x.
    """
    upper_triangle(A, b)
    n = numpy.size(b)
    x = numpy.zeros(n)
    # Back substitution: work upwards from the last row.
    for row in reversed(range(n)):
        acc = 0.
        for col in range(row + 1, n):
            acc = acc + A[row, col] * x[col]
        x[row] = (b[row] - acc) / A[row, row]
    return x
x = solver(A, b)
# 2.4
# Verify against the direct inverse.  Use numpy.linalg explicitly (imported
# above) rather than the bare `linalg` name, which only exists because of the
# %pylab star import and breaks as a plain script.
if numpy.allclose(numpy.dot(numpy.linalg.inv(A), b), x):
    print("Solver passed")
else:
    print("Solver failed")
Solver passed
Consider the function $f(x) = \cos(x)$.
%pylab inline
import math
def central_diff(f, x, h):
    """Second-order central-difference estimate of f'(x) with spacing h."""
    return (f(x + h) - f(x - h)) / (2 * h)
exact = -math.sin(2.5)  # analytic derivative of cos at x = 2.5, known exactly
print("Exact derivative at cos(2.5) = ", -math.sin(2.5))
print("%20s" % ("Central differencing"))
atol = 1.0e-6
cd_errors = []; h_all = []  # store h values and errors for the plot
h = 1.2  # initial mesh spacing
for i in range(100):
    cd = central_diff(math.cos, 2.5, h)
    print("%10g (error=%10.2g)" % (cd, abs(cd - exact)))
    # Record before the convergence test so the final (converged) point is
    # also plotted — the original stored it only after the break check.
    h_all.append(h); cd_errors.append(abs(cd - exact))
    if abs(cd - exact) < atol:
        break
    h = h / 2  # halve h for the next iteration
# Error ~ C*h^2 for central differencing, so a log-log plot should give a
# straight line of slope 2.
pylab.loglog(h_all, cd_errors, 'k.-', label='central')
pylab.xlabel('h'); pylab.ylabel('Error'); pylab.grid(True)
pylab.legend(loc='best')
Populating the interactive namespace from numpy and matplotlib Exact deriviative at cos(2.5) = -0.5984721441039564 Central differencing -0.464833 (error= 0.13) -0.563205 (error= 0.035) -0.589535 (error= 0.0089) -0.59623 (error= 0.0022) -0.597911 (error= 0.00056) -0.598332 (error= 0.00014) -0.598437 (error= 3.5e-05) -0.598463 (error= 8.8e-06) -0.59847 (error= 2.2e-06) -0.598472 (error= 5.5e-07)
/usr/local/lib/python3.6/site-packages/IPython/core/magics/pylab.py:160: UserWarning: pylab import has clobbered these variables: ['pylab'] `%matplotlib` prevents importing * from pylab and numpy "\n`%matplotlib` prevents importing * from pylab and numpy"
<matplotlib.legend.Legend at 0x1098340f0>
import numpy as np
def trapezoid_rule(start_point, end_point, f, atol=1.0e-6, maxiter=1000):
    """Integrate f over [start_point, end_point] with the composite trapezoid rule.

    The number of equal-width bins is increased (1, 2, 3, ...) until two
    successive estimates agree to within atol, or maxiter-1 bins are reached.

    Parameters:
        start_point, end_point: integration limits.
        f: integrand, called with a scalar.
        atol: absolute tolerance between successive estimates.
        maxiter: cap on the number of refinement levels.

    Returns:
        (estimate, number_of_bins) — the converged estimate and the bin
        count at which it was accepted.
    """
    convergence = np.zeros(maxiter)
    # The endpoint values are the same at every level — evaluate them once.
    end_values = f(start_point) + f(end_point)
    for number_of_bins in range(1, maxiter):
        bin_size = float(end_point - start_point) / number_of_bins
        # Composite trapezoid weights: endpoints count 1/2, every interior
        # bin edge counts once (it is the right edge of one trapezoid and
        # the left edge of the next — the original evaluated f twice there).
        total = 0.5 * end_values
        for i in range(1, number_of_bins):
            total += f(start_point + i * bin_size)
        convergence[number_of_bins] = bin_size * total
        if number_of_bins > 1 and abs(convergence[number_of_bins] - convergence[number_of_bins - 1]) < atol:
            break
    return convergence[number_of_bins], number_of_bins
def f(x):
    """Integrand sqrt(x)*cos(x).  Uses np explicitly — the bare sqrt/cos/pi
    names only exist via the %pylab star import and fail as a plain script."""
    return np.sqrt(x) * np.cos(x)
print(trapezoid_rule(0.0, np.pi, f, 0.01))
(-0.94069360904553889, 9)
Consider the function: $$f(x) = \dfrac{1}{(x − 0.3)^2 + 0.01} - \dfrac{1}{(x − 0.8)^2 + 0.04}$$.
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
def bisection(fct, a, b, atol=1.0E-4, nmax=100):
    """Locate a root of fct inside the bracket [a, b] by interval halving.

    Stops when fct hits exactly zero at the midpoint or the half-interval
    drops below atol.  Prints the number of function evaluations used and
    returns the midpoint.  Raises RuntimeError after nmax iterations.
    """
    fevals = 0
    for _ in range(nmax + 1):
        mid = (a + b) / 2.
        # Termination test costs one evaluation.
        if fct(mid) == 0. or (b - a) / 2. < atol:
            fevals += 1
            print('Bisection used', fevals, 'evaluations')
            return mid
        # Keep whichever half still brackets the sign change (two evaluations).
        if np.sign(fct(mid)) != np.sign(fct(a)):
            b = mid
        else:
            a = mid
        fevals += 2
    raise RuntimeError('no root found within [a,b]')
def newton_numdif(fct, x0, dx=1.0E-2, atol=1.0E-4, nmax=1000):
    """Newton-Raphson root finding with a forward-difference gradient.

    Parameters:
        fct: function whose root is sought.
        x0: starting guess.
        dx: step used for the forward-difference derivative estimate.
        atol: convergence tolerance on successive iterates.
        nmax: iteration cap (new, backward-compatible — the original
            `while 1` loop could spin forever on a non-converging case).

    Returns the final iterate; prints the evaluation count on success, or a
    warning when the numerical gradient vanishes.

    Raises:
        RuntimeError: if nmax iterations pass without convergence.
    """
    x = [x0]
    fevals = 0
    for _ in range(nmax):
        # Forward-difference derivative estimate (two evaluations).
        dfdx = (fct(x[-1] + dx) - fct(x[-1])) / (dx)
        if numpy.isclose(dfdx, 0):
            # A flat gradient sends the update to infinity — give up.
            print("Newton-Raphson failing: zero gradient")
            return x[-1]
        x.append(x[-1] - fct(x[-1]) / dfdx)  # one more evaluation
        fevals += 3
        if abs(x[-1] - x[-2]) < atol:
            print('Newton (num dif) used', fevals, 'evaluations')
            return x[-1]
    raise RuntimeError('Newton-Raphson failed to converge within nmax iterations')
def f(x):
    """Two-bump test function: a sharp peak near x=0.3 minus a broader
    trough near x=0.8."""
    peak = 1. / ((x - 0.3) ** 2 + 0.01)
    trough = 1. / ((x - 0.8) ** 2 + 0.04)
    return peak - trough
x0 = 0.5
# Run both root finders from the same starting point and compare the
# evaluation counts they report.
print(bisection(f, x0, 2 * x0))
print(newton_numdif(f, x0))
# Plot f with a dashed zero line so the roots are visible.
xx = np.linspace(-2, 3, 100)
plt.plot(xx, f(xx))
plt.plot(xx, np.linspace(0, 0, 100), 'k--')
plt.show()
Bisection used 25 evaluations 0.58001708984375 Newton (num dif) used 12 evaluations 0.5799999836358588
# 5.4
# Starting Newton-Raphson from a point outside the region bounded by the
# extrema of f(x) makes the iteration diverge and ultimately fail once the
# numerical gradient goes to zero; it likewise fails wherever df/dx = 0.
print(newton_numdif(f, 1.0))
Newton-Raphson failing: zero gradient 155.18371252502192