#!/usr/bin/env python
# coding: utf-8

# # Nonlinear Equations and their Roots
# ## CH EN 2450 - Numerical Methods
# **Prof. Tony Saad (www.tsaad.net)
# Department of Chemical Engineering
# University of Utah**
# Root finding for nonlinear functions: find x such that f(x) = 0.
# A problem posed as f(x) = a is first rewritten in "residual" form,
#     r(x) := f(x) - a,
# so that it again reads "find x such that r(x) = 0".
#
# Running example throughout this notebook (residual form):
#     r(x) := ln(x) + cos(x) * exp(-0.1 x) - 2 = 0
# which has three roots, near 5.309, 8.045, and 10.02.

import numpy as np
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'svg'")
import matplotlib.pyplot as plt
from scipy.optimize import fsolve

# Residual whose zeros are the roots we seek.
res = lambda x: np.log(x) + np.cos(x) * np.exp(-0.1 * x) - 2.0

# Plot the residual over [4, 15] so the three sign changes are visible.
xgrid = np.linspace(4, 15, 200)
plt.grid()
plt.axhline(y=0, color='k')
plt.plot(xgrid, res(xgrid))

# # Root Finding Methods
# Two classes of methods:
#   1. closed-domain methods: require two guesses that bracket the root,
#   2. open-domain methods: start from an arbitrary initial guess.
#
# ## Bisection Method (closed domain)
#   1. choose a and b so that the root x0 satisfies a <= x0 <= b
#   2. (continued below)
# Bisection algorithm (continued):
#   2. compute the midpoint c = (a + b) / 2
#   3. if f(a) * f(c) < 0 the root lies in [a, c], so set b = c; else a = c
#   4. check for convergence and repeat as necessary

def bisect(f, a, b, tol, maxiter):
    """Find a root of ``f`` in ``[a, b]`` by bisection.

    Parameters
    ----------
    f : callable
        Function whose root is sought.
    a, b : float
        Endpoints of the initial bracket; f(a) and f(b) must have
        opposite signs for the halving logic to be meaningful.
    tol : float
        Convergence tolerance on the residual |f(c)|.
    maxiter : int
        Maximum number of iterations.

    Returns
    -------
    float
        Approximation to the root.

    Raises
    ------
    ValueError
        If ``[a, b]`` does not bracket a sign change — without one,
        bisection silently converges to a point that is not a root.
    """
    if f(a) * f(b) > 0.0:
        raise ValueError('bisect: [a, b] does not bracket a sign change of f.')
    err = tol + 100  # force at least one iteration
    niter = 0
    print('{:<12} {:<12} {:<12} {:<12}'.format('Iteration', 'a', 'b', 'error'))
    while err > tol and niter < maxiter:
        niter += 1
        c = (a + b) / 2.0
        fc = f(c)
        # Keep the half-interval that still brackets the sign change.
        if f(a) * fc < 0.0:
            b = c
        else:
            a = c
        err = abs(fc)
        print('{:<12} {:<12} {:<12} {:<12}'.format(niter, round(a, 6), round(b, 6), round(err, 10)))
    print('Iterations=', niter, ' Root=', c)
    return c

bisect(res, 4, 5.5, 1e-5, 10)

# ## Method of False Position (Regula-Falsi)
# Like bisection, the root is bracketed by two guesses — but instead of the
# midpoint, the next guess is where the straight line through (a, f(a)) and
# (b, f(b)) crosses the x-axis:
#   1. choose a and b so that the root x0 satisfies a <= x0 <= b
#   2. slope of the connecting line: m = (f(b) - f(a)) / (b - a)
#   3. x-axis intersection: c = b - f(b) / m
#   4. if f(a) * f(c) < 0 then b = c else a = c
#   5. (continued below)
#   5. check for convergence and repeat as necessary

def falsi(f, a, b, tol, maxiter=100):
    """Find a root of ``f`` in ``[a, b]`` by the Regula-Falsi method.

    Parameters
    ----------
    f : callable
        Function whose root is sought.
    a, b : float
        Initial guesses bracketing the root.
    tol : float
        Convergence tolerance on the residual |f(c)|.
    maxiter : int, optional
        Iteration cap (default 100) so a non-converging problem cannot
        loop forever.

    Returns
    -------
    float
        Approximation to the root.
    """
    niter = 0
    err = tol + 100  # force at least one iteration
    print('{:<12} {:<12} {:<12} {:<12}'.format('Iteration', 'a', 'b', 'error'))
    while err > tol and niter < maxiter:
        niter += 1
        fa = f(a)
        fb = f(b)
        m = (fb - fa) / (b - a)   # slope of the secant line through a, b
        c = b - fb / m            # x-intercept of that line = new guess
        fc = f(c)
        err = abs(fc)
        # Keep whichever endpoint still brackets the sign change.
        if fc * fa < 0.0:
            b = c
        else:
            a = c
        print('{:<12} {:<12} {:<12} {:<12}'.format(niter, round(a, 6), round(b, 6), round(err, 10)))
    print('Iterations:', niter, 'Root=', c)
    return c

falsi(res, 4, 5.5, 1e-5)

# # Open Domain Methods
# Closed-domain methods are guaranteed to converge but need a bracket, which
# is impractical when the root's location is unknown. Open-domain methods
# drop the bracketing requirement.
#
# ## The Secant Method
# Identical update to Regula-Falsi, but the two guesses need not bracket a
# root — the older guess is simply discarded each iteration:
#   1. choose any a and b
#   2. m = (f(b) - f(a)) / (b - a)
#   3. c = b - f(b) / m
#   4. set a = b and b = c
#   5. check for convergence and repeat as necessary

def secant(f, a, b, tol, maxiter=100):
    """Find a root of ``f`` by the secant method.

    Parameters
    ----------
    f : callable
        Function whose root is sought.
    a, b : float
        Two initial guesses (need not bracket a root).
    tol : float
        Convergence tolerance on the residual |f(c)|.
    maxiter : int, optional
        Iteration cap (default 100) so a diverging iteration cannot
        loop forever.

    Returns
    -------
    float
        Approximation to the root.
    """
    niter = 0
    err = tol + 100  # force at least one iteration
    print('{:<12} {:<12} {:<12} {:<12}'.format('Iteration', 'a', 'b', 'error'))
    while err > tol and niter < maxiter:
        niter += 1
        fa = f(a)
        fb = f(b)
        m = (fb - fa) / (b - a)   # slope of the secant line
        c = b - fb / m            # its x-intercept = new guess
        err = abs(f(c))
        a, b = b, c               # discard the oldest guess
        print('{:<12} {:<12} {:<12} {:<12}'.format(niter, round(a, 6), round(b, 6), round(err, 10)))
    print('Iterations:', niter, 'Root=', c)
    return c

secant(res, 7, 7.5, 1e-5)

# ## Newton's Method
# One of the most popular open-domain solvers, based on a two-term
# truncation of the Taylor series of f(x).
# Given an initial guess x0, Newton's method proceeds as:
#   1. choose x0
#   2. compute f(x0) and f'(x0)
#   3. x1 = x0 - f(x0) / f'(x0)
#   4. set x0 = x1, check for convergence, and repeat as necessary

def newton(f, df, x0, tol, maxiter=100):
    """Find a root of ``f`` by Newton's method with analytic derivative.

    Parameters
    ----------
    f : callable
        Function whose root is sought.
    df : callable
        Analytic derivative of ``f``.
    x0 : float
        Initial guess.
    tol : float
        Convergence tolerance on the residual |f(x)|.
    maxiter : int, optional
        Iteration cap (default 100, matching the original hard-coded limit).

    Returns
    -------
    float
        Approximation to the root.
    """
    niter = 0
    err = tol + 100  # force at least one iteration
    print('iteration \t root\n')
    while err > tol and niter < maxiter:
        x1 = x0 - f(x0) / df(x0)  # Newton update
        x0 = x1
        err = abs(f(x0))
        niter += 1
        print(niter, '\t', x1)
    print('Iterations:', niter, 'Root=', x1)
    return x1

# In many cases the derivative is not known analytically and a finite
# difference approximation is used instead.

def newton2(f, x0, tol, maxiter=100):
    """Newton's method with a forward-difference derivative approximation.

    Parameters
    ----------
    f : callable
        Function whose root is sought.
    x0 : float
        Initial guess.
    tol : float
        Convergence tolerance on the residual |f(x)|.
    maxiter : int, optional
        Iteration cap (default 100, matching the original hard-coded limit).

    Returns
    -------
    float
        Approximation to the root.
    """
    niter = 0
    err = tol + 100  # force at least one iteration
    while err > tol and niter < maxiter:
        # Step scaled to x0; the 1e-12 floor keeps delta nonzero at x0 == 0.
        delta = 1e-4 * x0 + 1e-12
        df = (f(x0 + delta) - f(x0)) / delta
        x1 = x0 - f(x0) / df
        x0 = x1
        err = abs(f(x0))
        niter += 1
    print('Iterations:', niter, 'Root=', x1)
    return x1

newton2(res, 1, 1e-5)

import urllib
import requests
from IPython.core.display import HTML

def css_styling():
    """Fetch and apply the course notebook stylesheet (network access)."""
    styles = requests.get("https://raw.githubusercontent.com/saadtony/NumericalMethods/master/styles/custom.css")
    return HTML(styles.text)

css_styling()

# --- Scratch cells ---------------------------------------------------------
# NOTE(review): the first r/df pair below is dead code — it is overwritten
# immediately by the ln(x) + exp(x) - 1 pair before newton() is called.
r = lambda x: np.sin(x) + np.cos(1 + x**2) - 1
df = lambda x: np.cos(x) - 2 * x * np.sin(1 + x**2)

r = lambda x: np.log(x) + np.exp(x) - 1
df = lambda x: 1.0 / x + np.exp(x)

newton(r, df, 0.5, 0.0001)

# Cross-check the Newton result with SciPy's general-purpose solver.
fsolve(r, 0.5)

# Graphical check: the root of r is where ln(x) and 1 - exp(x) intersect.
x = np.linspace(0.01, 2)
r = lambda x: np.log(x) + np.exp(x) - 1
plt.plot(x, np.log(x), x, 1 - np.exp(x))
plt.axhline(y=0)