#!/usr/bin/env python
# coding: utf-8

# ### Example 1b: Linear convection in 2D, revisited
#
# We will now revisit the first example of this tutorial with an example that is better suited to the numerical scheme used in Devito. As a reminder, the governing equation is:
#
# $$\frac{\partial u}{\partial t}+c\frac{\partial u}{\partial x} + c\frac{\partial u}{\partial y} = 0$$
#
# We then discretized this using forward differences in time and backward differences in space:
#
# $$u_{i,j}^{n+1} = u_{i,j}^n-c \frac{\Delta t}{\Delta x}(u_{i,j}^n-u_{i-1,j}^n)-c \frac{\Delta t}{\Delta y}(u_{i,j}^n-u_{i,j-1}^n)$$
#
# In the previous example, the system was initialised with a hat function. As easy as this example seems, it actually highlights a few limitations of finite differences and related methods:
#
# - The governing equation above contains spatial derivatives ($\frac{\partial u}{\partial x}$ and $\frac{\partial u}{\partial y}$). The hat, with its sharp corners, is discontinuous and therefore non-smooth, meaning that the derivatives do not exist at the corners of the hat. This means that the governing equation has no solution in the strict sense for this problem. Mathematically, this problem can be overcome by introducing weak solutions, which still exist in the presence of discontinuities, as long as the problem is smooth almost everywhere. The Finite Volume (FV), Finite Element (FEM) and related schemes are based on this weak form.
# - The finite difference method only works well if finite differences are a good approximation of the derivatives. With the chosen discretization above, this requires that $\frac{u_{i,j}^n-u_{i-1,j}^n}{\Delta x} \approx \frac{\partial u}{\partial x}$ and $\frac{u_{i,j}^n-u_{i,j-1}^n}{\Delta y} \approx \frac{\partial u}{\partial y}$. This is the case for systems with a smooth solution if $\Delta x$ and $\Delta y$ are sufficiently small. But if the solution is non-smooth, as in this example, then we cannot expect a good approximation regardless of how fine the grid is.
# - First-order methods, such as the backward differences that we have used in this example, are known to create artificial diffusion. Higher-order schemes, such as central differences, avoid this problem. However, in the presence of discontinuities these methods introduce so-called spurious oscillations. These oscillations may even build up (grow infinitely) and cause the computation to diverge. A short 1D illustration of both effects follows the variable declarations below.
# - Discontinuities can appear by themselves for some equations (such as the Burgers equation that we discuss next), even if the initial condition is smooth. In CFD, discontinuities appear for example as shocks in the simulation of transonic flow. For this reason, numerical schemes that behave well in the presence of discontinuities have been a research subject for a long time. A thorough discussion is beyond the scope of this tutorial, but can be found in [R. LeVeque (1992): Numerical Methods for Conservation Laws, 2nd ed., Birkhäuser Verlag, pp. 8-13].
#
# In the remainder of this example, we will reproduce the results from the previous example, only this time with a smooth initial condition. This lets us observe Devito in a setting for which it is better equipped.

# In[1]:


from examples.cfd import plot_field, init_hat, init_smooth
import numpy as np
get_ipython().run_line_magic('matplotlib', 'inline')

# Some variable declarations
nx = 81
ny = 81
nt = 100
c = 1.
dx = 2. / (nx - 1)
dy = 2. / (ny - 1)
sigma = .2
dt = sigma * dx
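# The cell below is a small, self-contained 1D sketch of the third point above; it is not part of the original example, and all names in it (`nx1d`, `u_up`, `u_lw`, ...) are illustrative choices. It advects a 1D hat function once with first-order upwind (backward) differences and once with the second-order Lax-Wendroff scheme (a common scheme built from central differences): the first-order result stays monotone but smears the corners (artificial diffusion), while the second-order result stays sharper but develops over- and undershoots near the jumps (spurious oscillations).

# In[ ]:


import matplotlib.pyplot as plt

nx1d, nt1d, c1d = 101, 60, 1.
dx1d = 2. / (nx1d - 1)
dt1d = 0.4 * dx1d
C = c1d * dt1d / dx1d  # CFL number, 0.4

# 1D hat function: u = 2 for 0.5 <= x <= 1, u = 1 elsewhere
x1d = np.linspace(0., 2., nx1d)
u0 = np.ones(nx1d)
u0[int(.5 / dx1d):int(1. / dx1d) + 1] = 2.

u_up, u_lw = u0.copy(), u0.copy()
for n in range(nt1d):
    un_up, un_lw = u_up.copy(), u_lw.copy()
    # First-order upwind: monotone, but diffusive
    u_up[1:] = un_up[1:] - C * (un_up[1:] - un_up[:-1])
    # Second-order Lax-Wendroff: sharper, but oscillatory near the jumps
    u_lw[1:-1] = (un_lw[1:-1] - C / 2. * (un_lw[2:] - un_lw[:-2])
                  + C**2 / 2. * (un_lw[2:] - 2. * un_lw[1:-1] + un_lw[:-2]))

plt.plot(x1d, u_up, label='first-order upwind')
plt.plot(x1d, u_lw, label='second-order Lax-Wendroff')
plt.legend()
plt.show()

# The upwind result stays within [1, 2]; the Lax-Wendroff result does not
print("upwind:       min %.3f, max %.3f" % (u_up.min(), u_up.max()))
print("Lax-Wendroff: min %.3f, max %.3f" % (u_lw.min(), u_lw.max()))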
# Let us now initialise the field with an infinitely smooth bump, as given by [J.A. Krakos (2012): Unsteady Adjoint Analysis for Output Sensitivity and Mesh Adaptation, PhD thesis, p. 68] as
# $$
# f(r)=
# \begin{cases}
# \frac{1}{A}e^{-1/(r-r^2)} &\text{ for } 0 < r < 1,\\
# 0 &\text{ else.}
# \end{cases}
# $$
# We use this with $A=100$, and define the initial condition in two dimensions as $$u^0(x,y)=1+f\left(\frac{2}{3}x\right)\,f\left(\frac{2}{3}y\right).$$

# In[2]:


#NBVAL_IGNORE_OUTPUT

# Create field and assign initial conditions
u = np.empty((nx, ny))
init_smooth(field=u, dx=dx, dy=dy)

# Plot initial condition
plot_field(u, zmax=4)

# Solving this will move the bump again.

# In[3]:


# Repeat initialisation, so we can re-run the cell
init_smooth(field=u, dx=dx, dy=dy)

for n in range(nt + 1):
    # Copy previous result into a new buffer
    un = u.copy()

    # Update the new result with a 3-point stencil
    u[1:, 1:] = (un[1:, 1:] - (c * dt / dy * (un[1:, 1:] - un[1:, :-1])) -
                 (c * dt / dx * (un[1:, 1:] - un[:-1, 1:])))

    # Apply boundary conditions.
    # Note: -1 here is the last index in the array, not the one at x=-1 or y=-1.
    u[0, :] = 1.   # left
    u[-1, :] = 1.  # right
    u[:, 0] = 1.   # bottom
    u[:, -1] = 1.  # top

# In[4]:


#NBVAL_IGNORE_OUTPUT

# A small sanity check for auto-testing
assert (u[45:55, 45:55] > 1.8).all()
u_ref = u.copy()

plot_field(u, zmax=4.)

# Hooray, the wave moved! It looks like the solver works much better for this example: the wave has not noticeably changed its shape.

# #### Devito implementation
#
# Again, we can re-create this via a Devito operator. Let's fill the initial buffer with smooth data and look at it:

# In[5]:


#NBVAL_IGNORE_OUTPUT
from devito import Grid, TimeFunction

grid = Grid(shape=(nx, ny), extent=(2., 2.))
u = TimeFunction(name='u', grid=grid)
init_smooth(field=u.data[0], dx=dx, dy=dy)

plot_field(u.data[0])

# We again create the discretized equation, as shown below. Note that the equation itself is still the same; only the initial condition has changed.

# In[6]:


from devito import Eq

eq = Eq(u.dt + c*u.dxl + c*u.dyl)
print(eq)

# SymPy can re-organise this equation just like in the previous example.

# In[7]:


from devito import solve
from sympy import nsimplify, pprint

stencil = solve(eq, u.forward)
pprint(nsimplify(stencil))

# We can now use this stencil expression to create an operator to apply to our data object:

# In[8]:


#NBVAL_IGNORE_OUTPUT
from devito import Operator

# Reset our initial condition in both buffers.
# This is required to avoid 0s propagating into
# our solution, which has a background value of 1.
init_smooth(field=u.data[0], dx=dx, dy=dy)
init_smooth(field=u.data[1], dx=dx, dy=dy)

# Apply boundary conditions.
# Note that since u.data behaves like a NumPy array, we can use the
# -1 index to refer to the last item along an axis.
u.data[:, 0, :] = 1.
u.data[:, -1, :] = 1.
u.data[:, :, 0] = 1.
u.data[:, :, -1] = 1.

# Create an Operator that updates the forward stencil
# point in the interior subdomain only.
op = Operator(Eq(u.forward, stencil, subdomain=grid.interior))

# Apply the operator for a number of timesteps
op(time=nt, dt=dt)

plot_field(u.data[0])

# Some small sanity checks for the testing framework
assert (u.data[0, 45:55, 45:55] > 1.8).all()
assert np.allclose(u.data[0], u_ref, rtol=3.e-2)

# Again, this looks just like the result from NumPy. Since this example is just like the one before, the low-level treatment of boundaries is also unchanged.
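# As an optional, quick check (this cell is not part of the original notebook), we can quantify "just like the result from NumPy" by printing the maximum pointwise difference between the Devito buffer and the NumPy reference computed above:

# In[ ]:


# Maximum absolute difference between the Devito solution and the
# NumPy reference; a small value backs up the allclose check above.
print("max abs difference: %.3e" % np.abs(u.data[0] - u_ref).max())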
# In[9]:


#NBVAL_IGNORE_OUTPUT

# Reset our data field and ICs in both buffers
init_smooth(field=u.data[0], dx=dx, dy=dy)
init_smooth(field=u.data[1], dx=dx, dy=dy)

# To define BCs, we generally need to set rows/columns in our field
# explicitly via an expression. We can use Devito's "indexed" notation
# to do this. As u in this instance is a SymPy function, we cannot use
# the NumPy shortcut u[-1] to refer to the last location:
x, y = grid.dimensions
t = grid.stepping_dim
bc_left = Eq(u[t + 1, 0, y], 1.)
bc_right = Eq(u[t + 1, nx-1, y], 1.)
bc_top = Eq(u[t + 1, x, ny-1], 1.)
bc_bottom = Eq(u[t + 1, x, 0], 1.)

# Now combine the BC expressions with the stencil to form an operator
expressions = [Eq(u.forward, stencil, subdomain=grid.interior)]
expressions += [bc_left, bc_right, bc_top, bc_bottom]
op = Operator(expressions=expressions, opt=None, openmp=False)  # <-- Turn off performance optimisations

op(time=nt, dt=dt)
plot_field(u.data[0])

# Some small sanity checks for the testing framework
assert (u.data[0, 45:55, 45:55] > 1.8).all()
assert np.allclose(u.data[0], u_ref, rtol=3.e-2)

# The C code of the Kernel is also still the same.

# In[10]:


print(op.ccode)
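# Optionally (this cell is not part of the original notebook), we can build the same set of expressions again with Devito's default performance optimisations left on. Printing the generated kernel of this second operator and comparing it with the output above shows how the compiler typically transforms the plain loop nest; `op_default` is just an illustrative name.

# In[ ]:


# Same expressions, but compiled with the default optimisation settings.
# The generated C is typically noticeably different from the plain kernel above.
op_default = Operator(expressions=expressions)
print(op_default.ccode)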