#!/usr/bin/env python
# coding: utf-8

# # Gradient Descent

# In[1]:

from __future__ import division
get_ipython().run_line_magic('pylab', 'inline')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')


# $\newcommand{\dotp}[2]{\langle #1, #2 \rangle}$
# $\newcommand{\umin}[1]{\underset{#1}{\min}\;}$
# $\newcommand{\RR}{\mathbb{R}}$
# $\newcommand{\pd}[2]{ \frac{ \partial #1}{\partial #2} }$
# $\newcommand{\norm}[1]{\|#1\|}$
# $\newcommand{\pa}[1]{\left(#1\right)}$
#
# We consider the problem of finding a minimizer of a convex smooth function $\;f : \RR^d \rightarrow \RR$; that is, we want to solve
# $$\umin{x \in \RR^d} f(x)$$
#
# Note that the minimizer is not necessarily unique.
#
# The simplest method is gradient descent, which iteratively computes
# $$ x^{(k+1)} = x^{(k)} - \tau \nabla f(x^{(k)}), $$
# where $\nabla f(x) \in \RR^d$ is the gradient of $f$ at $x$, $x^{(0)} \in \RR^d$ is an arbitrary initial point, and the stepsize $\tau$ must satisfy
# $$0<\tau<2/\beta$$
# to ensure convergence, where $\beta$ is a Lipschitz constant of $\nabla f$; that is,
# $$ \forall (x,x') \in \RR^d \times \RR^d, \quad
# \norm{\nabla f(x) - \nabla f(x')} \leq \beta \norm{x-x'}. $$
#
# For instance, if $f$ is of class $C^2$, $$\beta= \sup_x \norm{Hf(x)},$$
# where $Hf(x) \in \RR^{d \times d}$ is the Hessian of $f$ at $x$ and $\norm{\cdot}$ is the spectral operator norm (largest eigenvalue).

# ## Gradient descent in 2-D

# We consider a simple problem: the minimization of a 2-D ($d=2$) quadratic form
# $$ f(x) = \frac{1}{2} \pa{ x_1^2 + \eta x_2^2 } ,$$
# where $\eta>0$ controls the anisotropy, and hence the difficulty, of the problem.

# Note that minimizing a strongly convex quadratic function is equivalent to solving a positive definite linear system. Gradient descent is then equivalent to the Richardson iteration applied to this linear system.

# We define the anisotropy parameter $\eta$:

# In[2]:

eta = 8


# We define the function $f$:

# In[3]:

f = lambda x : (x[0]**2 + eta*x[1]**2) / 2
# Note: a 'lambda' function is a one-line function definition;
# in Matlab, this would be `f=@(x)(x(1)^2+eta*x(2)^2)/2;`


# An example of function evaluation:

# In[4]:

f([2,3])


# We display the function using a contour plot:

# In[5]:

figsize(6,6)
t = linspace(-.7,.7,101)
[u,v] = meshgrid(t,t)
F = (u**2 + eta*v**2) / 2
contourf(t,t,F,35)


# We define the gradient of $f$:

# In[6]:

Grad_f = lambda x : array([x[0], eta*x[1]])


# An example of evaluation:

# In[7]:

Grad_f([1,2])


# Since $f$ is quadratic, its Hessian is the constant matrix $\left(\begin{array}{cc}1&0\\0&\eta\end{array}\right)$. Its spectral norm, which is the Lipschitz constant of `Grad_f`, is $\beta=\max(1,\eta)$.
#
# The stepsize $\tau$ must satisfy $0< \tau < 2/\beta$.

# In[8]:

tau = 1.8/max(eta,1); tau


# Now we implement the gradient descent method: given the initial estimate $x^{(0)}$ of the solution, the stepsize $\tau$ and the number $k$ of iterations, we compute $x^{(k)}$:

# In[9]:

nbiter = 10
x = [1,1]  # initial estimate of the solution
for iter in range(nbiter):  # iter goes from 0 to nbiter-1
    x = x - tau*Grad_f(x)
x  # to display x, like in Matlab.
# Use print(x) if this is not the last command of the cell; otherwise nothing is displayed.


# Note: there is no 'end' for loops in Python; instead, the body of the loop is determined by the indentation: here, four spaces before `x = ...`.

# In[10]:

f(_)  # _ is the last output, like Matlab's ans


# We encapsulate the code in a function called `GradDescent`.

# In[11]:

def GradDescent(Grad_f, x0, nbiter, tau):
    x = x0
    for iter in range(nbiter):  # iter goes from 0 to nbiter-1
        x = x - tau*Grad_f(x)   # x has type 'array'. Like in C, one can also write x -= tau*Grad_f(x)
    return x


# In[12]:

GradDescent(Grad_f,[1,1],10,tau)


# We define a function called `GradDescentArray` which puts the iterates in a 'matrix' (a 2-D array in fact); they are first put in a list and the list is converted to an array at the end (the + operation on lists concatenates them, whereas on arrays it is the classical elementwise addition).

# In[13]:

def GradDescentArray(Grad_f, x0, nbiter, tau):
    x = x0
    xlist = [list(x0)]  # the cast is just in case x0 would be an array
    for iter in range(nbiter):
        x = x - tau*Grad_f(x)
        xlist = xlist + [list(x)]
    return array(xlist)


# In[14]:

xarray = GradDescentArray(Grad_f,[0.6,0.6],10,0.225)
xarray


# We can access the elements of an array like in Matlab. Be careful: like in C, the first element has index 0! Some examples:

# In[15]:

xarray[1,0]


# In[16]:

xarray[1,:]


# In[17]:

xarray[:,0]


# We can apply the function $f$ to every iterate $x^{(k)}$ of the array `xarray`, using the command `map`:

# In[18]:

map(f,xarray)


# We plot the cost function $f(x^{(k)})$ as a function of $k$, in log-scale:

# In[19]:

plot(range(len(xarray)),log10(map(f,xarray)),'o-')


# We remark that because the function $f$ is strongly convex, the convergence is linear. Also, note that gradient descent is monotonic: $f(x^{(k)})$ decreases at every iteration.

# We plot the iterates above the contour plot of $f$:

# In[20]:

figsize(8,8)
contourf(t,t,F,35)
xarray = GradDescentArray(Grad_f,[0.6,0.6],30,0.225)
plot(xarray[:,0], xarray[:,1], 'w.-')
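# For this quadratic, the iterates satisfy $x_1^{(k+1)}=(1-\tau)\,x_1^{(k)}$ and $x_2^{(k+1)}=(1-\tau\eta)\,x_2^{(k)}$, so $f(x^{(k)})$ decays geometrically with asymptotic ratio $\max\big((1-\tau)^2,(1-\tau\eta)^2\big)$. The cell below is a small sketch of a numerical check of this rate (the names `costs` and `ratios` are just illustrative).

# In[ ]:

costs = array([f(x) for x in xarray])            # f(x^(k)) along the iterates computed above
ratios = costs[1:] / costs[:-1]                  # observed contraction factors f(x^(k+1)) / f(x^(k))
print(ratios)
print(max((1 - 0.225)**2, (1 - 0.225*eta)**2))   # theoretical asymptotic ratio for tau = 0.225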

#
# __Exercise: re-run the previous cells with different values of tau (try 0.25, 0.225, 0.2, 0.125, 0.05); a sketch of this experiment is given below.__
#
# __Also try different initial estimates.__
#
# __Also try different values of the anisotropy eta.__
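# One possible way to carry out the tau experiment programmatically is sketched below (the name `tau_test` is just illustrative); the same loop can be adapted to vary the initial estimate or eta.

# In[ ]:

figsize(8,6)
for tau_test in [0.25, 0.225, 0.2, 0.125, 0.05]:
    xarray_test = GradDescentArray(Grad_f, [0.6,0.6], 30, tau_test)
    fvals = [f(x) for x in xarray_test]                                # cost along the iterates
    plot(range(len(fvals)), log10(fvals), 'o-', label='tau=%s' % tau_test)
legend()
xlabel('iteration k')
ylabel('log10 of f(x^(k))')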

# ## Gradient descent in large dimension: image restoration

# Local differential operators like the discrete gradient are fundamental for variational image processing.

# We load the image Lena into the array $x^\sharp$ (we choose the symbol 'sharp' because the image is clean, not degraded).

# In[27]:

from scipy import misc
xsharp = misc.lena()
print(xsharp.shape)  # like Matlab's size(xsharp); given as a tuple
print("The size of the image is %s x %s." % (xsharp.shape[0],xsharp.shape[1]))
print("The range of the pixel values is [%s,%s]." % (xsharp.min(),xsharp.max()))
xsharp = xsharp.astype(float32)  # like Matlab's xsharp=double(xsharp), so that the pixel values are floating point numbers


# We display the image.

# In[28]:

figsize(11,11)
imshow(xsharp, interpolation='nearest', cmap=cm.gray, vmin=0, vmax=255)
# Without specifying vmin and vmax, imshow auto-adjusts its range so that black and white are
# the min and max of the data, respectively, like Matlab's imagesc.
colorbar()  # displays the color bar close to the image
#axis('off')  # uncomment to remove the axes
subplots_adjust(top=0.75)
title('This is Lena')


# We define the 'discrete gradient' $D$, the linear operator which maps an image to an image whose values are pairs of vertical and horizontal finite differences:
#
# $$(D x)_{n_1,n_2} = \big((D x)_{n_1,n_2,v},(D x)_{n_1,n_2,h}\big)=( x_{n_1+1,n_2}-x_{n_1,n_2}, x_{n_1,n_2+1}-x_{n_1,n_2} ) \in \RR^2,$$
# where $n_1$ and $n_2$ are the row and column numbers, respectively (the origin $n_1=n_2=0$ corresponds to the pixel at the top left corner), and where Neumann boundary conditions are assumed: a difference across the boundary is zero.

# Let us define the operator $D$ as a function:

# In[29]:

def D(x):
    vdiff = r_[diff(x,1,0), zeros([1,x.shape[1]])]  # the r_ command concatenates along the rows
    hdiff = c_[diff(x,1,1), zeros([x.shape[0],1])]  # the c_ command concatenates along the columns
    return concatenate((vdiff[...,newaxis], hdiff[...,newaxis]), axis=2)  # combination along a third dimension

# An alternative, more compact, definition:
#D = lambda x : c_['2,3',r_[diff(x,1,0), zeros([1,x.shape[1]])],c_[diff(x,1,1), zeros([x.shape[0],1])]]


# In[30]:

v = D(xsharp)


# We display the two components as two grayscale images.

# In[31]:

from mpl_toolkits.axes_grid1 import make_axes_locatable
fig, (axv,axh) = subplots(1,2,figsize=(16,7))  # one figure with two horizontal subfigures
suptitle('The two images of vertical and horizontal finite differences')
imv = axv.imshow(v[:,:,0], cmap=cm.gray)
imh = axh.imshow(v[:,:,1], cmap=cm.gray)
axv.set_title('Image of vertical finite differences')
axh.set_title('Image of horizontal finite differences')
dividerv = make_axes_locatable(axv)
caxv = dividerv.append_axes("right", size="7%", pad=0.1)
colorbar(imv,cax=caxv)
dividerh = make_axes_locatable(axh)
caxh = dividerh.append_axes("right", size="7%", pad=0.1)
colorbar(imh,cax=caxh)


# Let us display the image of the magnitudes $\norm{(D x)_{n_1,n_2}}$, which are large near edges.

# In[32]:

figsize(11,11)
imshow(sqrt(sum(v**2,2)), cmap=cm.gray, vmin=0)
colorbar()
subplots_adjust(top=0.75)


# We remark that the gradient magnitude is close to zero at most pixels.

# Imaging inverse problems consist in estimating an unknown image $x^\sharp$, having at our disposal only the data $y$, which contains partial, degraded information about $x^\sharp$: we have the forward model
# $$y=Ax^\sharp\ +\mbox{ noise},$$
# where $A$ is a known linear observation/degradation operator.
# $A$ is not invertible in general, which makes the problem ill-posed. So, one must inject some prior information modeling what a natural image is, to regularize the problem and make it well-posed. Perhaps the simplest prior one can use is that natural images are smooth; that is, their gradient has low energy (as for the Lena image above). This yields Tikhonov regularization: $x^\sharp$ is estimated by $x^\star$, a solution to
# $$\umin{x \in \RR^d} f(x)$$
# where
# $$f(x)=\frac{1}{2}\|Ax-y\|^2+\frac{\lambda}{2}\|Dx\|^2_{2,2},$$
# with $\|Dx\|^2_{2,2}=\sum_{n_1,n_2} \|(Dx)_{n_1,n_2}\|^2_2=\sum_{n_1,n_2} (Dx)_{n_1,n_2,v}^2+(Dx)_{n_1,n_2,h}^2$
# and some regularization parameter $\lambda$, which depends on the noise level.
#
# Here again, the function $f$ to minimize is smooth (it is quadratic) and we can use gradient descent. We have
# $$\nabla f(x)=A^*(Ax-y)+\lambda D^*Dx=(A^*A+\lambda D^*D)x-A^*y.$$
#
# So, to compute $\nabla f(x)$ for some image $x$, we must be able to apply the operators $A^*A$, where $A^*$ is the adjoint of $A$, and $D^*D$. For this, let us define $D^*$ as a function:

# In[33]:

Dadj = lambda v : r_['0,2',-v[0,:,0],-diff(v[:-1,:,0],1,0),v[-2,:,0]] + c_['1,2',-v[:,0,1],-diff(v[:,:-1,1],1,1),v[:,-2,1]]


# $D^*$ can be viewed as the opposite of a discrete divergence operator. Hence, $D^*\circ D$ can be viewed as the opposite of a discrete Laplacian operator. In fact, $D^*\circ D$ amounts to convolving the image with the filter $\left(\begin{array}{ccc}0&-1&0\\-1&4&-1\\0&-1&0\end{array}\right)$, with symmetric boundary conditions.
#
# So, $D^*\circ D$ could be implemented more efficiently than here, but this is not the point, and we will need $D$ and $D^*$ separately later, for the total variation.

# We check the relation $\norm{D x}^2 - \dotp{D^*Dx}{x}=0.$

# In[34]:

(D(xsharp)**2).sum() - (Dadj(D(xsharp))*xsharp).sum()


# ## Image Denoising

# We consider the problem of denoising the image $y \in \RR^{N_1\times N_2}$, which corresponds to the problem above with $A$ the identity operator. So we want to solve
# $$\umin{x \in \RR^d} f(x)$$
# where
# $$f(x)=\frac{1}{2}\|x-y\|^2+\frac{\lambda}{2}\|Dx\|^2_{2,2}.$$
# Note that we are now looking for an unknown $x$ which lives in a Hilbert space of dimension $d=N_1 N_2$, the number of pixels of the image; for Lena, $d=512^2=262144$, hence the term 'large-scale optimization'.

# We add noise to the original image $x^\sharp$, to simulate the noisy image $y$.

# In[35]:

(N1,N2) = shape(xsharp)
noiselevel = 20
y = xsharp + noiselevel * randn(N1,N2)


# We display the noisy image $y$.

# In[36]:

figsize(11,11)
imshow(y, interpolation='nearest', cmap=cm.gray, vmin=0, vmax=255)
colorbar()
subplots_adjust(top=0.75)
title('This is a noisy version of Lena')


# We define the gradient of $f$ as a function.

#
# __Exercise: write the code of the function Grad_f.__

# In[37]:

def Grad_f(x, y, Lambda):
    # put your code here.


# The Lipschitz constant $\beta$ of $\nabla f$ is $\|\mathrm{Id}+\lambda D^*D\|=1+8\lambda$.
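# A possible solution sketch (one way to write it, not necessarily the intended one): with $A=\mathrm{Id}$, the formula above gives $\nabla f(x)=(x-y)+\lambda D^*Dx$, which translates directly into code using the functions `D` and `Dadj` defined earlier.

# In[ ]:

def Grad_f(x, y, Lambda):
    # gradient of f(x) = ||x-y||^2/2 + Lambda*||Dx||^2/2, namely (x-y) + Lambda*D^*(D x)
    return x - y + Lambda*Dadj(D(x))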

#
# __Exercise: write the code of the function GradDescent.__
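# One possible solution sketch (the exercise cell below is left for your own implementation): it mirrors the GradDescent function of the 2-D section, simply passing y and Lambda through to Grad_f.

# In[ ]:

def GradDescent(Grad_f, y, Lambda, x0, nbiter, tau):
    x = x0
    for iter in range(nbiter):
        x = x - tau*Grad_f(x, y, Lambda)  # one gradient step with stepsize tau
    return x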

# In[5]:

def GradDescent(Grad_f, y, Lambda, x0, nbiter, tau):
    # put your code here.


# In[39]:

Lambda = 8
xdenoised = GradDescent(Grad_f, y, Lambda, y, 100, 1.9/(1+8*Lambda))


# In[40]:

figsize(10,10)
imshow(xdenoised, cmap=cm.gray, vmin=0, vmax=255)
title('Denoised image')


# Note that this denoising process is linear: the denoised image $\tilde{x}$ obtained at convergence satisfies
# $$\nabla f (\tilde{x})=0\ \Leftrightarrow\ \tilde{x}=(\mathrm{Id}+\lambda D^*D)^{-1}y.$$
# As a result, the noise is attenuated, but the image is blurry: its fine details have been lost. So, this denoising process performs poorly. Good denoising processes are nonlinear. We will see in the next lab that using the total variation instead of Tikhonov regularization works much better.

# ## Projected Gradient Descent

# Often, one wants to minimize a smooth convex cost function, but with the additional constraint that the solution lies in some closed convex subset $\Omega$; that is, one wants to solve
# $$\umin{x \in \Omega} f(x)\quad\equiv\quad\umin{x \in \RR^d} f(x)\quad\mbox{s.t.}\quad x\in\Omega$$
#
# A natural extension of gradient descent is then _projected gradient descent_, which iteratively computes
# $$ x^{(k+1)} = P_\Omega\big(x^{(k)} - \tau \nabla f(x^{(k)})\big), $$
#
# where $P_\Omega$ is the orthogonal projection onto $\Omega$.
#
# Here also, convergence of the iterates $x^{(k)}$ to a solution $x^\star$ is guaranteed, provided that $0<\tau<2/\beta$.

# ## Image Inpainting by Projected Gradient Descent

# We now consider inpainting; that is, reconstructing missing pixel values from a subset of available pixels.
#
# We keep about 10% of the pixels at random; the other ones are missing.

# In[41]:

mask = rand(xsharp.shape[0],xsharp.shape[1]) > 0.9


# In[42]:

fig, (subfig1,subfig2) = subplots(1,2,figsize=(16,7))  # one figure with two horizontal subfigures
subfig1.imshow(mask, cmap=cm.gray)
subfig2.imshow(mask*xsharp, cmap=cm.gray)
subfig1.set_title('The binary mask')
subfig2.set_title('The available pixel values are displayed, the missing pixels are in black')


# The degradation operator $A$ is simply pixelwise multiplication by the binary mask. We have $A^*=A$ and $A^*A=A$.

# In[43]:

y = mask*xsharp


# The problem we consider is
# $$\umin{x \in \Omega} f(x)\quad\equiv\quad\umin{x \in \RR^d} \frac{1}{2}\|Dx\|^2\quad\mbox{s.t.}\quad Ax=y.$$

#
# __Exercise: write the code of the function Grad_f.__

# In[44]:

def Grad_f(x):
    # put your code here


# The Lipschitz constant $\beta$ of $\nabla f$ is 8.

# In[45]:

def Proj_Omega(x, y, mask):
    x[mask] = y[mask]  # arguments are passed by reference, so the content of x is modified in place
    return x
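# A possible solution sketch for the Grad_f exercise above (one way to write it): here $f(x)=\frac{1}{2}\|Dx\|^2$, so $\nabla f(x)=D^*Dx$, using the functions `D` and `Dadj` defined earlier.

# In[ ]:

def Grad_f(x):
    # gradient of f(x) = ||Dx||^2/2, namely D^*(D x)
    return Dadj(D(x))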

#
# __Exercise: write the code of the function ProjGradDescent.__

# In[46]:

def ProjGradDescent(Grad_f, Proj_Omega, y, mask, x0, nbiter, tau):
    # put your code here


# In[47]:

tau = 1.9 / 8
nbiter = 300


# In[48]:

xrestored = ProjGradDescent(Grad_f, Proj_Omega, y, mask, y, nbiter, tau)


# In[49]:

figsize(10,10)
imshow(xrestored, cmap=cm.gray, vmin=0, vmax=255)
title('Inpainted image')
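# A possible solution sketch for the ProjGradDescent exercise, for reference (one way to write it): a projected gradient step is a gradient step followed by the projection Proj_Omega onto the constraint set.

# In[ ]:

def ProjGradDescent(Grad_f, Proj_Omega, y, mask, x0, nbiter, tau):
    x = x0
    for iter in range(nbiter):
        x = x - tau*Grad_f(x)         # gradient step with stepsize tau
        x = Proj_Omega(x, y, mask)    # projection onto the constraint set {x : Ax = y}
    return x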