#!/usr/bin/env python
# coding: utf-8

# ## ThinkDSP
#
# This notebook contains solutions to exercises in Chapter 10: Signals and Systems
#
# Copyright 2015 Allen Downey
#
# License: [Creative Commons Attribution 4.0 International](http://creativecommons.org/licenses/by/4.0/)

# In[ ]:

# Get thinkdsp.py

import os

if not os.path.exists('thinkdsp.py'):
    get_ipython().system('wget https://github.com/AllenDowney/ThinkDSP/raw/master/code/thinkdsp.py')


# In[2]:

import numpy as np
import matplotlib.pyplot as plt

from thinkdsp import decorate


# ## Exercise 1
#
# In this chapter I describe convolution as the sum of shifted,
# scaled copies of a signal. Strictly speaking, this operation is
# *linear* convolution, which does not assume that the signal
# is periodic.
#
# But when we multiply the
# DFT of the signal by the transfer function, that operation corresponds
# to *circular* convolution, which assumes that the signal is
# periodic. As a result, you might notice that the output contains
# an extra note at the beginning, which wraps around from the end.
#
# Fortunately, there is a standard solution to this problem. If you
# add enough zeros to the end of the signal before computing the DFT,
# you can avoid wrap-around and compute a linear convolution.
#
# Modify the example in `chap10soln.ipynb` and confirm that zero-padding
# eliminates the extra note at the beginning of the output.

# *Solution:* I'll truncate both signals to $2^{16}$ elements, then zero-pad them to $2^{17}$. Using powers of two makes the FFT algorithm most efficient.
#
# Here's the impulse response:

# In[3]:

if not os.path.exists('180960__kleeb__gunshot.wav'):
    get_ipython().system('wget https://github.com/AllenDowney/ThinkDSP/raw/master/code/180960__kleeb__gunshot.wav')


# In[4]:

from thinkdsp import read_wave

response = read_wave('180960__kleeb__gunshot.wav')

start = 0.12
response = response.segment(start=start)
response.shift(-start)

response.truncate(2**16)
response.zero_pad(2**17)

response.normalize()
response.plot()
decorate(xlabel='Time (s)')


# And its spectrum:

# In[5]:

transfer = response.make_spectrum()
transfer.plot()
decorate(xlabel='Frequency (Hz)', ylabel='Amplitude')


# Here's the signal:

# In[6]:

if not os.path.exists('92002__jcveliz__violin-origional.wav'):
    get_ipython().system('wget https://github.com/AllenDowney/ThinkDSP/raw/master/code/92002__jcveliz__violin-origional.wav')


# In[7]:

violin = read_wave('92002__jcveliz__violin-origional.wav')

start = 0.11
violin = violin.segment(start=start)
violin.shift(-start)

violin.truncate(2**16)
violin.zero_pad(2**17)

violin.normalize()
violin.plot()
decorate(xlabel='Time (s)')


# And its spectrum:

# In[8]:

spectrum = violin.make_spectrum()


# Now we can multiply the DFT of the signal by the transfer function, and convert back to a wave:

# In[9]:

output = (spectrum * transfer).make_wave()
output.normalize()


# The result doesn't look like it wraps around:

# In[10]:

output.plot()


# And we don't hear the extra note at the beginning:

# In[11]:

output.make_audio()
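# To see why zero-padding works, here's a small sketch with toy signals:
# multiplying the DFTs at the original length computes a circular
# convolution, so the tail of the result wraps around to the beginning;
# padding to at least `len(x) + len(h) - 1` samples recovers the linear
# convolution that `np.convolve` computes.

# In[ ]:

x = np.array([1.0, 2.0, 3.0, 4.0])
h = np.array([1.0, -1.0, 0.5])

# circular convolution: multiply DFTs at the original length
n = len(x)
circular = np.fft.ifft(np.fft.fft(x, n) * np.fft.fft(h, n)).real

# linear convolution: zero-pad both signals before taking the DFTs
m = len(x) + len(h) - 1
linear = np.fft.ifft(np.fft.fft(x, m) * np.fft.fft(h, m)).real

print(circular)           # the last two terms wrap around to the start
print(linear)             # matches np.convolve(x, h)
print(np.convolve(x, h))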
# We should get the same results from `np.convolve` and `scipy.signal.fftconvolve`.
#
# First I'll get rid of the zero padding:

# In[12]:

response.truncate(2**16)
response.plot()


# In[13]:

violin.truncate(2**16)
violin.plot()


# Now we can compare to `np.convolve`:

# In[14]:

output2 = violin.convolve(response)


# The results are similar:

# In[15]:

output2.plot()


# And sound the same:

# In[16]:

output2.make_audio()


# But the results are not exactly the same length:

# In[17]:

len(output), len(output2)


# `scipy.signal.fftconvolve` does the same thing, but as the name suggests, it uses the FFT, so it is substantially faster:

# In[18]:

from thinkdsp import Wave
import scipy.signal

ys = scipy.signal.fftconvolve(violin.ys, response.ys)
output3 = Wave(ys, framerate=violin.framerate)


# The results look the same.

# In[19]:

output3.plot()


# And sound the same:

# In[20]:

output3.make_audio()


# And within floating point error, they are the same:

# In[21]:

output2.max_diff(output3)


# ## Exercise 2
#
# The Open AIR library provides a "centralized... on-line resource for
# anyone interested in auralization and acoustical impulse response
# data" (http://www.openairlib.net). Browse their collection
# of impulse response data and download one that sounds interesting.
# Find a short recording that has the same sample rate as the impulse
# response you downloaded.
#
# Simulate the sound of your recording in the space where the impulse
# response was measured, computed two ways: by convolving the recording
# with the impulse response and by computing the filter that corresponds
# to the impulse response and multiplying by the DFT of the recording.

# *Solution:* I downloaded the impulse response of the Lady Chapel at St Albans Cathedral: http://www.openairlib.net/auralizationdb/content/lady-chapel-st-albans-cathedral
#
# Thanks to Audiolab, University of York: Marcin Gorzel, Gavin Kearney, Aglaia Foteinou, Sorrel Hoare, Simon Shelley.

# In[22]:

if not os.path.exists('stalbans_a_mono.wav'):
    get_ipython().system('wget https://github.com/AllenDowney/ThinkDSP/raw/master/code/stalbans_a_mono.wav')


# In[23]:

response = read_wave('stalbans_a_mono.wav')

start = 0
duration = 5
response = response.segment(duration=duration)
response.shift(-start)

response.normalize()
response.plot()
decorate(xlabel='Time (s)')


# Here's what it sounds like:

# In[24]:

response.make_audio()


# The DFT of the impulse response is the transfer function:

# In[25]:

transfer = response.make_spectrum()
transfer.plot()
decorate(xlabel='Frequency (Hz)', ylabel='Amplitude')


# Here's the transfer function on a log-log scale:

# In[26]:

transfer.plot()
decorate(xlabel='Frequency (Hz)', ylabel='Amplitude',
         xscale='log', yscale='log')


# Now we can simulate what a recording would sound like if it were played in the same room and recorded in the same way. Here's the trumpet recording we have used before:

# In[27]:

if not os.path.exists('170255__dublie__trumpet.wav'):
    get_ipython().system('wget https://github.com/AllenDowney/ThinkDSP/raw/master/code/170255__dublie__trumpet.wav')


# In[28]:

wave = read_wave('170255__dublie__trumpet.wav')

start = 0.0
wave = wave.segment(start=start)
wave.shift(-start)

wave.truncate(len(response))
wave.normalize()
wave.plot()
decorate(xlabel='Time (s)')


# Here's what it sounds like before transformation:

# In[29]:

wave.make_audio()
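# The frequency-domain method only makes sense if the recording and the
# impulse response have the same sample rate (the exercise asks for this),
# and multiplying the spectrums element by element requires that they have
# the same length, which is why I truncated the recording above. A quick
# sanity check, using the `framerate` and `ys` attributes we've used before:

# In[ ]:

assert wave.framerate == response.framerate
assert len(wave.ys) == len(response.ys)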
# Now we compute the DFT of the trumpet recording.

# In[30]:

spectrum = wave.make_spectrum()


# I trimmed the trumpet recording to the same length as the impulse response:

# In[31]:

len(spectrum.hs), len(transfer.hs)


# In[32]:

spectrum.fs


# In[33]:

transfer.fs


# Now we can multiply in the frequency domain and then transform back to the time domain.

# In[34]:

output = (spectrum * transfer).make_wave()
output.normalize()


# Here's a comparison of the original and transformed recordings:

# In[35]:

wave.plot()


# In[36]:

output.plot()


# And here's what it sounds like:

# In[37]:

output.make_audio()


# Now that we recognize this operation as convolution, we can compute it using the `convolve` method:

# In[38]:

convolved2 = wave.convolve(response)
convolved2.normalize()
convolved2.make_audio()
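# As in Exercise 1, multiplying the spectrums computes a *circular*
# convolution, so the reverb tail wraps around to the beginning of the
# output, while `convolve` computes a linear convolution and returns a
# longer wave. Here's a sketch of the same fix as before: zero-pad both
# waves before computing their spectrums, then multiply.

# In[ ]:

n = 2 * len(response)
wave.zero_pad(n)
response.zero_pad(n)

output4 = (wave.make_spectrum() * response.make_spectrum()).make_wave()
output4.normalize()
output4.make_audio()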