import numpy as np
from scipy.signal import convolve2d
from scipy import stats
%pylab inline

# Load the image and binarize it to +/-1 by thresholding at its mean.
img = imread('lettera.bmp')
arr = np.asarray(img, int)
mn = np.mean(arr)
arr[arr < mn] = -1
arr[arr >= mn] = 1
original_mean = np.mean(arr)
print(original_mean)

# Corrupt the clean +/-1 image with additive Gaussian noise.
sigma = 2
noisy_arr = arr + sigma * np.random.normal(size=arr.shape)

imshow(arr)
title("Clean Image")

noisy_mean = np.mean(noisy_arr)
print(noisy_mean)

figure()
imshow(noisy_arr > noisy_mean)
title("Noisy Image")

# First, set up our averaging kernel: each pixel gets the sum of its
# eight neighbours (the centre weight is zero).
w_conv = np.ones((3, 3))
w_conv[1, 1] = 0

# The simplest denoising: just average nearby pixels.
less_noisy_arr = convolve2d(noisy_arr, w_conv, mode='same')  # mode='same' keeps the image size
less_noisy_mean = np.mean(less_noisy_arr)
figure()
imshow(less_noisy_arr > less_noisy_mean)
title("Less Noisy Image by simple convolution")

figure()
num_iters = 20
for i in range(num_iters):
    less_noisy_arr = convolve2d(less_noisy_arr, w_conv, mode='same')
less_noisy_mean = np.mean(less_noisy_arr)
imshow(less_noisy_arr > less_noisy_mean)
title("Less Noisy Image by repeated convolution")

# Too much smoothing destroys the very structure we are trying to recover.
figure()
num_iters = 280
for i in range(num_iters):
    less_noisy_arr = convolve2d(less_noisy_arr, w_conv, mode='same')
less_noisy_mean = np.mean(less_noisy_arr)
imshow(less_noisy_arr > less_noisy_mean)
title("Less Noisy Image by too many repeated convolutions")

from scipy.sparse import dia_matrix, eye

def neighbour_matrix(shape):
    """Sparse matrix connecting each pixel to its eight neighbours.

    Built as a sum of shifted identities (a banded, Toeplitz-like matrix)
    whose offsets are the flattened positions of a 3x3 neighbourhood
    relative to its centre.
    """
    nrows, ncols = shape
    size = nrows * ncols
    z = np.zeros(shape)
    z[:3, :3] = 1
    z[1, 1] = 0
    z_vec = z.reshape(-1)
    # The centre z[1, 1] sits at flattened index ncols + 1 (note: columns,
    # not rows), so offsets are measured relative to that.
    non_zeros = [e - ncols - 1
                 for e, v in enumerate(z_vec[:ncols * 3]) if v > 0]
    toep_mat = dia_matrix((size, size))
    for non_zero in non_zeros:
        toep_mat = toep_mat + eye(size, size, non_zero)
    return toep_mat

def unnormalized_log_prior(x):
    """Ising-style log prior: the sum of x_i * x_j over neighbouring pixels."""
    x_flat = x.reshape(-1)
    return neighbour_matrix(x.shape).dot(x_flat).dot(x_flat)

# Smoother images score higher under the prior; the constant all-ones
# (and all-minus-ones) images score highest of all.
print(unnormalized_log_prior(2 * (noisy_arr > noisy_mean) - 1))
print(unnormalized_log_prior(2 * (less_noisy_arr > less_noisy_mean) - 1))
print(unnormalized_log_prior(np.ones(noisy_arr.shape)))
print(unnormalized_log_prior(-np.ones(noisy_arr.shape)))

def unnormalized_log_prior2(x):
    """Per-pixel neighbour sums: for each pixel, the sum of its eight neighbours."""
    return neighbour_matrix(x.shape).dot(x.reshape(-1))

# Damped mean-field updates combining the Ising prior with the Gaussian
# likelihood.  The log odds come from the fixed noisy observations
# (computed once, outside the loop), not from the evolving estimate.
logodds = (stats.norm.logpdf(noisy_arr, loc=1, scale=sigma)
           - stats.norm.logpdf(noisy_arr, loc=-1, scale=sigma))
noisy_arr_copy = noisy_arr.copy()
lmbda = 0.5
for i in range(15):
    neighbour_sums = unnormalized_log_prior2(noisy_arr_copy).reshape(noisy_arr_copy.shape)
    noisy_arr_copy = ((1 - lmbda) * noisy_arr_copy
                      + lmbda * np.tanh(neighbour_sums + 0.5 * logodds))

imshow(noisy_arr_copy)
# Mean absolute deviation of the soft (continuous) estimate from the clean image.
print(np.sum(np.abs(noisy_arr_copy - arr)) / arr.size)

# Threshold the soft estimate back to +/-1.
denoised_mu = np.mean(noisy_arr_copy)
noisy_arr_copy = 2 * (noisy_arr_copy > denoised_mu) - 1
figure()
imshow(noisy_arr_copy)
figure()
imshow(arr)
# Each mismatched pixel contributes |1 - (-1)| = 2, so this is twice the error rate.
print(np.sum(np.abs(noisy_arr_copy - arr)) / arr.size)
figure()
imshow(noisy_arr_copy - arr)
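# Aside (my addition, not in the original notebook): a quick sanity check
# that the sparse neighbour matrix agrees with a plain convolution by
# w_conv.  The two differ only at the image border, where the banded
# matrix wraps across row ends while convolve2d zero-pads, so we compare
# interior pixels only.
x_test = 2 * (noisy_arr > noisy_mean) - 1
via_matrix = unnormalized_log_prior2(x_test).reshape(x_test.shape)
via_conv = convolve2d(x_test, w_conv, mode='same')
print(np.allclose(via_matrix[1:-1, 1:-1], via_conv[1:-1, 1:-1]))  # expect True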
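# Aside (a sketch, not from the original notebook): a greedy alternative
# to the mean-field updates above.  This is a synchronous approximation
# to iterated conditional modes (ICM): every sweep, each pixel jumps to
# whichever sign maximises neighbour agreement plus its per-pixel log
# odds.  Textbook ICM updates pixels one at a time; updating them all at
# once is simpler here but can oscillate.
x_icm = 2.0 * (noisy_arr > noisy_mean) - 1  # initialise from the thresholded noisy image
for sweep in range(10):
    icm_neighbour_sums = convolve2d(x_icm, w_conv, mode='same')
    x_icm = np.where(icm_neighbour_sums + 0.5 * logodds >= 0, 1.0, -1.0)
figure()
imshow(x_icm)
title("ICM-style denoising (sketch)")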
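# Aside (my addition): a small helper that reports the fraction of
# mismatched pixels directly, avoiding the factor of two in the
# sum-of-absolute-differences measure used above.
def error_rate(estimate, truth):
    """Fraction of pixels where a +/-1 estimate disagrees with the truth."""
    return np.mean(estimate != truth)

print(error_rate(noisy_arr_copy, arr))
print(error_rate(x_icm, arr))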