import cPickle
%pylab --no-import
from scipy.io import loadmat
import feature_learning
reload(feature_learning)
## test plot images
# The .mat file's 'IMAGES' array has its last axis indexing images;
# transpose (2, 0, 1) reorders it to (n_images, height, width).
images = loadmat('data/starter/IMAGES.mat')['IMAGES'].transpose((2, 0, 1))
pylab.gray()
feature_learning.plot_images(images, (2, 5), 2.5, suptitle='raw images')
## test extract patches
# sample_patches presumably draws random square sub-windows from the images;
# printed shape below shows (npatches, patch_sz, patch_sz) -- TODO confirm
# against feature_learning source.
patches = feature_learning.sample_patches(images, npatches = 10000, patch_sz = 8)
print patches.shape
feature_learning.plot_images(patches[:25], (5, 5), 0.7, suptitle='random patches')
# Raw patch values fall outside [0, 1] (see printed max/min), hence the
# normalization step below.
print patches.max(), patches.min()
## test normalization
# normalize_image01 appears to squash intensities into [0.1, 0.9] per the
# printed min/max in the captured output -- verify against its implementation.
normalized_patches = feature_learning.normalize_image01(patches)
print normalized_patches.min(), normalized_patches.max()
feature_learning.plot_images(normalized_patches[:25], (5, 5), 0.7, suptitle='normalized patches')
# [captured notebook output]
# Using matplotlib backend: module://IPython.kernel.zmq.pylab.backend_inline Populating the interactive namespace from numpy and matplotlib (10000, 8, 8) 2.21938562393 -1.89389848709 0.1 0.9
# Using gpu device 0: Quadro 4000
# <matplotlib.figure.Figure at 0x92a1590>
# Flatten each patch into a row feature vector: (N, 8, 8) -> (N, 64),
# as confirmed by the two shape printouts below.
feats = normalized_patches.reshape((normalized_patches.shape[0], -1))
print normalized_patches.shape
print feats.shape
# [captured notebook output] (10000, 8, 8) (10000, 64)
## TEST PCA Transformation
reload(feature_learning)
nsamples, nfeats = feats.shape
# X.T dot X is the (unnormalized) feature scatter matrix; plotting it as an
# image visualizes feature correlations before/after each transform.
pylab.imshow(np.dot(feats.T, feats), cmap=cm.jet)
title('cov of raw patches')
## test pca w/o reduction, w/o whitening
# Keep all nfeats components: a pure rotation, so the transformed scatter
# matrix should be diagonal but with unequal variances.
pca = feature_learning.PCATransform(ncomponents = nfeats)
pca_X = pca.fit_transform(feats)
print pca_X.shape
figure()
pylab.imshow(np.dot(pca_X.T, pca_X), cmap=cm.jet)
title('cov of no reduction, no whitening')
## test pca w reduction, w/o whitening
# Keep only the top half of the components (dimensionality reduction).
pca = feature_learning.PCATransform(ncomponents = nfeats/2)
pca_X = pca.fit_transform(feats)
print pca_X.shape
figure()
pylab.imshow(np.dot(pca_X.T, pca_X), cmap=cm.jet)
title('cov of reduction, no whitening')
## test pca w/o reduction, w whitening
# With whitening the transformed scatter should approximate a scaled
# identity matrix (equal variance in every direction).
pca = feature_learning.PCATransform(ncomponents = nfeats)
pca_X = pca.fit_transform(feats, whiten=True)
print pca_X.shape
figure()
pylab.imshow(np.dot(pca_X.T, pca_X), cmap=cm.jet)
title('cov of no reduction, whitening')
## test pca w reduction, w whitening
# Combined reduction + whitening.
pca = feature_learning.PCATransform(ncomponents = nfeats/2)
pca_X = pca.fit_transform(feats, whiten=True)
print pca_X.shape
figure()
pylab.imshow(np.dot(pca_X.T, pca_X), cmap=cm.jet)
title('cov of reduction, whitening')
# [captured notebook output]
# (10000, 64) (10000, 32) (10000, 64) (10000, 32)
# <matplotlib.text.Text at 0xc25bc90>
## TEST PCA Transformation
# (Section actually exercises ZCATransform; the heading is copied from the
# cell above.)
feats = normalized_patches.reshape((normalized_patches.shape[0], -1))
print normalized_patches.shape
print feats.shape
reload(feature_learning)
nsamples, nfeats = feats.shape
# Remove each sample's own mean (axis=1 is the per-patch mean, i.e. mean
# intensity of the 64 pixels), not the per-feature column mean.
feats = feats - np.mean(feats, axis = 1)[:, np.newaxis]
pylab.imshow(np.dot(feats.T, feats), cmap=cm.jet)
title('zero mean cov of raw patches')
## test pca w/o reduction, w/o whitening
# ZCA has no component-count argument here: it keeps all 64 dimensions
# (printed shapes stay (10000, 64)).
zca = feature_learning.ZCATransform()
zca_X = zca.fit_transform(feats)
print zca_X.shape
figure()
pylab.imshow(np.dot(zca_X.T, zca_X), cmap=cm.jet)
title('cov of ZCA no whitening')
## test zca w/o reduction, w whitening
# ZCA whitening decorrelates features while staying close to the original
# pixel space; scatter matrix should look near-diagonal.
zca_X = zca.fit_transform(feats, whiten=True)
print zca_X.shape
figure()
pylab.imshow(np.dot(zca_X.T, zca_X), cmap=cm.jet)
title('cov of ZCA whitening')
# [captured notebook output]
# (10000, 8, 8) (10000, 64) (10000, 64) (10000, 64)
# <matplotlib.text.Text at 0xc26fc50>
## plot out effects of PCA-whitening and ZCA-whitening
# digits.pkl holds a (X, y) pair: 42000 flattened 28x28 MNIST-style digits
# plus labels (see printed shapes in the captured output).
digits = cPickle.load(open('data/digits.pkl'))
digits_X, digits_y = digits
print digits_X.shape, digits_y.shape
reload(feature_learning)
feature_learning.plot_images(digits_X[:25].reshape((-1, 28, 28)),
                             (5, 5), suptitle='raw digits')
## PCA whitening
# Full 784-component PCA with whitening; result stays 28x28-reshapeable.
digits_pca_white = feature_learning.PCATransform(28*28).fit_transform(digits_X, whiten=True)
feature_learning.plot_images(digits_pca_white[:25].reshape((-1, 28, 28)),
                             (5, 5), suptitle='pca white digits')
## PCA reduction without whitening
# 256 components; the reduced vectors are displayed as 16x16 "images"
# purely for visualization (the components are not spatial pixels).
digits_pca_white = feature_learning.PCATransform(16*16).fit_transform(digits_X, whiten=False)
feature_learning.plot_images(digits_pca_white[:25].reshape((-1, 16, 16)),
                             (5, 5), suptitle='pca reduced digits')
## ZCA whitening
# ZCA keeps the original dimensionality, so 28x28 reshape is meaningful.
digits_zca_white = feature_learning.ZCATransform().fit_transform(digits_X, whiten=True)
feature_learning.plot_images(digits_zca_white[:25].reshape((-1, 28, 28)),
                             (5, 5), suptitle='zca white digits')
# [captured notebook output] (42000, 784) (42000,)
## TEST Sparse Autoencoder
reload(feature_learning)
## normalization does not work with digits images
## as they are NOT natural images
#normalized_digits = feature_learning.normalize_image01(digits_X)
# Pixels are 0..255 (see printout below); simple division maps them to [0, 1].
normalized_digits = digits_X / 255.
#normalized_digits = digits_X
print digits_X.min(), digits_X.max()
print normalized_digits.min(), normalized_digits.max()
n_sample, n_vis = normalized_digits.shape
# 196 hidden units -> weights visualize as a 14x14 grid of 28x28 filters.
n_hid = 196
# Hyperparameters: target activation sparsity, L2 weight decay, and the
# sparsity-penalty weight (typical sparse-autoencoder settings).
sparsity = 0.1
l2_coeff = 3e-3
sparsity_coeff = 3
# NOTE(review): "SparseAuoEncoder" (missing 't') matches the actual class
# name in feature_learning per the repr in the captured output; renaming
# would have to happen in that module first.
sae = feature_learning.SparseAuoEncoder(n_vis, n_hid, sparsity, l2_coeff, sparsity_coeff)
# Train on the first 10k digits only; maxfun caps the optimizer iterations
# and iprint=1 asks for per-iteration progress -- presumably L-BFGS-style
# options, TODO confirm in feature_learning.
sae.fit(normalized_digits[:10000], maxfun = 400, iprint=1)
# restore_params appears to unpack the flat parameter vector into encoder/
# decoder weights and biases; W1 is (n_vis, n_hid) given the transpose below.
W1, W2, b1, b2 = sae.restore_params()
feature_learning.plot_images(W1.T.reshape((-1, 28, 28)), (14, 14))
# [captured notebook output]
# 0 255 0 255
# <feature_learning.SparseAuoEncoder at 0x226a5bd0>
## hard threshold normalization
# Re-extract and re-plot the learned encoder filters; these two cells
# presumably follow edits to plot_images' normalization strategy in
# feature_learning (the code here is identical both times) -- TODO confirm.
W1, W2, b1, b2 = sae.restore_params()
feature_learning.plot_images(W1.T.reshape((-1, 28, 28)), (14, 14))
## normalization by natrual image methods
W1, W2, b1, b2 = sae.restore_params()
feature_learning.plot_images(W1.T.reshape((-1, 28, 28)), (14, 14))