Nathanaël Perraudin, Michaël Defferrard, Tomasz Kacprzak, Raphael Sgier
%load_ext autoreload
%autoreload 2
%matplotlib inline
import os
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import healpy as hp
import pygsp as pg
import itertools
from deepsphere import utils
# Notebook-wide plotting defaults.
plt.rcParams['figure.figsize'] = (17, 5) # (9, 4) for matplotlib notebook
nside = 16  # HEALPix resolution parameter: npix = 12 * nside**2
cm = plt.cm.RdBu_r  # diverging colormap for signed maps
cm.set_under('w')  # draw below-range pixels in white
def plot_spherical_harmonic(nside, l, m):
    """Synthesize and plot a single spherical harmonic Y_l^m on a HEALPix grid.

    Builds an alm vector that is zero everywhere except at the (l, m)
    coefficient, transforms it to a map, and shows a Mollweide projection.
    """
    lmax = l  # we only need coefficients up to degree l
    idx = hp.sphtfunc.Alm.getidx(lmax, l, m)
    size = hp.sphtfunc.Alm.getsize(lmax, mmax=lmax)
    print('{} spherical harmonics for l in [0, {}]'.format(size, lmax))
    print('l={}, m={} is at index {}'.format(l, m, idx))
    # One-hot spectrum: only the requested (l, m) coefficient is non-zero.
    alm = np.zeros(size, dtype=np.complex128)
    alm[idx] = 1
    harmonic_map = hp.sphtfunc.alm2map(alm, nside, lmax, verbose=False)
    hp.mollview(harmonic_map, title="Spherical harmonic of degree l={} and order m={}".format(l,m), cmap=cm)
    #hp.cartview(map, title=f"Spherical harmonic l={l}, m={m}")
    # HiddenPrints suppresses healpy's verbose graticule output.
    with utils.HiddenPrints():
        hp.graticule()
plot_spherical_harmonic(nside, l=2, m=0)
6 spherical harmonics for l in [0, 2] l=2, m=0 is at index 2
Compute the spherical harmonics up to $\ell_{max}$ and plot them.
# Sample every spherical harmonic up to degree 18 on the HEALPix grid.
harmonics = utils.compute_spherical_harmonics(nside, lmax=18)
# Shape is (n_pixels, n_harmonics) = (12*nside**2, (lmax+1)**2).
harmonics.shape
(3072, 361)
def plot_harmonics(harmonics, title=''):
    """Plot each column of `harmonics` as a Mollweide map, in a square grid.

    Parameters
    ----------
    harmonics : array of shape (n_pixels, n_harmonics)
        One map per column, in HEALPix NESTED ordering, assumed sorted by
        increasing degree l with order m running from -l to l.
    title : str
        Prefix for each subplot title.
    """
    n_harmonics = harmonics.shape[1]
    # Smallest square grid that fits all maps. Bare np.sqrt returns a float
    # and yields a too-small grid for non-square counts; round up instead.
    grid = int(np.ceil(np.sqrt(n_harmonics)))
    l, m = 0, 0
    for idx in range(n_harmonics):
        hp.mollview(harmonics[:, idx],
                    title='{}: l={}, m={}'.format(title, l, m),
                    nest=True,
                    sub=(grid, grid, idx+1),
                    max=np.max(np.abs(harmonics)),
                    min=-np.max(np.abs(harmonics)),
                    cbar=False,
                    cmap=cm)
        # Advance the (l, m) counters: m runs from -l to l for each degree l.
        m += 1
        if m > l:
            l += 1
            m = -l
        # Draw the graticule on the subplot just created; HiddenPrints
        # suppresses healpy's verbose output.
        with utils.HiddenPrints():
            hp.graticule()
plot_harmonics(harmonics[:, :16], 'Spherical harmonic')
The graph Fourier modes are the eigenvectors of the graph Laplacian. $$L = U \Lambda U^T$$
# Graph built on the HEALPix pixels; columns of graph.U are the Fourier modes.
graph = utils.healpix_graph(nside, lap_type='normalized', nest=True, dtype=np.float64)
graph.compute_fourier_basis()
2019-03-26 20:05:05,525:[WARNING](pygsp.graphs.graph.compute_fourier_basis): Computing the full eigendecomposition of a large matrix (3072 x 3072) is expensive. Consider decreasing n_eigenvectors or, if using the Fourier basis to filter, using a polynomial filter instead.
The weighted adjacency matrix is very sparse. Distance between pixels is not constant.
# Left: dense view of the (sparse) weighted adjacency matrix.
# Right: histogram of the non-zero edge weights.
fig, axes = plt.subplots(1, 2)
axes[0].imshow(graph.W.toarray())
axes[1].hist(graph.W[graph.W>0].T,30);
Fourier modes of the graph.
plot_harmonics(graph.U[:, :16], 'Eigenvector')
The eigenvalues are clearly organized in groups, which corresponds to angular frequencies $\ell$ of the spherical harmonics.
# Eigenvalues come in groups of 2l+1, mirroring spherical harmonic degrees.
plt.plot(graph.e[:50], '.-')
idx = 0
for l in range(7):
    plt.text(idx, graph.e[idx] + 0.005, 'l = {}'.format(l))
    idx += 2*l + 1  # each degree l contributes 2l+1 eigenvalues
The Fourier modes capture the spherical geometry.
The eigenvectors can be used to embed any graph in a 3D space. In our case, that should produce a sphere. That is Laplacian eigenmaps, an algorithm that preserves local distances.
Question: are the graph representation and the embedding actually equivalent?
# Laplacian eigenmaps: embed the graph in 3D using eigenvectors 1-3
# (eigenvector 0 is constant); the result should look like a sphere.
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(graph.U[:, 1], graph.U[:, 2], graph.U[:, 3], c=graph.d);
# The spherical harmonics obviously capture the geometry too.
# ax.scatter(harmonics[:, 1], harmonics[:, 2], harmonics[:, 3], c=graph.d);
Todo:
# Combinatorial Laplacian for the spherical harmonics.
# Rebuild the graph with the combinatorial Laplacian for comparison.
graph = utils.healpix_graph(nside, lap_type='combinatorial', nest=True, dtype=np.float64)
graph.compute_fourier_basis()
2019-03-26 20:05:38,716:[WARNING](pygsp.graphs.graph.compute_fourier_basis): Computing the full eigendecomposition of a large matrix (3072 x 3072) is expensive. Consider decreasing n_eigenvectors or, if using the Fourier basis to filter, using a polynomial filter instead.
Let us try to re-order the spherical harmonics with respect to the graph frequencies.
# Sort the sampled harmonics by their Rayleigh quotient on the graph
# Laplacian, i.e. by increasing "graph frequency".
ind = np.argsort(np.diag(harmonics.T @ graph.L @ harmonics))
harmonics_sort = harmonics[:,ind]
Spherical harmonics are not exactly orthogonal on the graph.
# Gram matrices: Euclidean inner products vs Laplacian-weighted products.
C_euclidean = harmonics_sort.T @ harmonics_sort
C_graph = (harmonics_sort.T @ graph.L @ harmonics_sort) #@ np.diag(1/(graph.e[:harmonics_sort.shape[1]] +0.0001))
fig, axes = plt.subplots(1, 2)
axes[0].imshow(C_euclidean)
axes[1].imshow(C_graph)
# Diagonals: harmonic norms (left), Rayleigh quotients vs eigenvalues (right).
fig, axes = plt.subplots(1, 2)
axes[0].plot(np.diag(C_euclidean))
axes[1].plot(np.diag(C_graph), label='diag(F^T L F)')
axes[1].plot(graph.e[:harmonics_sort.shape[1]],label='graph eigenvalues')
axes[1].legend()
<matplotlib.legend.Legend at 0x7fc109ce94e0>
win = hp.sphtfunc.pixwin(nside=32)
That is not an exact representation of the correspondence because, in the case of non-uniform sampling, the integrals (i.e., the SHT) are not equal to the dot products with the sampled harmonic functions (which aren't orthogonal and hence don't form a basis).
lmax = 10
# Total number of harmonics up to degree lmax: sum of 2l+1 = (lmax+1)**2.
n_harmonics = np.sum(np.arange(1, 2*lmax+2, 2))
print('{} harmonics for lmax = {}'.format(n_harmonics, lmax))
121 harmonics for lmax = 10
# Gram matrices: for orthonormal columns the Frobenius norm would be
# sqrt(n_harmonics) = sqrt(121) = 11.
C = harmonics[:, :n_harmonics].T @ harmonics[:, :n_harmonics]
print(np.linalg.norm(C, ord='fro'))
C = graph.U[:, :n_harmonics].T @ graph.U[:, :n_harmonics]
print(np.linalg.norm(C, ord='fro'))
# Cross-correlation between sampled harmonics and graph Fourier modes.
C = harmonics[:, :n_harmonics].T @ graph.U[:, :n_harmonics]
print(np.linalg.norm(C, ord='fro'))
plt.imshow(np.abs(C), cmap=plt.cm.gist_heat_r)
plt.colorbar()
11.000013221921 11.000000000000002 10.858836567926407
<matplotlib.colorbar.Colorbar at 0x7fc109c63668>
def show_non_orthogonality(nside, lmax):
    """Show that the sampled spherical harmonics are not exactly orthogonal.

    Top panel: self-correlation of the sampled harmonics.
    Bottom panel: correlation between the harmonics and the graph modes.
    """
    harmonics = utils.compute_spherical_harmonics(nside, lmax=lmax)
    graph = utils.healpix_graph(nside, lap_type='normalized', nest=True, dtype=np.float64)
    # Cumulative number of harmonics per degree; last entry is (lmax+1)**2.
    n_harmonics = np.cumsum(np.arange(1, 2*lmax+2, 2))
    graph.compute_fourier_basis(n_eigenvectors=n_harmonics[-1])
    fig, (ax_top, ax_bottom) = plt.subplots(2, 1, sharex=True)
    gram = harmonics.T @ harmonics
    ax_top.plot(np.sum(gram**2, axis=1))
    ax_top.plot(gram.diagonal())
    # Hence the rows and columns of the correlation matrix don't sum up to one.
    cross = harmonics.T @ graph.U[:, :harmonics.shape[1]]
    ax_bottom.plot(np.sum(cross**2, axis=0))
    ax_bottom.plot(np.sum(cross**2, axis=1))
show_non_orthogonality(nside=16, lmax=8)
TODO: the graph should be constructed (if that's even possible) such that the Fourier modes converge to the spherical harmonics while we sample more (i.e., nside increases).
# Parameters for the subspace-alignment comparison below.
lmax = 20
nsides = [4, 8, 16]
# Normalized corresponds better than combinatorial. Less difference between nsides.
# lap_type='combinatorial'
lap_type='normalized'
def compute_orhtogonality(nside, lmax, lap_type='normalized'):
    """Correlate the sampled spherical harmonics with the graph Fourier modes.

    Returns the correlation matrix `harmonics.T @ graph.U` and the cumulative
    harmonic counts per degree. (The misspelled name is kept: callers use it.)
    """
    # Cumulative count of harmonics up to each degree; entry l is (l+1)**2.
    n_harmonics = np.cumsum(np.arange(1, 2*lmax+2, 2))
    harmonics = utils.compute_spherical_harmonics(nside, lmax=lmax)
    graph = utils.healpix_graph(nside, lap_type=lap_type, nest=True, dtype=np.float64)
    # Cannot request more eigenvectors than the graph has nodes.
    n_eigenvectors = min(n_harmonics[-1], graph.N)
    graph.compute_fourier_basis(n_eigenvectors)
    return harmonics.T @ graph.U, n_harmonics
def compute_energy(C, lmax):
    """Return the fraction of energy in each diagonal block of C.

    Block l spans rows/columns l**2 to (l+1)**2 - 1, i.e. the 2l+1
    harmonics of degree l. The squared Frobenius norm of each block is
    normalized by the number of harmonics it contains.
    """
    energy_in = np.empty(lmax + 1)
    for ell in range(lmax + 1):
        # Cumulative count of harmonics below degree ell is ell**2.
        lo, hi = ell**2, (ell + 1)**2
        energy_in[ell] = np.sum(C[lo:hi, lo:hi]**2) / (hi - lo)
    return energy_in
# Subspace-alignment energy for each resolution.
energy = dict()
for nside in nsides:
    C, _ = compute_orhtogonality(nside, lmax, lap_type)
    energy[nside] = compute_energy(C, lmax)
# Plot the per-degree subspace alignment, in percent, for each resolution.
fig, ax = plt.subplots(figsize=(5, 2))
for nside in nsides:
    ax.plot(range(lmax+1), 100*energy[nside], '.-', label=rf'$N_{{side}}={nside}$')
ax.legend()
ax.set_ylabel('correspondence [%]')  # fixed label typo ('correspondance')
ax.set_xlabel(r'degree $\ell$')  # raw string: '\e' is an invalid escape
ax.set_xticks(range(0, lmax + 1, lmax//10))
fig.tight_layout();
# fig.savefig(os.path.join(pathfig, 'subspace_alignment.pdf'))
That is a better way to compare, as it uses the real spherical harmonic transform (SHT).
# nsides = [4, 8, 16, 32] # slow
# For each eigenvector, measure its angular power spectrum with the real SHT,
# then sum the spectra over each group of 2l+1 consecutive eigenvectors.
# nsides = [4, 8, 16, 32] # slow
nsides = [4, 8]
spectral_content = dict()
for nside in nsides:
    lmax = 3 * nside - 1  # HEALPix default band limit
    n_harmonics = np.cumsum(np.arange(1, 2*lmax+2, 2))[-1]  # (lmax+1)**2
    graph = utils.healpix_graph(nside, lap_type='normalized', nest=True, dtype=np.float64)
    graph.compute_fourier_basis(n_eigenvectors=n_harmonics)
    cl = np.empty((n_harmonics, lmax+1))
    for i in range(n_harmonics):
        # anafast expects RING ordering; the graph uses NESTED.
        eigenvector = hp.reorder(graph.U[:, i], n2r=True)
        # alm = hp.sphtfunc.map2alm(eigenvector)
        cl[i] = hp.sphtfunc.anafast(eigenvector, lmax=lmax)
    spectral_content[nside] = np.empty((lmax+1, lmax+1))
    start = 0
    for ell in range(lmax+1):
        end = start + (2 * ell + 1)
        spectral_content[nside][ell] = np.sum(cl[start:end,:], axis=0)
        start = end
# Left figure: spectral content matrices (rows: eigenvector groups, columns:
# degree l). Right figure: their diagonals, i.e. energy at the expected degree.
fig1, axes = plt.subplots(1, len(nsides))
fig2, ax2 = plt.subplots()
for ax, (nside, sc) in zip(axes, spectral_content.items()):
    sc = sc / sc[0, 0]  # normalize by the DC component
    im = ax.imshow(sc, cmap=plt.cm.gist_heat_r)
    fig1.colorbar(im, ax=ax)
    ax.set_title(rf'$N_{{side}}={nside}$')
    energy_in = np.diag(sc)
    ax2.plot(energy_in, label=rf'$N_{{side}}={nside}$')
ax2.legend();
# TODO: should it sum to one?
# Sanity check: norm of one eigenvector and of its power spectrum.
i = 10
eigenvector = hp.reorder(graph.U[:, i], n2r=True)
np.linalg.norm(eigenvector)
cl = hp.sphtfunc.anafast(eigenvector, lmax=lmax)
np.linalg.norm(cl)
0.002285872879387514
We don't care if the Fourier modes are not exactly the spherical harmonics, but we want the Laplacian operator to be close.
The Laplacian is a second order operator on the sphere (see Laplace's spherical harmonics):
$$ r^2 \nabla^2 Y_\ell^m(\theta, \phi) = -\ell (\ell + 1) Y_\ell^m(\theta, \phi). $$Plugging in the graph Laplacian $L = -\nabla^2$ and setting the radius $r=1$ gives
$$ L Y_\ell^m(\theta, \phi) = \ell (\ell + 1) Y_\ell^m(\theta, \phi) \\ L a_\ell^m = \ell (\ell + 1) a_\ell^m $$So multiplying a map with the Laplacian $L$ should affect its spectrum in a predictable way.
TODO:
hp.pixelfunc.pix2ang
or hp.query_disc
# Download the Planck CMB map if it is not already on disk.
# Fixed: the original line read ".folder = 'data'" (stray leading dot,
# a syntax error).
folder = 'data'
name = 'COM_CMB_IQU-smica_1024_R2.02_full.fits'
if not os.path.exists(os.path.join(folder, name)):
    url = 'https://irsa.ipac.caltech.edu/data/Planck/release_2/all-sky-maps/maps/component-maps/cmb/' + name
    utils.download(url, folder, name)
nside = 32
npix = 12 * nside**2 # hp.nside2npix
print('Nside = {}, Npix = {}'.format(nside, npix))
graph = utils.healpix_graph(nside, lap_type='normalized', nest=True, dtype=np.float64)
# Use the real Planck CMB map (True) or white noise (False).
if True:
    map1_n, _, _ = hp.read_map('data/COM_CMB_IQU-smica_1024_R2.02_full.fits', field=(0, 1, 3), nest=True)
    map1_n = hp.ud_grade(map1_n, nside_out=nside, order_in='NESTED')
else:
    map1_n = np.random.normal(size=hp.nside2npix(nside))
# map1_n -= map1_n.mean()
map1_r = hp.reorder(map1_n, n2r=True)  # RING ordering for the SHT
hp.mollview(map1_r)
cl1 = hp.sphtfunc.anafast(map1_r)
fig, ax = plt.subplots()
ax.plot(cl1);
Nside = 32, Npix = 12288 NSIDE = 1024 ORDERING = NESTED in fits file INDXSCHM = IMPLICIT
/home/defferra/miniconda3/envs/scnn/lib/python3.6/site-packages/healpy/fitsfunc.py:326: UserWarning: No INDXSCHM keyword in header file : assume IMPLICIT "assume {}".format(schm))
Ratio between the SHT of the map before and after multiplication by the Laplacian. The frequency response should be equal to $\ell (\ell + 1)$.
# Apply the graph Laplacian (NESTED ordering) and compare the spectra of the
# map before and after.
map2_n = graph.L @ map1_n
map2_r = hp.reorder(map2_n, n2r=True)
cl2 = hp.sphtfunc.anafast(map2_r)
cl12 = hp.sphtfunc.anafast(map1_r, map2_r)
ratio1 = cl2 / cl1
# Fixed: both curves carried the identical label 'Empirical response',
# making the legend ambiguous.
plt.plot(ratio1, label='Empirical response (auto-spectrum)')
ratio2 = cl12 / cl1
plt.plot(ratio2, label='Empirical response (cross-spectrum)')
#plt.plot(ratio[2:])
#plt.plot(area * ratio)
# The scaling issue (the asymptotic value of the error) is probably related to the area of the pixels.
area = 4*np.pi / npix
print('Area of a pixel: {:.2e}'.format(area))
l = np.arange(cl1.shape[0])
ideal = l*(l+1) * area / 4
plt.plot(l, ideal, label=r'Ideal response: $\ell(\ell+1)$')
plt.legend();
Area of a pixel: 1.02e-03
# Spectra of the Laplacian-filtered map (auto and cross with the original).
plt.plot(cl2);
plt.plot(cl12);
# Relative error of the empirical responses w.r.t. the ideal l(l+1) response.
error = ratio1 / ideal
plt.semilogy(error);
error = ratio2 / ideal
plt.semilogy(error);
/home/defferra/miniconda3/envs/scnn/lib/python3.6/site-packages/ipykernel_launcher.py:1: RuntimeWarning: divide by zero encountered in true_divide """Entry point for launching an IPython kernel. /home/defferra/miniconda3/envs/scnn/lib/python3.6/site-packages/ipykernel_launcher.py:4: RuntimeWarning: divide by zero encountered in true_divide after removing the cwd from sys.path.
We don't care if the Fourier modes are not exactly the spherical harmonics, but we want the Laplacian operator to be close. Or, at least, the filtering operations to be close.
Todo:
Functions:
healpy.sphtfunc.map2alm
=> spherical harmonic transform (SHT)
healpy.sphtfunc.almxfl
=> arbitrary smoothing
healpy.sphtfunc.smoothing
=> Gaussian smoothing
nside = 256
# Load the Planck map, downgrade to the current nside, convert to RING.
map, _, _ = hp.read_map('data/COM_CMB_IQU-smica_1024_R2.02_full.fits', field=(0, 1, 3), nest=True)
map = hp.ud_grade(map, nside_out=nside, order_in='NESTED')
map = hp.reorder(map, inp='NESTED', out='RING')
# map = np.random.normal(size=hp.nside2npix(nside))
lmax = 3 * nside - 1  # HEALPix default band limit
print("We'll compute {} frequencies".format(lmax))
hp.mollview(map)
NSIDE = 1024 ORDERING = NESTED in fits file INDXSCHM = IMPLICIT
/home/defferra/miniconda3/envs/scnn/lib/python3.6/site-packages/healpy/fitsfunc.py:326: UserWarning: No INDXSCHM keyword in header file : assume IMPLICIT "assume {}".format(schm))
We'll compute 767 frequencies
# SHT coefficients. healpy orders alm m-major: all l for m=0, then m=1, etc.
alm = hp.sphtfunc.map2alm(map)
plt.plot(np.abs(alm))
l, m = hp.Alm.getlm(lmax=lmax)
print(list(zip(l, m))[:100])
[(0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0), (7, 0), (8, 0), (9, 0), (10, 0), (11, 0), (12, 0), (13, 0), (14, 0), (15, 0), (16, 0), (17, 0), (18, 0), (19, 0), (20, 0), (21, 0), (22, 0), (23, 0), (24, 0), (25, 0), (26, 0), (27, 0), (28, 0), (29, 0), (30, 0), (31, 0), (32, 0), (33, 0), (34, 0), (35, 0), (36, 0), (37, 0), (38, 0), (39, 0), (40, 0), (41, 0), (42, 0), (43, 0), (44, 0), (45, 0), (46, 0), (47, 0), (48, 0), (49, 0), (50, 0), (51, 0), (52, 0), (53, 0), (54, 0), (55, 0), (56, 0), (57, 0), (58, 0), (59, 0), (60, 0), (61, 0), (62, 0), (63, 0), (64, 0), (65, 0), (66, 0), (67, 0), (68, 0), (69, 0), (70, 0), (71, 0), (72, 0), (73, 0), (74, 0), (75, 0), (76, 0), (77, 0), (78, 0), (79, 0), (80, 0), (81, 0), (82, 0), (83, 0), (84, 0), (85, 0), (86, 0), (87, 0), (88, 0), (89, 0), (90, 0), (91, 0), (92, 0), (93, 0), (94, 0), (95, 0), (96, 0), (97, 0), (98, 0), (99, 0)]
# Angular power spectrum of the map.
cl = hp.sphtfunc.anafast(map)
plt.plot(cl)
[<matplotlib.lines.Line2D at 0x7fc1023ca518>]
# Apply the graph Laplacian and inspect the resulting alm.
# NOTE(review): `map` is in RING order at this point while the graph was
# built with nest=True — confirm the ordering mismatch is intended.
graph = utils.healpix_graph(nside, lap_type='normalized', nest=True, dtype=np.float64)
map = graph.L @ map
alm = hp.sphtfunc.map2alm(map)
plt.plot(np.abs(alm))
[<matplotlib.lines.Line2D at 0x7fc102401518>]
# Power spectrum after applying the Laplacian.
cl = hp.sphtfunc.anafast(map)
plt.plot(cl)
[<matplotlib.lines.Line2D at 0x7fc109549358>]
Discrete low-pass filtering using the graph eigenvectors and the spherical harmonics. Note that taking the dot product between the sampled spherical harmonics and the map to filter is not equivalent to performing the filtering with the real SHT.
def compare_filtering(nside=16, n_signals=100):
    """Compare low-pass filtering with graph eigenvectors vs sampled harmonics.

    Projects random signals on the span of the first 16 graph Fourier modes
    and on the span of the first 16 sampled spherical harmonics, then plots
    both projections and their difference.

    Assumes the global `harmonics` was computed for the same `nside` —
    TODO confirm before calling with a non-default resolution.
    """
    n_harmonics = 16
    # Fixed: derive the pixel count from the `nside` parameter instead of
    # the global `harmonics`, so the function is consistent with itself.
    n_pixels = hp.nside2npix(nside)
    graph = utils.healpix_graph(nside, lap_type='normalized', nest=True, dtype=np.float64)
    graph.compute_fourier_basis(n_eigenvectors=n_harmonics)
    signals = np.random.uniform(size=(n_pixels, n_signals))
    eigenvectors = graph.U
    harmonics_ = harmonics[:, :n_harmonics]
    # Orthogonal projections on each 16-dimensional subspace.
    signals_sphere = harmonics_ @ harmonics_.T @ signals
    signals_graph = eigenvectors @ eigenvectors.T @ signals
    hp.mollview(signals[:, 0], nest=True)
    hp.mollview(signals_sphere[:, 0], nest=True)
    hp.mollview(signals_graph[:, 0], nest=True)
    error = signals_graph - signals_sphere
    hp.mollview(error[:, 0], nest=True)
    print(np.linalg.norm(error, ord='fro'))
compare_filtering()
2.795195363732162
The angular power spectrum is given by $$ \hat{C}_\ell = \frac{1}{2\ell + 1} \sum_m |\hat{a}_{\ell m}|^2 $$ As such, $\hat{C}_\ell$ is the expected variance of the $\hat{a}_{\ell m}$ at order $\ell$. It also implies the standard result that the total power at the angular wavenumber $\ell$ is $(2\ell + 1) \hat{C}_\ell$ because there are $2\ell + 1$ modes for each $\ell$.
Todo:
# Power spectral density of the map up to lmax, on a log scale.
psd = hp.sphtfunc.anafast(map, lmax=lmax)
plt.semilogy(psd);
TODO:
Functions:
healpy.sphtfunc.almxfl
=> arbitrary smoothing
healpy.sphtfunc.smoothing
=> Gaussian smoothing
nside = 256
# Reload the Planck map at the current nside, in RING ordering.
map, _, _ = hp.read_map('data/COM_CMB_IQU-smica_1024_R2.02_full.fits', field=(0, 1, 3), nest=True)
map = hp.ud_grade(map, nside_out=nside, order_in='NESTED')
map = hp.reorder(map, inp='NESTED', out='RING')
hp.mollview(map)
NSIDE = 1024 ORDERING = NESTED in fits file INDXSCHM = IMPLICIT
/home/defferra/miniconda3/envs/scnn/lib/python3.6/site-packages/healpy/fitsfunc.py:326: UserWarning: No INDXSCHM keyword in header file : assume IMPLICIT "assume {}".format(schm))
Smoothing by filtering with spherical harmonics, i.e., do an element-wise multiplication in the spectral domain after a spherical transform.
# Gaussian smoothing via the spherical harmonic transform.
smooth = hp.sphtfunc.smoothing(map, sigma=0.01, verbose=False)
hp.mollview(smooth)
Smoothing by filtering on graphs.
# Equivalent smoothing with a heat-kernel graph filter (Chebyshev approx).
graph = utils.healpix_graph(nside, lap_type='normalized', nest=True, dtype=np.float64)
graph.estimate_lmax()  # needed by the Chebyshev approximation
map = hp.reorder(map, inp='RING', out='NESTED')  # the graph uses NESTED
filter = pg.filters.Heat(graph, tau=30)
filter = filter.approximate('Chebyshev', order=10)
filter.plot()
smooth = filter.filter(map)
hp.mollview(smooth, nest=True)
Compare the speed of both approaches.
We limit OpenMP (used by HEALPix) to use a single core as graph convolutions are not parallelized yet. While the mutliplication of sparse matrices with dense vectors can be parallelized, it is not implemented by scipy. It's hopefully implemented in tensorflow or pytorch.
# Parameters and result arrays for the speed comparison.
nsides = [64, 128, 256, 512, 1024, 2048] # Max 1024 for the real Planck map.
# Polynomial orders for graph filtering.
orders = [5, 15]
# Largest order (angular frequency) for the spherical harmonic transform.
# lmax = lmax * nsides. HEALPix default is 3*nside-1.
lmax = [2, 3]
# Number of OpenMP threads for spherical harmonics.
# TODO: set before starting jupyter.
os.environ['OMP_NUM_THREADS'] = '1'
times_graph = np.zeros((len(nsides), len(orders)))
times_sphere = np.zeros((len(nsides), len(lmax)))
times_part = np.zeros((len(nsides), len(orders), 3))  # 3 partial-sphere sizes
# Time graph filtering (full sphere and parts) against libsharp smoothing,
# for each resolution in `nsides`.
for i, nside in enumerate(nsides):
    print('Timing nside: {}'.format(nside))
    # map = hp.ud_grade(map_cmb, nside_out=nside, order_in='NESTED')
    map = np.random.normal(size=hp.nside2npix(nside))
    # Filtering on the graph. Need the nested ordering.
    graph = utils.healpix_graph(nside, lap_type='normalized', nest=True, dtype=np.float64)
    graph.estimate_lmax()
    for j, order in enumerate(orders):
        filter = pg.filters.Heat(graph, tau=30).approximate('Chebyshev', order=order)
        t = time()
        smooth = filter.filter(map)
        times_graph[i, j] = time() - t
    print(' * Graph full: done')
    # Same timing, restricted to a subset of the pixels (partial sphere).
    for k, o in enumerate([1,2,4]):
        # Filtering on the graph. Need the nested ordering.
        # NOTE(review): (nside//o)**2 selects a prefix of the NESTED pixels —
        # presumably part of one base face; confirm the intended subset.
        graph = utils.healpix_graph(nside, lap_type='normalized', nest=True, dtype=np.float64, indexes=np.arange((nside//o)**2))
        graph.estimate_lmax()
        for j, order in enumerate(orders):
            filter = pg.filters.Heat(graph, tau=30).approximate('Chebyshev', order=order)
            t = time()
            smooth = filter.filter(map[:(nside//o)**2])
            times_part[i, j, k] = time() - t
    print(' * Graph part: done')
    # Filtering with the spherical harmonics. Need the ring ordering.
    map = hp.reorder(map, inp='NESTED', out='RING')
    for j, lm in enumerate(lmax):
        t = time()
        hp.sphtfunc.smoothing(map, sigma=0.01, verbose=False, lmax=lm*nside)
        times_sphere[i, j] = time() - t
    print(' * Libsharp: done')
# Reset OpenMP to use all available cores.
del os.environ['OMP_NUM_THREADS']
Timing nside: 64 * Graph full: done * Graph part: done * Libsharp: done Timing nside: 128 * Graph full: done * Graph part: done * Libsharp: done Timing nside: 256 * Graph full: done * Graph part: done * Libsharp: done Timing nside: 512 * Graph full: done * Graph part: done * Libsharp: done Timing nside: 1024 * Graph full: done * Graph part: done * Libsharp: done Timing nside: 2048 * Graph full: done * Graph part: done * Libsharp: done
# Save results for the plot in the paper.
# Create the output folder first: np.savez raises if 'results/' is missing.
os.makedirs('results', exist_ok=True)
np.savez('results/filtering_speed.npz', nsides=nsides, orders=orders,
         lmax=lmax, times_sphere=times_sphere, times_graph=times_graph, times_part=times_part)
# Processing time vs number of pixels, for both filtering methods.
fig, ax = plt.subplots()
npix = [hp.nside2npix(nside) for nside in nsides]
ax.loglog(npix, times_graph, '.-')
ax.loglog(npix, times_sphere, ':')
labels = ['Graph, polynomial order {}'.format(order) for order in orders]
# Raw string: '\ell' is an invalid (deprecated) escape in a normal string.
labels += [r'Spherical harmonics, $\ell_{{max}}$ = {}$N_{{side}}$'.format(lm) for lm in lmax]
ax.legend(labels)
for i, nside in enumerate(nsides):
    ax.text(npix[i], times_sphere[i, -1] * 1.8, 'Nside = {}'.format(nside), horizontalalignment='center')
ax.set_ylim(0.8 * times_graph.min(), 3 * times_sphere.max())
ax.set_xlabel('Number of pixels')
ax.set_ylabel('Processing time [s]')
ax.set_title('Single core performance');
#ax.ticklabel_format(style='sci', axis='x')
# Graph filtering time vs polynomial order.
fig, ax = plt.subplots()
ax.semilogy(orders, times_graph.T, '.-')
labels = ['Filtering with graphs, Nside = {}'.format(nside) for nside in nsides]
ax.legend(labels)
ax.set_xlabel('Polynomial order')
ax.set_xticks(orders)
ax.set_ylabel('Processing time [s]');
# Spherical harmonic filtering time vs band limit.
fig, ax = plt.subplots()
ax.semilogy(lmax, times_sphere.T, '.-')
labels = ['Filtering with spherical harmonics, Nside = {}'.format(nside) for nside in nsides]
ax.legend(labels)
ax.set_xlabel('lmax')
ax.set_xticks(lmax)
ax.set_ylabel('Processing time [s]');
As a graph representation of the sphere is not dependent on the sampling, we can compute spherical harmonics from random samplings of the sphere. As the sampling increases, the eigenvectors approach the spherical harmonics.
Do the eigenvalues converge to the orders $\ell$, i.e. do the stairs become flatter, when increasing $N_{pix}$? No, they mostly stay the same. We are not in the Belkin & Niyogi setting.
# Graph Fourier modes of random point samplings of the sphere, at three
# increasing sampling densities.
n_harmonics = 16
n_points = [100, 500, 1000]
fig = plt.figure()
for i, n in enumerate(n_points):
    ax = fig.add_subplot(1, 3, i+1, projection='3d')
    graph = pg.graphs.Sphere(nb_pts=n)
    graph.compute_fourier_basis(n_eigenvectors=n_harmonics)
    graph.plot_signal(graph.U[:, 1], edges=False, ax=ax)  # second mode
    _ = ax.axis('off')
Tweak the part graph such that the eigenvectors computed on parts of the sphere are as if we would have restricted the global eigenvectors to a part of sphere.
Todo: You could try it on a regular grid. The simplest way to check whether it is possible is to verify whether a circulant matrix is necessary to obtain complex exponentials as eigenvectors.