Supporting material for paper published in:
Scientific Reports - https://www.nature.com/articles/srep26348
Notebook written by:
Suhas Somnath, and Chris R. Smith
The Center for Nanophase Materials Science and The Institute for Functional Imaging for Materials
Oak Ridge National Laboratory
1/19/2017
Here, we will be working with four-dimensional datasets acquired using a scanning transmission electron microscope (STEM). These datasets have four dimensions - two (x, y) dimensions from the position of the electron beam, where each spatial pixel contains a two-dimensional (u, v) image, called a ronchigram, recorded by the detector. Though ronchigrams are typically averaged down to two scalar values (bright field and dark field), retaining the raw ronchigrams enables deeper investigation of the data to reveal the existence of different phases in the material and other patterns that would not be visible in the averaged data.
[Schematic figure: image courtesy of Jean Bilheux from the neutron imaging GitHub repository.]
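To make the data layout concrete, here is a minimal, self-contained sketch of the "virtual detector" averaging mentioned above: a random NumPy array stands in for real data, the scan and detector sizes are made up, and the 20-pixel disk radius is an arbitrary choice for illustration.
# Sketch only: synthetic stand-in for a 4D STEM dataset
import numpy as np

demo_rows, demo_cols = 64, 64        # scan (x, y) positions - assumed sizes
det_rows, det_cols = 128, 128        # detector (u, v) pixels - assumed sizes
data_4d = np.random.rand(demo_rows, demo_cols, det_rows, det_cols)

# Distance of every detector pixel from the center of the detector
v_grid, u_grid = np.mgrid[0:det_rows, 0:det_cols]
radius = np.hypot(u_grid - det_cols // 2, v_grid - det_rows // 2)

# Virtual detectors: a central disk (bright field) and everything else (dark field)
bf_mask = radius < 20
df_mask = ~bf_mask

# Boolean-indexing the two detector axes leaves one scalar per scan position
bright_field = data_4d[:, :, bf_mask].sum(axis=-1)   # shape: (demo_rows, demo_cols)
dark_field = data_4d[:, :, df_mask].sum(axis=-1)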
# Ensure python 3 compatibility
from __future__ import division, print_function, absolute_import
import os
# Import necessary libraries:
import h5py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from IPython.display import display, HTML
import ipywidgets as widgets
from sklearn.cluster import KMeans
import sys
import pyUSID as usid
import pycroscopy as px
# Make Notebook take up most of page width
display(HTML(data="""
<style>
div#notebook-container { width: 95%; }
div#menubar-container { width: 65%; }
div#maintoolbar-container { width: 99%; }
</style>
"""))
# set up notebook to show plots within the notebook
%matplotlib notebook
usid.plot_utils.use_nice_plot_params()
For simplicity, we will use a dataset that has already been translated from its original data format into a Universal Spectroscopy and Imaging Data (USID) formatted hierarchical data format (HDF5 or H5) file. For more information regarding USID, HDF5, etc., please see the documentation on our GitHub projects.
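As an aside, here is a hedged sketch of what such a translation might look like for an in-memory 4D array, using pyUSID's NumpyTranslator and Dimension classes (the exact class names and signatures depend on the installed pyUSID version; the file name and dimension sizes below are made up). The file used in this notebook has already been translated, so this step is not needed here.
# Sketch only: writing a synthetic 4D array into a USID-formatted HDF5 file.
# NumpyTranslator / Dimension are assumed to match the installed pyUSID version.
demo_data = np.random.rand(64, 64, 128, 128)
n_rows, n_cols, n_u, n_v = demo_data.shape

# USID stores the data as a 2D matrix: positions along rows, spectral values along columns
data_2d = demo_data.reshape(n_rows * n_cols, n_u * n_v)

pos_dims = [usid.Dimension('Y', 'nm', n_rows),
            usid.Dimension('X', 'nm', n_cols)]
spec_dims = [usid.Dimension('V', 'pixels', n_u),
             usid.Dimension('U', 'pixels', n_v)]

translator = usid.NumpyTranslator()
_ = translator.translate('example_4D_STEM.h5', 'Raw_Data', data_2d,
                         'Intensity', 'counts', pos_dims, spec_dims)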
# Select a file to work on:
#h5_path = px.io_utils.file_dialog('*.h5', '4D STEM dataset formatted according to USID')
h5_path = '/Users/syz/Dropbox (ORNL)/Papers/pycroscopy/Figures/Figure_2/4D_STEM/20120212_21_GB_float32_downsampled64x64_corrected.h5'
print('Working on:\n' + h5_path)
# Open the file
h5_file = h5py.File(h5_path, mode='r+')
save_figs = False
folder_path, _ = os.path.split(h5_path)
figures_folder = os.path.join(folder_path, 'Figures')
if not os.path.exists(figures_folder):
    os.mkdir(figures_folder)
Look at the contents of this file:
usid.hdf_utils.print_tree(h5_file, main_dsets_only=True)
# Select the dataset containing the raw data to start working with:
h5_main = usid.hdf_utils.find_dataset(h5_file, 'Raw_Data')[-1]
# Upgrade this object from a regular HDF5 dataset to a USIDataset:
h5_main = usid.USIDataset(h5_main)
# Read some necessary parameters:
h5_pos_inds = h5_main.h5_pos_inds
num_rows, num_cols = h5_main.pos_dim_sizes
h5_spec_inds = h5_main.h5_spec_inds
num_sensor_rows, num_sensor_cols = h5_main.spec_dim_sizes
h5_avg_spat = usid.hdf_utils.find_dataset(h5_file, 'Spectroscopic_Mean')[-1]
h5_avg_cbed = h5_avg_spat.parent['Mean_Ronchigram']
fig, axes = plt.subplots(ncols=2, figsize=(8, 4))
for axis, dset, title in zip(axes.flat, [h5_avg_spat, h5_avg_cbed], ['Dark-Field Image', 'Mean CBED pattern']):
    img = np.reshape(dset[()], (int(dset.size ** 0.5), int(dset.size ** 0.5)))
    usid.plot_utils.plot_map(axis, img)  # cmap='gray' could be passed here for grayscale
    axis.set_title(title)
fig.tight_layout()
if save_figs:
    fig.savefig(os.path.join(figures_folder, 'Averaged_data.pdf'), format='pdf', bbox_inches='tight')
# Pick a scan position near the center of the image
coarse_row = int(0.5 * num_rows)
coarse_col = int(0.5 * num_cols)
# Row-major index of this (row, col) position in the 2D main dataset
coarse_pos = coarse_row * num_cols + coarse_col
current_ronch = np.reshape(h5_main[coarse_pos], (num_sensor_rows, num_sensor_cols))
fig, axes = plt.subplots(ncols=2, figsize=(14,7))
axes[0].set_title('Mean Response')
main_map = axes[0].imshow(np.reshape(h5_main.parent['Spectroscopic_Mean'], (num_rows, num_cols)),
cmap=px.plot_utils.cmap_jet_white_center(), origin='lower')
main_vert_line = axes[0].axvline(x=coarse_col, color='k')
main_hor_line = axes[0].axhline(y=coarse_row, color='k')
axes[1].set_title('Ronchigram at current pixel')
img_zoom = axes[1].imshow(current_ronch,cmap=px.plot_utils.cmap_jet_white_center(), origin='lower')
def move_zoom_box(event):
    """Update the crosshair and the displayed ronchigram when the map is clicked."""
    if not main_map.axes.in_axes(event):
        return
    coarse_col = int(round(event.xdata))
    coarse_row = int(round(event.ydata))
    # Recent matplotlib versions expect sequences for line data
    main_vert_line.set_xdata([coarse_col, coarse_col])
    main_hor_line.set_ydata([coarse_row, coarse_row])
    # Row-major index of the selected position in the 2D main dataset
    coarse_pos = coarse_row * num_cols + coarse_col
    current_ronch = np.reshape(h5_main[coarse_pos], (num_sensor_rows, num_sensor_cols))
    img_zoom.set_data(current_ronch)
    # img_zoom.set_clim(vmax=ronch_max, vmin=ronch_min)
    fig.canvas.draw()
cid = main_map.figure.canvas.mpl_connect('button_press_event', move_zoom_box)
# widgets.interact(move_zoom_box, coarse_row=(0, num_rows, 1),
# coarse_col=(0, num_cols, 1));
SVD decomposes data (arranged as position x value) into a sequence of orthogonal components arranged in descending order of variance. The first component contains the most significant trend in the data. The second component contains the next most significant trend that is orthogonal to all previous components (here, just the first component). Each component consists of the trend itself (an eigenvector, stored in V), the spatial variation of this trend (the loading or abundance map, stored in U), and the variance, or statistical significance, of the component (stored in S).
Here, SVD essentially compares every single ronchigram with every other ronchigram to find statistically significant trends in the dataset. Such correlation would be infeasible if the ronchigrams were averaged to bright-field and dark-field scalar values.
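Conceptually, the decomposition is just a matrix factorization. Below is a small stand-alone NumPy sketch on a random stand-in for the (positions x spectral values) matrix; the pycroscopy SVD process used next adds the HDF5 bookkeeping and typically relies on a faster, approximate solver.
# Sketch only: economy SVD of a small random (positions x spectral values) matrix
demo_mat = np.random.rand(1024, 256)

u_mat, s_vec, v_mat = np.linalg.svd(demo_mat, full_matrices=False)
# u_mat: (1024, 256) abundance maps, one column per component
# s_vec: (256,) singular values in descending order
# v_mat: (256, 256) eigenvectors, one row per component

# Truncating to the first k components gives the best rank-k approximation
k = 16
approx = np.dot(u_mat[:, :k] * s_vec[:k], v_mat[:k])
print('Rank-16 reconstruction error:', np.linalg.norm(demo_mat - approx))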
# Choose how many components you want
num_svd_comps = 256
proc = px.processing.SVD(h5_main, num_components=num_svd_comps)
h5_svd_group = proc.compute()
h5_u = h5_svd_group['U']
h5_v = h5_svd_group['V']
h5_s = h5_svd_group['S']
The plot below shows the variance, or statistical significance, of the SVD components. The first few components contain the most significant information, while the last few components mainly contain noise.
Note also that the plot below is a log-log plot: the importance of each subsequent component drops off rapidly. The fraction of variance per component is computed explicitly after the plot.
# Choose how many components of U and V to display
num_plot_comps = 16
# Visualize variance of the principal components
fig, axes = usid.plot_utils.plot_scree(h5_s, title='Variance')
if save_figs:
    fig.savefig(os.path.join(figures_folder, 'SVD_S.pdf'), format='pdf', bbox_inches='tight')
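As promised above, the fraction of the total variance captured by each component can be read directly off the S dataset. A quick sketch (assuming S holds the singular values as a flat array):
# Variance scales as the square of each singular value
s_vec = h5_s[()].ravel()
explained = s_vec ** 2 / np.sum(s_vec ** 2)
print('First 5 components capture {:.1%} of the total variance'.format(np.sum(explained[:5])))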
The plot below shows the spatial distribution (abundance map) of each SVD component.
# Visualize the abundance maps from SVD:
loadings = np.reshape(h5_u[:, :num_plot_comps], (num_rows, num_cols, -1)).transpose([2, 0, 1])
fig, axes = usid.plot_utils.plot_map_stack(loadings, num_comps=num_plot_comps, title='Abundance Maps',
cmap=usid.plot_utils.cmap_jet_white_center(), title_yoffset=0.925)
if save_figs:
    fig.savefig(os.path.join(figures_folder, 'SVD_U.pdf'), format='pdf', bbox_inches='tight')
The V dataset contains the endmembers (eigenvectors) for each component.
# Visualize the Endmembers from SVD:
eigenvectors = np.reshape(h5_v[:num_plot_comps], (-1, num_sensor_rows, num_sensor_cols))
fig, axes = usid.plot_utils.plot_map_stack(eigenvectors, num_comps=num_plot_comps, title='Endmembers',
cmap=px.plot_utils.cmap_jet_white_center(), title_yoffset=0.925)
if save_figs:
    fig.savefig(os.path.join(figures_folder, 'SVD_V.pdf'), format='pdf', bbox_inches='tight')
Clustering divides data into k clusters such that the variance within each cluster is minimized.
In principle, clustering can be performed on any dataset that has some spectral values (e.g., ronchigrams in the case of the raw dataset, or an array of values for each SVD component) for each position. However, the computation time increases with the size of the dataset.
Here, we will perform k-means clustering on the U matrix from SVD. We want a large enough number of clusters that k-means can identify fine nuances in the data; at the same time, we want to minimize computation time by limiting the number of clusters. We recommend 32 clusters. (A direct scikit-learn equivalent is sketched after the computation below.)
# Choose how many SVD components to use in clustering
spectral_components = 128
# Choose how many clusters to use
num_clusters = 32
estimator = KMeans(n_clusters=num_clusters)
proc = px.processing.Cluster(h5_u, estimator, num_comps=spectral_components)
h5_kmeans_group = proc.compute()
h5_labels = h5_kmeans_group['Labels']
h5_centroids = h5_kmeans_group['Mean_Response']
# In case we take existing results, we need to get these parameters
num_comps_for_clustering = h5_centroids.shape[1]
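For reference, here is a minimal sketch of the same clustering performed directly with scikit-learn, bypassing the pycroscopy wrapper (which adds the HDF5 bookkeeping used below):
# Sketch only: cluster the first few SVD components per position with plain scikit-learn
u_subset = h5_u[:, :spectral_components]              # (positions, components)
km = KMeans(n_clusters=num_clusters).fit(u_subset)
demo_labels = km.labels_.reshape(num_rows, num_cols)  # cluster label per scan pixel
demo_centroids = km.cluster_centers_                  # (num_clusters, spectral_components)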
label_mat = np.reshape(h5_labels, (num_rows, num_cols))
fig, axis = px.viz.cluster_utils.plot_cluster_labels(label_mat, num_clusters=num_clusters)
axis.set_title('kMeans Labels')
if save_figs:
    fig.savefig(os.path.join(figures_folder, 'kMeans_labels.pdf'), format='pdf', bbox_inches='tight')
The dendrogram below shows how the clusters relate to one another; the vertical length of the branches indicates the relative separation between neighboring clusters. A SciPy-only sketch of a comparable dendrogram follows this plot.
e_vals = np.reshape(h5_u[:, :spectral_components],
(num_rows, num_cols, -1))
fig = px.viz.cluster_utils.plot_cluster_dendrogram(label_mat, e_vals,
num_comps_for_clustering,
num_clusters,
last=num_clusters);
if save_figs:
    fig.savefig(os.path.join(figures_folder, 'kMeans_dendrogram.pdf'), format='pdf', bbox_inches='tight')
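A comparable dendrogram can also be built directly from the k-means centroids with SciPy's hierarchical clustering utilities. A hedged sketch follows; the Ward linkage choice here is an assumption, not necessarily what the pycroscopy helper above uses.
# Sketch only: agglomerate the k-means centroids and plot the resulting hierarchy
from scipy.cluster.hierarchy import linkage, dendrogram

link_mat = linkage(h5_centroids[()], method='ward')
fig, axis = plt.subplots(figsize=(8, 4))
dendrogram(link_mat, ax=axis)
axis.set_title('Hierarchy of k-means centroids')
axis.set_xlabel('Cluster index')
axis.set_ylabel('Separation (Ward distance)')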
h5_file.close()