#!/usr/bin/env python
# coding: utf-8

# ## Animating slices of a human head MRI scan, using Plotly FigureWidget

# In[ ]:

import plotly.graph_objects as go
import numpy as np
from skimage import io

# Read the volume data:

# In[ ]:

volume = io.imread("https://s3.amazonaws.com/assets.datacamp.com/blog_assets/attention-mri.tif").T
volume.shape

# In[ ]:

n_slices = volume.shape[0]
r, c = volume[0].shape

# Set the z-coordinate (height) at which the first displayed slice is placed, and the vertical step between consecutive slices:

# In[ ]:

height = (volume.shape[0] - 1) / 10
grid = np.linspace(0, height, n_slices)
slice_step = grid[1] - grid[0]
slice_step

# The normalized slice values are color-mapped to the bone colorscale (the usual colorscale for medical images):

# In[ ]:

pl_bone = [[0.0, 'rgb(0, 0, 0)'],
           [0.05, 'rgb(10, 10, 14)'],
           [0.1, 'rgb(21, 21, 30)'],
           [0.15, 'rgb(33, 33, 46)'],
           [0.2, 'rgb(44, 44, 62)'],
           [0.25, 'rgb(56, 55, 77)'],
           [0.3, 'rgb(66, 66, 92)'],
           [0.35, 'rgb(77, 77, 108)'],
           [0.4, 'rgb(89, 92, 121)'],
           [0.45, 'rgb(100, 107, 132)'],
           [0.5, 'rgb(112, 123, 143)'],
           [0.55, 'rgb(122, 137, 154)'],
           [0.6, 'rgb(133, 153, 165)'],
           [0.65, 'rgb(145, 169, 177)'],
           [0.7, 'rgb(156, 184, 188)'],
           [0.75, 'rgb(168, 199, 199)'],
           [0.8, 'rgb(185, 210, 210)'],
           [0.85, 'rgb(203, 221, 221)'],
           [0.9, 'rgb(220, 233, 233)'],
           [0.95, 'rgb(238, 244, 244)'],
           [1.0, 'rgb(255, 255, 255)']]

# The initial frame is the top slice of the volume, drawn as a constant-z surface whose color encodes the slice values:

# In[ ]:

initial_slice = dict(type='surface',
                     z=height*np.ones((r, c)),
                     surfacecolor=np.flipud(volume[-1]),
                     colorscale=pl_bone,
                     showscale=False)

# Define the plot layout:

# In[ ]:

layout3d = dict(title='Head Scanning',
                font=dict(family='Balto'),
                width=650,
                height=650,
                scene=dict(zaxis=dict(range=[-0.1, 6.8], autorange=False)))

# In[ ]:

fw = go.FigureWidget(data=[initial_slice], layout=layout3d)

# In[ ]:

#fw

# Define an integer slider that selects the slice to display:

# In[ ]:

import ipywidgets as iw

slider = iw.IntSlider(value=0, min=0, max=n_slices-1, step=1, description='slice #')
slider.layout = dict(margin='1px 80px 40px 5px', width='400px')

# The slider callback updates both the z-position of the surface and its color data, moving downwards from the top slice:

# In[ ]:

def slice_changed(change):
    k = slider.value
    fw.data[0].update(z=(height - slice_step*k)*np.ones((r, c)),
                      surfacecolor=np.flipud(volume[-1 - k]))

slider.observe(slice_changed, 'value')

# In[ ]:

#help(iw.Play)

# A Play button, js-linked to the slider, steps through the slices automatically:

# In[ ]:

play_button = iw.Play(value=0, min=0, max=n_slices-1, step=1, interval=40)
play_button.layout = dict(margin='1px 10px 50px 100px')
iw.jslink((play_button, 'value'), (slider, 'value'))
iw.VBox([fw, iw.HBox([play_button, slider])])

# Animation without the slider (run this cell and watch the figure displayed by the cell above):

# In[ ]:

for k in range(n_slices):
    with fw.batch_animate(duration=10, easing='cubic-in-out'):
        fw.data[0].update(z=(height - k*slice_step)*np.ones((r, c)),
                          surfacecolor=np.flipud(volume[-1 - k]))

# The latter animation (without the slider) appears to run faster.
# For a much faster animation, see the gif file created from the figure generated in the notebook `scanning-the-head-classic.ipynb`.
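
# Not part of the original notebook: a minimal sketch of one way such a gif could be produced,
# assuming the `kaleido` and `imageio` packages are available. The output file name and the
# frame duration below are illustrative assumptions, not values taken from `scanning-the-head-classic.ipynb`.

# In[ ]:

import imageio

fig = go.Figure(data=[initial_slice], layout=layout3d)   # static figure, re-rendered per slice
png_frames = []
for k in range(n_slices):
    fig.data[0].update(z=(height - k*slice_step)*np.ones((r, c)),
                       surfacecolor=np.flipud(volume[-1 - k]))
    # to_image returns PNG bytes (requires kaleido); imageio.imread decodes them into an array
    png_frames.append(imageio.imread(fig.to_image(format='png')))
imageio.mimsave('head-scanning.gif', png_frames, duration=0.04)  # assumed file name and ~25 fps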