#!/usr/bin/env python
# coding: utf-8

# # Landsat 8 NDVI Analysis on the Cloud
# 
# This notebook demonstrates a "Cloud-native" analysis of [Normalized Difference Vegetation Index (NDVI)](https://en.wikipedia.org/wiki/Normalized_difference_vegetation_index) using Landsat 8 data.
# 
# **What is unique about this workflow is that no data is downloaded to our local computer! All calculations are performed in memory across many distributed machines in the cloud.**
# 
# This workflow is possible because the Landsat 8 data is stored in [Cloud-Optimized GeoTIFF](http://www.cogeo.org) format, which can be accessed remotely via the [xarray](http://xarray.pydata.org/en/stable/) and [rasterio](https://rasterio.readthedocs.io/en/latest/) Python libraries. Distributed computing is enabled through a [Pangeo](http://pangeo-data.org) JupyterHub deployment with [Dask Kubernetes](https://github.com/dask/dask-kubernetes).
# 
# About Landsat 8:
# https://landsat.usgs.gov/landsat-8
# 
# About the Landsat archive:
# https://cloud.google.com/storage/docs/public-datasets/landsat
# 
# Date: August 30, 2018
# 
# Created by:
# Scott Henderson (scottyh@uw.edu), Daniel Rothenberg
# 
# Updated to use hvplot/holoviews by Ryan Abernathy and Rich Signell (rsignell@usgs.gov)

# In[1]:


# Import required libraries
import os

import pandas as pd
import rasterio
import xarray as xr
import requests

from dask_kubernetes import KubeCluster
from dask.distributed import Client
from dask.distributed import wait, progress

import hvplot.xarray
import hvplot.pandas
import cartopy.crs as ccrs
import holoviews as hv


# In[2]:


# Print package versions
print('xarray version: ', xr.__version__)
print('rasterio version: ', rasterio.__version__)
print('hvplot version: ', hvplot.__version__)


# In[3]:


# Set environment variables for efficient reading of cloud-optimized GeoTIFFs
os.environ['GDAL_DISABLE_READDIR_ON_OPEN'] = 'YES'
os.environ['CPL_VSIL_CURL_ALLOWED_EXTENSIONS'] = 'TIF'


# ## Use NASA Common Metadata Repository (CMR) to get Landsat 8 images
# 
# [NASA CMR](https://earthdata.nasa.gov/about/science-system-description/eosdis-components/common-metadata-repository) is a unified way to search for remote sensing assets across many archive centers. If you prefer a graphical user interface, NASA [Earthdata Search](https://search.earthdata.nasa.gov/search) is built on top of CMR. CMR returns download links through the USGS (https://earthexplorer.usgs.gov), but the same archive is mirrored as a [Google Public Dataset](https://cloud.google.com/storage/docs/public-datasets/landsat), so we'll make a function that queries CMR and returns URLs to the imagery stored on Google Cloud.

# In[4]:


def query_cmr_landsat(collection='Landsat_8_OLI_TIRS_C1', tier='T1', path=47, row=27):
    """Query NASA CMR for Collection 1, Tier 1 Landsat scenes from a specific path and row."""
    data = [f'short_name={collection}',
            f'page_size=2000',
            f'attribute[]=string,CollectionCategory,{tier}',
            f'attribute[]=int,WRSPath,{path}',
            f'attribute[]=int,WRSRow,{row}',
            ]

    query = 'https://cmr.earthdata.nasa.gov/search/granules.json?' + '&'.join(data)
    r = requests.get(query, timeout=100)
    print(r.url)

    df = pd.DataFrame(r.json()['feed']['entry'])

    # Save results to a file
    #print('Saved results to cmr-result.json')
    #with open('cmr-result.json', 'w') as j:
    #    j.write(r.text)

    return df


# In[5]:


def make_google_archive(pids, bands):
    """Turn list of product_ids into a pandas DataFrame for NDVI analysis."""
    path = pids[0].split('_')[2][1:3]
    row = pids[0].split('_')[2][-2:]
    baseurl = f'https://storage.googleapis.com/gcp-public-data-landsat/LC08/01/0{path}/0{row}'
    # baseurl = f's3://landsat-pds/c1/L8/0{path}/0{row}'  # not sure there is an AWS equivalent here

    dates = [pd.to_datetime(x.split('_')[3]) for x in pids]
    df = pd.DataFrame(dict(product_id=pids, date=dates))

    for band in bands:
        df[band] = [f'{baseurl}/{x}/{x}_{band}.TIF' for x in pids]

    return df


# In[6]:


df = query_cmr_landsat()


# In[7]:


pids = df.title.tolist()
df = make_google_archive(pids, ['B4', 'B5'])


# In[8]:


df.head()


# In[9]:


image_url = df.iloc[0]['B4']


# In[10]:


image_url


# ## Launch Dask Kubernetes Cluster
# 
# This will allow us to distribute our analysis across many machines. In the default configuration for Pangeo Binder, each worker has 2 vCPUs and 7 GB of RAM. It may take several minutes to initialize these workers and make them available to Dask.

# In[11]:


# Click on the 'Dashboard link' to monitor calculation progress
cluster = KubeCluster(n_workers=10)
cluster


# In[12]:


# Attach Dask to the cluster
client = Client(cluster)


# ## Examine a single band Landsat image
# 
# The *rasterio* library allows us to read GeoTIFFs on the web without downloading the entire image. *Xarray* has a built-in open_rasterio() function that allows us to open the file as a DataArray. Xarray also uses Dask for lazy reading, so we want to make sure the native block tiling of the image matches the dask "chunk size". These dask chunks are automatically distributed among all our workers when a computation is requested, so ideally they will fit in worker memory. A chunk size of 2048x2048 with a float32 datatype implies a roughly 16 MB array (a quick check of this appears below).

# In[13]:


# Load with rasterio
image_url = df.iloc[0]['B4']
with rasterio.open(image_url) as src:
    print(src.profile)


# In[14]:


# Note that the blocksize of the image is 256 by 256, so we want xarray to use some multiple of that
xchunk = 2048
ychunk = 2048
da = xr.open_rasterio(image_url, chunks={'band': 1, 'x': xchunk, 'y': ychunk})
da
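# (Added sketch, not part of the original notebook.) A minimal check of the two claims above:
# that the chosen dask chunk size is a multiple of the GeoTIFF's internal block size, and that
# a single 2048x2048 float32 chunk is roughly 16 MB. Uses the `image_url`, `xchunk`, and
# `ychunk` defined in the preceding cells.

with rasterio.open(image_url) as src:
    block_h, block_w = src.block_shapes[0]  # internal tiling of band 1, e.g. (256, 256)
    print('internal block size:', block_h, 'x', block_w)
    print('chunks align with blocks:', xchunk % block_w == 0 and ychunk % block_h == 0)

# Memory footprint of one dask chunk once cast to float32 (4 bytes per value)
chunk_mb = xchunk * ychunk * 4 / 1e6
print(f'one {xchunk}x{ychunk} float32 chunk ~ {chunk_mb:.1f} MB')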
# In[16]:


# If we request to compute something or plot these arrays, the necessary data chunks will be accessed on cloud storage:
# Watch the KubeCluster dashboard to see the worker activity when this command is run:
# Note that no data is stored on disk here, it's all in memory
band1 = da.sel(band=1).persist()
progress(band1)


# In[17]:


get_ipython().run_cell_magic('time', '', "display(band1.hvplot(rasterize=True, width=600, height=400, cmap='viridis'))\n")


# In[18]:


# skip this because it takes minutes to run and doesn't use the dask workers.
# here we use the CRS info (epsg:32610 = UTM zone 10n) to plot in lon/lat
# rather than in projected coordinates.
if 0:
    crs = ccrs.UTM(zone='10n')
    display(band1.hvplot(crs=crs, rasterize=True, width=600, height=400, cmap='viridis'))


# ## Load all Landsat bands into an xarray dataset
# 
# Often we want to analyze a time series of satellite imagery, but we are constrained by computational resources: either we download all the images and extract a small subset before doing our analysis, or we coarsen the resolution of all our images so that the entire set fits into our computer's RAM. Because this notebook is running on Google Cloud with access to many resources in our KubeCluster, we no longer have to worry about these computational constraints, and can conduct our analysis at full resolution!
# 
# First we need to construct an xarray Dataset object (which has data variables 'B4' and 'B5' in an n-dimensional array with x-coordinates representing UTM easting, y-coordinates representing UTM northing, and a time coordinate representing the image acquisition date).
# 
# There are different ways to go about this, but we will load our images with a timestamp index since each image is taken on a different date. Typically, this is a chore if our images are not on the same grid to begin with, but xarray knows how to automatically align images based on their georeferenced coordinates.

# In[19]:


# Note that these Landsat images are not necessarily the same shape or on the same grid:
for image_url in df.B4[:5]:
    with rasterio.open(image_url) as src:
        print(src.shape, src.bounds)


# In[20]:


def create_multiband_dataset(row, bands=['B4', 'B5'], chunks={'band': 1, 'x': 2048, 'y': 2048}):
    '''A function to load multiple Landsat bands into an xarray Dataset'''

    # Each image is a dataset containing both band4 and band5
    datasets = []
    for band in bands:
        url = row[band]
        da = xr.open_rasterio(url, chunks=chunks)
        da = da.squeeze().drop(labels='band')
        ds = da.to_dataset(name=band)
        datasets.append(ds)

    DS = xr.merge(datasets)

    return DS


# In[21]:


# Merge all acquisitions into a single large Dataset
datasets = []
for i, row in df.iterrows():
    try:
        print('loading...', row.date)
        ds = create_multiband_dataset(row)
        datasets.append(ds)
    except Exception as e:
        print('ERROR loading, skipping acquisition!')
        print(e)


# In[22]:


get_ipython().run_cell_magic('time', '', "DS = xr.concat(datasets, dim=pd.DatetimeIndex(df.date.tolist(), name='time'))\nprint('Dataset size (GB): ', DS.nbytes/1e9)\n")


# In[23]:


DS


# ### Note that xarray has automatically expanded the dimensions to include the maximum extents of all the images, and the chunk size has been automatically adjusted
# 
# There is definitely some room for improvement here from a computational efficiency standpoint - in particular, the dask chunks are no longer aligned with the image tiles. This is because each image starts at different coordinates and has a different shape, but xarray uses a single chunk size for the entire dataset (a simple rechunking mitigation is sketched below). There will also be many zeros in this dataset, so future work could take advantage of sparse arrays.
# 
# These points aside, our KubeCluster will automatically parallelize our computations for us, so we need not worry too much about optimal efficiency and can just go ahead and run our analysis!
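# (Added sketch, not part of the original workflow.) One possible mitigation for the chunk
# misalignment noted above is to rechunk the concatenated dataset back to uniform
# 2048x2048 tiles, one time slice per chunk. The rechunk itself is lazy: no data moves
# until a computation is requested, but it does add a reshuffling step, so it is only
# worthwhile if later computations are limited by awkward chunking.
DS_rechunked = DS.chunk({'time': 1, 'x': 2048, 'y': 2048})
print(DS_rechunked['B4'].data.chunksize)  # e.g. (1, 2048, 2048)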
# ### Browse interactively using hvplot

# In[24]:


get_ipython().run_cell_magic('time', '', "display(DS['B4'].hvplot('x', 'y', groupby='time', rasterize=True, width=600, height=400, cmap='viridis'))\n")


# ## Distributed NDVI computations
# 
# Set up our NDVI dataset. Note that NDVI is not actually computed until we call the Dask compute() or persist() methods, or call other functions such as plot() that actually operate on the data!

# In[25]:


NDVI = (DS['B5'] - DS['B4']) / (DS['B5'] + DS['B4'])
NDVI


# ### Plot NDVI on a specific date (full resolution)
# 
# Only data for a single Landsat acquisition date is pulled from cloud storage

# In[26]:


day = '2013-04-21'
s = NDVI.sel(time=day)
s


# In[27]:


del s['time']


# In[28]:


s


# In[29]:


get_ipython().run_cell_magic('time', '', "display(s.hvplot('x', 'y', rasterize=True, width=600, height=400, cmap='viridis', label=day))\n")


# ### Mean NDVI for a range of dates
# 
# This example calculates the mean NDVI per pixel (30 m) for 2013-2014, storing the result in distributed cluster memory. This takes a while...

# In[30]:


ndvi = NDVI.sel(time=slice('2013-01-01', '2014-01-01')).mean(dim='time').persist()
progress(ndvi)


# In[31]:


ndvi


# In[32]:


get_ipython().run_cell_magic('time', '', "display(ndvi.hvplot('x','y', rasterize=True, width=600, height=400, cmap='viridis'))\n\n# Point of interest we'll extract a timeseries from\n#plt.plot(562370, 5312519, 'ko')\n")


# ### Extract a region of interest (farmland near Everett, WA on Ebey Island) and resample to a monthly mean
# 
# We expect to see higher NDVI values in the summer months, corresponding to dense vegetation.

# In[33]:


# https://www.geoplaner.com
# lat, lon to northing, easting
# 47.962940   --> 5312519
# -122.164483 --> 562370
# + 5 km buffer
# EPSG:32610 WGS 84 / UTM zone 10N
xcen = 562370
ycen = 5312519
buf = 5000  # look at point +/- 5km

ds = NDVI.sel(x=slice(xcen-buf, xcen+buf), y=slice(ycen-buf, ycen+buf))
timeseries = ds.resample(time='1MS').mean().persist()


# In[34]:


timeseries


# In[35]:


s = timeseries.to_series()
(s.hvplot(width=700, height=300, legend=False) * s.hvplot.scatter(width=700, height=300, legend=False)).relabel('Mean NDVI')


# ### Plot subset of all NDVI arrays at full resolution for acquisitions in 2015

# In[36]:


ds2015 = ds.sel(time=slice('2015-01-01', '2016-01-01'))
nt = len(ds2015)


# Getting small multiple plots going with `hvplot` is a bit challenging. Need to use `Layout` from `holoviews`:

# In[37]:


get_ipython().run_cell_magic('time', '', "\ndss = ds.sel(time=slice('2015-01-01', '2016-01-01'))\nncols = 5\n\nplots = []\nfor i in range(nt):\n    ds_panel = dss.isel(time=i)\n\n    # if last plot in row, add colorbar and make it wider\n    is_last_in_row = (i+1) % ncols == 0\n    height = 200\n    width = 150 if not is_last_in_row else 200\n\n    p = (ds_panel\n         .hvplot('x', 'y', rasterize=True, width=width, height=height, cmap='viridis',\n                 xaxis=False, yaxis=False, colorbar=is_last_in_row)\n         .relabel(str(ds_panel.time.values)[:10]))\n\n    plots.append(p)\n\ndisplay(hv.Layout(plots).cols(ncols))\n")


# ### Browse all NDVI arrays at full resolution with a time slider

# In[38]:


get_ipython().run_cell_magic('time', '', "display(NDVI.hvplot('x','y',groupby='time', rasterize=True, width=700, height=500, cmap='viridis'))\n")


# ## In conclusion
# 
# * This notebook demonstrates the power of storing data publicly in the cloud as Cloud-Optimized GeoTIFFs - scientists can conduct scalable analysis without downloading the data to a local machine. Only derived subsets and figures need to be downloaded!
# * We used a crude NDVI calculation, designed to demonstrate the syntax and tools - a proper analysis should take into account cloud masks and other corrections (an illustrative cloud-masking sketch follows below).
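# (Added sketch, not part of the original notebook.) A minimal illustration of the
# cloud-masking idea mentioned in the conclusion. It assumes the Collection 1 quality
# band (BQA) is published alongside B4/B5 with the same URL naming convention, and that
# bit 4 of BQA flags cloudy pixels; verify both assumptions against the current product
# documentation before relying on this.

def cloud_free_ndvi(row, chunks={'band': 1, 'x': 2048, 'y': 2048}):
    """Compute NDVI for one acquisition with cloudy pixels set to NaN (sketch)."""
    b4 = xr.open_rasterio(row['B4'], chunks=chunks).squeeze().drop(labels='band')
    b5 = xr.open_rasterio(row['B5'], chunks=chunks).squeeze().drop(labels='band')

    # Hypothetical BQA URL, following the same naming pattern as the B4/B5 URLs
    bqa_url = row['B4'].replace('_B4.TIF', '_BQA.TIF')
    bqa = xr.open_rasterio(bqa_url, chunks=chunks).squeeze().drop(labels='band')

    ndvi = (b5 - b4) / (b5 + b4)
    cloud = (bqa.astype('uint16') & (1 << 4)) > 0  # assumed cloud bit
    return ndvi.where(~cloud)


# Example usage on the first acquisition (still lazy until persisted or computed)
ndvi_masked = cloud_free_ndvi(df.iloc[0])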