import os
import ipywidgets as widgets
from ipywidgets import interact
from osgeo import gdal
import geopandas as gpd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from shapely.geometry import mapping, Polygon, MultiPolygon, LineString, Point
# Load the WNS (white-nose syndrome) county-status polygons and derive an
# integer detection year from the WNS_MAP_YR string (e.g. "2017-2018" -> 2017).
wns_fname = os.path.join('data', "WNS_Status_Provisional_8_30_2018.shp")
wns = gpd.GeoDataFrame.from_file(wns_fname)
wns['year'] = wns.WNS_MAP_YR.str[:4].astype(int)

# USA Contiguous Albers Equal Area Conic: an equal-area projection (meters)
# so that areas and distances used by the KDE are physically meaningful.
target_crs = {'proj': 'aea',
              'lat_1': 29.5,
              'lat_2': 45.5,
              'lat_0': 23,
              'lon_0': -96,
              'x_0': 0,
              'y_0': 0,
              'datum': 'NAD83',
              'units': 'm',
              'no_defs': True}

# State outlines for the basemap; restrict the plotting bounds to CONUS.
states_fname = os.path.join('data', "cb_2017_us_state_20m.shp")
states = gpd.read_file(states_fname)
states_aea = states.to_crs(target_crs)
conus_states = states_aea[~states_aea.NAME.isin(['Alaska', 'Hawaii', 'Puerto Rico'])]
bounds = conus_states.bounds
x_bounds = [bounds.minx.min(), bounds.maxx.max()]
y_bounds = [bounds.miny.min(), bounds.maxy.max()]

# County centroids in the equal-area CRS provide the KDE sample points.
centroids = wns.centroid.to_crs(target_crs)
wns['x'] = centroids.x
wns['y'] = centroids.y

# BUG FIX: the original stored the area of the *centroids* (points, always 0)
# in the 'area' column, and `wns.area` then resolved to the geopandas geometry
# property in the source (geographic) CRS, giving latitude-distorted degree^2
# areas.  Use the county polygons' area in the equal-area CRS and explicit
# column access instead.
wns['area'] = wns.geometry.to_crs(target_crs).area
wns['area_weight'] = wns['area'] / wns['area'].max()
def reclass_as_pcnt(Z):
    """Reclassify a density surface into cumulative "percent volume" values.

    Each cell is replaced by the fraction of the total density contained in
    all cells whose density is >= that cell's density, so e.g. the set of
    cells with a result <= 0.75 encloses 75% of the density volume
    (standard KDE "isopleth" reclassification).

    Parameters
    ----------
    Z : ndarray
        Density surface (any shape, any numeric dtype); need not sum to 1.

    Returns
    -------
    ndarray of float
        Same shape as Z, values in (0, 1].
    """
    # Unique density values, highest first, so the accumulation walks from
    # the density peak outward.
    uniques = np.unique(Z)[::-1]
    # BUG FIX: the original used np.copy(Z), which inherits Z's dtype -- for
    # integer input the fractional results were truncated to 0.  Always
    # accumulate into a float array.
    z_reclass = np.zeros(Z.shape, dtype=float)
    total_dens = Z.sum()
    cum_area = 0.0
    for u in uniques:
        cum_area += np.count_nonzero(Z == u) * u
        z_reclass[Z == u] = cum_area / total_dens
    return z_reclass
#area_lookup[unique] = np.count_nonzero(z==unique)
def create_kernel(x, y, X, Y, factor=1.2, weights=None):
    """Fit a (optionally weighted) Gaussian KDE to the points and grid it.

    Fits the custom weighted `gaussian_kde` to the sample points (x, y),
    scales its bandwidth by `factor`, evaluates the density on the grid
    (X, Y), and returns the surface reclassified to cumulative
    percent-volume values via `reclass_as_pcnt`.
    """
    grid_coords = np.vstack([X.ravel(), Y.ravel()])
    sample = np.vstack([x, y])
    kde = gaussian_kde(sample, weights=weights)
    # Scalar argument: scales the automatically chosen (Scott) bandwidth.
    kde.set_bandwidth(factor)
    density = np.reshape(kde(grid_coords), X.shape)
    return reclass_as_pcnt(density)
def create_kernel_contours(x, y, z, levels=(0.5, 0.75, 0.95)):
    """Draw contour lines for the given isopleth levels on the current axes.

    Parameters
    ----------
    x, y : 2-D grid coordinate arrays.
    z : 2-D surface of cumulative percent-volume values.
    levels : sequence of float, optional
        Isopleth levels to contour (default 50/75/95%).

    Returns
    -------
    The QuadContourSet returned by ``plt.contour``.
    """
    # Tuple default instead of the original mutable list default (a list
    # default is shared across calls); matplotlib cycles the color list if
    # fewer levels are requested.
    return plt.contour(x, y, z, levels=levels, colors=['red', 'white', 'blue'])
def plot_one(x, y, X, Y, Z, title='', isopleth=0.75):
    """Plot a single KDE isopleth contour over the CONUS state outlines.

    Parameters
    ----------
    x, y : sample point coordinates (currently unused in the plot itself,
        kept for interface compatibility with callers).
    X, Y : 2-D grid coordinate arrays.
    Z : 2-D surface of cumulative percent-volume values.
    title : str, figure title.
    isopleth : float, the single percent-volume level to contour.

    Returns
    -------
    matplotlib Axes with the contour and basemap drawn.
    """
    fig = plt.figure(figsize=(25, 15))
    ax = fig.add_subplot(111)
    # Draws on the current axes as a side effect.
    create_kernel_contours(X, Y, Z, levels=[isopleth])
    # NOTE: removed a dead convex-hull computation from the original -- the
    # hull and its exterior points were computed but never used.
    # Fixed CONUS extent in the Albers equal-area CRS (meters).
    ax.set_xlim([-3000000, 3000000])
    ax.set_ylim([0, 3700000])
    states_aea.plot(color='None', edgecolor='black', ax=ax, alpha=0.4)
    plt.title(title, fontsize=30)
    ax.set_aspect('equal')
    return ax
# copied from https://gist.github.com/tillahoffmann/f844bce2ec264c1c8cb5
import numpy as np
from scipy.spatial.distance import cdist
class gaussian_kde(object):
    """Representation of a kernel-density estimate using Gaussian kernels.

    Kernel density estimation is a way to estimate the probability density
    function (PDF) of a random variable in a non-parametric way.
    `gaussian_kde` works for both uni-variate and multi-variate data. It
    includes automatic bandwidth determination. The estimation works best for
    a unimodal distribution; bimodal or multi-modal distributions tend to be
    oversmoothed.

    Parameters
    ----------
    dataset : array_like
        Datapoints to estimate from. In case of univariate data this is a 1-D
        array, otherwise a 2-D array with shape (# of dims, # of data).
    bw_method : str, scalar or callable, optional
        The method used to calculate the estimator bandwidth. This can be
        'scott', 'silverman', a scalar constant or a callable. If a scalar,
        this will be used directly as `kde.factor`. If a callable, it should
        take a `gaussian_kde` instance as only parameter and return a scalar.
        If None (default), 'scott' is used. See Notes for more details.
    weights : array_like, shape (n, ), optional, default: None
        An array of weights, of the same shape as `x`. Each value in `x`
        only contributes its associated weight towards the bin count
        (instead of 1).

    Attributes
    ----------
    dataset : ndarray
        The dataset with which `gaussian_kde` was initialized.
    d : int
        Number of dimensions.
    n : int
        Number of datapoints.
    neff : float
        Effective sample size using Kish's approximation.
    factor : float
        The bandwidth factor, obtained from `kde.covariance_factor`, with which
        the covariance matrix is multiplied.
    covariance : ndarray
        The covariance matrix of `dataset`, scaled by the calculated bandwidth
        (`kde.factor`).
    inv_cov : ndarray
        The inverse of `covariance`.

    Methods
    -------
    kde.evaluate(points) : ndarray
        Evaluate the estimated pdf on a provided set of points.
    kde(points) : ndarray
        Same as kde.evaluate(points)
    kde.pdf(points) : ndarray
        Alias for ``kde.evaluate(points)``.
    kde.set_bandwidth(bw_method='scott') : None
        Computes the bandwidth, i.e. the coefficient that multiplies the data
        covariance matrix to obtain the kernel covariance matrix.
        .. versionadded:: 0.11.0
    kde.covariance_factor : float
        Computes the coefficient (`kde.factor`) that multiplies the data
        covariance matrix to obtain the kernel covariance matrix.
        The default is `scotts_factor`. A subclass can overwrite this method
        to provide a different method, or set it through a call to
        `kde.set_bandwidth`.

    Notes
    -----
    Bandwidth selection strongly influences the estimate obtained from the KDE
    (much more so than the actual shape of the kernel). Bandwidth selection
    can be done by a "rule of thumb", by cross-validation, by "plug-in
    methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
    uses a rule of thumb, the default is Scott's Rule.

    Scott's Rule [1]_, implemented as `scotts_factor`, is::

        n**(-1./(d+4)),

    with ``n`` the number of data points and ``d`` the number of dimensions.
    Silverman's Rule [2]_, implemented as `silverman_factor`, is::

        (n * (d + 2) / 4.)**(-1. / (d + 4)).

    Good general descriptions of kernel density estimation can be found in [1]_
    and [2]_, the mathematics for this multi-dimensional implementation can be
    found in [1]_.

    References
    ----------
    .. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
           Visualization", John Wiley & Sons, New York, Chicester, 1992.
    .. [2] B.W. Silverman, "Density Estimation for Statistics and Data
           Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
           Chapman and Hall, London, 1986.
    .. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
           Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
    .. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
           conditional density estimation", Computational Statistics & Data
           Analysis, Vol. 36, pp. 279-298, 2001.

    Examples
    --------
    Generate some random two-dimensional data:

    >>> from scipy import stats
    >>> def measure(n):
    ...     "Measurement model, return two coupled measurements."
    ...     m1 = np.random.normal(size=n)
    ...     m2 = np.random.normal(scale=0.5, size=n)
    ...     return m1+m2, m1-m2
    >>> m1, m2 = measure(2000)
    >>> xmin = m1.min()
    >>> xmax = m1.max()
    >>> ymin = m2.min()
    >>> ymax = m2.max()

    Perform a kernel density estimate on the data:

    >>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    >>> positions = np.vstack([X.ravel(), Y.ravel()])
    >>> values = np.vstack([m1, m2])
    >>> kernel = stats.gaussian_kde(values)
    >>> Z = np.reshape(kernel(positions).T, X.shape)

    Plot the results:

    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
    ...           extent=[xmin, xmax, ymin, ymax])
    >>> ax.plot(m1, m2, 'k.', markersize=2)
    >>> ax.set_xlim([xmin, xmax])
    >>> ax.set_ylim([ymin, ymax])
    >>> plt.show()
    """
    def __init__(self, dataset, bw_method=None, weights=None):
        # dataset is stored as a (d, n) array: d dimensions, n samples.
        self.dataset = np.atleast_2d(dataset)
        if not self.dataset.size > 1:
            raise ValueError("`dataset` input should have multiple elements.")
        self.d, self.n = self.dataset.shape
        # Normalize the per-sample weights so they sum to 1 (uniform if None).
        if weights is not None:
            self.weights = weights / np.sum(weights)
        else:
            self.weights = np.ones(self.n) / self.n
        # Compute the effective sample size
        # http://surveyanalysis.org/wiki/Design_Effects_and_Effective_Sample_Size#Kish.27s_approximate_formula_for_computing_effective_sample_size
        self.neff = 1.0 / np.sum(self.weights ** 2)
        self.set_bandwidth(bw_method=bw_method)

    def evaluate(self, points):
        """Evaluate the estimated pdf on a set of points.

        Parameters
        ----------
        points : (# of dimensions, # of points)-array
            Alternatively, a (# of dimensions,) vector can be passed in and
            treated as a single point.

        Returns
        -------
        values : (# of points,)-array
            The values at each point.

        Raises
        ------
        ValueError : if the dimensionality of the input points is different than
                     the dimensionality of the KDE.
        """
        points = np.atleast_2d(points)
        d, m = points.shape
        if d != self.d:
            if d == 1 and m == self.d:
                # points was passed in as a row vector
                points = np.reshape(points, (self.d, 1))
                m = 1
            else:
                msg = "points have dimension %s, dataset has dimension %s" % (d,
                    self.d)
                raise ValueError(msg)
        # compute the normalised residuals
        # (squared Mahalanobis distance of every point to every sample)
        chi2 = cdist(points.T, self.dataset.T, 'mahalanobis', VI=self.inv_cov) ** 2
        # compute the pdf: weighted sum of Gaussian kernel responses.
        result = np.sum(np.exp(-.5 * chi2) * self.weights, axis=1) / self._norm_factor
        return result

    __call__ = evaluate

    def scotts_factor(self):
        """Scott's rule of thumb, using the effective sample size."""
        return np.power(self.neff, -1./(self.d+4))

    def silverman_factor(self):
        """Silverman's rule of thumb, using the effective sample size."""
        return np.power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))

    # Default method to calculate bandwidth, can be overwritten by subclass
    covariance_factor = scotts_factor

    def set_bandwidth(self, bw_method=None):
        """Compute the estimator bandwidth with given method.

        The new bandwidth calculated after a call to `set_bandwidth` is used
        for subsequent evaluations of the estimated density.

        Parameters
        ----------
        bw_method : str, scalar or callable, optional
            The method used to calculate the estimator bandwidth. This can be
            'scott', 'silverman', a scalar constant or a callable. If a
            scalar, this will be used directly as `kde.factor`. If a callable,
            it should take a `gaussian_kde` instance as only parameter and
            return a scalar. If None (default), nothing happens; the current
            `kde.covariance_factor` method is kept.

        Notes
        -----
        .. versionadded:: 0.11

        Examples
        --------
        >>> x1 = np.array([-7, -5, 1, 4, 5.])
        >>> kde = stats.gaussian_kde(x1)
        >>> xs = np.linspace(-10, 10, num=50)
        >>> y1 = kde(xs)
        >>> kde.set_bandwidth(bw_method='silverman')
        >>> y2 = kde(xs)
        >>> kde.set_bandwidth(bw_method=kde.factor / 3.)
        >>> y3 = kde(xs)
        >>> fig = plt.figure()
        >>> ax = fig.add_subplot(111)
        >>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo',
        ...         label='Data points (rescaled)')
        >>> ax.plot(xs, y1, label='Scott (default)')
        >>> ax.plot(xs, y2, label='Silverman')
        >>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
        >>> ax.legend()
        >>> plt.show()
        """
        if bw_method is None:
            pass
        elif bw_method == 'scott':
            self.covariance_factor = self.scotts_factor
        elif bw_method == 'silverman':
            self.covariance_factor = self.silverman_factor
        elif np.isscalar(bw_method):
            self._bw_method = 'use constant'
            # NOTE(review): this deviates from scipy's gaussian_kde, which
            # uses the constant directly (``lambda: bw_method``).  Here the
            # *current* factor is divided by the constant, so a scalar acts
            # as a scale applied to the automatically chosen bandwidth, and
            # repeated scalar calls would compound.  In this file
            # create_kernel() calls it exactly once per kernel -- confirm
            # the deviation is intentional before reusing elsewhere.
            self.covariance_factor = lambda: self.factor / bw_method
        elif callable(bw_method):
            self._bw_method = bw_method
            self.covariance_factor = lambda: self._bw_method(self)
        else:
            msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
                  "or a callable."
            raise ValueError(msg)
        self._compute_covariance()

    def _compute_covariance(self):
        """Computes the covariance matrix for each Gaussian kernel using
        covariance_factor().
        """
        self.factor = self.covariance_factor()
        # Cache covariance and inverse covariance of the data
        if not hasattr(self, '_data_inv_cov'):
            # Compute the mean and residuals
            _mean = np.sum(self.weights * self.dataset, axis=1)
            _residual = (self.dataset - _mean[:, None])
            # Compute the biased covariance
            self._data_covariance = np.atleast_2d(np.dot(_residual * self.weights, _residual.T))
            # Correct for bias (http://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_covariance)
            self._data_covariance /= (1 - np.sum(self.weights ** 2))
            self._data_inv_cov = np.linalg.inv(self._data_covariance)
        self.covariance = self._data_covariance * self.factor**2
        self.inv_cov = self._data_inv_cov / self.factor**2
        # Weights already sum to 1, so the usual 1/n is folded into the
        # weighted sum in evaluate(); the commented-out ``* self.n`` is the
        # vestige of the unweighted scipy normalization.
        self._norm_factor = np.sqrt(np.linalg.det(2*np.pi*self.covariance)) #* self.n
# Per-year cache of (grid, data subset) and computed KDE surfaces, keyed as
# cache[year]['data'] and cache[year]['kernels'][factor].
cache = {}


def interactive_plot(factor, iso=0.75, year=2017,
                     add_counties=False,
                     add_centroids=True,
                     add_surface=True,
                     weight_by_county_area=True,
                     weight_by_year=True):
    """Plot the weighted KDE isopleth for all WNS detections up to `year`.

    Parameters
    ----------
    factor : float
        Bandwidth scale passed through to create_kernel.
    iso : float
        Percent-volume isopleth level to contour.
    year : int
        Include counties with a detection year <= this year.
    add_counties, add_centroids, add_surface : bool
        Optional map layers.
    weight_by_county_area, weight_by_year : bool
        Which per-county weighting schemes to combine.
    """
    # Reuse the grid and data subset for a previously seen year.
    if year in cache:
        X, Y, data = cache[year]['data']
    else:
        cache[year] = {'kernels': {}}
        data = wns[wns.year <= year]
        pad = 500000  # meters of margin around the CONUS bounds
        X, Y = np.mgrid[x_bounds[0]-pad:x_bounds[1]+pad:100j,
                        y_bounds[0]-pad:y_bounds[1]+pad:100j]
        cache[year]['data'] = (X, Y, data)
    if weight_by_year:
        # NOTE: reads the global weight_f slider value, not a parameter.
        weights = np.asarray(gen_weights(data.years_pres, factor=weight_f.value),
                             dtype=float)
    else:
        weights = np.ones(data.area_weight.shape)
    weights /= np.sum(weights)
    if weight_by_county_area:
        # BUG FIX: the original did ``county_weights /= np.sum(...)`` which
        # normalized the DataFrame column *in place*, corrupting the cached
        # subset on every call.  Build a fresh normalized series instead.
        county_weights = data.area_weight / data.area_weight.sum()
        weights = weights + county_weights
        weights /= np.sum(weights)
    weights = list(weights)
    Z = create_kernel(data.x, data.y, X, Y, factor, weights=weights)
    cache[year]['kernels'][factor] = Z
    ax = plot_one(data.x, data.y, X, Y, Z, isopleth=iso, title=f"WNS {year}-{year+1}")
    if add_surface:
        ax.imshow(np.rot90(Z), cmap=plt.cm.Reds_r,
                  extent=[X.min(), X.max(), Y.min(), Y.max()], alpha=0.6)
    if add_counties:
        data.to_crs(states_aea.crs).plot(column='year', cmap='plasma', ax=ax, legend=True)
    if add_centroids:
        data.to_crs(states_aea.crs).centroid.plot(color='grey', ax=ax)
# --- Widget controls for the interactive map --------------------------------
# KDE bandwidth multiplier passed to create_kernel / set_bandwidth.
f = widgets.FloatSlider(
    value=1.2,
    min=0.1,
    max=4.0,
    step=0.1,
    description="BW Factor")
f.continuous_update = False  # only redraw when the slider is released
# Map year; starts one year after the earliest detection year in the data.
y = widgets.IntSlider(
    value=2014,
    min=wns.year.min()+1,
    max=wns.year.max(),
    step=1,
    description="Year")
# Percent-volume isopleth level to contour.
i = widgets.FloatSlider(
    value=0.75,
    min=0.05,
    max=.99,
    step=0.05,
    description="Isopleth")
i.continuous_update = False  # only redraw when the slider is released
# Toggles for the optional map layers and weighting schemes.
c = widgets.Checkbox(value=False, description='Show Counties')
c2 = widgets.Checkbox(value=False, description='Show Centroids')
c3 = widgets.Checkbox(value=False, description='Show KDE Surface')
c4 = widgets.Checkbox(value=True, description='Weight by county areas')
c5 = widgets.Checkbox(value=True, description='Weight by years since detection')
Move the slider below to set the shape of this weighting curve.
*Note: changes to this curve do not update the map until one of the widgets below is triggered.*
# Exponent applied to each county's "years since detection" when computing
# its weight (weight = n**exponent); read globally by the plotting code.
weight_f = widgets.FloatSlider(
    value=2,
    min=0.0,
    max=10.0,
    step=0.1,
    description="Weight Exponent",
    layout={'width': '500px'})
def exponential(x, factor=2.0):
    """Return ``x`` raised to the power ``factor`` (squared by default)."""
    return pow(x, factor)
# Years each county has been WNS-positive relative to the most recent map
# year (0 for counties first detected in the latest year).
wns['years_pres'] = wns.year.max()-wns.year
def gen_weights(years_pres, factor=2.0):
    """Map each "years since detection" value n to a weight of n**factor.

    Returns a plain (unnormalized) list of weights, one per input value.
    """
    # The trivial exponential() helper is inlined here: weight = n**factor.
    return [n ** factor for n in years_pres]
def show_weight_curve(factor=2):
    """Plot the normalized weighting curve weight = n**factor as a preview."""
    # NOTE(review): range() stops one short of wns.years_pres.max() -- the
    # last year is excluded from the preview; confirm that is intentional.
    years = list(range(0, wns.years_pres.max()))
    weights = gen_weights(years, factor=factor)
    weights = weights / np.max(weights)  # normalize so the curve peaks at 1
    f, ax1 = plt.subplots(1, 1, figsize=(5,5))
    ax1.plot(years, weights)
    ax1.set_title('County Weights by years since detection')
    ax1.set_xlabel('Years since Detection (n)')
    ax1.set_ylabel('Weight')
    ax1.set_ylim(0,1.1)
    # LaTeX annotation of the curve's formula, e.g. $n^{2}$.
    tex = r'$n^{' + str(factor) + '}$'
    ax1.text(1, 0.8, tex, fontsize=20, va='bottom')
    plt.tight_layout()
# Live preview: redraws the curve whenever the Weight Exponent slider moves.
_ = interact(show_weight_curve, factor=weight_f)
--------------------------------------------------------------------------- NameError Traceback (most recent call last) <ipython-input-1-271b28361b9e> in <module>() ----> 1 weight_f = widgets.FloatSlider( 2 value=2, 3 min=0.0, 4 max=10.0, 5 step=0.1, NameError: name 'widgets' is not defined
%matplotlib inline
# Wire the widgets to interactive_plot.
# BUG FIX: the original passed the keyword ``weight_by_years``, which does
# not match interactive_plot's ``weight_by_year`` parameter, so the c5
# checkbox never controlled the year weighting.
_ = interact(interactive_plot, factor=f, iso=i, year=y,
             add_counties=c,
             add_centroids=c2,
             add_surface=c3,
             weight_by_county_area=c4,
             weight_by_year=c5)
interactive(children=(FloatSlider(value=1.2, continuous_update=False, description='BW Factor', max=4.0, min=0.…
# Redundant: continuous_update was already disabled when the isopleth slider
# was created; harmless re-assertion left over from notebook iteration.
i.continuous_update = False