• Convention: call the numerator input the "State"
• Convention: call the denominator input the "Quality"
In [4]:
%matplotlib inline
import numpy as np
from skimage import io
import matplotlib.pyplot as plt

In [51]:


Out[51]:
False
In [59]:
class stats:
    """Spatial (two-point) statistics of a discretized image/field.

    Conventions (see the notes at the top of the file):
      * the numerator input is the "State"   -> ``head``
      * the denominator input is the "Quality" -> ``tail``

    Parameters
    ----------
    head : ndarray, optional
        Reference (fixed) field.  Required unless ``url`` is given.
    tail : ndarray, optional
        Moving field; when None an autocorrelation of ``head`` is computed.
    url : str, optional
        Quickstart: download an image, binarize it, and use it as ``head``.
    normalize : bool
        Divide the raw correlation counts by the per-vector sample counts.
    periodic : array-like of int
        Axes along which the data is periodic (no zero padding on those axes).
    shift : bool
        Apply ``np.fft.fftshift`` so the zero vector sits at the center.
    display : bool
        Render the real part of the statistics with ``plt.pcolor``.
    cutoff : None | int | list | ndarray
        Per-axis truncation of vector magnitudes; None means "half the
        domain extent on every axis" (see ``SetCutoff``).
    """

    def __init__(self, head=None, tail=None, url=None, normalize=True,
                 periodic=np.array([]), shift=True, display=True,
                 cutoff=None):

        self.periodic = np.array(periodic)

        if url is None:
            # fix: `head` was never stored, yet Stats() reads self.head
            self.head = head
            self.tail = tail
        else:
            # URL is a quickstart option to compute statistics really fast
            # out of the box: fetch, binarize, autocorrelate.
            self.url = url
            self.head = Binarize(np.array(io.imread(url)))
            self.tail = None

        # Normalize `cutoff` to an ndarray.  fix: the None branch was empty
        # (a syntax error); defaulting to half the domain extent reproduces
        # the 3x3 statistics observed for a 5x5 input in the cells below.
        if cutoff is None:
            cutoff = SetCutoff(self.head.shape)
        elif isinstance(cutoff, int):
            cutoff = np.array([cutoff])
        elif isinstance(cutoff, list):
            cutoff = np.array(cutoff)
        self.cutoff = cutoff

        self.Stats(normalize, shift, display, cutoff)

    def Stats(self, normalize, shift, display, cutoff):
        """Compute, normalize, truncate, shift, and optionally plot the
        statistics; results land in ``self.spatial`` / ``self.vectors``."""

        # FFT-domain size: periodic axes keep their length, nonperiodic axes
        # are zero-padded.  fix: `sz` was previously undefined here.
        sz = Periodic2Size(self.head.shape, axes=self.periodic)

        self.spatial = convolve(self.head, self.tail, sz)
        # Signed unit-vector distances for each axis of the statistics.
        self.vectors = AppendIndex(self.head.shape, self.spatial)

        if normalize:
            # normalize passed from __init__
            method, denom = self.__Normalize()
            self.denom = denom
            self.spatial = np.divide(self.spatial, denom)

        # Slice/truncate statistics axis by axis.
        for ax in np.arange(len(self.spatial.shape)):
            # A short cutoff vector is broadcast: its last entry applies to
            # all remaining axes.
            cutoffid = min(cutoff.size - 1, ax)
            keep = np.abs(self.vectors[ax]) < cutoff[cutoffid]
            self.spatial = np.compress(keep, self.spatial, axis=ax)
            self.vectors[ax] = np.compress(keep, self.vectors[ax])

        if shift:
            # FFT shift the statistics and their index vectors together.
            self.spatial = np.fft.fftshift(self.spatial)
            for ii, ax in enumerate(self.vectors):
                self.vectors[ii] = np.fft.fftshift(ax)

        if display:
            # fix: the plot was emitted unconditionally; honor `display`
            # (In[7] below passes display=False).
            # Switch X,Y per pcolor's (C, X, Y) dimension convention:
            # http://stackoverflow.com/questions/24791614/
            plt.pcolor(self.vectors[1], self.vectors[0], self.spatial.real)

    def numer(self):
        """Back-calculate the unnormalized counts (numerator).

        Requires special treatment after slicing: the stored denominator is
        trimmed and shifted the same way ``spatial`` was, then multiplied
        back in.  Only valid when the statistics were normalized.
        """
        # Vector axes of the full (untrimmed) denominator.
        vectors = AppendIndex(self.head.shape, self.denom)

        # Trim the denominator with the same per-axis cutoff as `spatial`.
        denom = self.denom
        for ax in np.arange(len(denom.shape)):
            cutoffid = min(self.cutoff.size - 1, ax)
            denom = np.compress(
                np.abs(vectors[ax]) < self.cutoff[cutoffid],
                denom,
                axis=ax)

        # Unshifted statistics have zero as the first index on every axis.
        # fix: this read the *global* `s.vectors` instead of self.vectors.
        if not np.all([v[0] == 0 for v in self.vectors]):
            denom = np.fft.fftshift(denom)

        return np.multiply(self.spatial, denom)

    def __Normalize(self):
        """Compute the normalization denominator.

        The autocorrelation of an all-ones field counts, for each vector,
        how many head/tail sample pairs contributed — correct for both
        periodic axes (constant count) and zero-padded nonperiodic axes,
        so a single method covers every `periodic` setting.
        """
        sz = Periodic2Size(self.head.shape, axes=self.periodic)

        method = 'convolve'
        denom = convolve(np.ones(self.head.shape), tail=None, sz=sz)

        return method, denom

def npNormalization(sz, dims):
    """Placeholder for an analytic (non-FFT) normalization.

    dims is a d x 2 array whose first and second columns are the lower and
    upper bounds of the vector on each axis; the intent (per the original
    note) is a computation involving the Manhattan distance.  Not yet
    implemented — callers currently normalize via ``convolve`` instead.
    """
    # fix: the original body had unreachable image-loading code fused in
    # after this bare `return`; that code now lives in LoadImage() below.
    return None


def LoadImage(url):
    """Fetch an image from `url`, convert to ndarray, and binarize it.

    This is the quickstart path used when `stats` is handed a URL.
    """
    A = np.array(io.imread(url))

    # Process the image down to a {0, 1} field.
    A = Binarize(A)

    return A

def convolve(head, tail=None, sz=None):
    """FFT-based (cross-)correlation of `head` with `tail`.

    Parameters
    ----------
    head : ndarray
        Reference (fixed) feature field.
    tail : ndarray, optional
        Moving feature field; when None the autocorrelation of `head` is
        computed via Wiener-Khinchin.
    sz : sequence of int, optional
        FFT-domain size (zero-pads when larger than head.shape); defaults
        to head.shape (purely circular statistics).

    Returns
    -------
    ndarray of shape `sz`, real when the imaginary residue is negligible.
    """
    if sz is None:
        # fix: this branch body was missing (syntax error); with no padding
        # requested, transform over the data's own extent.
        sz = head.shape

    fA = np.fft.fftn(head, s=sz)

    if tail is None:
        # Wiener-Khinchin: autocorrelation from the power spectrum.
        # http://en.wikipedia.org/wiki/Wiener%E2%80%93Khinchin_theorem
        fA = np.abs(fA)
        fA = fA ** 2
    else:
        # Brute-force FFT approach for two different states.
        fA = np.multiply(fA,
                         np.conj(
                             np.fft.fftn(tail, s=sz)
                         ))

    fA = np.fft.ifftn(fA, s=sz)

    # Strip numerically-negligible imaginary parts.
    fA = np.real_if_close(fA)

    return fA

def AppendIndex(osz, fA):
    """Per-axis signed unit-vector distances for the statistics array.

    osz -- original size of the raw data (A.shape)
    fA  -- the (possibly padded) statistics array

    Indices beyond half the original extent wrap around to negative lags.
    Returns a list with one index vector per axis of `fA`.
    """
    indices = []
    for axis, length in enumerate(fA.shape):
        lag = np.arange(0, length)
        wrapped = lag > osz[axis] / 2
        lag[wrapped] = lag[wrapped] - length
        indices.append(lag)

    return indices

def SetCutoff(sz):
    """Default truncation criterion for the vectors: half of each domain
    extent, rounded down (returned as a float array, one entry per axis)."""
    half = np.array(sz) / 2
    return np.floor(half)

def Periodic2Size(oshape, axes, mult=.6):
    """Size of the FFT domain for the convolution.

    Every axis starts padded by a factor of `mult` (the nonperiodic case);
    axes listed in `axes` are periodic and keep their original extent.
    Returns an int array of per-axis FFT sizes.
    """
    base = np.array(oshape)
    grown = base + base * mult
    # Reassign periodic axes where they exist (no padding there).
    for ax in axes:
        grown[ax] = oshape[ax]
    return grown.round().astype('int')

def Binarize(A):
    """Threshold an 8-bit image to a {0.0, 1.0} field by rounding A/255."""
    scaled = A.astype('double') / 255
    return np.round(scaled)

In [63]:
A = np.zeros((5, 5))
A = np.eye(5, 5)  # overwrite: use a 5x5 identity as the test image
s = stats(head=A)
print(s.numer())  # parenthesized print behaves identically under py2

[[  4.00000000e+00   0.00000000e+00   5.55111512e-17]
[  0.00000000e+00   5.00000000e+00   0.00000000e+00]
[ -1.11022302e-16   0.00000000e+00   4.00000000e+00]]

In [28]:
s = stats(head=A)
s.numer().real  # notebook-style bare expression: displays the real counts

Out[28]:
array([[  98839.70867533 +1.19423439e-11j,
98743.97181312 +5.08162773e-11j,
98703.28805482 +3.90110405e-11j, ...,
111186.87637421 +1.51447200e-10j,
110677.37570426 +1.68153982e-10j,
110099.45797757 +1.77715004e-10j],
[  98634.77280396 +4.38013990e-11j,
98538.44642300 +7.87424116e-11j,
98512.61607253 +4.97933466e-11j, ...,
110777.05394361 +1.89441848e-10j,
110396.60381262 +2.05793298e-10j,
110016.20999482 +2.06630061e-10j],
[  98615.30384565 +4.87395867e-11j,
98466.70758052 +8.54524195e-11j,
98560.60794045 +7.68287176e-11j, ...,
110154.77830743 +1.95968334e-10j,
110132.46268657 +2.06491279e-10j,
109861.18913243 +2.34144394e-10j],
...,
[ 109815.35634598 +8.59878933e-11j,
110086.68916929 +1.18679831e-10j,
110109.16842432 +1.31699463e-10j, ...,
98273.03657601 +8.92171347e-11j,
98179.56436567 +1.22805210e-10j,
98327.88132465 +1.33444383e-10j],
[ 109970.61800943 +1.19896195e-10j,
110351.02704300 +1.52845499e-10j,
110731.49399307 +1.59493161e-10j, ...,
98225.45758298 +1.19505615e-10j,
98251.36692774 +1.40378638e-10j,
98347.56672675 +1.53089452e-10j],
[ 110054.13618272 +1.38828860e-10j,
110631.98930842 +1.60288292e-10j,
111141.45557327 +1.73157981e-10j, ...,
98415.84623601 +1.21052586e-10j,
98456.56612632 +1.46293094e-10j,
98552.17870886 +1.65763697e-10j]])

# Back Calculate Numerator

In [7]:
url = 'https://farm8.staticflickr.com/7368/12972465885_1bcb9717bd_c.jpg'
A = Binarize(A)
period = [0, 1]
s = stats(head=A, periodic=period, display=False)
print("Volume Fractions:: %f / %f" % (s.spatial.real.max(), s.head.mean()))
print("Maximum Imaginary Value :: " + str(s.spatial.imag.max()))
#  plt.imshow( s.numer().real )
# print "Counts:: %f / %f" % (s.numer().real.max(), s.head.sum())

Volume Fractions:: 0.468414 / 0.468414
Maximum Imaginary Value :: 1.20787761987e-15


# Autocorrelation :: Local Data :: Periodic

In [8]:
# url = 'https://farm8.staticflickr.com/7368/12972465885_1bcb9717bd_c.jpg'
# A = io.imread( url )
period = [0, 1]
s = stats(head=A, shift=True, normalize=True, periodic=period)
print("Volume Fractions:: %f / %f" % (s.spatial.real.max(), s.head.mean()))
print("Maximum Imaginary Value :: " + str(s.spatial.imag.max()))

Volume Fractions:: 0.468414 / 0.468414
Maximum Imaginary Value :: 1.20787761987e-15


# Cross-Correlation :: Local :: Periodic

In [9]:
#url = 'https://farm8.staticflickr.com/7368/12972465885_1bcb9717bd_c.jpg'
#A = Binarize( A )
B = 1 - A  # complementary phase for the cross-correlation
period = [0, 1]
s = stats(head=A, tail=B, periodic=period)
print("Minimum Magnitude:: %f" % (np.abs(s.spatial.real).min()))
print("Maximum Imaginary Value :: " + str(s.spatial.imag.max()))

Minimum Magnitude:: 0.000000
Maximum Imaginary Value :: 1.72904358514e-16


# Cross-Correlation :: Local :: Periodic :: Cutoff Axes

In [10]:
# url = 'https://farm8.staticflickr.com/7368/12972465885_1bcb9717bd_c.jpg'
# A = io.imread( url )
B = 1 - A
period = [0, 1]
# Per-axis truncation: 100 vectors on axis 0, 50 on axis 1.
s = stats(head=A, tail=B, periodic=period, cutoff=[100, 50])
print("Minimum Magnitude:: %f" % (np.abs(s.spatial.real).min()))
print("Maximum Imaginary Value :: " + str(s.spatial.imag.max()))

Minimum Magnitude:: 0.000000
Maximum Imaginary Value :: 1.72904358514e-16


# Cross-Correlation :: Local :: Periodic :: Cutoff Magnitude

In [11]:
B = 1 - A
period = [0, 1]
# A scalar cutoff applies the same magnitude bound on every axis.
s = stats(head=A, tail=B, periodic=period, cutoff=50)
print("Minimum Magnitude:: %f" % (np.abs(s.spatial.real).min()))
print("Maximum Imaginary Value :: " + str(s.spatial.imag.max()))

Minimum Magnitude:: 0.000000
Maximum Imaginary Value :: 1.72904358514e-16


# Cross Correlation :: Local Data

In [12]:
#url = 'https://farm8.staticflickr.com/7368/12972465885_1bcb9717bd_c.jpg'
#A = Binarize( A )
B = 1 - A
s = stats(head=A, tail=B)  # nonperiodic cross-correlation
print("Minimum Magnitude:: %f" % (np.abs(s.spatial.real).min()))
print("Maximum Imaginary Value :: " + str(s.spatial.imag.max()))

Minimum Magnitude:: 0.000000
Maximum Imaginary Value :: 2.94064460027e-16


# Autocorrelation :: Local Data

In [13]:
#url = 'https://farm8.staticflickr.com/7368/12972465885_1bcb9717bd_c.jpg'
#A = Binarize( A )
s = stats(head=A)  # nonperiodic autocorrelation
print("Volume Fractions:: %f / %f" % (s.spatial.real.max(), s.head.mean()))
print("Maximum Imaginary Value :: " + str(s.spatial.imag.max()))

Volume Fractions:: 0.468414 / 0.468414
Maximum Imaginary Value :: 5.40158167971e-16


# Partial Periodic

In [14]:
period = [1]  # periodic along axis 1 only
s = stats(head=A, periodic=period)
print("Volume Fractions:: %f / %f" % (s.spatial.real.max(), s.head.mean()))
print("Maximum Imaginary Value :: " + str(s.spatial.imag.max()))

Volume Fractions:: 0.468414 / 0.468414
Maximum Imaginary Value :: 9.06348009753e-16


# Quickstart From URL

In [16]:
s = stats(url=url)  # quickstart: fetch, binarize, and autocorrelate
print("Volume Fractions:: %f / %f" % (s.spatial.real.max(), s.head.mean()))
print("Maximum Imaginary Value :: " + str(s.spatial.imag.max()))

Volume Fractions:: 0.468414 / 0.468414
Maximum Imaginary Value :: 5.40158167971e-16

In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:
# Parametric analysis of imaginary components after commuting convolution
# functions with np.fft: compare explicit zero-padding (B) against fftn's
# `s` padding argument (A) across a range of domain sizes.

for v in np.arange(101, 1002, 100):
    sz = np.array((v, v))
    A = np.ones(shape=sz)

    # Explicitly embed A in a 2x-padded field of zeros.
    B = np.concatenate((
        np.concatenate((A, np.zeros(sz)), axis=0),
        np.concatenate((np.zeros(sz), np.zeros(sz)), axis=0)
    ), axis=1)

    fB = np.fft.fft2(B)
    fB = fB * np.conj(fB)
    fB = np.fft.ifft2(fB)

    # Let fft2 perform the padding via the `s` argument instead.
    fA = np.fft.fft2(A, s=2 * sz)
    fA = fA * np.conj(fA)
    fA = np.fft.ifft2(fA)

    # fix: py2-only `print v, a, b` statement; this form prints the same
    # text under both py2 and py3.
    print("%s %s %s" % (v, fA.imag.max(), fB.imag.max()))