# Compare a 1-D (scalar) Kalman filter against the full matrix Kalman filter
# when only one state variable is observed.
from __future__ import division
import numpy as np
def predict(pos, variance, movement, movement_variance):
    """Kalman predict step for a 1-D Gaussian state.

    Adds the movement Gaussian to the current estimate and returns the
    new (mean, variance) tuple.
    """
    predicted_mean = pos + movement
    predicted_variance = variance + movement_variance
    return (predicted_mean, predicted_variance)
def update(mean, variance, measurement, measurement_variance):
    """Kalman update step for a 1-D Gaussian state.

    Folds a measurement Gaussian into the state estimate; this is just
    Gaussian multiplication, so delegate to multiply().
    """
    return multiply(mean, variance,
                    measurement, measurement_variance)
def multiply(mu1, var1, mu2, var2):
    """Multiply the Gaussians N(mu1, var1) and N(mu2, var2).

    The product of two Gaussians is a Gaussian; its (mean, variance) is
    returned. This is the measurement-update rule for a scalar Kalman
    filter: the new mean is the variance-weighted average of the two
    means, and the new variance is the harmonic combination.
    """
    total = var1 + var2
    new_mean = (var1 * mu2 + var2 * mu1) / total
    new_variance = 1 / (1 / var1 + 1 / var2)
    return (new_mean, new_variance)
%matplotlib inline
from numpy.random import randn
import numpy as np
from filterpy.kalman import KalmanFilter
from filterpy.leastsq import LeastSquaresFilter
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
pylab.rcParams['figure.figsize'] = (16.0, 16.0)
lsq = LeastSquaresFilter(dt=1, order=2, noise_variance = 5)
f = KalmanFilter(dim_x=1, dim_z=1, dim_u=1)
f.P = 500
f.H = np.array([[1.]])
f.F = np.array([[1.]])
f.B = np.array([[1.]])
f.Q = 3
f.R = 5
pos = (0., 500.)
print (pos)
plt.figure()
for i in range(50):
z = i + randn()
pos = update(pos[0], pos[1], z, 5)
x = lsq(z)
f.update(z)
plt.scatter(i, pos[0], c='b')
plt.scatter(i, x, c='g')
plt.scatter(i, f.x[0,0], c='r')
assert abs(pos[0]- f.x[0,0]) < 1.e-12
pos = predict(pos[0], pos[1], 1., 3.)
f.predict(u=1)
plt.show()
# Output: (0.0, 500.0)
%matplotlib inline
from numpy.random import randn
import numpy as np
from filterpy.kalman import KalmanFilter
from filterpy.leastsq import LeastSquaresFilter
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
pylab.rcParams['figure.figsize'] = (16.0, 16.0)
lsq = LeastSquaresFilter(dt=1, order=2, noise_variance = 5)
f = KalmanFilter(dim_x=1, dim_z=1, dim_u=1)
f.P = 500
f.H = np.array([[1.]])
f.F = np.array([[1.]])
f.B = np.array([[1.]])
f.Q = .02
f.R = 5
kxs = []
lxs = []
kKs = []
lKs = []
zs = []
for i in range(500):
z = i + randn()*5
zs.append(z)
lsq(z)
f.update(z)
kxs.append(f.x[0,0])
lxs.append(lsq.x)
kKs.append(f.K[0,0])
lKs.append(lsq.K2)
f.predict(u=1)
plt.figure()
plt.plot (kxs, c='b')
plt.plot (lxs, c='g')
plt.plot (zs, c='r')
plt.figure()
plt.plot (kKs, c='b')
plt.plot (lKs, c='r')
plt.show()
print(lKs[-1])
print(kKs[-1])
# Output: 0.00014299687477634373
#         0.0612771680782
# pos,vel filter
%matplotlib inline
from numpy.random import randn
import numpy as np
from filterpy.kalman import KalmanFilter
from filterpy.leastsq import LeastSquaresFilter
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
pylab.rcParams['figure.figsize'] = (16.0, 16.0)
lsq = LeastSquaresFilter(dt=1, order=2, noise_variance = 5)
f = KalmanFilter(dim_x=2, dim_z=1, dim_u=0)
f.P *= 500
f.F = np.array([[1., 1.],
[0., 1.]])
f.H = np.array([[1., 0.]])
f.Q *= .001
f.R *= 5
kxs = []
lxs = []
kKs = []
lKs = []
zs = []
for i in range(100):
z = i + randn()*5
zs.append(z)
lsq(z)
f.update(z)
kxs.append(f.x[0,0])
lxs.append(lsq.x)
kKs.append(f.K[0,0])
lKs.append(lsq.K2)
f.predict(u=1)
plt.figure()
plt.plot (kxs, c='b')
plt.plot (lxs, c='g')
plt.plot (zs, c='r')
plt.figure()
plt.plot (kKs, c='b')
plt.plot (lKs, c='r')
plt.show()
#print(f.K)
# Output: [[ 0.15538196]
#          [ 0.01299707]]
#testing a constant K gain filter implementation
from numpy import dot
class kf(object):
    """Minimal constant-gain Kalman filter.

    Instead of propagating the covariance P and recomputing the gain
    every step, this filter uses a fixed, precomputed gain K, so only
    the state estimate x is updated.

    Attributes (scalars by default; may be replaced with numpy arrays):
        H: measurement function
        x: state estimate
        y: last measurement residual
        F: state transition matrix
        B: control transition matrix
        K: fixed (steady-state) Kalman gain
    """

    def __init__(self):
        self.H = 1
        self.x = 0
        self.y = 0
        self.F = 1
        self.B = 0
        self.K = 1

    def update(self, Z):
        """Incorporate the measurement Z into the state estimate."""
        # y = Z - Hx : measurement residual
        self.y = Z - dot(self.H, self.x)
        # x = x + Ky : correct the prediction with the gain-scaled residual
        self.x = self.x + dot(self.K, self.y)

    def predict(self, u=0):
        """Propagate the state one step forward: x = Fx + Bu."""
        # BUG FIX: the original assigned to self._x, so predict() never
        # actually advanced the state estimate.
        self.x = dot(self.F, self.x) + dot(self.B, u)
# Drive the constant-gain filter alongside the least-squares filter,
# using the steady-state gain found by the full Kalman filter above.
k = kf()
k.x = np.array([[1., 0.]]).T
k.F = np.array([[1., 1.],
                [0., 1.]])             # constant-velocity model
k.H = np.array([[1., 0.]])             # measure position only
k.K = np.array([[0.15538196, 0.01299707]]).T   # fixed steady-state gain

lsq = LeastSquaresFilter(dt=1, order=2, noise_variance=5)

kxs, lxs, zs = [], [], []
for step in range(1, 100):
    z = step + randn() * 5
    zs.append(z)
    lsq(z)
    k.predict()
    k.update(z)
    kxs.append(k.x[0,0])
    lxs.append(lsq.x)

plt.figure()
plt.plot(kxs, c='b')
plt.plot(lxs, c='g')
plt.plot(zs, c='r')
plt.show()