Bayesian Probabilistic Matrix Factorization - Autoregressive (BPMF-AR) model for spatiotemporal short-term prediction.
import numpy as np
from numpy import linalg as LA
from numpy.random import multivariate_normal
from scipy.stats import wishart
def Normal_Wishart(mu_0, lamb, W, nu, seed = None):
    """Draw a single sample (mu, Lambda) from a Normal-Wishart distribution.

    Lambda ~ Wishart(df = nu, scale = W), then
    mu | Lambda ~ N(mu_0, (lamb * Lambda)^{-1}).

    Parameters
    ----------
    mu_0 : (r,) array, mean of the conditional Gaussian.
    lamb : float, precision scaling factor of the Gaussian.
    W : (r, r) array, scale matrix of the Wishart.
    nu : float, Wishart degrees of freedom (must be >= r).
    seed : int or None. If given, BOTH draws are reproducible.

    Returns
    -------
    mu : (r,) Gaussian sample.
    Lambda : (r, r) Wishart sample.
    """
    # Bug fix: the original seeded only the Wishart draw; the Gaussian draw
    # still consumed the global RNG, so passing a seed did not actually make
    # the result reproducible. With seed=None behavior is unchanged.
    rng = np.random if seed is None else np.random.RandomState(seed)
    Lambda = wishart(df = nu, scale = W, seed = seed).rvs()
    # Covariance of mu given Lambda.
    cov = np.linalg.inv(lamb * Lambda)
    mu = rng.multivariate_normal(mu_0, cov)
    return mu, Lambda
Given two matrices $A\in\mathbb{R}^{m_1\times n_1}$ and $B\in\mathbb{R}^{m_2\times n_2}$, then, the Kronecker product between these two matrices is defined as
$$A\otimes B=\left[ \begin{array}{cccc} a_{11}B & a_{12}B & \cdots & a_{1n_1}B \\ a_{21}B & a_{22}B & \cdots & a_{2n_1}B \\ \vdots & \vdots & \ddots & \vdots \\ a_{m_11}B & a_{m_12}B & \cdots & a_{m_1n_1}B \\ \end{array} \right]$$where the symbol $\otimes$ denotes the Kronecker product, and the size of the resulting $A\otimes B$ is $(m_1m_2)\times (n_1n_2)$ (i.e., $m_1\times m_2$ rows and $n_1\times n_2$ columns).
If $A=\left[ \begin{array}{cc} 1 & 2 \\ 3 & 4 \\ \end{array} \right]$ and $B=\left[ \begin{array}{ccc} 5 & 6 & 7\\ 8 & 9 & 10 \\ \end{array} \right]$, then, we have
$$A\otimes B=\left[ \begin{array}{cc} 1\times \left[ \begin{array}{ccc} 5 & 6 & 7\\ 8 & 9 & 10\\ \end{array} \right] & 2\times \left[ \begin{array}{ccc} 5 & 6 & 7\\ 8 & 9 & 10\\ \end{array} \right] \\ 3\times \left[ \begin{array}{ccc} 5 & 6 & 7\\ 8 & 9 & 10\\ \end{array} \right] & 4\times \left[ \begin{array}{ccc} 5 & 6 & 7\\ 8 & 9 & 10\\ \end{array} \right] \\ \end{array} \right]$$$$=\left[ \begin{array}{cccccc} 5 & 6 & 7 & 10 & 12 & 14 \\ 8 & 9 & 10 & 16 & 18 & 20 \\ 15 & 18 & 21 & 20 & 24 & 28 \\ 24 & 27 & 30 & 32 & 36 & 40 \\ \end{array} \right]\in\mathbb{R}^{4\times 6}.$$kr_prod
Given two matrices $A=\left( \boldsymbol{a}_1,\boldsymbol{a}_2,...,\boldsymbol{a}_r \right)\in\mathbb{R}^{m\times r}$ and $B=\left( \boldsymbol{b}_1,\boldsymbol{b}_2,...,\boldsymbol{b}_r \right)\in\mathbb{R}^{n\times r}$ with the same number of columns, the Khatri-Rao product (or column-wise Kronecker product) between $A$ and $B$ is given as follows,
$$A\odot B=\left( \boldsymbol{a}_1\otimes \boldsymbol{b}_1,\boldsymbol{a}_2\otimes \boldsymbol{b}_2,...,\boldsymbol{a}_r\otimes \boldsymbol{b}_r \right)\in\mathbb{R}^{(mn)\times r}$$where the symbol $\odot$ denotes Khatri-Rao product, and $\otimes$ denotes Kronecker product.
If $A=\left[ \begin{array}{cc} 1 & 2 \\ 3 & 4 \\ \end{array} \right]=\left( \boldsymbol{a}_1,\boldsymbol{a}_2 \right) $ and $B=\left[ \begin{array}{cc} 5 & 6 \\ 7 & 8 \\ 9 & 10 \\ \end{array} \right]=\left( \boldsymbol{b}_1,\boldsymbol{b}_2 \right) $, then, we have
$$A\odot B=\left( \boldsymbol{a}_1\otimes \boldsymbol{b}_1,\boldsymbol{a}_2\otimes \boldsymbol{b}_2 \right) $$$$=\left[ \begin{array}{cc} \left[ \begin{array}{c} 1 \\ 3 \\ \end{array} \right]\otimes \left[ \begin{array}{c} 5 \\ 7 \\ 9 \\ \end{array} \right] & \left[ \begin{array}{c} 2 \\ 4 \\ \end{array} \right]\otimes \left[ \begin{array}{c} 6 \\ 8 \\ 10 \\ \end{array} \right] \\ \end{array} \right]$$$$=\left[ \begin{array}{cc} 5 & 12 \\ 7 & 16 \\ 9 & 20 \\ 15 & 24 \\ 21 & 32 \\ 27 & 40 \\ \end{array} \right]\in\mathbb{R}^{6\times 2}.$$def kr_prod(a, b):
    # Khatri-Rao (column-wise Kronecker) product: for a of shape (m, r) and
    # b of shape (n, r), pairs column r of each and returns shape (mn, r).
    return np.einsum('ir, jr -> ijr', a, b).reshape(a.shape[0] * b.shape[0], -1)
# Sanity check of kr_prod against the worked example above.
A = np.arange(1, 5).reshape(2, 2)        # [[1, 2], [3, 4]]
B = np.arange(5, 11).reshape(3, 2)       # [[5, 6], [7, 8], [9, 10]]
print(kr_prod(A, B))
[[ 5 12] [ 7 16] [ 9 20] [15 24] [21 32] [27 40]]
def BPMF(dense_mat, sparse_mat, binary_mat, W, X, maxiter1, maxiter2):
    """Bayesian Probabilistic Matrix Factorization via Gibbs sampling.

    Approximates sparse_mat by W @ X.T, with Gaussian-Wishart hyperpriors on
    the factor priors and a Gamma prior on the noise precision tau.

    Parameters
    ----------
    dense_mat : (dim1, dim2) ground truth, used only to report held-out RMSE.
    sparse_mat : (dim1, dim2) observed matrix; 0 encodes "missing".
    binary_mat : (dim1, dim2) indicator of observed entries (1 = observed).
    W : (dim1, rank) initial spatial factors. NOTE: mutated in place.
    X : (dim2, rank) initial temporal factors. NOTE: mutated in place.
    maxiter1 : int, number of burn-in Gibbs iterations.
    maxiter2 : int, number of post-burn-in draws averaged into the estimate.

    Returns
    -------
    mat_hat : (dim1, dim2) average of W @ X.T over the maxiter2 draws.
    W, X : factor matrices averaged over the maxiter2 draws.
    """
    dim1 = sparse_mat.shape[0]
    dim2 = sparse_mat.shape[1]
    rank = W.shape[1]
    # Held-out entries: present in the ground truth but masked from training.
    # Assumes 0 never occurs as a genuine measurement -- TODO confirm.
    pos = np.where((dense_mat>0) & (sparse_mat==0))
    # Training (observed) entries.
    position = np.where(sparse_mat > 0)
    # Gaussian-Wishart hyperprior parameters (weakly informative settings).
    beta0 = 1
    nu0 = rank
    mu0 = np.zeros((rank))
    tau = 1      # initial noise precision; resampled each sweep
    a0 = 1e-6    # Gamma shape prior on tau
    b0 = 1e-6    # Gamma rate prior on tau
    W0 = np.eye(rank)
    # ------------------------- burn-in Gibbs sweeps -------------------------
    for iter in range(maxiter1):
        # Sample the hyperparameters (var_mu0, var_Lambda0) of the W prior.
        W_bar = np.mean(W, axis = 0)
        var_mu0 = (dim1 * W_bar + beta0 * mu0)/(dim1 + beta0)
        var_nu = dim1 + nu0
        var_W = np.linalg.inv(np.linalg.inv(W0)
                              + dim1 * np.cov(W.T) + dim1 * beta0/(dim1 + beta0)
                              * np.outer(W_bar - mu0, W_bar - mu0))
        # Symmetrize to guard against floating-point asymmetry.
        var_W = (var_W + var_W.T)/2
        var_mu0, var_Lambda0 = Normal_Wishart(var_mu0, dim1 + beta0, var_W, var_nu, seed = None)
        # Sample each row of W from its Gaussian full conditional; var3/var4
        # hold the per-row precision matrices and linear terms.
        var1 = X.T
        var2 = kr_prod(var1, var1)
        var3 = tau * np.matmul(var2, binary_mat.T).reshape([rank, rank, dim1]) + np.dstack([var_Lambda0] * dim1)
        var4 = tau * np.matmul(var1, sparse_mat.T) + np.dstack([np.matmul(var_Lambda0, var_mu0)] * dim1)[0, :, :]
        for i in range(dim1):
            var_Lambda1 = var3[ :, :, i]
            inv_var_Lambda1 = np.linalg.inv((var_Lambda1 + var_Lambda1.T)/2)
            var_mu = np.matmul(inv_var_Lambda1, var4[:, i])
            W[i, :] = np.random.multivariate_normal(var_mu, inv_var_Lambda1)
        # Same two-stage update for the temporal factors X.
        X_bar = np.mean(X, axis = 0)
        var_mu0 = (dim2 * X_bar + beta0 * mu0)/(dim2 + beta0)
        var_nu = dim2 + nu0
        var_X = np.linalg.inv(np.linalg.inv(W0)
                              + dim2 * np.cov(X.T) + dim2 * beta0/(dim2 + beta0)
                              * np.outer(X_bar - mu0, X_bar - mu0))
        var_X = (var_X + var_X.T)/2
        var_mu0, var_Lambda0 = Normal_Wishart(var_mu0, dim2 + beta0, var_X, var_nu, seed = None)
        var1 = W.T
        var2 = kr_prod(var1, var1)
        var3 = tau * np.matmul(var2, binary_mat).reshape([rank, rank, dim2]) + np.dstack([var_Lambda0] * dim2)
        var4 = tau * np.matmul(var1, sparse_mat) + np.dstack([np.matmul(var_Lambda0, var_mu0)] * dim2)[0, :, :]
        for t in range(dim2):
            var_Lambda1 = var3[ :, :, t]
            inv_var_Lambda1 = np.linalg.inv((var_Lambda1 + var_Lambda1.T)/2)
            var_mu = np.matmul(inv_var_Lambda1, var4[:, t])
            X[t, :] = np.random.multivariate_normal(var_mu, inv_var_Lambda1)
        # Track held-out RMSE of the current draw, then resample tau from its
        # Gamma full conditional on the training residuals.
        mat_hat = np.matmul(W, X.T)
        rmse = np.sqrt(np.sum((dense_mat[pos] - mat_hat[pos])**2)/dense_mat[pos].shape[0])
        var_a = a0 + 0.5 * sparse_mat[position].shape[0]
        error = sparse_mat - mat_hat
        var_b = b0 + 0.5 * np.sum(error[position]**2)
        tau = np.random.gamma(var_a, 1/var_b)
        if (iter + 1) % 100 == 0:
            print('Iter: {}'.format(iter + 1))
            print('RMSE: {:.6}'.format(rmse))
            print()
    # ----------------- collection sweeps: average the draws -----------------
    W_plus = np.zeros((dim1, rank))
    X_plus = np.zeros((dim2, rank))
    mat_hat_plus = np.zeros((dim1, dim2))
    for iters in range(maxiter2):
        # Identical Gibbs sweep to the burn-in phase, but accumulate the
        # draws so the returned estimates are posterior means.
        W_bar = np.mean(W, axis = 0)
        var_mu0 = (dim1 * W_bar + beta0 * mu0)/(dim1 + beta0)
        var_nu = dim1 + nu0
        var_W = np.linalg.inv(np.linalg.inv(W0)
                              + dim1 * np.cov(W.T) + dim1 * beta0/(dim1 + beta0)
                              * np.outer(W_bar - mu0, W_bar - mu0))
        var_W = (var_W + var_W.T)/2
        var_mu0, var_Lambda0 = Normal_Wishart(var_mu0, dim1 + beta0, var_W, var_nu, seed = None)
        var1 = X.T
        var2 = kr_prod(var1, var1)
        var3 = tau * np.matmul(var2, binary_mat.T).reshape([rank, rank, dim1]) + np.dstack([var_Lambda0] * dim1)
        var4 = tau * np.matmul(var1, sparse_mat.T) + np.dstack([np.matmul(var_Lambda0, var_mu0)] * dim1)[0, :, :]
        for i in range(dim1):
            var_Lambda1 = var3[ :, :, i]
            inv_var_Lambda1 = np.linalg.inv((var_Lambda1 + var_Lambda1.T)/2)
            var_mu = np.matmul(inv_var_Lambda1, var4[:, i])
            W[i, :] = np.random.multivariate_normal(var_mu, inv_var_Lambda1)
        W_plus += W
        X_bar = np.mean(X, axis = 0)
        var_mu0 = (dim2 * X_bar + beta0 * mu0)/(dim2 + beta0)
        var_nu = dim2 + nu0
        var_X = np.linalg.inv(np.linalg.inv(W0)
                              + dim2 * np.cov(X.T) + dim2 * beta0/(dim2 + beta0)
                              * np.outer(X_bar - mu0, X_bar - mu0))
        var_X = (var_X + var_X.T)/2
        var_mu0, var_Lambda0 = Normal_Wishart(var_mu0, dim2 + beta0, var_X, var_nu, seed = None)
        var1 = W.T
        var2 = kr_prod(var1, var1)
        var3 = tau * np.matmul(var2, binary_mat).reshape([rank, rank, dim2]) + np.dstack([var_Lambda0] * dim2)
        var4 = tau * np.matmul(var1, sparse_mat) + np.dstack([np.matmul(var_Lambda0, var_mu0)] * dim2)[0, :, :]
        for t in range(dim2):
            var_Lambda1 = var3[ :, :, t]
            inv_var_Lambda1 = np.linalg.inv((var_Lambda1 + var_Lambda1.T)/2)
            var_mu = np.matmul(inv_var_Lambda1, var4[:, t])
            X[t, :] = np.random.multivariate_normal(var_mu, inv_var_Lambda1)
        X_plus += X
        mat_hat = np.matmul(W, X.T)
        mat_hat_plus += mat_hat
        var_a = a0 + 0.5 * sparse_mat[position].shape[0]
        error = sparse_mat - mat_hat
        var_b = b0 + 0.5 * np.sum(error[position]**2)
        tau = np.random.gamma(var_a, 1/var_b)
    # Posterior-mean estimates over the collection sweeps.
    W = W_plus/maxiter2
    X = X_plus/maxiter2
    mat_hat = mat_hat_plus/maxiter2
    # Report imputation accuracy on the held-out entries.
    final_mape = np.sum(np.abs(dense_mat[pos] -
                               mat_hat[pos])/dense_mat[pos])/dense_mat[pos].shape[0]
    final_rmse = np.sqrt(np.sum((dense_mat[pos] -
                                 mat_hat[pos])**2)/dense_mat[pos].shape[0])
    print('Final MAPE: {:.6}'.format(final_mape))
    print('Final RMSE: {:.6}'.format(final_rmse))
    print()
    return mat_hat, W, X
We consider a dataset of $m$ discrete time series $\boldsymbol{y}_{i}\in\mathbb{R}^{f},i\in\left\{1,2,...,m\right\}$. The time series may have missing elements. We express spatio-temporal dataset as a matrix $Y\in\mathbb{R}^{m\times f}$ with $m$ rows (e.g., locations) and $f$ columns (e.g., discrete time intervals),
$$Y=\left[ \begin{array}{cccc} y_{11} & y_{12} & \cdots & y_{1f} \\ y_{21} & y_{22} & \cdots & y_{2f} \\ \vdots & \vdots & \ddots & \vdots \\ y_{m1} & y_{m2} & \cdots & y_{mf} \\ \end{array} \right]\in\mathbb{R}^{m\times f}.$$We consider a dataset of $m$ discrete time series $\boldsymbol{y}_{i}\in\mathbb{R}^{nf},i\in\left\{1,2,...,m\right\}$. The time series may have missing elements. We partition each time series into intervals of predefined length $f$. We express each partitioned time series as a matrix $Y_{i}$ with $n$ rows (e.g., days) and $f$ columns (e.g., discrete time intervals per day),
$$Y_{i}=\left[ \begin{array}{cccc} y_{11} & y_{12} & \cdots & y_{1f} \\ y_{21} & y_{22} & \cdots & y_{2f} \\ \vdots & \vdots & \ddots & \vdots \\ y_{n1} & y_{n2} & \cdots & y_{nf} \\ \end{array} \right]\in\mathbb{R}^{n\times f},i=1,2,...,m,$$therefore, the resulting structure is a tensor $\mathcal{Y}\in\mathbb{R}^{m\times n\times f}$.
How to transform a data set into something we can use for time series prediction?
Now we have the data in an easy-to-use form by parsing Urban Traffic Speed Dataset of Guangzhou, China.
import scipy.io

# Guangzhou urban traffic speed data: a (road segment, day, time-of-day)
# tensor, plus pre-generated random fields used to build missing-data masks.
tensor = scipy.io.loadmat('Guangzhou-data-set/tensor.mat')['tensor']
random_matrix = scipy.io.loadmat('Guangzhou-data-set/random_matrix.mat')['random_matrix']
random_tensor = scipy.io.loadmat('Guangzhou-data-set/random_tensor.mat')['random_tensor']

# Unfold the tensor into a (segments) x (days * intervals-per-day) matrix.
dense_mat = tensor.reshape([tensor.shape[0], tensor.shape[1] * tensor.shape[2]])
missing_rate = 0.2

# =============================================================================
### Random missing (RM) scenario:
### ------------------------------
### missing rate |  0.2  |  0.4  |
### rank         |  80   |  80   |
### ------------------------------
### Set the RM scenario by:
dims = random_tensor.shape
binary_mat = np.round(random_tensor + 0.5 - missing_rate).reshape([dims[0], dims[1] * dims[2]])
# =============================================================================

# =============================================================================
### Non-random missing (NM) scenario:
### ------------------------------
### missing rate |  0.2  |  0.4  |
### rank         |  10   |  10   |
### ------------------------------
### Set the NM scenario by:
# binary_tensor = np.zeros(tensor.shape)
# for i1 in range(tensor.shape[0]):
#     for i2 in range(tensor.shape[1]):
#         binary_tensor[i1, i2, :] = np.round(random_matrix[i1, i2] + 0.5 - missing_rate)
# binary_mat = binary_tensor.reshape([binary_tensor.shape[0], binary_tensor.shape[1]
#                                     * binary_tensor.shape[2]])
# =============================================================================

# Element-wise mask: zero out the "missing" entries of the dense matrix.
sparse_mat = dense_mat * binary_mat
To state the task clearly: given a partially observed matrix $Y\in\mathbb{R}^{m\times T}$, how do we perform single-step rolling prediction starting at time interval $f+1$ and ending at time interval $T$?
The mechanism is:
First learn spatial factors $W\in\mathbb{R}^{m\times r}$, temporal factors $X\in\mathbb{R}^{f\times r}$, and AR coefficients $\boldsymbol{\theta}_{s}\in\mathbb{R}^{d},s=1,2,...,r$ from partially observed matrix $Y\in\mathbb{R}^{m\times f}$.
Predict $\boldsymbol{x}_{f+1}$ by
Load partially observed matrix $Y_{f}\in\mathbb{R}^{m\times b}$ ($b$ is the number of back steps) and fix spatial factors $W\in\mathbb{R}^{m\times r}$ and AR coefficients $\boldsymbol{\theta}_{s}\in\mathbb{R}^{d},s=1,2,...,r$, then learn temporal factors $X\in\mathbb{R}^{b\times r}$.
Compute the AR coefficients $\boldsymbol{\theta}_{s}\in\mathbb{R}^{d},s=1,2,...,r$ by
Predict $\boldsymbol{x}_{f+2}$ by
where $$Q=[\tilde{\boldsymbol{x}}_{h_d+1},\cdots,\tilde{\boldsymbol{x}}_{T}]^{\top}\in\mathbb{R}^{T'\times d}$$ (with $T'=T-h_d$) and $$P=[\boldsymbol{x}_{h_d+1},\cdots,\boldsymbol{x}_{T}]^{\top}.$$
def OfflineBPMF(sparse_mat, init, time_lags, maxiter1, maxiter2):
    """Offline Bayesian temporal matrix factorization for a rolling window.

    With the spatial factors W held fixed (taken from init), Gibbs-samples
    the temporal factors X for the window sparse_mat, then estimates
    per-factor AR coefficients theta from the averaged X.

    Parameters
    ----------
    sparse_mat : (dim1, dim2) observed window; 0 encodes "missing".
    init : dict with "W" (dim1, rank) fixed spatial factors and
        "X" (dim2, rank) initial temporal factors. NOTE: X is mutated
        in place during sampling.
    time_lags : (d,) integer array of AR lags.
    maxiter1 : int, burn-in Gibbs iterations.
    maxiter2 : int, post-burn-in draws averaged into the X estimate.

    Returns
    -------
    X : (dim2, rank) posterior-mean temporal factors.
    theta : (d, rank) AR coefficients, one column per latent factor.
    """
    W = init["W"]
    X = init["X"]
    d=time_lags.shape[0]
    dim1 = sparse_mat.shape[0]
    dim2 = sparse_mat.shape[1]
    rank = W.shape[1]
    # Observed entries; binary_mat is the matching 0/1 indicator.
    position = np.where(sparse_mat > 0)
    binary_mat = np.zeros((dim1, dim2))
    binary_mat[position] = 1
    # Noise-precision Gamma prior and Gaussian-Wishart hyperprior parameters.
    tau = 1
    alpha = 1e-6
    beta = 1e-6
    beta0 = 1
    nu0 = rank
    mu0 = np.zeros((rank))
    W0 = np.eye(rank)
    # ------------------------- burn-in Gibbs sweeps -------------------------
    for iter in range(maxiter1):
        # Sample the hyperparameters (var_mu0, var_Lambda0) of the X prior.
        X_bar = np.mean(X, axis = 0)
        var_mu0 = (dim2 * X_bar + beta0 * mu0)/(dim2 + beta0)
        var_nu = dim2 + nu0
        var_X = np.linalg.inv(np.linalg.inv(W0)
                              + dim2 * np.cov(X.T) + dim2 * beta0/(dim2 + beta0)
                              * np.outer(X_bar - mu0, X_bar - mu0))
        # Symmetrize to guard against floating-point asymmetry.
        var_X = (var_X + var_X.T)/2
        var_mu0, var_Lambda0 = Normal_Wishart(var_mu0, dim2 + beta0, var_X, var_nu, seed = None)
        # Sample each row of X from its Gaussian full conditional; var3/var4
        # hold the per-row precision matrices and linear terms.
        var1 = W.T
        var2 = kr_prod(var1, var1)
        var3 = tau * np.matmul(var2, binary_mat).reshape([rank, rank, dim2]) + np.dstack([var_Lambda0] * dim2)
        var4 = tau * np.matmul(var1, sparse_mat) + np.dstack([np.matmul(var_Lambda0, var_mu0)] * dim2)[0, :, :]
        for t in range(dim2):
            var_Lambda1 = var3[ :, :, t]
            inv_var_Lambda1 = np.linalg.inv((var_Lambda1 + var_Lambda1.T)/2)
            var_mu = np.matmul(inv_var_Lambda1, var4[:, t])
            X[t, :] = np.random.multivariate_normal(var_mu, inv_var_Lambda1)
        # Resample tau from its Gamma full conditional on the residuals.
        mat_hat = np.matmul(W, X.T)
        var_alpha = alpha + 0.5 * sparse_mat[position].shape[0]
        error = sparse_mat - mat_hat
        var_beta = beta + 0.5 * np.sum(error[position] ** 2)
        tau = np.random.gamma(var_alpha, 1/var_beta)
    # ----------------- collection sweeps: average the X draws -----------------
    X_plus = np.zeros((dim2, rank))
    for iter in range(maxiter2):
        X_bar = np.mean(X, axis = 0)
        var_mu0 = (dim2 * X_bar + beta0 * mu0)/(dim2 + beta0)
        var_nu = dim2 + nu0
        var_X = np.linalg.inv(np.linalg.inv(W0)
                              + dim2 * np.cov(X.T) + dim2 * beta0/(dim2 + beta0)
                              * np.outer(X_bar - mu0, X_bar - mu0))
        var_X = (var_X + var_X.T)/2
        var_mu0, var_Lambda0 = Normal_Wishart(var_mu0, dim2 + beta0, var_X, var_nu, seed = None)
        var1 = W.T
        var2 = kr_prod(var1, var1)
        var3 = tau * np.matmul(var2, binary_mat).reshape([rank, rank, dim2]) + np.dstack([var_Lambda0] * dim2)
        var4 = tau * np.matmul(var1, sparse_mat) + np.dstack([np.matmul(var_Lambda0, var_mu0)] * dim2)[0, :, :]
        for t in range(dim2):
            var_Lambda1 = var3[ :, :, t]
            inv_var_Lambda1 = np.linalg.inv((var_Lambda1 + var_Lambda1.T)/2)
            var_mu = np.matmul(inv_var_Lambda1, var4[:, t])
            X[t, :] = np.random.multivariate_normal(var_mu, inv_var_Lambda1)
        X_plus += X
        mat_hat = np.matmul(W, X.T)
        var_alpha = alpha + 0.5 * sparse_mat[position].shape[0]
        error = sparse_mat - mat_hat
        var_beta = beta + 0.5 * np.sum(error[position] ** 2)
        tau = np.random.gamma(var_alpha, 1/var_beta)
    # Posterior-mean temporal factors.
    X = X_plus/maxiter2
    # Ridge-regularized least-squares estimate of AR coefficients theta_s:
    # theta_s = (Q' Sigma_eta Q + Sigma_theta^{-1})^{-1} Q' Sigma_eta^{-1} P,
    # where Q stacks the lagged factor values and P the current ones.
    Sigma_eta = np.eye(dim2 - np.max(time_lags))
    Sigma_theta = np.eye(time_lags.shape[0])
    theta = np.zeros((time_lags.shape[0], rank))
    for s in range(rank):
        P = X[np.max(time_lags) : dim2, s]
        Q = np.zeros((dim2 - np.max(time_lags), time_lags.shape[0]))
        for t in range(np.max(time_lags), dim2):
            Q[t - np.max(time_lags), :] = X[t - time_lags, s]
        theta[:, s] = np.matmul(np.matmul(np.matmul(np.linalg.inv(np.matmul(np.matmul(Q.T, Sigma_eta), Q)
                                                                  + np.linalg.inv(Sigma_theta)),
                                                    Q.T), np.linalg.inv(Sigma_eta)), P)
    return X, theta
def st_prediction(dense_mat, sparse_mat, pred_time_steps, back_steps, rank, time_lags, maxiter):
    """Single-step rolling spatiotemporal prediction with BPMF + AR dynamics.

    Trains BPMF on the first T - pred_time_steps columns, fits per-factor AR
    coefficients on the learned temporal factors, then rolls forward one step
    at a time: re-learn X on a sliding back_steps window (W fixed) and
    extrapolate the next temporal factor via the AR model.

    Parameters
    ----------
    dense_mat : (m, T) ground-truth matrix, used for training and evaluation.
    sparse_mat : (m, T) partially observed matrix (0 = missing).
    pred_time_steps : int, number of trailing time steps to predict.
    back_steps : int, width of the rolling window of recent observations.
    rank : int, number of latent factors.
    time_lags : (d,) integer array of AR lags.
    maxiter : length-4 sequence — (burn-in, collection) iterations for the
        initial BPMF fit and for each rolling OfflineBPMF fit.

    Returns
    -------
    mat_hat : (m, pred_time_steps) predicted values for the horizon.
    """
    start_time = dense_mat.shape[1] - pred_time_steps
    dense_mat0 = dense_mat[:, 0 : start_time]
    sparse_mat0 = sparse_mat[:, 0 : start_time]
    binary_mat0 = np.zeros((sparse_mat0.shape[0], sparse_mat0.shape[1]))
    binary_mat0[np.where(sparse_mat0 > 0)] = 1
    dim1 = sparse_mat0.shape[0]
    dim2 = sparse_mat0.shape[1]
    # Prediction buffer: one column per forecast step.
    mat_hat = np.zeros((dim1, pred_time_steps))
    init = {"W": np.random.rand(dim1, rank),
            "X": np.random.rand(dim2, rank)}
    # Bug fix: the original assigned BPMF's training reconstruction to
    # mat_hat, clobbering the (dim1, pred_time_steps) buffer and returning a
    # (dim1, start_time) matrix. We only need the learned factors here.
    _, W, X = BPMF(dense_mat0, sparse_mat0, binary_mat0, init["W"], init["X"], maxiter[0], maxiter[1])
    init["W"] = W.copy()
    # Ridge-regularized least squares for AR coefficients theta (one column
    # of theta per latent factor), mirroring OfflineBPMF.
    Sigma_eta = np.eye(dim2 - np.max(time_lags))
    Sigma_theta = np.eye(time_lags.shape[0])
    theta = np.zeros((time_lags.shape[0], rank))
    for s in range(rank):
        P = X[np.max(time_lags) : dim2, s]
        Q = np.zeros((dim2 - np.max(time_lags), time_lags.shape[0]))
        for t in range(np.max(time_lags), dim2):
            Q[t - np.max(time_lags), :] = X[t - time_lags, s]
        theta[:, s] = np.matmul(np.matmul(np.matmul(np.linalg.inv(np.matmul(np.matmul(Q.T, Sigma_eta), Q)
                                                                  + np.linalg.inv(Sigma_theta)),
                                                    Q.T), np.linalg.inv(Sigma_eta)), P)
    # First forecast step: extrapolate the temporal factors one step ahead
    # with the AR model and project through the spatial factors.
    X0 = np.zeros((dim2 + 1, rank))
    X0[0 : dim2, :] = X.copy()
    X0[dim2, :] = np.einsum('ij, ij -> j', theta, X0[dim2 - time_lags, :])
    init["X"] = X0[X0.shape[0] - back_steps : X0.shape[0], :]
    mat_hat[:, 0] = np.matmul(W, X0[dim2, :])
    # Rolling prediction: slide the observation window one step per forecast.
    for t in range(1, pred_time_steps):
        sparse_mat1 = sparse_mat[:, start_time - back_steps + t : start_time + t]
        X, theta = OfflineBPMF(sparse_mat1, init, time_lags, maxiter[2], maxiter[3])
        X0 = np.zeros((back_steps + 1, rank))
        X0[0 : back_steps, :] = X.copy()
        X0[back_steps, :] = np.einsum('ij, ij -> j', theta, X0[back_steps - time_lags, :])
        # Warm-start the next window with the freshest back_steps factors.
        init["X"] = X0[1 : back_steps + 1, :]
        mat_hat[:, t] = np.matmul(W, X0[back_steps, :])
        if (t + 1) % 40 == 0:
            print('Time step: {}'.format(t + 1))
    # Evaluate on the ground-truth entries of the forecast horizon.
    small_dense_mat = dense_mat[:, start_time : dense_mat.shape[1]]
    pos = np.where(small_dense_mat > 0)
    final_mape = np.sum(np.abs(small_dense_mat[pos] -
                               mat_hat[pos])/small_dense_mat[pos])/small_dense_mat[pos].shape[0]
    final_rmse = np.sqrt(np.sum((small_dense_mat[pos] -
                                 mat_hat[pos]) ** 2)/small_dense_mat[pos].shape[0])
    print('Final MAPE: {:.6}'.format(final_mape))
    print('Final RMSE: {:.6}'.format(final_rmse))
    print()
    return mat_hat
The main influential factors for such prediction are:
- The number of back steps $b$ (`back_steps`).
- The low rank $r$ (`rank`).
- The numbers of burn-in and sampling iterations (`maxiter`).
- The AR time lags (`time_lags`).
import time

start = time.time()
# Forecast the last 5 days (144 intervals per day), using a 4-day rolling
# observation window.
pred_time_steps = 144 * 5
back_steps = 144 * 4 * 1
rank = 50
time_lags = np.array([1, 2, 144])
maxiter = np.array([100, 100, 10, 20])
num_cols = dense_mat.shape[1]
small_dense_mat = dense_mat[:, num_cols - pred_time_steps : num_cols]
mat_hat = st_prediction(dense_mat, sparse_mat, pred_time_steps, back_steps, rank, time_lags, maxiter)
end = time.time()
print('Running time: %d seconds'%(end - start))
Iter: 100 RMSE: 4.49363 Final MAPE: 0.0937335 Final RMSE: 4.01711 Time step: 40 Time step: 80 Time step: 120 Time step: 160 Time step: 200 Time step: 240 Time step: 280 Time step: 320 Time step: 360 Time step: 400 Time step: 440 Time step: 480 Time step: 520 Time step: 560 Time step: 600 Time step: 640 Time step: 680 Time step: 720 Final MAPE: 0.113364 Final RMSE: 4.42602 Running time: 19584 seconds
Experiment results of short-term traffic prediction with missing values using Bayesian temporal matrix factorization (BTMF):
scenario | back_steps |
rank |
time_lags |
maxiter |
mape | rmse |
---|---|---|---|---|---|---|
40%, NM | $144\times 2$ | 30 | (1,2,144) | (100,100,10,20) | 0.1203 | 4.7566 |
Original data | $144\times 4$ | 80 | (1,2,144) | (100,100,10,20) | 0.1045 | 4.1626 |
20%, RM | $144\times 4$ | 50 | (1,2,144) | (100,100,10,20) | 0.1134 | 4.4260 |
40%, NM | $144\times 4$ | 50 | (1,2,144) | (100,100,10,20) | 0.1190 | 4.6293 |
20%, NM | $144\times 4$ | 30 | (1,2,144) | (100,100,10,20) | 0.1132 | 4.4421 |
40%, NM | $144\times 4$ | 30 | (1,2,144) | (100,100,10,20) | 0.1180 | 4.6520 |