import tensorly as tl
tl.set_backend('tensorflow')
import numpy as np
import tensorflow as tf
tfe = tf.contrib.eager
from tensorly.tucker_tensor import tucker_to_tensor
from tensorly.random import check_random_state
from tensorly.metrics import RMSE
# Console output on import: "Using numpy backend." then "Using tensorflow backend."
# Hyperparameters for the gradient-descent Tucker decomposition.
num_epochs = 1000  # number of optimisation steps
penalty = 0.0001   # L2 regularisation weight applied to each factor matrix
lr = 0.01          # learning rate for plain gradient descent
shape = [5, 5, 5]  # shape of the target tensor to decompose
rank = [5, 5, 5]   # Tucker rank, i.e. the shape of the core tensor
# Make the results reproducible by fixing the random seed.
# Fixed seed so every run draws the same random tensors.
random_state = 1234
# check_random_state returns a seeded NumPy-compatible RandomState.
rng = check_random_state(random_state)
# Create the tensor we want to decompose:
# Target tensor: random values wrapped in a TF eager Variable so it can take
# part in the eager computation (it is never updated by the optimiser).
tensor = tfe.Variable(tl.tensor(rng.random_sample(shape)))
# Initialise a random Tucker decomposition of that tensor (that is, the core
# tensor and the factors of the decomposition).
# Randomly initialise the Tucker decomposition: a core tensor of shape `rank`
# and one factor matrix per mode, each of shape (shape[i], rank[i]).
core = tfe.Variable(tl.tensor(rng.random_sample(rank)))
# Iterate over len(shape) modes directly instead of reaching into the private
# TF attribute tensor.get_shape()._dims, which is not part of the public API.
factors = [tfe.Variable(tl.tensor(rng.random_sample((shape[i], rank[i]))))
           for i in range(len(shape))]
# Let's define our optimiser.
# Plain gradient descent (TF 1.x optimizer API).
optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr)
# Now we just iterate through the training loop and backpropagate...
# Training loop: reconstruct the tensor from the current Tucker factors,
# minimise the reconstruction error plus an L2 penalty on the factors,
# and apply one gradient-descent step per epoch.
for epoch in range(num_epochs):
    with tfe.GradientTape() as tape:
        # Rebuild the full tensor from the core and the factor matrices.
        rec = tl.tucker_to_tensor(core, factors)
        loss_value = tf.norm(rec - tensor, ord=2)
        # L2 regularisation on each factor matrix.
        for f in factors:
            loss_value = loss_value + penalty * tf.norm(f, 2)
    # Gradients of the regularised loss w.r.t. the core and every factor.
    grads = tape.gradient(loss_value, [core] + factors)
    optimizer.apply_gradients(zip(grads, [core] + factors),
                              global_step=tf.train.get_or_create_global_step())
    if epoch % 100 == 0:
        # Only compute the (unpenalised) reconstruction error when reporting
        # it — the original recomputed it on every epoch for no benefit.
        rec_error = tl.norm(rec - tensor, 2)
        print("Epoch {:03d}: Loss: {:.3f}".format(epoch, rec_error))
# Expected output:
# Epoch 000: Loss: 118.707    Epoch 100: Loss: 2.810    Epoch 200: Loss: 2.653
# Epoch 300: Loss: 2.484      Epoch 400: Loss: 2.326    Epoch 500: Loss: 2.194
# Epoch 600: Loss: 2.057      Epoch 700: Loss: 1.868    Epoch 800: Loss: 1.633
# Epoch 900: Loss: 1.445