import autograd
import autograd.optimizers as optim
import autograd.numpy as np
from autograd import grad
import matplotlib.pyplot as plt
%matplotlib inline
You may find the following resources helpful for understanding how RNNs and LSTMs work:
# Load the Shakespeare text
with open('data/shakespeare.txt', 'r') as f:
text = f.read()
print("------------------------------")
# Print a sample of the text
print(text[:100])
data_length = len(text)
vocab = list(set(text))
vocab_size = len(vocab) # + 1 # The extra + 1 is for the end-of-string token
char_to_index = { char:index for (index,char) in enumerate(vocab) }
index_to_char = { index:char for (index,char) in enumerate(vocab) }
print("------------------------------")
print("TOTAL NUM CHARACTERS = {}".format(data_length))
print("NUM UNIQUE CHARACTERS = {}".format(vocab_size))
(Image from the Wild ML RNN Tutorial)
The update of an RNN is expressed by the following formulas:

$$ h_t = \tanh(U x_t + W h_{t-1} + b_h) $$

$$ y_t = \text{softmax}(V h_t + b_y) $$

Here, each $x_t$ is a one-hot encoding of a character---in this example, there are 65 unique characters. Since at each step the model takes a character as input and outputs a prediction for the next character in the sequence, both $x_t$ and $y_t$ are 65-dimensional vectors, i.e., $x_t, y_t \in \mathbb{R}^{65}$. We can choose any dimension for the hidden state $h_t$; in this case, we will use $h_t \in \mathbb{R}^{100}$. With this setup, the dimensions of $U$, $W$, and $V$ are $100 \times 65$, $100 \times 100$, and $65 \times 100$, respectively.
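As a quick sanity check of these dimensions, here is a minimal sketch that runs one update with randomly initialized matrices (the underscore-suffixed names and toy sizes are for illustration only and are not used later):
n_in, n_h = 65, 100                       # number of unique characters, hidden size
U_ = np.random.randn(n_h, n_in) * 0.01    # 100 x 65
W_ = np.random.randn(n_h, n_h) * 0.01     # 100 x 100
V_ = np.random.randn(n_in, n_h) * 0.01    # 65 x 100
x_t = np.zeros(n_in); x_t[0] = 1          # one-hot encoding of a character
h_t = np.tanh(np.dot(U_, x_t) + np.dot(W_, np.zeros(n_h)))   # biases omitted for brevity
print(h_t.shape, np.dot(V_, h_t).shape)   # (100,) (65,)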
For a vector $\mathbf{x}$, we have:
$$ \text{softmax}(\mathbf{x})_i = \frac{e^{\mathbf{x}_i}}{\sum_j e^{\mathbf{x}_j}} $$
# Warning: not numerically stable
def softmax_unstable(x):
return np.exp(x) / np.sum(np.exp(x))
softmax_unstable([1, 2, 1000])  # overflows: np.exp(1000) is inf, so the result contains nan
# Numerically stable version
def softmax(x):
exponential = np.exp(x - np.max(x))
return exponential / np.sum(exponential)
softmax([1,2,1000])  # stable: returns approximately [0., 0., 1.]
def log_softmax(x):
return np.log(softmax(x) + 1e-6)
log_softmax([1,2,1000])
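The `1e-6` term above avoids `log(0)` but slightly biases the result. For reference, here is a sketch of an alternative that applies the log-sum-exp trick directly and needs no additive constant (this helper is illustrative and not used elsewhere in the notebook):
def log_softmax_lse(x):
    # log softmax(x)_i = (x_i - max(x)) - log(sum_j exp(x_j - max(x)))
    x = np.array(x, dtype=float)
    shifted = x - np.max(x)
    return shifted - np.log(np.sum(np.exp(shifted)))

log_softmax_lse([1, 2, 1000])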
def initialize_params(input_size, hidden_size, output_size):
params = {
'U': np.random.randn(hidden_size, input_size) * 0.01,
'W': np.random.randn(hidden_size, hidden_size) * 0.01,
'V': np.random.randn(output_size, hidden_size) * 0.01,
'b_h': np.zeros(hidden_size),
'b_o': np.zeros(output_size)
}
return params
# test_params = initialize_params(2, 3, 2)
# for param in test_params:
# print("{} = \n{}\n".format(param, test_params[param]))
def initialize_hidden(hidden_size):
return np.zeros(hidden_size)
def model(params, x, h_prev):
h = np.tanh(np.dot(params['U'], x) + np.dot(params['W'], h_prev) + params['b_h'])
y = softmax(np.dot(params['V'], h) + params['b_o'])
return y, h
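As a quick usage sketch, a single forward step through `model` looks like the following (the throwaway underscore-prefixed variables and the seed character 'a' are illustrative assumptions):
_params = initialize_params(vocab_size, 100, vocab_size)
_x = np.zeros(vocab_size); _x[char_to_index['a']] = 1   # one-hot input character
_y, _h = model(_params, _x, initialize_hidden(100))
print(_y.shape, _y.sum())   # (vocab_size,) probabilities that sum to ~1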
def criterion(output, target):
"""Negative log-likelihood loss. Useful for training a classification problem with n classes.
"""
output = np.log(output)
return -output[target]
def loss(params, input_seq, target_seq, opts):
hidden = initialize_hidden(opts['hidden_size'])
loss = 0
for i in range(len(input_seq)):
# output, hidden = model(params, input_seq[i], hidden)
# loss += criterion(output, target_seq[i])
x = input_seq[i]
hidden = np.tanh(np.dot(params['U'], x) + np.dot(params['W'], hidden) + params['b_h'])
output = softmax(np.dot(params['V'], hidden) + params['b_o'])
loss += criterion(output, target_seq[i])
return loss
loss_grad = grad(loss)
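Because `grad` differentiates `loss` with respect to its first argument, `loss_grad` returns a dictionary with the same keys and shapes as `params`. A small sketch on a toy two-character sequence (the strings 'ab'/'bc' and the underscore-prefixed names are illustrative only):
_opts = {'input_size': vocab_size, 'hidden_size': 100, 'output_size': vocab_size}
_params = initialize_params(vocab_size, 100, vocab_size)
_inputs = [np.eye(vocab_size)[char_to_index[c]] for c in 'ab']   # one-hot inputs
_targets = [char_to_index[c] for c in 'bc']                      # indices of the next characters
_grads = loss_grad(_params, _inputs, _targets, _opts)
for name in _grads:
    print(name, _grads[name].shape)   # same shapes as the corresponding parameters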
def sgd(grad, init_params, callback=None, num_iters=200, step_size=0.1, mass=0.9):
"""Stochastic gradient descent with momentum.
grad() must have signature grad(x, i), where i is the iteration number."""
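The body of `sgd` is omitted above. A minimal completion sketch that follows the docstring's contract for a flat NumPy parameter array (in this notebook the dictionary-valued `params` are instead updated manually in the training loop below):
def sgd_flat(grad, init_params, callback=None, num_iters=200, step_size=0.1, mass=0.9):
    """Sketch of stochastic gradient descent with momentum on a flat parameter array."""
    x = np.copy(init_params)
    velocity = np.zeros(len(x))
    for i in range(num_iters):
        g = grad(x, i)                                  # grad(x, i): gradient at iteration i
        if callback:
            callback(x, i, g)
        velocity = mass * velocity - (1.0 - mass) * g   # momentum: moving average of negative gradients
        x = x + step_size * velocity                    # take a step along the velocity
    return x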
def sample(params, initial, length, opts):
hidden = initialize_hidden(opts['hidden_size'])
current_char = initial
final_string = initial
for i in range(length):
x = create_one_hot(char_to_index[current_char], opts['input_size'])
output, hidden = model(params, x, hidden)
p = output
current_index = np.random.choice(range(vocab_size), p=p.ravel())
current_char = index_to_char[current_index]
final_string += current_char
return final_string
def create_one_hot(j, length):
vec = np.zeros(length)
vec[j] = 1
return vec
# Use non-overlapping chunks of sequence_length characters for training
sequence_length = 50
data_length // sequence_length  # number of training chunks per epoch
num_epochs = 1
print_every = 100
evaluate_every = 100
lr = 1e-2
opts = {
'input_size': vocab_size,
'hidden_size': 100,
'output_size': vocab_size,
}
params = initialize_params(opts['input_size'], opts['hidden_size'], opts['output_size'])
for ep in range(num_epochs):
# i = 0
# while i * sequence_length + 1 < 10000:
    for i in range(data_length // sequence_length):
start = i * sequence_length
end = start + sequence_length + 1
chunk = text[start:end]
input_chars = chunk[:-1]
target_chars = chunk[1:]
input_seq = [char_to_index[c] for c in input_chars]
target_seq = [char_to_index[c] for c in target_chars]
input_seq_one_hot = [create_one_hot(j, vocab_size) for j in input_seq]
example_loss = loss(params, input_seq_one_hot, target_seq, opts)
grad_params = loss_grad(params, input_seq_one_hot, target_seq, opts)
for param in params:
gradient = np.clip(grad_params[param], -5, 5)
params[param] -= lr * gradient
if i % print_every == 0:
print("LOSS = {}".format(example_loss))
# print(grad_params)
if i % evaluate_every == 0:
sampled_string = sample(params, initial='a', length=100, opts=opts)
print(sampled_string)
# i += 1
"""
Minimal character-level Vanilla RNN model. Written by Andrej Karpathy (@karpathy)
BSD License
"""
import numpy as np
# data I/O
data = open('data/shakespeare.txt', 'r').read() # should be simple plain text file
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
print('data has %d characters, %d unique.' % (data_size, vocab_size))
char_to_ix = { ch:i for i,ch in enumerate(chars) }
ix_to_char = { i:ch for i,ch in enumerate(chars) }
# hyperparameters
hidden_size = 100 # size of hidden layer of neurons
seq_length = 50 # number of steps to unroll the RNN for
learning_rate = 1e-2
# model parameters
Wxh = np.random.randn(hidden_size, vocab_size) * 0.01 # input to hidden
Whh = np.random.randn(hidden_size, hidden_size) * 0.01 # hidden to hidden
Why = np.random.randn(vocab_size, hidden_size) * 0.01 # hidden to output
bh = np.zeros((hidden_size, 1)) # hidden bias
by = np.zeros((vocab_size, 1)) # output bias
def lossFun(inputs, targets, hprev):
"""
inputs,targets are both list of integers.
hprev is Hx1 array of initial hidden state
returns the loss, gradients on model parameters, and last hidden state
"""
xs, hs, ys, ps = {}, {}, {}, {}
hs[-1] = np.copy(hprev)
loss = 0
# forward pass
  for t in range(len(inputs)):
xs[t] = np.zeros((vocab_size,1)) # encode in 1-of-k representation
xs[t][inputs[t]] = 1
hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh) # hidden state
ys[t] = np.dot(Why, hs[t]) + by # unnormalized log probabilities for next chars
ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # probabilities for next chars
loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss)
# backward pass: compute gradients going backwards
dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
dbh, dby = np.zeros_like(bh), np.zeros_like(by)
dhnext = np.zeros_like(hs[0])
  for t in reversed(range(len(inputs))):
dy = np.copy(ps[t])
dy[targets[t]] -= 1 # backprop into y. see http://cs231n.github.io/neural-networks-case-study/#grad if confused here
dWhy += np.dot(dy, hs[t].T)
dby += dy
dh = np.dot(Why.T, dy) + dhnext # backprop into h
dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity
dbh += dhraw
dWxh += np.dot(dhraw, xs[t].T)
dWhh += np.dot(dhraw, hs[t-1].T)
dhnext = np.dot(Whh.T, dhraw)
# for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
# np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]
def sample(h, seed_ix, n):
"""
sample a sequence of integers from the model
h is memory state, seed_ix is seed letter for first time step
"""
x = np.zeros((vocab_size, 1))
x[seed_ix] = 1
ixes = []
  for t in range(n):
h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)
y = np.dot(Why, h) + by
p = np.exp(y) / np.sum(np.exp(y))
ix = np.random.choice(range(vocab_size), p=p.ravel())
x = np.zeros((vocab_size, 1))
x[ix] = 1
ixes.append(ix)
return ixes
n, p = 0, 0
mWxh, mWhh, mWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
mbh, mby = np.zeros_like(bh), np.zeros_like(by) # memory variables for Adagrad
smooth_loss = -np.log(1.0/vocab_size)*seq_length # loss at iteration 0
while True:
# prepare inputs (we're sweeping from left to right in steps seq_length long)
if p+seq_length+1 >= len(data) or n == 0:
hprev = np.zeros((hidden_size,1)) # reset RNN memory
p = 0 # go from start of data
inputs = [char_to_ix[ch] for ch in data[p:p+seq_length]]
targets = [char_to_ix[ch] for ch in data[p+1:p+seq_length+1]]
# sample from the model now and then
if n % 100 == 0:
sample_ix = sample(hprev, inputs[0], 200)
txt = ''.join(ix_to_char[ix] for ix in sample_ix)
    print('----\n %s \n----' % (txt, ))
hprev = np.zeros((hidden_size,1)) # reset RNN memory
# forward seq_length characters through the net and fetch gradient
loss, dWxh, dWhh, dWhy, dbh, dby, hprev = lossFun(inputs, targets, hprev)
  if n % 100 == 0: print('iter %d, loss: %f' % (n, loss)) # print progress
  # perform parameter update (plain SGD here; the Adagrad update is commented out below)
for param, dparam, mem in zip([Wxh, Whh, Why, bh, by],
[dWxh, dWhh, dWhy, dbh, dby],
[mWxh, mWhh, mWhy, mbh, mby]):
# mem += dparam * dparam
# param += -learning_rate * dparam / np.sqrt(mem + 1e-8) # adagrad update
param += -learning_rate * dparam
p += seq_length # move data pointer
n += 1 # iteration counter
The update of an LSTM is given by the following equations:
$$ i_t = \sigma(U_i x_t + W_i h_{t-1} + b_i) $$

$$ f_t = \sigma(U_f x_t + W_f h_{t-1} + b_f) $$

$$ o_t = \sigma(U_o x_t + W_o h_{t-1} + b_o) $$

$$ \tilde{C}_t = \tanh(U_C x_t + W_C h_{t-1} + b_C) $$

$$ C_t = i_t * \tilde{C}_t + f_t * C_{t-1} $$

$$ h_t = o_t * \tanh(C_t) $$
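As with the vanilla RNN above, these equations translate directly into a single step function. A minimal sketch of one LSTM step in the same autograd-compatible NumPy style (the parameter-dictionary layout and the `sigmoid` helper are illustrative assumptions, not code from earlier in the notebook):
def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def lstm_step(p, x, h_prev, C_prev):
    """One LSTM step following the equations above (sketch only)."""
    i = sigmoid(np.dot(p['U_i'], x) + np.dot(p['W_i'], h_prev) + p['b_i'])        # input gate
    f = sigmoid(np.dot(p['U_f'], x) + np.dot(p['W_f'], h_prev) + p['b_f'])        # forget gate
    o = sigmoid(np.dot(p['U_o'], x) + np.dot(p['W_o'], h_prev) + p['b_o'])        # output gate
    C_tilde = np.tanh(np.dot(p['U_C'], x) + np.dot(p['W_C'], h_prev) + p['b_C'])  # candidate cell state
    C = i * C_tilde + f * C_prev                                                   # new cell state
    h = o * np.tanh(C)                                                             # new hidden state
    return h, C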