#!/usr/bin/env python
# coding: utf-8
# # SYDE 556/750: Simulating Neurobiological Systems
#
# ## Administration
#
# - 2-slide presentations on project topics
#     - Mar 27th: Yun, Dameracharla, St George, Kim, McLeod, Bulger, Jackes, Lu, Gravel, Ige, Vaidyanathan, Zhang
#     - Apr 1st: Newhook, Strang, Shalekeen, Cooke, Li, Koh, Peshimam, Park, Bianchini, Weng, Dumont
#     - Apr 3rd: Sathiyaseelan, Luo, Lee, Chopra, Ferrabee, Tong, Yao, Selby, Skupien, Wiyatno, Abbasi Azadgoleh
# ## Memory
#
# Readings: [Serial Working Memory](http://compneuro.uwaterloo.ca/files/publications/choo.2010a.pdf); [Associative Memory](http://compneuro.uwaterloo.ca/files/publications/stewart.2011.pdf)
#
# - We've seen how to represent symbol-like structures using vectors
#     - Typically high-dimensional vectors
# - How can we store those over short periods of time (working memory)?
# - Over long periods of time?
# - Recall Jackendoff's 4th challenge (the same representation in long-term and working memory)
# ## Attractor networks
# - An attractor in dynamical systems theory is a system state (or set of states) towards which other states tend over time
# - The standard analogy is to imagine the state space as a 'hill-like' landscape through which a ball rolls (tending downhill); see the sketch below
#
#
#
# - In neural network research, attractor networks (networks with dynamical attractors) have long been thought relevant for various behaviours
#     - e.g., memory, integration, off-line updating of representations, repetitive pattern generation, noise reduction, etc.
#
#
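# - A small illustrative sketch (not course code): the 1-D system $\dot{x} = x - x^3$ has point attractors at $x = \pm 1$; wherever the ball starts, it rolls downhill to one of them
# In[ ]:
import numpy as np
import matplotlib.pyplot as plt

dt = 0.01
t = np.arange(0, 5, dt)
for x0 in np.linspace(-2, 2, 9):
    x = np.empty_like(t)
    x[0] = x0
    for i in range(1, len(t)):
        x[i] = x[i - 1] + dt * (x[i - 1] - x[i - 1] ** 3)  # Euler step of dx/dt = x - x^3
    plt.plot(t, x)
plt.xlabel('time [s]')
plt.ylabel('x');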
# - The neural integrator is an example of an attractor network
#
# - The neural integrator can be thought of as a line attractor, as in a)
#     - In practice it is only approximately one, as in b)
#
#
#
# - Attractor networks were extensively examined in the ANN community (e.g. Hopfield nets). Amit suggested that persistent neural activity could be associated with recurrent biological networks
# - Persistent activity is found in motor, premotor, parietal, prefrontal, frontal, hippocampal, and inferotemporal cortex; and in the basal ganglia, midbrain, superior colliculus, and brainstem
# - The focus to date has often been on simple attractors (e.g. points and lines)
# - The NEF lets us easily generalize
# ## Generalizing dynamics
#
# - A cyclic attractor (i.e. a set of attractive states that the system continually moves through; a limit cycle)
#
#
#
# - Note: The hill and ball analogy doesn't work anymore.
#
# - This is an oscillator. Technically a nonlinear one, since we want it to be stable (the simple harmonic oscillator is linear, and linear oscillations are only neutrally stable). Let's build a stable oscillator.
# In[45]:
get_ipython().run_line_magic('pylab', 'inline')
import nengo
from nengo.utils.ensemble import response_curves
from nengo.dists import Uniform
model = nengo.Network('Oscillator')
freq = .25
scale = 1.1
N = 300
with model:
    # (This cell was truncated in the notes; below is a plausible reconstruction.)
    # A brief kick starts the oscillator; the recurrent transform rotates the
    # state (freq) and scales it up (scale > 1), while saturation at the
    # radius bounds the growth, giving a stable nonlinear oscillation.
    stim = nengo.Node(lambda t: [.5, .5] if 0.1 < t < 0.12 else [0, 0])
    osc = nengo.Ensemble(N, dimensions=2)
    nengo.Connection(stim, osc)
    nengo.Connection(osc, osc, transform=[[scale, freq], [-freq, scale]], synapse=.1)
    osc_p = nengo.Probe(osc, synapse=.03)
sim = nengo.Simulator(model)
sim.run(4)
plot(sim.trange(), sim.data[osc_p]);
#
# - For higher-dimensional memories (plane attractors), the attractor space is much more complicated, so you need many more neurons to do as good a job (the required number scales roughly as $N^D$, i.e. exponentially in the dimensionality)
# In[57]:
get_ipython().run_line_magic('pylab', 'inline')
#A 1D integrator with a few hundred neurons works great
import nengo
from nengo.utils.functions import piecewise
model = nengo.Network(label='1D Line Attractor', seed=5)
N = 300
tau = 0.01
with model:
    stim = nengo.Node(piecewise({.3: [1], .5: [0]}))
    neurons = nengo.Ensemble(N, dimensions=1)
    # NEF integrator: scale the input by tau and feed the ensemble's
    # output back to itself through a synapse with the same tau
    nengo.Connection(stim, neurons, transform=tau, synapse=tau)
    nengo.Connection(neurons, neurons, synapse=tau)
    stim_p = nengo.Probe(stim)
    neurons_p = nengo.Probe(neurons, synapse=.01)
sim = nengo.Simulator(model)
sim.run(4)
t=sim.trange()
plot(t, sim.data[stim_p], label = "stim")
plot(t, sim.data[neurons_p], label = "position")
legend(loc="best");
# In[61]:
#Need lots of neurons to get reasonable performance in higher D
import nengo
from nengo.utils.functions import piecewise
model = nengo.Network(label='2D Plane Attractor', seed=4)
N = 2000 #600
tau = 0.01
with model:
    stim = nengo.Node(piecewise({.3: [1, -1], .5: [0, 0]}))
    neurons = nengo.Ensemble(N, dimensions=2)
    nengo.Connection(stim, neurons, transform=tau, synapse=tau)
    nengo.Connection(neurons, neurons, synapse=tau)
    stim_p = nengo.Probe(stim)
    neurons_p = nengo.Probe(neurons, synapse=.01)
sim = nengo.Simulator(model)
sim.run(4)
t=sim.trange()
plot(t, sim.data[stim_p], label = "stim")
plot(t, sim.data[neurons_p], label = "position")
legend(loc="best");
# In[62]:
#Note that the representation saturates at the radius
with model:
    stim.output = piecewise({.2: [1, -1], 1.2: [0, 0]})
sim = nengo.Simulator(model)
sim.run(4)
t=sim.trange()
plot(t, sim.data[stim_p], label = "stim")
plot(t, sim.data[neurons_p], label = "position");
# - So for higher dimensional memories, you may want the dimensions to be independent
# - To make this easy, Nengo has 'ensemble arrays'
# In[63]:
model = nengo.Network(label='Ensemble Array', seed=123)
N = 300 #neurons per sub_ensemble
tau = 0.01
with model:
    stim = nengo.Node(piecewise({.3: [1, -1], .5: [0, 0]}))
    neurons = nengo.networks.EnsembleArray(N, n_ensembles=2)
    nengo.Connection(stim, neurons.input, transform=tau, synapse=tau)
    nengo.Connection(neurons.output, neurons.input, synapse=tau)
    stim_p = nengo.Probe(stim)
    neurons_p = nengo.Probe(neurons.output, synapse=.01)
sim = nengo.Simulator(model)
sim.run(4)
t=sim.trange()
plot(t, sim.data[stim_p], label = "stim")
plot(t, sim.data[neurons_p], label = "position");
# In[64]:
from nengo_gui.ipython import IPythonViz
IPythonViz(model, "configs/ensemble_array.py.cfg")
# - And saturation effects are quite different
# In[65]:
# Note that each dimension now saturates independently, rather than the
# whole vector saturating at the radius
with model:
    stim.output = piecewise({.2: [1, -1], 1.2: [0, 0]})
sim = nengo.Simulator(model)
sim.run(4)
t=sim.trange()
plot(t, sim.data[stim_p], label = "stim")
plot(t, sim.data[neurons_p], label = "position");
# ## Working memory
#
# - We can build high-dimensional working memories using plane attractors
# - In general we want some saturation, and some independence between dimensions
#     - This gives both a 'soft normalization'
#     - And good temporal stability with fewer neurons
#
# In[66]:
import nengo
import nengo_spa as spa
def colour_input(t):
    if t < 0.15:
        return 'BLUE'
    elif 1.0 < t < 1.15:
        return 'GREEN'
    elif 1.7 < t < 1.85:
        return 'RED'
    else:
        return '0'
model = spa.Network(label="HighD Working Memory", seed=5)
dimensions = 32
with model:
    colour_in = spa.Transcode(colour_input, output_vocab=dimensions)
    mem = spa.State(dimensions, subdimensions=4, feedback=1.,
                    feedback_synapse=0.1,
                    neurons_per_dimension=50)
    # Connect the ensembles
    colour_in >> mem
    model.config[nengo.Probe].synapse = nengo.Lowpass(0.03)
    colour_in_p = nengo.Probe(colour_in.output)
    mem_p = nengo.Probe(mem.output)
sim = nengo.Simulator(model)
sim.run(3.)
plt.figure(figsize=(10, 10))
vocab = model.vocabs[dimensions]
plt.subplot(2, 1, 1)
plt.plot(sim.trange(), spa.similarity(sim.data[colour_in_p], vocab))
plt.legend(vocab.keys(), fontsize='x-small')
plt.ylabel("colour")
plt.subplot(2, 1, 2)
plt.plot(sim.trange(), spa.similarity(sim.data[mem_p], vocab))
plt.legend(vocab.keys(), fontsize='x-small')
plt.ylabel("memory")
plt.xlabel("time [s]");
# In[67]:
from nengo_gui.ipython import IPythonViz
IPythonViz(model, "configs/simple_spa_wm.py.cfg")
# In[21]:
# model.all_ensembles has all the neurons
model.all_ensembles[2].n_neurons
# - You can see interference effects here
# - If you recalled things from this memory, you'd probably find a 'recency effect'
#     - i.e. the things most recently put in memory would be best recalled
#
# - This is seen in human memory, but so is primacy
# - How could we get primacy?
# In[68]:
import nengo
import nengo_spa as spa
def colour_input(t):
    if t < 0.15:
        return 'BLUE'
    elif 1.0 < t < 1.15:
        return 'GREEN'
    elif 1.7 < t < 1.85:
        return 'RED'
    else:
        return '0'
model = spa.Network(label="HighD Working Memory", seed=3)
dimensions = 32
with model:
    colour_in = spa.Transcode(colour_input, output_vocab=dimensions)
    # feedback > 1 makes whatever is stored grow over time (primacy)
    mem = spa.State(dimensions, subdimensions=4, feedback=1.2,
                    feedback_synapse=0.1,
                    neurons_per_dimension=50)
    # Connect the ensembles
    colour_in >> mem
    model.config[nengo.Probe].synapse = nengo.Lowpass(0.03)
    colour_in_p = nengo.Probe(colour_in.output)
    mem_p = nengo.Probe(mem.output)
# In[29]:
from nengo_gui.ipython import IPythonViz
IPythonViz(model, "configs/simple_spa_wm_primacy.py.cfg")
# In[69]:
sim = nengo.Simulator(model)
sim.run(3.)
plt.figure(figsize=(10, 10))
vocab = model.vocabs[dimensions]
plt.subplot(2, 1, 1)
plt.plot(sim.trange(), spa.similarity(sim.data[colour_in_p], vocab))
plt.legend(vocab.keys(), fontsize='x-small')
plt.ylabel("colour")
plt.subplot(2, 1, 2)
plt.plot(sim.trange(), spa.similarity(sim.data[mem_p], vocab))
plt.legend(vocab.keys(), fontsize='x-small')
plt.ylabel("memory")
plt.xlabel("time [s]");
# - By making the recurrent connection weight greater than 1, we made whatever is in memory 'grow' over time
# - This gives a 'primacy' effect
#     - i.e. whatever you ran into first is what you remember best
# - You could also get this by changing connection weights
#
# - Together, primacy and recency capture the most salient properties of human working memory
# - You see these effects in both 'free recall' and 'serial recall'
#
#
#
# - The networks we've seen so far would presumably work well for free recall
# - How to include order information?
# ## Serial Working Memory
#
# - An ordered list is a very simple kind of structure
# - To represent structures, we can use what we learned last time: vector binding
# - Specifically, Semantic Pointers
#
# - In this case, we can do something like:
#
# $Pos_0\circledast Item_0 + Pos_1\circledast Item_1 + ...$
#
# - The $Item$ can be just whatever vector we want to remember
# - How should we generate $Pos$? What features does it need?
# - It would be good if we could generate them on the fly, forever
# - It would be good if $Pos_0$ "came before" $Pos_1$ in some sense
#
#
# - There is a kind of vector that's ideal for this, called a 'unitary vector'
# - A unitary vector does not change length when convolved
#     - So, you can convolve forever
# - The convolution of a unitary vector with itself is 'unlike' the original vector
#     - So, you can go forward/backward with convolution/correlation
# - This gives a way of 'counting' positions (see the sketch below):
#     - $Pos_0 \circledast PlusOne = Pos_1$
#     - $Pos_1 \circledast PlusOne = Pos_2$
#     - $Pos_2 \circledast PlusOne = Pos_3$
#     - etc.
# - And, $Pos_3 \circledast PlusOne' = Pos_2$, where $PlusOne'$ denotes the approximate inverse (correlation)
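#
# - A minimal NumPy sketch of this scheme (illustrative, not course code; $D = 256$ and the specific random vectors are assumptions): positions are generated by repeatedly convolving a unitary $PlusOne$, items are bound to positions and summed, and an item is recovered by unbinding its position
# In[ ]:
import numpy as np

rng = np.random.RandomState(0)
D = 256

def cconv(a, b):
    # circular convolution via FFT
    return np.fft.irfft(np.fft.rfft(a) * np.fft.rfft(b), n=len(a))

def inv(a):
    # involution: the approximate inverse under circular convolution
    return np.concatenate([a[:1], a[:0:-1]])

def make_unitary(v):
    # set every Fourier coefficient to unit magnitude, so convolving
    # with this vector never changes another vector's length
    V = np.fft.fft(v)
    return np.real(np.fft.ifft(V / np.abs(V)))

plus_one = make_unitary(rng.randn(D))
pos = [make_unitary(rng.randn(D))]                 # Pos_0
for _ in range(3):
    pos.append(cconv(pos[-1], plus_one))           # Pos_{i+1} = Pos_i (*) PlusOne
print([round(np.linalg.norm(p), 3) for p in pos])  # lengths all stay 1.0

items = rng.randn(3, D) / np.sqrt(D)               # Item_0..Item_2, ~unit length
memory = sum(cconv(p, i) for p, i in zip(pos, items))

guess = cconv(memory, inv(pos[1]))                 # unbind position 1
print([round(float(np.dot(guess, i)), 2) for i in items])  # Item_1 stands out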
#
# - We can put this representation together with primacy and recency, and we get something like this
#
#
#
# - To do 'recall' from this representation, we want to remember what we've already recalled, so we can add a circuit like this
#
#
#
#
# - This does a pretty good job of capturing lots of working memory data
#
#
#
#
# - Order effects
#
#
#
#
# - Error probability of items in the list (closer items are more likely to be incorrect, unless you change similarity between items)
#
#
#
#
#
#
# - Item similarity effects
#
#
#
#
# - Arbitrary list lengths
#
# - This model can actually do lots more as well
#     - backwards recall, etc.
# - A slight variation does free recall
#     - you have to include non-bound item representations as well
# - You'll notice that it relies on a 'clean-up'
#     - This is a kind of long-term memory
#     - We saw this show up before when talking about symbols
# - How can we build such a thing in spiking neurons?
# ## Long term memory
#
#
# - In order for memories to be stable over the long term, they must be encoded in connection weights
# - These kinds of memories are often called 'associative memories'
#     - Auto-associative: recall the same thing you're shown
#         - e.g. for noise reduction, pattern completion
#     - Hetero-associative: recall some arbitrary association
#         - e.g. for capturing domain relationships, reasoning
#
#
# - Typical solutions in ANNs are (see the sketch below):
#     - Hopfield networks: a many-point attractor network
#     - Linear associators: do SVD on the input/output pairs to get a weight matrix
#     - Multi-layer perceptrons (MLPs): use backprop to train a network to compute the feedforward mapping
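#
# - As a minimal illustration of the linear associator idea (not from the readings): solve for a weight matrix $W$ mapping input patterns to output patterns by least squares, which is exactly what the SVD-based pseudoinverse computes
# In[ ]:
import numpy as np

rng = np.random.RandomState(0)
D, n_pairs = 32, 5
X = rng.randn(n_pairs, D)   # input patterns, one per row
Y = rng.randn(n_pairs, D)   # the outputs to associate with them

W = np.linalg.pinv(X) @ Y   # W minimizes ||XW - Y||^2

recalled = X[2] @ W         # cue with the third input pattern
print(np.corrcoef(recalled, Y[2])[0, 1])  # ~1.0: the association is recalled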
#
#
#
#
#
# - A common concern is how these mappings can be learned
#     - We'll talk about learning in a later lecture
#     - Often the learning is so un-'biologically plausible' that it's best thought of as an optimization (like least squares in the NEF)
#
# - How do we compute these kinds of mappings with the NEF/SPA?
#     - We want to 'recognize' specific vectors (in a vocabulary)
#     - We want only some neurons to fire when a given vector is present (sparse)
#     - We want to activate a given output vector if the input is recognized
#
#
# - We can do it in one layer with a feedforward approach (see the sketch below)
#     - Set the encoding vectors of some small set of neurons to be a vocabulary item
#         - That will pick out specific vectors
#     - Set the intercepts of those neurons to be high in the positive direction
#         - So they will only fire if there is 'enough' of that vector in the input
#     - Set the linear transformation on the output of those neurons to be the desired output vector
#         - Since these are in the weights, the output will perfectly represent the desired output
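#
# - A minimal raw-nengo sketch of that recipe (illustrative; the 20-neuron populations, the 0.3 intercept floor, and the random 3-word vocabulary are assumptions, not values from the course models):
# In[ ]:
import numpy as np
import nengo

D = 32
rng = np.random.RandomState(0)
vocab_vectors = rng.randn(3, D)                  # a stand-in 3-word vocabulary
vocab_vectors /= np.linalg.norm(vocab_vectors, axis=1, keepdims=True)

n_per_item = 20
with nengo.Network(seed=0) as net:
    stim = nengo.Node(vocab_vectors[0])          # present the first vocab item
    out = nengo.Node(size_in=D)
    for v in vocab_vectors:
        # a small 1-D population whose input weights are the vocab vector,
        # i.e. neurons whose effective encoders all point along v, with high
        # intercepts so they only fire when the input is similar enough to v
        ens = nengo.Ensemble(n_per_item, dimensions=1,
                             encoders=np.ones((n_per_item, 1)),
                             intercepts=nengo.dists.Uniform(0.3, 0.9))
        nengo.Connection(stim, ens, transform=v.reshape(1, D))
        # the decoded similarity is projected back out along the desired
        # output vector (here v itself, i.e. auto-association / clean-up)
        nengo.Connection(ens, out, transform=v.reshape(D, 1))
    out_p = nengo.Probe(out, synapse=0.03)
with nengo.Simulator(net) as sim:
    sim.run(0.2)
print(np.dot(sim.data[out_p][-1], vocab_vectors[0]))  # ~1: item recovered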
#
# - We've seen several techniques that make this easy
#     - Ensemble arrays: for collecting lots of small populations together
#     - SPA modules: for defining vocabs, etc.
# In[112]:
import nengo
import nengo_spa as spa
import numpy as np
D = 32
seed = 1
model = spa.Network("Associative Memory", seed=seed)
vocab = spa.Vocabulary(dimensions=D,
                       pointer_gen=np.random.RandomState(seed + 1))
words = ['RED', 'GREEN', 'BLUE']
vocab.populate(';'.join(words))
#noise_RED = vocab.parse("RED").v + .4*np.random.randn(D)
noise_RED = vocab.parse("RED").v + .2*vocab.parse("GREEN").v + .2*np.random.randn(D)
noise_RED = noise_RED/np.linalg.norm(noise_RED)
noise_GREEN = vocab.parse("GREEN").v + .2*np.random.randn(D)
noise_GREEN = noise_GREEN/np.linalg.norm(noise_GREEN)
def memory_input(t):
    if t < 0.2:
        return vocab.parse("BLUE").v
    elif .2 < t < .5:
        return noise_RED
    elif .5 < t < .8:
        return vocab.parse("RED").v
    elif .8 < t < 1:
        return noise_GREEN
    else:
        return vocab.parse("0").v
with model:
    stim = spa.Transcode(memory_input, label='input', output_vocab=vocab)
    am = spa.ThresholdingAssocMem(threshold=0.3, input_vocab=vocab,
                                  mapping=vocab.keys(),
                                  function=lambda x: x > .2)
    stim >> am
    in_p = nengo.Probe(stim.output)
    out_p = nengo.Probe(am.output, synapse=0.03)
# In[113]:
from nengo_gui.ipython import IPythonViz
IPythonViz(model, "configs/simple_cleanup.py.cfg")
# In[114]:
# Notice that the magnitude of the cleaned-up element is preserved (without
# 'function='), while the others are all pushed towards or below zero. The
# output is thus more 'purely' the target vector. Supplying 'function='
# adds a second threshold that boosts the output towards one.
sim = nengo.Simulator(model)
sim.run(1.2)
t = sim.trange()
figure(figsize=(10,10))
plt.subplot(2, 1, 1)
plt.plot(t, spa.similarity(sim.data[in_p], vocab))
plt.ylabel("Input")
plt.ylim(top=1.1)
plt.legend(vocab.keys(), loc='best')
plt.subplot(2, 1, 2)
plt.plot(t, spa.similarity(sim.data[out_p], vocab))
plt.ylabel("Output")
plt.legend(vocab.keys(), loc='best');
# - This implementation has several nice features
#     - Extremely fast (1 synapse feedforward, ~5 ms)
#     - Fully spiking, integrates with SPA models
#     - Scales very well
#
#
#
# - Scaling properties of a neural clean-up memory:
#     - An SP is formed by binding $k$ pairs of random SPs and adding them together
#     - The memory is then probed by unbinding random members of that set
#     - The figure shows the minimum number of dimensions required to recover a lexical item from the input 99% of the time
#     - Averages are over 200 simulations for each combination of $k$, $M$, and $D$ values
#     - The vertical dashed line is the approximate size of an adult lexicon
#     - Horizontal dashed lines show the performance of a non-neural clean-up directly implementing the algorithm
# ## SPA Example
#
# - This is the same example as for the symbols lecture, but with a working memory
# - The WM is just a single attractor network
# - The model is supposed to memorize the bound inputs and then answer questions about what is bound to what
# In[1]:
get_ipython().run_line_magic('pylab', 'inline')
import nengo
import nengo_spa as spa
def colour_input(t):
    if t < 0.25:
        return 'RED'
    elif t < 0.5:
        return 'BLUE'
    else:
        return '0'

def shape_input(t):
    if t < 0.25:
        return 'CIRCLE'
    elif t < 0.5:
        return 'SQUARE'
    else:
        return '0'

def cue_input(t):
    if t < 0.5:
        return '0'
    sequence = ['0', 'CIRCLE', 'RED', '0', 'SQUARE', 'BLUE']
    idx = int(((t - 0.5) // (1. / len(sequence))) % len(sequence))
    return sequence[idx]
# Number of dimensions for the Semantic Pointers
D = 32
seed = 4
vocab = spa.Vocabulary(dimensions=D,
                       pointer_gen=np.random.RandomState(seed))
words = ['RED', 'SQUARE', 'BLUE', 'CIRCLE']
vocab.populate(';'.join(words))
model = spa.Network(label="Question answering with memory")
with model:
    colour_in = spa.Transcode(colour_input, output_vocab=vocab)
    shape_in = spa.Transcode(shape_input, output_vocab=vocab)
    cue = spa.Transcode(cue_input, output_vocab=vocab, label="cue")
    conv = spa.State(vocab, subdimensions=4,
                     feedback=1.,
                     feedback_synapse=0.4,
                     label="memory")
    #out = spa.State(vocab)
    out = spa.ThresholdingAssocMem(threshold=0.3, input_vocab=vocab,
                                   mapping=vocab.keys(), label="clean up",
                                   function=lambda x: x > .2)
    # Connect the buffers
    colour_in * shape_in >> conv
    conv * ~cue >> out
with model:
    model.config[nengo.Probe].synapse = nengo.Lowpass(0.03)
    p_colour_in = nengo.Probe(colour_in.output)
    p_shape_in = nengo.Probe(shape_in.output)
    p_cue = nengo.Probe(cue.output)
    p_conv = nengo.Probe(conv.output)
    p_out = nengo.Probe(out.output)
# In[2]:
from nengo_gui.ipython import IPythonViz
IPythonViz(model, "configs/binding_with_memory.py.cfg")
# In[3]:
with nengo.Simulator(model) as sim:
    sim.run(3.)
plt.figure(figsize=(10, 10))
plt.subplot(5, 1, 1)
plt.plot(sim.trange(), spa.similarity(sim.data[p_colour_in], vocab))
plt.legend(vocab.keys(), fontsize='x-small')
plt.ylabel("color")
plt.subplot(5, 1, 2)
plt.plot(sim.trange(), spa.similarity(sim.data[p_shape_in], vocab))
plt.legend(vocab.keys(), fontsize='x-small')
plt.ylabel("shape")
plt.subplot(5, 1, 3)
plt.plot(sim.trange(), spa.similarity(sim.data[p_cue], vocab))
plt.legend(vocab.keys(), fontsize='x-small')
plt.ylabel("cue")
plt.subplot(5, 1, 4)
for pointer in ['RED * CIRCLE', 'BLUE * SQUARE']:
    plt.plot(sim.trange(), vocab.parse(pointer).dot(sim.data[p_conv].T), label=pointer)
plt.legend(fontsize='x-small')
plt.ylabel("convolved")
plt.subplot(5, 1, 5)
plt.plot(sim.trange(), spa.similarity(sim.data[p_out], vocab))
plt.legend(vocab.keys(), fontsize='x-small')
plt.ylabel("Output")
plt.xlabel("time [s]");