Force the use of the local development repository. The repository must be the russell_dev branch of https://github.com/russelljjarvis/neuronunit, and the corresponding docker build is docker-stacks/neuronunit-optimization. There are more graceful ways of achieving this.
If you lack the docker-stacks/neuronunit-optimization image, try:
```
$ git clone -b dev https://github.com/scidash/docker-stacks.git
$ cd docker-stacks
$ sudo bash build-all
```
The best long-term fix will be to integrate this chapter and its supporting code from https://github.com/russelljjarvis/neuronunit into https://github.com/scidash/neuronunit.
# Invocation of the Jupyter notebook
On my local machine the development repository of neuronunit is located under $HOME/git. Launch the container with the current directory bind-mounted into it:

```
$ docker run -p 8888:8888 -v `pwd`:/home/jovyan/mnt scidash/neuronunit-optimization \
    jupyter notebook --ip=0.0.0.0 --NotebookApp.token="" \
    --NotebookApp.disable_check_xsrf=True
```
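Once the server is running, a quick sanity check from a notebook cell (a minimal sketch; the mount point comes from the `-v` flag above) confirms that the host directory is visible inside the container:

```python
import os

# /home/jovyan/mnt is where the docker run command above mounts the host's
# working directory; adjust if you changed the -v argument.
print(os.listdir('/home/jovyan/mnt'))
```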
```python
import sys
import os

# Put the development checkout of neuronunit ahead of any installed copy.
THIS_DIR = os.path.dirname(os.path.realpath('chapter5.ipynb'))
this_nu = os.path.join(THIS_DIR, '../')
sys.path.insert(0, this_nu)

import neuronunit
from neuronunit.tests import get_neab
from neuronunit.tests import utilities
from neuronunit.tests import model_parameters as modelp
from neuronunit.models.reduced import ReducedModel
import numpy as np

outils = utilities.Utilities1(get_neab)
model = ReducedModel(get_neab.LEMS_MODEL_PATH, name='vanilla', backend='NEURON')
model.load_model()
outils.model = model
```
```
attempting to recover from pickled file
Ignoring included LEMS file: Cells.xml
Ignoring included LEMS file: Networks.xml
Ignoring included LEMS file: Simulation.xml
Mechanisms already loaded from path: /home/jovyan/mnt/git/neuronunit/neuronunit/tests/NeuroML2. Aborting.
Ignoring included LEMS file: Cells.xml
Ignoring included LEMS file: Networks.xml
Ignoring included LEMS file: Simulation.xml
```
```python
class Test:

    def _optimize(self, model, modelp):
        '''
        The implementation of optimization, consisting of implementation details.
        Inputs: a model, and the model parameter ranges to explore.
        Private method, intended for the programmer/designer.
        Outputs: the optimal models, their attributes, and the low errors
        they resulted in.
        '''
        from neuronunit.optimization import nsga_serial as nsga
        gap = nsga.GAparams(model)
        # The number of generations is 3.
        gap.NGEN = 3
        # The population of genes is 12.
        gap.MU = 12
        gap.BOUND_LOW = [np.min(i) for i in modelp.model_params.values()]
        gap.BOUND_UP = [np.max(i) for i in modelp.model_params.values()]
        # Call the actual Genetic Algorithm with the optimization parameters:
        # number of generations (NGEN = 3) and gene population size (MU = 12).
        vmpop, pop, invalid_ind, pf = nsga.main(gap.NGEN, gap.MU, model, modelp)
        parameters = [i.attrs for i in vmpop]
        rheobases = [i.rheobase for i in vmpop]
        scores = [i.score for i in vmpop]
        data_tuples = (vmpop, parameters, scores, pop, invalid_ind, pf, rheobases)
        return pop, data_tuples

    def _get_optimization_parameters(self, data_tuples):
        # Unpack the tuple of lists that _optimize returns:
        # (vmpop, parameters, scores, pop, invalid_ind, pf, rheobases)
        _, parameters, scores, _, _, _, _ = data_tuples
        return parameters, scores

    def optimize(self, model, modelp):
        '''
        The class user's version of optimize, where the details are hidden
        in _optimize.
        Inputs:
        An Izhikevich model specified in NeuroML, but implemented with a
        NEURON backend from neuronunit. modelp is a formatted dictionary of
        model parameters, where keys are Izhikevich parameters and values
        are parameter ranges.
        Outputs:
        The optimal model, and the scores as pandas data frames.
        data_tuples: other data about models from the converged gene
        population, such as the rheobase values resulting from the converged
        genes, pandas score arrays, and the genes corresponding to attributes
        of the pareto front (in a raw format).
        '''
        # Do the optimization, including repeated calls to judge.
        models, data_tuples = self._optimize(model, modelp)
        parameters, scores = self._get_optimization_parameters(data_tuples)
        # parameters is a way of looking at solved model parameters, i.e.
        # candidate solutions from the pareto front.
        # scores is a list of pandas data frames for the converged gene
        # population; a pd.concat sketch after the tables below shows one
        # way to combine them into a single big table.
        return model, scores, data_tuples

t = Test()
model, scores, data_tuples = t.optimize(model, modelp)
```
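As a minimal sketch of how the GA bounds in `_optimize` are derived (using a hypothetical `model_params` dictionary, since the real one lives in `neuronunit.tests.model_parameters`), note that `BOUND_LOW` and `BOUND_UP` depend on the iteration order of the dictionary values:

```python
import numpy as np

# Hypothetical stand-in for modelp.model_params: parameter names mapped to
# the range of values the GA may explore (names and ranges are assumptions).
model_params = {
    'a': np.linspace(0.01, 0.1, 10),
    'b': np.linspace(-2.0, 2.0, 10),
    'vr': np.linspace(-80.0, -50.0, 10),
}

# Per-parameter lower and upper bounds, in dictionary iteration order.
BOUND_LOW = [np.min(v) for v in model_params.values()]
BOUND_UP = [np.max(v) for v in model_params.values()]
print(BOUND_LOW)  # [0.01, -2.0, -80.0]
print(BOUND_UP)   # [0.1, 2.0, -50.0]
```

Because the bounds are positional, the genome layout must follow the same key order everywhere the genes are decoded back into parameters.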
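Several cells below index into `data_tuples` by position, so it is worth restating its layout (taken from the return statement of `_optimize` above); a plain unpacking is less error-prone than numeric indices:

```python
# Positional layout of data_tuples, as constructed in _optimize:
#   [0] vmpop        [1] parameters   [2] scores   [3] pop
#   [4] invalid_ind  [5] pf           [6] rheobases
vmpop, parameters, score_frames, pop, invalid_ind, pf, rheobases = data_tuples
```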
```python
# Display the model parameter ranges that were explored.
modelp
```

```python
import pandas as pd

# The converged models live at index 0 of data_tuples, and their attribute
# dictionaries at index 1 (see the layout sketch above).
models = data_tuples[0]
attributes = data_tuples[1]
sc = pd.DataFrame(scores[0])
for j, i in enumerate(models):
    i.name = attributes[j]
sc
```

```python
# Parameter values of the best candidate.
data = [models[0].name]
model_values0 = pd.DataFrame(data)
model_values0
```

```python
# Rheobase values of the converged gene population (index 6 of data_tuples).
rheobases = data_tuples[6]
rheobases[0]
```

```python
sc1 = pd.DataFrame(scores[1])
sc1
```

```python
rheobases[1]
```

```python
# Parameter values of the second candidate.
data = [models[1].name]
model_values1 = pd.DataFrame(data)
model_values1
```

```python
models[1].name
```
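The `optimize` docstring above notes that `scores` is a list of per-candidate pandas data frames and wishes for one big table. A minimal sketch of that (assuming the frames share a common column layout) is `pd.concat` with `keys`:

```python
import pandas as pd

# Combine the per-candidate score frames into one table; the outer index
# level identifies which converged candidate each row came from.
big_scores = pd.concat(
    [pd.DataFrame(s) for s in scores],
    keys=list(range(len(scores))),
    names=['candidate', 'row'],
)
big_scores
```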
The code below computes the differences between parameter values obtained via brute force and those obtained by the genetic algorithm, and displays the differences as pandas data tables.

I have knowingly violated GitHub conventions by adding data (a pickled file, as well as sources) to the repository. The justification is that ground_error (the ground truth against which the Genetic Algorithm outputs are compared) takes a prohibitively long time to generate, and regenerating it would detract from the notebooking philosophy.
```python
import pickle
import pandas as pd

try:
    ground_error = pickle.load(open('big_model_evaulated.pickle', 'rb'))
except Exception:
    # The exception code is only skeletal; it would not actually work as
    # written, but it illustrates the right principles. `futures` (e.g. from
    # scoop) and `ground_truth` are assumed to exist here.
    print('It seems the ground truth error data does not yet exist; creating it now.')
    ground_error = list(futures.map(outils.func2map, ground_truth))
    pickle.dump(ground_error, open('big_model_evaulated.pickle', 'wb'))
    # ground_error_nsga = list(zip(vmpop, pop, invalid_ind))
    # pickle.dump(ground_error_nsga, open('nsga_evaulated.pickle', 'wb'))

# Unpack the brute-force results.
sum_errors = [i[0] for i in ground_error]
composite_errors = [i[1] for i in ground_error]
attrs = [i[2] for i in ground_error]
rheobase = [i[3] for i in ground_error]

# Indices of the brute-force minima under each error definition.
indexs = int(np.argmin(sum_errors))
indexc = int(np.argmin(composite_errors))
# assert indexs == indexc

vmpop = data_tuples[0]

# Differences in attributes found via brute force versus the genetic
# algorithm, for the top two candidates.
df_0 = pd.DataFrame(
    [(k, v, vmpop[0].attrs[k], float(v) - float(vmpop[0].attrs[k]))
     for k, v in ground_error[indexc][2].items()],
    columns=['parameter', 'brute_force', 'ga', 'difference'])
df_1 = pd.DataFrame(
    [(k, v, vmpop[1].attrs[k], float(v) - float(vmpop[1].attrs[k]))
     for k, v in ground_error[indexc][2].items()],
    columns=['parameter', 'brute_force', 'ga', 'difference'])
```

```python
df_0
```

```python
df_1
```
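As a quick follow-up (assuming the column names given to df_0 and df_1 above), the total absolute parameter discrepancy summarizes how close each GA candidate came to the brute-force optimum:

```python
# Sum of absolute per-parameter differences for each of the top two candidates.
for name, df in [('candidate 0', df_0), ('candidate 1', df_1)]:
    print(name, 'total |brute force - GA| =', df['difference'].abs().sum())
```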