In this notebook we illustrate how Janggu can be leveraged with hyperparameter optimization.
In particular, for this example, we shall use the hyperopt python package. So one needs
to install the package using pip install hyperopt
import os
import numpy as np
from keras import Model
from keras import backend as K
from keras.layers import Conv2D
from keras.layers import Dense
from keras.layers import GlobalAveragePooling2D
from keras.layers import Input
from keras.layers import Reshape
from pkg_resources import resource_filename
from janggu import Janggu
from janggu import Scorer
from janggu import inputlayer
from janggu import outputdense
from janggu.data import Bioseq
from janggu.data import Cover
from janggu.data import ReduceDim
from janggu.data import view
from janggu.layers import DnaConv2D
from janggu.layers import LocalAveragePooling2D
from janggu.utils import ExportClustermap
from janggu.utils import ExportTsne
from janggu.utils import ExportTsv
from IPython.display import Image
np.random.seed(1234)  # fix the NumPy RNG seed so results are reproducible
from hyperopt import hp
from hyperopt import tpe
from hyperopt import fmin
from hyperopt import Trials
Using TensorFlow backend.
First, we need to specify the output directory in which the results are stored.
# Janggu writes all results (model parameters, evaluation output) below this directory.
os.environ['JANGGU_OUTPUT'] = '/home/wkopp/janggu_examples'
We use the following toy example files that are shipped with the package:
# load the dataset
# The pseudo genome represents just a concatenation of all sequences
# in sample.fa and sample2.fa. Therefore, the results should be almost
# identical to the models obtained from classify_fasta.py.
REFGENOME = resource_filename('janggu', 'resources/pseudo_genome.fa')
# ROI contains regions spanning positive and negative examples
ROI_TRAIN_FILE = resource_filename('janggu', 'resources/roi_train.bed')
ROI_VAL_FILE = resource_filename('janggu', 'resources/roi_test.bed')
# PEAK_FILE only contains positive examples
PEAK_FILE = resource_filename('janggu', 'resources/scores.bed')
First, we define a function that instantiates the training and validation datasets needed for the model fitting/evaluation. The function can be parametrized by a dictionary. We shall make use of this parameter to test performances of different sequence encoding orders in this example. However, in general it can be useful to consider different flanking windows, or normalization procedures for the coverage tracks.
def get_data(params):
    """Build the training and validation datasets.

    Parameters
    ----------
    params : dict
        Hyperparameter dictionary; only ``params['order']`` (the DNA
        sequence encoding order) is consumed here.

    Returns
    -------
    tuple
        ``((dna_train, labels_train), (dna_val, labels_val))`` where the
        label covers are wrapped in ``ReduceDim`` to drop singleton axes.
    """
    # One-hot encoded DNA from the reference genome over the training ROI.
    dna_train = Bioseq.create_from_refgenome('dna', refgenome=REFGENOME,
                                             roi=ROI_TRAIN_FILE,
                                             order=params['order'],
                                             binsize=200,
                                             store_whole_genome=True)
    # Binary peak labels per 200 bp bin over the same training ROI.
    labels_train = Cover.create_from_bed('peaks', roi=ROI_TRAIN_FILE,
                                         bedfiles=PEAK_FILE,
                                         binsize=200,
                                         resolution=200,
                                         storage='sparse',
                                         store_whole_genome=True)
    # Because the whole genome is stored, the validation sets are just
    # views of the same data restricted to the validation ROI.
    dna_val = view(dna_train, ROI_VAL_FILE)
    labels_val = view(labels_train, ROI_VAL_FILE)
    return ((dna_train, ReduceDim(labels_train)),
            (dna_val, ReduceDim(labels_val)))
We define the neural network as described in other tutorial examples, using Janggu's helper functionality.
@inputlayer
@outputdense('sigmoid')
def double_stranded_model_dnaconv(inputs, inp, oup, params):
    """Keras model that scans both DNA strands.

    The DnaConv2D wrapper offers an elegant way to detect motif
    occurrences on either strand: internally it applies the convolution
    with the normal kernel weights as well as with their
    reverse-complemented counterparts.
    """
    # The name passed to inputs.use() must match the dataset name ('dna').
    with inputs.use('dna') as net:
        both_strand_conv = DnaConv2D(Conv2D(params[0], (params[1], 1),
                                            activation=params[2]))
        net = both_strand_conv(net)
    output = GlobalAveragePooling2D(name='motif')(net)
    return inputs, output
Finally, we define the objective function whose semantics is explained in the hyperopt documentation in detail.
In essence, given the params
dictionary, the dataset is unpacked and the corresponding model is instantiated and fitted. The model's validation loss is reported back to hyperopt via the return value. This is in turn used to pick an appropriate set of parameters for the next trial.
def objective(params):
    """Fit one model for a given hyperparameter setting.

    Builds the datasets, trains a fresh model for 50 epochs and reports
    the final validation loss back to hyperopt.

    Parameters
    ----------
    params : dict
        Must contain 'order', 'nkernels' and 'kernelwidth'.

    Returns
    -------
    dict
        Result record with 'loss', 'status' and 'modelname' keys, as
        expected by hyperopt's fmin.
    """
    train_data, val_data = get_data(params)
    modelname = (f"dna2peak_ex4_order{params['order']}"
                 f"_k{params['nkernels']}_w{params['kernelwidth']}")
    # Without clearing the session, memory consumption increases
    # continuously across trials.
    K.clear_session()
    # Instantiate a fresh model object for this trial.
    model = Janggu.create(template=double_stranded_model_dnaconv,
                          modelparams=(params['nkernels'],
                                       params['kernelwidth'], 'relu'),
                          inputs=train_data[0],
                          outputs=train_data[1],
                          name=modelname)
    model.compile(optimizer='adadelta', loss='binary_crossentropy',
                  metrics=['acc'])
    hist = model.fit(train_data[0], train_data[1], epochs=50,
                     verbose=False, validation_data=val_data)
    res = {'loss': hist.history['val_loss'][-1], 'status': 'ok',
           'modelname': model.name}
    print(res)
    return res
We define a simple search space using different numbers of convolution filters, different filter lengths and different DNA sequence encoding orders.
# Search space: number of convolution filters, filter length and DNA
# sequence encoding order. The labels ('nk', 'kw', 'o') identify each
# dimension in hyperopt's result bookkeeping.
space = dict(nkernels=hp.choice('nk', [10, 20, 30]),
             kernelwidth=hp.choice('kw', [11, 15, 25]),
             order=hp.choice('o', [1, 2, 3]))

# Trials records the outcome of every evaluation performed by fmin.
trials = Trials()
best = fmin(objective, space=space, algo=tpe.suggest, max_evals=30, trials=trials)
0%| | 0/30 [00:00<?, ?it/s, best loss: ?]WARNING:tensorflow:From /home/wkopp/anaconda3/envs/jdev/lib/python3.7/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version. Instructions for updating: Colocations handled automatically by placer. WARNING:tensorflow:From /home/wkopp/anaconda3/envs/jdev/lib/python3.7/site-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.cast instead. {'loss': 0.3910023140907288, 'status': 'ok', 'modelname': 'dna2peak_ex4_order1_k20_w15'} {'loss': 0.21008342802524566, 'status': 'ok', 'modelname': 'dna2peak_ex4_order3_k10_w11'} {'loss': 0.2897782826423645, 'status': 'ok', 'modelname': 'dna2peak_ex4_order2_k10_w11'} {'loss': 0.21085885047912598, 'status': 'ok', 'modelname': 'dna2peak_ex4_order2_k10_w15'} {'loss': 0.3971425604820251, 'status': 'ok', 'modelname': 'dna2peak_ex4_order1_k10_w25'} {'loss': 0.24330212771892548, 'status': 'ok', 'modelname': 'dna2peak_ex4_order2_k30_w11'} {'loss': 0.24345718264579774, 'status': 'ok', 'modelname': 'dna2peak_ex4_order2_k20_w11'} {'loss': 0.16862974882125856, 'status': 'ok', 'modelname': 'dna2peak_ex4_order2_k20_w25'} {'loss': 0.1668671941757202, 'status': 'ok', 'modelname': 'dna2peak_ex4_order2_k20_w25'} {'loss': 0.44734586238861085, 'status': 'ok', 'modelname': 'dna2peak_ex4_order1_k10_w11'} {'loss': 0.45441911220550535, 'status': 'ok', 'modelname': 'dna2peak_ex4_order1_k10_w11'} {'loss': 0.35290857672691345, 'status': 'ok', 'modelname': 'dna2peak_ex4_order1_k30_w25'} {'loss': 0.24321768164634705, 'status': 'ok', 'modelname': 'dna2peak_ex4_order2_k10_w25'} {'loss': 0.4089042639732361, 'status': 'ok', 'modelname': 'dna2peak_ex4_order1_k20_w11'} {'loss': 0.17503570795059203, 'status': 'ok', 'modelname': 'dna2peak_ex4_order2_k10_w25'} {'loss': 
0.2442852509021759, 'status': 'ok', 'modelname': 'dna2peak_ex4_order2_k30_w11'} {'loss': 0.39497903823852537, 'status': 'ok', 'modelname': 'dna2peak_ex4_order1_k30_w11'} {'loss': 0.3563139796257019, 'status': 'ok', 'modelname': 'dna2peak_ex4_order1_k30_w25'} {'loss': 0.39190362811088564, 'status': 'ok', 'modelname': 'dna2peak_ex4_order1_k30_w15'} {'loss': 0.35503706216812136, 'status': 'ok', 'modelname': 'dna2peak_ex4_order1_k30_w25'} {'loss': 0.11563926815986633, 'status': 'ok', 'modelname': 'dna2peak_ex4_order3_k20_w25'} {'loss': 0.11595120310783386, 'status': 'ok', 'modelname': 'dna2peak_ex4_order3_k20_w25'} {'loss': 0.11960413366556168, 'status': 'ok', 'modelname': 'dna2peak_ex4_order3_k20_w25'} {'loss': 0.147387475669384, 'status': 'ok', 'modelname': 'dna2peak_ex4_order3_k20_w25'} {'loss': 0.12433685302734375, 'status': 'ok', 'modelname': 'dna2peak_ex4_order3_k20_w25'} {'loss': 0.13556983858346938, 'status': 'ok', 'modelname': 'dna2peak_ex4_order3_k20_w15'} {'loss': 0.137903308570385, 'status': 'ok', 'modelname': 'dna2peak_ex4_order3_k20_w25'} {'loss': 0.13357810378074647, 'status': 'ok', 'modelname': 'dna2peak_ex4_order3_k20_w25'} {'loss': 0.14981190830469132, 'status': 'ok', 'modelname': 'dna2peak_ex4_order3_k20_w15'} {'loss': 0.12669599175453186, 'status': 'ok', 'modelname': 'dna2peak_ex4_order3_k20_w25'} 100%|██████████| 30/30 [1:53:31<00:00, 227.05s/it, best loss: 0.11563926815986633]
# Pick the trial with the smallest validation loss. min() keeps the
# first of tied entries, matching the original strict-inequality scan;
# the default preserves the ('', inf) fallback for an empty trial list.
best_trial = min(trials.results, key=lambda trial: trial['loss'],
                 default={'modelname': '', 'loss': np.inf})
bestmodel = best_trial['modelname']
score = best_trial['loss']
print('The best model ({}) has obtained a validation loss of {}'.format(bestmodel, score))
The best model (dna2peak_ex4_order3_k20_w25) has obtained a validation loss of 0.11563926815986633
When using the Janggu wrapper to fit a neural network, the model parameters are stored automatically into the JANGGU_OUTPUT path. Therefore, the best model can be fetched using:
model = Janggu.create_by_name(bestmodel)
model.summary()
_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dna (InputLayer) (None, 198, 1, 64) 0 _________________________________________________________________ dna_conv2d_1 (DnaConv2D) (None, 174, 1, 20) 32020 _________________________________________________________________ motif (GlobalAveragePooling2 (None, 20) 0 _________________________________________________________________ peaks (Dense) (None, 1) 21 ================================================================= Total params: 32,041 Trainable params: 32,041 Non-trainable params: 0 _________________________________________________________________