#!/usr/bin/env python
# coding: utf-8

# # Hyperparameter optimization using Janggu

# In this notebook we illustrate how Janggu can be combined with hyperparameter optimization.
# In particular, for this example we shall use the hyperopt Python package,
# which can be installed via `pip install hyperopt`.

# In[1]:


import os

import numpy as np

from keras import Model
from keras import backend as K
from keras.layers import Conv2D
from keras.layers import Dense
from keras.layers import GlobalAveragePooling2D
from keras.layers import Input
from keras.layers import Reshape
from pkg_resources import resource_filename

from janggu import Janggu
from janggu import Scorer
from janggu import inputlayer
from janggu import outputdense
from janggu.data import Bioseq
from janggu.data import Cover
from janggu.data import ReduceDim
from janggu.data import view
from janggu.layers import DnaConv2D
from janggu.layers import LocalAveragePooling2D
from janggu.utils import ExportClustermap
from janggu.utils import ExportTsne
from janggu.utils import ExportTsv

from IPython.display import Image

np.random.seed(1234)

from hyperopt import hp
from hyperopt import tpe
from hyperopt import fmin
from hyperopt import Trials


# First, we need to specify the output directory in which the results will be stored.

# In[2]:


os.environ['JANGGU_OUTPUT'] = '/home/wkopp/janggu_examples'


# We use the following toy example files that are shipped with the package:

# In[4]:


# load the dataset
# The pseudo genome represents just a concatenation of all sequences
# in sample.fa and sample2.fa. Therefore, the results should be almost
# identical to the models obtained from classify_fasta.py.
REFGENOME = resource_filename('janggu', 'resources/pseudo_genome.fa')

# ROI contains regions spanning positive and negative examples
ROI_TRAIN_FILE = resource_filename('janggu', 'resources/roi_train.bed')
ROI_VAL_FILE = resource_filename('janggu', 'resources/roi_test.bed')

# PEAK_FILE only contains positive examples
PEAK_FILE = resource_filename('janggu', 'resources/scores.bed')


# ## Define a get_data function

# First, we define a function that instantiates the training and validation datasets
# needed for model fitting and evaluation.
# The function is parametrized by a dictionary.
# In this example, we make use of this parameter to test the performance of
# different sequence encoding orders.
# In general, however, it can also be useful to consider different flanking windows
# or normalization procedures for the coverage tracks.

# In[6]:


def get_data(params):
    # DNA sequences, one-hot encoded with the given encoding order
    DNA_TRAIN = Bioseq.create_from_refgenome('dna', refgenome=REFGENOME,
                                             roi=ROI_TRAIN_FILE,
                                             order=params['order'],
                                             binsize=200,
                                             store_whole_genome=True)

    # binary peak labels derived from the BED file
    LABELS_TRAIN = Cover.create_from_bed('peaks', roi=ROI_TRAIN_FILE,
                                         bedfiles=PEAK_FILE,
                                         binsize=200,
                                         resolution=200,
                                         storage='sparse',
                                         store_whole_genome=True)

    # reuse the training datasets for the validation regions
    DNA_VAL = view(DNA_TRAIN, ROI_VAL_FILE)
    LABELS_VAL = view(LABELS_TRAIN, ROI_VAL_FILE)

    return ((DNA_TRAIN, ReduceDim(LABELS_TRAIN)),
            (DNA_VAL, ReduceDim(LABELS_VAL)))
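# As a quick sanity check (a minimal sketch, not part of the hyperparameter search),
# we can inspect how the encoding `order` affects the dataset dimensions. The shapes
# in the comments are assumptions for the toy data: with higher-order encodings the
# channel axis grows as 4**order, while the sequence axis shrinks accordingly.

# In[ ]:


(train_in, train_out), (val_in, val_out) = get_data({'order': 2})

print(train_in.shape)   # e.g. (n_regions, 199, 1, 16) for order=2 and binsize=200
print(train_out.shape)  # labels collapsed by ReduceDim, e.g. (n_regions, 1)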
# ## Define and fit a model

# We define the neural network as described in the other tutorial examples,
# using Janggu's helper functionality.

# In[7]:


@inputlayer
@outputdense('sigmoid')
def double_stranded_model_dnaconv(inputs, inp, oup, params):
    """keras model for scanning both DNA strands.

    A more elegant way of scanning both strands for motif occurrences
    is achieved by the DnaConv2D layer wrapper, which internally performs
    the convolution operation with the normal kernel weights and
    the reverse complemented weights.
    """
    with inputs.use('dna') as layer:
        # the name in inputs.use() should be the same as the dataset name.
        layer = DnaConv2D(Conv2D(params[0], (params[1], 1),
                                 activation=params[2]))(layer)

    output = GlobalAveragePooling2D(name='motif')(layer)
    return inputs, output


# Finally, we define the objective function, whose semantics are explained in detail
# in the hyperopt documentation.
# In essence, given the `params` dictionary, the datasets are unpacked and the
# corresponding model is instantiated and fitted. The model's validation loss is
# reported back to hyperopt via the return value, which in turn is used to pick
# an appropriate set of parameters for the next trial.

# In[14]:


def objective(params):
    train_data, val_data = get_data(params)

    modelname = 'dna2peak_ex4_order{}_k{}_w{}'.format(params['order'],
                                                      params['nkernels'],
                                                      params['kernelwidth'])

    # without clear_session, the memory consumption increases continuously
    K.clear_session()

    # create a new model object
    model = Janggu.create(template=double_stranded_model_dnaconv,
                          modelparams=(params['nkernels'],
                                       params['kernelwidth'], 'relu'),
                          inputs=train_data[0],
                          outputs=train_data[1],
                          name=modelname)

    model.compile(optimizer='adadelta', loss='binary_crossentropy',
                  metrics=['acc'])

    hist = model.fit(train_data[0], train_data[1], epochs=50, verbose=False,
                     validation_data=val_data)

    res = {'loss': hist.history['val_loss'][-1],
           'status': 'ok',
           'modelname': model.name}
    print(res)
    return res


# We define a simple search space over different numbers of convolution filters,
# different filter lengths and different DNA sequence encoding orders.

# In[9]:


space = {'nkernels': hp.choice('nk', [10, 20, 30]),
         'kernelwidth': hp.choice('kw', [11, 15, 25]),
         'order': hp.choice('o', [1, 2, 3])}


# In[10]:


trials = Trials()


# In[11]:


best = fmin(objective,
            space=space,
            algo=tpe.suggest,
            max_evals=30,
            trials=trials)


# In[18]:


# determine the best model across all trials by validation loss
bestmodel = ''
score = np.inf
for trial in trials.results:
    if score > trial['loss']:
        score = trial['loss']
        bestmodel = trial['modelname']

print('The best model ({}) has obtained a validation loss of {}'.format(bestmodel,
                                                                        score))


# When using the Janggu wrapper to fit a neural network, the model parameters are
# automatically stored in the JANGGU_OUTPUT directory. Therefore, the best model
# can be fetched by name:

# In[16]:


model = Janggu.create_by_name(bestmodel)


# In[17]:


model.summary()
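# To quantify how well the best model generalizes, we can also score it on the
# validation regions. The snippet below is a minimal sketch that follows the
# Scorer/evaluate pattern from the other Janggu tutorials; `space_eval` (from
# hyperopt) maps the index-valued result of `fmin` back to the actual parameter
# values. The choice of auROC as the metric is an assumption for illustration.

# In[ ]:


from hyperopt import space_eval
from sklearn.metrics import roc_auc_score

# recover the best hyperparameters and rebuild the matching validation data;
# the encoding order must agree with the one the best model was trained on.
best_params = space_eval(space, best)
_, (DNA_VAL, LABELS_VAL) = get_data(best_params)

# score the model with auROC and export the result as a tab-separated file
auroc = Scorer('auROC', roc_auc_score, exporter=ExportTsv())
model.evaluate(DNA_VAL, LABELS_VAL, datatags=['val'], callbacks=[auroc])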