import keras
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Dropout, Activation, Flatten, Input, Lambda
from keras.layers import Conv2D, MaxPooling2D, Conv1D, MaxPooling1D, LSTM, ConvLSTM2D, GRU, BatchNormalization, LocallyConnected2D, Permute
from keras.layers import Concatenate, Reshape, Softmax, Conv2DTranspose, Embedding, Multiply
from keras.callbacks import ModelCheckpoint, EarlyStopping, Callback
from keras import regularizers
from keras import backend as K
import keras.losses
import tensorflow as tf
from tensorflow.python.framework import ops
import isolearn.keras as iso
import numpy as np
import logging
logging.getLogger('tensorflow').setLevel(logging.ERROR)
import pandas as pd
import os
import pickle
import scipy.sparse as sp
import scipy.io as spio
import matplotlib.pyplot as plt
from seqprop.visualization import *
from seqprop.generator import *
from seqprop.predictor import *
from seqprop.optimizer import *
from definitions.aparent_all_libs import load_saved_predictor
class IdentityEncoder(iso.SequenceEncoder) :
    
    def __init__(self, seq_len, channel_map) :
        super(IdentityEncoder, self).__init__('identity', (seq_len, len(channel_map)))
        
        self.seq_len = seq_len
        self.n_channels = len(channel_map)
        self.encode_map = channel_map
        self.decode_map = {
            ix : nt for nt, ix in self.encode_map.items()
        }
    
    def encode(self, seq) :
        encoding = np.zeros((self.seq_len, self.n_channels))
        
        for i in range(len(seq)) :
            if seq[i] in self.encode_map :
                channel_ix = self.encode_map[seq[i]]
                encoding[i, channel_ix] = 1.
        
        return encoding
    
    def encode_inplace(self, seq, encoding) :
        for i in range(len(seq)) :
            if seq[i] in self.encode_map :
                channel_ix = self.encode_map[seq[i]]
                encoding[i, channel_ix] = 1.
    
    def encode_inplace_sparse(self, seq, encoding_mat, row_index) :
        raise NotImplementedError()
    
    def decode(self, encoding) :
        seq = ''
        for pos in range(0, encoding.shape[0]) :
            argmax_nt = np.argmax(encoding[pos, :])
            seq += self.decode_map[argmax_nt]
        
        return seq
    
    def decode_sparse(self, encoding_mat, row_index) :
        raise NotImplementedError()
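#Illustrative usage sketch (not from the original notebook): round-trip a short
#sequence through IdentityEncoder. The channel map here is an assumed ACGT ordering,
#matching the encoders constructed further below.
demo_encoder = IdentityEncoder(8, {'A':0, 'C':1, 'G':2, 'T':3})
demo_one_hot = demo_encoder.encode('ACGTACGT') #shape (8, 4), one 1. per position
assert demo_encoder.decode(demo_one_hot) == 'ACGTACGT'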
import warnings
warnings.simplefilter("ignore")

from keras.backend.tensorflow_backend import set_session

def contain_tf_gpu_mem_usage() :
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    set_session(sess)

contain_tf_gpu_mem_usage()
Using TensorFlow backend.
#Define target isoform loss function
def get_isoform_loss() :
    
    def loss_func(predictor_outputs) :
        pwm_logits, pwm, sampled_pwm, iso_pred, cut_pred, iso_pred_score, cut_score_pred = predictor_outputs
        
        #Specify costs
        iso_loss = -1.0 * K.mean(iso_pred_score[..., 0], axis=0)
        
        #Compute total loss
        total_loss = iso_loss
        
        return K.reshape(K.sum(total_loss, axis=0), (1,))
    
    def val_loss_func(predictor_outputs) :
        pwm_logits, pwm, sampled_pwm, iso_pred, cut_pred, iso_pred_score, cut_score_pred = predictor_outputs
        
        #Specify costs
        iso_loss = -1.0 * K.mean(iso_pred_score[..., 0], axis=0)
        
        #Compute total loss
        total_loss = iso_loss
        
        return K.reshape(K.mean(total_loss, axis=0), (1,))
    
    return loss_func, val_loss_func
def get_nop_transform() :
    
    def _transform_func(pwm) :
        return pwm
    
    return _transform_func
class ValidationCallback(Callback):
    
    def __init__(self, val_name, val_loss_model, val_steps) :
        super(ValidationCallback, self).__init__()
        
        self.val_name = val_name
        self.val_loss_model = val_loss_model
        self.val_steps = val_steps
        self.val_loss_history = []
        
        #Track initial val loss
        self.val_loss_history.append(self.val_loss_model.predict(x=None, steps=self.val_steps)[0])
    
    def on_batch_end(self, batch, logs={}) :
        #Track val loss
        val_loss_value = self.val_loss_model.predict(x=None, steps=self.val_steps)[0]
        self.val_loss_history.append(val_loss_value)
        #print("mean(" + self.val_name + ") = " + str(np.mean(val_loss_value)))
#Sequence optimization monitor during training
class StoreSequenceMonitor(Callback):
    
    def __init__(self, seqprop_model, sequence_encoder, run_dir="", run_prefix="", store_every1=5, store_every2=100, swap_iter=100, val_steps=1) :
        super(StoreSequenceMonitor, self).__init__()
        
        self.seqprop_model = seqprop_model
        self.val_steps = val_steps
        self.sequence_encoder = sequence_encoder
        self.run_prefix = run_prefix
        self.run_dir = run_dir
        self.edit_distance_samples = []
        self.store_every1 = store_every1
        self.store_every2 = store_every2
        self.store_every = store_every1
        self.swap_iter = swap_iter
        
        if not os.path.exists(self.run_dir) :
            os.makedirs(self.run_dir)
        
        seqs = self._sample_sequences()
        #self._store_sequences(seqs, 0)
    
    def _sample_sequences(self) :
        #Sampled PWM tensor has shape (n_samples, n_sequences, seq_len, n_channels, 1)
        sampled_pwm = self.seqprop_model.predict(x=None, steps=self.val_steps)[2]
        
        seqs = []
        for i in range(sampled_pwm.shape[1]) :
            for j in range(sampled_pwm.shape[0]) :
                seqs.append(self.sequence_encoder.decode(sampled_pwm[j, i, :, :, 0]))
        
        return seqs
    
    def _store_sequences(self, seqs, epoch) :
        #Save sequences to file
        with open(self.run_dir + self.run_prefix + "_epoch_" + str(epoch) + "_" + str(self.val_steps) + "_steps.txt", "a+") as f :
            for i in range(len(seqs)) :
                f.write(seqs[i] + "\n")
    
    def on_batch_end(self, batch, logs={}) :
        if batch > self.swap_iter :
            self.store_every = self.store_every2
        
        if batch % self.store_every == 0 :
            seqs = self._sample_sequences()
            self._store_sequences(seqs, batch)
#Function for running SeqProp on a set of objectives to optimize
def run_seqprop(run_prefix, sequence_templates, loss_funcs, val_loss_funcs, transform_funcs, n_sequences=1, n_samples=1, n_valid_samples=1, library_context='simple', eval_mode='sample', normalize_logits=False, n_epochs=10, steps_per_epoch=100) :
    
    n_objectives = len(sequence_templates)
    
    seqprop_predictors = []
    valid_monitors = []
    train_histories = []
    valid_histories = []
    
    for obj_ix in range(n_objectives) :
        print("Optimizing objective " + str(obj_ix) + '...')
        
        sequence_template = sequence_templates[obj_ix]
        loss_func = loss_funcs[obj_ix]
        val_loss_func = val_loss_funcs[obj_ix]
        transform_func = transform_funcs[obj_ix]
        
        #Build Generator Network (one copy of the template per optimized PWM)
        _, seqprop_generator = build_generator(seq_length=205, n_sequences=n_sequences, n_samples=n_samples, sequence_templates=[sequence_template] * n_sequences, batch_normalize_pwm=normalize_logits, pwm_transform_func=transform_func, validation_sample_mode='sample')
        _, valid_generator = build_generator(seq_length=205, n_sequences=n_sequences, n_samples=n_valid_samples, sequence_templates=[sequence_template] * n_sequences, batch_normalize_pwm=normalize_logits, pwm_transform_func=None, validation_sample_mode='sample', master_generator=seqprop_generator)
        
        for layer in valid_generator.layers :
            layer.name += "_valversion"
        
        #Build Predictor Network and hook it on the generator PWM output tensor
        _, seqprop_predictor = build_predictor(seqprop_generator, load_saved_predictor(model_path, library_context=library_context), n_sequences=n_sequences, n_samples=n_samples, eval_mode=eval_mode)
        _, valid_predictor = build_predictor(valid_generator, load_saved_predictor(model_path, library_context=library_context), n_sequences=n_sequences, n_samples=n_valid_samples, eval_mode='sample')
        
        for layer in valid_predictor.layers :
            if 'aparent' in layer.name and '_valversion' not in layer.name :
                layer.name += "_valversion"
        
        #Build Loss Model (In: Generator seed, Out: Loss function)
        _, loss_model = build_loss_model(seqprop_predictor, loss_func)
        _, valid_loss_model = build_loss_model(valid_predictor, val_loss_func)
        
        #Specify Optimizer to use
        #opt = keras.optimizers.SGD(lr=0.5)
        #opt = keras.optimizers.SGD(lr=0.1, momentum=0.9, decay=0, nesterov=True)
        opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
        
        #Compile Loss Model (the loss output minimizes itself)
        loss_model.compile(loss=lambda true, pred: pred, optimizer=opt)
        
        def get_logit(p) :
            return np.log(p / (1. - p))
        
        #Specify callback entities
        measure_func = lambda pred_outs: np.mean(get_logit(np.expand_dims(pred_outs[0], axis=0) if len(pred_outs[0].shape) <= 2 else pred_outs[0]), axis=0)
        
        #train_monitor = FlexibleSeqPropCutMonitor(predictor=seqprop_predictor, plot_on_train_end=False, plot_every_epoch=False, track_every_step=True, measure_func=measure_func, measure_name='Isoform Log Odds', plot_pwm_start=70-50, plot_pwm_end=76+50, sequence_template=sequence_template, plot_pwm_indices=np.arange(n_sequences).tolist(), figsize=(12, 1.25))
        valid_monitor = FlexibleSeqPropCutMonitor(predictor=valid_predictor, plot_on_train_end=True, plot_every_epoch=False, track_every_step=True, measure_func=measure_func, measure_name='Isoform Log Odds', plot_pwm_start=70-50, plot_pwm_end=76+50, sequence_template=sequence_template, plot_pwm_indices=np.arange(n_sequences).tolist(), figsize=(12, 1.25))
        
        train_history = ValidationCallback('loss', loss_model, 1)
        valid_history = ValidationCallback('val_loss', valid_loss_model, 1)
        
        #Standard sequence decoder
        acgt_encoder = IdentityEncoder(205, {'A':0, 'C':1, 'G':2, 'T':3})
        
        #Build callback for storing intermediate sequences
        store_seq_monitor = StoreSequenceMonitor(valid_generator, acgt_encoder, run_dir="./seqprop_samples/" + run_prefix + "/", run_prefix="intermediate", val_steps=1)
        
        callbacks = [
            #EarlyStopping(monitor='loss', min_delta=0.001, patience=5, verbose=0, mode='auto'),
            valid_monitor,
            train_history,
            valid_history,
            store_seq_monitor
        ]
        
        #Fit Loss Model
        _ = loss_model.fit(
            [], np.ones((1, 1)), #Dummy training example
            epochs=n_epochs,
            steps_per_epoch=steps_per_epoch,
            callbacks=callbacks
        )
        
        valid_monitor.predictor = None
        train_history.val_loss_model = None
        valid_history.val_loss_model = None
        
        seqprop_predictors.append(seqprop_predictor)
        valid_monitors.append(valid_monitor)
        train_histories.append(train_history)
        valid_histories.append(valid_history)
    
    return seqprop_predictors, valid_monitors, train_histories, valid_histories
#Specify file path to pre-trained predictor network
save_dir = os.path.join(os.getcwd(), '../../../aparent/saved_models')
model_name = 'aparent_plasmid_iso_cut_distalpas_all_libs_no_sampleweights_sgd.h5'
model_path = os.path.join(save_dir, model_name)
import random

def set_seed(seed_value) :
    # 1. Set the `PYTHONHASHSEED` environment variable at a fixed value
    os.environ['PYTHONHASHSEED'] = str(seed_value)
    
    # 2. Set the `python` built-in pseudo-random generator at a fixed value
    random.seed(seed_value)
    
    # 3. Set the `numpy` pseudo-random generator at a fixed value
    np.random.seed(seed_value)
    
    # 4. Set the `tensorflow` pseudo-random generator at a fixed value
    tf.set_random_seed(seed_value)
    
    # 5. Configure a new global `tensorflow` session
    session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
    sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
    K.set_session(sess)
run_prefix = "seqprop_apa_max_isoform_simple_20000_updates_non_normalized_retry_2"
seq_template = 'TCCCTACACGACGCTCTTCCGATCTNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNAATAAANNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNAATAAATTGTTCGTTGGTCGGCTTGAGTGCGTGTGTCTCGTTTAGATGCTGCGCCTAACCCTAAGCAGATTCTTCATGCAATTG'
library_context = 'simple'
rand_seed = 1177 #14755
#Run SeqProp Optimization
K.clear_session()
#set_seed(rand_seed)
print("Running optimization experiment 'Alien1 Max Isoform'")
#Number of PWMs to generate per objective
n_sequences = 10
#Number of One-hot sequences to sample from the PWM at each grad step
n_samples = 1
#Number of epochs per objective to optimize
n_epochs = 1
#Number of steps (grad updates) per epoch
steps_per_epoch = 20000
#Either 'pwm' or 'sample'
eval_mode = 'sample'
#Normalize sequence logits
normalize_logits = False #True
#Number of One-hot validation sequences to sample from the PWM
n_valid_samples = 1
sequence_templates = [
    seq_template
]

losses, val_losses = zip(*[
    get_isoform_loss()
])

transforms = [
    None
]
seqprop_predictors, valid_monitors, train_histories, valid_histories = run_seqprop(run_prefix, sequence_templates, losses, val_losses, transforms, n_sequences, n_samples, n_valid_samples, library_context, eval_mode, normalize_logits, n_epochs, steps_per_epoch)
seqprop_predictor, valid_monitor, train_history, valid_history = seqprop_predictors[0], valid_monitors[0], train_histories[0], valid_histories[0]
Running optimization experiment 'Alien1 Max Isoform'
Optimizing objective 0...
Epoch 1/1
20000/20000 [==============================] - 334s 17ms/step - loss: -73.6884
def load_data(vae_encoder, vae_pwm_start=0, vae_pwm_end=1) :
    #Load cached sequences (last n_test lines serve as the held-out test set)
    n_test = 1000
    
    seqs = []
    with open('vae/apa_simple_seqs_strong.txt', 'rt') as f :
        for line_raw in f :
            line = line_raw.strip()
            seqs.append(line.split("\t")[0])
    
    seqs = seqs[-n_test:]
    
    print("len(seqs) = " + str(len(seqs)) + " (loaded)")
    
    #One-hot encode the VAE sub-window of each sequence; result shape (n_test, 1, window_len, 4)
    x_test = np.concatenate([np.expand_dims(np.expand_dims(vae_encoder(seq[vae_pwm_start:vae_pwm_end]), axis=0), axis=0) for seq in seqs], axis=0)
    
    print(x_test.shape)
    
    return x_test
#Specify problem-specific parameters
vae_path = "vae/saved_models/vae_apa_max_isoform_simple_strong_len_96_latent_100_epochs_50_kl_factor_1125_annealed"

#VAE model path
saved_vae_encoder_model_path = vae_path + "_encoder.h5"
saved_vae_decoder_model_path = vae_path + "_decoder.h5"

#Padding for the VAE
vae_upstream_padding = ''
vae_downstream_padding = ''

#VAE sequence template
vae_sequence_template = 'N' * 45 + 'AATAAA' + 'N' * 45

#VAE latent dim
vae_latent_dim = 100

#Oracle predictor model path
saved_predictor_model_path = '../../../aparent/saved_models/aparent_plasmid_iso_cut_distalpas_all_libs_no_sampleweights_sgd.h5'

#Substring indices for VAE
vae_pwm_start = 25
vae_pwm_end = 25 + 96

#VAE parameter collection
vae_params = [
    saved_vae_encoder_model_path,
    saved_vae_decoder_model_path,
    vae_upstream_padding,
    vae_downstream_padding,
    vae_latent_dim,
    vae_pwm_start,
    vae_pwm_end
]
acgt_encoder = IdentityEncoder(96, {'A':0, 'C':1, 'G':2, 'T':3})
#Load VAE models
vae_encoder_model = load_model(saved_vae_encoder_model_path, custom_objects={'st_sampled_softmax':st_sampled_softmax, 'st_hardmax_softmax':st_hardmax_softmax, 'min_pred':lambda y_true,y_pred:y_pred})
vae_decoder_model = load_model(saved_vae_decoder_model_path, custom_objects={'st_sampled_softmax':st_sampled_softmax, 'st_hardmax_softmax':st_hardmax_softmax, 'min_pred':lambda y_true,y_pred:y_pred})
#Load data set
x_test = load_data(acgt_encoder, vae_pwm_start=vae_pwm_start, vae_pwm_end=vae_pwm_end)
len(seqs) = 1000 (loaded)
(1000, 1, 96, 4)
from scipy.stats import norm

def get_z_sample_numpy(z_mean, z_log_var, n_samples=1) :
    n = z_mean.shape[0]
    m = z_mean.shape[2]
    
    epsilon = np.random.normal(loc=0., scale=1., size=(n, n_samples, m))
    
    return z_mean + np.exp(0.5 * z_log_var) * epsilon
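#Illustrative shape check (not from the original notebook) for the reparameterization
#sampler: z_mean / z_log_var of shape (batch, 1, latent_dim) broadcast against noise of
#shape (batch, n_samples, latent_dim). The demo sizes (batch 2, latent dim 5) are arbitrary.
demo_z_mean = np.zeros((2, 1, 5))
demo_z_log_var = np.zeros((2, 1, 5)) #log-variance 0 -> unit-variance Gaussian
demo_z = get_z_sample_numpy(demo_z_mean, demo_z_log_var, n_samples=4)
print(demo_z.shape) #(2, 4, 5): one latent draw per (sequence, sample)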
#Evaluate VAE Likelihood (importance-sampled ELBO, in log10) on supplied data
def evaluate_elbo(vae_encoder_model, vae_decoder_model, sequence_one_hots, pwm_start=0, pwm_end=-1, n_samples=1) :
    _epsilon = 10**-6
    
    if pwm_end == -1 :
        pwm_end = sequence_one_hots.shape[2]
    
    #Get sequence VAE encodings
    z_mean, z_log_var, _ = vae_encoder_model.predict(x=sequence_one_hots, batch_size=32, verbose=False)
    
    z_mean = np.tile(np.expand_dims(z_mean, axis=1), (1, n_samples, 1))
    z_log_var = np.tile(np.expand_dims(z_log_var, axis=1), (1, n_samples, 1))
    
    z = get_z_sample_numpy(z_mean, z_log_var, n_samples=n_samples)
    
    #Get re-decoded sequence PWMs
    s_dummy = np.zeros((sequence_one_hots.shape[0], 1))
    
    decoded_pwms = np.zeros((sequence_one_hots.shape[0], n_samples) + sequence_one_hots.shape[1:])
    for sample_ix in range(n_samples) :
        _, decoded_pwm, _ = vae_decoder_model.predict(x=[s_dummy, z[:, sample_ix, :]], batch_size=32, verbose=False)
        decoded_pwms[:, sample_ix, :, :, :] = decoded_pwm
    
    sequence_one_hots_expanded = np.tile(np.expand_dims(sequence_one_hots, 1), (1, n_samples, 1, 1, 1))
    
    #Calculate reconstruction log prob (log10)
    log_p_x_given_z = np.sum(np.sum(sequence_one_hots_expanded[:, :, :, pwm_start:pwm_end, :] * np.log(np.clip(decoded_pwms[:, :, :, pwm_start:pwm_end, :], _epsilon, 1. - _epsilon)) / np.log(10.), axis=(2, 4)), axis=2)
    
    #Calculate standard normal and importance log probs (log10)
    log_p_std_normal = np.sum(norm.logpdf(z, 0., 1.) / np.log(10.), axis=-1)
    log_p_importance = np.sum(norm.logpdf(z, z_mean, np.sqrt(np.exp(z_log_var))) / np.log(10.), axis=-1)
    
    #Calculate per-sample importance weight
    log_p_vae = log_p_x_given_z + log_p_std_normal - log_p_importance
    log_p_vae_div_n = log_p_vae - np.log(n_samples) / np.log(10.)
    
    #Calculate mean likelihood across samples (log-sum-exp trick, base 10)
    max_log_p_vae = np.max(log_p_vae_div_n, axis=-1)
    log_mean_p_vae = max_log_p_vae + np.log(np.sum(10**(log_p_vae_div_n - np.expand_dims(max_log_p_vae, axis=-1)), axis=-1)) / np.log(10.)
    
    mean_log_p_vae = np.mean(log_mean_p_vae)
    
    return log_mean_p_vae, mean_log_p_vae, log_p_vae
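#Sanity check (not from the original notebook) of the base-10 log-sum-exp step used in
#evaluate_elbo: averaging K per-sample likelihoods p_k in log10 space without underflow.
#The demo values are arbitrary.
demo_log10_p = np.array([-60., -62., -65.]) #per-sample log10 likelihoods
demo_log10_p_div_n = demo_log10_p - np.log10(demo_log10_p.shape[0]) #divide by K inside the log
demo_max = np.max(demo_log10_p_div_n)
demo_log10_mean_p = demo_max + np.log10(np.sum(10**(demo_log10_p_div_n - demo_max)))
#Matches the naive computation whenever the naive version doesn't underflow
assert np.isclose(demo_log10_mean_p, np.log10(np.mean(10**demo_log10_p)))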
#Load final sequences
run_prefix = "seqprop_apa_max_isoform_simple_20000_updates"
folder_path = "seqprop_samples/" + run_prefix + "/"
file_path = folder_path + "intermediate_epoch_19900_1_steps.txt"

seq_template = 'TCCCTACACGACGCTCTTCCGATCTNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNAATAAANNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNAATAAATTGTTCGTTGGTCGGCTTGAGTGCGTGTGTCTCGTTTAGATGCTGCGCCTAACCCTAAGCAGATTCTTCATGCAATTG'

seqs = []
with open(file_path, "rt") as f :
    split_on_tab = False
    for l in f.readlines() :
        l_strip = l.strip()
        
        seq = l_strip
        if split_on_tab :
            seq = l_strip.split("\t")[0]
        
        if seq_template is not None :
            #Re-impose the fixed template positions on the sampled sequence
            seq = ''.join([
                seq_template[j] if seq_template[j] != 'N' else seq[j]
                for j in range(len(seq))
            ])
        
        seqs.append(seq)
#Evaluate ELBOs
n_z_samples = 32

#Evaluate VAE Likelihood on test data
log_mean_p_vae_test, mean_log_p_vae_test, _ = evaluate_elbo(vae_encoder_model, vae_decoder_model, x_test, n_samples=n_z_samples)

#Evaluate VAE Likelihood on generated data (one-hot encode the 96-nt VAE window)
x_gen = np.zeros((len(seqs), 1, 96, 4))
for i in range(len(seqs)) :
    gen_seq = seqs[i][25:25+96]
    for j in range(len(gen_seq)) :
        if gen_seq[j] == 'A' :
            x_gen[i, 0, j, 0] = 1.
        elif gen_seq[j] == 'C' :
            x_gen[i, 0, j, 1] = 1.
        elif gen_seq[j] == 'G' :
            x_gen[i, 0, j, 2] = 1.
        elif gen_seq[j] == 'T' :
            x_gen[i, 0, j, 3] = 1.

log_mean_p_vae_new, mean_log_p_vae_new, _ = evaluate_elbo(vae_encoder_model, vae_decoder_model, x_gen, n_samples=n_z_samples)

#Evaluate VAE Likelihood on random data (uniform-random letters at template 'N' positions)
n = x_test.shape[0]

x_random = np.zeros((n, 1, 96, 4))
for i in range(n) :
    random_seq = ''
    for j in range(len(vae_sequence_template)) :
        if vae_sequence_template[j] == 'N' :
            random_seq += np.random.choice(['A', 'C', 'G', 'T'])
        else :
            random_seq += vae_sequence_template[j]
    
    for j in range(len(random_seq)) :
        if random_seq[j] == 'A' :
            x_random[i, 0, j, 0] = 1.
        elif random_seq[j] == 'C' :
            x_random[i, 0, j, 1] = 1.
        elif random_seq[j] == 'G' :
            x_random[i, 0, j, 2] = 1.
        elif random_seq[j] == 'T' :
            x_random[i, 0, j, 3] = 1.

log_mean_p_vae_random, mean_log_p_vae_random, _ = evaluate_elbo(vae_encoder_model, vae_decoder_model, x_random, n_samples=n_z_samples)
#Compare VAE Log Likelihoods (ELBO), with random background
def plot_joint_histo(measurements, labels, colors, x_label, y_label, n_bins=50, figsize=(6, 4), save_fig=False, fig_name="default_1", fig_dpi=150, min_val=None, max_val=None, max_y_val=None) :
    
    min_hist_val = np.min(measurements[0])
    max_hist_val = np.max(measurements[0])
    for i in range(1, len(measurements)) :
        min_hist_val = min(min_hist_val, np.min(measurements[i]))
        max_hist_val = max(max_hist_val, np.max(measurements[i]))
    
    if min_val is not None :
        min_hist_val = min_val
    if max_val is not None :
        max_hist_val = max_val
    
    hists = []
    bin_edges = []
    means = []
    for i in range(len(measurements)) :
        hist, b_edges = np.histogram(measurements[i], range=(min_hist_val, max_hist_val), bins=n_bins, density=True)
        
        hists.append(hist)
        bin_edges.append(b_edges)
        means.append(np.mean(measurements[i]))
    
    bin_width = bin_edges[0][1] - bin_edges[0][0]
    
    #Compare Log Likelihoods
    f = plt.figure(figsize=figsize)
    
    for i in range(len(measurements)) :
        plt.bar(bin_edges[i][1:] - bin_width / 2., hists[i], width=bin_width, linewidth=2, edgecolor='black', color=colors[i], label=labels[i])
    
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    
    plt.xlim(min_hist_val, max_hist_val)
    if max_y_val is not None :
        plt.ylim(0, max_y_val)
    
    plt.xlabel(x_label, fontsize=14)
    plt.ylabel(y_label, fontsize=14)
    
    for i in range(len(measurements)) :
        plt.axvline(x=means[i], linewidth=2, color=colors[i], linestyle="--")
    
    plt.legend(fontsize=14, loc='upper left')
    
    plt.tight_layout()
    
    if save_fig :
        plt.savefig(fig_name + ".eps")
        plt.savefig(fig_name + ".svg")
        plt.savefig(fig_name + ".png", dpi=fig_dpi, transparent=True)
    
    plt.show()
plot_joint_histo(
    [log_mean_p_vae_random, log_mean_p_vae_test, log_mean_p_vae_new],
    ['Rand', 'Test', 'Gen'],
    ['green', 'orange', 'red'],
    'VAE Log Likelihood',
    'Data Density',
    min_val=-80,
    max_val=-50,
    n_bins=40,
    figsize=(6, 4),
    save_fig=True,
    fig_name=run_prefix + "_likelihood_w_random"
)