In the sequel we aim to visualize certain arbitrage opportunities in multivariate markets by means of machine learning.
import numpy as np
from keras.models import Model
from keras.layers import Input, Dense, Concatenate, Multiply, Lambda, Add
from keras import optimizers
from keras import initializers
import keras.backend as K
import matplotlib.pyplot as plt
#name = 'scaleable arbitrage'
#name = 'UPBR'
name = 'non-scaleable arbitrage'
N = 50   # number of trading dates
T = 1.0  # time horizon
#Definition of neural networks for hedging strategies
if name == 'scaleable arbitrage':
m = 2 # dimension of price
else:
m = 1
d = 3 # number of layers in strategy
n = 32 # nodes in all but the last layer of each network
# architecture is the same for all networks
layers = []
for j in range(N):
    for i in range(d):
        if i < d-1:
            nodes = n
            layer = Dense(nodes, activation='tanh', trainable=True,
                          kernel_initializer=initializers.RandomNormal(0, 0.5),
                          bias_initializer=initializers.RandomNormal(0, 0.5),
                          name=str(i)+str(j))
        else:
            nodes = m
            layer = Dense(nodes, activation='linear', trainable=True,
                          kernel_initializer=initializers.RandomNormal(0, 0.5),
                          bias_initializer=initializers.RandomNormal(0, 0.5),
                          name=str(i)+str(j))
        layers = layers + [layer]
#Implementing the loss function
# Inputs to the model (the training set below): the initial price S0,
# the initial wealth (here 0), and the increments of the price process
price = Input(shape=(m,))
wealth = Input(shape=(1,))
strategies = []
outputs = []
inputs = [price] + [wealth]
for j in range(N):
    outputs = outputs + [wealth]                  # wealth before trading date j
    strategy = price
    for k in range(d):
        strategy = layers[k + j*d](strategy)      # strategy at j is the network applied to the current price
    incr = Input(shape=(m,))
    pricenew = Add()([price, incr])
    price = pricenew
    helper1 = Multiply()([strategy, incr])
    helper2 = Lambda(lambda x: K.sum(x, axis=1, keepdims=True))(helper1)  # gains of period j: strategy . increment
    wealthnew = Add()([wealth, helper2])
    inputs = inputs + [incr]
    wealth = wealthnew
    strategies = strategies + [strategy]
outputs = outputs + [wealth] + strategies         # wealth path, terminal wealth and strategies
outputs = Concatenate()(outputs)
model = Model(inputs=inputs, outputs=outputs)
50*(64+32*33+33) # number of parameters for m=1: per trading date 1->32 (64), 32->32 (1056), 32->1 (33), times N=50
57650
We generate data from three different models for $m$ traded assets and try to learn an investment strategy $ H $ such that the terminal wealth $ (H \bullet X)_T $ is nonnegative and strictly positive with positive probability, i.e. an arbitrage.
In the 'non-scaleable arbitrage' model below the price is $ X(t) = \sum_{i=1}^3 \big( \tfrac{1}{\sqrt{3}} + \tfrac{1}{2} B^i(t) \big)^2 $ for three independent Brownian motions $ B^1, B^2, B^3 $, so that $ X(0) = 1 $. By Ito's formula $X$ satisfies $$ dX(t) = \sqrt{X(t)} \, d W(t) + \frac{3}{4} \, dt \, , $$ where $ d W(t) = \sum_{i=1}^3 \frac{\frac{1}{\sqrt{3}} + \frac{1}{2}B^i(t)}{\sqrt{X(t)}} \, dB^i (t) $ defines a Brownian motion, too.
The value process of the learned portfolio with initial wealth $0$ is $$ (H \bullet X)_t = \int_0^t H_s \, dX(s) \, . $$
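For orientation, the network defined above implements the Riemann-sum analogue of this integral on the grid $ t_j = jT/N $; a sketch of the recursion encoded by the Multiply/Add layers is $$ V_{t_0} = 0 \, , \qquad V_{t_{j+1}} = V_{t_j} + H_{t_j} \cdot \big( X_{t_{j+1}} - X_{t_j} \big) \, , \qquad j = 0, \dots, N-1 \, , $$ so that $ V_{t_N} $ approximates $ (H \bullet X)_T $; this terminal value is the entry the custom loss below acts on.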
sigma = 0.2        # volatility parameter
drift = 0.2        # drift parameter
Ktrain = 10**4     # number of training trajectories
initialprice = 1
initialwealth = 0
Ktest = 10**2      # number of test trajectories (redefined below)
class datageneration:
    def __init__(self, increments, Ktrain, initialprice, initialwealth):
        self.increments = increments
        self.Ktrain = Ktrain
        self.initialprice = initialprice
        self.initialwealth = initialwealth
        self.N = len(self.increments)   # number of trading dates (equals the global N)
    def trainingdata(self):
        xtrain = ([self.initialprice*np.ones((self.Ktrain, m))] +
                  [self.initialwealth*np.ones((self.Ktrain, 1))] +
                  self.increments)
        ytrain = np.zeros((self.Ktrain, 2*self.N))   # dummy targets, ignored by the custom loss
        return xtrain, ytrain
class modelincrements:
    def __init__(self, name, Ktrain):
        self.name = name
        self.Ktrain = Ktrain
    def increments(self):
        if self.name == 'scaleable arbitrage':
            # two assets driven by the same Brownian increments (perfectly correlated)
            helper = [np.random.normal(0, sigma*np.sqrt(T)/np.sqrt(N), (self.Ktrain, 1)) for i in range(N)]
            increments = [np.ones((self.Ktrain, 2))*drift*T/N
                          + np.concatenate([helper[i], 2*helper[i]], axis=1) for i in range(N)]
        elif self.name == 'UPBR':
            # one asset with a drift exploding towards maturity
            increments = [np.ones((self.Ktrain, 1))*drift*(1/np.sqrt(T*(N-i)/N))*T/N
                          + np.random.normal(0, sigma*np.sqrt(T)/np.sqrt(N), (self.Ktrain, 1)) for i in range(N)]
        elif self.name == 'non-scaleable arbitrage':
            # squared radial part of a three-dimensional Brownian motion started at (1/sqrt(3),...,1/sqrt(3))
            increments = []
            trajectory = [1/np.sqrt(3)*np.ones((self.Ktrain, 3))]
            for i in range(N):
                x = np.random.normal(0, 0.5*np.sqrt(T)/np.sqrt(N), (self.Ktrain, 3))
                y = trajectory[-1] + x
                increments = increments + [np.sum(y**2, axis=1, keepdims=True)
                                           - np.sum(trajectory[-1]**2, axis=1, keepdims=True)]
                trajectory = trajectory + [y]
        return increments
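Reading off the three branches above (step size $ \Delta t = T/N $, grid points $ t_i = iT/N $, Brownian increments $ \Delta W $, and $ \mu = $ drift, $ \sigma = $ sigma): in the 'scaleable arbitrage' model the two assets move as $ \Delta X^1 = \mu \Delta t + \sigma \Delta W $ and $ \Delta X^2 = \mu \Delta t + 2\sigma \Delta W $ with the same noise, so the position $(2,-1)$ earns $ \mu \Delta t $ per step without risk and can be scaled arbitrarily; in the 'UPBR' model $ \Delta X = \frac{\mu}{\sqrt{T - t_i}} \Delta t + \sigma \Delta W $, a drift exploding towards maturity (unbounded profit with bounded risk); in the 'non-scaleable arbitrage' model the increments are those of the process $X$ described above.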
modelsA = modelincrements('scaleable arbitrage', Ktrain)
modelUPBR = modelincrements('UPBR', Ktrain)
modelnsA = modelincrements('non-scaleable arbitrage', Ktrain)
incrementssA = modelsA.increments()
incrementsUPBR = modelUPBR.increments()
incrementsnsA = modelnsA.increments()
sA = datageneration(incrementssA,Ktrain,initialprice,initialwealth)
UPBR = datageneration(incrementsUPBR,Ktrain,initialprice,initialwealth)
nsA = datageneration(incrementsnsA,Ktrain,initialprice,initialwealth)
xtrainsA, ytrainsA = sA.trainingdata()
xtrainUPBR, ytrainUPBR = UPBR.trainingdata()
xtrainnsA, ytrainnsA = nsA.trainingdata()
def custom_loss(y_true, y_pred):
    # y_pred[:, N] is the terminal wealth; the dummy y_true is ignored
    z = K.exp(-y_pred[:, N])
    z = K.mean(z)
    return z
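The loss is thus the Monte Carlo estimate of the expected exponential utility of terminal wealth, $$ \mathrm{loss} = \frac{1}{K} \sum_{k=1}^{K} \exp\big( - V^{(k)}_{t_N} \big) \approx E\big[ e^{-(H \bullet X)_T} \big] \, , $$ where $K$ denotes the number of samples in a batch; values below $ e^{0} = 1 $ mean the learned strategy improves on not trading at all, and in the scaleable case the loss can in principle be pushed arbitrarily close to $0$ by scaling the strategy up.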
adam = optimizers.Adam(lr=0.01)
model.compile(optimizer=adam, loss=custom_loss)
if name == 'scaleable arbitrage':
    xtrain = xtrainsA
    ytrain = ytrainsA
elif name == 'UPBR':
    xtrain = xtrainUPBR
    ytrain = ytrainUPBR
elif name == 'non-scaleable arbitrage':
    xtrain = xtrainnsA
    ytrain = ytrainnsA
for i in range(1):
    model.fit(x=xtrain, y=ytrain, epochs=50, verbose=True, batch_size=100)
Epoch 1/50 - 15s - loss: 1.6949
Epoch 2/50 - 3s - loss: 0.7824
Epoch 3/50 - 3s - loss: 0.7146
Epoch 4/50 - 3s - loss: 0.6767
Epoch 5/50 - 3s - loss: 0.6557
...
Epoch 48/50 - 3s - loss: 0.5381
Epoch 49/50 - 3s - loss: 0.5370
Epoch 50/50 - 4s - loss: 0.5352
# histogram of the terminal wealth over the training set
plt.hist(model.predict(xtrain)[:,N])
plt.show()
Ktest = 5
testmodelsA = modelincrements('scaleable arbitrage', Ktest)
testmodelUPBR = modelincrements('UPBR', Ktest)
testmodelnsA = modelincrements('non-scaleable arbitrage', Ktest)
testincrementssA = testmodelsA.increments()
testincrementsUPBR = testmodelUPBR.increments()
testincrementsnsA = testmodelnsA.increments()
testsA = datageneration(testincrementssA,Ktest,initialprice,initialwealth)
testUPBR = datageneration(testincrementsUPBR,Ktest,initialprice,initialwealth)
testnsA = datageneration(testincrementsnsA,Ktest,initialprice,initialwealth)
xtestsA, ytestsA = testsA.trainingdata()
xtestUPBR, ytestUPBR = testUPBR.trainingdata()
xtestnsA, ytestnsA = testnsA.trainingdata()
if name == 'scaleable arbitrage':
    xtest = xtestsA
    ytest = ytestsA
    testincrements = testincrementssA
elif name == 'UPBR':
    xtest = xtestUPBR
    ytest = ytestUPBR
    testincrements = testincrementsUPBR
elif name == 'non-scaleable arbitrage':
    xtest = xtestnsA
    ytest = ytestnsA
    testincrements = testincrementsnsA
learnedstrat = model.predict(xtest)
# wealth paths of the learned strategy along the test trajectories
for k in range(Ktest):
    plt.plot(learnedstrat[k, 0:N], label='k=%d' % k)
plt.legend(loc='upper left')
plt.show()
# plot the simulated test price paths
if m == 1:
    pricepath = np.ones((Ktest, N))*initialprice
    for i in range(1, N):
        # price at date i = initial price plus the first i increments
        pricepath[:, i] = pricepath[:, i-1] + testincrements[i-1][:, 0]
    for k in range(Ktest):
        plt.plot(pricepath[k, :], label='k=%d' % k)
    plt.legend(loc='upper left')
    plt.show()
elif m == 2:
    pricepath = np.ones((Ktest, N, m))*initialprice
    for i in range(1, N):
        for l in range(m):
            pricepath[:, i, l] = pricepath[:, i-1, l] + testincrements[i-1][:, l]
    for k in range(Ktest):
        for l in range(m):
            plt.plot(pricepath[k, :, l], label='k=%d, asset %d' % (k, l))
    plt.legend(loc='upper left')
    plt.show()
# plot the learned strategies (entries N+1,...,N+m*N of the model output)
for k in range(Ktest):
    if m == 1:
        plt.plot(learnedstrat[k, N+1:2*N+1], label='k=%d' % k)
    elif m == 2:
        plt.plot(learnedstrat[k, N+1:3*N+1:2], label='k=%d, asset 0' % k)
        plt.plot(learnedstrat[k, N+2:3*N+1:2], label='k=%d, asset 1' % k)
plt.legend(loc='upper left')
plt.show()