conda create -n tf20 python==3.7
conda activate tf20
pip install jupyter
pip install matplotlib
pip install scipy
pip install music21
pip install tensorflow==2.0.0-alpha0
import tensorflow as tf
print(tf.__version__)
2.0.0-alpha0
import tensorflow.keras as keras
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM
import tensorflow.keras.utils as utils
import os
#http://web.mit.edu/music21/doc/usersGuide/usersGuide_08_installingMusicXML.html
import music21
# Training melody encoded as tinyNotation-style tokens: each token is a
# pitch letter (c..g) followed by a duration digit ("4" = quarter note,
# "8" = eighth note).  This single 54-note sequence is both the training
# data and the ground truth for the prediction sections below.
seq = ['g8', 'e8', 'e4', 'f8', 'd8', 'd4', 'c8', 'd8', 'e8', 'f8', 'g8', 'g8', 'g4',
       'g8', 'e8', 'e8', 'e8', 'f8', 'd8', 'd4', 'c8', 'e8', 'g8', 'g8', 'e8', 'e8', 'e4',
       'd8', 'd8', 'd8', 'd8', 'd8', 'e8', 'f4', 'e8', 'e8', 'e8', 'e8', 'e8', 'f8', 'g4',
       'g8', 'e8', 'e4', 'f8', 'd8', 'd4', 'c8', 'e8', 'g8', 'g8', 'e8', 'e8', 'e4']
print("length of seq: {0}".format(len(seq)))
length of seq: 54
# Render the training melody with music21.
# str.join replaces the original repeated `+=` concatenation (which is
# quadratic); the trailing space reproduces the old loop's output exactly.
note_seq = " ".join(seq) + " "
# tinyNotation wants one space-separated string prefixed by a time signature.
m = music21.converter.parse("2/4 " + note_seq, format='tinyNotation')
m.show("midi")  # play the melody
m.show()        # display the score (needs a configured MusicXML viewer)
# Note vocabulary: 14 codes (7 pitch letters x 2 durations) mapped to
# integer class ids for the classifier's output layer.
code2idx = {'c4': 0, 'd4': 1, 'e4': 2, 'f4': 3, 'g4': 4, 'a4': 5, 'b4': 6,
            'c8': 7, 'd8': 8, 'e8': 9, 'f8': 10, 'g8': 11, 'a8': 12, 'b8': 13}
# Inverse lookup, derived from code2idx so the two tables can never disagree.
idx2code = {index: code for code, index in code2idx.items()}
# Per-character feature tables: pitch letter -> scale step, duration digit -> flag.
code2scale = {'c': 0, 'd': 1, 'e': 2, 'f': 3, 'g': 4, 'a': 5, 'b': 6}
code2length = {'4': 0, '8': 1}
max_scale_value = len(code2scale) - 1
print(max_scale_value)


def code2features(code):
    """Turn a note code into a 2-element numeric feature vector.

    'c4' --> ['c', '4'] --> [0, 0] --> [0/6, 0/6] --> [0, 0]
    'd4' --> ['d', '4'] --> [1, 0] --> [1/6, 0/6] --> [0.16666, 0]
    'a8' --> ['a', '8'] --> [5, 1] --> [5/6, 1/6] --> [0.83333, 0.16666]
    """
    scale = code2scale[code[0]] / float(max_scale_value)  # pitch, scaled to [0, 1]
    length = code2length[code[1]]                         # 0 = quarter, 1 = eighth
    return [scale, length]
6
def seq2dataset(seq, window_size):
    """Build (X, Y) training arrays by sliding a window over `seq`.

    Returns:
        X: ndarray of shape (n_windows * window_size, 2) -- the feature
           rows of every window, flattened in order (the caller reshapes
           to (n_windows, window_size, 2)).
        Y: ndarray of shape (n_windows, 1) -- class id of the note that
           immediately follows each window.
    """
    feature_rows = []
    targets = []
    for start in range(len(seq) - window_size):
        window = seq[start:start + window_size]
        feature_rows.extend(code2features(note) for note in window)
        # Target is the note right after the window.
        targets.append([code2idx[seq[start + window_size]]])
    return np.array(feature_rows), np.array(targets)
# Sanity check on a tiny sequence: 6 notes with window_size=4 yields
# 2 windows -> x has 2*4 = 8 feature rows, y has 2 target class ids.
test_seq = ['c4', 'd4', 'e4', 'f4', 'g4', 'd8']
x, y = seq2dataset(seq=test_seq, window_size=4)
print(x)
print(y)
[[0. 0. ] [0.16666667 0. ] [0.33333333 0. ] [0.5 0. ] [0.16666667 0. ] [0.33333333 0. ] [0.5 0. ] [0.66666667 0. ]] [[4] [8]]
n_steps = 4   # timesteps per LSTM input window (notes of context)
n_inputs = 2  # features per note: (scale position, duration flag)
x_train, y_train = seq2dataset(seq=seq, window_size=n_steps)
print("x_train: {0}".format(x_train.shape))
print("y_train: {0}".format(y_train.shape))
x_train: (200, 2) y_train: (50, 1)
# Reshape the input to (num samples, timesteps, features).
# Use -1 for the sample axis instead of the original hard-coded 50 so
# this keeps working if the length of the training melody changes.
x_train = np.reshape(x_train, (-1, n_steps, n_inputs))
y_train = utils.to_categorical(y_train)           # class ids -> one-hot rows
one_hot_vec_size = y_train.shape[1]               # distinct note classes present in the targets
print("one hot encoding vector size is ", one_hot_vec_size)
print("After pre-processing")
print("x_train: {0}".format(x_train.shape))
print("y_train: {0}".format(y_train.shape))
print(x_train[0])
print(y_train[0])
one hot encoding vector size is 12 After pre-processing x_train: (50, 4, 2) y_train: (50, 12) [[0.66666667 1. ] [0.33333333 1. ] [0.33333333 0. ] [0.5 1. ]] [0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0.]
# Stateful LSTM classifier: a window of n_steps notes (n_inputs features
# each) in, a softmax over the one_hot_vec_size note classes out.
# batch_input_shape pins the batch size to 1 so the LSTM's internal
# state can be carried across fit/predict calls (stateful=True); the
# training loop resets the state explicitly between epochs.
model = Sequential([
    LSTM(
        units=128,
        kernel_initializer='glorot_normal',
        bias_initializer='zero',
        batch_input_shape=(1, n_steps, n_inputs),
        stateful=True,
        name="lstm"
    ),
    Dense(
        units=one_hot_vec_size,
        kernel_initializer='glorot_normal',
        bias_initializer='zero',
        activation='softmax',
        name="dense"
    ),
])
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= lstm (UnifiedLSTM) (1, 128) 67072 _________________________________________________________________ dense (Dense) (1, 12) 1548 ================================================================= Total params: 68,620 Trainable params: 68,620 Non-trainable params: 0 _________________________________________________________________
# Multi-class setup: one-hot targets + softmax output pair with
# categorical cross-entropy.
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)
class LossHistory(keras.callbacks.Callback):
    """Keras callback that records the training loss after every epoch.

    The training loop below calls model.fit(epochs=1) repeatedly, so the
    epoch index Keras reports is always 0; this callback keeps its own
    running epoch counter instead.  Call init() once before training
    (it is deliberately named init, not __init__, because the script
    instantiates first and initialises explicitly).
    """

    def init(self):
        # Manual (re)initialisation so one instance can be reused.
        self.epoch = 0
        self.losses = []

    def on_epoch_end(self, epoch, logs=None):
        # `epoch` is the index Keras passes positionally (always 0 here
        # because each fit() call runs a single epoch); we use our own
        # self.epoch counter.  Avoid a mutable default for `logs`.
        logs = logs or {}
        self.losses.append(logs.get('loss'))
        if self.epoch % 100 == 0:
            print("epoch: {0} - loss: {1:8.6f}".format(self.epoch, logs.get('loss')))
        self.epoch += 1
num_epochs = 1500
history = LossHistory()  # create the loss-history object
history.init()
# Stateful training: fit one epoch at a time so the LSTM's internal
# state can be reset manually after each pass (shuffle=False and
# batch_size=1 keep the note windows in melody order within an epoch).
for epoch_idx in range(num_epochs + 1):
    model.fit(
        x=x_train,
        y=y_train,
        epochs=1,
        batch_size=1,
        verbose=0,
        shuffle=False,
        callbacks=[history]
    )
    # Early stop once the loss is effectively zero.
    if history.losses[-1] < 1e-5:
        print("epoch: {0} - loss: {1:8.6f}".format(epoch_idx, history.losses[-1]))
        model.reset_states()
        break
    model.reset_states()
epoch: 0 - loss: 2.348783 epoch: 100 - loss: 0.495216 epoch: 200 - loss: 0.003327 epoch: 300 - loss: 0.000147 epoch: 395 - loss: 0.000010
import matplotlib.pyplot as plt
%matplotlib inline
# Plot the per-epoch training loss recorded by the LossHistory callback.
plt.plot(history.losses)
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train'], loc='upper right')
plt.show()
# Evaluate on the training data itself (a memorisation check, not a
# generalisation test); reset the LSTM state afterwards.
scores = model.evaluate(x_train, y_train, batch_size=1)
print("{0}: {1}".format(model.metrics_names[1], scores[1]*100))
model.reset_states()
50/50 [==============================] - 0s 844us/sample - loss: 9.6249e-06 - accuracy: 1.0000 accuracy: 100.0
pred_count = 50  # maximum number of notes to predict
# One-step prediction: every input window comes from the ground-truth
# training data, so each prediction is independent of earlier mistakes.
seq_out = ['g8', 'e8', 'e4', 'f8']  # seed: the melody's first window
pred_out = model.predict(x_train)
for i in range(pred_count):
    idx = np.argmax(pred_out[i])   # convert the one-hot output row to a class index
    seq_out.append(idx2code[idx])  # seq_out is the final score, so store the note code, not the index
model.reset_states()
print("one step prediction : ", seq_out)
one step prediction : ['g8', 'e8', 'e4', 'f8', 'd8', 'd4', 'c8', 'd8', 'e8', 'f8', 'g8', 'g8', 'g4', 'g8', 'e8', 'e8', 'e8', 'f8', 'd8', 'd4', 'c8', 'e8', 'g8', 'g8', 'e8', 'e8', 'e4', 'd8', 'd8', 'd8', 'd8', 'd8', 'e8', 'f4', 'e8', 'e8', 'e8', 'e8', 'e8', 'f8', 'g4', 'g8', 'e8', 'e4', 'f8', 'd8', 'd4', 'c8', 'e8', 'g8', 'g8', 'e8', 'e8', 'e4']
# Full-song prediction: the model is fed its own output.  Only the seed
# window is ground truth; every later input window contains previously
# predicted notes, so errors can compound.
#seq_in = ['g8', 'e8', 'e4', 'f8']
seq_in = ['f8', 'c8', 'd8', 'e4']
# Copy the seed instead of aliasing it: the original `seq_out = seq_in`
# made every append below also mutate seq_in.
seq_out = list(seq_in)
# Feature rows for the current sliding window of n_steps notes
# (typo `seq_in_featrues` fixed; the name is local to this section).
seq_in_features = [code2features(note) for note in seq_in]
for i in range(pred_count):
    sample_in = np.array(seq_in_features)
    sample_in = np.reshape(sample_in, (1, n_steps, n_inputs))  # (samples, timesteps, features)
    pred_out = model.predict(sample_in)
    idx = np.argmax(pred_out)
    seq_out.append(idx2code[idx])
    # Slide the window: append the predicted note's features, drop the oldest.
    seq_in_features.append(code2features(idx2code[idx]))
    seq_in_features.pop(0)
model.reset_states()
print("full song prediction: ", seq_out)
full song prediction: ['f8', 'c8', 'd8', 'e4', 'd8', 'd4', 'c8', 'd8', 'g8', 'g8', 'g8', 'g4', 'g8', 'e8', 'e8', 'e8', 'f8', 'd8', 'd4', 'c8', 'e8', 'g8', 'g8', 'e8', 'e8', 'e4', 'd8', 'd8', 'd8', 'd8', 'd8', 'e8', 'f4', 'e8', 'e8', 'e8', 'e8', 'e8', 'f8', 'g4', 'g8', 'e8', 'e4', 'f8', 'd8', 'd4', 'c8', 'e8', 'g8', 'g8', 'e8', 'e8', 'e4', 'd8']
# Render and save the predicted melody.
# str.join replaces the original repeated `+=` concatenation (quadratic);
# the trailing space reproduces the old loop's output exactly.
note_seq = " ".join(seq_out) + " "
m = music21.converter.parse("2/4 " + note_seq, format='tinyNotation')
m.show("midi")  # play the generated melody
m.show()        # display the score
m.write("midi", fp="./new_music_2.mid")  # returns the path it wrote
'./new_music_2.mid'