import pandas as pd
import numpy as np
np.random.seed(2017) #important to set the seed before importing keras
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping
from keras.optimizers import SGD
import matplotlib
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import seaborn as sns
Using Theano backend.
%matplotlib inline
sns.set()
sns.set_style('ticks')
#load the dataset
df = pd.read_csv("HR_comma_sep.csv")
#preview
df.head()
| | satisfaction_level | last_evaluation | number_project | average_montly_hours | time_spend_company | Work_accident | left | promotion_last_5years | sales | salary |
|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 0.38 | 0.53 | 2 | 157 | 3 | 0 | 1 | 0 | sales | low |
| 1 | 0.80 | 0.86 | 5 | 262 | 6 | 0 | 1 | 0 | sales | medium |
| 2 | 0.11 | 0.88 | 7 | 272 | 4 | 0 | 1 | 0 | sales | medium |
| 3 | 0.72 | 0.87 | 5 | 223 | 5 | 0 | 1 | 0 | sales | low |
| 4 | 0.37 | 0.52 | 2 | 159 | 3 | 0 | 1 | 0 | sales | low |
df.sales.value_counts()
sales          4140
technical      2720
support        2229
IT             1227
product_mng     902
marketing       858
RandD           787
accounting      767
hr              739
management      630
Name: sales, dtype: int64
df.rename(columns={'sales':'department'},inplace=True)
#let's convert the two categorical variables, 'department' and 'salary', into dummy variables for modelling
df = pd.get_dummies(df,columns=['department','salary'])
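# quick sanity check on the encoding (a sketch; the counts follow from the value_counts above:
# 10 department levels + 3 salary levels = 13 dummy columns, so 10 - 2 + 13 = 21 columns).
# note that pd.get_dummies also accepts drop_first=True, which drops one level per variable
# and avoids perfectly collinear dummy columns
print(df.shape)  # expect (14999, 21)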
x,y = df.drop('left',axis=1).values, df.left.values
# let's do a training-test split for validation later on
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3, random_state=2017)
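# aside: 'left' is imbalanced (roughly a quarter of employees left, as the ~0.77 first-epoch
# accuracies below suggest); passing stratify=y (a real train_test_split parameter) would
# keep the class proportions identical in both splits, e.g.:
# x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3,random_state=2017,stratify=y)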
# let's convert our output variable into categorical format for keras
num_classes = np.max(y_train)+1
y_train = to_categorical(y_train,num_classes)
y_test = to_categorical(y_test,num_classes)
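# for intuition, to_categorical one-hot encodes the labels; a minimal example:
print(to_categorical(np.array([0, 1, 1]), 2))  # -> [[1. 0.] [0. 1.] [0. 1.]]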
x_train.shape
(10499, 20)
n_cols = x_train.shape[1]
#set up the early-stopping monitor
early_stopping_monitor = EarlyStopping(patience=5)
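# EarlyStopping monitors val_loss by default, so training below stops once val_loss fails
# to improve for 5 consecutive epochs; written out explicitly, an equivalent set-up is:
# early_stopping_monitor = EarlyStopping(monitor='val_loss', patience=5, verbose=1)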
# configure our neural-net
np.random.seed(2017) #important to set the seed for reproducibility
model = Sequential()
model.add(Dense(50,activation='relu',input_shape=(n_cols,)))
model.add(Dense(2,activation='softmax'))
#compile
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
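# aside: since 'left' is binary, an equivalent formulation (a sketch, not used here) keeps
# the raw 0/1 labels and uses a single sigmoid output with binary cross-entropy:
# model.add(Dense(1, activation='sigmoid'))
# model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])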
#fit
history = model.fit(x_train,y_train,
epochs=100,verbose=1,validation_split=0.2,
callbacks=[early_stopping_monitor],shuffle=False)
model.summary()
#plot training and validation loss
plt.plot(history.history['loss'],'r',label='training')
plt.plot(history.history['val_loss'],'b',label='validation')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
#plot training and validation accuracy
plt.figure(figsize=(8,7))
plt.plot(history.history['acc'],'r',label='training')
plt.plot(history.history['val_acc'],'b',label='validation')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend()
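# the same two plots are drawn for every model below; a small helper like this sketch
# would avoid the repetition:
def plot_history(history):
    #plot training/validation loss and accuracy from a Keras History object
    for metric, ylabel in [('loss', 'loss'), ('acc', 'accuracy')]:
        plt.figure(figsize=(8,7))
        plt.plot(history.history[metric], 'r', label='training')
        plt.plot(history.history['val_' + metric], 'b', label='validation')
        plt.xlabel('epochs')
        plt.ylabel(ylabel)
        plt.legend()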
# so we see that our baseline model reaches quite good accuracy after about 20 epochs
#let's evaluate the model on the hold-out data
model.evaluate(x_test,y_test)
Train on 8399 samples, validate on 2100 samples
Epoch  1/100 - loss: 0.6132 - acc: 0.7453 - val_loss: 0.4949 - val_acc: 0.7729
Epoch  2/100 - loss: 0.5127 - acc: 0.7594 - val_loss: 0.4505 - val_acc: 0.7681
Epoch  3/100 - loss: 0.4770 - acc: 0.7663 - val_loss: 0.4253 - val_acc: 0.7686
Epoch  4/100 - loss: 0.4496 - acc: 0.7793 - val_loss: 0.4061 - val_acc: 0.7662
Epoch  5/100 - loss: 0.4283 - acc: 0.7893 - val_loss: 0.3885 - val_acc: 0.7710
Epoch  6/100 - loss: 0.4117 - acc: 0.7997 - val_loss: 0.3739 - val_acc: 0.7781
Epoch  7/100 - loss: 0.3976 - acc: 0.8095 - val_loss: 0.3711 - val_acc: 0.7710
Epoch  8/100 - loss: 0.3876 - acc: 0.8193 - val_loss: 0.3531 - val_acc: 0.7943
Epoch  9/100 - loss: 0.3785 - acc: 0.8256 - val_loss: 0.3451 - val_acc: 0.8048
Epoch 10/100 - loss: 0.3726 - acc: 0.8338 - val_loss: 0.3352 - val_acc: 0.8257
Epoch 11/100 - loss: 0.3629 - acc: 0.8413 - val_loss: 0.3298 - val_acc: 0.8414
Epoch 12/100 - loss: 0.3549 - acc: 0.8475 - val_loss: 0.3210 - val_acc: 0.8552
Epoch 13/100 - loss: 0.3488 - acc: 0.8518 - val_loss: 0.3359 - val_acc: 0.8410
Epoch 14/100 - loss: 0.3453 - acc: 0.8537 - val_loss: 0.3326 - val_acc: 0.8495
Epoch 15/100 - loss: 0.3383 - acc: 0.8586 - val_loss: 0.3382 - val_acc: 0.8452
Epoch 16/100 - loss: 0.3331 - acc: 0.8634 - val_loss: 0.3281 - val_acc: 0.8505
Epoch 17/100 - loss: 0.3281 - acc: 0.8662 - val_loss: 0.3156 - val_acc: 0.8614
Epoch 18/100 - loss: 0.3226 - acc: 0.8676 - val_loss: 0.3250 - val_acc: 0.8519
Epoch 19/100 - loss: 0.3187 - acc: 0.8699 - val_loss: 0.3143 - val_acc: 0.8624
Epoch 20/100 - loss: 0.3140 - acc: 0.8727 - val_loss: 0.3123 - val_acc: 0.8605
Epoch 21/100 - loss: 0.3089 - acc: 0.8734 - val_loss: 0.3027 - val_acc: 0.8690
Epoch 22/100 - loss: 0.3038 - acc: 0.8761 - val_loss: 0.2941 - val_acc: 0.8757
Epoch 23/100 - loss: 0.2992 - acc: 0.8780 - val_loss: 0.2965 - val_acc: 0.8724
Epoch 24/100 - loss: 0.2942 - acc: 0.8811 - val_loss: 0.2855 - val_acc: 0.8810
Epoch 25/100 - loss: 0.2874 - acc: 0.8844 - val_loss: 0.2693 - val_acc: 0.8933
Epoch 26/100 - loss: 0.2828 - acc: 0.8845 - val_loss: 0.2636 - val_acc: 0.8986
Epoch 27/100 - loss: 0.2770 - acc: 0.8875 - val_loss: 0.2618 - val_acc: 0.9014
Epoch 28/100 - loss: 0.2718 - acc: 0.8925 - val_loss: 0.2619 - val_acc: 0.8995
Epoch 29/100 - loss: 0.2681 - acc: 0.8950 - val_loss: 0.2607 - val_acc: 0.9000
Epoch 30/100 - loss: 0.2640 - acc: 0.8972 - val_loss: 0.2556 - val_acc: 0.9057
Epoch 31/100 - loss: 0.2597 - acc: 0.9001 - val_loss: 0.2514 - val_acc: 0.9095
Epoch 32/100 - loss: 0.2571 - acc: 0.8999 - val_loss: 0.2558 - val_acc: 0.9038
Epoch 33/100 - loss: 0.2535 - acc: 0.9009 - val_loss: 0.2509 - val_acc: 0.9110
Epoch 34/100 - loss: 0.2521 - acc: 0.9028 - val_loss: 0.2509 - val_acc: 0.9119
Epoch 35/100 - loss: 0.2490 - acc: 0.9043 - val_loss: 0.2576 - val_acc: 0.9062
Epoch 36/100 - loss: 0.2478 - acc: 0.9051 - val_loss: 0.2505 - val_acc: 0.9138
Epoch 37/100 - loss: 0.2470 - acc: 0.9051 - val_loss: 0.2513 - val_acc: 0.9105
Epoch 38/100 - loss: 0.2452 - acc: 0.9050 - val_loss: 0.2590 - val_acc: 0.9081
Epoch 39/100 - loss: 0.2441 - acc: 0.9058 - val_loss: 0.2622 - val_acc: 0.9048
Epoch 40/100 - loss: 0.2433 - acc: 0.9058 - val_loss: 0.2603 - val_acc: 0.9062
Epoch 41/100 - loss: 0.2421 - acc: 0.9069 - val_loss: 0.2626 - val_acc: 0.9033
Epoch 42/100 - loss: 0.2405 - acc: 0.9081 - val_loss: 0.2607 - val_acc: 0.9057
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
dense_1 (Dense)              (None, 50)                1050
_________________________________________________________________
dense_2 (Dense)              (None, 2)                 102
=================================================================
Total params: 1,152
Trainable params: 1,152
Non-trainable params: 0
_________________________________________________________________
[0.25593803686565825, 0.90288888883590701]
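# accuracy alone can hide how the minority 'left' class is handled; a confusion matrix
# on the hold-out set gives a fuller picture (a sketch using sklearn):
from sklearn.metrics import confusion_matrix
y_pred = np.argmax(model.predict(x_test), axis=1)
print(confusion_matrix(np.argmax(y_test, axis=1), y_pred))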
# Let's increase the number of nodes in the hidden layer and repeat
# configure our neural-net
np.random.seed(2017) #important to set the seed for reproducibility
model = Sequential()
model.add(Dense(100,activation='relu',input_shape=(n_cols,))) #increased the number of nodes in the hidden layer from 50 to 100
model.add(Dense(2,activation='softmax'))
#compile
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
#fit
history = model.fit(x_train,y_train,epochs=100,verbose=1,
validation_split=0.2,callbacks=[early_stopping_monitor],
shuffle=False)
model.summary()
#plot training and validation loss
plt.plot(history.history['loss'],'r',label='training')
plt.plot(history.history['val_loss'],'b',label='validation')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
#plot training and validation accuracy
plt.figure(figsize=(8,7))
plt.plot(history.history['acc'],'r',label='training')
plt.plot(history.history['val_acc'],'b',label='validation')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend()
# so we see that the wider model also reaches good accuracy after roughly 20 epochs
#let's evaluate the model on the hold-out data
print "Model Evaluation on test dataset [loss,accuracy] ",model.evaluate(x_test,y_test)
Train on 8399 samples, validate on 2100 samples
Epoch  1/100 - loss: 0.7075 - acc: 0.7378 - val_loss: 0.4806 - val_acc: 0.7724
Epoch  2/100 - loss: 0.5155 - acc: 0.7553 - val_loss: 0.4538 - val_acc: 0.7705
Epoch  3/100 - loss: 0.4946 - acc: 0.7606 - val_loss: 0.4499 - val_acc: 0.7652
Epoch  4/100 - loss: 0.4803 - acc: 0.7626 - val_loss: 0.4456 - val_acc: 0.7633
Epoch  5/100 - loss: 0.4708 - acc: 0.7645 - val_loss: 0.4414 - val_acc: 0.7662
Epoch  6/100 - loss: 0.4602 - acc: 0.7662 - val_loss: 0.4364 - val_acc: 0.7686
Epoch  7/100 - loss: 0.4492 - acc: 0.7720 - val_loss: 0.4274 - val_acc: 0.7748
Epoch  8/100 - loss: 0.4359 - acc: 0.7783 - val_loss: 0.4141 - val_acc: 0.7771
Epoch  9/100 - loss: 0.4200 - acc: 0.7882 - val_loss: 0.3981 - val_acc: 0.7795
Epoch 10/100 - loss: 0.4021 - acc: 0.7993 - val_loss: 0.3834 - val_acc: 0.7848
Epoch 11/100 - loss: 0.3854 - acc: 0.8090 - val_loss: 0.3643 - val_acc: 0.7938
Epoch 12/100 - loss: 0.3688 - acc: 0.8202 - val_loss: 0.3458 - val_acc: 0.8062
Epoch 13/100 - loss: 0.3532 - acc: 0.8306 - val_loss: 0.3287 - val_acc: 0.8200
Epoch 14/100 - loss: 0.3407 - acc: 0.8409 - val_loss: 0.3158 - val_acc: 0.8338
Epoch 15/100 - loss: 0.3300 - acc: 0.8494 - val_loss: 0.3052 - val_acc: 0.8486
Epoch 16/100 - loss: 0.3211 - acc: 0.8566 - val_loss: 0.2946 - val_acc: 0.8595
Epoch 17/100 - loss: 0.3119 - acc: 0.8634 - val_loss: 0.2874 - val_acc: 0.8752
Epoch 18/100 - loss: 0.3056 - acc: 0.8682 - val_loss: 0.2798 - val_acc: 0.8843
Epoch 19/100 - loss: 0.3002 - acc: 0.8721 - val_loss: 0.2744 - val_acc: 0.8919
Epoch 20/100 - loss: 0.2941 - acc: 0.8769 - val_loss: 0.2703 - val_acc: 0.8990
Epoch 21/100 - loss: 0.2896 - acc: 0.8797 - val_loss: 0.2670 - val_acc: 0.9033
Epoch 22/100 - loss: 0.2853 - acc: 0.8830 - val_loss: 0.2642 - val_acc: 0.9062
Epoch 23/100 - loss: 0.2809 - acc: 0.8855 - val_loss: 0.2635 - val_acc: 0.9043
Epoch 24/100 - loss: 0.2772 - acc: 0.8886 - val_loss: 0.2613 - val_acc: 0.9057
Epoch 25/100 - loss: 0.2754 - acc: 0.8886 - val_loss: 0.2626 - val_acc: 0.9043
Epoch 26/100 - loss: 0.2701 - acc: 0.8922 - val_loss: 0.2597 - val_acc: 0.9048
Epoch 27/100 - loss: 0.2663 - acc: 0.8938 - val_loss: 0.2588 - val_acc: 0.9067
Epoch 28/100 - loss: 0.2642 - acc: 0.8944 - val_loss: 0.2603 - val_acc: 0.9081
Epoch 29/100 - loss: 0.2601 - acc: 0.8980 - val_loss: 0.2598 - val_acc: 0.9062
Epoch 30/100 - loss: 0.2580 - acc: 0.9000 - val_loss: 0.2585 - val_acc: 0.9095
Epoch 31/100 - loss: 0.2552 - acc: 0.9018 - val_loss: 0.2572 - val_acc: 0.9119
Epoch 32/100 - loss: 0.2537 - acc: 0.9024 - val_loss: 0.2583 - val_acc: 0.9105
Epoch 33/100 - loss: 0.2512 - acc: 0.9043 - val_loss: 0.2565 - val_acc: 0.9105
Epoch 34/100 - loss: 0.2499 - acc: 0.9048 - val_loss: 0.2566 - val_acc: 0.9114
Epoch 35/100 - loss: 0.2487 - acc: 0.9056 - val_loss: 0.2577 - val_acc: 0.9100
Epoch 36/100 - loss: 0.2482 - acc: 0.9056 - val_loss: 0.2655 - val_acc: 0.9019
Epoch 37/100 - loss: 0.2459 - acc: 0.9070 - val_loss: 0.2594 - val_acc: 0.9081
Epoch 38/100 - loss: 0.2458 - acc: 0.9071 - val_loss: 0.2709 - val_acc: 0.8986
Epoch 39/100 - loss: 0.2461 - acc: 0.9071 - val_loss: 0.2703 - val_acc: 0.8986
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
dense_3 (Dense)              (None, 100)               2100
_________________________________________________________________
dense_4 (Dense)              (None, 2)                 202
=================================================================
Total params: 2,302
Trainable params: 2,302
Non-trainable params: 0
_________________________________________________________________
[0.24917428104082742, 0.90933333333333333]
# we see that increasing the number of nodes in the hidden layer gave only a marginal gain on the hold-out set (0.903 -> 0.909), so let's move on to model 3 and add a second hidden layer
# configure our neural-net
np.random.seed(2018) #important to set the seed for reproducibility
model = Sequential()
model.add(Dense(50,activation='relu',input_shape=(n_cols,)))
model.add(Dense(50,activation='relu'))
model.add(Dense(2,activation='softmax'))
#compile
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
#fit
history = model.fit(x_train,y_train,epochs=20,verbose=1,
validation_split=0.2,callbacks=[early_stopping_monitor],
shuffle=False)
model.summary()
#plot training and validation loss
plt.plot(history.history['loss'],'r',label='training')
plt.plot(history.history['val_loss'],'b',label='validation')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
#plot training and validation accuracy
plt.figure(figsize=(8,7))
plt.plot(history.history['acc'],'r',label='training')
plt.plot(history.history['val_acc'],'b',label='validation')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend()
# so we see that the deeper model climbs to good validation accuracy within its 20-epoch budget
#let's evaluate the model on the hold-out data
print "Model Evaluation on test dataset [loss,accuracy] ",
model.evaluate(x_test,y_test)
Train on 8399 samples, validate on 2100 samples
Epoch  1/20 - loss: 0.6501 - acc: 0.7204 - val_loss: 0.5004 - val_acc: 0.7724
Epoch  2/20 - loss: 0.5120 - acc: 0.7553 - val_loss: 0.4111 - val_acc: 0.8205
Epoch  3/20 - loss: 0.4316 - acc: 0.7953 - val_loss: 0.3624 - val_acc: 0.8519
Epoch  4/20 - loss: 0.3982 - acc: 0.8158 - val_loss: 0.3528 - val_acc: 0.8643
Epoch  5/20 - loss: 0.3895 - acc: 0.8208 - val_loss: 0.3321 - val_acc: 0.8762
Epoch  6/20 - loss: 0.3647 - acc: 0.8355 - val_loss: 0.3333 - val_acc: 0.8733
Epoch  7/20 - loss: 0.3483 - acc: 0.8461 - val_loss: 0.3342 - val_acc: 0.8738
Epoch  8/20 - loss: 0.3385 - acc: 0.8547 - val_loss: 0.3197 - val_acc: 0.8795
Epoch  9/20 - loss: 0.3339 - acc: 0.8572 - val_loss: 0.3396 - val_acc: 0.8657
Epoch 10/20 - loss: 0.3252 - acc: 0.8625 - val_loss: 0.3219 - val_acc: 0.8771
Epoch 11/20 - loss: 0.3228 - acc: 0.8649 - val_loss: 0.3445 - val_acc: 0.8629
Epoch 12/20 - loss: 0.3130 - acc: 0.8693 - val_loss: 0.3274 - val_acc: 0.8729
Epoch 13/20 - loss: 0.3135 - acc: 0.8711 - val_loss: 0.3338 - val_acc: 0.8705
Epoch 14/20 - loss: 0.3041 - acc: 0.8761 - val_loss: 0.3284 - val_acc: 0.8690
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
dense_5 (Dense)              (None, 50)                1050
_________________________________________________________________
dense_6 (Dense)              (None, 50)                2550
_________________________________________________________________
dense_7 (Dense)              (None, 2)                 102
=================================================================
Total params: 3,702
Trainable params: 3,702
Non-trainable params: 0
_________________________________________________________________
[0.31913121981090969, 0.87688888894187078]
#the deeper model learned quickly, but early stopping cut it off at epoch 14 and hold-out accuracy actually dropped (0.903 -> 0.877), so let's retrain the same architecture with a larger epoch budget
# configure our neural-net
np.random.seed(2019)
model = Sequential()
model.add(Dense(50,activation='relu',input_shape=(n_cols,)))
model.add(Dense(50,activation='relu'))
model.add(Dense(2,activation='softmax'))
#compile
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
#fit
history = model.fit(x_train,y_train,epochs=50,
verbose=1,validation_split=0.2,
callbacks=[early_stopping_monitor],shuffle=False)
model.summary()
#plot training and validation loss
plt.plot(history.history['loss'],'r',label='training')
plt.plot(history.history['val_loss'],'b',label='validation')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
#plot training and validation accuracy
plt.figure(figsize=(8,7))
plt.plot(history.history['acc'],'r',label='training')
plt.plot(history.history['val_acc'],'b',label='validation')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend()
# with the 50-epoch budget, early stopping halts training around epoch 22 and the picture barely changes
#let's evaluate the model on the hold-out data
print "Model Evaluation on test dataset [loss,accuracy] ",
model.evaluate(x_test,y_test)
Train on 8399 samples, validate on 2100 samples
Epoch  1/50 - loss: 0.8574 - acc: 0.7290 - val_loss: 0.5059 - val_acc: 0.7710
Epoch  2/50 - loss: 0.5453 - acc: 0.7453 - val_loss: 0.4704 - val_acc: 0.7638
Epoch  3/50 - loss: 0.5045 - acc: 0.7540 - val_loss: 0.4451 - val_acc: 0.7595
Epoch  4/50 - loss: 0.4788 - acc: 0.7619 - val_loss: 0.4220 - val_acc: 0.7586
Epoch  5/50 - loss: 0.4506 - acc: 0.7752 - val_loss: 0.3980 - val_acc: 0.7762
Epoch  6/50 - loss: 0.4254 - acc: 0.7856 - val_loss: 0.3766 - val_acc: 0.7814
Epoch  7/50 - loss: 0.4022 - acc: 0.8016 - val_loss: 0.3588 - val_acc: 0.8014
Epoch  8/50 - loss: 0.3812 - acc: 0.8130 - val_loss: 0.3421 - val_acc: 0.8114
Epoch  9/50 - loss: 0.3638 - acc: 0.8230 - val_loss: 0.3358 - val_acc: 0.8110
Epoch 10/50 - loss: 0.3510 - acc: 0.8308 - val_loss: 0.3252 - val_acc: 0.8157
Epoch 11/50 - loss: 0.3412 - acc: 0.8357 - val_loss: 0.3204 - val_acc: 0.8195
Epoch 12/50 - loss: 0.3341 - acc: 0.8440 - val_loss: 0.3135 - val_acc: 0.8262
Epoch 13/50 - loss: 0.3261 - acc: 0.8490 - val_loss: 0.3066 - val_acc: 0.8376
Epoch 14/50 - loss: 0.3195 - acc: 0.8563 - val_loss: 0.3017 - val_acc: 0.8624
Epoch 15/50 - loss: 0.3128 - acc: 0.8607 - val_loss: 0.2960 - val_acc: 0.8738
Epoch 16/50 - loss: 0.3081 - acc: 0.8651 - val_loss: 0.2914 - val_acc: 0.8800
Epoch 17/50 - loss: 0.3038 - acc: 0.8683 - val_loss: 0.2980 - val_acc: 0.8824
Epoch 18/50 - loss: 0.3004 - acc: 0.8713 - val_loss: 0.2966 - val_acc: 0.8886
Epoch 19/50 - loss: 0.2989 - acc: 0.8717 - val_loss: 0.3032 - val_acc: 0.8819
Epoch 20/50 - loss: 0.2957 - acc: 0.8738 - val_loss: 0.3072 - val_acc: 0.8781
Epoch 21/50 - loss: 0.2934 - acc: 0.8757 - val_loss: 0.3124 - val_acc: 0.8738
Epoch 22/50 - loss: 0.2916 - acc: 0.8764 - val_loss: 0.3251 - val_acc: 0.8667
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
dense_8 (Dense)              (None, 50)                1050
_________________________________________________________________
dense_9 (Dense)              (None, 50)                2550
_________________________________________________________________
dense_10 (Dense)             (None, 2)                 102
=================================================================
Total params: 3,702
Trainable params: 3,702
Non-trainable params: 0
_________________________________________________________________
[0.31322626781463625, 0.878]
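# pulling the four hold-out evaluations together (numbers copied from the outputs above):
results = pd.DataFrame({'model':['1x50 hidden','1x100 hidden','2x50 hidden, 20 epochs','2x50 hidden, 50 epochs'],
                        'test_loss':[0.2559,0.2492,0.3191,0.3132],
                        'test_accuracy':[0.9029,0.9093,0.8769,0.8780]},
                       columns=['model','test_loss','test_accuracy'])
print(results)
# the single-hidden-layer nets generalise best here; both deeper runs early-stop at a
# noticeably lower hold-out accuracy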