In [1]:
import tensorflow as tf
print(tf.__version__)
2.0.0-alpha0
In [2]:
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, MaxPool2D,  \
    Dropout, Dense, Input, concatenate,      \
    GlobalAveragePooling2D, AveragePooling2D,\
    Flatten, BatchNormalization

import cv2  # python -m pip install opencv-python
import numpy as np
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical

import math
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.callbacks import LearningRateScheduler
In [3]:
num_classes = 10
In [4]:
def load_cifar10_data(img_rows, img_cols):
    # Load cifar10 training and test sets
    (X_train, Y_train), (X_test, Y_test) = cifar10.load_data()

    # Resize training and test images (cv2.resize expects dsize as (width, height))
    X_train = np.array([cv2.resize(img, (img_cols, img_rows)) for img in X_train])
    X_test = np.array([cv2.resize(img, (img_cols, img_rows)) for img in X_test])

    # Scale pixels to [0, 1]; float16 halves the memory needed for the upscaled arrays
    X_train = X_train.astype('float16') / 255.0
    X_test = X_test.astype('float16') / 255.0

    # One-hot encode the targets for Keras
    Y_train = to_categorical(Y_train, num_classes)
    Y_test = to_categorical(Y_test, num_classes)

    print("X_train: {0}".format(X_train.shape))
    print("Y_train: {0}".format(Y_train.shape))
    print("X_test: {0}".format(X_test.shape))
    print("Y_test: {0}".format(Y_test.shape))

    return X_train, Y_train, X_test, Y_test
In [5]:
X_train, y_train, X_test, y_test = load_cifar10_data(224, 224)
X_train: (50000, 224, 224, 3)
Y_train: (50000, 10)
X_test: (10000, 224, 224, 3)
Y_test: (10000, 10)
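
Note that upscaling all 60,000 CIFAR-10 images to 224x224 up front is memory-hungry (well over 10 GB even in float16). A possible alternative, not used in this run, is a tf.data pipeline that resizes each batch on the fly; the sketch below is only an assumption-laden illustration (it takes raw uint8 images, and feeding the three-output model further down would require repeating the label element three times).

def make_dataset(images, labels, batch_size=256, training=False):
    # Hypothetical alternative: resize 32x32 uint8 images to 224x224 per batch
    # instead of materialising the full upscaled arrays in memory.
    ds = tf.data.Dataset.from_tensor_slices((images, labels))
    if training:
        ds = ds.shuffle(10000)
    ds = ds.batch(batch_size)
    ds = ds.map(lambda x, y: (tf.image.resize(tf.cast(x, tf.float32) / 255.0, (224, 224)), y))
    return ds.prefetch(tf.data.experimental.AUTOTUNE)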
In [6]:
kernel_init = tf.keras.initializers.glorot_uniform()
bias_init = tf.keras.initializers.Constant(value=0.2)
In [7]:
def inception_module(x,
                     filters_1x1,
                     filters_1x1_to_3x3,
                     filters_3x3,
                     filters_1x1_to_5x5,
                     filters_5x5,
                     filters_pool_1x1,
                     name=None):
    """One Inception block: four parallel branches (1x1, 1x1->3x3, 1x1->5x5,
    3x3 max-pool -> 1x1) concatenated along the channel axis."""

    # Branch 1: 1x1 convolution
    conv_1x1 = Conv2D(
        filters=filters_1x1, 
        kernel_size=(1, 1), 
        padding='same', 
        activation='relu',
        kernel_initializer=kernel_init,
        bias_initializer=bias_init)(x)

    # Branch 2: 1x1 reduction followed by 3x3 convolution
    conv_3x3 = Conv2D(
        filters=filters_1x1_to_3x3, 
        kernel_size=(1, 1), 
        padding='same', 
        activation='relu',
        kernel_initializer=kernel_init,
        bias_initializer=bias_init)(x)
    
    conv_3x3 = Conv2D(
        filters=filters_3x3,
        kernel_size=(3, 3), 
        padding='same',
        activation='relu',
        kernel_initializer=kernel_init,
        bias_initializer=bias_init)(conv_3x3)

    # Branch 3: 1x1 reduction followed by 5x5 convolution
    conv_5x5 = Conv2D(
        filters=filters_1x1_to_5x5,
        kernel_size=(1, 1),
        padding='same',
        activation='relu',
        kernel_initializer=kernel_init,
        bias_initializer=bias_init)(x)
    
    conv_5x5 = Conv2D(
        filters=filters_5x5,
        kernel_size=(5, 5),
        padding='same',
        activation='relu',
        kernel_initializer=kernel_init,
        bias_initializer=bias_init)(conv_5x5)

    # Branch 4: 3x3 max pooling followed by 1x1 projection
    pool = MaxPool2D(
        pool_size=(3, 3), 
        strides=(1, 1),
        padding='same')(x)
    
    pool = Conv2D(
        filters=filters_pool_1x1,
        kernel_size=(1, 1),
        padding='same',
        activation='relu',
        kernel_initializer=kernel_init,
        bias_initializer=bias_init)(pool)

    output = concatenate([conv_1x1, conv_3x3, conv_5x5, pool], axis=3, name=name)

    return output
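
As a quick sanity check (not executed in this notebook, since extra layers would shift the auto-generated layer names in the summary below), the concatenated output should carry filters_1x1 + filters_3x3 + filters_5x5 + filters_pool_1x1 channels; with the inception_3a settings that is 64 + 128 + 32 + 32 = 256:

probe = Input(shape=(28, 28, 192))
probe_out = inception_module(probe,
                             filters_1x1=64, filters_1x1_to_3x3=96, filters_3x3=128,
                             filters_1x1_to_5x5=16, filters_5x5=32, filters_pool_1x1=32)
print(probe_out.shape)  # expected: (None, 28, 28, 256)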
In [8]:
# Stem: initial convolution and pooling layers before the Inception blocks
input_layer = Input(shape=(224, 224, 3))

x = Conv2D(
    filters=64,
    kernel_size=(7, 7),
    padding='same', 
    strides=(2, 2),
    activation='relu',
    kernel_initializer=kernel_init,
    bias_initializer=bias_init,
    name='conv_1_7x7/2')(input_layer)

x = MaxPool2D(
    pool_size=(3, 3), 
    padding='same', 
    strides=(2, 2), 
    name='max_pool_1_3x3/2')(x)

x = BatchNormalization()(x)

x = Conv2D(
    filters=64, 
    kernel_size=(1, 1), 
    padding='same', 
    strides=(1, 1), 
    activation='relu', 
    name='conv_2a_3x3/1')(x)

x = Conv2D(
    filters=192, 
    kernel_size=(3, 3), 
    padding='same', 
    strides=(1, 1), 
    activation='relu', 
    name='conv_2b_3x3/1')(x)

x = BatchNormalization()(x)

x = MaxPool2D(
    pool_size=(3, 3), 
    padding='same', 
    strides=(2, 2), 
    name='max_pool_2_3x3/2')(x)

x = inception_module(
    x=x,
    filters_1x1=64,
    filters_1x1_to_3x3=96,
    filters_3x3=128,
    filters_1x1_to_5x5=16,
    filters_5x5=32,
    filters_pool_1x1=32,
    name='inception_3a'
)

x = inception_module(
    x=x,
    filters_1x1=128,
    filters_1x1_to_3x3=128,
    filters_3x3=192,
    filters_1x1_to_5x5=32,
    filters_5x5=96,
    filters_pool_1x1=64,
    name='inception_3b'
)

x = MaxPool2D(
    pool_size=(3, 3), 
    padding='same', 
    strides=(2, 2), 
    name='max_pool_3_3x3/2')(x)

x = inception_module(
    x=x,
    filters_1x1=192,
    filters_1x1_to_3x3=96,
    filters_3x3=208,
    filters_1x1_to_5x5=16,
    filters_5x5=48,
    filters_pool_1x1=64,
    name='inception_4a')


# Auxiliary classifier 1 (branches off inception_4a)
x1 = AveragePooling2D(pool_size=(5, 5), strides=3)(x)
x1 = Conv2D(filters=128, kernel_size=(1, 1), padding='same', activation='relu')(x1)
x1 = Flatten()(x1)
x1 = Dense(units=1024, activation='relu')(x1)
x1 = Dropout(rate=0.7)(x1)
x1 = Dense(units=10, activation='softmax', name='auxilliary_output_1')(x1)

x = inception_module(
    x=x,
    filters_1x1=160,
    filters_1x1_to_3x3=112,
    filters_3x3=224,
    filters_1x1_to_5x5=24,
    filters_5x5=64,
    filters_pool_1x1=64,
    name='inception_4b'
)

x = inception_module(
    x=x,
    filters_1x1=128,
    filters_1x1_to_3x3=128,
    filters_3x3=256,
    filters_1x1_to_5x5=24,
    filters_5x5=64,
    filters_pool_1x1=64,
    name='inception_4c')

x = inception_module(
    x=x,
    filters_1x1=112,
    filters_1x1_to_3x3=144,
    filters_3x3=288,
    filters_1x1_to_5x5=32,
    filters_5x5=64,
    filters_pool_1x1=64,
    name='inception_4d')


# Auxiliary classifier 2 (branches off inception_4d)
x2 = AveragePooling2D(pool_size=(5, 5), strides=3)(x)
x2 = Conv2D(filters=128, kernel_size=(1, 1), padding='same', activation='relu')(x2)
x2 = Flatten()(x2)
x2 = Dense(units=1024, activation='relu')(x2)
x2 = Dropout(rate=0.7)(x2)
x2 = Dense(units=10, activation='softmax', name='auxilliary_output_2')(x2)

x = inception_module(
    x=x,
    filters_1x1=256,
    filters_1x1_to_3x3=160,
    filters_3x3=320,
    filters_1x1_to_5x5=32,
    filters_5x5=128,
    filters_pool_1x1=128,
    name='inception_4e'
)

x = MaxPool2D(
    pool_size=(3, 3), 
    padding='same', 
    strides=(2, 2), 
    name='max_pool_4_3x3/2')(x)

x = inception_module(
    x=x,
    filters_1x1=256,
    filters_1x1_to_3x3=160,
    filters_3x3=320,
    filters_1x1_to_5x5=32,
    filters_5x5=128,
    filters_pool_1x1=128,
    name='inception_5a'
)

x = inception_module(
    x=x,
    filters_1x1=384,
    filters_1x1_to_3x3=192,
    filters_3x3=384,
    filters_1x1_to_5x5=48,
    filters_5x5=128,
    filters_pool_1x1=128,
    name='inception_5b')

# Main classifier head
x = GlobalAveragePooling2D(name='avg_pool_5_3x3/1')(x)
x = Dropout(rate=0.4)(x)
x = Dense(units=10, activation='softmax', name='output')(x)

model = Model(input_layer, [x, x1, x2], name='inception_v1')

model.summary()
Model: "inception_v1"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            [(None, 224, 224, 3) 0                                            
__________________________________________________________________________________________________
conv_1_7x7/2 (Conv2D)           (None, 112, 112, 64) 9472        input_1[0][0]                    
__________________________________________________________________________________________________
max_pool_1_3x3/2 (MaxPooling2D) (None, 56, 56, 64)   0           conv_1_7x7/2[0][0]               
__________________________________________________________________________________________________
batch_normalization_v2 (BatchNo (None, 56, 56, 64)   256         max_pool_1_3x3/2[0][0]           
__________________________________________________________________________________________________
conv_2a_3x3/1 (Conv2D)          (None, 56, 56, 64)   4160        batch_normalization_v2[0][0]     
__________________________________________________________________________________________________
conv_2b_3x3/1 (Conv2D)          (None, 56, 56, 192)  110784      conv_2a_3x3/1[0][0]              
__________________________________________________________________________________________________
batch_normalization_v2_1 (Batch (None, 56, 56, 192)  768         conv_2b_3x3/1[0][0]              
__________________________________________________________________________________________________
max_pool_2_3x3/2 (MaxPooling2D) (None, 28, 28, 192)  0           batch_normalization_v2_1[0][0]   
__________________________________________________________________________________________________
conv2d_1 (Conv2D)               (None, 28, 28, 96)   18528       max_pool_2_3x3/2[0][0]           
__________________________________________________________________________________________________
conv2d_3 (Conv2D)               (None, 28, 28, 16)   3088        max_pool_2_3x3/2[0][0]           
__________________________________________________________________________________________________
max_pooling2d (MaxPooling2D)    (None, 28, 28, 192)  0           max_pool_2_3x3/2[0][0]           
__________________________________________________________________________________________________
conv2d (Conv2D)                 (None, 28, 28, 64)   12352       max_pool_2_3x3/2[0][0]           
__________________________________________________________________________________________________
conv2d_2 (Conv2D)               (None, 28, 28, 128)  110720      conv2d_1[0][0]                   
__________________________________________________________________________________________________
conv2d_4 (Conv2D)               (None, 28, 28, 32)   12832       conv2d_3[0][0]                   
__________________________________________________________________________________________________
conv2d_5 (Conv2D)               (None, 28, 28, 32)   6176        max_pooling2d[0][0]              
__________________________________________________________________________________________________
inception_3a (Concatenate)      (None, 28, 28, 256)  0           conv2d[0][0]                     
                                                                 conv2d_2[0][0]                   
                                                                 conv2d_4[0][0]                   
                                                                 conv2d_5[0][0]                   
__________________________________________________________________________________________________
conv2d_7 (Conv2D)               (None, 28, 28, 128)  32896       inception_3a[0][0]               
__________________________________________________________________________________________________
conv2d_9 (Conv2D)               (None, 28, 28, 32)   8224        inception_3a[0][0]               
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D)  (None, 28, 28, 256)  0           inception_3a[0][0]               
__________________________________________________________________________________________________
conv2d_6 (Conv2D)               (None, 28, 28, 128)  32896       inception_3a[0][0]               
__________________________________________________________________________________________________
conv2d_8 (Conv2D)               (None, 28, 28, 192)  221376      conv2d_7[0][0]                   
__________________________________________________________________________________________________
conv2d_10 (Conv2D)              (None, 28, 28, 96)   76896       conv2d_9[0][0]                   
__________________________________________________________________________________________________
conv2d_11 (Conv2D)              (None, 28, 28, 64)   16448       max_pooling2d_1[0][0]            
__________________________________________________________________________________________________
inception_3b (Concatenate)      (None, 28, 28, 480)  0           conv2d_6[0][0]                   
                                                                 conv2d_8[0][0]                   
                                                                 conv2d_10[0][0]                  
                                                                 conv2d_11[0][0]                  
__________________________________________________________________________________________________
max_pool_3_3x3/2 (MaxPooling2D) (None, 14, 14, 480)  0           inception_3b[0][0]               
__________________________________________________________________________________________________
conv2d_13 (Conv2D)              (None, 14, 14, 96)   46176       max_pool_3_3x3/2[0][0]           
__________________________________________________________________________________________________
conv2d_15 (Conv2D)              (None, 14, 14, 16)   7696        max_pool_3_3x3/2[0][0]           
__________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D)  (None, 14, 14, 480)  0           max_pool_3_3x3/2[0][0]           
__________________________________________________________________________________________________
conv2d_12 (Conv2D)              (None, 14, 14, 192)  92352       max_pool_3_3x3/2[0][0]           
__________________________________________________________________________________________________
conv2d_14 (Conv2D)              (None, 14, 14, 208)  179920      conv2d_13[0][0]                  
__________________________________________________________________________________________________
conv2d_16 (Conv2D)              (None, 14, 14, 48)   19248       conv2d_15[0][0]                  
__________________________________________________________________________________________________
conv2d_17 (Conv2D)              (None, 14, 14, 64)   30784       max_pooling2d_2[0][0]            
__________________________________________________________________________________________________
inception_4a (Concatenate)      (None, 14, 14, 512)  0           conv2d_12[0][0]                  
                                                                 conv2d_14[0][0]                  
                                                                 conv2d_16[0][0]                  
                                                                 conv2d_17[0][0]                  
__________________________________________________________________________________________________
conv2d_20 (Conv2D)              (None, 14, 14, 112)  57456       inception_4a[0][0]               
__________________________________________________________________________________________________
conv2d_22 (Conv2D)              (None, 14, 14, 24)   12312       inception_4a[0][0]               
__________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D)  (None, 14, 14, 512)  0           inception_4a[0][0]               
__________________________________________________________________________________________________
conv2d_19 (Conv2D)              (None, 14, 14, 160)  82080       inception_4a[0][0]               
__________________________________________________________________________________________________
conv2d_21 (Conv2D)              (None, 14, 14, 224)  226016      conv2d_20[0][0]                  
__________________________________________________________________________________________________
conv2d_23 (Conv2D)              (None, 14, 14, 64)   38464       conv2d_22[0][0]                  
__________________________________________________________________________________________________
conv2d_24 (Conv2D)              (None, 14, 14, 64)   32832       max_pooling2d_3[0][0]            
__________________________________________________________________________________________________
inception_4b (Concatenate)      (None, 14, 14, 512)  0           conv2d_19[0][0]                  
                                                                 conv2d_21[0][0]                  
                                                                 conv2d_23[0][0]                  
                                                                 conv2d_24[0][0]                  
__________________________________________________________________________________________________
conv2d_26 (Conv2D)              (None, 14, 14, 128)  65664       inception_4b[0][0]               
__________________________________________________________________________________________________
conv2d_28 (Conv2D)              (None, 14, 14, 24)   12312       inception_4b[0][0]               
__________________________________________________________________________________________________
max_pooling2d_4 (MaxPooling2D)  (None, 14, 14, 512)  0           inception_4b[0][0]               
__________________________________________________________________________________________________
conv2d_25 (Conv2D)              (None, 14, 14, 128)  65664       inception_4b[0][0]               
__________________________________________________________________________________________________
conv2d_27 (Conv2D)              (None, 14, 14, 256)  295168      conv2d_26[0][0]                  
__________________________________________________________________________________________________
conv2d_29 (Conv2D)              (None, 14, 14, 64)   38464       conv2d_28[0][0]                  
__________________________________________________________________________________________________
conv2d_30 (Conv2D)              (None, 14, 14, 64)   32832       max_pooling2d_4[0][0]            
__________________________________________________________________________________________________
inception_4c (Concatenate)      (None, 14, 14, 512)  0           conv2d_25[0][0]                  
                                                                 conv2d_27[0][0]                  
                                                                 conv2d_29[0][0]                  
                                                                 conv2d_30[0][0]                  
__________________________________________________________________________________________________
conv2d_32 (Conv2D)              (None, 14, 14, 144)  73872       inception_4c[0][0]               
__________________________________________________________________________________________________
conv2d_34 (Conv2D)              (None, 14, 14, 32)   16416       inception_4c[0][0]               
__________________________________________________________________________________________________
max_pooling2d_5 (MaxPooling2D)  (None, 14, 14, 512)  0           inception_4c[0][0]               
__________________________________________________________________________________________________
conv2d_31 (Conv2D)              (None, 14, 14, 112)  57456       inception_4c[0][0]               
__________________________________________________________________________________________________
conv2d_33 (Conv2D)              (None, 14, 14, 288)  373536      conv2d_32[0][0]                  
__________________________________________________________________________________________________
conv2d_35 (Conv2D)              (None, 14, 14, 64)   51264       conv2d_34[0][0]                  
__________________________________________________________________________________________________
conv2d_36 (Conv2D)              (None, 14, 14, 64)   32832       max_pooling2d_5[0][0]            
__________________________________________________________________________________________________
inception_4d (Concatenate)      (None, 14, 14, 528)  0           conv2d_31[0][0]                  
                                                                 conv2d_33[0][0]                  
                                                                 conv2d_35[0][0]                  
                                                                 conv2d_36[0][0]                  
__________________________________________________________________________________________________
conv2d_39 (Conv2D)              (None, 14, 14, 160)  84640       inception_4d[0][0]               
__________________________________________________________________________________________________
conv2d_41 (Conv2D)              (None, 14, 14, 32)   16928       inception_4d[0][0]               
__________________________________________________________________________________________________
max_pooling2d_6 (MaxPooling2D)  (None, 14, 14, 528)  0           inception_4d[0][0]               
__________________________________________________________________________________________________
conv2d_38 (Conv2D)              (None, 14, 14, 256)  135424      inception_4d[0][0]               
__________________________________________________________________________________________________
conv2d_40 (Conv2D)              (None, 14, 14, 320)  461120      conv2d_39[0][0]                  
__________________________________________________________________________________________________
conv2d_42 (Conv2D)              (None, 14, 14, 128)  102528      conv2d_41[0][0]                  
__________________________________________________________________________________________________
conv2d_43 (Conv2D)              (None, 14, 14, 128)  67712       max_pooling2d_6[0][0]            
__________________________________________________________________________________________________
inception_4e (Concatenate)      (None, 14, 14, 832)  0           conv2d_38[0][0]                  
                                                                 conv2d_40[0][0]                  
                                                                 conv2d_42[0][0]                  
                                                                 conv2d_43[0][0]                  
__________________________________________________________________________________________________
max_pool_4_3x3/2 (MaxPooling2D) (None, 7, 7, 832)    0           inception_4e[0][0]               
__________________________________________________________________________________________________
conv2d_45 (Conv2D)              (None, 7, 7, 160)    133280      max_pool_4_3x3/2[0][0]           
__________________________________________________________________________________________________
conv2d_47 (Conv2D)              (None, 7, 7, 32)     26656       max_pool_4_3x3/2[0][0]           
__________________________________________________________________________________________________
max_pooling2d_7 (MaxPooling2D)  (None, 7, 7, 832)    0           max_pool_4_3x3/2[0][0]           
__________________________________________________________________________________________________
conv2d_44 (Conv2D)              (None, 7, 7, 256)    213248      max_pool_4_3x3/2[0][0]           
__________________________________________________________________________________________________
conv2d_46 (Conv2D)              (None, 7, 7, 320)    461120      conv2d_45[0][0]                  
__________________________________________________________________________________________________
conv2d_48 (Conv2D)              (None, 7, 7, 128)    102528      conv2d_47[0][0]                  
__________________________________________________________________________________________________
conv2d_49 (Conv2D)              (None, 7, 7, 128)    106624      max_pooling2d_7[0][0]            
__________________________________________________________________________________________________
inception_5a (Concatenate)      (None, 7, 7, 832)    0           conv2d_44[0][0]                  
                                                                 conv2d_46[0][0]                  
                                                                 conv2d_48[0][0]                  
                                                                 conv2d_49[0][0]                  
__________________________________________________________________________________________________
conv2d_51 (Conv2D)              (None, 7, 7, 192)    159936      inception_5a[0][0]               
__________________________________________________________________________________________________
conv2d_53 (Conv2D)              (None, 7, 7, 48)     39984       inception_5a[0][0]               
__________________________________________________________________________________________________
max_pooling2d_8 (MaxPooling2D)  (None, 7, 7, 832)    0           inception_5a[0][0]               
__________________________________________________________________________________________________
average_pooling2d (AveragePooli (None, 4, 4, 512)    0           inception_4a[0][0]               
__________________________________________________________________________________________________
average_pooling2d_1 (AveragePoo (None, 4, 4, 528)    0           inception_4d[0][0]               
__________________________________________________________________________________________________
conv2d_50 (Conv2D)              (None, 7, 7, 384)    319872      inception_5a[0][0]               
__________________________________________________________________________________________________
conv2d_52 (Conv2D)              (None, 7, 7, 384)    663936      conv2d_51[0][0]                  
__________________________________________________________________________________________________
conv2d_54 (Conv2D)              (None, 7, 7, 128)    153728      conv2d_53[0][0]                  
__________________________________________________________________________________________________
conv2d_55 (Conv2D)              (None, 7, 7, 128)    106624      max_pooling2d_8[0][0]            
__________________________________________________________________________________________________
conv2d_18 (Conv2D)              (None, 4, 4, 128)    65664       average_pooling2d[0][0]          
__________________________________________________________________________________________________
conv2d_37 (Conv2D)              (None, 4, 4, 128)    67712       average_pooling2d_1[0][0]        
__________________________________________________________________________________________________
inception_5b (Concatenate)      (None, 7, 7, 1024)   0           conv2d_50[0][0]                  
                                                                 conv2d_52[0][0]                  
                                                                 conv2d_54[0][0]                  
                                                                 conv2d_55[0][0]                  
__________________________________________________________________________________________________
flatten (Flatten)               (None, 2048)         0           conv2d_18[0][0]                  
__________________________________________________________________________________________________
flatten_1 (Flatten)             (None, 2048)         0           conv2d_37[0][0]                  
__________________________________________________________________________________________________
avg_pool_5_3x3/1 (GlobalAverage (None, 1024)         0           inception_5b[0][0]               
__________________________________________________________________________________________________
dense (Dense)                   (None, 1024)         2098176     flatten[0][0]                    
__________________________________________________________________________________________________
dense_1 (Dense)                 (None, 1024)         2098176     flatten_1[0][0]                  
__________________________________________________________________________________________________
dropout_2 (Dropout)             (None, 1024)         0           avg_pool_5_3x3/1[0][0]           
__________________________________________________________________________________________________
dropout (Dropout)               (None, 1024)         0           dense[0][0]                      
__________________________________________________________________________________________________
dropout_1 (Dropout)             (None, 1024)         0           dense_1[0][0]                    
__________________________________________________________________________________________________
output (Dense)                  (None, 10)           10250       dropout_2[0][0]                  
__________________________________________________________________________________________________
auxilliary_output_1 (Dense)     (None, 10)           10250       dropout[0][0]                    
__________________________________________________________________________________________________
auxilliary_output_2 (Dense)     (None, 10)           10250       dropout_1[0][0]                  
==================================================================================================
Total params: 10,335,054
Trainable params: 10,334,542
Non-trainable params: 512
__________________________________________________________________________________________________
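
The two auxiliary softmax heads exist only to inject extra gradient signal during training (they are weighted 0.3 in the compile call below, as in the GoogLeNet paper); at inference time one would normally keep just the main output. A minimal sketch of such a view over the same graph, not built in this run:

# Hypothetical inference-time model: same layers and weights, main output only.
inference_model = Model(input_layer, x, name='inception_v1_inference')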
In [9]:
initial_lrate = 0.01

def decay(epoch):
    # Step decay: multiply the initial rate by 0.96 every 8 epochs
    drop = 0.96
    epochs_drop = 8
    lrate = initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))
    return lrate

lr_sc = LearningRateScheduler(decay, verbose=1)

sgd = SGD(lr=initial_lrate, momentum=0.9, nesterov=True)

model.compile(
    loss=['categorical_crossentropy', 'categorical_crossentropy', 'categorical_crossentropy'],
    loss_weights=[1, 0.3, 0.3],
    optimizer=sgd,
    metrics=['accuracy']
)

epochs = 35

history = model.fit(
    x=X_train,
    y=[y_train, y_train, y_train],
    validation_data=(X_test, [y_test, y_test, y_test]),
    epochs=epochs, batch_size=256, callbacks=[lr_sc]
)
Train on 50000 samples, validate on 10000 samples

Epoch 00001: LearningRateScheduler reducing learning rate to 0.01.
Epoch 1/35
50000/50000 [==============================] - 164s 3ms/sample - loss: 2.9533 - output_loss: 1.8562 - auxilliary_output_1_loss: 1.8227 - auxilliary_output_2_loss: 1.8345 - output_accuracy: 0.3027 - auxilliary_output_1_accuracy: 0.3276 - auxilliary_output_2_accuracy: 0.3150 - val_loss: 4.5563 - val_output_loss: 3.0333 - val_auxilliary_output_1_loss: 2.4836 - val_auxilliary_output_2_loss: 2.5931 - val_output_accuracy: 0.1187 - val_auxilliary_output_1_accuracy: 0.1212 - val_auxilliary_output_2_accuracy: 0.1250

Epoch 00002: LearningRateScheduler reducing learning rate to 0.01.
Epoch 2/35
50000/50000 [==============================] - 153s 3ms/sample - loss: 2.2156 - output_loss: 1.3932 - auxilliary_output_1_loss: 1.3651 - auxilliary_output_2_loss: 1.3764 - output_accuracy: 0.4935 - auxilliary_output_1_accuracy: 0.5027 - auxilliary_output_2_accuracy: 0.5000 - val_loss: 2.6316 - val_output_loss: 1.6831 - val_auxilliary_output_1_loss: 1.6117 - val_auxilliary_output_2_loss: 1.5500 - val_output_accuracy: 0.3811 - val_auxilliary_output_1_accuracy: 0.4099 - val_auxilliary_output_2_accuracy: 0.4276

Epoch 00003: LearningRateScheduler reducing learning rate to 0.01.
Epoch 3/35
50000/50000 [==============================] - 153s 3ms/sample - loss: 1.8995 - output_loss: 1.1896 - auxilliary_output_1_loss: 1.1817 - auxilliary_output_2_loss: 1.1845 - output_accuracy: 0.5721 - auxilliary_output_1_accuracy: 0.5759 - auxilliary_output_2_accuracy: 0.5756 - val_loss: 2.2723 - val_output_loss: 1.4853 - val_auxilliary_output_1_loss: 1.2706 - val_auxilliary_output_2_loss: 1.3526 - val_output_accuracy: 0.4863 - val_auxilliary_output_1_accuracy: 0.5444 - val_auxilliary_output_2_accuracy: 0.5189

Epoch 00004: LearningRateScheduler reducing learning rate to 0.01.
Epoch 4/35
50000/50000 [==============================] - 153s 3ms/sample - loss: 1.6338 - output_loss: 1.0108 - auxilliary_output_1_loss: 1.0438 - auxilliary_output_2_loss: 1.0328 - output_accuracy: 0.6377 - auxilliary_output_1_accuracy: 0.6264 - auxilliary_output_2_accuracy: 0.6311 - val_loss: 2.1753 - val_output_loss: 1.3962 - val_auxilliary_output_1_loss: 1.3135 - val_auxilliary_output_2_loss: 1.2836 - val_output_accuracy: 0.5251 - val_auxilliary_output_1_accuracy: 0.5385 - val_auxilliary_output_2_accuracy: 0.5524

Epoch 00005: LearningRateScheduler reducing learning rate to 0.01.
Epoch 5/35
50000/50000 [==============================] - 153s 3ms/sample - loss: 1.4652 - output_loss: 0.8989 - auxilliary_output_1_loss: 0.9535 - auxilliary_output_2_loss: 0.9339 - output_accuracy: 0.6800 - auxilliary_output_1_accuracy: 0.6602 - auxilliary_output_2_accuracy: 0.6698 - val_loss: 1.8850 - val_output_loss: 1.1790 - val_auxilliary_output_1_loss: 1.2361 - val_auxilliary_output_2_loss: 1.1172 - val_output_accuracy: 0.5860 - val_auxilliary_output_1_accuracy: 0.5590 - val_auxilliary_output_2_accuracy: 0.5984

Epoch 00006: LearningRateScheduler reducing learning rate to 0.01.
Epoch 6/35
50000/50000 [==============================] - 152s 3ms/sample - loss: 1.3292 - output_loss: 0.8077 - auxilliary_output_1_loss: 0.8834 - auxilliary_output_2_loss: 0.8547 - output_accuracy: 0.7121 - auxilliary_output_1_accuracy: 0.6860 - auxilliary_output_2_accuracy: 0.6978 - val_loss: 1.5946 - val_output_loss: 1.0136 - val_auxilliary_output_1_loss: 0.9595 - val_auxilliary_output_2_loss: 0.9773 - val_output_accuracy: 0.6573 - val_auxilliary_output_1_accuracy: 0.6597 - val_auxilliary_output_2_accuracy: 0.6644

Epoch 00007: LearningRateScheduler reducing learning rate to 0.01.
Epoch 7/35
50000/50000 [==============================] - 152s 3ms/sample - loss: 1.2210 - output_loss: 0.7373 - auxilliary_output_1_loss: 0.8240 - auxilliary_output_2_loss: 0.7883 - output_accuracy: 0.7387 - auxilliary_output_1_accuracy: 0.7077 - auxilliary_output_2_accuracy: 0.7228 - val_loss: 1.5383 - val_output_loss: 0.9826 - val_auxilliary_output_1_loss: 0.9048 - val_auxilliary_output_2_loss: 0.9476 - val_output_accuracy: 0.6625 - val_auxilliary_output_1_accuracy: 0.6810 - val_auxilliary_output_2_accuracy: 0.6666

Epoch 00008: LearningRateScheduler reducing learning rate to 0.0096.
Epoch 8/35
50000/50000 [==============================] - 152s 3ms/sample - loss: 1.1247 - output_loss: 0.6722 - auxilliary_output_1_loss: 0.7776 - auxilliary_output_2_loss: 0.7306 - output_accuracy: 0.7603 - auxilliary_output_1_accuracy: 0.7241 - auxilliary_output_2_accuracy: 0.7433 - val_loss: 1.5842 - val_output_loss: 0.9958 - val_auxilliary_output_1_loss: 0.9800 - val_auxilliary_output_2_loss: 0.9811 - val_output_accuracy: 0.6529 - val_auxilliary_output_1_accuracy: 0.6490 - val_auxilliary_output_2_accuracy: 0.6498

Epoch 00009: LearningRateScheduler reducing learning rate to 0.0096.
Epoch 9/35
50000/50000 [==============================] - 151s 3ms/sample - loss: 1.0491 - output_loss: 0.6233 - auxilliary_output_1_loss: 0.7345 - auxilliary_output_2_loss: 0.6850 - output_accuracy: 0.7813 - auxilliary_output_1_accuracy: 0.7409 - auxilliary_output_2_accuracy: 0.7617 - val_loss: 1.3636 - val_output_loss: 0.8544 - val_auxilliary_output_1_loss: 0.8970 - val_auxilliary_output_2_loss: 0.8004 - val_output_accuracy: 0.7013 - val_auxilliary_output_1_accuracy: 0.6760 - val_auxilliary_output_2_accuracy: 0.7185

Epoch 00010: LearningRateScheduler reducing learning rate to 0.0096.
Epoch 10/35
50000/50000 [==============================] - 150s 3ms/sample - loss: 0.9725 - output_loss: 0.5719 - auxilliary_output_1_loss: 0.6970 - auxilliary_output_2_loss: 0.6385 - output_accuracy: 0.7988 - auxilliary_output_1_accuracy: 0.7548 - auxilliary_output_2_accuracy: 0.7756 - val_loss: 1.2748 - val_output_loss: 0.8038 - val_auxilliary_output_1_loss: 0.7835 - val_auxilliary_output_2_loss: 0.7867 - val_output_accuracy: 0.7149 - val_auxilliary_output_1_accuracy: 0.7213 - val_auxilliary_output_2_accuracy: 0.7206

Epoch 00011: LearningRateScheduler reducing learning rate to 0.0096.
Epoch 11/35
50000/50000 [==============================] - 151s 3ms/sample - loss: 0.8996 - output_loss: 0.5218 - auxilliary_output_1_loss: 0.6622 - auxilliary_output_2_loss: 0.5970 - output_accuracy: 0.8181 - auxilliary_output_1_accuracy: 0.7681 - auxilliary_output_2_accuracy: 0.7938 - val_loss: 1.2819 - val_output_loss: 0.8424 - val_auxilliary_output_1_loss: 0.7392 - val_auxilliary_output_2_loss: 0.7258 - val_output_accuracy: 0.7185 - val_auxilliary_output_1_accuracy: 0.7391 - val_auxilliary_output_2_accuracy: 0.7431

Epoch 00012: LearningRateScheduler reducing learning rate to 0.0096.
Epoch 12/35
50000/50000 [==============================] - 152s 3ms/sample - loss: 0.8250 - output_loss: 0.4713 - auxilliary_output_1_loss: 0.6221 - auxilliary_output_2_loss: 0.5569 - output_accuracy: 0.8352 - auxilliary_output_1_accuracy: 0.7816 - auxilliary_output_2_accuracy: 0.8041 - val_loss: 1.2184 - val_output_loss: 0.7657 - val_auxilliary_output_1_loss: 0.7640 - val_auxilliary_output_2_loss: 0.7453 - val_output_accuracy: 0.7486 - val_auxilliary_output_1_accuracy: 0.7362 - val_auxilliary_output_2_accuracy: 0.7453

Epoch 00013: LearningRateScheduler reducing learning rate to 0.0096.
Epoch 13/35
50000/50000 [==============================] - 151s 3ms/sample - loss: 0.7628 - output_loss: 0.4293 - auxilliary_output_1_loss: 0.5953 - auxilliary_output_2_loss: 0.5165 - output_accuracy: 0.8507 - auxilliary_output_1_accuracy: 0.7903 - auxilliary_output_2_accuracy: 0.8203 - val_loss: 1.0461 - val_output_loss: 0.6546 - val_auxilliary_output_1_loss: 0.6596 - val_auxilliary_output_2_loss: 0.6451 - val_output_accuracy: 0.7781 - val_auxilliary_output_1_accuracy: 0.7688 - val_auxilliary_output_2_accuracy: 0.7761

Epoch 00014: LearningRateScheduler reducing learning rate to 0.0096.
Epoch 14/35
50000/50000 [==============================] - 150s 3ms/sample - loss: 0.7022 - output_loss: 0.3888 - auxilliary_output_1_loss: 0.5676 - auxilliary_output_2_loss: 0.4773 - output_accuracy: 0.8653 - auxilliary_output_1_accuracy: 0.8003 - auxilliary_output_2_accuracy: 0.8332 - val_loss: 1.0753 - val_output_loss: 0.6545 - val_auxilliary_output_1_loss: 0.7364 - val_auxilliary_output_2_loss: 0.6663 - val_output_accuracy: 0.7858 - val_auxilliary_output_1_accuracy: 0.7425 - val_auxilliary_output_2_accuracy: 0.7744

Epoch 00015: LearningRateScheduler reducing learning rate to 0.0096.
Epoch 15/35
50000/50000 [==============================] - 152s 3ms/sample - loss: 0.6623 - output_loss: 0.3618 - auxilliary_output_1_loss: 0.5464 - auxilliary_output_2_loss: 0.4550 - output_accuracy: 0.8748 - auxilliary_output_1_accuracy: 0.8087 - auxilliary_output_2_accuracy: 0.8435 - val_loss: 1.2700 - val_output_loss: 0.7735 - val_auxilliary_output_1_loss: 0.8887 - val_auxilliary_output_2_loss: 0.7663 - val_output_accuracy: 0.7482 - val_auxilliary_output_1_accuracy: 0.6993 - val_auxilliary_output_2_accuracy: 0.7402

Epoch 00016: LearningRateScheduler reducing learning rate to 0.009216.
Epoch 16/35
50000/50000 [==============================] - 151s 3ms/sample - loss: 0.6434 - output_loss: 0.3534 - auxilliary_output_1_loss: 0.5298 - auxilliary_output_2_loss: 0.4369 - output_accuracy: 0.8816 - auxilliary_output_1_accuracy: 0.8150 - auxilliary_output_2_accuracy: 0.8500 - val_loss: 0.9491 - val_output_loss: 0.5861 - val_auxilliary_output_1_loss: 0.6254 - val_auxilliary_output_2_loss: 0.5848 - val_output_accuracy: 0.8084 - val_auxilliary_output_1_accuracy: 0.7832 - val_auxilliary_output_2_accuracy: 0.8001

Epoch 00017: LearningRateScheduler reducing learning rate to 0.009216.
Epoch 17/35
50000/50000 [==============================] - 149s 3ms/sample - loss: 0.5504 - output_loss: 0.2868 - auxilliary_output_1_loss: 0.4949 - auxilliary_output_2_loss: 0.3839 - output_accuracy: 0.8988 - auxilliary_output_1_accuracy: 0.8279 - auxilliary_output_2_accuracy: 0.8674 - val_loss: 0.9856 - val_output_loss: 0.6218 - val_auxilliary_output_1_loss: 0.6089 - val_auxilliary_output_2_loss: 0.6039 - val_output_accuracy: 0.8050 - val_auxilliary_output_1_accuracy: 0.7880 - val_auxilliary_output_2_accuracy: 0.7967

Epoch 00018: LearningRateScheduler reducing learning rate to 0.009216.
Epoch 18/35
50000/50000 [==============================] - 152s 3ms/sample - loss: 0.5103 - output_loss: 0.2599 - auxilliary_output_1_loss: 0.4763 - auxilliary_output_2_loss: 0.3584 - output_accuracy: 0.9110 - auxilliary_output_1_accuracy: 0.8339 - auxilliary_output_2_accuracy: 0.8742 - val_loss: 0.9422 - val_output_loss: 0.5915 - val_auxilliary_output_1_loss: 0.5975 - val_auxilliary_output_2_loss: 0.5714 - val_output_accuracy: 0.8130 - val_auxilliary_output_1_accuracy: 0.7917 - val_auxilliary_output_2_accuracy: 0.8108

Epoch 00019: LearningRateScheduler reducing learning rate to 0.009216.
Epoch 19/35
50000/50000 [==============================] - 150s 3ms/sample - loss: 0.4671 - output_loss: 0.2319 - auxilliary_output_1_loss: 0.4546 - auxilliary_output_2_loss: 0.3292 - output_accuracy: 0.9197 - auxilliary_output_1_accuracy: 0.8425 - auxilliary_output_2_accuracy: 0.8863 - val_loss: 0.8964 - val_output_loss: 0.5679 - val_auxilliary_output_1_loss: 0.5555 - val_auxilliary_output_2_loss: 0.5394 - val_output_accuracy: 0.8252 - val_auxilliary_output_1_accuracy: 0.8070 - val_auxilliary_output_2_accuracy: 0.8234

Epoch 00020: LearningRateScheduler reducing learning rate to 0.009216.
Epoch 20/35
50000/50000 [==============================] - 150s 3ms/sample - loss: 0.4286 - output_loss: 0.2064 - auxilliary_output_1_loss: 0.4349 - auxilliary_output_2_loss: 0.3058 - output_accuracy: 0.9276 - auxilliary_output_1_accuracy: 0.8483 - auxilliary_output_2_accuracy: 0.8933 - val_loss: 1.0719 - val_output_loss: 0.6870 - val_auxilliary_output_1_loss: 0.6381 - val_auxilliary_output_2_loss: 0.6451 - val_output_accuracy: 0.7965 - val_auxilliary_output_1_accuracy: 0.7826 - val_auxilliary_output_2_accuracy: 0.7904

Epoch 00021: LearningRateScheduler reducing learning rate to 0.009216.
Epoch 21/35
50000/50000 [==============================] - 152s 3ms/sample - loss: 0.3874 - output_loss: 0.1780 - auxilliary_output_1_loss: 0.4180 - auxilliary_output_2_loss: 0.2800 - output_accuracy: 0.9386 - auxilliary_output_1_accuracy: 0.8527 - auxilliary_output_2_accuracy: 0.9032 - val_loss: 0.9553 - val_output_loss: 0.6273 - val_auxilliary_output_1_loss: 0.5463 - val_auxilliary_output_2_loss: 0.5472 - val_output_accuracy: 0.8168 - val_auxilliary_output_1_accuracy: 0.8066 - val_auxilliary_output_2_accuracy: 0.8207

Epoch 00022: LearningRateScheduler reducing learning rate to 0.009216.
Epoch 22/35
50000/50000 [==============================] - 149s 3ms/sample - loss: 0.3612 - output_loss: 0.1615 - auxilliary_output_1_loss: 0.4025 - auxilliary_output_2_loss: 0.2630 - output_accuracy: 0.9444 - auxilliary_output_1_accuracy: 0.8603 - auxilliary_output_2_accuracy: 0.9093 - val_loss: 0.9709 - val_output_loss: 0.6197 - val_auxilliary_output_1_loss: 0.5941 - val_auxilliary_output_2_loss: 0.5765 - val_output_accuracy: 0.8295 - val_auxilliary_output_1_accuracy: 0.7996 - val_auxilliary_output_2_accuracy: 0.8199

Epoch 00023: LearningRateScheduler reducing learning rate to 0.009216.
Epoch 23/35
50000/50000 [==============================] - 151s 3ms/sample - loss: 0.3243 - output_loss: 0.1368 - auxilliary_output_1_loss: 0.3857 - auxilliary_output_2_loss: 0.2392 - output_accuracy: 0.9514 - auxilliary_output_1_accuracy: 0.8644 - auxilliary_output_2_accuracy: 0.9160 - val_loss: 1.2389 - val_output_loss: 0.8357 - val_auxilliary_output_1_loss: 0.6289 - val_auxilliary_output_2_loss: 0.7153 - val_output_accuracy: 0.7963 - val_auxilliary_output_1_accuracy: 0.7918 - val_auxilliary_output_2_accuracy: 0.7966

Epoch 00024: LearningRateScheduler reducing learning rate to 0.008847359999999999.
Epoch 24/35
50000/50000 [==============================] - 148s 3ms/sample - loss: 0.2799 - output_loss: 0.1105 - auxilliary_output_1_loss: 0.3578 - auxilliary_output_2_loss: 0.2067 - output_accuracy: 0.9618 - auxilliary_output_1_accuracy: 0.8731 - auxilliary_output_2_accuracy: 0.9287 - val_loss: 0.9797 - val_output_loss: 0.6449 - val_auxilliary_output_1_loss: 0.5490 - val_auxilliary_output_2_loss: 0.5670 - val_output_accuracy: 0.8382 - val_auxilliary_output_1_accuracy: 0.8146 - val_auxilliary_output_2_accuracy: 0.8305

Epoch 00025: LearningRateScheduler reducing learning rate to 0.008847359999999999.
Epoch 25/35
50000/50000 [==============================] - 151s 3ms/sample - loss: 0.2462 - output_loss: 0.0886 - auxilliary_output_1_loss: 0.3424 - auxilliary_output_2_loss: 0.1828 - output_accuracy: 0.9689 - auxilliary_output_1_accuracy: 0.8792 - auxilliary_output_2_accuracy: 0.9367 - val_loss: 1.4342 - val_output_loss: 0.9446 - val_auxilliary_output_1_loss: 0.7465 - val_auxilliary_output_2_loss: 0.8855 - val_output_accuracy: 0.7930 - val_auxilliary_output_1_accuracy: 0.7680 - val_auxilliary_output_2_accuracy: 0.7735

Epoch 00026: LearningRateScheduler reducing learning rate to 0.008847359999999999.
Epoch 26/35
50000/50000 [==============================] - 149s 3ms/sample - loss: 2.1851 - output_loss: 1.5497 - auxilliary_output_1_loss: 0.8251 - auxilliary_output_2_loss: 1.2929 - output_accuracy: 0.4705 - auxilliary_output_1_accuracy: 0.7110 - auxilliary_output_2_accuracy: 0.5651 - val_loss: 1.3100 - val_output_loss: 0.8958 - val_auxilliary_output_1_loss: 0.6269 - val_auxilliary_output_2_loss: 0.7537 - val_output_accuracy: 0.6829 - val_auxilliary_output_1_accuracy: 0.7865 - val_auxilliary_output_2_accuracy: 0.7363

Epoch 00027: LearningRateScheduler reducing learning rate to 0.008847359999999999.
Epoch 27/35
50000/50000 [==============================] - 150s 3ms/sample - loss: 0.8369 - output_loss: 0.5528 - auxilliary_output_1_loss: 0.4395 - auxilliary_output_2_loss: 0.5074 - output_accuracy: 0.8092 - auxilliary_output_1_accuracy: 0.8467 - auxilliary_output_2_accuracy: 0.8243 - val_loss: 0.9434 - val_output_loss: 0.6110 - val_auxilliary_output_1_loss: 0.5392 - val_auxilliary_output_2_loss: 0.5690 - val_output_accuracy: 0.7945 - val_auxilliary_output_1_accuracy: 0.8126 - val_auxilliary_output_2_accuracy: 0.8054

Epoch 00028: LearningRateScheduler reducing learning rate to 0.008847359999999999.
Epoch 28/35
50000/50000 [==============================] - 151s 3ms/sample - loss: 0.5419 - output_loss: 0.3264 - auxilliary_output_1_loss: 0.3704 - auxilliary_output_2_loss: 0.3478 - output_accuracy: 0.8866 - auxilliary_output_1_accuracy: 0.8696 - auxilliary_output_2_accuracy: 0.8797 - val_loss: 1.0697 - val_output_loss: 0.7135 - val_auxilliary_output_1_loss: 0.5587 - val_auxilliary_output_2_loss: 0.6288 - val_output_accuracy: 0.7912 - val_auxilliary_output_1_accuracy: 0.8168 - val_auxilliary_output_2_accuracy: 0.8047

Epoch 00029: LearningRateScheduler reducing learning rate to 0.008847359999999999.
Epoch 29/35
50000/50000 [==============================] - 151s 3ms/sample - loss: 0.4062 - output_loss: 0.2246 - auxilliary_output_1_loss: 0.3364 - auxilliary_output_2_loss: 0.2690 - output_accuracy: 0.9229 - auxilliary_output_1_accuracy: 0.8818 - auxilliary_output_2_accuracy: 0.9079 - val_loss: 1.2388 - val_output_loss: 0.7876 - val_auxilliary_output_1_loss: 0.6928 - val_auxilliary_output_2_loss: 0.8112 - val_output_accuracy: 0.7708 - val_auxilliary_output_1_accuracy: 0.7752 - val_auxilliary_output_2_accuracy: 0.7640

Epoch 00030: LearningRateScheduler reducing learning rate to 0.008847359999999999.
Epoch 30/35
50000/50000 [==============================] - 151s 3ms/sample - loss: 0.3310 - output_loss: 0.1704 - auxilliary_output_1_loss: 0.3141 - auxilliary_output_2_loss: 0.2212 - output_accuracy: 0.9411 - auxilliary_output_1_accuracy: 0.8905 - auxilliary_output_2_accuracy: 0.9228 - val_loss: 1.0383 - val_output_loss: 0.6780 - val_auxilliary_output_1_loss: 0.5576 - val_auxilliary_output_2_loss: 0.6432 - val_output_accuracy: 0.8133 - val_auxilliary_output_1_accuracy: 0.8152 - val_auxilliary_output_2_accuracy: 0.8093

Epoch 00031: LearningRateScheduler reducing learning rate to 0.008847359999999999.
Epoch 31/35
50000/50000 [==============================] - 150s 3ms/sample - loss: 0.2615 - output_loss: 0.1211 - auxilliary_output_1_loss: 0.2896 - auxilliary_output_2_loss: 0.1784 - output_accuracy: 0.9584 - auxilliary_output_1_accuracy: 0.8969 - auxilliary_output_2_accuracy: 0.9386 - val_loss: 1.1760 - val_output_loss: 0.7806 - val_auxilliary_output_1_loss: 0.6153 - val_auxilliary_output_2_loss: 0.7028 - val_output_accuracy: 0.8010 - val_auxilliary_output_1_accuracy: 0.8003 - val_auxilliary_output_2_accuracy: 0.8034

Epoch 00032: LearningRateScheduler reducing learning rate to 0.008493465599999998.
Epoch 32/35
50000/50000 [==============================] - 151s 3ms/sample - loss: 0.2185 - output_loss: 0.0944 - auxilliary_output_1_loss: 0.2668 - auxilliary_output_2_loss: 0.1468 - output_accuracy: 0.9672 - auxilliary_output_1_accuracy: 0.9058 - auxilliary_output_2_accuracy: 0.9501 - val_loss: 1.7912 - val_output_loss: 1.2432 - val_auxilliary_output_1_loss: 0.7684 - val_auxilliary_output_2_loss: 1.0583 - val_output_accuracy: 0.7643 - val_auxilliary_output_1_accuracy: 0.7729 - val_auxilliary_output_2_accuracy: 0.7648

Epoch 00033: LearningRateScheduler reducing learning rate to 0.008493465599999998.
Epoch 33/35
50000/50000 [==============================] - 149s 3ms/sample - loss: 0.1932 - output_loss: 0.0769 - auxilliary_output_1_loss: 0.2563 - auxilliary_output_2_loss: 0.1313 - output_accuracy: 0.9742 - auxilliary_output_1_accuracy: 0.9102 - auxilliary_output_2_accuracy: 0.9544 - val_loss: 1.1805 - val_output_loss: 0.8059 - val_auxilliary_output_1_loss: 0.5830 - val_auxilliary_output_2_loss: 0.6655 - val_output_accuracy: 0.8262 - val_auxilliary_output_1_accuracy: 0.8147 - val_auxilliary_output_2_accuracy: 0.8257

Epoch 00034: LearningRateScheduler reducing learning rate to 0.008493465599999998.
Epoch 34/35
50000/50000 [==============================] - 151s 3ms/sample - loss: 0.1730 - output_loss: 0.0660 - auxilliary_output_1_loss: 0.2420 - auxilliary_output_2_loss: 0.1149 - output_accuracy: 0.9772 - auxilliary_output_1_accuracy: 0.9165 - auxilliary_output_2_accuracy: 0.9600 - val_loss: 1.0301 - val_output_loss: 0.6918 - val_auxilliary_output_1_loss: 0.5229 - val_auxilliary_output_2_loss: 0.6048 - val_output_accuracy: 0.8438 - val_auxilliary_output_1_accuracy: 0.8301 - val_auxilliary_output_2_accuracy: 0.8361

Epoch 00035: LearningRateScheduler reducing learning rate to 0.008493465599999998.
Epoch 35/35
50000/50000 [==============================] - 148s 3ms/sample - loss: 0.1580 - output_loss: 0.0586 - auxilliary_output_1_loss: 0.2283 - auxilliary_output_2_loss: 0.1032 - output_accuracy: 0.9805 - auxilliary_output_1_accuracy: 0.9208 - auxilliary_output_2_accuracy: 0.9654 - val_loss: 1.0190 - val_output_loss: 0.6755 - val_auxilliary_output_1_loss: 0.5559 - val_auxilliary_output_2_loss: 0.5889 - val_output_accuracy: 0.8474 - val_auxilliary_output_1_accuracy: 0.8270 - val_auxilliary_output_2_accuracy: 0.8434
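
For reference, the step schedule can be tabulated directly from decay(); the values below are exactly the rates the scheduler logged above (a 0.96x drop every 8 epochs).

for e in (0, 7, 15, 23, 31):  # first epoch index at each new learning rate
    print('epoch %d: lr = %s' % (e + 1, decay(e)))
# epoch 1: lr = 0.01
# epoch 8: lr = 0.0096
# epoch 16: lr = 0.009216
# epoch 24: lr = 0.008847359999999999
# epoch 32: lr = 0.008493465599999998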
In [ ]: