MNIST-Neural Network-Batch Normalization and Weight Decay

In [1]:
# coding: utf-8
import sys, os
import numpy as np
import matplotlib.pyplot as plt
import math

sys.path.append(os.pardir)
from deeplink.mnist import *
from deeplink.networks import *

Multilayer Neural Network Model (Two Hidden Layers) and Learing/Validation

Initializers

In [2]:
# coding: utf-8
import numpy as np

class Initializer:
    def __init__(self, params, params_size_list, use_batch_normalization=False):
        self.params = params
        self.params_size_list = params_size_list
        self.use_batch_normalization = use_batch_normalization

    def initialize_params(self):
        pass

    def get_params(self):
        return self.params


class Zero_Initializer(Initializer):
    def initialize_params(self, use_batch_normalization):
        for idx in range(1, len(self.params_size_list)):
            self.params['W' + str(idx)] = np.zeros(self.params_size_list[idx - 1], self.params_size_list[idx])
            self.params['b' + str(idx)] = np.zeros(self.params_size_list[idx])
            if self.use_batch_normalization and idx < len(self.params_size_list) - 1:
                self.params['gamma' + str(idx)] = np.zeros(self.params_size_list[idx])
                self.params['beta' + str(idx)] = np.zeros(self.params_size_list[idx])

class N1_Initializer(Initializer):
    def initialize_params(self):
        for idx in range(1, len(self.params_size_list)):
            self.params['W' + str(idx)] = np.random.randn(self.params_size_list[idx - 1], self.params_size_list[idx])
            self.params['b' + str(idx)] = np.random.randn(self.params_size_list[idx])
            if self.use_batch_normalization and idx < len(self.params_size_list) - 1:
                self.params['gamma' + str(idx)] = np.random.randn(self.params_size_list[idx])
                self.params['beta' + str(idx)] = np.random.randn(self.params_size_list[idx])

class N2_Initializer(Initializer):
    def initialize_params(self):
        for idx in range(1, len(self.params_size_list)):
            self.params['W' + str(idx)] = np.random.randn(self.params_size_list[idx - 1], self.params_size_list[idx]) * 0.01
            self.params['b' + str(idx)] = np.random.randn(self.params_size_list[idx]) * 0.01
            if self.use_batch_normalization and idx < len(self.params_size_list) - 1:
                self.params['gamma' + str(idx)] = np.random.randn(self.params_size_list[idx]) * 0.01
                self.params['beta' + str(idx)] = np.random.randn(self.params_size_list[idx]) * 0.01

class Xavier_Initializer(Initializer):
    def initialize_params(self):
        for idx in range(1, len(self.params_size_list)):
            self.params['W' + str(idx)] = np.random.randn(self.params_size_list[idx - 1], self.params_size_list[idx]) / np.sqrt(self.params_size_list[idx - 1])
            self.params['b' + str(idx)] = np.random.randn(self.params_size_list[idx]) / np.sqrt(self.params_size_list[idx - 1])
            if self.use_batch_normalization and idx < len(self.params_size_list) - 1:
                self.params['gamma' + str(idx)] = np.random.randn(self.params_size_list[idx]) / np.sqrt(self.params_size_list[idx - 1])
                self.params['beta' + str(idx)] = np.random.randn(self.params_size_list[idx]) / np.sqrt(self.params_size_list[idx - 1])


class He_Initializer(Initializer):
    def initialize_params(self):
        for idx in range(1, len(self.params_size_list)):
            self.params['W' + str(idx)] = np.random.randn(self.params_size_list[idx - 1], self.params_size_list[idx]) * np.sqrt(2) / np.sqrt(self.params_size_list[idx - 1])
            self.params['b' + str(idx)] = np.random.randn(self.params_size_list[idx]) * np.sqrt(2) / np.sqrt(self.params_size_list[idx - 1])
            if self.use_batch_normalization and idx < len(self.params_size_list) - 1:
                self.params['gamma' + str(idx)] = np.random.randn(self.params_size_list[idx]) * np.sqrt(2) / np.sqrt(self.params_size_list[idx - 1])
                self.params['beta' + str(idx)] = np.random.randn(self.params_size_list[idx]) * np.sqrt(2) / np.sqrt(self.params_size_list[idx - 1])

New Layer - Batch Normalization

In [3]:
class BatchNormalization:
    def __init__(self, gamma, beta, momentum=0.9, running_mean=None, running_var=None):
        self.gamma = gamma
        self.beta = beta
        self.momentum = momentum
        self.input_shape = None

        self.running_mean = running_mean
        self.running_var = running_var  
        
        self.batch_size = None
        self.xc = None
        self.std = None
        self.dgamma = None
        self.dbeta = None

    def forward(self, x, is_train=True):
        self.input_shape = x.shape
        if x.ndim != 2:
            N, C, H, W = x.shape
            x = x.reshape(N, -1)

        out = self.__forward(x, is_train)
        
        return out.reshape(*self.input_shape)
            
    def __forward(self, x, is_train):
        if self.running_mean is None:
            N, D = x.shape
            self.running_mean = np.zeros(D)
            self.running_var = np.zeros(D)
                        
        if is_train:
            mu = x.mean(axis=0)
            xc = x - mu
            var = np.mean(xc**2, axis=0)
            std = np.sqrt(var + 10e-7)
            xn = xc / std
            
            self.batch_size = x.shape[0]
            self.xc = xc
            self.xn = xn
            self.std = std
            self.running_mean = self.momentum * self.running_mean + (1-self.momentum) * mu
            self.running_var = self.momentum * self.running_var + (1-self.momentum) * var            
        else:
            xc = x - self.running_mean
            xn = xc / ((np.sqrt(self.running_var + 10e-7)))
            
        out = self.gamma * xn + self.beta 
        return out

    def backward(self, dout):
        if dout.ndim != 2:
            N, C, H, W = dout.shape
            dout = dout.reshape(N, -1)

        dx = self.__backward(dout)

        dx = dx.reshape(*self.input_shape)
        return dx

    def __backward(self, dout):
        dbeta = dout.sum(axis=0)
        dgamma = np.sum(self.xn * dout, axis=0)
        dxn = self.gamma * dout
        dxc = dxn / self.std
        dstd = -np.sum((dxn * self.xc) / (self.std * self.std), axis=0)
        dvar = 0.5 * dstd / self.std
        dxc += (2.0 / self.batch_size) * self.xc * dvar
        dmu = np.sum(dxc, axis=0)
        dx = dxc - dmu / self.batch_size
        
        self.dgamma = dgamma
        self.dbeta = dbeta
        
        return dx
In [4]:
activation_layers = {
    'Sigmoid': Sigmoid,
    'ReLU': ReLU
}

optimizers = {
    "SGD": SGD,
    "Momentum": Momentum,
    "Nesterov": Nesterov,
    "AdaGrad": AdaGrad,
    "RMSprop": RMSprop,
    "Adam": Adam
}

initializers = {
    'Zero': Zero_Initializer,
    'N1': N1_Initializer,
    'N2': N2_Initializer, # We will use this as a new initializer for supporting Batch Normalization
    'Xavier': Xavier_Initializer,
    'He': He_Initializer
}

Multi Layer Model Class

In [5]:
class MultiLayerNetExtended(MultiLayerNet):
    def __init__(self, input_size, hidden_size_list, output_size, activation='ReLU', initializer='N2', 
                 optimizer='AdaGrad', learning_rate=0.01, 
                 use_batch_normalization=False, 
                 use_weight_decay=False, weight_decay_lambda=0.0):
        self.input_size = input_size
        self.output_size = output_size
        self.hidden_size_list = hidden_size_list
        self.hidden_layer_num = len(hidden_size_list)
        
        self.use_batch_normalization = use_batch_normalization

        self.use_weight_decay = use_weight_decay
        self.weight_decay_lambda = weight_decay_lambda
            
        # Weight Initialization
        self.params = {}
        self.weight_initialization(initializer)
        
        # Layering
        self.layers = OrderedDict()
        self.last_layer = None
        self.layering(activation)

        # Optimization Method
        self.optimizer = optimizers[optimizer](lr=learning_rate)
    
    def weight_initialization(self, initializer):
        params_size_list = [self.input_size] + self.hidden_size_list + [self.output_size]
        initializer_obj = initializers[initializer](self.params, 
                                                    params_size_list, 
                                                    self.use_batch_normalization)
        initializer_obj.initialize_params();
        
    def layering(self, activation):
        for idx in range(1, self.hidden_layer_num + 1):
            self.layers['Affine' + str(idx)] = Affine(self.params['W' + str(idx)], self.params['b' + str(idx)])
            if self.use_batch_normalization:
                self.layers['Batch_Normalization' + str(idx)] = BatchNormalization(self.params['gamma' + str(idx)], 
                                                                                   self.params['beta' + str(idx)])
            self.layers['Activation' + str(idx)] = activation_layers[activation]()

        idx = self.hidden_layer_num + 1
        self.layers['Affine' + str(idx)] = Affine(self.params['W' + str(idx)], self.params['b' + str(idx)])

        self.last_layer = SoftmaxWithCrossEntropyLoss()  

    def predict(self, x, is_train=False):
        for key, layer in self.layers.items():
            if "BatchNorm" in key:
                x = layer.forward(x, is_train)
            else:
                x = layer.forward(x)
        return x

    def loss(self, x, t, is_train=False):
        y = self.predict(x, is_train)

        if self.use_weight_decay:
            weight_decay = 0.0
            for idx in range(1, self.hidden_layer_num + 2):
                W = self.params['W' + str(idx)]
                weight_decay += 0.5 * self.weight_decay_lambda * np.sum(W**2)
            return self.last_layer.forward(y, t) + weight_decay
        else:
            return self.last_layer.forward(y, t)

    def accuracy(self, x, t):
        y = self.predict(x, is_train=False)
        y = np.argmax(y, axis=1)
        if t.ndim != 1 : t = np.argmax(t, axis=1)

        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy        

    def backpropagation_gradient(self, x, t):
        # forward
        self.loss(x, t, is_train=True)

        # backward
        din = 1
        din = self.last_layer.backward(din)

        layers = list(self.layers.values())
        layers.reverse()
        for layer in layers:
            din = layer.backward(din)

        grads = {}
        for idx in range(1, self.hidden_layer_num + 2):
            if self.use_weight_decay:
                grads['W' + str(idx)] = self.layers['Affine' + str(idx)].dW + self.weight_decay_lambda * self.params['W' + str(idx)]
            else:
                grads['W' + str(idx)] = self.layers['Affine' + str(idx)].dW
            grads['b' + str(idx)] = self.layers['Affine' + str(idx)].db

            if self.use_batch_normalization and idx <= self.hidden_layer_num:
                grads['gamma' + str(idx)] = self.layers['Batch_Normalization' + str(idx)].dgamma
                grads['beta' + str(idx)] = self.layers['Batch_Normalization' + str(idx)].dbeta
                
        return grads

    def learning(self, x_batch, t_batch):
        grads = self.backpropagation_gradient(x_batch, t_batch)
        self.optimizer.update(self.params, grads)

Training and Evaluation

In [6]:
data = mnist_data("/Users/yhhan/git/aiclass/0.Professor/data/MNIST_data/.")
(img_train, label_train), (img_validation, label_validation), (img_test, label_test) = data.load_mnist(flatten=True, normalize=True, one_hot_label=True)

input_size=784
hidden_layer1_size=128
hidden_layer2_size=128
output_size=10

num_epochs = 50
train_size = img_train.shape[0]
batch_size = 1000
learning_rate = 0.1

markers = {"N2, AdaGrad, No_Batch_Norm, lambda=0.0": "+", 
           "N2, AdaGrad, Batch_Norm, lambda=0.0": "*", 
           "N2, AdaGrad, Batch_Norm, lambda=0.1": "o",
           "He, AdaGrad, No_Batch_Norm, lambda=0.0": "x", 
           "He, AdaGrad, Batch_Norm, lambda=0.0": "h", 
           "He, AdaGrad, Batch_Norm, lambda=0.1": "H"}

networks = {}
train_errors = {}
validation_errors = {}
test_accuracy_values = {}
max_test_accuracy_epoch = {}
max_test_accuracy_value = {}

for key in markers.keys():
    if key == "N2, AdaGrad, No_Batch_Norm, lambda=0.0":
        networks[key] = MultiLayerNetExtended(input_size, [hidden_layer1_size, hidden_layer2_size], output_size, 
                                activation='ReLU', 
                                initializer='N2',
                                optimizer='AdaGrad', learning_rate=learning_rate,
                                use_batch_normalization=False, 
                                use_weight_decay=False, weight_decay_lambda=0.0)
    elif key == "N2, AdaGrad, Batch_Norm, lambda=0.0":
        networks[key] = MultiLayerNetExtended(input_size, [hidden_layer1_size, hidden_layer2_size], output_size, 
                                activation='ReLU', 
                                initializer='N2',
                                optimizer='AdaGrad', learning_rate=learning_rate,
                                use_batch_normalization=True,
                                use_weight_decay=False, weight_decay_lambda=0.0)
    elif key == "N2, AdaGrad, Batch_Norm, lambda=0.1":
        networks[key] = MultiLayerNetExtended(input_size, [hidden_layer1_size, hidden_layer2_size], output_size, 
                                activation='ReLU', 
                                initializer='N2',
                                optimizer='AdaGrad', learning_rate=learning_rate,
                                use_batch_normalization=True,
                                use_weight_decay=True, weight_decay_lambda=0.1)        
    elif key == "He, AdaGrad, No_Batch_Norm, lambda=0.0":
        networks[key] = MultiLayerNetExtended(input_size, [hidden_layer1_size, hidden_layer2_size], output_size, 
                                activation='ReLU', 
                                initializer='He',
                                optimizer='AdaGrad', learning_rate=learning_rate,
                                use_batch_normalization=False,
                                use_weight_decay=False, weight_decay_lambda=0.0)
    elif key == "He, AdaGrad, Batch_Norm, lambda=0.0":
        networks[key] = MultiLayerNetExtended(input_size, [hidden_layer1_size, hidden_layer2_size], output_size, 
                                activation='ReLU', 
                                initializer='He',
                                optimizer='AdaGrad', learning_rate=learning_rate,
                                use_batch_normalization=True,
                                use_weight_decay=False, weight_decay_lambda=0.0)
    elif key == "He, AdaGrad, Batch_Norm, lambda=0.1":
        networks[key] = MultiLayerNetExtended(input_size, [hidden_layer1_size, hidden_layer2_size], output_size, 
                                activation='ReLU', 
                                initializer='He',
                                optimizer='AdaGrad', learning_rate=learning_rate,
                                use_batch_normalization=True,
                                use_weight_decay=True, weight_decay_lambda=0.1)        
        
    train_errors[key] = [] 
    validation_errors[key] = []
    test_accuracy_values[key] = []
    max_test_accuracy_epoch[key] = 0
    max_test_accuracy_value[key] = 0.0
In [7]:
epoch_list = []

num_batch = math.ceil(train_size / batch_size)

for i in range(num_epochs):
    epoch_list.append(i)
    for key in markers.keys():
        for k in range(num_batch):
            x_batch = img_train[k * batch_size : k * batch_size + batch_size]
            t_batch = label_train[k * batch_size : k * batch_size + batch_size]
            networks[key].learning(x_batch, t_batch)

        train_loss = networks[key].loss(x_batch, t_batch, is_train=True)
        train_errors[key].append(train_loss)

        validation_loss = networks[key].loss(img_validation, label_validation, is_train=False)
        validation_errors[key].append(validation_loss)    

        test_accuracy = networks[key].accuracy(img_test, label_test)
        test_accuracy_values[key].append(test_accuracy)
        if test_accuracy > max_test_accuracy_value[key]:
            max_test_accuracy_epoch[key] = i
            max_test_accuracy_value[key] = test_accuracy
        print("{0:38s}-Epoch:{1:3d}, Train Err.:{2:7.5f}, Validation Err.:{3:7.5f}, Test Accuracy:{4:7.5f}, Max Test Accuracy:{5:7.5f}".format(
            key,
            i,
            train_loss,
            validation_loss,
            test_accuracy,
            max_test_accuracy_value[key]
        ))
    print()    
N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch:  0, Train Err.:0.29420, Validation Err.:0.27180, Test Accuracy:0.90270, Max Test Accuracy:0.90270
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch:  0, Train Err.:0.09101, Validation Err.:0.11990, Test Accuracy:0.95630, Max Test Accuracy:0.95630
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch:  0, Train Err.:1.17575, Validation Err.:1.21322, Test Accuracy:0.77510, Max Test Accuracy:0.77510
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch:  0, Train Err.:0.24563, Validation Err.:0.21871, Test Accuracy:0.91570, Max Test Accuracy:0.91570
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch:  0, Train Err.:0.07612, Validation Err.:0.11275, Test Accuracy:0.95690, Max Test Accuracy:0.95690
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch:  0, Train Err.:1.06066, Validation Err.:1.08043, Test Accuracy:0.81700, Max Test Accuracy:0.81700

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch:  1, Train Err.:0.18584, Validation Err.:0.19378, Test Accuracy:0.93030, Max Test Accuracy:0.93030
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch:  1, Train Err.:0.04513, Validation Err.:0.09069, Test Accuracy:0.96600, Max Test Accuracy:0.96600
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch:  1, Train Err.:0.76966, Validation Err.:0.80488, Test Accuracy:0.87300, Max Test Accuracy:0.87300
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch:  1, Train Err.:0.15998, Validation Err.:0.15808, Test Accuracy:0.93800, Max Test Accuracy:0.93800
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch:  1, Train Err.:0.03489, Validation Err.:0.08802, Test Accuracy:0.96620, Max Test Accuracy:0.96620
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch:  1, Train Err.:0.78362, Validation Err.:0.80771, Test Accuracy:0.87490, Max Test Accuracy:0.87490

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch:  2, Train Err.:0.13651, Validation Err.:0.17047, Test Accuracy:0.93700, Max Test Accuracy:0.93700
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch:  2, Train Err.:0.02978, Validation Err.:0.08239, Test Accuracy:0.97130, Max Test Accuracy:0.97130
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch:  2, Train Err.:0.57870, Validation Err.:0.62202, Test Accuracy:0.90760, Max Test Accuracy:0.90760
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch:  2, Train Err.:0.12686, Validation Err.:0.13883, Test Accuracy:0.94540, Max Test Accuracy:0.94540
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch:  2, Train Err.:0.02114, Validation Err.:0.07969, Test Accuracy:0.97090, Max Test Accuracy:0.97090
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch:  2, Train Err.:0.63156, Validation Err.:0.64712, Test Accuracy:0.89290, Max Test Accuracy:0.89290

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch:  3, Train Err.:0.11136, Validation Err.:0.15961, Test Accuracy:0.94220, Max Test Accuracy:0.94220
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch:  3, Train Err.:0.02140, Validation Err.:0.07951, Test Accuracy:0.97220, Max Test Accuracy:0.97220
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch:  3, Train Err.:0.50559, Validation Err.:0.53505, Test Accuracy:0.91840, Max Test Accuracy:0.91840
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch:  3, Train Err.:0.10503, Validation Err.:0.13074, Test Accuracy:0.94950, Max Test Accuracy:0.94950
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch:  3, Train Err.:0.01383, Validation Err.:0.07697, Test Accuracy:0.97280, Max Test Accuracy:0.97280
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch:  3, Train Err.:0.53976, Validation Err.:0.55938, Test Accuracy:0.90210, Max Test Accuracy:0.90210

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch:  4, Train Err.:0.09677, Validation Err.:0.15059, Test Accuracy:0.94720, Max Test Accuracy:0.94720
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch:  4, Train Err.:0.01558, Validation Err.:0.08008, Test Accuracy:0.97440, Max Test Accuracy:0.97440
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch:  4, Train Err.:0.46726, Validation Err.:0.49332, Test Accuracy:0.91740, Max Test Accuracy:0.91840
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch:  4, Train Err.:0.09079, Validation Err.:0.12461, Test Accuracy:0.95370, Max Test Accuracy:0.95370
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch:  4, Train Err.:0.01008, Validation Err.:0.07600, Test Accuracy:0.97380, Max Test Accuracy:0.97380
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch:  4, Train Err.:0.49064, Validation Err.:0.51690, Test Accuracy:0.90960, Max Test Accuracy:0.90960

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch:  5, Train Err.:0.08653, Validation Err.:0.14609, Test Accuracy:0.95010, Max Test Accuracy:0.95010
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch:  5, Train Err.:0.01040, Validation Err.:0.08287, Test Accuracy:0.97530, Max Test Accuracy:0.97530
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch:  5, Train Err.:0.43375, Validation Err.:0.47160, Test Accuracy:0.92680, Max Test Accuracy:0.92680
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch:  5, Train Err.:0.08114, Validation Err.:0.12357, Test Accuracy:0.95470, Max Test Accuracy:0.95470
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch:  5, Train Err.:0.00715, Validation Err.:0.07806, Test Accuracy:0.97380, Max Test Accuracy:0.97380
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch:  5, Train Err.:0.46675, Validation Err.:0.48959, Test Accuracy:0.91550, Max Test Accuracy:0.91550

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch:  6, Train Err.:0.07786, Validation Err.:0.14373, Test Accuracy:0.95180, Max Test Accuracy:0.95180
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch:  6, Train Err.:0.00665, Validation Err.:0.08641, Test Accuracy:0.97520, Max Test Accuracy:0.97530
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch:  6, Train Err.:0.42105, Validation Err.:0.45514, Test Accuracy:0.92440, Max Test Accuracy:0.92680
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch:  6, Train Err.:0.07369, Validation Err.:0.12240, Test Accuracy:0.95640, Max Test Accuracy:0.95640
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch:  6, Train Err.:0.00540, Validation Err.:0.07887, Test Accuracy:0.97410, Max Test Accuracy:0.97410
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch:  6, Train Err.:0.44519, Validation Err.:0.47436, Test Accuracy:0.92050, Max Test Accuracy:0.92050

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch:  7, Train Err.:0.07099, Validation Err.:0.13986, Test Accuracy:0.95340, Max Test Accuracy:0.95340
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch:  7, Train Err.:0.00474, Validation Err.:0.09089, Test Accuracy:0.97570, Max Test Accuracy:0.97570
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch:  7, Train Err.:0.40149, Validation Err.:0.43570, Test Accuracy:0.92450, Max Test Accuracy:0.92680
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch:  7, Train Err.:0.06394, Validation Err.:0.11814, Test Accuracy:0.95890, Max Test Accuracy:0.95890
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch:  7, Train Err.:0.00421, Validation Err.:0.08036, Test Accuracy:0.97460, Max Test Accuracy:0.97460
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch:  7, Train Err.:0.40480, Validation Err.:0.44352, Test Accuracy:0.92420, Max Test Accuracy:0.92420

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch:  8, Train Err.:0.06634, Validation Err.:0.13857, Test Accuracy:0.95490, Max Test Accuracy:0.95490
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch:  8, Train Err.:0.00367, Validation Err.:0.09489, Test Accuracy:0.97560, Max Test Accuracy:0.97570
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch:  8, Train Err.:0.38733, Validation Err.:0.42702, Test Accuracy:0.92470, Max Test Accuracy:0.92680
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch:  8, Train Err.:0.05801, Validation Err.:0.11635, Test Accuracy:0.96050, Max Test Accuracy:0.96050
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch:  8, Train Err.:0.00336, Validation Err.:0.08210, Test Accuracy:0.97450, Max Test Accuracy:0.97460
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch:  8, Train Err.:0.38972, Validation Err.:0.42517, Test Accuracy:0.92570, Max Test Accuracy:0.92570

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch:  9, Train Err.:0.06232, Validation Err.:0.13746, Test Accuracy:0.95530, Max Test Accuracy:0.95530
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch:  9, Train Err.:0.00282, Validation Err.:0.09751, Test Accuracy:0.97520, Max Test Accuracy:0.97570
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch:  9, Train Err.:0.36445, Validation Err.:0.41003, Test Accuracy:0.92980, Max Test Accuracy:0.92980
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch:  9, Train Err.:0.05172, Validation Err.:0.11300, Test Accuracy:0.96190, Max Test Accuracy:0.96190
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch:  9, Train Err.:0.00272, Validation Err.:0.08507, Test Accuracy:0.97490, Max Test Accuracy:0.97490
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch:  9, Train Err.:0.38392, Validation Err.:0.41876, Test Accuracy:0.92540, Max Test Accuracy:0.92570

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 10, Train Err.:0.06004, Validation Err.:0.13731, Test Accuracy:0.95630, Max Test Accuracy:0.95630
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 10, Train Err.:0.00224, Validation Err.:0.09908, Test Accuracy:0.97550, Max Test Accuracy:0.97570
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 10, Train Err.:0.34985, Validation Err.:0.39745, Test Accuracy:0.92970, Max Test Accuracy:0.92980
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 10, Train Err.:0.04633, Validation Err.:0.11245, Test Accuracy:0.96250, Max Test Accuracy:0.96250
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 10, Train Err.:0.00211, Validation Err.:0.08720, Test Accuracy:0.97500, Max Test Accuracy:0.97500
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 10, Train Err.:0.37163, Validation Err.:0.40899, Test Accuracy:0.92710, Max Test Accuracy:0.92710

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 11, Train Err.:0.05818, Validation Err.:0.13716, Test Accuracy:0.95700, Max Test Accuracy:0.95700
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 11, Train Err.:0.00182, Validation Err.:0.10058, Test Accuracy:0.97610, Max Test Accuracy:0.97610
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 11, Train Err.:0.35032, Validation Err.:0.39271, Test Accuracy:0.92890, Max Test Accuracy:0.92980
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 11, Train Err.:0.04288, Validation Err.:0.11264, Test Accuracy:0.96290, Max Test Accuracy:0.96290
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 11, Train Err.:0.00184, Validation Err.:0.09045, Test Accuracy:0.97570, Max Test Accuracy:0.97570
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 11, Train Err.:0.35933, Validation Err.:0.40072, Test Accuracy:0.92710, Max Test Accuracy:0.92710

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 12, Train Err.:0.05542, Validation Err.:0.13980, Test Accuracy:0.95750, Max Test Accuracy:0.95750
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 12, Train Err.:0.00151, Validation Err.:0.10199, Test Accuracy:0.97660, Max Test Accuracy:0.97660
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 12, Train Err.:0.34691, Validation Err.:0.38471, Test Accuracy:0.93040, Max Test Accuracy:0.93040
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 12, Train Err.:0.03984, Validation Err.:0.10996, Test Accuracy:0.96310, Max Test Accuracy:0.96310
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 12, Train Err.:0.00156, Validation Err.:0.09259, Test Accuracy:0.97620, Max Test Accuracy:0.97620
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 12, Train Err.:0.34823, Validation Err.:0.39000, Test Accuracy:0.92910, Max Test Accuracy:0.92910

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 13, Train Err.:0.05578, Validation Err.:0.13774, Test Accuracy:0.95750, Max Test Accuracy:0.95750
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 13, Train Err.:0.00130, Validation Err.:0.10361, Test Accuracy:0.97660, Max Test Accuracy:0.97660
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 13, Train Err.:0.32934, Validation Err.:0.37521, Test Accuracy:0.93140, Max Test Accuracy:0.93140
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 13, Train Err.:0.03662, Validation Err.:0.10900, Test Accuracy:0.96360, Max Test Accuracy:0.96360
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 13, Train Err.:0.00133, Validation Err.:0.09494, Test Accuracy:0.97620, Max Test Accuracy:0.97620
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 13, Train Err.:0.33641, Validation Err.:0.38260, Test Accuracy:0.92810, Max Test Accuracy:0.92910

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 14, Train Err.:0.05166, Validation Err.:0.13853, Test Accuracy:0.95770, Max Test Accuracy:0.95770
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 14, Train Err.:0.00111, Validation Err.:0.10442, Test Accuracy:0.97650, Max Test Accuracy:0.97660
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 14, Train Err.:0.32834, Validation Err.:0.36650, Test Accuracy:0.93290, Max Test Accuracy:0.93290
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 14, Train Err.:0.03324, Validation Err.:0.10699, Test Accuracy:0.96430, Max Test Accuracy:0.96430
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 14, Train Err.:0.00117, Validation Err.:0.09660, Test Accuracy:0.97620, Max Test Accuracy:0.97620
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 14, Train Err.:0.32340, Validation Err.:0.37252, Test Accuracy:0.93150, Max Test Accuracy:0.93150

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 15, Train Err.:0.05007, Validation Err.:0.14017, Test Accuracy:0.95740, Max Test Accuracy:0.95770
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 15, Train Err.:0.00097, Validation Err.:0.10582, Test Accuracy:0.97690, Max Test Accuracy:0.97690
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 15, Train Err.:0.32052, Validation Err.:0.36358, Test Accuracy:0.93420, Max Test Accuracy:0.93420
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 15, Train Err.:0.02984, Validation Err.:0.10748, Test Accuracy:0.96380, Max Test Accuracy:0.96430
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 15, Train Err.:0.00104, Validation Err.:0.09814, Test Accuracy:0.97600, Max Test Accuracy:0.97620
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 15, Train Err.:0.31628, Validation Err.:0.36752, Test Accuracy:0.93160, Max Test Accuracy:0.93160

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 16, Train Err.:0.04876, Validation Err.:0.14164, Test Accuracy:0.95790, Max Test Accuracy:0.95790
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 16, Train Err.:0.00086, Validation Err.:0.10693, Test Accuracy:0.97680, Max Test Accuracy:0.97690
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 16, Train Err.:0.31383, Validation Err.:0.35600, Test Accuracy:0.93450, Max Test Accuracy:0.93450
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 16, Train Err.:0.02816, Validation Err.:0.10686, Test Accuracy:0.96450, Max Test Accuracy:0.96450
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 16, Train Err.:0.00092, Validation Err.:0.09970, Test Accuracy:0.97590, Max Test Accuracy:0.97620
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 16, Train Err.:0.32000, Validation Err.:0.36064, Test Accuracy:0.93310, Max Test Accuracy:0.93310

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 17, Train Err.:0.04692, Validation Err.:0.14243, Test Accuracy:0.95790, Max Test Accuracy:0.95790
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 17, Train Err.:0.00076, Validation Err.:0.10844, Test Accuracy:0.97680, Max Test Accuracy:0.97690
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 17, Train Err.:0.30872, Validation Err.:0.35081, Test Accuracy:0.93570, Max Test Accuracy:0.93570
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 17, Train Err.:0.02668, Validation Err.:0.10748, Test Accuracy:0.96550, Max Test Accuracy:0.96550
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 17, Train Err.:0.00082, Validation Err.:0.10096, Test Accuracy:0.97630, Max Test Accuracy:0.97630
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 17, Train Err.:0.31683, Validation Err.:0.35991, Test Accuracy:0.93290, Max Test Accuracy:0.93310

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 18, Train Err.:0.04615, Validation Err.:0.14246, Test Accuracy:0.95960, Max Test Accuracy:0.95960
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 18, Train Err.:0.00069, Validation Err.:0.10958, Test Accuracy:0.97680, Max Test Accuracy:0.97690
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 18, Train Err.:0.30258, Validation Err.:0.34818, Test Accuracy:0.93560, Max Test Accuracy:0.93570
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 18, Train Err.:0.02500, Validation Err.:0.10849, Test Accuracy:0.96520, Max Test Accuracy:0.96550
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 18, Train Err.:0.00075, Validation Err.:0.10225, Test Accuracy:0.97660, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 18, Train Err.:0.30635, Validation Err.:0.35130, Test Accuracy:0.93340, Max Test Accuracy:0.93340

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 19, Train Err.:0.04534, Validation Err.:0.14372, Test Accuracy:0.95910, Max Test Accuracy:0.95960
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 19, Train Err.:0.00062, Validation Err.:0.11080, Test Accuracy:0.97690, Max Test Accuracy:0.97690
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 19, Train Err.:0.29958, Validation Err.:0.34209, Test Accuracy:0.93620, Max Test Accuracy:0.93620
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 19, Train Err.:0.02391, Validation Err.:0.10896, Test Accuracy:0.96550, Max Test Accuracy:0.96550
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 19, Train Err.:0.00068, Validation Err.:0.10331, Test Accuracy:0.97640, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 19, Train Err.:0.30547, Validation Err.:0.35055, Test Accuracy:0.93330, Max Test Accuracy:0.93340

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 20, Train Err.:0.04435, Validation Err.:0.14378, Test Accuracy:0.95940, Max Test Accuracy:0.95960
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 20, Train Err.:0.00057, Validation Err.:0.11184, Test Accuracy:0.97670, Max Test Accuracy:0.97690
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 20, Train Err.:0.29679, Validation Err.:0.33832, Test Accuracy:0.93700, Max Test Accuracy:0.93700
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 20, Train Err.:0.02276, Validation Err.:0.11006, Test Accuracy:0.96590, Max Test Accuracy:0.96590
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 20, Train Err.:0.00062, Validation Err.:0.10449, Test Accuracy:0.97630, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 20, Train Err.:0.29915, Validation Err.:0.34496, Test Accuracy:0.93540, Max Test Accuracy:0.93540

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 21, Train Err.:0.04319, Validation Err.:0.14539, Test Accuracy:0.96030, Max Test Accuracy:0.96030
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 21, Train Err.:0.00052, Validation Err.:0.11285, Test Accuracy:0.97670, Max Test Accuracy:0.97690
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 21, Train Err.:0.28954, Validation Err.:0.33464, Test Accuracy:0.93670, Max Test Accuracy:0.93700
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 21, Train Err.:0.02176, Validation Err.:0.11107, Test Accuracy:0.96610, Max Test Accuracy:0.96610
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 21, Train Err.:0.00057, Validation Err.:0.10559, Test Accuracy:0.97630, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 21, Train Err.:0.29590, Validation Err.:0.34388, Test Accuracy:0.93430, Max Test Accuracy:0.93540

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 22, Train Err.:0.04199, Validation Err.:0.14508, Test Accuracy:0.96040, Max Test Accuracy:0.96040
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 22, Train Err.:0.00048, Validation Err.:0.11385, Test Accuracy:0.97680, Max Test Accuracy:0.97690
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 22, Train Err.:0.28989, Validation Err.:0.33250, Test Accuracy:0.93680, Max Test Accuracy:0.93700
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 22, Train Err.:0.02073, Validation Err.:0.11237, Test Accuracy:0.96580, Max Test Accuracy:0.96610
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 22, Train Err.:0.00053, Validation Err.:0.10661, Test Accuracy:0.97620, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 22, Train Err.:0.29062, Validation Err.:0.33910, Test Accuracy:0.93560, Max Test Accuracy:0.93560

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 23, Train Err.:0.04078, Validation Err.:0.14599, Test Accuracy:0.96060, Max Test Accuracy:0.96060
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 23, Train Err.:0.00045, Validation Err.:0.11484, Test Accuracy:0.97690, Max Test Accuracy:0.97690
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 23, Train Err.:0.28247, Validation Err.:0.32833, Test Accuracy:0.93870, Max Test Accuracy:0.93870
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 23, Train Err.:0.01981, Validation Err.:0.11324, Test Accuracy:0.96630, Max Test Accuracy:0.96630
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 23, Train Err.:0.00050, Validation Err.:0.10755, Test Accuracy:0.97630, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 23, Train Err.:0.28678, Validation Err.:0.33179, Test Accuracy:0.93570, Max Test Accuracy:0.93570

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 24, Train Err.:0.03917, Validation Err.:0.14634, Test Accuracy:0.96050, Max Test Accuracy:0.96060
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 24, Train Err.:0.00041, Validation Err.:0.11577, Test Accuracy:0.97700, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 24, Train Err.:0.27577, Validation Err.:0.32034, Test Accuracy:0.93940, Max Test Accuracy:0.93940
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 24, Train Err.:0.01866, Validation Err.:0.11512, Test Accuracy:0.96590, Max Test Accuracy:0.96630
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 24, Train Err.:0.00046, Validation Err.:0.10848, Test Accuracy:0.97620, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 24, Train Err.:0.28297, Validation Err.:0.33251, Test Accuracy:0.93570, Max Test Accuracy:0.93570

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 25, Train Err.:0.03928, Validation Err.:0.15092, Test Accuracy:0.96030, Max Test Accuracy:0.96060
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 25, Train Err.:0.00039, Validation Err.:0.11670, Test Accuracy:0.97690, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 25, Train Err.:0.27189, Validation Err.:0.31887, Test Accuracy:0.93930, Max Test Accuracy:0.93940
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 25, Train Err.:0.01798, Validation Err.:0.11687, Test Accuracy:0.96620, Max Test Accuracy:0.96630
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 25, Train Err.:0.00043, Validation Err.:0.10926, Test Accuracy:0.97620, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 25, Train Err.:0.27871, Validation Err.:0.32801, Test Accuracy:0.93720, Max Test Accuracy:0.93720

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 26, Train Err.:0.03778, Validation Err.:0.15127, Test Accuracy:0.96050, Max Test Accuracy:0.96060
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 26, Train Err.:0.00036, Validation Err.:0.11755, Test Accuracy:0.97680, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 26, Train Err.:0.27410, Validation Err.:0.32080, Test Accuracy:0.93830, Max Test Accuracy:0.93940
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 26, Train Err.:0.01710, Validation Err.:0.11880, Test Accuracy:0.96700, Max Test Accuracy:0.96700
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 26, Train Err.:0.00041, Validation Err.:0.11009, Test Accuracy:0.97610, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 26, Train Err.:0.27968, Validation Err.:0.32543, Test Accuracy:0.93670, Max Test Accuracy:0.93720

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 27, Train Err.:0.03660, Validation Err.:0.15307, Test Accuracy:0.96020, Max Test Accuracy:0.96060
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 27, Train Err.:0.00034, Validation Err.:0.11839, Test Accuracy:0.97660, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 27, Train Err.:0.26602, Validation Err.:0.31758, Test Accuracy:0.93840, Max Test Accuracy:0.93940
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 27, Train Err.:0.01626, Validation Err.:0.11943, Test Accuracy:0.96720, Max Test Accuracy:0.96720
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 27, Train Err.:0.00038, Validation Err.:0.11084, Test Accuracy:0.97620, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 27, Train Err.:0.27474, Validation Err.:0.32402, Test Accuracy:0.93710, Max Test Accuracy:0.93720

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 28, Train Err.:0.03512, Validation Err.:0.15302, Test Accuracy:0.96080, Max Test Accuracy:0.96080
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 28, Train Err.:0.00032, Validation Err.:0.11921, Test Accuracy:0.97660, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 28, Train Err.:0.25895, Validation Err.:0.31177, Test Accuracy:0.94010, Max Test Accuracy:0.94010
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 28, Train Err.:0.01539, Validation Err.:0.12042, Test Accuracy:0.96740, Max Test Accuracy:0.96740
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 28, Train Err.:0.00036, Validation Err.:0.11162, Test Accuracy:0.97610, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 28, Train Err.:0.27454, Validation Err.:0.32350, Test Accuracy:0.93620, Max Test Accuracy:0.93720

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 29, Train Err.:0.03440, Validation Err.:0.15515, Test Accuracy:0.96060, Max Test Accuracy:0.96080
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 29, Train Err.:0.00030, Validation Err.:0.11998, Test Accuracy:0.97660, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 29, Train Err.:0.27028, Validation Err.:0.31340, Test Accuracy:0.93690, Max Test Accuracy:0.94010
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 29, Train Err.:0.01432, Validation Err.:0.12217, Test Accuracy:0.96710, Max Test Accuracy:0.96740
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 29, Train Err.:0.00034, Validation Err.:0.11234, Test Accuracy:0.97610, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 29, Train Err.:0.26981, Validation Err.:0.31980, Test Accuracy:0.93780, Max Test Accuracy:0.93780

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 30, Train Err.:0.03342, Validation Err.:0.15591, Test Accuracy:0.96050, Max Test Accuracy:0.96080
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 30, Train Err.:0.00029, Validation Err.:0.12071, Test Accuracy:0.97660, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 30, Train Err.:0.26814, Validation Err.:0.31344, Test Accuracy:0.93920, Max Test Accuracy:0.94010
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 30, Train Err.:0.01303, Validation Err.:0.12385, Test Accuracy:0.96680, Max Test Accuracy:0.96740
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 30, Train Err.:0.00033, Validation Err.:0.11306, Test Accuracy:0.97610, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 30, Train Err.:0.26723, Validation Err.:0.31677, Test Accuracy:0.93850, Max Test Accuracy:0.93850

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 31, Train Err.:0.03243, Validation Err.:0.15623, Test Accuracy:0.96070, Max Test Accuracy:0.96080
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 31, Train Err.:0.00027, Validation Err.:0.12153, Test Accuracy:0.97650, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 31, Train Err.:0.25137, Validation Err.:0.30525, Test Accuracy:0.94090, Max Test Accuracy:0.94090
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 31, Train Err.:0.01225, Validation Err.:0.12443, Test Accuracy:0.96740, Max Test Accuracy:0.96740
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 31, Train Err.:0.00031, Validation Err.:0.11376, Test Accuracy:0.97620, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 31, Train Err.:0.26592, Validation Err.:0.31338, Test Accuracy:0.93860, Max Test Accuracy:0.93860

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 32, Train Err.:0.03175, Validation Err.:0.15853, Test Accuracy:0.96050, Max Test Accuracy:0.96080
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 32, Train Err.:0.00026, Validation Err.:0.12219, Test Accuracy:0.97640, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 32, Train Err.:0.25264, Validation Err.:0.30131, Test Accuracy:0.94210, Max Test Accuracy:0.94210
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 32, Train Err.:0.01159, Validation Err.:0.12526, Test Accuracy:0.96740, Max Test Accuracy:0.96740
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 32, Train Err.:0.00029, Validation Err.:0.11450, Test Accuracy:0.97610, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 32, Train Err.:0.26338, Validation Err.:0.31205, Test Accuracy:0.93760, Max Test Accuracy:0.93860

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 33, Train Err.:0.03032, Validation Err.:0.15880, Test Accuracy:0.96060, Max Test Accuracy:0.96080
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 33, Train Err.:0.00025, Validation Err.:0.12286, Test Accuracy:0.97650, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 33, Train Err.:0.25775, Validation Err.:0.29989, Test Accuracy:0.94080, Max Test Accuracy:0.94210
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 33, Train Err.:0.01096, Validation Err.:0.12662, Test Accuracy:0.96760, Max Test Accuracy:0.96760
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 33, Train Err.:0.00028, Validation Err.:0.11509, Test Accuracy:0.97610, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 33, Train Err.:0.26122, Validation Err.:0.30823, Test Accuracy:0.93990, Max Test Accuracy:0.93990

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 34, Train Err.:0.02934, Validation Err.:0.16047, Test Accuracy:0.96040, Max Test Accuracy:0.96080
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 34, Train Err.:0.00024, Validation Err.:0.12351, Test Accuracy:0.97650, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 34, Train Err.:0.25318, Validation Err.:0.29770, Test Accuracy:0.94240, Max Test Accuracy:0.94240
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 34, Train Err.:0.01021, Validation Err.:0.12845, Test Accuracy:0.96740, Max Test Accuracy:0.96760
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 34, Train Err.:0.00027, Validation Err.:0.11575, Test Accuracy:0.97600, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 34, Train Err.:0.25927, Validation Err.:0.30368, Test Accuracy:0.93970, Max Test Accuracy:0.93990

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 35, Train Err.:0.02863, Validation Err.:0.16114, Test Accuracy:0.96080, Max Test Accuracy:0.96080
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 35, Train Err.:0.00023, Validation Err.:0.12418, Test Accuracy:0.97650, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 35, Train Err.:0.24809, Validation Err.:0.29516, Test Accuracy:0.94240, Max Test Accuracy:0.94240
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 35, Train Err.:0.00977, Validation Err.:0.12905, Test Accuracy:0.96780, Max Test Accuracy:0.96780
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 35, Train Err.:0.00026, Validation Err.:0.11632, Test Accuracy:0.97600, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 35, Train Err.:0.25839, Validation Err.:0.30716, Test Accuracy:0.93870, Max Test Accuracy:0.93990

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 36, Train Err.:0.02715, Validation Err.:0.16195, Test Accuracy:0.96040, Max Test Accuracy:0.96080
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 36, Train Err.:0.00022, Validation Err.:0.12471, Test Accuracy:0.97650, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 36, Train Err.:0.24926, Validation Err.:0.29458, Test Accuracy:0.94320, Max Test Accuracy:0.94320
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 36, Train Err.:0.00930, Validation Err.:0.13014, Test Accuracy:0.96770, Max Test Accuracy:0.96780
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 36, Train Err.:0.00025, Validation Err.:0.11704, Test Accuracy:0.97600, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 36, Train Err.:0.25514, Validation Err.:0.30490, Test Accuracy:0.93850, Max Test Accuracy:0.93990

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 37, Train Err.:0.02647, Validation Err.:0.16439, Test Accuracy:0.96100, Max Test Accuracy:0.96100
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 37, Train Err.:0.00021, Validation Err.:0.12533, Test Accuracy:0.97650, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 37, Train Err.:0.23976, Validation Err.:0.28905, Test Accuracy:0.94420, Max Test Accuracy:0.94420
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 37, Train Err.:0.00892, Validation Err.:0.13128, Test Accuracy:0.96760, Max Test Accuracy:0.96780
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 37, Train Err.:0.00024, Validation Err.:0.11755, Test Accuracy:0.97610, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 37, Train Err.:0.25090, Validation Err.:0.30167, Test Accuracy:0.93870, Max Test Accuracy:0.93990

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 38, Train Err.:0.02544, Validation Err.:0.16335, Test Accuracy:0.96070, Max Test Accuracy:0.96100
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 38, Train Err.:0.00020, Validation Err.:0.12588, Test Accuracy:0.97650, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 38, Train Err.:0.24853, Validation Err.:0.28956, Test Accuracy:0.94340, Max Test Accuracy:0.94420
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 38, Train Err.:0.00854, Validation Err.:0.13268, Test Accuracy:0.96780, Max Test Accuracy:0.96780
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 38, Train Err.:0.00023, Validation Err.:0.11807, Test Accuracy:0.97600, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 38, Train Err.:0.25251, Validation Err.:0.30084, Test Accuracy:0.93950, Max Test Accuracy:0.93990

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 39, Train Err.:0.02501, Validation Err.:0.16580, Test Accuracy:0.96090, Max Test Accuracy:0.96100
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 39, Train Err.:0.00019, Validation Err.:0.12646, Test Accuracy:0.97650, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 39, Train Err.:0.24411, Validation Err.:0.28804, Test Accuracy:0.94440, Max Test Accuracy:0.94440
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 39, Train Err.:0.00805, Validation Err.:0.13392, Test Accuracy:0.96770, Max Test Accuracy:0.96780
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 39, Train Err.:0.00022, Validation Err.:0.11859, Test Accuracy:0.97580, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 39, Train Err.:0.24540, Validation Err.:0.29953, Test Accuracy:0.94030, Max Test Accuracy:0.94030

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 40, Train Err.:0.02445, Validation Err.:0.16672, Test Accuracy:0.96090, Max Test Accuracy:0.96100
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 40, Train Err.:0.00019, Validation Err.:0.12700, Test Accuracy:0.97650, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 40, Train Err.:0.23861, Validation Err.:0.28453, Test Accuracy:0.94490, Max Test Accuracy:0.94490
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 40, Train Err.:0.00774, Validation Err.:0.13473, Test Accuracy:0.96770, Max Test Accuracy:0.96780
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 40, Train Err.:0.00021, Validation Err.:0.11910, Test Accuracy:0.97590, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 40, Train Err.:0.24690, Validation Err.:0.29990, Test Accuracy:0.94120, Max Test Accuracy:0.94120

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 41, Train Err.:0.02432, Validation Err.:0.16874, Test Accuracy:0.96000, Max Test Accuracy:0.96100
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 41, Train Err.:0.00018, Validation Err.:0.12749, Test Accuracy:0.97640, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 41, Train Err.:0.23502, Validation Err.:0.28314, Test Accuracy:0.94530, Max Test Accuracy:0.94530
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 41, Train Err.:0.00752, Validation Err.:0.13642, Test Accuracy:0.96820, Max Test Accuracy:0.96820
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 41, Train Err.:0.00020, Validation Err.:0.11965, Test Accuracy:0.97590, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 41, Train Err.:0.24877, Validation Err.:0.29509, Test Accuracy:0.94160, Max Test Accuracy:0.94160

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 42, Train Err.:0.02285, Validation Err.:0.16934, Test Accuracy:0.96020, Max Test Accuracy:0.96100
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 42, Train Err.:0.00017, Validation Err.:0.12807, Test Accuracy:0.97630, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 42, Train Err.:0.23065, Validation Err.:0.28077, Test Accuracy:0.94620, Max Test Accuracy:0.94620
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 42, Train Err.:0.00703, Validation Err.:0.13766, Test Accuracy:0.96770, Max Test Accuracy:0.96820
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 42, Train Err.:0.00020, Validation Err.:0.12013, Test Accuracy:0.97560, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 42, Train Err.:0.24649, Validation Err.:0.29687, Test Accuracy:0.94010, Max Test Accuracy:0.94160

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 43, Train Err.:0.02299, Validation Err.:0.16936, Test Accuracy:0.96040, Max Test Accuracy:0.96100
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 43, Train Err.:0.00017, Validation Err.:0.12855, Test Accuracy:0.97640, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 43, Train Err.:0.23461, Validation Err.:0.28135, Test Accuracy:0.94460, Max Test Accuracy:0.94620
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 43, Train Err.:0.00681, Validation Err.:0.13921, Test Accuracy:0.96770, Max Test Accuracy:0.96820
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 43, Train Err.:0.00019, Validation Err.:0.12063, Test Accuracy:0.97570, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 43, Train Err.:0.24317, Validation Err.:0.29660, Test Accuracy:0.94070, Max Test Accuracy:0.94160

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 44, Train Err.:0.02193, Validation Err.:0.17098, Test Accuracy:0.96100, Max Test Accuracy:0.96100
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 44, Train Err.:0.00016, Validation Err.:0.12907, Test Accuracy:0.97650, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 44, Train Err.:0.23058, Validation Err.:0.28014, Test Accuracy:0.94460, Max Test Accuracy:0.94620
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 44, Train Err.:0.00652, Validation Err.:0.13996, Test Accuracy:0.96760, Max Test Accuracy:0.96820
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 44, Train Err.:0.00018, Validation Err.:0.12114, Test Accuracy:0.97560, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 44, Train Err.:0.24141, Validation Err.:0.29775, Test Accuracy:0.93990, Max Test Accuracy:0.94160

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 45, Train Err.:0.02191, Validation Err.:0.17240, Test Accuracy:0.96070, Max Test Accuracy:0.96100
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 45, Train Err.:0.00016, Validation Err.:0.12953, Test Accuracy:0.97650, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 45, Train Err.:0.23597, Validation Err.:0.27892, Test Accuracy:0.94560, Max Test Accuracy:0.94620
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 45, Train Err.:0.00627, Validation Err.:0.14153, Test Accuracy:0.96720, Max Test Accuracy:0.96820
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 45, Train Err.:0.00018, Validation Err.:0.12168, Test Accuracy:0.97560, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 45, Train Err.:0.24047, Validation Err.:0.29621, Test Accuracy:0.93930, Max Test Accuracy:0.94160

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 46, Train Err.:0.02130, Validation Err.:0.17170, Test Accuracy:0.96090, Max Test Accuracy:0.96100
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 46, Train Err.:0.00015, Validation Err.:0.13001, Test Accuracy:0.97650, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 46, Train Err.:0.23316, Validation Err.:0.27729, Test Accuracy:0.94490, Max Test Accuracy:0.94620
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 46, Train Err.:0.00601, Validation Err.:0.14269, Test Accuracy:0.96710, Max Test Accuracy:0.96820
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 46, Train Err.:0.00017, Validation Err.:0.12198, Test Accuracy:0.97560, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 46, Train Err.:0.23721, Validation Err.:0.29099, Test Accuracy:0.94170, Max Test Accuracy:0.94170

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 47, Train Err.:0.02095, Validation Err.:0.17358, Test Accuracy:0.96080, Max Test Accuracy:0.96100
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 47, Train Err.:0.00015, Validation Err.:0.13046, Test Accuracy:0.97650, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 47, Train Err.:0.23210, Validation Err.:0.27684, Test Accuracy:0.94560, Max Test Accuracy:0.94620
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 47, Train Err.:0.00566, Validation Err.:0.14375, Test Accuracy:0.96770, Max Test Accuracy:0.96820
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 47, Train Err.:0.00017, Validation Err.:0.12249, Test Accuracy:0.97530, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 47, Train Err.:0.23684, Validation Err.:0.29381, Test Accuracy:0.93980, Max Test Accuracy:0.94170

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 48, Train Err.:0.02009, Validation Err.:0.17410, Test Accuracy:0.96010, Max Test Accuracy:0.96100
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 48, Train Err.:0.00014, Validation Err.:0.13089, Test Accuracy:0.97650, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 48, Train Err.:0.23556, Validation Err.:0.27700, Test Accuracy:0.94560, Max Test Accuracy:0.94620
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 48, Train Err.:0.00546, Validation Err.:0.14534, Test Accuracy:0.96750, Max Test Accuracy:0.96820
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 48, Train Err.:0.00016, Validation Err.:0.12290, Test Accuracy:0.97530, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 48, Train Err.:0.23525, Validation Err.:0.28893, Test Accuracy:0.94160, Max Test Accuracy:0.94170

N2, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 49, Train Err.:0.02023, Validation Err.:0.17624, Test Accuracy:0.96070, Max Test Accuracy:0.96100
N2, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 49, Train Err.:0.00014, Validation Err.:0.13131, Test Accuracy:0.97660, Max Test Accuracy:0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 49, Train Err.:0.22922, Validation Err.:0.27559, Test Accuracy:0.94510, Max Test Accuracy:0.94620
He, AdaGrad, No_Batch_Norm, lambda=0.0-Epoch: 49, Train Err.:0.00524, Validation Err.:0.14633, Test Accuracy:0.96740, Max Test Accuracy:0.96820
He, AdaGrad, Batch_Norm, lambda=0.0   -Epoch: 49, Train Err.:0.00016, Validation Err.:0.12329, Test Accuracy:0.97540, Max Test Accuracy:0.97660
He, AdaGrad, Batch_Norm, lambda=0.1   -Epoch: 49, Train Err.:0.23311, Validation Err.:0.28942, Test Accuracy:0.94110, Max Test Accuracy:0.94170

In [11]:
f, axarr = plt.subplots(2, 2, figsize=(20, 12))
for key in markers.keys():
    axarr[0, 0].plot(epoch_list[1:], train_errors[key][1:], marker=markers[key], markevery=2, label=key)
axarr[0, 0].set_ylabel('Train - Total Error')
axarr[0, 0].set_xlabel('Epochs')
axarr[0, 0].grid(True)
axarr[0, 0].set_title('Train Error')
axarr[0, 0].legend(loc='upper right')

for key in markers.keys():
    axarr[0, 1].plot(epoch_list[1:], validation_errors[key][1:], marker=markers[key], markevery=2, label=key)
axarr[0, 1].set_ylabel('Validation - Total Error')
axarr[0, 1].set_xlabel('Epochs')
axarr[0, 1].grid(True)
axarr[0, 1].set_title('Validation Error')
axarr[0, 1].legend(loc='upper right')

for key in markers.keys():
    axarr[1, 0].plot(epoch_list[1:], train_errors[key][1:], marker=markers[key], markevery=2, label=key)
axarr[1, 0].set_ylabel('Train - Total Error')
axarr[1, 0].set_xlabel('Epochs')
axarr[1, 0].grid(True)
axarr[1, 0].set_ylim(0, 0.1)
axarr[1, 0].set_title('Train Error (0.00 ~ 0.10)')
axarr[1, 0].legend(loc='upper right')

for key in markers.keys():
    axarr[1, 1].plot(epoch_list[1:], validation_errors[key][1:], marker=markers[key], markevery=2, label=key)
axarr[1, 1].set_ylabel('Validation - Total Error')
axarr[1, 1].set_xlabel('Epochs')
axarr[1, 1].grid(True)
axarr[1, 1].set_ylim(0.05, 0.2)
axarr[1, 1].set_title('Validation Error (0.05 ~ 0.20)')
axarr[1, 1].legend(loc='upper right')

f.subplots_adjust(hspace=0.3)

plt.show()
In [12]:
f, axarr = plt.subplots(2, 1, figsize=(15,10))
for key in markers.keys():
    axarr[0].plot(epoch_list[1:], test_accuracy_values[key][1:], marker=markers[key], markevery=1, label=key)
axarr[0].set_ylabel('Test Accuracy')
axarr[0].set_xlabel('Epochs')
axarr[0].grid(True)
axarr[0].set_title('Test Accuracy')
axarr[0].legend(loc='lower right')

for key in markers.keys():
    axarr[1].plot(epoch_list[1:], test_accuracy_values[key][1:], marker=markers[key], markevery=1, label=key)
axarr[1].set_ylabel('Test Accuracy')
axarr[1].set_xlabel('Epochs')
axarr[1].grid(True)
axarr[1].set_ylim(0.92, 0.99)
axarr[1].set_title('Test Accuracy (0.92 ~ 0.99)')
axarr[1].legend(loc='lower right')

f.subplots_adjust(hspace=0.3)
plt.show()
In [13]:
for key in markers.keys():
    print("{0:26s} - Epoch:{1:3d}, Max Test Accuracy: {2:7.5f}".format(key, max_test_accuracy_epoch[key], max_test_accuracy_value[key]))
N2, AdaGrad, No_Batch_Norm, lambda=0.0 - Epoch: 37, Max Test Accuracy: 0.96100
N2, AdaGrad, Batch_Norm, lambda=0.0 - Epoch: 24, Max Test Accuracy: 0.97700
N2, AdaGrad, Batch_Norm, lambda=0.1 - Epoch: 42, Max Test Accuracy: 0.94620
He, AdaGrad, No_Batch_Norm, lambda=0.0 - Epoch: 41, Max Test Accuracy: 0.96820
He, AdaGrad, Batch_Norm, lambda=0.0 - Epoch: 18, Max Test Accuracy: 0.97660
He, AdaGrad, Batch_Norm, lambda=0.1 - Epoch: 46, Max Test Accuracy: 0.94170