GATE Neural Network with Three Neurons

In [3]:
import numpy as np
import random
import math

1. Three Neurons Model with Only Numerical Differentiation

In [4]:
class Neuron1:
    def __init__(self):
        self.w1 = np.array([random.random(), random.random()])   # weight of one input
        self.b1 = np.array([random.random()])   # bias
        print("Neuron1 - Initial w1: {0}, b1: {1}".format(self.w1, self.b1))

    def u1(self, x):
        return np.dot(self.w1, x) + self.b1

    def f(self, u1):
        return max(0.0, u1)

    def z1(self, x):
        u1 = self.u1(x)
        return self.f(u1)

class Neuron2:
    def __init__(self):
        self.w2 = np.array([random.random(), random.random()])   # weight of one input
        self.b2 = np.array([random.random()])   # bias
        print("Neuron2 - Initial w2: {0}, b2: {1}".format(self.w2, self.b2))

    def u2(self, x):
        return np.dot(self.w2, x) + self.b2

    def f(self, u2):
        return max(0.0, u2)

    def z2(self, x):
        u2 = self.u2(x)
        return self.f(u2)

class Neuron3:
    def __init__(self, n1, n2):
        self.w3 = np.array([random.random(), random.random()])   # weight of one input
        self.b3 = np.array([random.random()])   # bias
        self.n1 = n1
        self.n2 = n2
        print("Neuron2 - Initial w3: {0}, b3: {1}".format(self.w3, self.b3))

    def u3(self, x):
        z1 = self.n1.z1(x)
        z2 = self.n2.z2(x)
        z = np.array([z1, z2])
        return np.dot(self.w3, z) + self.b3

    def f(self, u3):
        return max(0.0, u3)

    def z3(self, x):
        u3 = self.u3(x)
        return self.f(u3)

    def squared_error(self, x, z_target):
        return 1.0 / 2.0 * math.pow(self.z3(x) - z_target, 2)

    def numerical_derivative(self, params, x, z_target):
        delta = 1e-4 # 0.0001
        grad = np.zeros_like(params)
        
        for idx in range(params.size):
            temp_val = params[idx]

            #f(x + delta) 계산
            params[idx] = params[idx] + delta
            fxh1 = self.squared_error(x, z_target)
            
            #f(x - delta) 계산
            params[idx] = params[idx] - delta
            fxh2 = self.squared_error(x, z_target)
            
            #f(x + delta) - f(x - delta) / 2 * delta 계산
            grad[idx] = (fxh1 - fxh2) / (2 * delta)
            params[idx] = temp_val
        return grad

    def learning(self, alpha, maxEpoch, data):
        print_epoch_period = 50
        for i in range(maxEpoch):
            for idx in range(data.numTrainData):
                x = data.training_input_value[idx]
                z_target = data.training_z_target[idx]

                self.n1.w1 = self.n1.w1 - alpha * self.numerical_derivative(self.n1.w1, x, z_target)
                self.n1.b1 = self.n1.b1 - alpha * self.numerical_derivative(self.n1.b1, x, z_target)
                self.n2.w2 = self.n2.w2 - alpha * self.numerical_derivative(self.n2.w2, x, z_target)
                self.n2.b2 = self.n2.b2 - alpha * self.numerical_derivative(self.n2.b2, x, z_target)
                self.w3 = self.w3 - alpha * self.numerical_derivative(self.w3, x, z_target)
                self.b3 = self.b3 - alpha * self.numerical_derivative(self.b3, x, z_target)

            if i % print_epoch_period == 0:
                sum = 0.0
                for idx in range(data.numTrainData):
                    sum = sum + self.squared_error(data.training_input_value[idx], data.training_z_target[idx])
                print("Epoch {0:3d}: Err: {1:5.3f}, w1_0: {2:5.3f}, w1_1: {3:5.3f}, b1: {4:5.3f}, w2_0: {5:5.3f}, w2_1: {6:5.3f}, b2: {7:5.3f}, w3_0: {8:5.3f}, w3_1: {9:5.3f}, b3: {10:5.3f}".format(
                    i, 
                    sum / data.numTrainData,
                    self.n1.w1[0],
                    self.n1.w1[1],
                    self.n1.b1[0],
                    self.n2.w2[0],
                    self.n2.w2[1],
                    self.n2.b2[0],                      
                    self.w3[0],
                    self.w3[1],
                    self.b3[0])
                )

2. OR Gate with Three Neurons

In [5]:
class Data:
    def __init__(self):
        self.training_input_value = np.array([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)])
        self.training_z_target = np.array([0.0, 1.0, 1.0, 1.0])
        self.numTrainData = len(self.training_input_value)

if __name__ == '__main__':
    n1 = Neuron1()
    n2 = Neuron2()
    n3 = Neuron3(n1, n2)
    d = Data()
    for idx in range(d.numTrainData):
        x = d.training_input_value[idx]
        z3 = n3.z3(x)
        z_target = d.training_z_target[idx]
        error = n3.squared_error(x, z_target)
        print("x: {0:s}, z3: {1:s}, z_target: {2:s}, error: {3:7.5f}".format(str(x), str(z3), str(z_target), error))        

    n3.learning(0.05, 1000, d)

    for idx in range(d.numTrainData):
        x = d.training_input_value[idx]
        z3 = n3.z3(x)
        z_target = d.training_z_target[idx]
        error = n3.squared_error(x, z_target)
        print("x: {0:s}, z3: {1:s}, z_target: {2:s}, error: {3:7.5f}".format(str(x), str(z3), str(z_target), error))        
Neuron1 - Initial w1: [ 0.38522756  0.20353551], b1: [ 0.42084058]
Neuron2 - Initial w2: [ 0.1493424   0.21953654], b2: [ 0.8024181]
Neuron2 - Initial w3: [ 0.796124    0.13468804], b3: [ 0.09694243]
x: [ 0.  0.], z3: [ 0.54005983], z_target: 0.0, error: 0.14583
x: [ 1.  0.], z3: [ 0.86686338], z_target: 1.0, error: 0.00886
x: [ 0.  1.], z3: [ 0.73166828], z_target: 1.0, error: 0.03600
x: [ 1.  1.], z3: [ 1.05847183], z_target: 1.0, error: 0.00171
Epoch   0: Err: 0.048, w1_0: 0.387, w1_1: 0.208, b1: 0.418, w2_0: 0.150, w2_1: 0.220, b2: 0.802, w3_0: 0.797, w3_1: 0.133, b3: 0.093
Epoch  50: Err: 0.035, w1_0: 0.468, w1_1: 0.376, b1: 0.333, w2_0: 0.160, w2_1: 0.243, b2: 0.785, w3_0: 0.867, w3_1: 0.118, b3: -0.008
Epoch 100: Err: 0.032, w1_0: 0.495, w1_1: 0.442, b1: 0.292, w2_0: 0.161, w2_1: 0.251, b2: 0.777, w3_0: 0.910, w3_1: 0.110, b3: -0.050
Epoch 150: Err: 0.032, w1_0: 0.500, w1_1: 0.461, b1: 0.275, w2_0: 0.159, w2_1: 0.252, b2: 0.772, w3_0: 0.933, w3_1: 0.108, b3: -0.064
Epoch 200: Err: 0.031, w1_0: 0.497, w1_1: 0.463, b1: 0.266, w2_0: 0.156, w2_1: 0.251, b2: 0.767, w3_0: 0.949, w3_1: 0.107, b3: -0.067
Epoch 250: Err: 0.031, w1_0: 0.492, w1_1: 0.460, b1: 0.260, w2_0: 0.152, w2_1: 0.250, b2: 0.763, w3_0: 0.961, w3_1: 0.108, b3: -0.067
Epoch 300: Err: 0.031, w1_0: 0.487, w1_1: 0.455, b1: 0.255, w2_0: 0.149, w2_1: 0.248, b2: 0.760, w3_0: 0.974, w3_1: 0.108, b3: -0.067
Epoch 350: Err: 0.031, w1_0: 0.481, w1_1: 0.450, b1: 0.250, w2_0: 0.146, w2_1: 0.247, b2: 0.756, w3_0: 0.986, w3_1: 0.109, b3: -0.066
Epoch 400: Err: 0.031, w1_0: 0.475, w1_1: 0.444, b1: 0.246, w2_0: 0.143, w2_1: 0.245, b2: 0.752, w3_0: 0.998, w3_1: 0.110, b3: -0.065
Epoch 450: Err: 0.031, w1_0: 0.470, w1_1: 0.439, b1: 0.241, w2_0: 0.140, w2_1: 0.244, b2: 0.748, w3_0: 1.011, w3_1: 0.112, b3: -0.064
Epoch 500: Err: 0.031, w1_0: 0.464, w1_1: 0.433, b1: 0.237, w2_0: 0.137, w2_1: 0.243, b2: 0.745, w3_0: 1.024, w3_1: 0.113, b3: -0.063
Epoch 550: Err: 0.031, w1_0: 0.458, w1_1: 0.427, b1: 0.232, w2_0: 0.134, w2_1: 0.241, b2: 0.741, w3_0: 1.037, w3_1: 0.114, b3: -0.062
Epoch 600: Err: 0.031, w1_0: 0.452, w1_1: 0.421, b1: 0.228, w2_0: 0.131, w2_1: 0.240, b2: 0.738, w3_0: 1.050, w3_1: 0.116, b3: -0.061
Epoch 650: Err: 0.031, w1_0: 0.446, w1_1: 0.416, b1: 0.224, w2_0: 0.129, w2_1: 0.239, b2: 0.734, w3_0: 1.064, w3_1: 0.117, b3: -0.061
Epoch 700: Err: 0.031, w1_0: 0.441, w1_1: 0.410, b1: 0.220, w2_0: 0.126, w2_1: 0.238, b2: 0.731, w3_0: 1.078, w3_1: 0.119, b3: -0.060
Epoch 750: Err: 0.031, w1_0: 0.435, w1_1: 0.404, b1: 0.216, w2_0: 0.123, w2_1: 0.237, b2: 0.727, w3_0: 1.092, w3_1: 0.121, b3: -0.059
Epoch 800: Err: 0.031, w1_0: 0.429, w1_1: 0.398, b1: 0.211, w2_0: 0.121, w2_1: 0.236, b2: 0.724, w3_0: 1.107, w3_1: 0.123, b3: -0.059
Epoch 850: Err: 0.031, w1_0: 0.423, w1_1: 0.392, b1: 0.207, w2_0: 0.118, w2_1: 0.235, b2: 0.720, w3_0: 1.122, w3_1: 0.125, b3: -0.058
Epoch 900: Err: 0.031, w1_0: 0.418, w1_1: 0.387, b1: 0.203, w2_0: 0.116, w2_1: 0.234, b2: 0.717, w3_0: 1.137, w3_1: 0.127, b3: -0.058
Epoch 950: Err: 0.031, w1_0: 0.412, w1_1: 0.381, b1: 0.199, w2_0: 0.114, w2_1: 0.233, b2: 0.713, w3_0: 1.152, w3_1: 0.129, b3: -0.057
x: [ 0.  0.], z3: [ 0.26481341], z_target: 0.0, error: 0.03506
x: [ 1.  0.], z3: [ 0.75387418], z_target: 1.0, error: 0.03029
x: [ 0.  1.], z3: [ 0.73347869], z_target: 1.0, error: 0.03552
x: [ 1.  1.], z3: [ 1.22253946], z_target: 1.0, error: 0.02476

3. AND Gate with Three Neurons

In [7]:
class Data:
    def __init__(self):
        self.training_input_value = np.array([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)])
        self.training_z_target = np.array([0.0, 0.0, 0.0, 1.0])
        self.numTrainData = len(self.training_input_value)

if __name__ == '__main__':
    n1 = Neuron1()
    n2 = Neuron2()
    n3 = Neuron3(n1, n2)
    d = Data()
    for idx in range(d.numTrainData):
        x = d.training_input_value[idx]
        z3 = n3.z3(x)
        z_target = d.training_z_target[idx]
        error = n3.squared_error(x, z_target)
        print("x: {0:s}, z3: {1:s}, z_target: {2:s}, error: {3:7.5f}".format(str(x), str(z3), str(z_target), error))        

    n3.learning(0.05, 1000, d)

    for idx in range(d.numTrainData):
        x = d.training_input_value[idx]
        z3 = n3.z3(x)
        z_target = d.training_z_target[idx]
        error = n3.squared_error(x, z_target)
        print("x: {0:s}, z3: {1:s}, z_target: {2:s}, error: {3:7.5f}".format(str(x), str(z3), str(z_target), error))        
Neuron1 - Initial w1: [ 0.20432704  0.64053379], b1: [ 0.38208965]
Neuron2 - Initial w2: [ 0.927959    0.46040146], b2: [ 0.87461856]
Neuron2 - Initial w3: [ 0.73126551  0.1859103 ], b3: [ 0.88054529]
x: [ 0.  0.], z3: [ 1.32255487], z_target: 0.0, error: 0.87458
x: [ 1.  0.], z3: [ 1.64448932], z_target: 0.0, error: 1.35217
x: [ 0.  1.], z3: [ 1.87654852], z_target: 0.0, error: 1.76072
x: [ 1.  1.], z3: [ 2.19848297], z_target: 1.0, error: 0.71818
Epoch   0: Err: 0.518, w1_0: 0.167, w1_1: 0.602, b1: 0.293, w2_0: 0.922, w2_1: 0.456, b2: 0.859, w3_0: 0.649, w3_1: 0.007, b3: 0.766
Epoch  50: Err: 0.047, w1_0: 0.411, w1_1: 0.639, b1: -0.036, w2_0: 0.821, w2_1: 0.427, b2: 0.938, w3_0: 0.738, w3_1: -0.142, b3: 0.182
Epoch 100: Err: 0.021, w1_0: 0.611, w1_1: 0.689, b1: -0.187, w2_0: 0.784, w2_1: 0.415, b2: 0.955, w3_0: 0.919, w3_1: -0.111, b3: -0.030
Epoch 150: Err: 0.007, w1_0: 0.742, w1_1: 0.746, b1: -0.306, w2_0: 0.767, w2_1: 0.407, b2: 0.966, w3_0: 1.067, w3_1: -0.111, b3: -0.163
Epoch 200: Err: 0.002, w1_0: 0.813, w1_1: 0.791, b1: -0.380, w2_0: 0.759, w2_1: 0.403, b2: 0.973, w3_0: 1.159, w3_1: -0.119, b3: -0.237
Epoch 250: Err: 0.000, w1_0: 0.848, w1_1: 0.818, b1: -0.420, w2_0: 0.756, w2_1: 0.401, b2: 0.977, w3_0: 1.208, w3_1: -0.124, b3: -0.274
Epoch 300: Err: 0.000, w1_0: 0.865, w1_1: 0.831, b1: -0.439, w2_0: 0.755, w2_1: 0.399, b2: 0.978, w3_0: 1.231, w3_1: -0.127, b3: -0.291
Epoch 350: Err: 0.000, w1_0: 0.872, w1_1: 0.837, b1: -0.448, w2_0: 0.754, w2_1: 0.399, b2: 0.979, w3_0: 1.242, w3_1: -0.129, b3: -0.298
Epoch 400: Err: 0.000, w1_0: 0.876, w1_1: 0.839, b1: -0.452, w2_0: 0.754, w2_1: 0.399, b2: 0.980, w3_0: 1.246, w3_1: -0.130, b3: -0.302
Epoch 450: Err: 0.000, w1_0: 0.877, w1_1: 0.841, b1: -0.453, w2_0: 0.753, w2_1: 0.399, b2: 0.980, w3_0: 1.248, w3_1: -0.130, b3: -0.303
Epoch 500: Err: 0.000, w1_0: 0.878, w1_1: 0.841, b1: -0.454, w2_0: 0.753, w2_1: 0.398, b2: 0.980, w3_0: 1.249, w3_1: -0.130, b3: -0.304
Epoch 550: Err: 0.000, w1_0: 0.878, w1_1: 0.841, b1: -0.454, w2_0: 0.753, w2_1: 0.398, b2: 0.980, w3_0: 1.250, w3_1: -0.130, b3: -0.304
Epoch 600: Err: 0.000, w1_0: 0.878, w1_1: 0.841, b1: -0.454, w2_0: 0.753, w2_1: 0.398, b2: 0.980, w3_0: 1.250, w3_1: -0.131, b3: -0.304
Epoch 650: Err: 0.000, w1_0: 0.879, w1_1: 0.841, b1: -0.454, w2_0: 0.753, w2_1: 0.398, b2: 0.980, w3_0: 1.250, w3_1: -0.131, b3: -0.304
Epoch 700: Err: 0.000, w1_0: 0.879, w1_1: 0.842, b1: -0.454, w2_0: 0.753, w2_1: 0.398, b2: 0.980, w3_0: 1.250, w3_1: -0.131, b3: -0.304
Epoch 750: Err: 0.000, w1_0: 0.879, w1_1: 0.842, b1: -0.454, w2_0: 0.753, w2_1: 0.398, b2: 0.980, w3_0: 1.250, w3_1: -0.131, b3: -0.304
Epoch 800: Err: 0.000, w1_0: 0.879, w1_1: 0.842, b1: -0.454, w2_0: 0.753, w2_1: 0.398, b2: 0.980, w3_0: 1.250, w3_1: -0.131, b3: -0.304
Epoch 850: Err: 0.000, w1_0: 0.879, w1_1: 0.842, b1: -0.454, w2_0: 0.753, w2_1: 0.398, b2: 0.980, w3_0: 1.250, w3_1: -0.131, b3: -0.304
Epoch 900: Err: 0.000, w1_0: 0.879, w1_1: 0.842, b1: -0.454, w2_0: 0.753, w2_1: 0.398, b2: 0.980, w3_0: 1.250, w3_1: -0.131, b3: -0.304
Epoch 950: Err: 0.000, w1_0: 0.879, w1_1: 0.842, b1: -0.454, w2_0: 0.753, w2_1: 0.398, b2: 0.980, w3_0: 1.250, w3_1: -0.131, b3: -0.304
x: [ 0.  0.], z3: 0.0, z_target: 0.0, error: 0.00000
x: [ 1.  0.], z3: 0.0, z_target: 0.0, error: 0.00000
x: [ 0.  1.], z3: 0.0, z_target: 0.0, error: 0.00000
x: [ 1.  1.], z3: [ 0.99991841], z_target: 1.0, error: 0.00000

4. XOR Gate with Three Neurons

In [12]:
class Data:
    def __init__(self):
        self.training_input_value = np.array([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)])
        self.training_z_target = np.array([0.0, 1.0, 1.0, 0.0])
        self.numTrainData = len(self.training_input_value)

if __name__ == '__main__':
    n1 = Neuron1()
    n2 = Neuron2()
    n3 = Neuron3(n1, n2)
    d = Data()
    for idx in range(d.numTrainData):
        x = d.training_input_value[idx]
        z3 = n3.z3(x)
        z_target = d.training_z_target[idx]
        error = n3.squared_error(x, z_target)
        print("x: {0:s}, z3: {1:s}, z_target: {2:s}, error: {3:7.5f}".format(str(x), str(z3), str(z_target), error))        

    n3.learning(0.05, 1000, d)

    for idx in range(d.numTrainData):
        x = d.training_input_value[idx]
        z3 = n3.z3(x)
        z_target = d.training_z_target[idx]
        error = n3.squared_error(x, z_target)
        print("x: {0:s}, z3: {1:s}, z_target: {2:s}, error: {3:7.5f}".format(str(x), str(z3), str(z_target), error))        
Neuron1 - Initial w1: [ 0.98041965  0.33318097], b1: [ 0.90704043]
Neuron2 - Initial w2: [ 0.24376458  0.0993518 ], b2: [ 0.14829085]
Neuron2 - Initial w3: [ 0.73292247  0.86866256], b3: [ 0.31973756]
x: [ 0.  0.], z3: [ 1.11334259], z_target: 0.0, error: 0.61977
x: [ 1.  0.], z3: [ 2.04366335], z_target: 1.0, error: 0.54462
x: [ 0.  1.], z3: [ 1.4438416], z_target: 1.0, error: 0.09850
x: [ 1.  1.], z3: [ 2.37416236], z_target: 0.0, error: 2.81832
Epoch   0: Err: 0.426, w1_0: 0.930, w1_1: 0.295, b1: 0.833, w2_0: 0.181, w2_1: 0.051, b2: 0.059, w3_0: 0.565, w3_1: 0.843, b3: 0.226
Epoch  50: Err: 0.110, w1_0: 0.789, w1_1: 0.242, b1: 0.766, w2_0: 0.221, w2_1: -0.278, b2: 0.041, w3_0: 0.169, w3_1: 0.895, b3: 0.187
Epoch 100: Err: 0.092, w1_0: 0.742, w1_1: 0.252, b1: 0.765, w2_0: 0.413, w2_1: -0.421, b2: 0.000, w3_0: 0.067, w3_1: 1.004, b3: 0.289
Epoch 150: Err: 0.084, w1_0: 0.727, w1_1: 0.248, b1: 0.757, w2_0: 0.522, w2_1: -0.529, b2: 0.002, w3_0: -0.026, w3_1: 1.096, b3: 0.374
Epoch 200: Err: 0.080, w1_0: 0.741, w1_1: 0.211, b1: 0.743, w2_0: 0.579, w2_1: -0.593, b2: -0.002, w3_0: -0.107, w3_1: 1.152, b3: 0.451
Epoch 250: Err: 0.076, w1_0: 0.777, w1_1: 0.133, b1: 0.725, w2_0: 0.621, w2_1: -0.625, b2: -0.009, w3_0: -0.187, w3_1: 1.187, b3: 0.527
Epoch 300: Err: 0.069, w1_0: 0.824, w1_1: 0.008, b1: 0.702, w2_0: 0.652, w2_1: -0.662, b2: 0.001, w3_0: -0.279, w3_1: 1.221, b3: 0.602
Epoch 350: Err: 0.058, w1_0: 0.867, w1_1: -0.166, b1: 0.673, w2_0: 0.699, w2_1: -0.707, b2: 0.001, w3_0: -0.394, w3_1: 1.267, b3: 0.674
Epoch 400: Err: 0.042, w1_0: 0.889, w1_1: -0.375, b1: 0.644, w2_0: 0.771, w2_1: -0.773, b2: -0.004, w3_0: -0.537, w3_1: 1.338, b3: 0.725
Epoch 450: Err: 0.024, w1_0: 0.888, w1_1: -0.576, b1: 0.628, w2_0: 0.858, w2_1: -0.862, b2: -0.000, w3_0: -0.693, w3_1: 1.432, b3: 0.739
Epoch 500: Err: 0.011, w1_0: 0.873, w1_1: -0.744, b1: 0.744, w2_0: 0.950, w2_1: -0.954, b2: 0.001, w3_0: -0.807, w3_1: 1.533, b3: 0.774
Epoch 550: Err: 0.005, w1_0: 0.879, w1_1: -0.864, b1: 0.865, w2_0: 1.019, w2_1: -1.024, b2: 0.001, w3_0: -0.880, w3_1: 1.610, b3: 0.844
Epoch 600: Err: 0.002, w1_0: 0.907, w1_1: -0.930, b1: 0.931, w2_0: 1.065, w2_1: -1.071, b2: 0.001, w3_0: -0.927, w3_1: 1.662, b3: 0.899
Epoch 650: Err: 0.001, w1_0: 0.935, w1_1: -0.963, b1: 0.965, w2_0: 1.095, w2_1: -1.100, b2: 0.003, w3_0: -0.957, w3_1: 1.695, b3: 0.936
Epoch 700: Err: 0.000, w1_0: 0.956, w1_1: -0.983, b1: 0.983, w2_0: 1.113, w2_1: -1.120, b2: 0.007, w3_0: -0.977, w3_1: 1.716, b3: 0.960
Epoch 750: Err: 0.000, w1_0: 0.969, w1_1: -0.995, b1: 0.996, w2_0: 1.124, w2_1: -1.134, b2: 0.010, w3_0: -0.989, w3_1: 1.730, b3: 0.975
Epoch 800: Err: 0.000, w1_0: 0.978, w1_1: -1.003, b1: 1.004, w2_0: 1.130, w2_1: -1.143, b2: 0.012, w3_0: -0.996, w3_1: 1.738, b3: 0.984
Epoch 850: Err: 0.000, w1_0: 0.983, w1_1: -1.009, b1: 1.009, w2_0: 1.135, w2_1: -1.148, b2: 0.013, w3_0: -1.001, w3_1: 1.743, b3: 0.990
Epoch 900: Err: 0.000, w1_0: 0.986, w1_1: -1.012, b1: 1.012, w2_0: 1.137, w2_1: -1.151, b2: 0.014, w3_0: -1.004, w3_1: 1.747, b3: 0.994
Epoch 950: Err: 0.000, w1_0: 0.988, w1_1: -1.014, b1: 1.014, w2_0: 1.139, w2_1: -1.153, b2: 0.014, w3_0: -1.006, w3_1: 1.749, b3: 0.996
x: [ 0.  0.], z3: [ 0.00059514], z_target: 0.0, error: 0.00000
x: [ 1.  0.], z3: [ 0.99918419], z_target: 1.0, error: 0.00000
x: [ 0.  1.], z3: [ 0.99755646], z_target: 1.0, error: 0.00000
x: [ 1.  1.], z3: [ 0.00149791], z_target: 0.0, error: 0.00000