# GATE Neural Network with Linear Two Neurons¶

In [1]:
import numpy as np
import random
import math
from IPython.display import display


## 1. Linear Two Neurons Model with Only Numerical Differentiation¶

In [14]:
class Neuron1:
def __init__(self):
self.w1 = np.array([random.random(), random.random()])   # weight of one input
self.b1 = np.array([random.random()])  # bias
print("Neuron1 - Initial w1: {0}, b1: {1}".format(self.w1, self.b1))

def u1(self, x):
return np.dot(self.w1, x) + self.b1

def f(self, u1):
return max(0.0, u1)

def z1(self, x):
u1 = self.u1(x)
return self.f(u1)

class Neuron2:
def __init__(self, n1):
self.w2 = np.array([random.random()])   # weight of one input
self.b2 = np.array([random.random()])   # bias
self.n1 = n1
print("Neuron2 - Initial w2: {0}, b2: {1}".format(self.w2, self.b2))

def u2(self, x):
z1 = self.n1.z1(x)
return self.w2 * z1 + self.b2

def f(self, u2):
return max(0.0, u2)

def z2(self, x):
u2 = self.u2(x)
return self.f(u2)

def squared_error(self, x, z_target):
return 1.0 / 2.0 * math.pow(self.z2(x) - z_target, 2)

def numerical_derivative(self, params, x, z_target):
delta = 1e-4 # 0.0001

for idx in range(params.size):
temp_val = params[idx]

#f(x + delta) 계산
params[idx] = params[idx] + delta
fxh1 = self.squared_error(x, z_target)

#f(x - delta) 계산
params[idx] = params[idx] - delta
fxh2 = self.squared_error(x, z_target)

#f(x + delta) - f(x - delta) / 2 * delta 계산
grad[idx] = (fxh1 - fxh2) / (2 * delta)
params[idx] = temp_val

def learning(self, alpha, maxEpoch, data):
print_epoch_period = 20
for i in range(maxEpoch):
for idx in range(data.numTrainData):
x = data.training_input_value[idx]
z_target = data.training_z_target[idx]

self.n1.w1 = self.n1.w1 - alpha * self.numerical_derivative(self.n1.w1, x, z_target)
self.n1.b1 = self.n1.b1 - alpha * self.numerical_derivative(self.n1.b1, x, z_target)
self.w2 = self.w2 - alpha * self.numerical_derivative(self.w2, x, z_target)
self.b2 = self.b2 - alpha * self.numerical_derivative(self.b2, x, z_target)

if i % print_epoch_period == 0:
sum = 0.0
for idx in range(data.numTrainData):
sum = sum + self.squared_error(data.training_input_value[idx], data.training_z_target[idx])
print("Epoch{0:4d}: Error: {1:7.5f}, w1_0: {2:7.5f}, w1_1: {3:7.5f}, b1: {4:7.5f}, w2: {5:7.5f}, b2: {6:7.5f}".format(
i,
sum / data.numTrainData,
self.n1.w1[0],
self.n1.w1[1],
self.n1.b1[0],
self.w2[0],
self.b2[0])
)


## 2. OR Gate with Linear Two Neurons¶

In [15]:
class Data:
def __init__(self):
self.training_input_value = np.array([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)])
self.training_z_target = np.array([0.0, 1.0, 1.0, 1.0])
self.numTrainData = len(self.training_input_value)

if __name__ == '__main__':
n1 = Neuron1()
n2 = Neuron2(n1)
d = Data()
for idx in range(d.numTrainData):
x = d.training_input_value[idx]
z2 = n2.z2(x)
z_target = d.training_z_target[idx]
error = n2.squared_error(x, z_target)
print("x: {0:s}, z2: {1:s}, z_target: {2:s}, error: {3:7.5f}".format(str(x), str(z2), str(z_target), error))

n2.learning(0.01, 750, d)

for idx in range(d.numTrainData):
x = d.training_input_value[idx]
z2 = n2.z2(x)
z_target = d.training_z_target[idx]
error = n2.squared_error(x, z_target)
print("x: {0:s}, z2: {1:s}, z_target: {2:s}, error: {3:7.5f}".format(str(x), str(z2), str(z_target), error))

Neuron1 - Initial w1: [ 0.84658111  0.76454094], b1: [ 0.04279332]
Neuron2 - Initial w2: [ 0.52140631], b2: [ 0.21846328]
x: [ 0.  0.], z2: [ 0.24077599], z_target: 0.0, error: 0.02899
x: [ 1.  0.], z2: [ 0.68218873], z_target: 1.0, error: 0.05050
x: [ 0.  1.], z2: [ 0.63941246], z_target: 1.0, error: 0.06501
x: [ 1.  1.], z2: [ 1.0808252], z_target: 1.0, error: 0.00327
Epoch   0: Error: 0.03649, w1_0: 0.84718, w1_1: 0.76525, b1: 0.04370, w2: 0.52349, b2: 0.22019
Epoch  20: Error: 0.03245, w1_0: 0.85489, w1_1: 0.77519, b1: 0.05496, w2: 0.55109, b2: 0.24113
Epoch  40: Error: 0.03177, w1_0: 0.85778, w1_1: 0.78042, b1: 0.05854, w2: 0.56336, b2: 0.24764
Epoch  60: Error: 0.03163, w1_0: 0.85868, w1_1: 0.78365, b1: 0.05905, w2: 0.56945, b2: 0.24868
Epoch  80: Error: 0.03157, w1_0: 0.85878, w1_1: 0.78606, b1: 0.05844, w2: 0.57305, b2: 0.24779
Epoch 100: Error: 0.03153, w1_0: 0.85856, w1_1: 0.78808, b1: 0.05747, w2: 0.57561, b2: 0.24628
Epoch 120: Error: 0.03150, w1_0: 0.85819, w1_1: 0.78991, b1: 0.05642, w2: 0.57768, b2: 0.24465
Epoch 140: Error: 0.03147, w1_0: 0.85775, w1_1: 0.79161, b1: 0.05539, w2: 0.57948, b2: 0.24306
Epoch 160: Error: 0.03145, w1_0: 0.85726, w1_1: 0.79321, b1: 0.05442, w2: 0.58109, b2: 0.24158
Epoch 180: Error: 0.03142, w1_0: 0.85675, w1_1: 0.79472, b1: 0.05352, w2: 0.58255, b2: 0.24022
Epoch 200: Error: 0.03141, w1_0: 0.85622, w1_1: 0.79615, b1: 0.05268, w2: 0.58389, b2: 0.23898
Epoch 220: Error: 0.03139, w1_0: 0.85567, w1_1: 0.79751, b1: 0.05190, w2: 0.58511, b2: 0.23785
Epoch 240: Error: 0.03138, w1_0: 0.85512, w1_1: 0.79880, b1: 0.05119, w2: 0.58623, b2: 0.23682
Epoch 260: Error: 0.03137, w1_0: 0.85455, w1_1: 0.80002, b1: 0.05052, w2: 0.58725, b2: 0.23588
Epoch 280: Error: 0.03135, w1_0: 0.85398, w1_1: 0.80119, b1: 0.04990, w2: 0.58819, b2: 0.23503
Epoch 300: Error: 0.03135, w1_0: 0.85340, w1_1: 0.80229, b1: 0.04933, w2: 0.58906, b2: 0.23426
Epoch 320: Error: 0.03134, w1_0: 0.85282, w1_1: 0.80334, b1: 0.04880, w2: 0.58985, b2: 0.23356
Epoch 340: Error: 0.03133, w1_0: 0.85224, w1_1: 0.80434, b1: 0.04830, w2: 0.59058, b2: 0.23293
Epoch 360: Error: 0.03132, w1_0: 0.85166, w1_1: 0.80529, b1: 0.04785, w2: 0.59125, b2: 0.23236
Epoch 380: Error: 0.03132, w1_0: 0.85109, w1_1: 0.80619, b1: 0.04742, w2: 0.59186, b2: 0.23184
Epoch 400: Error: 0.03131, w1_0: 0.85052, w1_1: 0.80705, b1: 0.04702, w2: 0.59243, b2: 0.23137
Epoch 420: Error: 0.03131, w1_0: 0.84995, w1_1: 0.80787, b1: 0.04665, w2: 0.59295, b2: 0.23095
Epoch 440: Error: 0.03130, w1_0: 0.84939, w1_1: 0.80864, b1: 0.04630, w2: 0.59343, b2: 0.23057
Epoch 460: Error: 0.03130, w1_0: 0.84884, w1_1: 0.80938, b1: 0.04597, w2: 0.59387, b2: 0.23022
Epoch 480: Error: 0.03130, w1_0: 0.84829, w1_1: 0.81009, b1: 0.04566, w2: 0.59428, b2: 0.22991
Epoch 500: Error: 0.03129, w1_0: 0.84775, w1_1: 0.81076, b1: 0.04538, w2: 0.59466, b2: 0.22964
Epoch 520: Error: 0.03129, w1_0: 0.84723, w1_1: 0.81139, b1: 0.04511, w2: 0.59500, b2: 0.22939
Epoch 540: Error: 0.03129, w1_0: 0.84671, w1_1: 0.81200, b1: 0.04485, w2: 0.59533, b2: 0.22917
Epoch 560: Error: 0.03128, w1_0: 0.84620, w1_1: 0.81258, b1: 0.04461, w2: 0.59562, b2: 0.22897
Epoch 580: Error: 0.03128, w1_0: 0.84570, w1_1: 0.81313, b1: 0.04438, w2: 0.59590, b2: 0.22880
Epoch 600: Error: 0.03128, w1_0: 0.84521, w1_1: 0.81365, b1: 0.04416, w2: 0.59616, b2: 0.22864
Epoch 620: Error: 0.03128, w1_0: 0.84472, w1_1: 0.81415, b1: 0.04395, w2: 0.59640, b2: 0.22851
Epoch 640: Error: 0.03128, w1_0: 0.84425, w1_1: 0.81463, b1: 0.04376, w2: 0.59662, b2: 0.22839
Epoch 660: Error: 0.03127, w1_0: 0.84379, w1_1: 0.81508, b1: 0.04357, w2: 0.59683, b2: 0.22828
Epoch 680: Error: 0.03127, w1_0: 0.84334, w1_1: 0.81551, b1: 0.04339, w2: 0.59702, b2: 0.22819
Epoch 700: Error: 0.03127, w1_0: 0.84290, w1_1: 0.81592, b1: 0.04322, w2: 0.59720, b2: 0.22811
Epoch 720: Error: 0.03127, w1_0: 0.84247, w1_1: 0.81631, b1: 0.04305, w2: 0.59738, b2: 0.22804
Epoch 740: Error: 0.03127, w1_0: 0.84205, w1_1: 0.81669, b1: 0.04289, w2: 0.59754, b2: 0.22799
x: [ 0.  0.], z2: [ 0.25355634], z_target: 0.0, error: 0.03215
x: [ 1.  0.], z2: [ 0.75665779], z_target: 1.0, error: 0.02961
x: [ 0.  1.], z2: [ 0.74170903], z_target: 1.0, error: 0.03336
x: [ 1.  1.], z2: [ 1.24481048], z_target: 1.0, error: 0.02997


## 3. AND Gate with Linear Two Neurons¶

In [16]:
class Data:
def __init__(self):
self.training_input_value = np.array([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)])
self.training_z_target = np.array([0.0, 0.0, 0.0, 1.0])
self.numTrainData = len(self.training_input_value)

if __name__ == '__main__':
n1 = Neuron1()
n2 = Neuron2(n1)
d = Data()
for idx in range(d.numTrainData):
x = d.training_input_value[idx]
z2 = n2.z2(x)
z_target = d.training_z_target[idx]
error = n2.squared_error(x, z_target)
print("x: {0:s}, z2: {1:s}, z_target: {2:s}, error: {3:7.5f}".format(str(x), str(z2), str(z_target), error))

n2.learning(0.01, 750, d)

for idx in range(d.numTrainData):
x = d.training_input_value[idx]
z2 = n2.z2(x)
z_target = d.training_z_target[idx]
error = n2.squared_error(x, z_target)
print("x: {0:s}, z2: {1:s}, z_target: {2:s}, error: {3:7.5f}".format(str(x), str(z2), str(z_target), error))

Neuron1 - Initial w1: [ 0.74899101  0.30840786], b1: [ 0.40417923]
Neuron2 - Initial w2: [ 0.99721456], b2: [ 0.85418624]
x: [ 0.  0.], z2: [ 1.25723966], z_target: 0.0, error: 0.79033
x: [ 1.  0.], z2: [ 2.00414439], z_target: 0.0, error: 2.00830
x: [ 0.  1.], z2: [ 1.56478847], z_target: 0.0, error: 1.22428
x: [ 1.  1.], z2: [ 2.31169321], z_target: 1.0, error: 0.86027
Epoch   0: Error: 1.07471, w1_0: 0.73313, w1_1: 0.29494, b1: 0.37470, w2: 0.97005, b2: 0.82467
Epoch  20: Error: 0.20265, w1_0: 0.57950, w1_1: 0.17241, b1: 0.07310, w2: 0.74399, b2: 0.47004
Epoch  40: Error: 0.09328, w1_0: 0.53067, w1_1: 0.14456, b1: -0.03062, w2: 0.69785, b2: 0.31290
Epoch  60: Error: 0.06828, w1_0: 0.51196, w1_1: 0.14423, b1: -0.07171, w2: 0.68700, b2: 0.22683
Epoch  80: Error: 0.05960, w1_0: 0.50657, w1_1: 0.15609, b1: -0.09364, w2: 0.68840, b2: 0.17496
Epoch 100: Error: 0.05530, w1_0: 0.50771, w1_1: 0.17384, b1: -0.10622, w2: 0.69540, b2: 0.14101
Epoch 120: Error: 0.05228, w1_0: 0.51204, w1_1: 0.19437, b1: -0.11428, w2: 0.70532, b2: 0.11661
Epoch 140: Error: 0.04967, w1_0: 0.51785, w1_1: 0.21607, b1: -0.12031, w2: 0.71686, b2: 0.09740
Epoch 160: Error: 0.04726, w1_0: 0.52423, w1_1: 0.23805, b1: -0.12563, w2: 0.72933, b2: 0.08108
Epoch 180: Error: 0.04496, w1_0: 0.53072, w1_1: 0.25984, b1: -0.13092, w2: 0.74232, b2: 0.06646
Epoch 200: Error: 0.04277, w1_0: 0.53706, w1_1: 0.28119, b1: -0.13654, w2: 0.75559, b2: 0.05293
Epoch 220: Error: 0.04067, w1_0: 0.54316, w1_1: 0.30197, b1: -0.14266, w2: 0.76900, b2: 0.04020
Epoch 240: Error: 0.03867, w1_0: 0.54897, w1_1: 0.32210, b1: -0.14932, w2: 0.78245, b2: 0.02814
Epoch 260: Error: 0.03677, w1_0: 0.55448, w1_1: 0.34154, b1: -0.15656, w2: 0.79587, b2: 0.01667
Epoch 280: Error: 0.03495, w1_0: 0.55970, w1_1: 0.36029, b1: -0.16434, w2: 0.80924, b2: 0.00579
Epoch 300: Error: 0.03322, w1_0: 0.56466, w1_1: 0.37833, b1: -0.17264, w2: 0.82251, b2: -0.00459
Epoch 320: Error: 0.03152, w1_0: 0.56944, w1_1: 0.39573, b1: -0.18133, w2: 0.83572, b2: -0.01510
Epoch 340: Error: 0.02986, w1_0: 0.57414, w1_1: 0.41257, b1: -0.19024, w2: 0.84891, b2: -0.02571
Epoch 360: Error: 0.02824, w1_0: 0.57882, w1_1: 0.42890, b1: -0.19929, w2: 0.86209, b2: -0.03633
Epoch 380: Error: 0.02666, w1_0: 0.58351, w1_1: 0.44472, b1: -0.20841, w2: 0.87525, b2: -0.04687
Epoch 400: Error: 0.02512, w1_0: 0.58822, w1_1: 0.46005, b1: -0.21758, w2: 0.88836, b2: -0.05730
Epoch 420: Error: 0.02363, w1_0: 0.59295, w1_1: 0.47488, b1: -0.22676, w2: 0.90141, b2: -0.06759
Epoch 440: Error: 0.02219, w1_0: 0.59770, w1_1: 0.48921, b1: -0.23593, w2: 0.91437, b2: -0.07773
Epoch 460: Error: 0.02080, w1_0: 0.60248, w1_1: 0.50304, b1: -0.24506, w2: 0.92721, b2: -0.08769
Epoch 480: Error: 0.01946, w1_0: 0.60729, w1_1: 0.51637, b1: -0.25415, w2: 0.93990, b2: -0.09746
Epoch 500: Error: 0.01818, w1_0: 0.61211, w1_1: 0.52920, b1: -0.26316, w2: 0.95243, b2: -0.10702
Epoch 520: Error: 0.01696, w1_0: 0.61695, w1_1: 0.54154, b1: -0.27209, w2: 0.96478, b2: -0.11637
Epoch 540: Error: 0.01579, w1_0: 0.62179, w1_1: 0.55338, b1: -0.28092, w2: 0.97691, b2: -0.12550
Epoch 560: Error: 0.01468, w1_0: 0.62664, w1_1: 0.56474, b1: -0.28962, w2: 0.98882, b2: -0.13440
Epoch 580: Error: 0.01362, w1_0: 0.63148, w1_1: 0.57562, b1: -0.29820, w2: 1.00050, b2: -0.14305
Epoch 600: Error: 0.01262, w1_0: 0.63631, w1_1: 0.58603, b1: -0.30663, w2: 1.01192, b2: -0.15146
Epoch 620: Error: 0.01168, w1_0: 0.64112, w1_1: 0.59598, b1: -0.31490, w2: 1.02307, b2: -0.15962
Epoch 640: Error: 0.01079, w1_0: 0.64590, w1_1: 0.60548, b1: -0.32300, w2: 1.03395, b2: -0.16753
Epoch 660: Error: 0.00995, w1_0: 0.65064, w1_1: 0.61454, b1: -0.33092, w2: 1.04454, b2: -0.17519
Epoch 680: Error: 0.00916, w1_0: 0.65533, w1_1: 0.62318, b1: -0.33866, w2: 1.05484, b2: -0.18259
Epoch 700: Error: 0.00843, w1_0: 0.65997, w1_1: 0.63140, b1: -0.34620, w2: 1.06484, b2: -0.18973
Epoch 720: Error: 0.00774, w1_0: 0.66454, w1_1: 0.63923, b1: -0.35354, w2: 1.07454, b2: -0.19662
Epoch 740: Error: 0.00710, w1_0: 0.66904, w1_1: 0.64668, b1: -0.36067, w2: 1.08394, b2: -0.20326
x: [ 0.  0.], z2: 0.0, z_target: 0.0, error: 0.00000
x: [ 1.  0.], z2: [ 0.12812268], z_target: 0.0, error: 0.00821
x: [ 0.  1.], z2: [ 0.10512664], z_target: 0.0, error: 0.00553
x: [ 1.  1.], z2: [ 0.83526685], z_target: 1.0, error: 0.01357


## 4. XOR Gate with Linear Two Neurons¶

In [21]:
class Data:
def __init__(self):
self.training_input_value = np.array([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)])
self.training_z_target = np.array([0.0, 1.0, 1.0, 0.0])
self.numTrainData = len(self.training_input_value)

if __name__ == '__main__':
n1 = Neuron1()
n2 = Neuron2(n1)
d = Data()
for idx in range(d.numTrainData):
x = d.training_input_value[idx]
z2 = n2.z2(x)
z_target = d.training_z_target[idx]
error = n2.squared_error(x, z_target)
print("x: {0:s}, z2: {1:s}, z_target: {2:s}, error: {3:7.5f}".format(str(x), str(z2), str(z_target), error))

n2.learning(0.01, 750, d)

for idx in range(d.numTrainData):
x = d.training_input_value[idx]
z2 = n2.z2(x)
z_target = d.training_z_target[idx]
error = n2.squared_error(x, z_target)
print("x: {0:s}, z2: {1:s}, z_target: {2:s}, error: {3:7.5f}".format(str(x), str(z2), str(z_target), error))

Neuron1 - Initial w1: [ 0.872904    0.03981136], b1: [ 0.07419065]
Neuron2 - Initial w2: [ 0.96176869], b2: [ 0.19185716]
x: [ 0.  0.], z2: [ 0.2632114], z_target: 0.0, error: 0.03464
x: [ 1.  0.], z2: [ 1.10274314], z_target: 1.0, error: 0.00528
x: [ 0.  1.], z2: [ 0.30150073], z_target: 1.0, error: 0.24395
x: [ 1.  1.], z2: [ 1.14103247], z_target: 0.0, error: 0.65098
Epoch   0: Error: 0.22863, w1_0: 0.86691, w1_1: 0.03767, b1: 0.07034, w2: 0.95610, b2: 0.18790
Epoch  20: Error: 0.18390, w1_0: 0.77953, w1_1: 0.01696, b1: 0.03454, w2: 0.87716, b2: 0.14977
Epoch  40: Error: 0.17017, w1_0: 0.72348, w1_1: 0.01444, b1: 0.03304, w2: 0.82917, b2: 0.14834
Epoch  60: Error: 0.16203, w1_0: 0.68060, w1_1: 0.01674, b1: 0.04091, w2: 0.79367, b2: 0.15819
Epoch  80: Error: 0.15607, w1_0: 0.64482, w1_1: 0.02006, b1: 0.05095, w2: 0.76496, b2: 0.17117
Epoch 100: Error: 0.15146, w1_0: 0.61364, w1_1: 0.02327, b1: 0.06090, w2: 0.74066, b2: 0.18447
Epoch 120: Error: 0.14778, w1_0: 0.58580, w1_1: 0.02604, b1: 0.07008, w2: 0.71957, b2: 0.19712
Epoch 140: Error: 0.14480, w1_0: 0.56061, w1_1: 0.02832, b1: 0.07835, w2: 0.70098, b2: 0.20884
Epoch 160: Error: 0.14235, w1_0: 0.53758, w1_1: 0.03017, b1: 0.08576, w2: 0.68443, b2: 0.21962
Epoch 180: Error: 0.14032, w1_0: 0.51639, w1_1: 0.03164, b1: 0.09239, w2: 0.66957, b2: 0.22949
Epoch 200: Error: 0.13861, w1_0: 0.49678, w1_1: 0.03279, b1: 0.09835, w2: 0.65616, b2: 0.23855
Epoch 220: Error: 0.13716, w1_0: 0.47855, w1_1: 0.03367, b1: 0.10371, w2: 0.64399, b2: 0.24689
Epoch 240: Error: 0.13592, w1_0: 0.46153, w1_1: 0.03433, b1: 0.10857, w2: 0.63291, b2: 0.25457
Epoch 260: Error: 0.13485, w1_0: 0.44558, w1_1: 0.03481, b1: 0.11297, w2: 0.62277, b2: 0.26168
Epoch 280: Error: 0.13393, w1_0: 0.43059, w1_1: 0.03513, b1: 0.11700, w2: 0.61348, b2: 0.26826
Epoch 300: Error: 0.13312, w1_0: 0.41647, w1_1: 0.03532, b1: 0.12067, w2: 0.60493, b2: 0.27439
Epoch 320: Error: 0.13241, w1_0: 0.40312, w1_1: 0.03539, b1: 0.12405, w2: 0.59704, b2: 0.28009
Epoch 340: Error: 0.13178, w1_0: 0.39049, w1_1: 0.03538, b1: 0.12717, w2: 0.58975, b2: 0.28542
Epoch 360: Error: 0.13123, w1_0: 0.37849, w1_1: 0.03529, b1: 0.13004, w2: 0.58301, b2: 0.29041
Epoch 380: Error: 0.13073, w1_0: 0.36708, w1_1: 0.03513, b1: 0.13271, w2: 0.57675, b2: 0.29509
Epoch 400: Error: 0.13029, w1_0: 0.35621, w1_1: 0.03491, b1: 0.13518, w2: 0.57093, b2: 0.29949
Epoch 420: Error: 0.12989, w1_0: 0.34584, w1_1: 0.03465, b1: 0.13749, w2: 0.56551, b2: 0.30363
Epoch 440: Error: 0.12953, w1_0: 0.33593, w1_1: 0.03434, b1: 0.13963, w2: 0.56047, b2: 0.30753
Epoch 460: Error: 0.12921, w1_0: 0.32644, w1_1: 0.03400, b1: 0.14165, w2: 0.55576, b2: 0.31122
Epoch 480: Error: 0.12891, w1_0: 0.31735, w1_1: 0.03363, b1: 0.14353, w2: 0.55137, b2: 0.31470
Epoch 500: Error: 0.12865, w1_0: 0.30862, w1_1: 0.03323, b1: 0.14530, w2: 0.54726, b2: 0.31801
Epoch 520: Error: 0.12840, w1_0: 0.30024, w1_1: 0.03281, b1: 0.14696, w2: 0.54341, b2: 0.32114
Epoch 540: Error: 0.12818, w1_0: 0.29218, w1_1: 0.03237, b1: 0.14852, w2: 0.53981, b2: 0.32412
Epoch 560: Error: 0.12798, w1_0: 0.28442, w1_1: 0.03192, b1: 0.15000, w2: 0.53643, b2: 0.32695
Epoch 580: Error: 0.12779, w1_0: 0.27694, w1_1: 0.03146, b1: 0.15140, w2: 0.53327, b2: 0.32964
Epoch 600: Error: 0.12762, w1_0: 0.26972, w1_1: 0.03098, b1: 0.15272, w2: 0.53030, b2: 0.33221
Epoch 620: Error: 0.12746, w1_0: 0.26276, w1_1: 0.03050, b1: 0.15397, w2: 0.52751, b2: 0.33466
Epoch 640: Error: 0.12731, w1_0: 0.25603, w1_1: 0.03001, b1: 0.15515, w2: 0.52490, b2: 0.33700
Epoch 660: Error: 0.12717, w1_0: 0.24953, w1_1: 0.02951, b1: 0.15628, w2: 0.52244, b2: 0.33924
Epoch 680: Error: 0.12705, w1_0: 0.24324, w1_1: 0.02901, b1: 0.15735, w2: 0.52013, b2: 0.34138
Epoch 700: Error: 0.12693, w1_0: 0.23715, w1_1: 0.02850, b1: 0.15837, w2: 0.51796, b2: 0.34343
Epoch 720: Error: 0.12682, w1_0: 0.23125, w1_1: 0.02800, b1: 0.15934, w2: 0.51591, b2: 0.34539
Epoch 740: Error: 0.12672, w1_0: 0.22553, w1_1: 0.02749, b1: 0.16026, w2: 0.51400, b2: 0.34727
x: [ 0.  0.], z2: [ 0.43054694], z_target: 0.0, error: 0.09269
x: [ 1.  0.], z2: [ 0.54499381], z_target: 1.0, error: 0.10352
x: [ 0.  1.], z2: [ 0.44453855], z_target: 1.0, error: 0.15427
x: [ 1.  1.], z2: [ 0.55898542], z_target: 0.0, error: 0.15623