#!/usr/bin/env python
# coding: utf-8

# # Single Neuron (단일 뉴런) - multiple inputs
# ## Gate Neuron

# In[1]:


import numpy as np
import random
import math


# In[2]:


class GateNeuron:
    """A single two-input neuron with a ReLU activation.

    It is trained by per-sample (stochastic) gradient descent on the
    half squared error, using a numerical estimate of the activation's
    derivative.
    """

    def __init__(self):
        # Two weights -- one per input component -- and a bias,
        # all initialised to zero.
        self.w = np.array([0.0, 0.0])  # weights of the two inputs
        self.b = np.array([0.0])       # bias
        print("Initial w: {0}, b: {1}".format(self.w, self.b))

    def u(self, x):
        """Pre-activation: the weighted sum w . x + b."""
        return np.dot(self.w, x) + self.b

    def f(self, u):
        """Activation function: ReLU, max(0, u)."""
        return max(0.0, u)

    def z(self, x):
        """Neuron output: activation applied to the pre-activation."""
        u = self.u(x)
        return self.f(u)

    def squared_error(self, x, z_target):
        """Half squared error 1/2 * (z(x) - z_target)^2 for one sample."""
        return 1.0 / 2.0 * math.pow(self.z(x) - z_target, 2)

    def numerical_f_derivative(self, u):
        """Central-difference estimate of f'(u)."""
        delta = 1e-4  # 0.0001
        return (self.f(u + delta) - self.f(u - delta)) / (2 * delta)

    def d_E_over_d_w(self, input, z_target):
        """Gradient of the half squared error w.r.t. the weight vector."""
        u = self.u(input)
        z = self.f(u)
        error = z - z_target
        # Chain rule: dE/dw = (z - z_target) * f'(u) * x
        return error * self.numerical_f_derivative(u) * input

    def d_E_over_d_b(self, input, z_target):
        """Gradient of the half squared error w.r.t. the bias."""
        u = self.u(input)
        z = self.f(u)
        error = z - z_target
        return error * self.numerical_f_derivative(u)

    def learning(self, alpha, maxEpoch, data):
        """Train the neuron by per-sample gradient descent.

        alpha    -- learning rate.
        maxEpoch -- number of full passes over the training set.
        data     -- object exposing numTrainData, training_input_value
                    and training_z_target.
        """
        for i in range(maxEpoch):
            for idx in range(data.numTrainData):
                x = data.training_input_value[idx]
                z_target = data.training_z_target[idx]
                # BUG FIX: the original passed the module-level name
                # `input` (a leftover global from the driver script)
                # instead of the sample `x` just fetched, so every
                # update used a stale sample.
                self.w = self.w - alpha * self.d_E_over_d_w(x, z_target)
                self.b = self.b - alpha * self.d_E_over_d_b(x, z_target)

            # Mean squared error over the training set for this epoch.
            # (Accumulator renamed from `sum`, which shadowed the builtin.)
            total = 0.0
            for idx in range(data.numTrainData):
                total = total + self.squared_error(data.training_input_value[idx], data.training_z_target[idx])

            print("Epoch {0}: Error: {1}, w: {2}, b: {3}".format(i, total / data.numTrainData, self.w, self.b))
# ### 1. And Gate

# In[3]:


class Data:
    """AND-gate training set: the four 2-bit inputs and their AND targets."""

    def __init__(self):
        self.training_input_value = np.array([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)])
        self.training_z_target = np.array([0.0, 0.0, 0.0, 1.0])
        self.numTrainData = len(self.training_input_value)


if __name__ == '__main__':
    n = GateNeuron()
    d = Data()

    # Report the untrained neuron's output on every sample.
    # NOTE(review): the loop variable deliberately keeps the name
    # `input` (shadowing the builtin) because other code in this file
    # reads it as a module-level global.
    for input, z_target in zip(d.training_input_value, d.training_z_target):
        error = n.squared_error(input, z_target)
        print("x: {0}, z: {1}, z_target: {2}, error: {3}".format(input, n.z(input), z_target, error))

    n.learning(0.1, 100, d)

    # Report the trained neuron's output on every sample.
    for input, z_target in zip(d.training_input_value, d.training_z_target):
        error = n.squared_error(input, z_target)
        print("x: {0}, z: {1}, z_target: {2}, error: {3}".format(input, n.z(input), z_target, error))


# ### 2. Or Gate

# In[4]:


# NOTE: notebook-export style re-definition of Data; only the targets change.
class Data:
    """OR-gate training set: the four 2-bit inputs and their OR targets."""

    def __init__(self):
        self.training_input_value = np.array([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)])
        self.training_z_target = np.array([0.0, 1.0, 1.0, 1.0])
        self.numTrainData = len(self.training_input_value)


if __name__ == '__main__':
    n = GateNeuron()
    d = Data()

    # Report the untrained neuron's output on every sample.
    for input, z_target in zip(d.training_input_value, d.training_z_target):
        error = n.squared_error(input, z_target)
        print("x: {0}, z: {1}, z_target: {2}, error: {3}".format(input, n.z(input), z_target, error))

    n.learning(0.1, 100, d)

    # Report the trained neuron's output on every sample.
    for input, z_target in zip(d.training_input_value, d.training_z_target):
        error = n.squared_error(input, z_target)
        print("x: {0}, z: {1}, z_target: {2}, error: {3}".format(input, n.z(input), z_target, error))
# ### 3. XOR Gate

# In[6]:


# NOTE: notebook-export style re-definition of Data; only the targets change.
# XOR is not linearly separable, so a single neuron is expected to fail
# to fit these targets -- the reported error will stay high.
class Data:
    """XOR-gate training set: the four 2-bit inputs and their XOR targets."""

    def __init__(self):
        self.training_input_value = np.array([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)])
        self.training_z_target = np.array([0.0, 1.0, 1.0, 0.0])
        self.numTrainData = len(self.training_input_value)


if __name__ == '__main__':
    n = GateNeuron()
    d = Data()

    # Report the untrained neuron's output on every sample.
    # NOTE(review): the loop variable deliberately keeps the name
    # `input` (shadowing the builtin) because other code in this file
    # reads it as a module-level global.
    for input, z_target in zip(d.training_input_value, d.training_z_target):
        error = n.squared_error(input, z_target)
        print("x: {0}, z: {1}, z_target: {2}, error: {3}".format(input, n.z(input), z_target, error))

    n.learning(0.1, 100, d)

    # Report the trained neuron's output on every sample.
    for input, z_target in zip(d.training_input_value, d.training_z_target):
        error = n.squared_error(input, z_target)
        print("x: {0}, z: {1}, z_target: {2}, error: {3}".format(input, n.z(input), z_target, error))