#!/usr/bin/env python
# coding: utf-8

# # Single Neuron - Single Input
#
# ## Very Simple Neuron

# In[4]:

import numpy as np
import random
import math


# In[22]:

class SimpleNeuron:
    """A one-input neuron with a ReLU activation, trained by gradient descent.

    Models z = relu(w * x + b) and learns w and b so as to minimize the
    half squared error over a small training set.
    """

    def __init__(self):
        self.w = np.array([0.0])  # weight of one input
        self.b = np.array([0.0])  # bias
        print("Initial w: {0}, b: {1}".format(self.w, self.b))

    def u(self, x):
        """Pre-activation: u = w . x + b."""
        return np.dot(self.w, x) + self.b

    def f(self, u):
        """ReLU activation: max(0, u)."""
        return max(0.0, u)

    def z(self, x):
        """Neuron output z = f(u(x))."""
        u = self.u(x)
        return self.f(u)

    def squared_error(self, x, z_target):
        """Half squared error E = (z - z_target)^2 / 2 for one sample."""
        return 1.0 / 2.0 * math.pow(self.z(x) - z_target, 2)

    def numerical_f_derivative(self, u):
        """Central-difference estimate of f'(u)."""
        delta = 1e-6  # 0.000001
        return (self.f(u + delta) - self.f(u - delta)) / (2 * delta)

    def d_E_over_d_w(self, x, z_target):
        """dE/dw = (z - z_target) * f'(u) * x, by the chain rule."""
        u = self.u(x)
        z = self.f(u)
        error = z - z_target
        return error * self.numerical_f_derivative(u) * x

    def d_E_over_d_b(self, x, z_target):
        """dE/db = (z - z_target) * f'(u), by the chain rule."""
        u = self.u(x)
        z = self.f(u)
        error = z - z_target
        return error * self.numerical_f_derivative(u)

    def learning(self, alpha, maxEpoch, data):
        """Run stochastic gradient descent for maxEpoch passes over data.

        alpha: learning rate.
        maxEpoch: number of full passes over the training set.
        data: object exposing training_input_value, training_z_target
              and numTrainData (see Data).
        Prints the mean error and current parameters after every epoch.
        """
        for i in range(maxEpoch):
            for idx in range(data.numTrainData):
                x = data.training_input_value[idx]
                z_target = data.training_z_target[idx]
                self.w = self.w - alpha * self.d_E_over_d_w(x, z_target)
                self.b = self.b - alpha * self.d_E_over_d_b(x, z_target)

            # Accumulator renamed from `sum` to avoid shadowing the builtin.
            total_error = 0.0
            for idx in range(data.numTrainData):
                total_error = total_error + self.squared_error(data.training_input_value[idx], data.training_z_target[idx])
            print("Epoch {0}: Error: {1}, w: {2}, b: {3}".format(i, total_error / data.numTrainData, self.w, self.b))


# ### Function Estimation (or Generation)
# - $f(x) = 10 \cdot x + 4$

# In[24]:

class Data:
    """Training set sampled from the target function f(x) = 10*x + 4."""

    def __init__(self):
        self.training_input_value = np.array([1.0, 2.0, 3.0])
        self.training_z_target = np.array([14.0, 24.0, 34.0])
        self.numTrainData = len(self.training_input_value)


if __name__ == '__main__':
    n = SimpleNeuron()
    d = Data()

    # Error per sample before training.
    for idx in range(d.numTrainData):
        x = d.training_input_value[idx]
        z = n.z(x)
        z_target = d.training_z_target[idx]
        error = n.squared_error(x, z_target)
        # The notebook export had broken this format string across two
        # physical lines (a syntax error); restored to a single literal.
        print("x: {0}, z: {1}, z_target: {2}, error: {3}".format(x, n.z(x), z_target, error))

    n.learning(0.01, 100, d)

    # Error per sample after training.
    for idx in range(d.numTrainData):
        x = d.training_input_value[idx]
        z = n.z(x)
        z_target = d.training_z_target[idx]
        error = n.squared_error(x, z_target)
        print("x: {0}, z: {1}, z_target: {2}, error: {3}".format(x, n.z(x), z_target, error))


# In[ ]: