import numpy as np
# Fixed activation of the single input neuron (the only "training example").
ACTIVATION = 3
# Starting value for the lone weight before gradient descent.
INITIAL_WEIGHT = .5
# Starting value for the bias before gradient descent.
INITIAL_BIAS = 2
# Target output: training drives the neuron's prediction toward this value.
ACTUAL_OUTPUT_NEURON_ACTIVATION = 0
# Number of gradient-descent iterations.
N_EPOCHS = 1000
# Step size for each gradient-descent update.
LEARNING_RATE = .05
def cost_function(y_predicted):
    """Half squared-error between the target activation and the prediction."""
    error = ACTUAL_OUTPUT_NEURON_ACTIVATION - y_predicted
    return 0.5 * error ** 2
def cost_function_prime(y_predicted):
    """Derivative of the half squared-error cost w.r.t. the prediction."""
    return -(ACTUAL_OUTPUT_NEURON_ACTIVATION - y_predicted)
def sigmoid(linear_combination):
    """Logistic sigmoid 1 / (1 + e^-x), computed without overflow.

    The naive form ``1/(1 + np.exp(-x))`` raises a RuntimeWarning and
    produces ``inf`` in the intermediate for large negative ``x``.  Using
    ``exp(-|x|)`` keeps the exponent non-positive, so ``exp`` never
    overflows; the two algebraically equivalent branches are selected per
    element, which also keeps array inputs working.
    """
    x = np.asarray(linear_combination, dtype=float)
    # exp argument is always <= 0, so this never overflows.
    z = np.exp(-np.abs(x))
    # x >= 0: 1/(1+e^-x);  x < 0: e^x/(1+e^x)  (same value, stable form).
    return np.where(x >= 0, 1.0 / (1.0 + z), z / (1.0 + z))
def activation_function(linear_combination):
    """Neuron activation; currently just the logistic sigmoid."""
    result = sigmoid(linear_combination)
    return result
def activation_function_prime(linear_combination):
    """Derivative of the sigmoid activation: s * (1 - s).

    Evaluates ``sigmoid`` once and reuses the value, instead of the
    original's two redundant calls with the same argument.
    """
    s = sigmoid(linear_combination)
    return s * (1 - s)
def feed_forward(weight, bias):
    """Forward pass of the one-neuron net.

    Returns the pre-activation ``z = ACTIVATION * weight + bias`` together
    with the post-activation output.
    """
    z = ACTIVATION * weight + bias
    output = activation_function(z)
    return z, output
def back_propagate(linear_combination, prediction, weight, bias):
    """Gradients of the cost w.r.t. the weight and the bias.

    The shared upstream term dC/dz (cost' * activation') is computed once
    instead of twice as in the original.  ``weight`` and ``bias`` are not
    used in the computation but are kept in the signature for caller
    compatibility.
    """
    # delta = dC/dz, shared by both gradients via the chain rule.
    delta = cost_function_prime(prediction) * activation_function_prime(linear_combination)
    # dz/dw is the input activation; dz/db is 1.
    weight_gradient = delta * ACTIVATION
    bias_gradient = delta
    return weight_gradient, bias_gradient
# Gradient-descent training loop: start from the initial parameters and
# repeatedly step opposite the cost gradient.
weight = INITIAL_WEIGHT
bias = INITIAL_BIAS
for i in range(N_EPOCHS):
    # Forward pass: pre-activation and prediction for the current parameters.
    linear_combination, prediction = feed_forward(weight, bias)
    cost = cost_function(prediction)
    # Log progress every 100 iterations.
    if i % 100 == 0: print('iterations: {} | cost: {}'.format(i, cost))
    # Backward pass: gradients of the cost w.r.t. weight and bias.
    weight_gradient, bias_gradient = back_propagate(linear_combination, prediction, weight, bias)
    # Descend: move parameters against the gradient, scaled by the learning rate.
    weight += -LEARNING_RATE * weight_gradient
    bias += -LEARNING_RATE * bias_gradient
# Sample output (pasted from a previous run; kept as a comment so the file
# remains valid Python):
# iterations: 0 | cost: 0.471117372684454
# iterations: 100 | cost: 0.08009999161826546
# iterations: 200 | cost: 0.006898517164612306
# iterations: 300 | cost: 0.003235101899172852
# iterations: 400 | cost: 0.002075246837316176
# iterations: 500 | cost: 0.0015172576638674323
# iterations: 600 | cost: 0.001191719904968802
# iterations: 700 | cost: 0.0009792811328075911
# iterations: 800 | cost: 0.0008300864977082135
# iterations: 900 | cost: 0.0007197278463177123