# Neuron class¶

In :
# Numerical arrays and operations.
import numpy as np


#### Simple neuron¶ In :
# Just one input for now.
def neuron(x, w, b):
    """Single-input neuron: return the weighted input plus the bias."""
    weighted = w * x
    return weighted + b

In :
neuron(4.0, 0.5, 0.1)

Out:
2.1

#### Class¶

In :
# Represents a neuron.
# Note how we can now save the weight and bias with the neuron.
# The neuron has state now.
class Neuron:
    """A single-input neuron that keeps its weight and bias as instance state."""

    def __init__(self, winit, binit):
        """Store the initial weight and bias on the new instance."""
        self.w = winit
        self.b = binit

    def fire(self, x):
        """Return the neuron's output for input x, using the stored w and b."""
        scaled = x * self.w
        return scaled + self.b

In :
# Create a neuron using the above class blueprint.
n = Neuron(0.5, 0.1)

In :
# n is a variable referring to an instance of the class.
# Memory has been allocated for it, and it has values for w and b.
n

Out:
<__main__.Neuron at 0x7f1d382f5d00>
In :
# n's w
n.w

Out:
0.5
In :
# n's b
n.b

Out:
0.1
In :
# Call n's fire method with an x input.
n.fire(4.0)

Out:
2.1

#### Activation function¶

In :
# Let's add a function to be called just before the output is generated.
class Neuron:
    """A single-input neuron with an activation function applied to its output."""

    def __init__(self, winit, binit, finit):
        """Store the weight, bias, and activation function on the instance."""
        self.w = winit
        self.b = binit
        self.f = finit  # activation, called on the pre-activation value

    def fire(self, x):
        """Return f(b + x*w) for input x."""
        pre_activation = self.b + x * self.w
        return self.f(pre_activation)

In :
# A not-so-interesting function - it just returns its input.
def identity(x):
    """Pass-through activation: return the argument unchanged."""
    return x

In :
# In Python we can use lambda to create quick/anonymous functions.
# This is a different way of writing the above code.
# Typically you don't give the function a name when using lambda, but here we do.
# NOTE: PEP 8 discourages binding a lambda to a name (use `def` instead);
# it is done here only to illustrate the lambda syntax.
identity = lambda x: x

In :
# Create the neuron.
n = Neuron(0.5, 0.1, identity)

In :
# Same output as before, because f is the identity function.
n.fire(4.0)

Out:
2.1
In :
# The sigmoid function, as defined on Wikipedia.
def sigmoid(x):
    """Logistic sigmoid: squash any real input into the open interval (0, 1)."""
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)

In :
# Create a neuron that uses the sigmoid function instead of the identity function.
n = Neuron(0.5, 0.1, sigmoid)

In :
# Now it gives a different output - a value between 0 and 1.
n.fire(4.0)

Out:
0.8909031788043871

#### Arrays¶

In :
# What if we have more than one input value?
class Neuron:
    """A neuron over a vector input; the bias is folded into the weight array."""

    def __init__(self, winit, finit):
        """Store the weight array (last entry acts as the bias) and activation."""
        self.w = winit
        self.f = finit

    def fire(self, x):
        """Return f(dot([x, 1.0], w)) for input vector x."""
        # Append a trailing 1.0 to x so the final weight serves as the bias
        # term in the dot product.
        augmented = np.append(x, np.array([1.0]))
        return self.f(np.dot(augmented, self.w))

In :
# Create the neuron with our weight vector and sigmoid activation.
n = Neuron(np.array([0.5, 0.1]), sigmoid)

In :
# Fire the neuron.
n.fire(np.array([4.0]))

Out:
0.8909031788043871
In :
# We can use the neuron class with more than one input now.
n = Neuron(np.array([0.4, 0.5, 0.1]), sigmoid)
n.fire(np.array([4.0, -1.3]))

Out:
0.740774899182154

#### What next?¶

• How do we use more than one neuron?
• How do we update the w array to make the neuron(s) match known inputs and outputs?