Notebook
acc[i, j] is the test accuracy obtained with the i-th threshold and the j-th learning rate, after 100 training iterations.
# Practice
#
# Train a fully-connected 22-5-1 sigmoid network (nodes 0..21 input,
# 22..26 hidden, 27 output) with plain per-sample backpropagation for
# 100 epochs, then evaluate on the test set.
#
# NOTE(review): reconstructed from a flattened notebook dump. Assumes
# `x_train`/`y_train`/`x_test`/`y_test`, the `bias` and `error` arrays
# (length >= 28), `Predict`, and `sk` (sklearn) are defined in earlier
# cells — confirm. `it += 1` is assumed to sit at while-loop level
# (one epoch per iteration), matching the "100 iterations" description.
n = len(x_train)
m = len(x_train[0])  # presumably 22 input features — confirm against the data
threshold = 0.85       # decision threshold on the output sigmoid
learning_rate = 0.5
my_res = np.array([])
weights = np.random.random_sample((28, 28))  # weights[a, b]: edge from node a to node b
# weights = (1/23)*weights
it = 0
while it < 100:
    for i in range(n):
        I = np.array([])  # net input of each node, indexed by node id
        O = np.array([])  # output (activation) of each node, indexed by node id
        # ---- forward pass ----
        # Layer 1 (input nodes 0..21): identity pass-through.
        inputs = x_train[i]
        for j in range(m):
            I = np.append(I, inputs[j])
            O = np.append(O, inputs[j])
        # Layer 2 (hidden nodes 22..26): weighted sum of the inputs plus bias.
        # Comprehension variable renamed i -> k so it cannot shadow the
        # sample index `i` (it silently clobbered it under Python 2).
        for j in range(5):
            input_add = sum([weights[k, 22 + j] * O[k] for k in range(22)])
            input_add = input_add + bias[22 + j]
            I = np.append(I, input_add)
        # Hidden outputs: logistic sigmoid of the net input.
        for j in range(5):
            output_add = 1 / (1 + (math.exp(-I[22 + j])))
            O = np.append(O, output_add)
        # Output layer (node 27).
        input_add = sum([weights[k, 27] * O[k] for k in range(22, 27)])
        input_add = input_add + bias[27]
        I = np.append(I, input_add)
        output_add = 1 / (1 + (math.exp(-input_add)))
        # Thresholded 0/1 prediction is stored as the node's "output"...
        if output_add < threshold:
            O = np.append(O, 0)
        else:
            O = np.append(O, 1)
        print(O[27])
        # ---- backward pass ----
        # Output-layer delta.
        # BUG FIX: the original used O[27] here, but O[27] has already been
        # thresholded to 0 or 1, so O[27]*(1-O[27]) was identically zero —
        # every delta (and hence every weight update) vanished and the
        # network never learned. The delta must use the pre-threshold
        # sigmoid activation `output_add`.
        error[27] = output_add * (1 - output_add) * (y_train[i] - output_add)
        # bias[27] = bias[27] + learning_rate*error[27]
        # Hidden layer: delta back-propagated through weights[j, 27],
        # then bias and weight updates with the delta rule.
        for j in range(22, 27):
            error[j] = O[j] * (1 - O[j]) * (error[27] * weights[j, 27])
            bias[j] = bias[j] + learning_rate * error[j]
            weights[j, 27] = weights[j, 27] + learning_rate * error[27] * O[j]
        # First layer: delta from all hidden deltas, then update the
        # input->hidden weights.
        for j in range(22):
            error[j] = O[j] * (1 - O[j]) * sum([error[k] * weights[j, k] for k in range(22, 27)])
            bias[j] = bias[j] + learning_rate * error[j]
            for l in range(22, 27):
                weights[j, l] = weights[j, l] + learning_rate * error[l] * O[j]
    it += 1
    # print('I=', I, 'O=', O)
    # print(weights)

# Evaluate the trained weights on the held-out test set.
my_res = Predict(weights, x_test, my_res)
print(my_res)
accuracy = sk.metrics.accuracy_score(y_test, my_res, normalize=True, sample_weight=None)
print(accuracy)
0.568376068376 [[ 11.4 9.8] [ 1.7 3.8]] 0.502874902875 0.808823529412