#!/usr/bin/env python
# coding: utf-8
"""Loss functions and numerical differentiation demos (notebook export).

Defines mean squared error, (batched) cross-entropy error, and a central
numerical-difference helper, then demonstrates them on toy vectors and a
random MNIST mini-batch.  The MNIST demo needs the project-local
``dataset.mnist`` module and only runs when executed as a script.
"""

import numpy as np

# Example softmax-like output and its one-hot target ("2" is correct).
y = [0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0]
t = [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
# A second output where the wrong class ("7") has the highest score.
y2 = [0.1, 0.05, 0.1, 0.0, 0.05, 0.1, 0.0, 0.6, 0.0, 0.0]


def mean_square_error(y, t):
    """Return the mean squared error 0.5 * sum((y - t)**2).

    Parameters
    ----------
    y : np.ndarray
        Predicted values.
    t : np.ndarray
        Target values (same shape as ``y``).
    """
    return 0.5 * np.sum((y - t) ** 2)


def cross_entropy_error(y, t):
    """Return the cross-entropy error for one-hot targets, averaged over a batch.

    Accepts either a single sample (1-D ``y``/``t``) or a batch
    (2-D, one sample per row).  A single sample is reshaped to a
    batch of one, so the result matches the unbatched formula.

    Parameters
    ----------
    y : np.ndarray
        Predicted probabilities, shape (n,) or (batch, n).
    t : np.ndarray
        One-hot targets with the same shape as ``y``.
    """
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    delta = 1e-7  # avoid log(0) -> -inf
    batch_size = y.shape[0]
    return -np.sum(t * np.log(y + delta)) / batch_size


def numerical_diff(f, x):
    """Return the central-difference derivative of ``f`` at ``x`` (h = 1e-4)."""
    h = 1e-4
    return (f(x + h) - f(x - h)) / (2 * h)


def function_1(x):
    """Example scalar function: 0.01*x**2 + 0.1*x."""
    return 0.01 * x ** 2 + 0.1 * x


def function_2(x0, x1):
    """Example two-variable function: x0**2 + x1**2."""
    return x0 ** 2 + x1 ** 2


def function_tmp1(x0):
    """``function_2`` with x1 fixed at 4.0 (for the partial d/dx0)."""
    return function_2(x0, 4.0)


def function_tmp2(x1):
    """``function_2`` with x0 fixed at 3.0 (for the partial d/dx1)."""
    return function_2(3.0, x1)


def main():
    """Run the notebook's demo cells, printing each formerly-displayed value."""
    # Loss comparisons on the toy vectors: the correct prediction (y)
    # yields a smaller loss than the wrong one (y2) under both losses.
    print("MSE  (correct):", mean_square_error(np.array(y), np.array(t)))
    print("MSE  (wrong):  ", mean_square_error(np.array(y2), np.array(t)))
    print("CE   (correct):", cross_entropy_error(np.array(y), np.array(t)))
    print("CE   (wrong):  ", cross_entropy_error(np.array(y2), np.array(t)))

    # Project-local import, deferred so the pure functions above stay
    # importable without the book's dataset package.
    from dataset.mnist import load_mnist

    (x_train, t_train), (x_test, t_test) = \
        load_mnist(normalize=True, one_hot_label=True)
    print("x_train.shape:", x_train.shape)
    print("t_train.shape:", t_train.shape)

    # Draw a random mini-batch of 10 samples.
    train_size = x_train.shape[0]
    batch_size = 10
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]
    print("x_batch:", x_batch)
    print("t_batch:", t_batch)

    # Numerical derivatives: f'(x) = 0.02*x + 0.1, so ~0.2 and ~0.3.
    print("d/dx function_1 at 5: ", numerical_diff(function_1, 5))
    print("d/dx function_1 at 10:", numerical_diff(function_1, 10))
    # Partial derivatives of function_2 at (3.0, 4.0): ~6.0 and ~8.0.
    print("d/dx0 function_2 at (3, 4):", numerical_diff(function_tmp1, 3.0))
    print("d/dx1 function_2 at (3, 4):", numerical_diff(function_tmp2, 4.0))


if __name__ == "__main__":
    main()