require 'numo/narray'
true
# NOTE(review): this file reads as a captured pry/REPL transcript (see the
# "@(pry)" Proc inspect strings further down); bare lines such as `true`
# and the echoed arrays/floats are interpreter OUTPUT, not statements.
# Example softmax output: 10 class probabilities, peaked at index 2.
y = [0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0]
[0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0]
# One-hot teacher label: the correct answer is class index 2.
t = [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
# Sum-of-squares error between a network output y and its teacher signal t:
#   E = 1/2 * Σ_k (y_k - t_k)^2
# Both arguments are expected to be Numo numeric arrays of the same shape.
def mean_squared_error(y, t)
  # Elementwise difference, squared, summed over every component.
  diff = y - t
  0.5 * (diff ** 2).sum
end
:mean_squared_error
# Correct class (index 2) carries the highest probability -> small error.
mean_squared_error(Numo::DFloat.asarray(y), Numo::DFloat.asarray(t))
0.09750000000000003
# Second sample: the probability mass sits on index 7, not the correct 2.
y2 = [0.1, 0.05, 0.1, 0.0, 0.05, 0.1, 0.0, 0.6, 0.0, 0.0];
[0.1, 0.05, 0.1, 0.0, 0.05, 0.1, 0.0, 0.6, 0.0, 0.0]
# Mismatched prediction -> clearly larger error than the match above.
mean_squared_error(Numo::DFloat.asarray(y2), Numo::DFloat.asarray(t))
0.5974999999999999
# Cross-entropy error for a single sample:
#   E = -Σ_k t_k * log(y_k)
# With a one-hot t, only the log-probability of the correct class survives.
def cross_entropy_error(y, t)
  # Tiny offset keeps log() away from -Infinity when a probability is 0.
  delta = 1e-7
  -(t * Numo::NMath.log(y + delta)).sum
end
:cross_entropy_error
# Good prediction: -log(0.6 + 1e-7) ≈ 0.51.
cross_entropy_error(Numo::DFloat.asarray(y), Numo::DFloat.asarray(t))
0.510825457099338
# Bad prediction: correct class got ~0 probability, -log(1e-7-ish) ≈ 2.3.
cross_entropy_error(Numo::DFloat.asarray(y2), Numo::DFloat.asarray(t))
2.302584092994546
# Load MNIST via the project-local dataset/mnist helper (not inspectable
# from here); per the output below it yields normalized, flattened images
# and one-hot labels.
require_relative 'dataset/mnist'
true
x_train, t_train, x_test, t_test = MNIST.load_mnist(
normalize: true, one_hot_label: true)
[<MNISTImages @length=60000, @normalize=true, @flatten=true>, <MNISTLabels @length=60000, @one_hot=true>, <MNISTImages @length=10000, @normalize=true, @flatten=true>, <MNISTLabels @length=10000, @one_hot=true>]
# 60000 training images of 784 (= 28x28) pixels each.
x_train.shape
[60000, 784]
# Matching one-hot labels over the 10 digit classes.
t_train.shape
[60000, 10]
# Slice the first 10 samples off as a mini-batch for the batched
# cross_entropy_error defined next.
x_batch = x_train[0...10]
t_batch = t_train[0...10]
[x_batch, t_batch]
[Numo::SFloat#shape=[10,784] [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...]], Numo::UInt8#shape=[10,10] [[0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0]]]
# Mini-batch cross-entropy error, averaged over the batch (redefines the
# single-sample version above). Accepts either one sample (1-D) or a batch
# (2-D); a single sample is promoted to a 1-row batch so both shapes share
# one code path. Assumes t is one-hot encoded.
def cross_entropy_error(y, t)
  if y.ndim == 1
    y = y.reshape(1, y.size)
    t = t.reshape(1, t.size)
  end
  # Tiny offset keeps log() away from -Infinity when a probability is 0.
  delta = 1e-7
  batch_size = y.shape[0]
  -(t * Numo::NMath.log(y + delta)).sum / batch_size
end
:cross_entropy_error
# 1-D input exercises the reshape branch; with batch_size 1 the result
# matches the single-sample version exactly.
cross_entropy_error(Numo::DFloat.asarray(y), Numo::DFloat.asarray(t))
0.510825457099338
# Numerical derivative of callable f at x using the central difference,
# (f(x+h) - f(x-h)) / 2h, whose truncation error is O(h^2) versus O(h)
# for the one-sided forward difference.
def numerical_diff(f, x)
  h = 1e-4
  (f.call(x + h) - f.call(x - h)) / (2 * h)
end
:numerical_diff
# f(x) = 0.01x^2 + 0.1x, written as a lambda (rather than a def, per the
# commented-out header) so it can be handed to numerical_diff.
# def function_1(x)
function_1 = -> x do
0.01*x**2 + 0.1*x
end
#<Proc:0x005564bf558178@(pry):43 (lambda)>
# Analytic derivative is 0.02x + 0.1 -> 0.2 at x=5 and 0.3 at x=10;
# the numerical results below agree to ~12 digits.
numerical_diff(function_1, 5)
0.1999999999990898
numerical_diff(function_1, 10)
0.2999999999986347
# Two-variable quadratic f(x0, x1) = x0^2 + x1^2, the standard example for
# taking partial derivatives one variable at a time.
def function_2(x0, x1)
  x0**2 + x1**2
end
:function_2
# Partial derivative w.r.t. x0: pin x1 = 4.0 and differentiate the
# resulting one-variable function.
function_tmp1 = -> x0 do
function_2(x0, 4.0)
end
#<Proc:0x005564bf5a80d8@(pry):51 (lambda)>
# ∂f/∂x0 = 2*x0 -> 6 at (3, 4); numerical result agrees.
numerical_diff(function_tmp1, 3.0)
6.00000000000378
# Partial derivative w.r.t. x1: pin x0 = 3.0.
function_tmp2 = -> x1 do
function_2(3.0, x1)
end
#<Proc:0x005564bf5d7dd8@(pry):55 (lambda)>
# ∂f/∂x1 = 2*x1 -> 8 at (3, 4); numerical result agrees.
numerical_diff(function_tmp2, 4.0)
7.999999999999119