# Python libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
import torch
from torch import nn

sns.set()  # apply seaborn's default plot style

m = torch.tensor([[1, 2], [3, 4], [5, 6]])  # create a matrix m in R^{3 x 2}
v = torch.tensor([1, 2])                    # create a vector in R^2
print(m)
print(v)

# dot product
d = torch.dot(v, v)
print(type(d))
print(d)

# matrix-vector product
mv = torch.mv(m, v)
print(mv)

# the variables we want to learn
kernel = torch.tensor([2.0], requires_grad=True)
bias = torch.tensor([1.0], requires_grad=True)
# observed values
y = torch.tensor([2.0])
x = torch.tensor([3.0])
# predicted value
y_pred = kernel * x + bias
# use the mean squared difference between observation and prediction as the loss
loss = torch.mean((y - y_pred)**2)
# compute the gradients
loss.backward()
# expect d(loss)/d(kernel) = 2*(y_pred - y)*x = 30 and d(loss)/d(bias) = 2*(y_pred - y) = 10
print(kernel.grad, bias.grad)

trainData = {
    'sizeMB': [0.080, 9.000, 0.001, 0.100, 8.000,
               5.000, 0.100, 6.000, 0.050, 0.500,
               0.002, 2.000, 0.005, 10.00, 0.010,
               7.000, 6.000, 5.000, 1.000, 1.000],
    'timeMB': [0.135, 0.739, 0.067, 0.126, 0.646,
               0.435, 0.069, 0.497, 0.068, 0.116,
               0.070, 0.289, 0.076, 0.744, 0.083,
               0.560, 0.480, 0.399, 0.153, 0.149]
}
testData = {
    'sizeMB': [5.000, 0.200, 0.001, 9.000, 0.002,
               0.020, 0.008, 4.000, 0.001, 1.000,
               0.005, 0.080, 0.800, 0.200, 0.050,
               7.000, 0.005, 0.002, 8.000, 0.008],
    'timeMB': [0.425, 0.098, 0.052, 0.686, 0.066,
               0.078, 0.070, 0.375, 0.058, 0.136,
               0.052, 0.063, 0.183, 0.087, 0.066,
               0.558, 0.066, 0.068, 0.610, 0.057]
}

trainDf = pd.DataFrame(trainData)
trainDf['type'] = 'train'
testDf = pd.DataFrame(testData)
testDf['type'] = 'test'
# DataFrame.append was removed in pandas 2.0; pd.concat does the same job
df = pd.concat([trainDf, testDf], ignore_index=True)
sns.scatterplot(x='sizeMB', y='timeMB', hue='type', data=df)
plt.show()

def model(sizeMB, kernel, bias):
    return kernel * sizeMB + bias

# input and output tensors (the 'timeMB' column holds transfer times in seconds)
sizeMB = torch.tensor(trainData['sizeMB'])
timeSec = torch.tensor(trainData['timeMB'])
# kernel and bias
kernel = torch.ones(1, requires_grad=True)
bias = torch.zeros(1, requires_grad=True)
# logs of the loss values and the parameter trajectory
losses = []
kernels = []
biases = []
# learning rate
lr = 0.01
for i in range(100):
    kernel.grad = None
    bias.grad = None
    predict = model(sizeMB, kernel, bias)
    loss = torch.mean((timeSec - predict)**2)
    loss.backward()
    if i % 20 == 0:
        kernels.append(kernel.item())
        biases.append(bias.item())
    losses.append(loss.item())
    # update the parameters outside the autograd graph
    with torch.no_grad():
        kernel -= lr * kernel.grad
        bias -= lr * bias.grad

# show the first five loss values
losses[:5]

plt.plot(losses)
plt.show()

print(kernel.item(), bias.item())

x = torch.arange(0, 10, 0.01)
y = model(x, kernel, bias)
print(x[:5])
print(y[:5])
ax = sns.lineplot(x=x.detach().numpy(), y=y.detach().numpy())
sns.scatterplot(x='sizeMB', y='timeMB', hue='type', data=df, ax=ax)
plt.show()

# contour of the loss function
N = 20
X = np.linspace(-0.05, 0.15, N)
Y = np.linspace(-0.1, 0.15, N)

def f(x, y):
    kernel = torch.tensor([x], dtype=torch.float32)
    bias = torch.tensor([y], dtype=torch.float32)
    predict = model(sizeMB, kernel, bias)
    return torch.mean((timeSec - predict)**2)

# evaluate the loss for each (kernel, bias) pair on the grid
Z = [[f(X[j], Y[i]).numpy() for j in range(N)] for i in range(N)]

print(list(zip(kernels, biases)))
plt.scatter(kernels, biases, color='red')
plt.show()

# draw the contour plot
Xs, Ys = np.meshgrid(X, Y)
fig, ax = plt.subplots()
plt.contourf(Xs, Ys, Z, levels=10, cmap='jet')
# skip the first logged point (1.0, 0.0), which lies outside the plotted range
plt.scatter(kernels[1:], biases[1:], marker='o', color='red')
plt.scatter(kernel.item(), bias.item(), marker='x', color='red')
plt.colorbar()
plt.show()
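# A vectorized alternative to the Python double loop above -- a minimal
# sketch, assuming the same sizeMB/timeSec tensors and the same X/Y grids;
# the name Z_fast is introduced here for illustration. Broadcasting
# evaluates the whole loss surface in one shot: Z_fast[i, j] equals
# f(X[j], Y[i]) from the loop version.
ks = torch.from_numpy(X).float().reshape(1, N, 1)  # kernel values vary along columns
bs = torch.from_numpy(Y).float().reshape(N, 1, 1)  # bias values vary along rows
pred = ks * sizeMB + bs                            # shape (N, N, 20) by broadcasting
Z_fast = torch.mean((timeSec - pred)**2, dim=-1)   # (N, N) grid of loss values
print(np.allclose(np.array(Z), Z_fast.numpy()))    # should print True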
from torch import nn, optim

# kernel and bias, reinitialized
kernel = torch.ones(1, requires_grad=True)
bias = torch.zeros(1, requires_grad=True)
# use mean squared error as the loss function
loss_fn = nn.MSELoss()
#loss_fn = nn.L1Loss()
# use plain stochastic gradient descent (SGD), the simplest optimizer
optimizer = optim.SGD([kernel, bias], lr=0.01)
# log of the loss values
losses = []
for i in range(1000):
    optimizer.zero_grad()                  # clear the gradients
    predict = model(sizeMB, kernel, bias)  # compute the predictions
    loss = loss_fn(predict, timeSec)       # compute the loss (input, target)
    loss.backward()                        # backpropagate to get the gradients
    optimizer.step()                       # let the optimizer update the variables
    losses.append(loss.item())

plt.plot(losses)
plt.show()

print(kernel, bias)

a = torch.tensor([1, 2, 3])
b = torch.tensor([4, 5, 6])
torch.stack([a, b], dim=1)  # stack as columns -> a 3 x 2 matrix

# closed-form least squares via the SVD pseudoinverse
A = torch.stack([sizeMB, torch.ones(20)], dim=1)  # design matrix [sizeMB, 1]
# torch.svd is deprecated; torch.linalg.svd returns Vh with A = U diag(S) Vh
U, S, Vh = torch.linalg.svd(A, full_matrices=False)
A_inv = Vh.t() @ torch.diag(1.0 / S) @ U.t()      # pseudoinverse A+ = V diag(1/S) U^T
print(A_inv)
torch.mv(A_inv, timeSec)                          # least-squares solution [kernel, bias]
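# Cross-check -- a minimal sketch, assuming A and timeSec from above; the
# name sol is introduced here for illustration. torch.linalg.lstsq solves
# the same least-squares problem directly, without forming the pseudoinverse,
# and should agree with torch.mv(A_inv, timeSec) and, approximately, with
# the kernel and bias found by gradient descent.
sol = torch.linalg.lstsq(A, timeSec.unsqueeze(1)).solution.squeeze()
print(sol)  # [kernel, bias]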