#!/usr/bin/env python
# coding: utf-8

# ### Hello Torch
# https://pytorch.org/tutorials/beginner/pytorch_with_examples.html

# In[18]:

import torch
import numpy as np
import pandas as pd

tnn = torch.nn
topt = torch.optim

# ### Data for training

# In[19]:

# sample data: hours studied and hours slept (scaled by 24),
# and the resulting score (scaled by 10)
study = np.array([2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, 8, 8, 8, 8]) / 24
sleep = np.array([2, 4, 6, 8, 2, 4, 6, 8, 2, 4, 6, 8, 2, 4, 6, 8]) / 24
score = np.array([4, 3, 2, 1, 6, 5, 4, 3, 8, 7, 6, 5, 10, 9, 8, 7]) / 10

# ### Define network

# In[28]:

# input size, hidden-layer size, output size
I, H, O = 2, 3, 1

# input tensors: ip_train is (2, 16), op_train is (1, 16)
ip_train = torch.FloatTensor([study, sleep])
op_train = torch.FloatTensor([score])

# define model: a two-layer network with sigmoid activations
model = tnn.Sequential(
    tnn.Linear(I, H),
    tnn.Sigmoid(),
    tnn.Linear(H, O),
    tnn.Sigmoid()
)

# define the loss function
loss_fn = tnn.MSELoss()

# define the optimizer
optimizer = topt.Adam(model.parameters(), lr=0.1)

# ### Training loop

# In[30]:

loss_values = []
for t in range(200):
    # forward pass on one sample, cycling through all 16
    op_pred = model(ip_train.transpose(0, 1)[t % 16])
    # calculate the loss
    loss = loss_fn(op_pred, op_train.transpose(0, 1)[t % 16])
    loss_values.append(loss.item())
    if t % 5 == 0:
        print(t, loss.item())
    # zero the gradients before backpropagating
    optimizer.zero_grad()
    # backpropagate the loss
    loss.backward()
    # update the parameters based on the gradients
    optimizer.step()

# plot the loss once training completes; ideally it trends toward 0
df = pd.DataFrame({'loss': loss_values})
df.plot()

# ### Test model

# In[31]:

# test the model on a single (study, sleep) pair:
# a full day of study and one hour of sleep
test_study = 24 / 24
test_sleep = 1 / 24
with torch.no_grad():
    test_score = model(torch.FloatTensor([test_study, test_sleep])).item()
print(test_score * 10)

# In[32]:

# run the model against the training data and compare
df = pd.DataFrame({'study': study, 'sleep': sleep, 'score': score})
with torch.no_grad():
    predicted_score = model(torch.FloatTensor(np.array([study, sleep]).T))
df['predicted_score'] = predicted_score.numpy().ravel()
df.plot(y=['score', 'predicted_score'])
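
# ### Full-batch training (alternative sketch)
# The loop above feeds one sample per step. Since MSELoss averages over the
# batch by default, the same network can instead be trained on all 16 samples
# at once. This variant is not part of the original tutorial; model2,
# optimizer2, x_batch and y_batch are names introduced here for illustration.

# In[ ]:

model2 = tnn.Sequential(
    tnn.Linear(I, H),
    tnn.Sigmoid(),
    tnn.Linear(H, O),
    tnn.Sigmoid()
)
optimizer2 = topt.Adam(model2.parameters(), lr=0.1)

x_batch = ip_train.transpose(0, 1)  # shape (16, 2)
y_batch = op_train.transpose(0, 1)  # shape (16, 1)

for t in range(200):
    # forward pass over the whole batch at once
    op_pred = model2(x_batch)
    # mean squared error averaged over all samples
    loss = loss_fn(op_pred, y_batch)
    optimizer2.zero_grad()
    loss.backward()
    optimizer2.step()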
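
# ### Save and reload the model (sketch)
# A common next step is persisting the learned weights with torch.save and
# restoring them into a fresh network of the same shape via load_state_dict.
# The filename 'hello_torch.pt' is an arbitrary choice for this sketch.

# In[ ]:

torch.save(model.state_dict(), 'hello_torch.pt')

model_restored = tnn.Sequential(
    tnn.Linear(I, H),
    tnn.Sigmoid(),
    tnn.Linear(H, O),
    tnn.Sigmoid()
)
model_restored.load_state_dict(torch.load('hello_torch.pt'))
model_restored.eval()

# the restored model should reproduce the earlier single-sample prediction
with torch.no_grad():
    print(model_restored(torch.FloatTensor([test_study, test_sleep])).item() * 10)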