Below we use nn to implement the multilayer perceptron from the previous section. First, import the required packages and modules.
import d2ltorch as d2lt
import torch
from torch import nn, optim
The only difference from softmax regression is that we add one more fully connected layer as the hidden layer. It has 256 hidden units and uses the ReLU function as its activation function.
class MyMLP(nn.Module):
    def __init__(self, **kwargs):
        super(MyMLP, self).__init__(**kwargs)
        self.mlp = nn.Sequential(
            nn.Linear(28 * 28, 256),  # hidden layer with 256 units
            nn.ReLU(),                # activation for the hidden layer
            nn.Linear(256, 10)        # output layer with 10 classes
        )

    def forward(self, x):
        # Flatten each 28x28 image into a 784-dimensional vector.
        return self.mlp(x.reshape(-1, 28 * 28))
net = MyMLP()
d2lt.params_init(net, nn.init.normal_, std=0.01)
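Here params_init is a helper from the d2ltorch package. If that package is unavailable, a minimal sketch of an equivalent initialization in plain PyTorch is shown below; the function name init_weights is ours, and we assume only the weights need the normal initialization, leaving biases at PyTorch's defaults (the helper may handle them differently).
def init_weights(m):
    # Draw each linear layer's weights from N(0, 0.01^2);
    # biases keep PyTorch's default initialization.
    if isinstance(m, nn.Linear):
        nn.init.normal_(m.weight, std=0.01)

net.apply(init_weights)  # recursively applies init_weights to every submodule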
We read the data and train the model with steps almost identical to those used to train softmax regression in the "Concise Implementation of Softmax Regression" section.
root = '~/dataset/'
batch_size = 256
train_iter, test_iter = d2lt.load_data_fashion_mnist(root, batch_size=batch_size)
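load_data_fashion_mnist is likewise a d2ltorch helper. A rough stand-in built directly on torchvision is sketched below, assuming torchvision is installed; download=True fetches the dataset into root on first use.
import torchvision
from torch.utils.data import DataLoader
from torchvision import transforms

trans = transforms.ToTensor()  # convert PIL images to float tensors in [0, 1]
mnist_train = torchvision.datasets.FashionMNIST(
    root=root, train=True, transform=trans, download=True)
mnist_test = torchvision.datasets.FashionMNIST(
    root=root, train=False, transform=trans, download=True)
train_iter = DataLoader(mnist_train, batch_size=batch_size, shuffle=True)
test_iter = DataLoader(mnist_test, batch_size=batch_size, shuffle=False)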
loss = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.5)
num_epochs = 5
d2lt.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None,
None, optimizer)
epoch 1, loss 0.0031, train acc 0.704, test acc 0.795
epoch 2, loss 0.0019, train acc 0.823, test acc 0.834
epoch 3, loss 0.0016, train acc 0.844, test acc 0.836
epoch 4, loss 0.0015, train acc 0.855, test acc 0.744
epoch 5, loss 0.0014, train acc 0.864, test acc 0.855
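The two None arguments passed to train_ch3 are placeholders for the manually managed parameters and learning rate used by the from-scratch version in an earlier section. For readers without the d2lt helpers, a simplified sketch of such a training loop follows; the function name train is ours, we assume the iterators yield (image, label) batches, and the reported loss may be scaled differently from the helper's output.
def train(net, train_iter, test_iter, loss, num_epochs, optimizer):
    # A simplified stand-in for d2lt.train_ch3.
    for epoch in range(num_epochs):
        total_loss, correct, n = 0.0, 0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            total_loss += l.item() * y.shape[0]
            correct += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        # Evaluate on the test set without tracking gradients.
        test_correct, test_n = 0, 0
        with torch.no_grad():
            for X, y in test_iter:
                test_correct += (net(X).argmax(dim=1) == y).sum().item()
                test_n += y.shape[0]
        print(f'epoch {epoch + 1}, loss {total_loss / n:.4f}, '
              f'train acc {correct / n:.3f}, test acc {test_correct / test_n:.3f}')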