1. Implementing a Softmax Regression Model in PyTorch
Let's implement a softmax regression model using PyTorch. First, import the required packages and modules.
import torch
from torch import nn
from torch.nn import init
import numpy as np
import sys
import d2lzh_pytorch as d2l
1.1 Getting and Reading the Data
We continue to use the Fashion-MNIST dataset and the batch size set in the previous article.
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
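If you are curious what this helper hides, below is a minimal sketch of what a loader like d2l.load_data_fashion_mnist typically does, built on torchvision; the actual d2lzh_pytorch implementation may differ in details such as the download path or the number of worker processes.

import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

def load_data_fashion_mnist_sketch(batch_size, root='~/Datasets/FashionMNIST'):
    # ToTensor() converts PIL images to float tensors in [0, 1] of shape (1, 28, 28)
    transform = transforms.ToTensor()
    mnist_train = torchvision.datasets.FashionMNIST(
        root=root, train=True, download=True, transform=transform)
    mnist_test = torchvision.datasets.FashionMNIST(
        root=root, train=False, download=True, transform=transform)
    train_iter = DataLoader(mnist_train, batch_size=batch_size, shuffle=True)
    test_iter = DataLoader(mnist_test, batch_size=batch_size, shuffle=False)
    return train_iter, test_iter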
1.2 Defining and Initializing the Model
Since the output layer of softmax regression is a fully connected layer, a single linear module suffices. Because each data batch x returned earlier has shape (batch_size, 1, 28, 28), we first use view() to reshape x to (batch_size, 784) before feeding it into the fully connected layer.
num_inputs = 784
num_outputs = 10

class LinearNet(nn.Module):
    def __init__(self, num_inputs, num_outputs):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(num_inputs, num_outputs)

    def forward(self, x):  # x shape: (batch, 1, 28, 28)
        y = self.linear(x.view(x.shape[0], -1))
        return y

net = LinearNet(num_inputs, num_outputs)
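As a quick sanity check, we can feed a dummy batch through the network; the random input below is purely illustrative.

X = torch.randn(2, 1, 28, 28)  # dummy batch of 2 "images"
print(net(X).shape)  # torch.Size([2, 10]): one score per class and sample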
Let's factor this reshaping of x into a custom FlattenLayer.
class FlattenLayer(nn.Module):
    def __init__(self):
        super(FlattenLayer, self).__init__()

    def forward(self, x):  # x shape: (batch, *, *, ...)
        return x.view(x.shape[0], -1)
This way we can define our model more conveniently:
from collections import OrderedDict

net = nn.Sequential(
    # FlattenLayer(),
    # nn.Linear(num_inputs, num_outputs)
    OrderedDict([
        ('flatten', FlattenLayer()),
        ('linear', nn.Linear(num_inputs, num_outputs))
    ])
)
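As an aside, PyTorch 1.2 and later ship a built-in nn.Flatten, which by default flattens every dimension except the batch dimension, so the custom FlattenLayer can be swapped out if your version supports it:

net = nn.Sequential(
    OrderedDict([
        ('flatten', nn.Flatten()),  # built-in equivalent of FlattenLayer
        ('linear', nn.Linear(num_inputs, num_outputs))
    ])
)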
Then we randomly initialize the model's weight parameters from a normal distribution with mean 0 and standard deviation 0.01, and set the bias to 0.
init.normal_(net.linear.weight, mean=0, std=0.01)
init.constant_(net.linear.bias, val=0)
1.3 Softmax and the Cross-Entropy Loss Function
PyTorch provides CrossEntropyLoss, a function that combines the softmax operation and the cross-entropy loss computation; computing the two together is numerically more stable than applying softmax and taking the logarithm separately.
loss = nn.CrossEntropyLoss()
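Under the hood, CrossEntropyLoss is equivalent to log_softmax followed by the negative log-likelihood loss; the quick check below, with hypothetical random scores and labels, shows that the two compute the same value.

import torch.nn.functional as F

logits = torch.randn(3, 10)       # hypothetical unnormalized scores for 3 samples
labels = torch.tensor([0, 4, 9])  # hypothetical class indices
print(loss(logits, labels))                              # CrossEntropyLoss
print(F.nll_loss(F.log_softmax(logits, dim=1), labels))  # same value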
1.4 Defining the Optimization Algorithm
We use mini-batch stochastic gradient descent with a learning rate of 0.1 as the optimization algorithm.
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
1.5 Training the Model
Next, we use the training function defined in the previous section to train the model.
num_epochs = 5
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)
Output:
epoch 1, loss 0.0031, train acc 0.745, test acc 0.790
epoch 2, loss 0.0022, train acc 0.812, test acc 0.807
epoch 3, loss 0.0021, train acc 0.825, test acc 0.806
epoch 4, loss 0.0020, train acc 0.832, test acc 0.810
epoch 5, loss 0.0019, train acc 0.838, test acc 0.823
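If you don't have the d2lzh_pytorch package at hand, the sketch below shows roughly what a train_ch3-style loop does; evaluate_accuracy_sketch is a simplified stand-in, and the real helper may differ in details.

def evaluate_accuracy_sketch(data_iter, net):
    # Fraction of correctly classified samples over the whole iterator
    acc_sum, n = 0.0, 0
    with torch.no_grad():
        for X, y in data_iter:
            acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
            n += y.shape[0]
    return acc_sum / n

def train_sketch(net, train_iter, test_iter, loss, num_epochs, optimizer):
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y)
            optimizer.zero_grad()  # clear gradients accumulated from the last step
            l.backward()           # backpropagate through the batch loss
            optimizer.step()       # apply the SGD update
            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).float().sum().item()
            n += y.shape[0]
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n,
                 evaluate_accuracy_sketch(test_iter, net)))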
1.6 Complete Code
import torch
from torch import nn
from torch.nn import init
import numpy as np
import sys
import d2lzh_pytorch as d2l

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

num_inputs = 784
num_outputs = 10

class LinearNet(nn.Module):
    def __init__(self, num_inputs, num_outputs):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(num_inputs, num_outputs)

    def forward(self, x):  # x shape: (batch, 1, 28, 28)
        y = self.linear(x.view(x.shape[0], -1))
        return y

# Flatten the images
class FlattenLayer(nn.Module):
    def __init__(self):
        super(FlattenLayer, self).__init__()

    def forward(self, x):  # x shape: (batch, *, *, ...)
        return x.view(x.shape[0], -1)

# Define the model
from collections import OrderedDict

net = nn.Sequential(
    # FlattenLayer(),
    # nn.Linear(num_inputs, num_outputs)
    OrderedDict([
        ('flatten', FlattenLayer()),
        ('linear', nn.Linear(num_inputs, num_outputs))
    ])
)

# Initialize the model parameters
init.normal_(net.linear.weight, mean=0, std=0.01)
init.constant_(net.linear.bias, val=0)

# Loss function (softmax + cross-entropy)
loss = nn.CrossEntropyLoss()

# Mini-batch stochastic gradient descent with a learning rate of 0.1
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)

num_epochs = 5
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)