9.5.3 Exercise
Improve the network above to use the following configuration:
- Conv2d Layer * 3
- ReLU Layer * 3
- MaxPooling Layer * 3
- Linear Layer * 3
9.5.4 Code 2
Only the network definition needs to change, as follows:
```python
import torch
import torch.nn.functional as F

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 16, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(16, 32, kernel_size=5)
        self.conv3 = torch.nn.Conv2d(32, 64, kernel_size=3)
        self.pooling = torch.nn.MaxPool2d(2)
        self.fc1 = torch.nn.Linear(64, 32)
        self.fc2 = torch.nn.Linear(32, 16)
        self.fc3 = torch.nn.Linear(16, 10)

    def forward(self, x):
        batch_size = x.size(0)
        x = self.pooling(F.relu(self.conv1(x)))
        x = self.pooling(F.relu(self.conv2(x)))
        x = self.pooling(F.relu(self.conv3(x)))
        x = x.view(batch_size, -1)  # flatten: 64 x 1 x 1 -> 64 features
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
```
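As a quick sanity check (a sketch assuming the Net class above), a dummy MNIST-sized batch can be traced through the network to confirm that the flattened feature size really is 64:

```python
import torch

net = Net()                        # the three-conv network defined above
dummy = torch.randn(1, 1, 28, 28)  # one MNIST-sized image
print(net(dummy).shape)            # expected: torch.Size([1, 10])
# Shape trace: 28x28 -> conv1(k=5) 24x24 -> pool 12x12
#           -> conv2(k=5)  8x8    -> pool  4x4
#           -> conv3(k=3)  2x2    -> pool  1x1  => 64 * 1 * 1 = 64 features
```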
```
[1, 300] loss: 0.345
[1, 600] loss: 0.273
[1, 900] loss: 0.069
Accuracy on test set: 91 % [9194/10000]
[2, 300] loss: 0.034
[2, 600] loss: 0.025
[2, 900] loss: 0.020
Accuracy on test set: 96 % [9670/10000]
[3, 300] loss: 0.015
[3, 600] loss: 0.015
[3, 900] loss: 0.014
Accuracy on test set: 97 % [9754/10000]
[4, 300] loss: 0.011
[4, 600] loss: 0.010
[4, 900] loss: 0.011
Accuracy on test set: 98 % [9810/10000]
[5, 300] loss: 0.008
[5, 600] loss: 0.009
[5, 900] loss: 0.009
Accuracy on test set: 98 % [9808/10000]
[6, 300] loss: 0.008
[6, 600] loss: 0.007
[6, 900] loss: 0.008
Accuracy on test set: 98 % [9859/10000]
[7, 300] loss: 0.006
[7, 600] loss: 0.006
[7, 900] loss: 0.007
Accuracy on test set: 98 % [9862/10000]
[8, 300] loss: 0.005
[8, 600] loss: 0.006
[8, 900] loss: 0.006
Accuracy on test set: 97 % [9784/10000]
[9, 300] loss: 0.005
[9, 600] loss: 0.005
[9, 900] loss: 0.006
Accuracy on test set: 98 % [9842/10000]
[10, 300] loss: 0.005
[10, 600] loss: 0.005
[10, 900] loss: 0.004
Accuracy on test set: 98 % [9878/10000]
[91.94, 96.7, 97.54, 98.1, 98.08, 98.59, 98.62, 97.84, 98.42, 98.78]
```
9.6 GoogLeNet
Note the block types in the GoogLeNet architecture diagram: Convolution, Pooling, Softmax, and Other.
Coding the network directly from the diagram above would involve a lot of repetition; to reduce code redundancy, factor the repeated structures into functions/classes wherever possible, as sketched below.
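As a minimal sketch of that idea (the `ConvReLU` name is invented here for illustration, not part of the GoogLeNet code below), a repeated Conv → ReLU pattern can be wrapped in a small reusable module:

```python
import torch.nn as nn

class ConvReLU(nn.Module):
    """A convolution followed by ReLU, reusable wherever the pattern repeats."""
    def __init__(self, in_channels, out_channels, **kwargs):
        super(ConvReLU, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)
        self.relu = nn.ReLU()

    def forward(self, x):
        return self.relu(self.conv(x))

# e.g. ConvReLU(1, 16, kernel_size=5) replaces a Conv2d + F.relu pair
```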
9.6.1 Inception Module
When building a neural network, some hyperparameters are hard to choose. Take the convolution kernel: which kernel size works best?
GoogLeNet applies several kernel sizes ($1 \times 1$, $3 \times 3$, $5 \times 5$, ...) in parallel within a single block, lays the results side by side, and lets training automatically discover the best combination.
- Concatenate: joins the branch outputs into one tensor along the channel dimension.
- Average Pooling: every branch must keep the input's width and height unchanged so the outputs can be concatenated; this is arranged with suitable padding and stride (see the short demo after this list).
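A quick shape check of both points (tensor sizes here are arbitrary, chosen only for illustration): `avg_pool2d` with `kernel_size=3, stride=1, padding=1` preserves width and height, and `torch.cat` with `dim=1` stacks along the channel dimension:

```python
import torch
import torch.nn.functional as F

x = torch.randn(1, 16, 12, 12)  # (batch, channels, w, h)
pooled = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
print(pooled.shape)             # torch.Size([1, 16, 12, 12]) - w, h unchanged

y = torch.randn(1, 24, 12, 12)
print(torch.cat([x, y], dim=1).shape)  # torch.Size([1, 40, 12, 12]) - channels add up
```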
9.6.2 1×1 Convolution
Why introduce the $1 \times 1$ convolution? Its main job is to change the number of channels cheaply: placing a $1 \times 1$ convolution before an expensive $5 \times 5$ convolution first reduces the channel count, which cuts the total number of multiplications dramatically while leaving the spatial size unchanged.
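A back-of-the-envelope count makes this concrete. The numbers below (a 28×28 feature map, 192 input channels, 32 output channels, a 16-channel bottleneck) are illustrative assumptions, not taken from the network in this section:

```python
w = h = 28            # feature-map size (assumed)
c_in, c_out = 192, 32 # input/output channels (assumed)
c_mid = 16            # bottleneck channels for the 1x1 conv (assumed)

direct = 5 * 5 * w * h * c_in * c_out          # one 5x5 conv directly
bottleneck = (1 * 1 * w * h * c_in * c_mid     # 1x1 conv first...
              + 5 * 5 * w * h * c_mid * c_out) # ...then the 5x5 conv
print(direct)      # 120422400
print(bottleneck)  # 12443648 -> roughly a 10x reduction in multiplications
```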
9.6.3 Implementation of Inception Module
Computation proceeds from bottom to top in the diagram; the code below handles the branches column by column (layer definitions belong in `__init__`, the calls in `forward`):
```python
# First column: pooling branch
self.branch_pool = nn.Conv2d(in_channels, 24, kernel_size=1)

branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)

# Second column: 1x1 branch
self.branch1x1 = nn.Conv2d(in_channels, 16, kernel_size=1)

branch1x1 = self.branch1x1(x)

# Third column: 5x5 branch (1x1 bottleneck, then 5x5 with padding=2)
self.branch5x5_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
self.branch5x5_2 = nn.Conv2d(16, 24, kernel_size=5, padding=2)

branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)

# Fourth column: 3x3 branch (1x1 bottleneck, then two 3x3 with padding=1)
self.branch3x3_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
self.branch3x3_2 = nn.Conv2d(16, 24, kernel_size=3, padding=1)
self.branch3x3_3 = nn.Conv2d(24, 24, kernel_size=3, padding=1)

branch3x3 = self.branch3x3_1(x)
branch3x3 = self.branch3x3_2(branch3x3)
branch3x3 = self.branch3x3_3(branch3x3)
```
Then concatenate the four branches:
```python
outputs = [branch1x1, branch5x5, branch3x3, branch_pool]
return torch.cat(outputs, dim=1)  # 16 + 24 + 24 + 24 = 88 output channels
```
Using the Inception module — the complete InceptionA class:
```python
class InceptionA(nn.Module):
    def __init__(self, in_channels):
        super(InceptionA, self).__init__()
        self.branch1x1 = nn.Conv2d(in_channels, 16, kernel_size=1)

        self.branch5x5_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch5x5_2 = nn.Conv2d(16, 24, kernel_size=5, padding=2)

        self.branch3x3_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch3x3_2 = nn.Conv2d(16, 24, kernel_size=3, padding=1)
        self.branch3x3_3 = nn.Conv2d(24, 24, kernel_size=3, padding=1)

        self.branch_pool = nn.Conv2d(in_channels, 24, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)
        branch3x3 = self.branch3x3_3(branch3x3)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch5x5, branch3x3, branch_pool]
        return torch.cat(outputs, dim=1)
```
The network then stacks two Inception blocks between two plain convolutions:

```python
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(88, 20, kernel_size=5)  # each InceptionA outputs 88 channels

        self.incep1 = InceptionA(in_channels=10)
        self.incep2 = InceptionA(in_channels=20)

        self.mp = nn.MaxPool2d(2)
        self.fc = nn.Linear(1408, 10)  # 88 channels * 4 * 4 spatial = 1408

    def forward(self, x):
        in_size = x.size(0)
        x = F.relu(self.mp(self.conv1(x)))  # (1, 28, 28) -> (10, 12, 12)
        x = self.incep1(x)                  # -> (88, 12, 12)
        x = F.relu(self.mp(self.conv2(x)))  # -> (20, 4, 4)
        x = self.incep2(x)                  # -> (88, 4, 4)
        x = x.view(in_size, -1)             # flatten to 88 * 4 * 4 = 1408
        x = self.fc(x)
        return x
```
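The 1408 input size of the fc layer can be verified rather than hand-computed; a minimal check, assuming the classes and imports above, runs a dummy batch through everything before the flatten:

```python
import torch
import torch.nn.functional as F

net = Net()
x = torch.randn(1, 1, 28, 28)
x = F.relu(net.mp(net.conv1(x)))  # -> (1, 10, 12, 12)
x = net.incep1(x)                 # -> (1, 88, 12, 12)
x = F.relu(net.mp(net.conv2(x)))  # -> (1, 20, 4, 4)
x = net.incep2(x)                 # -> (1, 88, 4, 4)
print(x.view(1, -1).shape)        # torch.Size([1, 1408]) = 88 * 4 * 4
```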
Full code:
```python
import torch
from torch import nn
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt

# 1. Prepare the dataset
batch_size = 64
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])
train_dataset = datasets.MNIST(root='../data/mnist', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='../data/mnist', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)

# 2. Build the model
# Define the Inception block
class InceptionA(nn.Module):
    def __init__(self, in_channels):
        super(InceptionA, self).__init__()
        self.branch1X1 = nn.Conv2d(in_channels, 16, kernel_size=1)

        # padding keeps the width and height unchanged
        self.branch5X5_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch5X5_2 = nn.Conv2d(16, 24, kernel_size=5, padding=2)

        self.branch3X3_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch3X3_2 = nn.Conv2d(16, 24, kernel_size=3, padding=1)
        self.branch3X3_3 = nn.Conv2d(24, 24, kernel_size=3, padding=1)

        self.branch_pool = nn.Conv2d(in_channels, 24, kernel_size=1)

    def forward(self, x):
        branch1X1 = self.branch1X1(x)

        branch5X5 = self.branch5X5_1(x)
        branch5X5 = self.branch5X5_2(branch5X5)

        branch3X3 = self.branch3X3_1(x)
        branch3X3 = self.branch3X3_2(branch3X3)
        branch3X3 = self.branch3X3_3(branch3X3)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1X1, branch5X5, branch3X3, branch_pool]
        # tensors are (b, c, w, h); dim=1 concatenates along the channel dimension
        return torch.cat(outputs, dim=1)

# Define the model
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        # 88 = 24*3 + 16
        self.conv2 = nn.Conv2d(88, 20, kernel_size=5)

        self.incep1 = InceptionA(in_channels=10)
        self.incep2 = InceptionA(in_channels=20)

        self.mp = nn.MaxPool2d(2)
        # To size the fc layer: leave it (and the last two lines of forward) out
        # at first, run an arbitrary input through the model, inspect the output
        # shape, then define the Linear layer accordingly.
        self.fc = nn.Linear(1408, 10)

    def forward(self, x):
        in_size = x.size(0)
        x = F.relu(self.mp(self.conv1(x)))  # channels: 1 -> 10
        x = self.incep1(x)                  # channels: 10 -> 88
        x = F.relu(self.mp(self.conv2(x)))  # channels: 88 -> 20
        x = self.incep2(x)                  # channels: 20 -> 88
        x = x.view(in_size, -1)
        x = self.fc(x)
        return x

model = Net()
# Move the model to the GPU if available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

# 3. Construct the loss function and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

# 4. Define the training function
def train(epoch):
    running_loss = 0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        # Move the training tensors to the same device as the model
        inputs, target = inputs.to(device), target.to(device)
        optimizer.zero_grad()

        # Forward, backward, update
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print('[%d, %3d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0

# 5. Define the test function
accuracy = []

def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            # Move the test tensors to the device as well
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, dim=1)
            total += labels.size(0)
            # Count the correctly-predicted samples
            correct += (predicted == labels).sum().item()
    print('Accuracy on test set: %d %% [%d/%d]' % (100 * correct / total, correct, total))
    accuracy.append(100 * correct / total)

if __name__ == '__main__':
    for epoch in range(10):
        train(epoch)
        test()
    print(accuracy)
    plt.plot(range(10), accuracy)
    plt.xlabel("Epoch")
    plt.ylabel("Accuracy")
    plt.grid()  # grid lines
    plt.show()
```
```
[1, 300] loss: 0.836
[1, 600] loss: 0.196
[1, 900] loss: 0.145
Accuracy on test set: 96 % [9690/10000]
[2, 300] loss: 0.106
[2, 600] loss: 0.099
[2, 900] loss: 0.091
Accuracy on test set: 97 % [9785/10000]
[3, 300] loss: 0.075
[3, 600] loss: 0.078
[3, 900] loss: 0.071
Accuracy on test set: 98 % [9831/10000]
[4, 300] loss: 0.064
[4, 600] loss: 0.067
[4, 900] loss: 0.061
Accuracy on test set: 98 % [9845/10000]
[5, 300] loss: 0.057
[5, 600] loss: 0.058
[5, 900] loss: 0.052
Accuracy on test set: 98 % [9846/10000]
[6, 300] loss: 0.051
[6, 600] loss: 0.049
[6, 900] loss: 0.050
Accuracy on test set: 98 % [9852/10000]
[7, 300] loss: 0.047
[7, 600] loss: 0.043
[7, 900] loss: 0.045
Accuracy on test set: 98 % [9848/10000]
[8, 300] loss: 0.039
[8, 600] loss: 0.044
[8, 900] loss: 0.042
Accuracy on test set: 98 % [9871/10000]
[9, 300] loss: 0.041
[9, 600] loss: 0.034
[9, 900] loss: 0.041
Accuracy on test set: 98 % [9866/10000]
[10, 300] loss: 0.032
[10, 600] loss: 0.038
[10, 900] loss: 0.037
Accuracy on test set: 98 % [9881/10000]
[96.9, 97.85, 98.31, 98.45, 98.46, 98.52, 98.48, 98.71, 98.66, 98.81]
```