9.7 Residual Net
如果将 3×3 的卷积一直堆下去,该神经网络的性能会不会更好?
Paper:He K, Zhang X, Ren S, et al. Deep Residual Learning for Image Recognition[C]// IEEE Conference on Computer Vision and Pattern Recognition. IEEE Computer Society, 2016:770-778.
研究发现:在普通(plain)网络中,20 层网络的错误率反而低于 56 层网络的错误率,所以并不是层数越多,性能越好。为解决 梯度消失 / 网络退化 的问题,见下图:
多一个 跳连接 :
9.7.1 Residual Network
class Net(nn.Module):
    """MNIST classifier interleaving 5x5 convolutions with residual blocks.

    Pipeline: conv1 -> relu -> maxpool -> rblock1 -> conv2 -> relu ->
    maxpool -> rblock2 -> flatten -> fully-connected layer (10 classes).
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=5)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=5)
        self.mp = nn.MaxPool2d(2)
        self.rblock1 = ResidualBlock(16)
        self.rblock2 = ResidualBlock(32)
        # For a 28x28 input: conv(5) -> 24, pool -> 12, conv(5) -> 8, pool -> 4;
        # 32 channels * 4 * 4 = 512 flattened features.
        self.fc = nn.Linear(512, 10)

    def forward(self, x):
        batch = x.size(0)
        out = self.rblock1(self.mp(F.relu(self.conv1(x))))
        out = self.rblock2(self.mp(F.relu(self.conv2(out))))
        return self.fc(out.view(batch, -1))
9.7.2 Residual Block
class ResidualBlock(nn.Module):
    """Identity-skip residual block: relu(x + conv2(relu(conv1(x)))).

    Both convolutions are 3x3 with padding=1, so spatial size and the
    channel count are preserved and the skip addition is shape-safe.
    """

    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.channels = channels
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        residual = self.conv2(F.relu(self.conv1(x)))
        return F.relu(x + residual)
9.7.3 Code 3
import torch
from torch import nn
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt

# MNIST data pipeline; (0.1307, 0.3081) are the standard MNIST mean/std.
batch_size = 64
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])
train_dataset = datasets.MNIST(root='../data/mnist', train=True,
                               download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='../data/mnist', train=False,
                              download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)


class ResidualBlock(nn.Module):
    """Identity-skip residual block: relu(x + conv2(relu(conv1(x))))."""

    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.channels = channels
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        y = F.relu(self.conv1(x))
        y = self.conv2(y)
        return F.relu(x + y)


class Net(nn.Module):
    """MNIST classifier: two conv+pool stages, each followed by a residual block."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=5)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=5)
        self.mp = nn.MaxPool2d(2)
        self.rblock1 = ResidualBlock(16)
        self.rblock2 = ResidualBlock(32)
        # Spatial size: 28 -> 24 -> 12 -> 8 -> 4; 32 * 4 * 4 = 512 features.
        self.fc = nn.Linear(512, 10)

    def forward(self, x):
        in_size = x.size(0)
        x = self.mp(F.relu(self.conv1(x)))
        x = self.rblock1(x)
        x = self.mp(F.relu(self.conv2(x)))
        x = self.rblock2(x)
        x = x.view(in_size, -1)
        x = self.fc(x)
        return x


model = Net()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

# CrossEntropyLoss expects raw logits, so Net's forward applies no softmax.
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)


def train(epoch):
    """Run one training epoch, printing the mean loss every 300 batches."""
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        inputs, target = inputs.to(device), target.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print('[%d, %3d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0


accuracy = []


def test():
    """Evaluate on the test set, print and record the accuracy (percent)."""
    correct = 0
    total = 0
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        for data in test_loader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            # torch.max over the class dim returns (values, argmax indices).
            # Operate on the tensor directly instead of the deprecated `.data`.
            _, predicted = torch.max(outputs, dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy on test set: %d %% [%d/%d]' % (100 * correct / total, correct, total))
    accuracy.append(100 * correct / total)


if __name__ == '__main__':
    for epoch in range(10):
        train(epoch)
        test()
    print(accuracy)
    plt.plot(range(10), accuracy)
    plt.xlabel("Epoch")
    plt.ylabel("Accuracy")
    plt.grid()
    plt.show()
[1, 300] loss: 0.563 [1, 600] loss: 0.157 [1, 900] loss: 0.111 Accuracy on test set: 97 % [9721/10000] [2, 300] loss: 0.085 [2, 600] loss: 0.077 [2, 900] loss: 0.081 Accuracy on test set: 98 % [9831/10000] [3, 300] loss: 0.063 [3, 600] loss: 0.059 [3, 900] loss: 0.053 Accuracy on test set: 98 % [9841/10000] [4, 300] loss: 0.047 [4, 600] loss: 0.052 [4, 900] loss: 0.042 Accuracy on test set: 98 % [9877/10000] [5, 300] loss: 0.039 [5, 600] loss: 0.037 [5, 900] loss: 0.041 Accuracy on test set: 98 % [9871/10000] [6, 300] loss: 0.035 [6, 600] loss: 0.032 [6, 900] loss: 0.035 Accuracy on test set: 98 % [9895/10000] [7, 300] loss: 0.029 [7, 600] loss: 0.032 [7, 900] loss: 0.029 Accuracy on test set: 98 % [9899/10000] [8, 300] loss: 0.026 [8, 600] loss: 0.028 [8, 900] loss: 0.025 Accuracy on test set: 98 % [9892/10000] [9, 300] loss: 0.021 [9, 600] loss: 0.027 [9, 900] loss: 0.024 Accuracy on test set: 98 % [9886/10000] [10, 300] loss: 0.019 [10, 600] loss: 0.021 [10, 900] loss: 0.023 Accuracy on test set: 99 % [9902/10000] [97.21, 98.31, 98.41, 98.77, 98.71, 98.95, 98.99, 98.92, 98.86, 99.02]
9.7.4 Reading Paper
Paper 1:He K, Zhang X, Ren S, et al. Identity Mappings in Deep Residual Networks[C]// European Conference on Computer Vision. Springer, 2016: 630-645.
constant scaling:
class ResidualBlock(nn.Module):
    """Constant-scaling residual block: relu(0.5 * (x + F(x))).

    F(x) = conv2(relu(conv1(x))). Bug fix: the second convolution must
    consume the activated output of the first (`y`), not the raw input
    `x` — the original discarded conv1's output entirely, so conv1 was
    dead weight and received no gradient.
    """

    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.channels = channels
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        y = F.relu(self.conv1(x))
        y = self.conv2(y)  # was self.conv2(x): conv1's output was unused
        # 0.5/0.5 constant scaling of both shortcut and residual paths,
        # per the "Identity Mappings in Deep Residual Networks" variants.
        z = 0.5 * (x + y)
        return F.relu(z)
[1, 300] loss: 1.204 [1, 600] loss: 0.243 [1, 900] loss: 0.165 Accuracy on test set: 96 % [9637/10000] [2, 300] loss: 0.121 [2, 600] loss: 0.105 [2, 900] loss: 0.099 Accuracy on test set: 97 % [9777/10000] [3, 300] loss: 0.085 [3, 600] loss: 0.076 [3, 900] loss: 0.069 Accuracy on test set: 98 % [9815/10000] [4, 300] loss: 0.061 [4, 600] loss: 0.063 [4, 900] loss: 0.063 Accuracy on test set: 98 % [9849/10000] [5, 300] loss: 0.053 [5, 600] loss: 0.052 [5, 900] loss: 0.052 Accuracy on test set: 98 % [9853/10000] [6, 300] loss: 0.041 [6, 600] loss: 0.051 [6, 900] loss: 0.047 Accuracy on test set: 98 % [9871/10000] [7, 300] loss: 0.040 [7, 600] loss: 0.044 [7, 900] loss: 0.043 Accuracy on test set: 98 % [9869/10000] [8, 300] loss: 0.039 [8, 600] loss: 0.038 [8, 900] loss: 0.037 Accuracy on test set: 98 % [9859/10000] [9, 300] loss: 0.031 [9, 600] loss: 0.039 [9, 900] loss: 0.036 Accuracy on test set: 98 % [9875/10000] [10, 300] loss: 0.035 [10, 600] loss: 0.031 [10, 900] loss: 0.033 Accuracy on test set: 98 % [9888/10000] [96.37, 97.77, 98.15, 98.49, 98.53, 98.71, 98.69, 98.59, 98.75, 98.88]
conv shortcut:
class ResidualBlock(nn.Module):
    """Residual block with a 1x1 convolution on the shortcut path.

    Output: relu(conv3(x) + conv2(relu(conv1(x)))), where conv3 is a
    learned 1x1 projection replacing the identity skip. Bug fix: the
    second convolution must consume the activated output of the first
    (`y`), not the raw input `x` — the original discarded conv1's output
    entirely, so conv1 received no gradient.
    """

    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.channels = channels
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(channels, channels, kernel_size=1)

    def forward(self, x):
        y = F.relu(self.conv1(x))
        y = self.conv2(y)  # was self.conv2(x): conv1's output was unused
        z = self.conv3(x) + y  # 1x1 "conv shortcut" instead of identity
        return F.relu(z)
[1, 300] loss: 0.760 [1, 600] loss: 0.170 [1, 900] loss: 0.119 Accuracy on test set: 97 % [9717/10000] [2, 300] loss: 0.092 [2, 600] loss: 0.084 [2, 900] loss: 0.075 Accuracy on test set: 98 % [9826/10000] [3, 300] loss: 0.064 [3, 600] loss: 0.063 [3, 900] loss: 0.055 Accuracy on test set: 98 % [9817/10000] [4, 300] loss: 0.048 [4, 600] loss: 0.047 [4, 900] loss: 0.048 Accuracy on test set: 98 % [9851/10000] [5, 300] loss: 0.039 [5, 600] loss: 0.039 [5, 900] loss: 0.044 Accuracy on test set: 98 % [9864/10000] [6, 300] loss: 0.035 [6, 600] loss: 0.033 [6, 900] loss: 0.038 Accuracy on test set: 98 % [9890/10000] [7, 300] loss: 0.030 [7, 600] loss: 0.030 [7, 900] loss: 0.030 Accuracy on test set: 98 % [9881/10000] [8, 300] loss: 0.027 [8, 600] loss: 0.026 [8, 900] loss: 0.029 Accuracy on test set: 98 % [9884/10000] [9, 300] loss: 0.021 [9, 600] loss: 0.026 [9, 900] loss: 0.025 Accuracy on test set: 98 % [9894/10000] [10, 300] loss: 0.019 [10, 600] loss: 0.019 [10, 900] loss: 0.025 Accuracy on test set: 98 % [9897/10000] [97.17, 98.26, 98.17, 98.51, 98.64, 98.9, 98.81, 98.84, 98.94, 98.97]
Paper 2:Huang G, Liu Z, Van Der Maaten L, et al. Densely Connected Convolutional Networks[J]. 2016:2261-2269.