Notes:
0. The first part of the network is called Feature Extraction; the latter part is called Classification.
1. Each convolution kernel must have as many channels as the input; the total number of kernels equals the number of output channels.
2. After convolution, C (channels) changes, while W (width) and H (height) may or may not change, depending on padding. After subsampling (pooling), C stays the same while W and H shrink.
3. Convolutional layers preserve the spatial information of the image.
4. Convolutional layers take and produce 4-D tensors (B, C, W, H); fully connected layers take and produce 2-D tensors (B, input_features).
5. Convolution (linear transform), activation (nonlinear transform), pooling; after repeating this several times, flatten with view and feed into the fully connected layers (see the sketch after this list).
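A quick sketch illustrating notes 4 and 5 (the shapes here are chosen only for illustration):

import torch

x = torch.randn(8, 16, 7, 7)          # 4-D feature map (B, C, W, H) after some conv/pool stages
x = x.view(x.size(0), -1)             # flatten to 2-D (B, C*W*H) = (8, 784)
fc = torch.nn.Linear(16 * 7 * 7, 10)  # the fully connected layer expects 2-D input
print(fc(x).shape)                    # torch.Size([8, 10])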
1. The convolution operation
import torch

# Define the input and output channel counts
in_channels, out_channels = 5, 10
# Define the image size
width, height = 100, 100
# Kernel size: 3 means a 3*3 square kernel; the kernel's channel count must
# match the input image's channel count, i.e. 5 here
kernel_size = 3
# Number of images fed in at once
batch_size = 1

input = torch.randn(batch_size, in_channels, width, height)
# out_channels determines the number of kernels: 10 kernels of size 3*3*5 in total
conv_layer = torch.nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size)
output = conv_layer(input)

print(input.shape)
print(output.shape)
print(conv_layer.weight.shape)
Output:
torch.Size([1, 5, 100, 100])
torch.Size([1, 10, 98, 98])
torch.Size([10, 5, 3, 3])
Sometimes we want the convolved feature map to be the same size as the original image; this is what the padding argument is for (default 0). For a 3x3 kernel, padding=1 keeps W and H unchanged.
conv_layer_with_padding = torch.nn.Conv2d(in_channels, out_channels, padding=1, kernel_size=kernel_size)
output_with_padding = conv_layer_with_padding(input)
print(output_with_padding.shape)
Output:
torch.Size([1, 10, 100, 100])
At other times we want to further shrink the feature map to reduce computation. This is done with the kernel's stride (step size), which defaults to 1.
conv_layer_with_stride = torch.nn.Conv2d(in_channels, out_channels, stride=2, kernel_size=kernel_size)
output_with_stride = conv_layer_with_stride(input)
print(output_with_stride.shape)
Output:
torch.Size([1, 10, 49, 49])
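All three shapes above follow the standard Conv2d output-size formula (with dilation = 1): out = (size + 2*padding - kernel_size) // stride + 1. A small sketch that reproduces them (the helper name is mine):

def conv_out_size(size, kernel_size, padding=0, stride=1):
    # Conv2d output width/height, assuming dilation = 1
    return (size + 2 * padding - kernel_size) // stride + 1

print(conv_out_size(100, 3))             # 98  (no padding)
print(conv_out_size(100, 3, padding=1))  # 100 (padding=1 preserves the size for a 3x3 kernel)
print(conv_out_size(100, 3, stride=2))   # 49  (stride=2 roughly halves it, rounding down)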
2. Downsampling
Downsampling works much like convolution (a window slides over the input); the difference lies in the purpose: downsampling further reduces the spatial size of the data.
The most common downsampling method is max pooling.
input = [
    3, 4, 6, 5,
    2, 4, 6, 8,
    1, 6, 7, 8,
    9, 7, 4, 6,
]
input = torch.Tensor(input).view(1, 1, 4, 4)
# Note: we set kernel_size to 2; stride then defaults to 2 as well
maxpooling_layer = torch.nn.MaxPool2d(kernel_size=2)
output = maxpooling_layer(input)
print(output)
Output:
tensor([[[[4., 8.],
          [9., 8.]]]])
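Each output entry is the maximum of one non-overlapping 2x2 window: max(3,4,2,4)=4, max(6,5,6,8)=8, max(1,6,9,7)=9, max(7,8,4,6)=8. The same result can also be obtained with the functional API (a sketch using the input defined above):

import torch.nn.functional as F

# stride defaults to kernel_size here as well, so the windows do not overlap
print(F.max_pool2d(input, kernel_size=2))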
3. Basic CNN code
Code notes:
1. torch.nn.Conv2d(1, 10, kernel_size=3, stride=2, bias=False)
1 is the number of input channels (a grayscale image has a single channel); 10 is the number of output channels, i.e. the first convolutional layer uses 10 kernels; kernel_size=3 means a 3x3 kernel; stride=2 is the step size of the convolution (default 1); bias=False disables the learnable bias (the default is True); padding controls zero-padding and defaults to 0.
2. self.fc = torch.nn.Linear(320, 10). The 320 can be found by inserting x = x.view(batch_size, -1) followed by print(x.shape), which gives (64, 320): 64 is the batch size and 320 is the number of input features entering the fully connected layer (see the shape-trace sketch below).
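As a sanity check, here is a standalone sketch (mirroring the layers defined in the code below) that traces the shapes: a 28x28 input becomes 24x24 after conv1 (kernel 5, no padding), 12x12 after pooling, 8x8 after conv2, and 4x4 after the second pooling, so 20 * 4 * 4 = 320:

import torch

features = torch.nn.Sequential(
    torch.nn.Conv2d(1, 10, kernel_size=5), torch.nn.MaxPool2d(2),
    torch.nn.Conv2d(10, 20, kernel_size=5), torch.nn.MaxPool2d(2),
)
x = features(torch.randn(64, 1, 28, 28))
print(x.shape)               # torch.Size([64, 20, 4, 4])
print(x.view(64, -1).shape)  # torch.Size([64, 320])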
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt

# prepare dataset

batch_size = 64
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])

train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)

# design model using class

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        self.pooling = torch.nn.MaxPool2d(2)
        self.fc = torch.nn.Linear(320, 10)

    def forward(self, x):
        # input x has shape (n, 1, 28, 28)
        batch_size = x.size(0)
        x = F.relu(self.pooling(self.conv1(x)))  # -> (n, 10, 12, 12)
        x = F.relu(self.pooling(self.conv2(x)))  # -> (n, 20, 4, 4)
        x = x.view(batch_size, -1)  # flatten; -1 is inferred automatically, 320 here
        # print("x.shape", x.shape)
        x = self.fc(x)
        return x

model = Net()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# construct loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

# training cycle: forward, backward, update

def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        inputs, target = inputs.to(device), target.to(device)
        optimizer.zero_grad()

        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('accuracy on test set: %d %% ' % (100 * correct / total))
    return correct / total

if __name__ == '__main__':
    epoch_list = []
    acc_list = []

    for epoch in range(10):
        train(epoch)
        acc = test()
        epoch_list.append(epoch)
        acc_list.append(acc)

    plt.plot(epoch_list, acc_list)
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.show()
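Once training has finished, the model can be used for single-image prediction; a minimal sketch, assuming the script above has already run so that model, device, and test_dataset are in scope:

model.eval()  # good practice, although this model has no dropout/batchnorm layers
with torch.no_grad():
    image, label = test_dataset[0]                 # one MNIST sample, shape (1, 28, 28)
    logits = model(image.unsqueeze(0).to(device))  # add the batch dimension -> (1, 1, 28, 28)
print('predicted:', logits.argmax(dim=1).item(), 'actual:', label)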