【Computer Vision】Chinese Herbal Medicine Classification with VGG-16
Preface
Task Description
The goal of image scene classification is to assign a semantic category (e.g., classroom, street) to an image based on its visual content; it also underpins problems such as image retrieval, image content analysis, and object recognition. Because of the diversity of scale, viewpoint, and illumination in images, and the complexity of defining a scene, scene classification remains a challenging problem in computer vision.
This hands-on exercise walks through a Chinese herbal medicine classification case to show how to build a convolutional neural network with PaddlePaddle's dynamic-graph API.
Note: the dataset used in this exercise was collected from the internet; please do not use it for commercial purposes.
1. Preparing the Chinese Herbal Medicine Dataset
(1) Parameter Configuration
- Import the required packages:
# import the required packages
import os
import zipfile
import random
import json
import paddle
import sys
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from paddle.io import Dataset
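The rest of the notebook uses the Paddle 2.x dynamic-graph API. A quick environment check can catch version or device surprises early (a minimal sketch; the exact version on AI Studio may differ, and the GPU fallback is just a convenience):
print(paddle.__version__)   # this walkthrough assumes the Paddle 2.x dynamic-graph API
# run on GPU when available, otherwise fall back to CPU
paddle.set_device('gpu' if paddle.is_compiled_with_cuda() else 'cpu')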
- Parameter configuration:
'''
Parameter configuration
'''
train_parameters = {
    "input_size": [3, 224, 224],                                       # shape of the input image
    "class_dim": -1,                                                   # number of classes
    "src_path": "/home/aistudio/data/data55190/Chinese Medicine.zip",  # path of the raw dataset zip
    "target_path": "/home/aistudio/data/",                             # extraction target directory
    "train_list_path": "/home/aistudio/data/train.txt",                # path of train.txt
    "eval_list_path": "/home/aistudio/data/eval.txt",                  # path of eval.txt
    "readme_path": "/home/aistudio/data/readme.json",                  # path of readme.json
    "label_dict": {},                                                  # label dictionary
    "num_epochs": 1,                                                   # number of training epochs
    "train_batch_size": 8,                                             # batch size during training
    "skip_steps": 10,                                                  # log every N steps
    "save_steps": 30,                                                  # save a checkpoint every N steps
    "learning_strategy": {                                             # optimizer-related settings
        "lr": 0.0001                                                   # learning rate
    },
    "checkpoints": "/home/aistudio/work/checkpoints"                   # checkpoint save directory
}
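Note that class_dim starts at -1 and label_dict starts empty; both are filled in by get_data_list below. Before going on, a quick sanity check that the mounted dataset zip exists and that the checkpoint directory is in place can save a confusing failure later (a minimal sketch, assuming the data55190 dataset is mounted as configured above):
# fail fast if the dataset zip is not where the config says it is
assert os.path.exists(train_parameters["src_path"]), "dataset zip not found"
# create the checkpoint directory up front so paddle.save has somewhere to write
os.makedirs(train_parameters["checkpoints"], exist_ok=True)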
(2) Extracting the Raw Dataset
def unzip_data(src_path, target_path):
    '''
    Extract the raw dataset: unpack the zip at src_path into the target_path directory.
    '''
    if not os.path.isdir(target_path + "Chinese Medicine"):
        z = zipfile.ZipFile(src_path, 'r')
        z.extractall(path=target_path)
        z.close()
(3) Generating the Data Lists
def get_data_list(target_path, train_list_path, eval_list_path):
    '''
    Generate the data lists.
    '''
    # information about every class
    class_detail = []
    # folder names, one per class
    data_list_path = target_path + "Chinese Medicine/"
    class_dirs = os.listdir(data_list_path)
    # total number of images
    all_class_images = 0
    # current class label
    class_label = 0
    # number of classes
    class_dim = 0
    # lines to be written to eval.txt and train.txt
    trainer_list = []
    eval_list = []
    # iterate over the classes, one folder per class
    for class_dir in class_dirs:
        if class_dir != ".DS_Store":
            class_dim += 1
            # information about this class
            class_detail_list = {}
            eval_sum = 0
            trainer_sum = 0
            # number of images in this class
            class_sum = 0
            # path of this class's folder
            path = data_list_path + class_dir
            # list all images in the folder
            img_paths = os.listdir(path)
            for img_path in img_paths:                   # iterate over every image in the folder
                name_path = path + '/' + img_path        # path of this image
                if class_sum % 8 == 0:                   # every 8th image goes to the validation set
                    eval_sum += 1                        # eval_sum: number of validation images
                    eval_list.append(name_path + "\t%d" % class_label + "\n")
                else:
                    trainer_sum += 1                     # trainer_sum: number of training images
                    trainer_list.append(name_path + "\t%d" % class_label + "\n")
                class_sum += 1                           # images in this class
                all_class_images += 1                    # images across all classes
            # class_detail entry for the readme json
            class_detail_list['class_name'] = class_dir             # class name
            class_detail_list['class_label'] = class_label           # class label
            class_detail_list['class_eval_images'] = eval_sum        # number of validation images in this class
            class_detail_list['class_trainer_images'] = trainer_sum  # number of training images in this class
            class_detail.append(class_detail_list)
            # fill in the label dictionary
            train_parameters['label_dict'][str(class_label)] = class_dir
            class_label += 1
    # set the number of classes
    train_parameters['class_dim'] = class_dim
    # shuffle and write out
    random.shuffle(eval_list)
    with open(eval_list_path, 'a') as f:
        for eval_image in eval_list:
            f.write(eval_image)
    random.shuffle(trainer_list)
    with open(train_list_path, 'a') as f2:
        for train_image in trainer_list:
            f2.write(train_image)
    # the readme json
    readjson = {}
    readjson['all_class_name'] = data_list_path          # parent directory of the class folders
    readjson['all_class_images'] = all_class_images
    readjson['class_detail'] = class_detail
    jsons = json.dumps(readjson, sort_keys=True, indent=4, separators=(',', ': '))
    with open(train_parameters['readme_path'], 'w') as f:
        f.write(jsons)
    print('Data lists generated!')
(4) Parameter Initialization
'''
Parameter initialization
'''
src_path = train_parameters['src_path']
target_path = train_parameters['target_path']
train_list_path = train_parameters['train_list_path']
eval_list_path = train_parameters['eval_list_path']
'''
Extract the raw dataset to the target path
'''
unzip_data(src_path, target_path)
'''
Split into training and validation sets, shuffle, and generate the data lists
'''
# clear train.txt and eval.txt before regenerating the lists
with open(train_list_path, 'w') as f:
    f.seek(0)
    f.truncate()
with open(eval_list_path, 'w') as f:
    f.seek(0)
    f.truncate()
# generate the data lists
get_data_list(target_path, train_list_path, eval_list_path)
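Each line of train.txt/eval.txt is an image path and an integer label separated by a tab, and readme.json records the per-class counts. A quick check of the generated artifacts (a minimal sketch):
# peek at the first training entry: "<image path>\t<label>"
with open(train_list_path) as f:
    print(f.readline().strip())
# cross-check the totals recorded in readme.json
with open(train_parameters['readme_path']) as f:
    info = json.load(f)
print(info['all_class_images'], 'images across', train_parameters['class_dim'], 'classes')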
(5) Building the Data Reader
class dataset(Dataset):
    def __init__(self, data_path, mode='train'):
        """
        Data reader.
        :param data_path: directory containing the data lists
        :param mode: train or eval
        """
        super().__init__()
        self.data_path = data_path
        self.img_paths = []
        self.labels = []
        if mode == 'train':
            with open(os.path.join(self.data_path, "train.txt"), "r", encoding="utf-8") as f:
                self.info = f.readlines()
            for img_info in self.info:
                img_path, label = img_info.strip().split('\t')
                self.img_paths.append(img_path)
                self.labels.append(int(label))
        else:
            with open(os.path.join(self.data_path, "eval.txt"), "r", encoding="utf-8") as f:
                self.info = f.readlines()
            for img_info in self.info:
                img_path, label = img_info.strip().split('\t')
                self.img_paths.append(img_path)
                self.labels.append(int(label))

    def __getitem__(self, index):
        """
        Fetch one sample.
        :param index: sample index
        :return: image array and label
        """
        # open the image file and read its label
        img_path = self.img_paths[index]
        img = Image.open(img_path)
        if img.mode != 'RGB':
            img = img.convert('RGB')
        img = img.resize((224, 224), Image.BILINEAR)
        img = np.array(img).astype('float32')
        img = img.transpose((2, 0, 1)) / 255        # HWC to CHW, scaled to [0, 1]
        label = self.labels[index]
        label = np.array([label], dtype="int64")
        return img, label

    def print_sample(self, index: int = 0):
        print("file:", self.img_paths[index], "\tlabel:", self.labels[index])

    def __len__(self):
        return len(self.img_paths)
(6) Loading and Inspecting the Data
# training data loader
train_dataset = dataset('/home/aistudio/data', mode='train')
train_loader = paddle.io.DataLoader(train_dataset, batch_size=16, shuffle=True)
# validation data loader
eval_dataset = dataset('/home/aistudio/data', mode='eval')
eval_loader = paddle.io.DataLoader(eval_dataset, batch_size=8, shuffle=False)
train_dataset.print_sample(200)
print(train_dataset.__len__())
eval_dataset.print_sample(0)
print(eval_dataset.__len__())
print(eval_dataset.__getitem__(10)[0].shape)
print(eval_dataset.__getitem__(10)[1].shape)
The output is shown in Figure 1:
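The original notebook figure is not reproduced here; to eyeball a sample yourself, one option is a small matplotlib sketch (transposing CHW back to HWC, since __getitem__ returns channel-first arrays):
img, label = train_dataset[0]
plt.imshow(img.transpose((1, 2, 0)))   # CHW -> HWC; values are already in [0, 1]
plt.title(train_parameters['label_dict'][str(label[0])])
plt.show()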
2. Model Configuration
The core of VGG is five groups of convolutions, with max-pooling between consecutive groups for spatial downsampling. Within each group, several consecutive 3×3 convolutions are stacked; the number of kernels grows from 64 in the shallowest group to 512 in the deepest, and is the same for all convolutions within a group. The convolutions are followed by two fully connected layers and then the classification layer. Depending on how many convolution layers each group contains, there are 11-, 13-, 16-, and 19-layer variants; Figure 2 shows the 16-layer structure.
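The 512×7×7 input size of the first fully connected layer follows directly from this layout: each of the five stride-2 max-pools halves the spatial resolution, so a 224×224 input shrinks as 224 → 112 → 56 → 28 → 14 → 7. A quick arithmetic check:
size = 224
for _ in range(5):               # five convolution groups, each ending in a stride-2 max-pool
    size //= 2
print(size, 512 * size * size)   # 7 and 25088 -- matching pool_5_shape = 512 * 7 * 7 below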
(1) Convolution + Pooling
class ConvPool(paddle.nn.Layer):
    '''Convolution + pooling block'''
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 pool_size,
                 pool_stride,
                 groups,
                 conv_stride=1,
                 conv_padding=1,
                 ):
        super(ConvPool, self).__init__()
        for i in range(groups):
            self.add_sublayer(                     # add a sublayer instance
                'bb_%d' % i,
                paddle.nn.Conv2D(
                    in_channels=num_channels,      # input channels
                    out_channels=num_filters,      # number of kernels
                    kernel_size=filter_size,       # kernel size
                    stride=conv_stride,            # stride
                    padding=conv_padding,          # padding
                )
            )
            self.add_sublayer(
                'relu%d' % i,
                paddle.nn.ReLU()
            )
            num_channels = num_filters
        self.add_sublayer(
            'Maxpool',
            paddle.nn.MaxPool2D(
                kernel_size=pool_size,             # pooling kernel size
                stride=pool_stride                 # pooling stride
            )
        )

    def forward(self, inputs):
        x = inputs
        for prefix, sub_layer in self.named_children():
            # print(prefix, sub_layer)
            x = sub_layer(x)
        return x
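A quick shape check on one block (a sketch; a random tensor stands in for a real batch). With 3 input channels, 64 filters, and a stride-2 pool, the 3×3 convolutions with padding 1 preserve the 224×224 resolution and the pool halves it:
block = ConvPool(3, 64, 3, 2, 2, 2)
x = paddle.randn([1, 3, 224, 224])
print(block(x).shape)   # expected: [1, 64, 112, 112]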
(2) Defining the Convolutional Neural Network
class VGGNet(paddle.nn.Layer):
    def __init__(self):
        super(VGGNet, self).__init__()
        self.convpool01 = ConvPool(
            3, 64, 3, 2, 2, 2)    # 3: input channels, 64: kernels, 3: kernel size, 2: pool size, 2: pool stride, 2: consecutive convolutions
        self.convpool02 = ConvPool(
            64, 128, 3, 2, 2, 2)
        self.convpool03 = ConvPool(
            128, 256, 3, 2, 2, 3)
        self.convpool04 = ConvPool(
            256, 512, 3, 2, 2, 3)
        self.convpool05 = ConvPool(
            512, 512, 3, 2, 2, 3)
        self.pool_5_shape = 512 * 7 * 7
        self.fc01 = paddle.nn.Linear(self.pool_5_shape, 4096)
        self.fc02 = paddle.nn.Linear(4096, 4096)
        self.fc03 = paddle.nn.Linear(4096, train_parameters['class_dim'])

    def forward(self, inputs, label=None):
        # print('input_shape:', inputs.shape)  # [8, 3, 224, 224]
        """Forward pass"""
        out = self.convpool01(inputs)
        out = self.convpool02(out)
        out = self.convpool03(out)
        out = self.convpool04(out)
        out = self.convpool05(out)
        out = paddle.reshape(out, shape=[-1, 512*7*7])
        out = self.fc01(out)
        out = self.fc02(out)
        out = self.fc03(out)
        if label is not None:
            acc = paddle.metric.accuracy(input=out, label=label)
            return out, acc
        else:
            return out
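To confirm the whole stack end to end, paddle.summary prints per-layer output shapes and parameter counts (a sketch; run it only after get_data_list has filled in class_dim, since fc03 depends on it):
model_check = VGGNet()
paddle.summary(model_check, (1, 3, 224, 224))   # the final Linear should output class_dim logits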
3. Model Training
(1) Defining the Plotting Helper
def draw_process(title, color, iters, data, label):
    plt.title(title, fontsize=24)
    plt.xlabel("iter", fontsize=20)
    plt.ylabel(label, fontsize=20)
    plt.plot(iters, data, color=color, label=label)
    plt.legend()
    plt.grid()
    plt.show()
print(train_parameters['class_dim'])
print(train_parameters['label_dict'])
The output is shown in Figure 3:
(2) Instantiating and Training the Model
model = VGGNet()
model.train()
cross_entropy = paddle.nn.CrossEntropyLoss()
optimizer = paddle.optimizer.Adam(learning_rate=train_parameters['learning_strategy']['lr'],
                                  parameters=model.parameters())
steps = 0
Iters, total_loss, total_acc = [], [], []
for epo in range(train_parameters['num_epochs']):
    for _, data in enumerate(train_loader()):
        steps += 1
        x_data = data[0]
        y_data = data[1]
        predicts, acc = model(x_data, y_data)
        loss = cross_entropy(predicts, y_data)
        loss.backward()
        optimizer.step()
        optimizer.clear_grad()
        if steps % train_parameters["skip_steps"] == 0:
            Iters.append(steps)
            total_loss.append(loss.numpy()[0])
            total_acc.append(acc.numpy()[0])
            # print intermediate progress
            print('epo: {}, step: {}, loss is: {}, acc is: {}'\
                  .format(epo, steps, loss.numpy(), acc.numpy()))
        # save model parameters
        if steps % train_parameters["save_steps"] == 0:
            save_path = train_parameters["checkpoints"] + "/" + "save_dir_" + str(steps) + '.pdparams'
            print('save model to: ' + save_path)
            paddle.save(model.state_dict(), save_path)
paddle.save(model.state_dict(), train_parameters["checkpoints"] + "/" + "save_dir_final.pdparams")
draw_process("training loss", "red", Iters, total_loss, "training loss")
draw_process("training acc", "green", Iters, total_acc, "training acc")
The output is shown in Figures 4, 5, and 6:
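With save_steps = 30, intermediate weights land in the checkpoints directory every 30 steps. If a run is interrupted, it can be resumed from the most recent file (a minimal sketch; save_dir_30.pdparams assumes at least 30 steps have run, and note that the optimizer state is not checkpointed here, so the Adam moments restart from zero):
# reload the latest intermediate checkpoint and continue training from it
state_dict = paddle.load(train_parameters["checkpoints"] + "/save_dir_30.pdparams")
model.set_state_dict(state_dict)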
4. Model Evaluation
'''
Model evaluation
'''
model__state_dict = paddle.load('work/checkpoints/save_dir_final.pdparams')
model_eval = VGGNet()
model_eval.set_state_dict(model__state_dict)
model_eval.eval()
accs = []
for _, data in enumerate(eval_loader()):
    x_data = data[0]
    y_data = data[1]
    predicts = model_eval(x_data)
    acc = paddle.metric.accuracy(predicts, y_data)
    accs.append(acc.numpy()[0])
print('Accuracy on the validation set:', np.mean(accs))
The output is shown in Figure 7:
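One caveat: np.mean(accs) averages per-batch accuracies, so a smaller final batch carries the same weight as a full one. A sample-weighted variant (a minimal sketch) counts correct predictions directly:
correct, total = 0, 0
for x_data, y_data in eval_loader():
    predicts = model_eval(x_data)
    correct += (predicts.argmax(axis=1).numpy() == y_data.numpy().flatten()).sum()
    total += y_data.shape[0]
print('sample-weighted accuracy:', correct / total)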
5. Model Prediction
(1) Preparing the Inference Data
def unzip_infer_data(src_path, target_path):
    '''
    Extract the inference dataset.
    '''
    if not os.path.isdir(target_path + "Chinese Medicine Infer"):
        z = zipfile.ZipFile(src_path, 'r')
        z.extractall(path=target_path)
        z.close()

def load_image(img_path):
    '''
    Preprocess an image for prediction.
    '''
    img = Image.open(img_path)
    if img.mode != 'RGB':
        img = img.convert('RGB')
    img = img.resize((224, 224), Image.BILINEAR)
    img = np.array(img).astype('float32')
    img = img.transpose((2, 0, 1)) / 255        # HWC to CHW, scaled to [0, 1]
    return img

infer_src_path = '/home/aistudio/data/data55194/Chinese Medicine Infer.zip'
infer_dst_path = '/home/aistudio/data/'
unzip_infer_data(infer_src_path, infer_dst_path)
label_dic = train_parameters['label_dict']
(2) Running Predictions
model__state_dict = paddle.load('work/checkpoints/save_dir_final.pdparams')
model_predict = VGGNet()
model_predict.set_state_dict(model__state_dict)
model_predict.eval()
infer_imgs_path = os.listdir(infer_dst_path + "Chinese Medicine Infer")
print(infer_imgs_path)
for infer_img_path in infer_imgs_path:
    infer_img = load_image(infer_dst_path + "Chinese Medicine Infer/" + infer_img_path)
    infer_img = infer_img[np.newaxis, :, :, :]     # add a batch dimension: (1, 3, 224, 224)
    infer_img = paddle.to_tensor(infer_img)
    result = model_predict(infer_img)
    lab = np.argmax(result.numpy())
    print("sample: {} is predicted as: {}".format(infer_img_path, label_dic[str(lab)]))
The output is shown in Figure 8:
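np.argmax only reports the top class. To also attach a confidence score, the logits can be pushed through softmax (a minimal sketch, reusing the result tensor from the last loop iteration above):
import paddle.nn.functional as F
probs = F.softmax(result, axis=1).numpy()[0]   # convert logits to class probabilities
lab = int(np.argmax(probs))
print("predicted: {} (confidence: {:.2%})".format(label_dic[str(lab)], probs[lab]))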
Summary
This series of articles is based on my notes and reflections on "Machine Learning in Practice" published by Tsinghua University Press; all code is built on Baidu PaddlePaddle. If anything here infringes a right or is otherwise inappropriate, please message me and I will handle it promptly!
Finally, to close with a line borrowed from this event ~( ̄▽ ̄~)~:
"The greatest reason to learn is to escape mediocrity: a day earlier brings one more day of a remarkable life; a day later, one more day mired in the mundane."