在使用 loss function 的时候,只需要关注输入形状和输出形状
L1Loss
关注点是输入形状:N是batch_size大小
一个具体的使用案例
L1Loss 默认(reduction='mean')对逐元素做差取绝对值,加和后计算平均值
# L1Loss demo: with the default reduction='mean', the element-wise
# absolute differences are averaged. L1Loss requires floating-point inputs.
import torch
from torch.nn import L1Loss

inputs = torch.tensor([1, 2, 3], dtype=torch.float32)   # predictions
targets = torch.tensor([1, 2, 5], dtype=torch.float32)  # ground truth

# Reshape to (N, C, H, W) = (1, 1, 1, 3) to mimic a batched tensor.
inputs = inputs.reshape(1, 1, 1, 3)
targets = targets.reshape(1, 1, 1, 3)

loss = L1Loss()  # mean of |input - target|
result = loss(inputs, targets)
print(result)  # tensor(0.6667) == (0 + 0 + 2) / 3
运行结果截图:
不计算平均值
# L1Loss demo: reduction='sum' adds up the element-wise absolute
# differences instead of averaging them. Inputs must be floating point.
import torch
from torch.nn import L1Loss

inputs = torch.tensor([1, 2, 3], dtype=torch.float32)   # predictions
targets = torch.tensor([1, 2, 5], dtype=torch.float32)  # ground truth

# Reshape to (N, C, H, W) = (1, 1, 1, 3) to mimic a batched tensor.
inputs = inputs.reshape(1, 1, 1, 3)
targets = targets.reshape(1, 1, 1, 3)

loss = L1Loss(reduction='sum')  # sum of |input - target|
result = loss(inputs, targets)
print(result)  # tensor(2.) == 0 + 0 + 2
运行结果截图:
MSELOSS 均方误差
均方误差是常用的
实战LossFunction
未加lossfunction
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
# Dataset: CIFAR-10 test split as tensors; downloads into ../data if missing.
dataset=torchvision.datasets.CIFAR10("../data",train=False,transform=torchvision.transforms.ToTensor(),download=True)
# batch_size=1: each iteration yields one (image, label) pair
dataLoader=DataLoader(dataset,batch_size=1)
class Tudui(nn.Module):
    """CIFAR-10 classifier: three conv+pool stages, flatten, two linear layers.

    Input: (N, 3, 32, 32) images. Output: (N, 10) raw class scores (logits).
    Note: the original paste had lost all indentation; structure restored here.
    """

    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),   # 32x32 -> 32x32 ('same' padding)
            MaxPool2d(2),                  # -> 16x16
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),                  # -> 8x8
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),                  # -> 4x4
            Flatten(),                     # 64 * 4 * 4 = 1024 features
            Linear(1024, 64),
            Linear(64, 10),
        )

    def forward(self, x):
        """Run the sequential stack; returns (N, 10) logits."""
        x = self.model1(x)
        return x
tudui = Tudui()
# Run the (untrained) network over the test set one image at a time.
# Restored the loop-body indentation that was lost in the original paste.
for data in dataLoader:
    imgs, targets = data
    outputs = tudui(imgs)
    print(outputs)   # (1, 10) raw class scores — logits, not probabilities
    print(targets)   # ground-truth class index for this image
输出对 10 个类别的原始预测分数(logits,并非经过 softmax 的概率)
运行结果截图:
加入lossfunction
lossfunction作用一 计算实际输出与目标之间的差距
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
# Dataset: CIFAR-10 test split as tensors; downloads into ../data if missing.
dataset=torchvision.datasets.CIFAR10("../data",train=False,transform=torchvision.transforms.ToTensor(),download=True)
# batch_size=1: each iteration yields one (image, label) pair
dataLoader=DataLoader(dataset,batch_size=1)
class Tudui(nn.Module):
    """CIFAR-10 network used to demonstrate CrossEntropyLoss.

    Maps (N, 3, 32, 32) images to (N, 10) logits via conv/pool stages,
    a flatten, and two linear layers. Indentation restored — the pasted
    source had lost it and would not parse.
    """

    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),   # keep 32x32 with 'same' padding
            MaxPool2d(2),                  # halve to 16x16
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),                  # 8x8
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),                  # 4x4
            Flatten(),                     # 64 * 4 * 4 = 1024
            Linear(1024, 64),
            Linear(64, 10),
        )

    def forward(self, x):
        """Return (N, 10) raw class scores."""
        x = self.model1(x)
        return x
# CrossEntropyLoss combines log-softmax and NLL: it expects raw logits
# of shape (N, C) and integer class targets of shape (N,).
loss = nn.CrossEntropyLoss()
tudui = Tudui()
# Loop-body indentation restored (lost in the original paste).
for data in dataLoader:
    imgs, targets = data
    outputs = tudui(imgs)
    result_loss = loss(outputs, targets)
    print(result_loss)  # scalar loss for this single-image batch
运行结果截图:
lossfunction作用二 为我们更新提供一定的依据(反向传播)
加入反向传播(backward 只负责计算梯度;真正的梯度下降更新还需要配合优化器)
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
# Dataset: CIFAR-10 test split as tensors; downloads into ../data if missing.
dataset=torchvision.datasets.CIFAR10("../data",train=False,transform=torchvision.transforms.ToTensor(),download=True)
# batch_size=1: each iteration yields one (image, label) pair
dataLoader=DataLoader(dataset,batch_size=1)
class Tudui(nn.Module):
    """CIFAR-10 network used to demonstrate loss.backward().

    Takes (N, 3, 32, 32) images and produces (N, 10) logits.
    The pasted source had lost all indentation; the class/method
    structure is restored here.
    """

    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),   # 'same' padding keeps 32x32
            MaxPool2d(2),                  # -> 16x16
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),                  # -> 8x8
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),                  # -> 4x4
            Flatten(),                     # 64 * 4 * 4 = 1024 features
            Linear(1024, 64),
            Linear(64, 10),
        )

    def forward(self, x):
        """Return (N, 10) raw class scores (logits)."""
        x = self.model1(x)
        return x
loss = nn.CrossEntropyLoss()
tudui = Tudui()
# Loop-body indentation restored (lost in the original paste).
for data in dataLoader:
    imgs, targets = data
    outputs = tudui(imgs)
    result_loss = loss(outputs, targets)
    # backward() computes d(loss)/d(param) into each parameter's .grad;
    # an optimizer step is still required to actually update the weights.
    result_loss.backward()
    print("ok")
在 debug 中可以看到,调用 backward() 后各参数的 grad 属性被计算出来(权重本身并未更新,更新需要优化器执行 step)