创建wandb个人账户
DSW操作
创建并激活环境
conda create -n wan python=3.7
conda activate wan
安装必要的包
appdirs==1.4.4
charset-normalizer==3.1.0
click==8.1.3
docker-pycreds==0.4.0
gitdb==4.0.10
GitPython==3.1.31
idna==3.4
importlib-metadata==6.7.0
numpy==1.21.6
nvidia-cublas-cu11==11.10.3.66
nvidia-cuda-nvrtc-cu11==11.7.99
nvidia-cuda-runtime-cu11==11.7.99
nvidia-cudnn-cu11==8.5.0.96
pathtools==0.1.2
Pillow==9.5.0
protobuf==4.23.3
psutil==5.9.5
PyYAML==6.0
requests==2.31.0
sentry-sdk==1.25.1
setproctitle==1.3.2
six==1.16.0
smmap==5.0.0
torch==1.13.1
torchvision==0.14.1
typing_extensions==4.6.3
urllib3==2.0.3
wandb==0.15.4
zipp==3.15.0
pip install -r requirements.txt
python文件
import argparse
import random
import numpy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import logging

# NOTE(review): the original line `logging.propagate = False` set an attribute
# on the logging *module* object, not on any logger, so it had no effect and
# has been removed. Silencing below-ERROR output is done via the root logger:
logging.getLogger().setLevel(logging.ERROR)
import wandb

# Configure your own API key.
# SECURITY NOTE: avoid hard-coding an API key in source files; prefer setting
# the WANDB_API_KEY environment variable (wandb.login() picks it up
# automatically when called with no arguments).
wandb.login(key="831ea3*******")
class Net(nn.Module):
    """Small LeNet-style CNN for 28x28 single-channel MNIST digits.

    Two conv + max-pool stages feed two fully connected layers; the output
    is log-probabilities over the 10 digit classes (suitable for
    ``F.nll_loss``).
    """

    def __init__(self):
        super(Net, self).__init__()
        # Attribute names are kept stable so saved state_dicts keep their keys.
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        """Map a (N, 1, 28, 28) batch to (N, 10) class log-probabilities."""
        stage1 = F.relu(F.max_pool2d(self.conv1(x), 2))                 # -> (N, 10, 12, 12)
        stage2 = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(stage1)), 2))  # -> (N, 20, 4, 4)
        flat = stage2.view(-1, 320)                                     # 20 * 4 * 4 = 320
        dense = F.relu(self.fc1(flat))
        dense = F.dropout(dense, training=self.training)
        return F.log_softmax(self.fc2(dense), dim=1)
def train(args, model, device, train_loader, optimizer, epoch):
    """Run one (deliberately truncated) training pass over ``train_loader``.

    Only the first 21 batches are consumed — this keeps the demo run short.
    ``args`` and ``epoch`` are accepted for interface symmetry with ``test``
    but are not used here.
    """
    model.train()
    for batch_idx, batch in enumerate(train_loader):
        # Process batches 0..20, then stop early.
        if batch_idx >= 21:
            break
        inputs, labels = (t.to(device) for t in batch)
        optimizer.zero_grad()
        log_probs = model(inputs)
        nll = F.nll_loss(log_probs, labels)
        nll.backward()
        optimizer.step()
def test(args, model, device, test_loader):
    """Evaluate ``model`` on ``test_loader`` and log metrics to wandb.

    Logs one example image per batch with its prediction, the accuracy in
    percent, and the *average* per-sample NLL loss over the whole test set.
    ``args`` is accepted for interface symmetry with ``train`` but unused.
    """
    model.eval()
    test_loss = 0
    correct = 0
    example_images = []
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # Sum per-sample losses so we can average over the dataset below.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.max(1, keepdim=True)[1]  # index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
            example_images.append(wandb.Image(
                data[0], caption="Pred: {} Truth: {}".format(pred[0].item(), target[0])))
    # BUGFIX: the original logged the raw summed loss; report the mean per
    # sample so "Test Loss" is comparable across dataset sizes.
    test_loss /= len(test_loader.dataset)
    # Record the model's test-set Accuracy and Loss through wandb.
    wandb.log({
        "Examples": example_images,
        "Test Accuracy": 100. * correct / len(test_loader.dataset),
        "Test Loss": test_loss})
# Project name under which this run is saved on wandb.
wandb.init(project="wandb-test")
wandb.watch_called = False
# Hyperparameters recorded on wandb for this run.
config = wandb.config
config.batch_size = 4
config.test_batch_size = 10
config.epochs = 50
config.lr = 0.1
config.momentum = 0.1
config.no_cuda = False
config.seed = 42
# NOTE(review): log_interval is never read by train()/test() below —
# presumably intended for periodic loss logging; confirm before relying on it.
config.log_interval = 10
def main():
    """Train and evaluate the MNIST model, reporting everything to wandb."""
    use_cuda = not config.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    loader_extra = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # Seed every RNG we touch so runs are reproducible.
    random.seed(config.seed)
    torch.manual_seed(config.seed)
    numpy.random.seed(config.seed)
    torch.backends.cudnn.deterministic = True

    # The same normalization pipeline is applied to both splits.
    mnist_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=True, download=True,
                       transform=mnist_transform),
        batch_size=config.batch_size, shuffle=True, **loader_extra)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=False, transform=mnist_transform),
        batch_size=config.test_batch_size, shuffle=True, **loader_extra)

    model = Net().to(device)
    optimizer = optim.SGD(model.parameters(), lr=config.lr,
                          momentum=config.momentum)

    # Record the model's layer dimensions, gradients and parameter values.
    wandb.watch(model, log="all")

    for epoch in range(1, config.epochs + 1):
        train(config, model, device, train_loader, optimizer, epoch)
        test(config, model, device, test_loader)

    # Save the trained weights locally, then upload the file to wandb.
    torch.save(model.state_dict(), "model.h5")
    wandb.save('model.h5')


if __name__ == '__main__':
    main()
参考链接
wandb不可缺少的机器学习分析工具