import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# Device configuration: use the first GPU when available, otherwise the CPU.
# The model and every batch of tensors are moved to this device.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Hyper parameters
num_epochs = 5         # number of full passes over the training set
num_classes = 10       # MNIST digit classes 0-9
batch_size = 100       # samples loaded per mini-batch
learning_rate = 0.001  # Adam step size

# MNIST dataset.  root is the storage path, train selects the split, and
# transforms.ToTensor() converts each PIL image to a float tensor in [0, 1].
# The training split is downloaded if it is not already present.
train_dataset = torchvision.datasets.MNIST(root='../../data/',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=True)

test_dataset = torchvision.datasets.MNIST(root='../../data/',
                                          train=False,
                                          transform=transforms.ToTensor())

# Data loaders: iterable mini-batch readers over the datasets.
# Only the training data is shuffled; evaluation order does not matter.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
# Convolutional neural network (two convolutional layers)
class ConvNet(nn.Module):
    """Two-convolutional-layer CNN for 28x28 single-channel images.

    Architecture:
        [Conv(1->16, 5x5, pad 2) -> BatchNorm -> ReLU -> MaxPool(2)]
        [Conv(16->32, 5x5, pad 2) -> BatchNorm -> ReLU -> MaxPool(2)]
        Linear(7*7*32 -> num_classes)

    Each 5x5 convolution with padding=2 preserves spatial size, and each
    2x2 max-pool halves it: 28x28 -> 14x14 -> 7x7, hence the flattened
    feature vector of length 7*7*32 feeding the classifier.

    Args:
        num_classes: size of the output logit vector (default 10).
    """

    def __init__(self, num_classes=10):
        super(ConvNet, self).__init__()
        # 1x28x28 input -> 16x14x14 after the pool.
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        # 16x14x14 -> 32x7x7 after the pool.
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        # Fully connected classifier over the flattened 7*7*32 features.
        self.fc = nn.Linear(7*7*32, num_classes)

    def forward(self, x):
        """Return class logits of shape (batch, num_classes) for an input
        batch x of shape (batch, 1, 28, 28)."""
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.reshape(out.size(0), -1)  # flatten all but the batch dim
        out = self.fc(out)
        return out
model = ConvNet(num_classes).to(device)
# Instantiate the network and move its parameters to the selected device
# (GPU when available, otherwise CPU).
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
# Cross-entropy loss: combines log-softmax with negative log-likelihood.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Adam optimizer over all model parameters; optimizer.step() applies one update.
# See https://blog.csdn.net/kgzhang/article/details/77479737 for a detailed
# explanation of the Adam algorithm.
# 2. Training
# Train the model: num_epochs full passes over the shuffled training set.
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize.  PyTorch accumulates gradients by
        # default, so clear them before each backward pass.
        optimizer.zero_grad()
        loss.backward()   # back-propagate parameter gradients (loss is a scalar)
        optimizer.step()  # apply the parameter update

        # Report progress every 100 mini-batches.
        if (i+1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# Training proceeds in four steps:
#   1. Propagate the input forward through the network to get the output.
#   2. Feed the output to the loss function to compute a scalar loss value.
#   3. Back-propagate the gradient of the loss to every parameter.
#   4. Update the weights: new_w = old_w - learning_rate * gradient.
# PyTorch accumulates gradients across backward passes by default, so they
# must be zeroed manually if the previous batch's gradients should not
# influence the current update.
# Gradient-accumulation details:
#   https://blog.csdn.net/wuzhongqiang/article/details/102572324
# Relation between optimizer.step(), loss.backward() and scheduler.step():
#   https://blog.csdn.net/xiaoxifei/article/details/87797935
# 3. After training comes testing
# Test the model
model.eval()  # eval mode: BatchNorm uses running mean/variance instead of batch statistics
with torch.no_grad():  # no gradients needed for evaluation
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)  # (batch_size, 10) logits
        # predicted holds the column index of the row-wise maximum,
        # i.e. the most likely class for each sample.
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        # Element-wise comparison gives a boolean tensor; its sum is the
        # number of correct predictions in this batch.
        correct += (predicted == labels).sum().item()

    print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))

# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
# Evaluation reports the trained model's accuracy on the test set, i.e.
# correct predictions / total samples.  Starting from correct = 0 and
# total = 0, each batch's logits are reduced with a row-wise max to obtain
# the predicted class, total grows by the batch size (100), and correct
# grows by the number of rows where prediction and label agree.