六大分类模型下载方式和使用方法:
Resnet
inception
Densenet
Alexnet
vggnet
Resnet:
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
inception:
model_urls = {
# Inception v3 ported from TensorFlow
'inception_v3_google': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth',
}
Densenet:
model_urls = {
'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth',
'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth',
'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth',
'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth',
}
Alexnet:
model_urls = {
'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
}
vggnet:
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
学习内容:测试实现预训练模型的使用,并牢记该方式-拿为己用
关键步骤讲述:
默认已经安装好环境和pytorch框架,以及torchvision等需要的库。
import torchvision.models as models
所有成熟网络模型几乎都在里面
# 初始化模型 model = models.resnet18()
此处应用ResNet18来分类。
修改尾巴,毕竟你的输出不一定和原版(1000)一模一样。
# 修改网络结构,将fc层1000个输出改为9个输出。
# 获取最后一层的输入特征层信息。 fc_input_feature = model.fc.in_features
# 取代原来输出层为新的nn。 model.fc = nn.Linear(fc_input_feature, 9)
到这里,网络就构建好了。
下载预训练参数,为己所用。# load除最后一层的预训练权重 pretrained_weight = torch.hub.load_state_dict_from_url( url='https://download.pytorch.org/models/resnet18-5c106cde.pth', progress=True)
到这里,下载的是原版的1000分类的参数,我们需要删除不需要的尾巴,并训练自己的尾巴。del pretrained_weight['fc.weight']
del pretrained_weight['fc.bias']
因为分类就是用的线性函数,包括权重w和偏移b,只需删除尾巴。
最后,将剩下的模型参数load到我们的模型上即可。model.load_state_dict(pretrained_weight, strict=False)
模型准备完毕,剩下的操作和所有训练方法一样。参见详细训练代码。
import os
import torch
from torch.utils.data import DataLoader
from torch import nn
from torch import optim
import torchvision.models as models
import time
# use res18
# from resnet.resnetmini import ClassificModel as Model
from datasets.read_data_sleep import PlayPhoneData
def train(data_path=r"E:\Datasets\sleep_traindata"):
    """Fine-tune an ImageNet-pretrained ResNet18 for 9-way classification.

    Loads the custom sleep/pose dataset from *data_path*, replaces the
    1000-way fc head with a 9-way one, loads the pretrained backbone
    weights (minus the fc layer), trains with SGD + StepLR, and saves
    the trained model to disk.

    Args:
        data_path: Root directory of the training data, consumed by
            ``PlayPhoneData`` (project-local dataset class).

    Returns:
        list[float]: per-batch loss values collected during training,
        useful for plotting a loss curve (previously collected but
        discarded).
    """
    # Hyperparameters
    batch_size = 1   # samples per training batch
    LR = 0.01        # initial learning rate
    STEP_SIZE = 10   # StepLR interval in epochs (was declared but unused; now wired in)
    MAX_EPOCH = 20   # total number of training epochs
    num_print = 100  # print the averaged loss every n batches

    playPhoneData = PlayPhoneData(data_path)
    # Wrap the dataset in a DataLoader; drop the last incomplete batch.
    train_loader = DataLoader(playPhoneData, batch_size=batch_size, shuffle=True, drop_last=True)

    # Pick the device; reuse the availability check instead of querying CUDA twice.
    use_gpu = torch.cuda.is_available()
    if use_gpu:
        print('congratulation! You can use gpu to support acceleration')
    else:
        print('oops, please use a small batch size')
    device = torch.device("cuda:0" if use_gpu else "cpu")

    # Build the model: stock ResNet18 with the 1000-way fc head replaced by 9 outputs.
    model = models.resnet18()
    fc_input_feature = model.fc.in_features
    model.fc = nn.Linear(fc_input_feature, 9)
    # Download the ImageNet-pretrained checkpoint and drop the fc weights,
    # whose shape no longer matches the new 9-way head.
    pretrained_weight = torch.hub.load_state_dict_from_url(
        url='https://download.pytorch.org/models/resnet18-5c106cde.pth', progress=True)
    del pretrained_weight['fc.weight']
    del pretrained_weight['fc.bias']
    # strict=False leaves the freshly-initialized fc layer untouched.
    model.load_state_dict(pretrained_weight, strict=False)
    model.to(device)
    model.train()  # explicit; nn.Module defaults to training mode anyway

    # Cross-entropy loss for multi-class classification.
    get_loss = nn.CrossEntropyLoss()
    # SGD with momentum (uses the previous update direction to damp gradient
    # oscillation and speed convergence) and weight decay (L2 regularization:
    # shrinks weights each step to curb overfitting).
    optimizer = optim.SGD(model.parameters(), lr=LR, momentum=0.9, weight_decay=0.001)
    # Decay the learning rate by *gamma* every STEP_SIZE epochs: as training
    # approaches the optimum, a smaller LR reduces loss oscillation.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=STEP_SIZE, gamma=0.1)

    loss_list = []
    start = time.time()
    for epoch in range(MAX_EPOCH):
        running_loss = 0.0
        for i, (inputs, labels) in enumerate(train_loader, 0):
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)      # forward pass
            optimizer.zero_grad()        # clear accumulated gradients
            loss = get_loss(outputs, labels.long())
            loss.backward()              # backward pass
            optimizer.step()             # update parameters
            running_loss += loss.item()  # .item() extracts the Python float from the tensor
            loss_list.append(loss.item())
            if i % num_print == num_print - 1:  # report the average loss every num_print batches
                print('[%d epoch, %d] loss: %.6f' % (epoch + 1, i + 1, running_loss / num_print))
                running_loss = 0.0
        lr = optimizer.param_groups[0]['lr']  # current learning rate
        print('learn_rate : %.5f' % lr)
        scheduler.step()  # advance the LR schedule once per epoch
    end = time.time()
    print('time:{}'.format(end - start))
    # NOTE(review): this saves the whole model object; torch.save(model.state_dict(), ...)
    # is the recommended, more portable format — kept as-is to preserve behavior.
    torch.save(model, f'E:/model/playphone+sleepthepose/model_resnetmini_睡岗9分类{end}.pth')
    return loss_list
# Script entry point: run training with the default dataset path.
if __name__ == "__main__":
    train()
训练情况:
......
[3 epoch, 500] loss: 2.186424
[3 epoch, 600] loss: 2.192622
[3 epoch, 700] loss: 2.165229
[3 epoch, 800] loss: 2.125184
[3 epoch, 900] loss: 2.185377
learn_rate : 0.01000
[4 epoch, 100] loss: 2.138786
[4 epoch, 200] loss: 2.177925
[4 epoch, 300] loss: 2.103718
......
备注:代码只是讲解工具,并非可以运行的实例,因为里面的数据集需要有并自己写数据集的代码。
直接拿来用固然不错,但自己封装一遍再用,显得更加标准,有水平。
比如封装如下:
class ResNet18forClassify(nn.Module):
    """ResNet18 wrapped with a 9-way classification head.

    The backbone is a stock torchvision ResNet18 whose final fc layer is
    swapped for a 9-output linear layer; every other layer is initialized
    from the ImageNet-pretrained checkpoint.

    Args:
        phase: "train" makes forward() return raw logits; "test" makes it
            return softmax probabilities instead.
    """

    def __init__(self, phase="train"):
        super().__init__()
        self.phase = phase
        backbone = models.resnet18()
        # Replace the 1000-way ImageNet head with a 9-way one.
        backbone.fc = nn.Linear(backbone.fc.in_features, 9)
        # Fetch the pretrained checkpoint and discard the fc weights, whose
        # shape no longer matches the new head.
        state = torch.hub.load_state_dict_from_url(
            url='https://download.pytorch.org/models/resnet18-5c106cde.pth', progress=True)
        state.pop('fc.weight')
        state.pop('fc.bias')
        # strict=False leaves the freshly-initialized fc layer untouched.
        backbone.load_state_dict(state, strict=False)
        self.net = backbone
        self.softmax = nn.Softmax(dim=1)

    def forward(self, input_img):
        """Run the backbone; logits in train phase, probabilities in test phase."""
        logits = self.net(input_img)
        return self.softmax(logits) if self.phase == "test" else logits
备注:封装成自己的网络模型,更加方便。
其中,if self.phase == "test": return self.softmax(out)
分类任务中,训练时输出的是各类别的得分,与实际标签做损失计算;测试时,预测结果由激活函数(Softmax)转换为类别及该类别的可能性概率,即输出为样本属于各类别的概率值。
1.https://github.com/pytorch/vision/tree/master/torchvision/models
2.环境搭建:NVIDIA+CUDA+cuDNN的配置与Anaconda虚拟环境的搭建–深度学习第一步
3.Parallax:常用预训练模型下载地址