ResNet-50 (PyTorch Version)

Disclaimer: this article is a learning-record blog post for the 365-day deep learning training camp; the original author is K同学啊.

Prior knowledge

ResNet (the residual network) comes in several depths: ResNet-18, ResNet-34, ResNet-50, ResNet-101, and so on. What sets it apart from an ordinary CNN is the residual connection it introduces, which alleviates the vanishing- and exploding-gradient problems that appear as convolutional networks grow deeper.

Vanishing gradients: during backpropagation, as the number of layers increases, the gradients reaching the early layers become extremely small, approaching 0.

Exploding gradients: during backpropagation, as the number of layers increases, the gradients become very large, so the weight updates are too big and the model cannot converge.

Batch normalization (BN) mitigated these problems to some extent, but adding BN also makes the network more complex and harder to converge. Kaiming He argued that, given a suitable architecture, a deeper network should be no worse than a shallower one, and the residual network was born. Two residual units are mainly used: the two-layer shallow residual block and the three-layer deep (bottleneck) residual block.
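To make the residual idea concrete before we start, here is a minimal sketch of a two-layer residual unit (my own illustration, not code from the original post). The key point is that the input x is added back to F(x) before the activation, so the addition always gives gradients a direct path to earlier layers:

```python
import torch
import torch.nn as nn

class TinyResidualBlock(nn.Module):
    """Minimal two-layer residual unit: out = ReLU(F(x) + x)."""
    def __init__(self, channels):
        super().__init__()
        self.f = nn.Sequential(
            nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(channels),
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # The skip connection: even if F(x) contributes nothing,
        # the block can still pass x through unchanged.
        return self.relu(self.f(x) + x)

x = torch.randn(1, 64, 56, 56)
print(TinyResidualBlock(64)(x).shape)  # torch.Size([1, 64, 56, 56])
```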
My environment: Python 3.8.10, PyTorch 2.4.1+cpu, Torchvision 0.19.1+cpu.

Learning record

Since this is my first post after coming back to PyTorch, I will try to explain things in some detail. The overall workflow is much the same as in TensorFlow: set up the GPU, split and preprocess the dataset, choose a network, write the training and test functions, run the training, and finally visualize the results.

1. Setting up the GPU

```python
import torch
import torch.nn as nn
from torchvision import datasets, transforms

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
```

2. Importing the data

When importing the data we first define a transform to preprocess it:

```python
train_transforms = transforms.Compose([
    # Resize every image to 224 x 224
    transforms.Resize((224, 224)),
    # Convert to a tensor
    transforms.ToTensor(),
    # Normalize with the ImageNet mean and standard deviation
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])
```

Remember that PyTorch expects images to be converted to tensors. We can then call datasets.ImageFolder to process the images in bulk; it returns a Dataset and automatically assigns each image a label based on its folder name:

```python
total_dataset = datasets.ImageFolder(data_dir, transform=train_transforms)
```

After splitting the dataset by ratio, we call DataLoader to produce the training and test loaders:

```python
train_size = int(0.8 * len(total_dataset))
test_size = len(total_dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(total_dataset, [train_size, test_size])

batch_size = 4
train_dl = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dl = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
```

3. Choosing the network

This time we build ResNet-50 ourselves. Because the architecture repeatedly uses convolutional blocks and identity blocks, we define those two blocks first.

Identity block: three convolutions, after which the result is added to the original input and passed through an activation.

```python
class IdentityBlock(nn.Module):
    def __init__(self, in_channels, filters, kernel_size):
        super(IdentityBlock, self).__init__()
        # filters is a list of three channel counts
        f1, f2, f3 = filters
        self.conv1 = nn.Sequential(
            # bias=False: the bias is redundant because batch norm follows
            nn.Conv2d(in_channels, f1, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(f1),
            # inplace=True saves memory
            nn.ReLU(inplace=True)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(f1, f2, kernel_size=kernel_size, stride=1, padding="same", bias=False),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True)
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(f2, f3, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(f3)
        )
        # Add first, then activate, to preserve the linearity of the shortcut path
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.conv2(out)
        out = self.conv3(out)
        # Add first, then activate
        out += identity
        out = self.relu(out)
        return out
```

Convolutional block: one path convolves three times, the other path convolves once, and the two are added and then activated.

```python
class ConvBlock(nn.Module):
    def __init__(self, in_channels, filters, kernel_size, stride=2):
        super(ConvBlock, self).__init__()
        f1, f2, f3 = filters
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=1, stride=stride, padding=0, bias=False),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(f1, f2, kernel_size=kernel_size, stride=1, padding="same", bias=False),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True)
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(f2, f3, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(f3)
        )
        # Projection shortcut: matches channels and spatial size of the main path
        self.shortcut = nn.Sequential(
            nn.Conv2d(in_channels, f3, kernel_size=1, stride=stride, padding=0, bias=False),
            nn.BatchNorm2d(f3)
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.conv2(out)
        out = self.conv3(out)
        shortcut = self.shortcut(identity)
        out += shortcut
        out = self.relu(out)
        return out
```
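Before assembling the full network, a quick sanity check (my own addition, assuming the two classes above are defined) confirms the shape behavior: ConvBlock changes channels and, with stride=2, halves the spatial size, while IdentityBlock leaves the shape unchanged:

```python
x = torch.randn(2, 64, 56, 56)

conv_block = ConvBlock(64, [64, 64, 256], kernel_size=3, stride=2)
identity_block = IdentityBlock(256, [64, 64, 256], kernel_size=3)

y = conv_block(x)
print(y.shape)                  # torch.Size([2, 256, 28, 28]) -- channels up, spatial size halved
print(identity_block(y).shape)  # torch.Size([2, 256, 28, 28]) -- shape unchanged
```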
With both blocks defined, we can now write ResNet-50 itself:

```python
class ResNet50(nn.Module):
    def __init__(self, num_classes=3):
        super(ResNet50, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False, padding_mode="zeros"),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )
        self.conv2 = nn.Sequential(
            ConvBlock(64, [64, 64, 256], kernel_size=3, stride=1),
            IdentityBlock(256, [64, 64, 256], kernel_size=3),
            IdentityBlock(256, [64, 64, 256], kernel_size=3)
        )
        self.conv3 = nn.Sequential(
            ConvBlock(256, [128, 128, 512], kernel_size=3),
            IdentityBlock(512, [128, 128, 512], kernel_size=3),
            IdentityBlock(512, [128, 128, 512], kernel_size=3),
            IdentityBlock(512, [128, 128, 512], kernel_size=3)
        )
        self.conv4 = nn.Sequential(
            ConvBlock(512, [256, 256, 1024], kernel_size=3),
            IdentityBlock(1024, [256, 256, 1024], kernel_size=3),
            IdentityBlock(1024, [256, 256, 1024], kernel_size=3),
            IdentityBlock(1024, [256, 256, 1024], kernel_size=3),
            IdentityBlock(1024, [256, 256, 1024], kernel_size=3),
            IdentityBlock(1024, [256, 256, 1024], kernel_size=3)
        )
        self.conv5 = nn.Sequential(
            ConvBlock(1024, [512, 512, 2048], kernel_size=3),
            IdentityBlock(2048, [512, 512, 2048], kernel_size=3),
            IdentityBlock(2048, [512, 512, 2048], kernel_size=3)
        )
        self.avgpool = nn.AvgPool2d(kernel_size=7, stride=7, padding=0)
        self.fc = nn.Linear(2048, num_classes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

model = ResNet50(num_classes=3).to(device)
```

Let's take a look at the parameters:

```python
import torchsummary as summary
summary.summary(model, (3, 224, 224))
```

The "50" counts the weight layers: the opening 7x7 convolution, the 3 convolutions in each of the 16 residual blocks (3 + 4 + 6 + 3 blocks), and the final fc layer, i.e. 1 + 16 * 3 + 1 = 50.

4. Training and test functions

The core of the training and test functions is the forward pass, the backward pass, and moving data to the device with .to(device).

```python
def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    train_loss, correct = 0, 0
    for X, y in dataloader:
        X, y = X.to(device), y.to(device)

        # Forward pass
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backward pass and optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        correct += (pred.argmax(1) == y).type(torch.float).sum().item()

    train_loss /= num_batches
    correct /= size
    return train_loss, correct
```

```python
def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    return test_loss, correct
```

5. Training

After choosing an optimizer and a loss function, we can start training:

```python
import copy

optimizer = torch.optim.AdamW(model.parameters(), lr=0.0001)
loss_fn = nn.CrossEntropyLoss()

num_epochs = 10
train_loss_history = []
train_acc_history = []
test_loss_history = []
test_acc_history = []
best_acc = 0.0

for epoch in range(num_epochs):
    model.train()
    train_loss, train_acc = train(train_dl, model, loss_fn, optimizer)
    train_loss_history.append(train_loss)
    train_acc_history.append(train_acc)

    model.eval()
    test_loss, test_acc = test(test_dl, model, loss_fn)
    test_loss_history.append(test_loss)
    test_acc_history.append(test_acc)

    # Keep a copy of the weights with the best test accuracy
    if test_acc > best_acc:
        best_acc = test_acc
        best_model_wts = copy.deepcopy(model.state_dict())

    lr = optimizer.state_dict()['param_groups'][0]['lr']
    template = ('Epoch [{}/{}], LR: {:.6f}, Train Loss: {:.4f}, Train Acc: {:.4f}, '
                'Test Loss: {:.4f}, Test Acc: {:.4f}')
    print(template.format(epoch + 1, num_epochs, lr, train_loss, train_acc, test_loss, test_acc))

PATH = '../model/resnet50.pth'
torch.save(best_model_wts, PATH)
print('DONE')
```
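Once training is done, the saved weights can be reloaded for inference. Here is a minimal sketch (my own addition, not part of the original post; img_path is a hypothetical placeholder for one of your own images):

```python
from PIL import Image

# Rebuild the model and load the best weights saved above
model = ResNet50(num_classes=3).to(device)
model.load_state_dict(torch.load(PATH, map_location=device))
model.eval()

# img_path is a placeholder -- point it at an image of your own
img_path = "test.jpg"
img = train_transforms(Image.open(img_path).convert("RGB")).unsqueeze(0).to(device)

with torch.no_grad():
    pred = model(img)

# class_to_idx was built by ImageFolder; invert it to map index -> class name
idx_to_class = {v: k for k, v in total_dataset.class_to_idx.items()}
print(idx_to_class[pred.argmax(1).item()])
```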
6. Visualizing the results

```python
import matplotlib.pyplot as plt
import warnings
from datetime import datetime

warnings.filterwarnings("ignore")
plt.rcParams['font.sans-serif'] = ['SimHei']   # allow Chinese characters in figures
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams['figure.dpi'] = 100

current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
epochs = range(1, num_epochs + 1)

plt.figure(figsize=(12, 3))

plt.subplot(1, 2, 1)
plt.plot(epochs, train_loss_history, label='Training Loss')
plt.plot(epochs, test_loss_history, label='Test Loss')
plt.title('Training and Test Loss')
plt.xlabel(current_time)
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(epochs, train_acc_history, label='Training Accuracy')
plt.plot(epochs, test_acc_history, label='Test Accuracy')
plt.title('Training and Test Accuracy')
plt.xlabel(current_time)
plt.legend()

plt.show()
```

Summary

In this post we returned to the PyTorch environment for training, where we can also call image-processing libraries such as OpenCV. We got to know ResNet, a classic CNN architecture, and completed its construction and training.