# A Hands-On Guide to Implementing a ViT Model in PyTorch (Complete Code and Dataset Included)
In computer vision, the Transformer architecture is sparking a revolution. The long-standing dominance of CNNs has been broken: the Vision Transformer (ViT), with its distinctive sequence-modeling approach, delivers impressive performance. This article walks you through a complete PyTorch implementation of a ViT model, covering data preparation, model construction, and training optimization end to end, along with pitfalls to avoid from real-world development.

## 1. Environment Setup and Data Preparation

To do a good job, first sharpen your tools. We start by setting up a Python environment suited to ViT development. Using Anaconda to create an isolated environment avoids dependency conflicts:

```bash
conda create -n vit_env python=3.8
conda activate vit_env
pip install torch==1.12.0+cu113 torchvision==0.13.0+cu113 -f https://download.pytorch.org/whl/torch_stable.html
pip install timm matplotlib tqdm
```

For the dataset, CIFAR-10 is a good starting point: 60,000 32x32 images across 10 classes, large enough to validate the model without consuming excessive compute. Here is the complete data loading and preprocessing code:

```python
import torch
from torchvision import datasets, transforms

# Data augmentation for training
train_transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(15),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

test_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

# Load the dataset
train_data = datasets.CIFAR10('data', train=True, download=True, transform=train_transform)
test_data = datasets.CIFAR10('data', train=False, download=True, transform=test_transform)

# Create the data loaders
batch_size = 256
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size)
```

Note that ViT is sensitive to dataset size. If resources allow, prefer a larger dataset such as ImageNet; on small datasets, consider pretrained weights or knowledge distillation.

## 2. Implementing the ViT Architecture

The core idea of ViT is to split an image into a sequence of patches and process it with a Transformer encoder. Let's break down each key component of that process.

### 2.1 Patch Embedding

A CNN extracts features by sliding convolution kernels over the image; ViT instead splits the image into fixed-size patches. For a 32x32 CIFAR-10 image, a 4x4 patch size yields 64 patches:

```python
import torch.nn as nn

class PatchEmbedding(nn.Module):
    def __init__(self, img_size=32, patch_size=4, in_chans=3, embed_dim=64):
        super().__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        self.n_patches = (img_size // patch_size) ** 2
        # A strided convolution cuts the image into patches and projects them in one step
        self.proj = nn.Conv2d(
            in_chans, embed_dim,
            kernel_size=patch_size, stride=patch_size
        )

    def forward(self, x):
        x = self.proj(x)       # (B, E, H/P, W/P)
        x = x.flatten(2)       # (B, E, N)
        x = x.transpose(1, 2)  # (B, N, E)
        return x
```

### 2.2 Positional Encoding

The Transformer needs positional information to understand the spatial relationships between patches. Unlike the fixed sinusoidal encoding of the original Transformer paper, we follow ViT in using more flexible learnable position embeddings:

```python
class PositionalEncoding(nn.Module):
    def __init__(self, n_patches=64, embed_dim=64):
        super().__init__()
        # One extra position for the class token prepended to the patch sequence
        self.pos_embed = nn.Parameter(torch.zeros(1, n_patches + 1, embed_dim))
        nn.init.trunc_normal_(self.pos_embed, std=0.02)

    def forward(self, x):
        return x + self.pos_embed[:, :x.size(1)]
```

### 2.3 Transformer Encoder

The core modules implement multi-head self-attention and the feed-forward network:

```python
class Attention(nn.Module):
    def __init__(self, dim, n_heads=8):
        super().__init__()
        self.n_heads = n_heads
        self.scale = (dim // n_heads) ** -0.5
        self.qkv = nn.Linear(dim, dim * 3)
        self.proj = nn.Linear(dim, dim)

    def forward(self, x):
        B, N, C = x.shape
        # Project to q, k, v and split heads: (3, B, heads, N, head_dim)
        qkv = self.qkv(x).reshape(B, N, 3, self.n_heads, C // self.n_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        return x

class MLP(nn.Module):
    def __init__(self, dim, hidden_dim):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, dim)
        )

    def forward(self, x):
        return self.net(x)

class EncoderBlock(nn.Module):
    def __init__(self, dim, n_heads, mlp_ratio=4):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.attn = Attention(dim, n_heads)
        self.norm2 = nn.LayerNorm(dim)
        self.mlp = MLP(dim, dim * mlp_ratio)

    def forward(self, x):
        # Pre-norm residual connections, as in the ViT paper
        x = x + self.attn(self.norm1(x))
        x = x + self.mlp(self.norm2(x))
        return x
```
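Before assembling the full model, it is worth sanity-checking the tensor shapes these components produce. The snippet below is a minimal sketch; the batch of random tensors and the zero-filled stand-in for the class token are illustrative assumptions, not part of the original article:

```python
# Quick shape check for the components above
x = torch.randn(2, 3, 32, 32)            # hypothetical batch of two CIFAR-10-sized images

patch_embed = PatchEmbedding()
tokens = patch_embed(x)
print(tokens.shape)                      # torch.Size([2, 64, 64]) -> (B, N, E)

cls = torch.zeros(2, 1, 64)              # stand-in for the learnable class token
tokens = torch.cat((cls, tokens), dim=1)
tokens = PositionalEncoding()(tokens)    # adds learnable position embeddings
out = EncoderBlock(dim=64, n_heads=8)(tokens)
print(out.shape)                         # torch.Size([2, 65, 64]) -> shape preserved
```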
## 3. Assembling the Complete ViT Model

Integrating all the components gives the full ViT model, with a classification head and the necessary weight initialization:

```python
class VisionTransformer(nn.Module):
    def __init__(self, img_size=32, patch_size=4, in_chans=3, n_classes=10,
                 embed_dim=64, depth=6, n_heads=8, mlp_ratio=4):
        super().__init__()
        self.embed_dim = embed_dim  # exposed for downstream heads (see Section 6.3)
        self.patch_embed = PatchEmbedding(img_size, patch_size, in_chans, embed_dim)
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = PositionalEncoding(self.patch_embed.n_patches, embed_dim)
        self.blocks = nn.ModuleList([
            EncoderBlock(embed_dim, n_heads, mlp_ratio) for _ in range(depth)
        ])
        self.norm = nn.LayerNorm(embed_dim)
        self.head = nn.Linear(embed_dim, n_classes)

        nn.init.trunc_normal_(self.cls_token, std=0.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            nn.init.trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.zeros_(m.bias)

    def forward(self, x):
        B = x.shape[0]
        x = self.patch_embed(x)                       # (B, N, E)
        cls_token = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_token, x), dim=1)          # (B, 1+N, E)
        x = self.pos_embed(x)
        for block in self.blocks:
            x = block(x)
        x = self.norm(x)
        cls_token_final = x[:, 0]                     # extract the classification token
        x = self.head(cls_token_final)
        return x
```

## 4. Training and Optimization

A few points deserve special attention when implementing the training loop.

### 4.1 Learning Rate Schedule

ViT training usually calls for a carefully designed learning-rate schedule. We use cosine annealing with warmup:

```python
import math

def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            # Linear warmup
            return float(current_step) / float(max(1, num_warmup_steps))
        # Cosine decay after warmup
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return 0.5 * (1.0 + math.cos(math.pi * progress))
    return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
```

### 4.2 Training Loop

The complete training loop contains the following key components:

```python
from tqdm import tqdm

def train_model(model, train_loader, test_loader, epochs=50):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.AdamW(model.parameters(), lr=3e-4, weight_decay=0.05)
    total_steps = len(train_loader) * epochs
    scheduler = get_cosine_schedule_with_warmup(
        optimizer, num_warmup_steps=500, num_training_steps=total_steps
    )

    best_acc = 0.0
    for epoch in range(epochs):
        model.train()
        train_loss = 0.0
        for images, labels in tqdm(train_loader):
            images, labels = images.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            scheduler.step()

            train_loss += loss.item()

        # Validation phase
        model.eval()
        correct = 0
        total = 0
        with torch.no_grad():
            for images, labels in test_loader:
                images, labels = images.to(device), labels.to(device)
                outputs = model(images)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

        acc = 100 * correct / total
        print(f'Epoch {epoch+1}/{epochs}, Loss: {train_loss/len(train_loader):.4f}, Acc: {acc:.2f}%')

        if acc > best_acc:
            best_acc = acc
            torch.save(model.state_dict(), 'best_vit_model.pth')

    print(f'Training complete. Best accuracy: {best_acc:.2f}%')
```

### 4.3 Practical Training Tricks

In real projects, we have found the following tricks noticeably improve ViT training (a sketch combining them into a single training step follows this list):

- Gradient clipping to prevent exploding gradients:

```python
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
```

- Mixed-precision training to reduce GPU memory usage:

```python
scaler = torch.cuda.amp.GradScaler()

with torch.cuda.amp.autocast():
    outputs = model(images)
    loss = criterion(outputs, labels)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
```

- Label smoothing to mitigate overfitting:

```python
criterion = nn.CrossEntropyLoss(label_smoothing=0.1)
```
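These tricks interact: under mixed precision, gradients must be unscaled before clipping, and `optimizer.zero_grad()` still has to run every step. Below is a minimal sketch of one training step combining all three, following the standard `torch.cuda.amp` pattern; the `training_step` function and its closure over `criterion` and `scaler` are illustrative, not from the original article:

```python
# Hypothetical combined step: mixed precision + gradient clipping + label smoothing
criterion = nn.CrossEntropyLoss(label_smoothing=0.1)
scaler = torch.cuda.amp.GradScaler()

def training_step(model, images, labels, optimizer):
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        outputs = model(images)
        loss = criterion(outputs, labels)
    scaler.scale(loss).backward()
    # Unscale first so the clip threshold applies to the true gradient norm
    scaler.unscale_(optimizer)
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
    scaler.step(optimizer)
    scaler.update()
    return loss.item()
```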
## 5. Evaluation and Visualization

After training, we want to analyze the model's performance in depth and understand how it works.

### 5.1 Computing Evaluation Metrics

Beyond accuracy, other metrics deserve attention:

```python
from sklearn.metrics import classification_report

def evaluate_model(model, test_loader):
    device = next(model.parameters()).device
    model.eval()
    all_preds = []
    all_labels = []
    with torch.no_grad():
        for images, labels in test_loader:
            images = images.to(device)
            outputs = model(images)
            _, preds = torch.max(outputs, 1)
            all_preds.extend(preds.cpu().numpy())
            all_labels.extend(labels.numpy())
    # Per-class precision, recall, and F1
    print(classification_report(all_labels, all_preds))
    return all_preds, all_labels
```

### 5.2 Attention Visualization

Understanding how ViT "looks at" an image is essential. We can visualize the attention the CLS token pays to each patch:

```python
import matplotlib.pyplot as plt

def visualize_attention(model, image, patch_size=4):
    device = next(model.parameters()).device
    model.eval()

    # Recompute the attention weights block by block
    with torch.no_grad():
        tokens = model.patch_embed(image.unsqueeze(0).to(device))
        cls_token = model.cls_token.expand(1, -1, -1)
        x = torch.cat((cls_token, tokens), dim=1)
        x = model.pos_embed(x)

        attention_maps = []
        for block in model.blocks:
            normed = block.norm1(x)
            B, N, C = normed.shape
            n_heads = block.attn.n_heads
            qkv = block.attn.qkv(normed).reshape(B, N, 3, n_heads, C // n_heads)
            q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(0)
            attn = (q @ k.transpose(-2, -1)) * block.attn.scale
            attn = attn.softmax(dim=-1)
            # Attention from the CLS token to every patch, averaged over heads
            attention_maps.append(attn[:, :, 0, 1:].mean(dim=1))
            x = block(x)  # advance through the full block (residual path included)

    # Visualize the first six blocks on the patch grid (8x8 for our settings)
    grid = model.patch_embed.img_size // patch_size
    fig, axes = plt.subplots(2, 3, figsize=(12, 8))
    for i, ax in enumerate(axes.flat):
        if i < len(attention_maps):
            attn_map = attention_maps[i].cpu().numpy().reshape(grid, grid)
            ax.imshow(attn_map, cmap='hot')
            ax.set_title(f'Block {i+1}')
        ax.axis('off')
    plt.tight_layout()
    plt.show()
```

## 6. Practical Optimization and Tuning Tips

Key ViT tuning strategies based on real project experience.

### 6.1 Hyperparameter Guide

| Parameter | Recommended range | Effect |
| --- | --- | --- |
| Patch size | 4-16 pixels | Smaller patches capture finer detail but increase compute |
| Embedding dimension | 64-768 | Larger means more model capacity |
| Number of heads | 8-12 | Sets how many attention patterns run in parallel |
| Depth | 6-12 layers | Deeper models may run into gradient problems |
| Learning rate | 3e-4 to 5e-4 | Needs to be paired with a warmup schedule |

### 6.2 Common Problems and Fixes

- Unstable training: add gradient clipping, lower the learning rate, add more layer normalization.
- Overfitting: strengthen data augmentation, raise weight decay (0.05-0.1), use Dropout (0.1-0.3).
- Performance bottlenecks:

```python
# Prefer the FlashAttention backend for scaled dot-product attention
# (PyTorch 2.0+; only affects code paths that call F.scaled_dot_product_attention)
torch.backends.cuda.enable_flash_sdp(True)
```

### 6.3 Extensions to Other Tasks

The ViT architecture is flexible and extends readily to other vision tasks. The heads below are skeletons; their forward passes are omitted.

Object detection:

```python
class ViTForDetection(nn.Module):
    def __init__(self, backbone, num_boxes=5, num_classes=20):
        super().__init__()
        self.backbone = backbone
        self.bbox_head = nn.Linear(backbone.embed_dim, num_boxes * 4)
        self.cls_head = nn.Linear(backbone.embed_dim, num_boxes * num_classes)
```

Semantic segmentation:

```python
class ViTForSegmentation(nn.Module):
    def __init__(self, backbone, num_classes=21):
        super().__init__()
        self.backbone = backbone
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(backbone.embed_dim, 256, kernel_size=4, stride=4),
            nn.Conv2d(256, num_classes, kernel_size=1)
        )
```

In real projects we have found that ViT performs remarkably well when data is plentiful, but it is more sensitive to hyperparameter choices than a CNN. Start with small-scale experiments and scale the model up gradually; a minimal end-to-end driver tying this article's pieces together is sketched below. The complete code is hosted in a GitHub repository along with more advanced features and optimization tricks.
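To wrap up, here is a minimal sketch of a driver script wiring together the data loaders from Section 1, the model from Section 3, and the training and evaluation routines from Sections 4 and 5. The five-epoch smoke-test run and the parameter-count printout are illustrative choices, not settings from the original article:

```python
# Minimal end-to-end run (assumes the definitions from Sections 1-5 are in scope)
if __name__ == '__main__':
    model = VisionTransformer(
        img_size=32, patch_size=4, in_chans=3, n_classes=10,
        embed_dim=64, depth=6, n_heads=8
    )
    n_params = sum(p.numel() for p in model.parameters())
    print(f'Model parameters: {n_params / 1e6:.2f}M')

    # Short smoke test; raise epochs for a real training run
    train_model(model, train_loader, test_loader, epochs=5)
    evaluate_model(model, test_loader)
```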