# Step-by-Step Tutorial: Reproducing the STANet Remote Sensing Change Detection Model in PyTorch (with LEVIR-CD Dataset Download and Setup)
Implementing STANet from Scratch: A Hands-On Guide to Remote Sensing Change Detection with PyTorch

## Introduction: Why STANet for Remote Sensing Change Detection?

When we need to monitor urban expansion, assess disaster damage, or track infrastructure changes, remote sensing change detection becomes essential. Traditional methods are often limited by illumination changes and registration errors, whereas STANet uses a spatiotemporal self-attention mechanism to capture long-range spatiotemporal dependencies between pixels, which significantly improves detection accuracy. This article walks you through a complete reproduction of the algorithm, from environment setup to model training.

## 1. Environment Setup and Data Acquisition

### 1.1 Configuring the PyTorch Development Environment

We recommend using Anaconda to create an isolated Python environment and avoid dependency conflicts:

```bash
conda create -n stanet python=3.8
conda activate stanet
pip install torch==1.9.0+cu111 torchvision==0.10.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html
```

Core dependencies:

- opencv-python==4.5.3
- scikit-learn==0.24.2
- tqdm==4.62.0
- tensorboard==2.6.0

> Tip: the CUDA version must match your GPU driver; run `nvidia-smi` to check which versions are compatible.

### 1.2 Preparing the LEVIR-CD Dataset

The dataset contains 637 pairs of 1024×1024 Google Earth images annotated with 31,333 building change instances. After downloading, run the following preprocessing:

```python
import os
from PIL import Image

def split_images(img_dir, patch_size=256):
    """Split the large images into the small patches needed for training."""
    for img_name in os.listdir(img_dir):
        img = Image.open(os.path.join(img_dir, img_name))
        for i in range(0, img.width, patch_size):
            for j in range(0, img.height, patch_size):
                patch = img.crop((i, j, i + patch_size, j + patch_size))
                patch.save(f"patches/{img_name[:-4]}_{i}_{j}.png")
```

We suggest splitting the data into training/validation/test sets with a 7:1:2 ratio. The directory structure should look like this:

```
LEVIR-CD/
├── train/
│   ├── time1/   # images at time phase 1
│   ├── time2/   # images at time phase 2
│   └── label/   # change annotation maps
├── val/
└── test/
```

## 2. A Deep Dive into the STANet Architecture

### 2.1 Implementing the Feature Extraction Network

A modified ResNet-18 serves as the backbone. The key modifications:

```python
import torch.nn as nn
from torchvision.models import resnet18

class FeatureExtractor(nn.Module):
    def __init__(self):
        super().__init__()
        resnet = resnet18(pretrained=True)
        self.conv1 = resnet.conv1
        self.bn1 = resnet.bn1
        self.relu = resnet.relu
        self.maxpool = resnet.maxpool
        self.layer1 = resnet.layer1  # 64 output channels
        self.layer2 = resnet.layer2  # 128 output channels
        self.layer3 = resnet.layer3  # 256 output channels

    def forward(self, x):
        # Normalize the input (ImageNet statistics)
        x = (x - 0.406) / 0.225
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        f1 = self.layer1(x)   # 1/4 resolution
        f2 = self.layer2(f1)  # 1/8 resolution
        f3 = self.layer3(f2)  # 1/16 resolution
        return [f1, f2, f3]   # multi-scale feature outputs
```

### 2.2 The Spatiotemporal Attention Modules in Detail

#### 2.2.1 Basic Attention Module (BAM)

```python
class BAM(nn.Module):
    def __init__(self, in_channels):
        super().__init__()
        self.query_conv = nn.Conv2d(in_channels, in_channels // 8, 1)
        self.key_conv = nn.Conv2d(in_channels, in_channels // 8, 1)
        self.value_conv = nn.Conv2d(in_channels, in_channels, 1)
        self.gamma = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        B, C, H, W = x.shape
        # Compute query, key, value
        q = self.query_conv(x).view(B, -1, H * W).permute(0, 2, 1)  # (B, HW, C)
        k = self.key_conv(x).view(B, -1, H * W)                     # (B, C, HW)
        v = self.value_conv(x).view(B, -1, H * W)                   # (B, C, HW)
        # Compute attention weights
        energy = torch.bmm(q, k)                   # (B, HW, HW)
        attention = torch.softmax(energy, dim=-1)
        # Apply attention
        out = torch.bmm(v, attention.permute(0, 2, 1))
        out = out.view(B, C, H, W)
        return self.gamma * out + x  # residual connection
```

#### 2.2.2 Pyramid Attention Module (PAM)

The PAM improves small-object detection through multi-scale processing:

```python
class PAM(nn.Module):
    def __init__(self, in_channels):
        super().__init__()
        self.branches = nn.ModuleList([
            BAM(in_channels) for _ in range(4)
        ])
        self.conv = nn.Conv2d(4 * in_channels, in_channels, 1)

    def forward(self, x):
        B, C, H, W = x.shape
        outputs = []

        # 1x1 branch (global)
        out1 = self.branches[0](x)
        outputs.append(out1)

        # 2x2 branch
        patch_size = 2
        patches = x.unfold(2, H // patch_size, H // patch_size) \
                   .unfold(3, W // patch_size, W // patch_size)
        patches = patches.contiguous().view(B, C, -1, H // patch_size, W // patch_size)
        out2 = torch.cat([
            self.branches[1](patches[:, :, i])
            for i in range(patches.size(2))
        ], dim=0)
        out2 = out2.view(B, C, H, W)
        outputs.append(out2)

        # The 4x4 and 8x8 branches are implemented analogously...

        # Fuse the multi-scale features
        out = torch.cat(outputs, dim=1)
        out = self.conv(out)
        return out + x
```
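The snippets above show the backbone and the attention blocks in isolation, but not how they are wired together into the distance map that the loss in Section 3 consumes. Below is a minimal sketch of one possible assembly, reusing the `FeatureExtractor` and `BAM` classes defined above; the `STANet` class name, the 1×1 projection, and the width-wise concatenation of the two time phases are illustrative assumptions, not the reference implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class STANet(nn.Module):
    """Hypothetical assembly (not the original code): a shared FeatureExtractor,
    a BAM over the side-by-side bi-temporal features, and a per-pixel
    Euclidean distance map that the BCL loss can consume."""
    def __init__(self):
        super().__init__()
        self.extractor = FeatureExtractor()    # Siamese: shared weights for both phases
        self.attention = BAM(in_channels=256)  # could be swapped for PAM
        self.project = nn.Conv2d(256, 64, kernel_size=1)

    def forward(self, img1, img2):
        # Deepest (1/16 resolution, 256-channel) features for each time phase
        f1 = self.extractor(img1)[-1]
        f2 = self.extractor(img2)[-1]
        # Place the two phases side by side so self-attention can relate
        # pixels across both space and time
        f = torch.cat([f1, f2], dim=3)
        f = self.attention(f)
        f = self.project(f)
        f1, f2 = torch.chunk(f, 2, dim=3)
        # Per-pixel Euclidean distance, upsampled back to the input resolution
        dist = torch.norm(f1 - f2, p=2, dim=1, keepdim=True)
        return F.interpolate(dist, scale_factor=16, mode='bilinear',
                             align_corners=False)
```

With a wrapper of this shape, the call `distance_map = model(img1, img2)` in the training loop of Section 3.2 returns a single-channel map at label resolution, which is what the batch-balanced contrastive loss below expects.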
## 3. Training Strategy and Loss Function

### 3.1 Batch-Balanced Contrastive Loss (BCL)

To address the class imbalance inherent in change detection:

```python
class BCLoss(nn.Module):
    def __init__(self, margin=2.0):
        super().__init__()
        self.margin = margin

    def forward(self, distance_map, label):
        # Count changed / unchanged pixels
        pos_num = torch.sum(label == 1).float()
        neg_num = torch.sum(label == 0).float()
        # Per-class losses
        pos_loss = label * torch.pow(distance_map, 2)
        neg_loss = (1 - label) * torch.pow(torch.clamp(self.margin - distance_map, min=0), 2)
        # Balancing weights
        pos_weight = neg_num / (pos_num + neg_num)
        neg_weight = pos_num / (pos_num + neg_num)
        loss = pos_weight * pos_loss + neg_weight * neg_loss
        return torch.mean(loss)
```

### 3.2 Training Workflow and Optimization Tips

We recommend a staged training strategy:

| Stage | Learning rate | Data augmentation | Training target |
|-------|---------------|-------------------|-----------------|
| 1 | 1e-4 | random flips | feature extractor |
| 2 | 5e-5 | + rotation | full model |
| 3 | 1e-5 | all augmentations | fine-tune attention |

Key training code:

```python
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=100)

for epoch in range(200):
    model.train()
    for img1, img2, label in train_loader:
        optimizer.zero_grad()
        distance_map = model(img1, img2)
        loss = criterion(distance_map, label)
        loss.backward()
        optimizer.step()
    scheduler.step()

    # Evaluate on the validation set
    if epoch % 10 == 0:
        model.eval()
        with torch.no_grad():
            val_loss = evaluate(model, val_loader)
        print(f"Epoch {epoch}: val_loss={val_loss:.4f}")
```

## 4. Model Evaluation and Visualization

### 4.1 Quantitative Metrics

We suggest computing the following metrics on the test set:

```python
def compute_metrics(pred, label):
    TP = ((pred == 1) & (label == 1)).sum().float()
    FP = ((pred == 1) & (label == 0)).sum().float()
    FN = ((pred == 0) & (label == 1)).sum().float()
    precision = TP / (TP + FP + 1e-6)
    recall = TP / (TP + FN + 1e-6)
    f1 = 2 * precision * recall / (precision + recall + 1e-6)
    return precision.item(), recall.item(), f1.item()
```

### 4.2 Visualizing the Attention Maps

Use forward hooks to extract the intermediate attention outputs:

```python
def register_hooks(model):
    activations = {}

    def get_activation(name):
        def hook(model, input, output):
            activations[name] = output.detach()
        return hook

    model.bam1.register_forward_hook(get_activation("bam1"))
    model.pam.register_forward_hook(get_activation("pam"))
    return activations

# Visualization example
activations = register_hooks(model)
output = model(img1, img2)
visualize_attention(activations["bam1"][0, 0])  # first channel of the first sample
```

Typical qualitative comparison:

- Without attention: boundaries of changed regions are blurred
- BAM version: captures large-scale changes
- PAM version: additionally preserves fine structure

## 5. Deployment Recommendations

### 5.1 Model Compression

For resource-constrained scenarios, the following optimizations are worth trying:

- Knowledge distillation: use a trained PAM model to guide the training of a BAM model
- Channel pruning: remove redundant channels from the feature extraction network
- Quantized deployment: convert the model to FP16 or INT8

```python
# Example quantization code
quantized_model = torch.quantization.quantize_dynamic(
    model, {nn.Conv2d}, dtype=torch.qint8
)
torch.jit.save(torch.jit.script(quantized_model), "stanet_quantized.pt")
```

### 5.2 Handling Very Large Images

For images too large to fit in GPU memory, use a sliding-window strategy:

```python
import numpy as np
import torch

def predict_large_image(model, img1, img2, window_size=256, stride=128):
    """Predict a large image block by block and blend the results."""
    height, width = img1.shape[-2:]
    pred = torch.zeros((height, width))
    for i in range(0, height, stride):
        for j in range(0, width, stride):
            patch1 = img1[:, :, i:i + window_size, j:j + window_size]
            patch2 = img2[:, :, i:i + window_size, j:j + window_size]
            with torch.no_grad():
                patch_pred = model(patch1, patch2).squeeze()
            # Use a Hanning window to smooth block boundaries
            window = torch.from_numpy(
                np.hanning(window_size) * np.hanning(window_size)[:, None]
            ).float()
            pred[i:i + window_size, j:j + window_size] += patch_pred * window
    return pred > 0.5  # binarize
```

In practice we found that although the PAM module is computationally heavier, it noticeably improves the detection of small buildings. When processing 2000×2000-pixel images, an RTX 3090 or better is recommended for near-real-time performance.
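As a closing note, the training loop in Section 3.2 iterates over `(img1, img2, label)` triples from a `train_loader` that is never defined in this article. The sketch below shows one possible way to build it from the directory layout described in Section 1.2; the `ChangeDetectionDataset` class name, the batch size, and the transform choices are illustrative assumptions.

```python
import os
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms

class ChangeDetectionDataset(Dataset):
    """Hypothetical loader for the time1/time2/label layout from Section 1.2."""
    def __init__(self, root):
        self.root = root
        self.names = sorted(os.listdir(os.path.join(root, 'time1')))
        self.to_tensor = transforms.ToTensor()

    def __len__(self):
        return len(self.names)

    def __getitem__(self, idx):
        name = self.names[idx]
        img1 = self.to_tensor(Image.open(os.path.join(self.root, 'time1', name)).convert('RGB'))
        img2 = self.to_tensor(Image.open(os.path.join(self.root, 'time2', name)).convert('RGB'))
        # Binary change mask: 1 = changed building pixel, 0 = unchanged
        label = self.to_tensor(Image.open(os.path.join(self.root, 'label', name)).convert('L'))
        return img1, img2, (label > 0.5).float()

# Example: building the train_loader used in the training loop of Section 3.2
train_loader = DataLoader(ChangeDetectionDataset('LEVIR-CD/train'),
                          batch_size=8, shuffle=True, num_workers=4)
```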