Knowledge points review:
Standard structure for training and testing on color and grayscale images: wrap the logic in functions
Flatten operation: flatten every dimension except the first (the batch size)
Dropout: randomly drops neurons during training; at test time, eval mode disables dropout (see the sketch after this list)
Homework: study the logic of the testing and training code carefully. It is the foundation: this code framework will be reused throughout, and the focus will gradually shift toward the model-definition stage. The model here is a simple fully connected neural network for inputs of size 28×28 (i.e., 784 features), typically used for the MNIST handwritten-digit recognition task.
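To make the flatten and dropout points concrete, here is a minimal standalone sketch (separate from the main script below; the batch shape and p=0.5 are illustrative choices, not from the original):

import torch
import torch.nn as nn

x = torch.randn(64, 1, 28, 28)   # a dummy batch: 64 grayscale 28x28 images
flat = nn.Flatten()              # keeps dim 0 (the batch size), flattens the rest
print(flat(x).shape)             # torch.Size([64, 784])

drop = nn.Dropout(p=0.5)
drop.train()                     # training mode: each unit is zeroed with probability p,
print(drop(torch.ones(4)))       # survivors scaled by 1/(1-p), e.g. tensor([2., 0., 2., 0.])
drop.eval()                      # eval mode: dropout becomes a no-op
print(drop(torch.ones(4)))       # tensor([1., 1., 1., 1.])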
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
# Font configuration (SimHei also covers CJK glyphs; harmless for the English labels used below)
plt.rcParams["font.family"] = ["SimHei"]
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
# 1. Data preprocessing
transform = transforms.Compose([
    transforms.ToTensor(),                      # convert to a tensor and scale to [0, 1]
    transforms.Normalize((0.1307,), (0.3081,))  # mean and std of the MNIST training set
])
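# Normalize computes output = (input - mean) / std per channel. As a worked
# example (arithmetic, not from the original): a black pixel 0.0 maps to
# (0.0 - 0.1307) / 0.3081 ≈ -0.42, and a white pixel 1.0 maps to
# (1.0 - 0.1307) / 0.3081 ≈ 2.82, so inputs end up roughly zero-centered.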
# 2. Load the MNIST dataset
train_dataset = datasets.MNIST(
    root='./data',
    train=True,
    download=True,
    transform=transform
)
test_dataset = datasets.MNIST(
    root='./data',
    train=False,
    transform=transform
)
# 3. Create the data loaders
batch_size = 64  # process 64 samples per batch
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
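# Optional sanity check (a minimal sketch, not part of the original script):
# every batch yields images of shape [batch, 1, 28, 28] and labels of shape [batch];
# the last batch may be smaller, since 60,000 is not divisible by 64.
# images, labels = next(iter(train_loader))
# print(images.shape, labels.shape)  # torch.Size([64, 1, 28, 28]) torch.Size([64])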
# 4. Define the model, loss function, and optimizer
class MLP(nn.Module):
    def __init__(self):
        super(MLP, self).__init__()
        self.flatten = nn.Flatten()        # flatten the 28x28 image into a 784-dim vector
        self.layer1 = nn.Linear(784, 128)  # first layer: 784 inputs, 128 neurons
        self.relu = nn.ReLU()              # activation function
        self.layer2 = nn.Linear(128, 10)   # second layer: 128 inputs, 10 outputs (one per digit class)

    def forward(self, x):
        x = self.flatten(x)  # flatten the image
        x = self.layer1(x)   # first linear transformation
        x = self.relu(x)     # apply the ReLU activation
        x = self.layer2(x)   # second linear transformation, outputs logits
        return x
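# A quick size check on this architecture: layer1 holds 784*128 + 128 = 100,480
# parameters (weights plus biases) and layer2 holds 128*10 + 10 = 1,290,
# so the MLP has 101,770 trainable parameters in total:
# sum(p.numel() for p in MLP().parameters())  # -> 101770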
# Check whether a GPU is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Initialize the model
model = MLP()
model = model.to(device)  # move the model to the GPU (if available)

criterion = nn.CrossEntropyLoss()  # cross-entropy loss, suited to multi-class classification
optimizer = optim.Adam(model.parameters(), lr=0.001)  # Adam optimizer
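# Note: nn.CrossEntropyLoss applies log-softmax internally, which is exactly why
# forward() returns raw logits; adding a softmax before this loss would distort it.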
# 5. Train the model (recording the loss of every iteration)
def train(model, train_loader, test_loader, criterion, optimizer, device, epochs):
    # Record the loss of every iteration (i.e., every batch)
    all_iter_losses = []  # losses of all batches
    iter_indices = []     # iteration indices (starting at 1)
    for epoch in range(epochs):
        model.train()  # reset training mode each epoch, since test() below switches to eval mode
        running_loss = 0.0
        correct = 0
        total = 0
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)  # move to the GPU (if available)
            optimizer.zero_grad()             # clear gradients
            output = model(data)              # forward pass
            loss = criterion(output, target)  # compute the loss
            loss.backward()                   # backward pass
            optimizer.step()                  # update parameters
            # Record this iteration's loss (note: the single-batch loss, not a running average)
            iter_loss = loss.item()
            all_iter_losses.append(iter_loss)
            iter_indices.append(epoch * len(train_loader) + batch_idx + 1)  # iteration index starts at 1
            # Accumulate loss and accuracy (kept for the epoch-level statistics)
            running_loss += iter_loss
            _, predicted = output.max(1)
            total += target.size(0)
            correct += predicted.eq(target).sum().item()
            # Print training info every 100 batches (including the single-batch loss)
            if (batch_idx + 1) % 100 == 0:
                print(f'Epoch: {epoch+1}/{epochs} | Batch: {batch_idx+1}/{len(train_loader)} '
                      f'| Batch loss: {iter_loss:.4f} | Running avg loss: {running_loss/(batch_idx+1):.4f}')
        # Epoch-level logic: evaluate on the test set and print the epoch results
        epoch_train_loss = running_loss / len(train_loader)
        epoch_train_acc = 100. * correct / total
        epoch_test_loss, epoch_test_acc = test(model, test_loader, criterion, device)
        print(f'Epoch {epoch+1}/{epochs} done | Train accuracy: {epoch_train_acc:.2f}% | Test accuracy: {epoch_test_acc:.2f}%')
    # Plot the loss curve over all iterations
    plot_iter_losses(all_iter_losses, iter_indices)
    # Optional: epoch-level curves
    # plot_metrics(train_losses, test_losses, train_accuracies, test_accuracies, epochs)
    return epoch_test_acc  # return the final test accuracy
# 6. Test the model
def test(model, test_loader, criterion, device):
    model.eval()  # set evaluation mode (disables dropout, among other things)
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():  # skip gradient computation to save memory and compute
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += criterion(output, target).item()
            _, predicted = output.max(1)
            total += target.size(0)
            correct += predicted.eq(target).sum().item()
    avg_loss = test_loss / len(test_loader)
    accuracy = 100. * correct / total
    return avg_loss, accuracy  # return the loss and accuracy
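# Note: dividing by len(test_loader) averages the per-batch mean losses; because the
# last batch is smaller (10,000 test samples are not divisible by 64), this is a close
# approximation to, not exactly, the per-sample average loss.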
# 7. Plot the per-iteration loss curve
def plot_iter_losses(losses, indices):
    plt.figure(figsize=(10, 4))
    plt.plot(indices, losses, 'b-', alpha=0.7, label='Iteration Loss')
    plt.xlabel('Iteration (batch index)')
    plt.ylabel('Loss')
    plt.title('Training loss per iteration')
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.show()
# 8. Run training and testing (epochs=2 to verify the pipeline quickly)
epochs = 2
print("Starting training...")
final_accuracy = train(model, train_loader, test_loader, criterion, optimizer, device, epochs)
print(f"Training complete! Final test accuracy: {final_accuracy:.2f}%")
@浙大疏锦行