# 05 Model Deployment and Engineering: Inference Optimization: Knowledge Distillation
Inference optimization through knowledge distillation: the teacher-student architecture, distillation losses, and lightweight deployment.

## 1. Knowledge Distillation Overview

### 1.1 What Is Knowledge Distillation

```python
import warnings

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import FancyBboxPatch

warnings.filterwarnings("ignore")

print("=" * 60)
print("Knowledge distillation: a large model teaches a small one")
print("=" * 60)

# Knowledge distillation architecture diagram
fig, axes = plt.subplots(1, 2, figsize=(14, 6))

# Teacher-student architecture
ax1 = axes[0]
ax1.axis("off")
ax1.set_title("Teacher-student architecture", fontsize=11)

# Teacher model
teacher = FancyBboxPatch((0.1, 0.5), 0.3, 0.3, boxstyle="round,pad=0.05",
                         facecolor="lightcoral", ec="black")
ax1.add_patch(teacher)
ax1.text(0.25, 0.65, "Teacher model\n(large)", ha="center", va="center", fontsize=9)

# Student model
student = FancyBboxPatch((0.6, 0.5), 0.3, 0.3, boxstyle="round,pad=0.05",
                         facecolor="lightgreen", ec="black")
ax1.add_patch(student)
ax1.text(0.75, 0.65, "Student model\n(small)", ha="center", va="center", fontsize=9)

# Distillation arrow
ax1.annotate("", xy=(0.6, 0.65), xytext=(0.4, 0.65),
             arrowprops=dict(arrowstyle="->", lw=2))
ax1.text(0.5, 0.7, "Knowledge distillation", ha="center", fontsize=9)

# Illustrative effect comparison
ax2 = axes[1]
methods = ["Teacher", "Student\n(from scratch)", "Student\n(distilled)"]
sizes = [100, 10, 10]          # parameters, in millions
accuracies = [95, 82, 90]      # accuracy, in percent
x = np.arange(len(methods))
width = 0.35
ax2.bar(x - width / 2, sizes, width, label="Params (M)", color="lightblue")
ax2.bar(x + width / 2, accuracies, width, label="Accuracy (%)", color="lightcoral")
ax2.set_xticks(x)
ax2.set_xticklabels(methods)
ax2.set_ylabel("Value")
ax2.set_title("Effect of knowledge distillation")
ax2.legend()

plt.suptitle("Knowledge distillation: the large model teaches the small one", fontsize=14)
plt.tight_layout()
plt.show()

print("\nCore ideas of knowledge distillation:")
print("  Teacher model: a trained large model that provides soft labels")
print("  Student model: a small model to be trained on the teacher's knowledge")
print("  Distillation loss: pushes the student to mimic the teacher's outputs")
```

## 2. Distillation Loss

### 2.1 Soft Labels and Temperature

```python
def soft_labels_temperature():
    """Soft labels and the temperature parameter."""
    print("\n" + "=" * 60)
    print("Soft labels and temperature")
    print("=" * 60)

    fig, axes = plt.subplots(1, 2, figsize=(14, 6))

    # Hard labels vs. soft labels
    ax1 = axes[0]
    classes = ["cat", "dog", "bird", "fish"]
    hard_labels = [1, 0, 0, 0]
    soft_labels = [0.7, 0.2, 0.05, 0.05]
    x = np.arange(len(classes))
    width = 0.35
    ax1.bar(x - width / 2, hard_labels, width, label="Hard label", color="lightcoral")
    ax1.bar(x + width / 2, soft_labels, width, label="Soft label", color="lightgreen")
    ax1.set_xlabel("Class")
    ax1.set_ylabel("Probability")
    ax1.set_title("Hard labels vs. soft labels")
    ax1.set_xticks(x)
    ax1.set_xticklabels(classes)
    ax1.legend()

    # Effect of the temperature parameter
    ax2 = axes[1]

    def softmax_with_temperature(logits, T):
        exp_logits = np.exp(logits / T)
        return exp_logits / np.sum(exp_logits)

    logits = np.array([3.0, 1.0, 0.5, 0.2])
    temperatures = [0.5, 1.0, 2.0, 5.0]
    x = np.arange(len(logits))
    width = 0.2
    for i, T in enumerate(temperatures):
        probs = softmax_with_temperature(logits, T)
        ax2.bar(x + i * width - 0.3, probs, width, label=f"T={T}", alpha=0.7)
    ax2.set_xlabel("Class")
    ax2.set_ylabel("Probability")
    ax2.set_title("Effect of temperature on the distribution")
    ax2.set_xticks(x)
    ax2.set_xticklabels(["A", "B", "C", "D"])
    ax2.legend()

    plt.suptitle("Soft labels and temperature", fontsize=12)
    plt.tight_layout()
    plt.show()

    print("\nWhat the temperature does:")
    print("  T = 1: standard softmax")
    print("  T > 1: smoother distribution, smaller gaps between classes")
    print("  T < 1: sharper distribution, the top class dominates")
    print("  Distillation usually uses T > 1 so the student learns inter-class relationships")

soft_labels_temperature()
```

### 2.2 The Distillation Loss Function

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class DistillationLoss(nn.Module):
    def __init__(self, temperature=3.0, alpha=0.7):
        super(DistillationLoss, self).__init__()
        self.temperature = temperature
        self.alpha = alpha

    def forward(self, student_logits, teacher_logits, labels):
        # Distillation loss on the soft labels
        soft_loss = F.kl_div(
            F.log_softmax(student_logits / self.temperature, dim=1),
            F.softmax(teacher_logits / self.temperature, dim=1),
            reduction="batchmean",
        ) * (self.temperature ** 2)

        # Hard-label loss
        hard_loss = F.cross_entropy(student_logits, labels)

        # Total loss
        return self.alpha * soft_loss + (1 - self.alpha) * hard_loss

# Usage example
criterion = DistillationLoss(temperature=3.0, alpha=0.7)
student_logits = torch.randn(32, 10)
teacher_logits = torch.randn(32, 10)
labels = torch.randint(0, 10, (32,))
loss = criterion(student_logits, teacher_logits, labels)
print(f"Distillation loss: {loss.item():.4f}")
```
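The two-term objective that `DistillationLoss` implements is the standard Hinton-style formulation. With student logits $z^s$, teacher logits $z^t$, temperature $T$, hard labels $y$, and the tempered softmax $p_i(z, T) = \exp(z_i/T) / \sum_j \exp(z_j/T)$, the loss is:

$$
\mathcal{L} = \alpha \, T^{2} \, \mathrm{KL}\big(p(z^{t}, T) \,\|\, p(z^{s}, T)\big) \;+\; (1 - \alpha)\, \mathrm{CE}(z^{s}, y)
$$

The $T^2$ factor compensates for the $1/T^2$ scaling that the temperature introduces into the soft-term gradients, keeping the two terms comparable as $T$ changes; this is why the code multiplies the KL term by `self.temperature ** 2`.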
## 3. Distillation in PyTorch

### 3.1 Basic Distillation

```python
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

class DistillationTrainer:
    def __init__(self, teacher_model, student_model, train_loader, val_loader,
                 temperature=3.0, alpha=0.7, lr=0.001, device="cuda"):
        self.teacher = teacher_model.to(device)
        self.student = student_model.to(device)
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.device = device
        self.temperature = temperature
        self.alpha = alpha
        self.criterion = DistillationLoss(temperature, alpha)
        self.optimizer = optim.Adam(self.student.parameters(), lr=lr)

    def train_epoch(self):
        self.student.train()
        self.teacher.eval()
        total_loss = 0
        for batch_idx, (data, target) in enumerate(self.train_loader):
            data, target = data.to(self.device), target.to(self.device)

            # Teacher forward pass: no gradients needed
            with torch.no_grad():
                teacher_logits = self.teacher(data)

            # Student forward pass
            student_logits = self.student(data)

            # Distillation loss
            loss = self.criterion(student_logits, teacher_logits, target)

            # Backpropagation (only the student is updated)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            total_loss += loss.item()
        return total_loss / len(self.train_loader)

    def train(self, epochs=50):
        for epoch in range(epochs):
            train_loss = self.train_epoch()
            val_acc = self.evaluate()
            if (epoch + 1) % 10 == 0:
                print(f"Epoch {epoch + 1}, Loss: {train_loss:.4f}, Val Acc: {val_acc:.2f}%")

    def evaluate(self):
        self.student.eval()
        correct = 0
        total = 0
        with torch.no_grad():
            for data, target in self.val_loader:
                data, target = data.to(self.device), target.to(self.device)
                output = self.student(data)
                _, predicted = torch.max(output, 1)
                total += target.size(0)
                correct += (predicted == target).sum().item()
        return 100 * correct / total

# Usage example (the model classes and data loaders must be defined elsewhere)
# teacher = ResNet50()
# student = ResNet18()
# trainer = DistillationTrainer(teacher, student, train_loader, val_loader)
# trainer.train(epochs=30)
```

### 3.2 Feature Distillation

```python
class FeatureDistillationLoss(nn.Module):
    def __init__(self, temperature=3.0, alpha=0.7, beta=0.5):
        super(FeatureDistillationLoss, self).__init__()
        self.temperature = temperature
        self.alpha = alpha
        self.beta = beta
        self.mse = nn.MSELoss()

    def forward(self, student_logits, teacher_logits,
                student_features, teacher_features, labels):
        # Output distillation
        soft_loss = F.kl_div(
            F.log_softmax(student_logits / self.temperature, dim=1),
            F.softmax(teacher_logits / self.temperature, dim=1),
            reduction="batchmean",
        ) * (self.temperature ** 2)

        # Hard-label loss
        hard_loss = F.cross_entropy(student_logits, labels)

        # Feature distillation: the student's intermediate layers mimic the teacher's
        # (each pair must have matching shapes; see the adapter sketch below)
        feature_loss = 0
        for s_feat, t_feat in zip(student_features, teacher_features):
            feature_loss += self.mse(s_feat, t_feat)

        return (self.alpha * soft_loss
                + (1 - self.alpha) * hard_loss
                + self.beta * feature_loss)

# Student model that also returns its intermediate features
class StudentWithFeatures(nn.Module):
    def __init__(self):
        super(StudentWithFeatures, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.fc = nn.Linear(64 * 32 * 32, 10)  # assumes 128x128 inputs (two 2x poolings -> 32x32)
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool2d(2)

    def forward(self, x):
        features = []
        x = self.pool(self.relu(self.conv1(x)))
        features.append(x)
        x = self.pool(self.relu(self.conv2(x)))
        features.append(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x, features

# Teacher model: similar structure, but deeper
class TeacherWithFeatures(nn.Module):
    def __init__(self):
        super(TeacherWithFeatures, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, 3, padding=1)
        self.conv2 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv3 = nn.Conv2d(128, 256, 3, padding=1)
        self.fc = nn.Linear(256 * 16 * 16, 10)  # assumes 128x128 inputs (three 2x poolings -> 16x16)
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool2d(2)

    def forward(self, x):
        features = []
        x = self.pool(self.relu(self.conv1(x)))
        features.append(x)
        x = self.pool(self.relu(self.conv2(x)))
        features.append(x)
        x = self.pool(self.relu(self.conv3(x)))
        features.append(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x, features
```
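One practical wrinkle in the example above: `zip(student_features, teacher_features)` pairs a 32-channel student map with a 64-channel teacher map (and 64 with 128), and silently drops the teacher's third feature map, so the raw MSE would fail on a channel mismatch. A common remedy, in the spirit of FitNets-style hint training, is a small learned projection from student to teacher feature space, trained jointly with the student. The sketch below is illustrative: `FeatureAdapter` and its channel pairs are hypothetical names chosen to match the example models above, whose spatial sizes already agree at the two paired stages.

```python
import torch
import torch.nn as nn

class FeatureAdapter(nn.Module):
    """Projects student feature maps to the teacher's channel counts via 1x1 convs.

    Hypothetical helper: the channel pairs match StudentWithFeatures (32, 64)
    and TeacherWithFeatures (64, 128) from the example above.
    """

    def __init__(self, channel_pairs=((32, 64), (64, 128))):
        super().__init__()
        self.projections = nn.ModuleList(
            nn.Conv2d(s_ch, t_ch, kernel_size=1) for s_ch, t_ch in channel_pairs
        )

    def forward(self, student_features):
        # One projection per paired stage; output shapes then match the teacher's
        return [proj(f) for proj, f in zip(self.projections, student_features)]

# Usage: project before computing the feature loss; the adapter's parameters
# go into the same optimizer as the student's.
adapter = FeatureAdapter()
student_feats = [torch.randn(2, 32, 64, 64), torch.randn(2, 64, 32, 32)]
aligned = adapter(student_feats)
print([tuple(f.shape) for f in aligned])  # (2, 64, 64, 64) and (2, 128, 32, 32)
```

If the spatial sizes also differed, one would additionally resize with `F.interpolate` before the MSE; here only the channel counts need fixing.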
## 4. Self-Distillation

### 4.1 How Self-Distillation Works

```python
class SelfDistillationLoss(nn.Module):
    def __init__(self, temperature=3.0, alpha=0.5):
        super(SelfDistillationLoss, self).__init__()
        self.temperature = temperature
        self.alpha = alpha

    def forward(self, logits, aux_logits, labels):
        # Distillation between the main output and the auxiliary output
        soft_loss = F.kl_div(
            F.log_softmax(logits / self.temperature, dim=1),
            F.softmax(aux_logits / self.temperature, dim=1),
            reduction="batchmean",
        ) * (self.temperature ** 2)

        # Hard-label loss
        hard_loss = F.cross_entropy(logits, labels)
        return self.alpha * soft_loss + (1 - self.alpha) * hard_loss

# Model with an auxiliary classifier
class SelfDistillationModel(nn.Module):
    def __init__(self):
        super(SelfDistillationModel, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)

        # Auxiliary classifier on the shallow features
        self.aux_pool = nn.AdaptiveAvgPool2d(1)
        self.aux_fc = nn.Linear(64, 10)

        # Main classifier on the deep features
        self.main_pool = nn.AdaptiveAvgPool2d(1)
        self.main_fc = nn.Linear(128, 10)

        self.relu = nn.ReLU()
        self.pool = nn.MaxPool2d(2)

    def forward(self, x):
        x = self.pool(self.relu(self.conv1(x)))
        x2 = self.pool(self.relu(self.conv2(x)))
        # Auxiliary prediction from the intermediate features
        aux_out = self.aux_fc(self.aux_pool(x2).squeeze(-1).squeeze(-1))
        x3 = self.pool(self.relu(self.conv3(x2)))
        main_out = self.main_fc(self.main_pool(x3).squeeze(-1).squeeze(-1))
        return main_out, aux_out

# Training loop (optimizer, train_loader, and epochs are assumed to be defined)
model = SelfDistillationModel()
criterion = SelfDistillationLoss()
for epoch in range(epochs):
    for data, target in train_loader:
        optimizer.zero_grad()
        main_out, aux_out = model(data)
        loss = criterion(main_out, aux_out, target)
        loss.backward()
        optimizer.step()
```
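Because the auxiliary head exists only to provide a training signal, deployment uses the main head alone, so self-distillation adds no inference cost. A minimal sketch, assuming `model` is the `SelfDistillationModel` trained above:

```python
import torch

model.eval()
with torch.no_grad():
    x = torch.randn(1, 3, 32, 32)  # one RGB image; adaptive pooling handles any size
    main_out, _ = model(x)         # the auxiliary output is simply discarded
    pred = main_out.argmax(dim=1)
print(f"Predicted class: {pred.item()}")
```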
## 5. Hands-On: Distilling BERT

### 5.1 A DistilBERT-Style Implementation

```python
import torch
import torch.nn as nn
from transformers import (
    BertForSequenceClassification,
    BertConfig,
    Trainer,
    TrainingArguments,
    AutoTokenizer,
)

class DistilBERTForSequenceClassification(nn.Module):
    def __init__(self, teacher_model, num_labels=2, hidden_size=384, num_layers=6):
        super(DistilBERTForSequenceClassification, self).__init__()
        # Student configuration: smaller than the teacher
        config = BertConfig(
            vocab_size=teacher_model.config.vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_layers,
            num_attention_heads=6,
            intermediate_size=4 * hidden_size,
            num_labels=num_labels,
        )
        self.model = BertForSequenceClassification(config)
        self.teacher = teacher_model
        self.temperature = 3.0
        self.alpha = 0.7

    def forward(self, input_ids, attention_mask, labels=None):
        # Student prediction
        student_outputs = self.model(input_ids, attention_mask=attention_mask)
        student_logits = student_outputs.logits

        if labels is not None:
            # Teacher prediction
            with torch.no_grad():
                teacher_outputs = self.teacher(input_ids, attention_mask=attention_mask)
                teacher_logits = teacher_outputs.logits

            # Distillation loss
            soft_loss = nn.KLDivLoss(reduction="batchmean")(
                nn.LogSoftmax(dim=1)(student_logits / self.temperature),
                nn.Softmax(dim=1)(teacher_logits / self.temperature),
            ) * (self.temperature ** 2)

            # Hard-label loss
            hard_loss = nn.CrossEntropyLoss()(student_logits, labels)
            return self.alpha * soft_loss + (1 - self.alpha) * hard_loss

        return student_logits

# Distillation with Hugging Face
from transformers import DistilBertForSequenceClassification, DistilBertConfig

# Load the teacher model
teacher = BertForSequenceClassification.from_pretrained("bert-base-uncased")

# Configure the student model
config = DistilBertConfig(
    vocab_size=teacher.config.vocab_size,
    hidden_size=384,
    num_hidden_layers=6,
    num_attention_heads=6,
    intermediate_size=1536,
)
student = DistilBertForSequenceClassification(config)

# Distillation training setup
training_args = TrainingArguments(
    output_dir="./distilbert_results",
    num_train_epochs=3,
    per_device_train_batch_size=16,
    warmup_steps=500,
    weight_decay=0.01,
    logging_dir="./logs",
)

# Custom distillation Trainer
class DistillationTrainer(Trainer):
    def __init__(self, *args, teacher_model=None, **kwargs):
        # Trainer itself does not know about the teacher, so store it here
        super().__init__(*args, **kwargs)
        self.teacher_model = teacher_model

    def compute_loss(self, model, inputs, return_outputs=False, **kwargs):
        input_ids = inputs["input_ids"]
        attention_mask = inputs["attention_mask"]
        labels = inputs["labels"]

        # Teacher prediction
        with torch.no_grad():
            teacher_outputs = self.teacher_model(input_ids, attention_mask=attention_mask)
            teacher_logits = teacher_outputs.logits

        # Student prediction
        student_outputs = model(input_ids, attention_mask=attention_mask)
        student_logits = student_outputs.logits

        # Distillation loss (T = 3.0, alpha = 0.7)
        loss_fct = nn.KLDivLoss(reduction="batchmean")
        soft_loss = loss_fct(
            nn.LogSoftmax(dim=1)(student_logits / 3.0),
            nn.Softmax(dim=1)(teacher_logits / 3.0),
        ) * (3.0 ** 2)
        hard_loss = nn.CrossEntropyLoss()(student_logits, labels)
        loss = 0.7 * soft_loss + 0.3 * hard_loss
        return (loss, student_outputs) if return_outputs else loss

trainer = DistillationTrainer(
    model=student,
    args=training_args,
    train_dataset=train_dataset,  # assumed to be a tokenized dataset
    teacher_model=teacher,
)
trainer.train()
```

## 6. Summary

| Distillation type | Knowledge source | Complexity | Effect | Typical use |
|---|---|---|---|---|
| Output distillation | Soft labels | Low | Good | General compression |
| Feature distillation | Intermediate layers | Medium | Very good | Deep models |
| Self-distillation | Own auxiliary heads | Medium | Good | Single-model training |
| Online distillation | Mutual learning | High | Good | Multi-model ensembles |

Distillation best practices:
- The temperature T is usually set to 3-5.
- α, the soft-label weight, is usually set to around 0.7.
- The student is typically about 1/3 to 1/2 the size of the teacher (a quick way to check this is sketched below).
- Distillation followed by fine-tuning works better than distillation alone.

Typical applications of knowledge distillation:
- Model compression: BERT → DistilBERT
- Model acceleration: large model → small model
- Transfer learning: general-purpose model → task-specific model
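As a quick check on the "1/3 to 1/2 of the teacher" guideline, comparing parameter counts takes only a few lines. A minimal sketch, assuming the `teacher` and `student` models from Section 5 are already instantiated:

```python
def count_parameters(model):
    # Total number of trainable parameters
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

t_params = count_parameters(teacher)
s_params = count_parameters(student)
print(f"Teacher: {t_params / 1e6:.1f}M parameters")
print(f"Student: {s_params / 1e6:.1f}M parameters ({s_params / t_params:.0%} of teacher)")
```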