NLP情感分析:从传统方法到深度学习
# =====================================================================
# NLP sentiment analysis: from traditional methods to deep learning
#
# 1. Task types:
#    - binary (positive/negative)        -> review analysis
#    - ternary (pos/neutral/neg)         -> opinion monitoring
#    - multi-label (mixed emotions)      -> complex text
# 2. Method comparison:
#    - lexicon-based        : medium accuracy
#    - TF-IDF + SVM         : good
#    - Word2Vec + CNN/RNN   : excellent
#    - pretrained (BERT...) : best
# =====================================================================

import torch
import torch.nn as nn


class LexiconSentimentAnalyzer:
    """Rule/lexicon-based sentiment analysis using NLTK's VADER."""

    def __init__(self):
        # Lazy import: nltk is an optional, heavyweight dependency;
        # only pulled in when this analyzer is actually used.
        import nltk
        from nltk.sentiment import SentimentIntensityAnalyzer
        nltk.download("vader_lexicon", quiet=True)
        self.analyzer = SentimentIntensityAnalyzer()

    def analyze(self, text):
        """Return ``(label, compound_score)`` for *text*.

        Uses the standard VADER thresholds: compound > 0.05 is
        positive, compound < -0.05 is negative, otherwise neutral.
        """
        scores = self.analyzer.polarity_scores(text)
        compound = scores["compound"]
        if compound > 0.05:
            return "positive", compound
        if compound < -0.05:
            return "negative", compound
        return "neutral", compound


class TraditionalSentimentAnalyzer:
    """Classical ML baseline: TF-IDF features + linear SVM."""

    def __init__(self):
        # Lazy import: keeps sklearn optional at module import time.
        from sklearn.feature_extraction.text import TfidfVectorizer
        from sklearn.svm import LinearSVC
        from sklearn.pipeline import Pipeline
        self.pipeline = Pipeline([
            ("tfidf", TfidfVectorizer(
                max_features=10000,
                ngram_range=(1, 2),
                stop_words="english",
            )),
            ("clf", LinearSVC(C=1.0)),
        ])

    def train(self, texts, labels):
        """Fit the TF-IDF + SVM pipeline on labelled texts."""
        self.pipeline.fit(texts, labels)

    def predict(self, texts):
        """Return predicted labels for *texts*."""
        return self.pipeline.predict(texts)

    def predict_proba(self, texts):
        """Return pseudo-probabilities of shape (n, 2).

        LinearSVC has no ``predict_proba``; the decision function is
        squashed through a sigmoid as an uncalibrated approximation.
        """
        import numpy as np
        decision = self.pipeline.decision_function(texts)
        proba = 1 / (1 + np.exp(-decision))
        return np.column_stack([1 - proba, proba])


class TextCNN(nn.Module):
    """Kim (2014)-style convolutional network for text classification.

    Parallel Conv2d branches with different kernel heights act as
    n-gram detectors over the embedded token sequence; each branch is
    max-pooled over time and the pooled features are concatenated.
    """

    def __init__(self, vocab_size, embed_dim=128, num_classes=2,
                 num_filters=100, filter_sizes=(3, 4, 5)):
        # NOTE: filter_sizes is a tuple, not a list, to avoid the
        # mutable-default-argument pitfall.
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.convs = nn.ModuleList([
            nn.Conv2d(1, num_filters, (k, embed_dim))
            for k in filter_sizes
        ])
        self.fc = nn.Linear(len(filter_sizes) * num_filters, num_classes)
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        """x: (batch, seq_len) int token ids -> (batch, num_classes) logits.

        seq_len must be >= max(filter_sizes) for the convolutions to
        produce output.
        """
        x = self.embedding(x)          # (batch, seq, embed)
        x = x.unsqueeze(1)             # (batch, 1, seq, embed) for Conv2d
        conv_outputs = []
        for conv in self.convs:
            conv_out = torch.relu(conv(x))                    # (b, f, seq-k+1, 1)
            # Max-over-time pooling collapses the temporal axis.
            pooled = torch.max_pool2d(conv_out, (conv_out.size(2), 1))
            pooled = pooled.squeeze(3).squeeze(2)             # (b, f)
            conv_outputs.append(pooled)
        concat = torch.cat(conv_outputs, dim=1)
        return self.fc(self.dropout(concat))


class DeepSentimentAnalyzer:
    """Training/inference wrapper around :class:`TextCNN`."""

    def __init__(self, vocab_size):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = TextCNN(vocab_size).to(self.device)
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-3)

    def train_epoch(self, dataloader):
        """Run one training epoch.

        Returns ``(mean_batch_loss, accuracy_percent)``.
        """
        self.model.train()
        total_loss = 0.0
        correct = 0
        total = 0
        for texts, labels in dataloader:
            texts = texts.to(self.device)
            labels = labels.to(self.device)
            self.optimizer.zero_grad()
            outputs = self.model(texts)
            loss = self.criterion(outputs, labels)
            loss.backward()
            self.optimizer.step()
            total_loss += loss.item()
            _, predicted = outputs.max(1)
            total += labels.size(0)
            correct += predicted.eq(labels).sum().item()
        return total_loss / len(dataloader), 100.0 * correct / total

    def predict(self, texts):
        """Return predicted class indices as a numpy array.

        Fix: input tensor is moved to ``self.device`` so inference
        works when the model lives on GPU.
        """
        self.model.eval()
        with torch.no_grad():
            outputs = self.model(texts.to(self.device))
            _, predicted = outputs.max(1)
        return predicted.cpu().numpy()


if __name__ == "__main__":
    # Demo usage is guarded so importing this module has no side
    # effects (the original ran these calls at import time and even
    # referenced undefined train/test data).
    analyzer = LexiconSentimentAnalyzer()
    result, score = analyzer.analyze("I love this product! It's amazing.")
    print(f"Sentiment: {result}, Score: {score}")
    # Traditional-ML example (requires your own corpus):
    #   ml = TraditionalSentimentAnalyzer()
    #   ml.train(train_texts, train_labels)
    #   predictions = ml.predict(test_texts)
# ---------------------------------------------------------------------
# 3. Pretrained-model approach (BERT fine-tuning)
# ---------------------------------------------------------------------

import torch


class BertSentimentAnalyzer:
    """Fine-tuned BERT sentiment classifier (3 labels: pos/neutral/neg)."""

    def __init__(self, model_name="bert-base-uncased"):
        # Lazy import: transformers is a heavy optional dependency.
        from transformers import BertTokenizer, BertForSequenceClassification
        self.tokenizer = BertTokenizer.from_pretrained(model_name)
        self.model = BertForSequenceClassification.from_pretrained(
            model_name, num_labels=3
        )
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model.to(self.device)

    def encode_texts(self, texts, max_length=128):
        """Tokenize *texts* into padded/truncated PyTorch tensors."""
        return self.tokenizer(
            texts,
            padding=True,
            truncation=True,
            max_length=max_length,
            return_tensors="pt",
        )

    def train(self, train_texts, train_labels, eval_texts=None, eval_labels=None):
        """Fine-tune with the HuggingFace Trainer.

        ``eval_texts``/``eval_labels`` are accepted for interface
        compatibility but are currently unused (no eval_dataset is
        passed to the Trainer) -- NOTE(review): wire them up if
        per-epoch evaluation is wanted.
        """
        from transformers import Trainer, TrainingArguments
        train_encodings = self.encode_texts(train_texts)

        class TextDataset(torch.utils.data.Dataset):
            # Minimal map-style dataset over tokenizer output.
            def __init__(self, encodings, labels):
                self.encodings = encodings
                self.labels = labels

            def __getitem__(self, idx):
                item = {key: val[idx] for key, val in self.encodings.items()}
                item["labels"] = torch.tensor(self.labels[idx])
                return item

            def __len__(self):
                return len(self.labels)

        train_dataset = TextDataset(train_encodings, train_labels)
        training_args = TrainingArguments(
            output_dir="./results",
            num_train_epochs=3,
            per_device_train_batch_size=16,
            warmup_steps=500,
            weight_decay=0.01,
            logging_dir="./logs",
        )
        trainer = Trainer(
            model=self.model,
            args=training_args,
            train_dataset=train_dataset,
        )
        trainer.train()

    def predict(self, texts):
        """Return predicted label indices (numpy array) for *texts*."""
        self.model.eval()
        encodings = self.encode_texts(texts)
        with torch.no_grad():
            inputs = {k: v.to(self.device) for k, v in encodings.items()}
            outputs = self.model(**inputs)
            predictions = torch.argmax(outputs.logits, dim=1)
        return predictions.cpu().numpy()


# ---------------------------------------------------------------------
# 4. Performance comparison (accuracy / training time / inference):
#    VADER 0.78 (none, very fast) | TF-IDF+SVM 0.87 (minutes, fast)
#    TextCNN 0.91 (hours, fast)   | BERT 0.94 (hours, ms-level)
# 5. Method selection summary:
#    quick analysis -> VADER; medium-scale data -> TF-IDF+SVM;
#    high accuracy  -> pretrained models such as BERT.
# ---------------------------------------------------------------------

def evaluate(y_true, y_pred):
    """Print a classification report and confusion matrix."""
    # Lazy import keeps sklearn optional at module import time.
    from sklearn.metrics import classification_report, confusion_matrix
    print("分类报告:")
    print(classification_report(y_true, y_pred))
    print("\n混淆矩阵:")
    print(confusion_matrix(y_true, y_pred))
本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.coloradmin.cn/o/2593149.html
如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈,一经查实,立即删除!