特征工程:从数据到特征
特征工程从数据到特征1. 技术分析1.1 特征工程流程特征工程是机器学习的核心环节特征工程流程 数据理解 → 特征提取 → 特征选择 → 特征转换 → 特征验证1.2 特征类型类型描述处理方法数值型连续数值归一化、标准化分类型类别标签独热编码、标签编码文本型文本数据TF-IDF、Word2Vec时间型时间数据时间差、周期性特征空间型地理数据距离计算、网格编码1.3 特征选择方法特征选择方法 过滤法: 基于统计指标 包裹法: 基于模型性能 嵌入法: 基于模型内部特征2. 核心功能实现2.1 特征提取import pandas as pd import numpy as np from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.preprocessing import OneHotEncoder, StandardScaler class FeatureExtractor: def __init__(self): self.extractors {} def add_extractor(self, name, extractor): self.extractors[name] extractor def extract(self, data): features {} for name, extractor in self.extractors.items(): if hasattr(extractor, fit_transform): features[name] extractor.fit_transform(data) else: features[name] extractor(data) return features class NumericalFeatureExtractor: def __init__(self, columnsNone): self.columns columns self.scaler StandardScaler() def fit_transform(self, df): if self.columns: data df[self.columns] else: data df.select_dtypes(include[np.number]) return self.scaler.fit_transform(data) def transform(self, df): if self.columns: data df[self.columns] else: data df.select_dtypes(include[np.number]) return self.scaler.transform(data) class CategoricalFeatureExtractor: def __init__(self, columnsNone): self.columns columns self.encoder OneHotEncoder(sparseFalse, handle_unknownignore) def fit_transform(self, df): if self.columns: data df[self.columns] else: data df.select_dtypes(include[object]) return self.encoder.fit_transform(data) def transform(self, df): if self.columns: data df[self.columns] else: data df.select_dtypes(include[object]) return self.encoder.transform(data) class TextFeatureExtractor: def __init__(self, column, max_features5000): self.column column self.vectorizer TfidfVectorizer(max_featuresmax_features) def fit_transform(self, df): return self.vectorizer.fit_transform(df[self.column]).toarray() def transform(self, df): return self.vectorizer.transform(df[self.column]).toarray()2.2 特征选择from 
class FeatureSelector:
    """Select the k best features via a filter ("filter") or RFE ("rfe") strategy."""

    def __init__(self, method="filter", k=10):
        self.method = method
        self.k = k
        self.selector = None  # set by fit()

    def fit(self, X, y):
        """Fit the chosen selector on (X, y).

        Raises ValueError for an unknown method instead of leaving
        self.selector as None and crashing later in transform().
        """
        # Lazy imports keep scikit-learn optional until selection is used.
        if self.method == "filter":
            from sklearn.feature_selection import SelectKBest, mutual_info_classif
            self.selector = SelectKBest(score_func=mutual_info_classif, k=self.k)
        elif self.method == "rfe":
            from sklearn.ensemble import RandomForestClassifier
            from sklearn.feature_selection import RFE
            estimator = RandomForestClassifier()
            self.selector = RFE(estimator, n_features_to_select=self.k)
        else:
            raise ValueError(f"unknown selection method: {self.method!r}")
        self.selector.fit(X, y)

    def transform(self, X):
        return self.selector.transform(X)

    def get_selected_features(self):
        """Return selected column indices, or the ranking array as a fallback."""
        if hasattr(self.selector, "get_support"):
            return self.selector.get_support(indices=True)
        return self.selector.ranking_


class FeatureImportanceAnalyzer:
    """Rank features by a fitted model's importances or |coefficients|."""

    def __init__(self, model):
        self.model = model

    def analyze(self, X, y, feature_names):
        """Fit the model and return [(name, importance)] sorted descending.

        Returns None when the model exposes neither feature_importances_
        nor coef_.
        """
        import numpy as np
        self.model.fit(X, y)
        if hasattr(self.model, "feature_importances_"):
            importances = self.model.feature_importances_
        elif hasattr(self.model, "coef_"):
            importances = np.abs(self.model.coef_[0])
        else:
            return None
        order = np.argsort(importances)[::-1]
        return [(feature_names[i], importances[i]) for i in order]


class DimensionalityReducer:
    """Reduce X to n_components via "pca", "tsne" or "umap"."""

    def __init__(self, method="pca", n_components=2):
        self.method = method
        self.n_components = n_components
        if method == "pca":
            from sklearn.decomposition import PCA
            self.reducer = PCA(n_components=n_components)
        elif method == "tsne":
            from sklearn.manifold import TSNE
            self.reducer = TSNE(n_components=n_components)
        elif method == "umap":
            import umap
            self.reducer = umap.UMAP(n_components=n_components)
        else:
            raise ValueError(f"unknown reduction method: {method!r}")

    def fit_transform(self, X):
        return self.reducer.fit_transform(X)

    def transform(self, X):
        # NOTE: t-SNE has no transform() for new data; only "pca"/"umap"
        # support out-of-sample projection.
        return self.reducer.transform(X)


class FeatureValidator:
    """Stateless data-quality checks on a feature DataFrame."""

    def check_missing_values(self, df):
        """Return per-column null counts, restricted to columns that have nulls."""
        missing = df.isnull().sum()
        return missing[missing > 0]

    def check_cardinality(self, df, threshold=100):
        """Return columns whose distinct-value count exceeds `threshold`."""
        return [col for col in df.columns if df[col].nunique() > threshold]

    def check_feature_correlation(self, df, threshold=0.8):
        """Return (col_i, col_j, |corr|) for pairs correlated above `threshold`."""
        corr_matrix = df.corr().abs()
        cols = corr_matrix.columns
        high_corr = []
        for i in range(len(cols)):
            for j in range(i):  # lower triangle: each pair reported once
                if corr_matrix.iloc[i, j] > threshold:
                    high_corr.append((cols[i], cols[j], corr_matrix.iloc[i, j]))
        return high_corr


class FeatureDriftDetector:
    """Detect mean drift between a reference and a current dataset."""

    def detect_drift(self, reference_data, current_data, threshold=0.05):
        """Return [(column, relative_mean_shift)] for numeric columns that drifted.

        Shift is |mean_ref - mean_cur| / |mean_ref|; columns with a zero
        reference mean fall back to the absolute current mean to avoid
        division by zero (taking |mean_ref| also keeps the score
        non-negative for negative reference means).
        """
        drift_scores = []
        for col in reference_data.columns:
            if reference_data[col].dtype in ["int64", "float64"]:
                ref_mean = reference_data[col].mean()
                curr_mean = current_data[col].mean()
                denom = abs(ref_mean)
                diff = abs(ref_mean - curr_mean) / denom if denom else abs(curr_mean)
                if diff > threshold:
                    drift_scores.append((col, diff))
        return drift_scores


class FeatureStore:
    """In-memory feature registry with pickle persistence."""

    def __init__(self):
        self.features = {}

    def add_feature(self, name, feature):
        self.features[name] = feature

    def get_feature(self, name):
        """Return the stored feature, or None when absent."""
        return self.features.get(name)

    def save(self, path):
        import pickle
        with open(path, "wb") as f:
            pickle.dump(self.features, f)

    @classmethod  # BUG FIX: was a bare `classmethod` statement, not a decorator
    def load(cls, path):
        """Rebuild a store from a pickle written by save().

        SECURITY: pickle.load executes arbitrary code — only load trusted files.
        """
        import pickle
        with open(path, "rb") as f:
            features = pickle.load(f)
        store = cls()
        store.features = features
        return store


# 3. Benchmarks (from the original article):
#   Selection  — filter: fast, moderate quality, suits high-dim data;
#                RFE: slow, best quality; embedded: balanced, general use.
#   Reduction  — PCA: fast, high info retention; t-SNE: slow, best plots;
#                UMAP: good retention and plots, moderate speed.
#   Encoding   — one-hot: low-cardinality; label: ordinal categories;
#                embedding: high-cardinality (controllable width).
最佳实践4.1 特征工程流程def build_feature_pipeline(config): extractors [] if config.get(numerical, True): extractors.append(NumericalFeatureExtractor()) if config.get(categorical, True): extractors.append(CategoricalFeatureExtractor()) if config.get(text, False): extractors.append(TextFeatureExtractor(text)) return extractors class FeatureEngineeringPipeline: def __init__(self, extractors, selectorNone, reducerNone): self.extractors extractors self.selector selector self.reducer reducer def fit_transform(self, data): features [] for extractor in self.extractors: features.append(extractor.fit_transform(data)) X np.hstack(features) if self.selector: self.selector.fit(X, data[target]) X self.selector.transform(X) if self.reducer: X self.reducer.fit_transform(X) return X def transform(self, data): features [] for extractor in self.extractors: features.append(extractor.transform(data)) X np.hstack(features) if self.selector: X self.selector.transform(X) if self.reducer: X self.reducer.transform(X) return X4.2 特征验证流程class FeatureValidationPipeline: def __init__(self): self.validator FeatureValidator() self.drift_detector FeatureDriftDetector() def validate(self, df): issues {} missing self.validator.check_missing_values(df) if len(missing) 0: issues[missing_values] missing.to_dict() high_card self.validator.check_cardinality(df) if len(high_card) 0: issues[high_cardinality] high_card high_corr self.validator.check_feature_correlation(df) if len(high_corr) 0: issues[high_correlation] high_corr return issues def detect_drift(self, reference, current): return self.drift_detector.detect_drift(reference, current)5. 总结特征工程是机器学习成功的关键特征提取从原始数据中提取有价值的特征特征选择选择最有信息量的特征特征验证确保特征质量特征存储管理和复用特征对比数据如下UMAP 在降维可视化上效果最好RFE 特征选择效果最佳但速度较慢One-Hot 编码适合低基数类别特征推荐使用特征存储系统管理特征
本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.coloradmin.cn/o/2611582.html
如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈,一经查实,立即删除!