#!/usr/bin/env python
# coding: utf-8

# # 07 - Feature Selection
# by [Alejandro Correa Bahnsen](albahnsen.com/)
#
# version 0.2, May 2016
#
# ## Part of the class [Machine Learning for Security Informatics](https://github.com/albahnsen/ML_SecurityInformatics)
#
# This notebook is licensed under a [Creative Commons Attribution-ShareAlike 3.0 Unported License](http://creativecommons.org/licenses/by-sa/3.0/deed.en_US). Special thanks go to [Kevin Markham](https://github.com/justmarkham)

# # Preprocessing & Cross-validation (review)

# In[9]:

import pandas as pd
import numpy as np
import zipfile

with zipfile.ZipFile('../datasets/titanic.csv.zip', 'r') as z:
    f = z.open('titanic.csv')
    titanic = pd.read_csv(f, sep=',', index_col=0)

titanic.head()


# In[10]:

titanic.Age.fillna(titanic.Age.median(), inplace=True)
titanic.loc[titanic.Embarked.isnull(), 'Embarked'] = titanic.Embarked.mode().values


# In[11]:

titanic['Sex_Female'] = titanic.Sex.map({'male': 0, 'female': 1})
embarked_dummies = pd.get_dummies(titanic.Embarked, prefix='Embarked')
titanic = pd.concat([titanic, embarked_dummies], axis=1)


# In[12]:

titanic['Age2'] = titanic['Age'] ** 2
titanic['Age3'] = titanic['Age'] ** 3


# In[13]:

from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import cross_val_score

logreg = LogisticRegression(C=1e9)


# In[14]:

features = ['Pclass', 'Age', 'Age2', 'Age3', 'Parch', 'SibSp', 'Sex_Female',
            'Embarked_C', 'Embarked_Q', 'Embarked_S']
X = titanic[list(features)]
y = titanic['Survived']
pd.Series(cross_val_score(logreg, X, y, cv=10, scoring='accuracy')).describe()


# # Removing features with low variance
#
# VarianceThreshold is a simple baseline approach to feature selection. It removes all features whose variance doesn't meet some threshold. By default, it removes all zero-variance features, i.e. features that have the same value in all samples.
#
# As an example, suppose that we have a dataset with boolean features, and we want to remove all features that are either one or zero (on or off) in more than 80% of the samples. Boolean features are Bernoulli random variables, and the variance of such a variable is given by
#
# $$\mathrm{Var}[X] = p(1 - p)$$
#
# so we can select using the threshold .8 * (1 - .8):

# In[15]:

from sklearn.feature_selection import VarianceThreshold

sel = VarianceThreshold(threshold=(.8 * (1 - .8)))
sel.fit(X)
sel.variances_, sel.get_support()


# In[16]:

X_sel = sel.transform(X)
features_sel = np.array(features)[sel.get_support()]
print(np.array(features)[~sel.get_support()])


# In[17]:

pd.Series(cross_val_score(logreg, X_sel, y, cv=10, scoring='accuracy')).describe()


# # Univariate feature selection
#
# Univariate feature selection works by selecting the best features based on univariate statistical tests. It can be seen as a preprocessing step to an estimator. Scikit-learn exposes feature selection routines as objects that implement the transform method:
#
# * SelectKBest removes all but the k highest scoring features.
# * SelectPercentile removes all but a user-specified highest scoring percentage of features, using common univariate statistical tests for each feature: false positive rate SelectFpr, false discovery rate SelectFdr, or family-wise error SelectFwe.
# * GenericUnivariateSelect allows performing univariate feature selection with a configurable strategy. This makes it possible to select the best univariate selection strategy with a hyper-parameter search estimator, as sketched in the next cell.
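# As a minimal sketch of that last idea, the cell below wraps GenericUnivariateSelect in a
# Pipeline and lets GridSearchCV choose both the selection mode and its parameter. It reuses
# the X, y and logreg objects defined above, and the parameter values are illustrative choices;
# depending on the scikit-learn version, GridSearchCV is imported from sklearn.grid_search
# (older releases) or sklearn.model_selection (0.18 and later).

# In[ ]:

from sklearn.pipeline import Pipeline
from sklearn.feature_selection import GenericUnivariateSelect, f_classif
try:
    from sklearn.model_selection import GridSearchCV  # scikit-learn >= 0.18
except ImportError:
    from sklearn.grid_search import GridSearchCV      # older releases

# Univariate selection followed by the logistic regression used above
pipe = Pipeline([
    ('select', GenericUnivariateSelect(score_func=f_classif)),
    ('clf', logreg),
])

# Search over two univariate strategies and a few values of their parameter
param_grid = [
    {'select__mode': ['k_best'], 'select__param': [4, 6, 8, 10]},
    {'select__mode': ['percentile'], 'select__param': [25, 50, 75, 100]},
]

grid = GridSearchCV(pipe, param_grid, cv=10, scoring='accuracy')
grid.fit(X, y)
grid.best_params_, grid.best_score_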
# In[18]:

from sklearn.feature_selection import SelectKBest

sel = SelectKBest(k=8)
sel.fit(X, y)
sel.get_support()


# In[19]:

print(np.array(features)[~sel.get_support()])


# In[20]:

print(np.array(features)[sel.get_support()])


# In[21]:

X_sel = sel.transform(X)
pd.Series(cross_val_score(logreg, X_sel, y, cv=10, scoring='accuracy')).describe()


# There is still the question of how to select the parameter k (one option is to tune it with a hyper-parameter search, as in the sketch above).

# In[22]:

from sklearn.feature_selection import SelectPercentile, f_classif

sel = SelectPercentile(f_classif, percentile=50)
sel.fit(X, y)
sel.get_support()


# In[23]:

print(np.array(features)[~sel.get_support()])


# In[24]:

print(np.array(features)[sel.get_support()])


# In[25]:

X_sel = sel.transform(X)
pd.Series(cross_val_score(logreg, X_sel, y, cv=10, scoring='accuracy')).describe()


# # Recursive feature elimination
#
# Given an external estimator that assigns weights to features (e.g., the coefficients of a linear model), the goal of recursive feature elimination (RFE) is to select features by recursively considering smaller and smaller sets of features. First, the estimator is trained on the initial set of features and a weight is assigned to each of them. Then, the features whose absolute weights are smallest are pruned from the current set of features. That procedure is repeated recursively on the pruned set until the desired number of features to select is reached.
#
# RFECV performs RFE in a cross-validation loop to find the optimal number of features (a sketch follows at the end of this notebook).

# In[26]:

from sklearn.feature_selection import RFE


# In[27]:

sel = RFE(estimator=logreg, n_features_to_select=6)
sel.fit(X, y)


# In[28]:

sel.get_support()


# In[29]:

print(np.array(features)[~sel.get_support()])


# In[30]:

print(np.array(features)[sel.get_support()])


# In[31]:

X_sel = sel.transform(X)
pd.Series(cross_val_score(logreg, X_sel, y, cv=10, scoring='accuracy')).describe()


# In[ ]:
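# As a closing sketch of the RFECV idea mentioned above, the cells below let cross-validation
# choose the number of features instead of fixing it at 6. They reuse the X, y, logreg and
# features objects defined earlier; the step, cv and scoring values are illustrative choices.

# In[ ]:

from sklearn.feature_selection import RFECV

sel_cv = RFECV(estimator=logreg, step=1, cv=10, scoring='accuracy')
sel_cv.fit(X, y)

# Number of features kept by cross-validation and their names
print(sel_cv.n_features_)
print(np.array(features)[sel_cv.support_])


# In[ ]:

X_sel = sel_cv.transform(X)
pd.Series(cross_val_score(logreg, X_sel, y, cv=10, scoring='accuracy')).describe()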