# Environment setup for the feature-selection examples below.
import numpy as np
import pandas as pd
np.set_printoptions(suppress=True)  # print floats without scientific notation
# remember the default array print threshold so it can be restored later
pt = np.get_printoptions()['threshold']
from sklearn.feature_extraction.text import CountVectorizer
# example vectorizer: keep terms in 10%-85% of documents, cap vocabulary at 2000
cv = CountVectorizer(min_df=0.1, max_df=0.85, max_features=2000)
cv  # REPL-style display of the estimator's parameters
CountVectorizer(analyzer='word', binary=False, decode_error='strict', dtype=<class 'numpy.int64'>, encoding='utf-8', input='content', lowercase=True, max_df=0.85, max_features=2000, min_df=0.1, ngram_range=(1, 1), preprocessor=None, stop_words=None, strip_accents=None, token_pattern='(?u)\\b\\w\\w+\\b', tokenizer=None, vocabulary=None)
# Load the Pokemon dataset and one-hot encode the categorical 'Generation' column.
df = pd.read_csv('datasets/Pokemon.csv')
poke_gen = pd.get_dummies(df['Generation'])
poke_gen.head()  # display the first five rows of the dummy-encoded features
Gen 1 | Gen 2 | Gen 3 | Gen 4 | Gen 5 | Gen 6 | |
---|---|---|---|---|---|---|
0 | 1 | 0 | 0 | 0 | 0 | 0 |
1 | 1 | 0 | 0 | 0 | 0 | 0 |
2 | 1 | 0 | 0 | 0 | 0 | 0 |
3 | 1 | 0 | 0 | 0 | 0 | 0 |
4 | 1 | 0 | 0 | 0 | 0 | 0 |
from sklearn.feature_selection import VarianceThreshold
# Filter-based selection: keep only dummy features whose variance exceeds 0.15.
vt = VarianceThreshold(threshold=.15)
vt.fit(poke_gen)
VarianceThreshold(threshold=0.15)
# Summarize each feature's variance alongside the selection mask,
# transposed so features appear as columns.
summary = {'variance': vt.variances_,
           'select_feature': vt.get_support()}
pd.DataFrame(summary, index=poke_gen.columns).T
Gen 1 | Gen 2 | Gen 3 | Gen 4 | Gen 5 | Gen 6 | |
---|---|---|---|---|---|---|
select_feature | True | False | True | False | True | False |
variance | 0.164444 | 0.114944 | 0.16 | 0.128373 | 0.163711 | 0.0919937 |
# Keep only the columns selected by the variance threshold (boolean mask);
# .head() retains just the first five rows for display purposes.
poke_gen_subset = poke_gen.iloc[:,vt.get_support()].head()
poke_gen_subset
Gen 1 | Gen 3 | Gen 5 | |
---|---|---|---|
0 | 1 | 0 | 0 |
1 | 1 | 0 | 0 |
2 | 1 | 0 | 0 |
3 | 1 | 0 | 0 |
4 | 1 | 0 | 0 |
from sklearn.datasets import load_breast_cancer
# Load the Wisconsin breast cancer dataset into feature/target DataFrames.
bc_data = load_breast_cancer()
bc_features = pd.DataFrame(bc_data.data, columns=bc_data.feature_names)
bc_classes = pd.DataFrame(bc_data.target, columns=['IsMalignant'])
# build featureset and response class labels
bc_X = np.array(bc_features)
# flatten the (569, 1) target frame into a 1-D label vector
bc_y = np.array(bc_classes).T[0]
print('Feature set shape:', bc_X.shape)
print('Response class shape:', bc_y.shape)
Feature set shape: (569, 30) Response class shape: (569,)
# Temporarily truncate long array printouts to keep the transcript compact.
np.set_printoptions(threshold=30)
print('Feature set data [shape: '+str(bc_X.shape)+']')
print(np.round(bc_X, 2), '\n')
print('Feature names:')
print(np.array(bc_features.columns), '\n')
print('Predictor Class label data [shape: '+str(bc_y.shape)+']')
print(bc_y, '\n')
print('Predictor name:', np.array(bc_classes.columns))
np.set_printoptions(threshold=pt)  # restore the threshold saved at the top of the file
Feature set data [shape: (569, 30)] [[ 17.99 10.38 122.8 ..., 0.27 0.46 0.12] [ 20.57 17.77 132.9 ..., 0.19 0.28 0.09] [ 19.69 21.25 130. ..., 0.24 0.36 0.09] ..., [ 16.6 28.08 108.3 ..., 0.14 0.22 0.08] [ 20.6 29.33 140.1 ..., 0.26 0.41 0.12] [ 7.76 24.54 47.92 ..., 0. 0.29 0.07]] Feature names: ['mean radius' 'mean texture' 'mean perimeter' 'mean area' 'mean smoothness' 'mean compactness' 'mean concavity' 'mean concave points' 'mean symmetry' 'mean fractal dimension' 'radius error' 'texture error' 'perimeter error' 'area error' 'smoothness error' 'compactness error' 'concavity error' 'concave points error' 'symmetry error' 'fractal dimension error' 'worst radius' 'worst texture' 'worst perimeter' 'worst area' 'worst smoothness' 'worst compactness' 'worst concavity' 'worst concave points' 'worst symmetry' 'worst fractal dimension'] Predictor Class label data [shape: (569,)] [0 0 0 ..., 0 0 1] Predictor name: ['IsMalignant']
from sklearn.feature_selection import chi2, SelectKBest
# Univariate selection: keep the 15 features with the highest chi-squared
# statistic with respect to the class labels.
skb = SelectKBest(score_func=chi2, k=15)
skb.fit(bc_X, bc_y)
SelectKBest(k=15, score_func=<function chi2 at 0x00000166BF43A7B8>)
# Pair each feature name with its chi-squared score.
feature_scores = list(zip(bc_data.feature_names, skb.scores_))
# Ten highest-scoring features, best first.
sorted(feature_scores, key=lambda pair: pair[1], reverse=True)[:10]
[('worst area', 112598.43156405364), ('mean area', 53991.655923750892), ('area error', 8758.5047053344697), ('worst perimeter', 3665.0354163405909), ('mean perimeter', 2011.1028637679051), ('worst radius', 491.68915743332195), ('mean radius', 266.10491719517802), ('perimeter error', 250.57189635982184), ('worst texture', 174.44939960571074), ('mean texture', 93.897508098633352)]
# Boolean mask of the k selected features, mapped back to feature names
# and used to build the reduced feature matrix.
select_features_kbest = skb.get_support()
feature_names_kbest = bc_data.feature_names[select_features_kbest]
feature_subset_df = bc_features[feature_names_kbest]
bc_SX = np.array(feature_subset_df)
print(bc_SX.shape)
print(feature_names_kbest)
(569, 15) ['mean radius' 'mean texture' 'mean perimeter' 'mean area' 'mean concavity' 'radius error' 'perimeter error' 'area error' 'worst radius' 'worst texture' 'worst perimeter' 'worst area' 'worst compactness' 'worst concavity' 'worst concave points']
# Display rows 20-24 of the selected-feature subset, rounded for readability.
feature_subset_df.iloc[20:25].round(2)
mean radius | mean texture | mean perimeter | mean area | mean concavity | radius error | perimeter error | area error | worst radius | worst texture | worst perimeter | worst area | worst compactness | worst concavity | worst concave points | |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
20 | 13.08 | 15.71 | 85.63 | 520.0 | 0.05 | 0.19 | 1.38 | 14.67 | 14.50 | 20.49 | 96.09 | 630.5 | 0.28 | 0.19 | 0.07 |
21 | 9.50 | 12.44 | 60.34 | 273.9 | 0.03 | 0.28 | 1.91 | 15.70 | 10.23 | 15.66 | 65.13 | 314.9 | 0.11 | 0.09 | 0.06 |
22 | 15.34 | 14.26 | 102.50 | 704.4 | 0.21 | 0.44 | 3.38 | 44.91 | 18.07 | 19.08 | 125.10 | 980.9 | 0.60 | 0.63 | 0.24 |
23 | 21.16 | 23.04 | 137.20 | 1404.0 | 0.11 | 0.69 | 4.30 | 93.99 | 29.17 | 35.59 | 188.00 | 2615.0 | 0.26 | 0.32 | 0.20 |
24 | 16.65 | 21.38 | 110.00 | 904.6 | 0.15 | 0.81 | 5.46 | 102.60 | 26.46 | 31.56 | 177.00 | 2215.0 | 0.36 | 0.47 | 0.21 |
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
# build logistic regression model
lr = LogisticRegression()
# evaluating accuracy for model built on full featureset
full_feat_acc = np.average(cross_val_score(lr, bc_X, bc_y, scoring='accuracy', cv=5))
# evaluating accuracy for model built on selected featureset
sel_feat_acc = np.average(cross_val_score(lr, bc_SX, bc_y, scoring='accuracy', cv=5))
# Compare full 30-feature model against the 15-feature chi2-selected model.
print('Model accuracy statistics with 5-fold cross validation')
print('Model accuracy with complete feature set', bc_X.shape, ':', full_feat_acc)
print('Model accuracy with selected feature set', bc_SX.shape, ':', sel_feat_acc)
Model accuracy statistics with 5-fold cross validation Model accuracy with complete feature set (569, 30) : 0.950904193921 Model accuracy with selected feature set (569, 15) : 0.952643324356
from sklearn.feature_selection import RFE
# Recursive feature elimination: repeatedly fit the estimator and drop the
# weakest feature (step=1) until 15 features remain.
lr = LogisticRegression()
rfe = RFE(estimator=lr, n_features_to_select=15, step=1)
rfe.fit(bc_X, bc_y)
RFE(estimator=LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False), n_features_to_select=15, step=1, verbose=0)
# Names of the features retained by RFE.
select_features_rfe = rfe.get_support()
feature_names_rfe = bc_data.feature_names[select_features_rfe]
print(feature_names_rfe)
['mean radius' 'mean texture' 'mean perimeter' 'mean smoothness' 'mean concavity' 'mean concave points' 'mean symmetry' 'texture error' 'worst radius' 'worst texture' 'worst smoothness' 'worst concavity' 'worst concave points' 'worst symmetry' 'worst fractal dimension']
# Features chosen by both the chi-squared filter and RFE.
set(feature_names_kbest).intersection(feature_names_rfe)
{'mean concavity', 'mean perimeter', 'mean radius', 'mean texture', 'worst concave points', 'worst concavity', 'worst radius', 'worst texture'}
from sklearn.ensemble import RandomForestClassifier
# Model-based importance: fit a random forest on the full feature set.
# NOTE(review): no random_state is set, so importances vary run to run.
rfc = RandomForestClassifier()
rfc.fit(bc_X, bc_y)
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=None, max_features='auto', max_leaf_nodes=None, min_impurity_split=1e-07, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1, oob_score=False, random_state=None, verbose=0, warm_start=False)
# Pair each feature name with its forest-derived importance score.
importance_scores = rfc.feature_importances_
feature_importances = list(zip(bc_data.feature_names, importance_scores))
# Ten most important features, highest first.
sorted(feature_importances, key=lambda pair: pair[1], reverse=True)[:10]
[('worst concave points', 0.22465186401289805), ('worst area', 0.22183657032316897), ('mean concave points', 0.18192574025833769), ('worst perimeter', 0.099521838900566054), ('worst radius', 0.084068507192381611), ('worst texture', 0.02243708745933972), ('mean perimeter', 0.020073882937172081), ('worst smoothness', 0.014608966775322443), ('mean radius', 0.01374196961657885), ('worst concavity', 0.011340255118074721)]
# Manual PCA via SVD of the mean-centered data matrix.
# center the feature set
bc_XC = bc_X - bc_X.mean(axis=0)
# decompose using SVD
U, S, VT = np.linalg.svd(bc_XC)
# get principal components (rows of VT are the principal axes; transpose
# so each component becomes a column)
PC = VT.T
# get first 3 principal components
PC3 = PC[:, 0:3]
PC3.shape  # expected (30, 3): 30 original features, 3 components
(30, 3)
# reduce feature set dimensionality: project centered data onto the 3 components
np.round(bc_XC.dot(PC3), 2)
array([[-1160.14, -293.92, -48.58], [-1269.12, 15.63, 35.39], [ -995.79, 39.16, 1.71], ..., [ -314.5 , 47.55, 10.44], [-1124.86, 34.13, 19.74], [ 771.53, -88.64, -23.89]])
from sklearn.decomposition import PCA
# Same reduction with scikit-learn's PCA; it centers internally, so the
# result matches the manual SVD projection up to component sign flips
# (visible when comparing the two printed arrays).
pca = PCA(n_components=3)
pca.fit(bc_X)
PCA(copy=True, iterated_power='auto', n_components=3, random_state=None, svd_solver='auto', tol=0.0, whiten=False)
# Fraction of total variance captured by each of the 3 components.
pca.explained_variance_ratio_
array([ 0.98204467, 0.01617649, 0.00155751])
# Project the data onto the 3 principal components.
bc_pca = pca.transform(bc_X)
np.round(bc_pca, 2)
array([[ 1160.14, -293.92, 48.58], [ 1269.12, 15.63, -35.39], [ 995.79, 39.16, -1.71], ..., [ 314.5 , 47.55, -10.44], [ 1124.86, 34.13, -19.74], [ -771.53, -88.64, 23.89]])
# 5-fold CV accuracy of logistic regression on the 3-component PCA features.
pca_feat_scores = cross_val_score(lr, bc_pca, bc_y, scoring='accuracy', cv=5)
np.average(pca_feat_scores)
0.92808003078106949