import pandas as pd
import os
from sklearn.cross_validation import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix
# single-argument print() behaves identically under Python 2 and 3,
# unlike the Python-2-only print statement used originally
print(pd.__version__)
0.20.3
/usr/local/lib/python2.7/dist-packages/sklearn/cross_validation.py:41: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20. "This module will be removed in 0.20.", DeprecationWarning)
# load the banknote-authentication data set into a dataframe
DATA_DIR = 'data'
csv_path = os.path.abspath(
    os.path.join(DATA_DIR, 'day9/banknote_authentication.csv')
)
df = pd.read_csv(csv_path)
df.head(5)
f1 | f2 | f3 | f4 | target | |
---|---|---|---|---|---|
0 | 3.62160 | 8.6661 | -2.8073 | -0.44699 | 0 |
1 | 4.54590 | 8.1674 | -2.4586 | -1.46210 | 0 |
2 | 3.86600 | -2.6383 | 1.9242 | 0.10645 | 0 |
3 | 3.45660 | 9.5228 | -4.0112 | -3.59440 | 0 |
4 | 0.32924 | -4.4552 | 4.5718 | -0.98880 | 0 |
# (rows, columns)
df.shape
(1372, 5)
# look for NaN values in each column
df.isnull().sum()
f1 0 f2 0 f3 0 f4 0 target 0 dtype: int64
# distribution of the target variable: the output below shows a mild class
# imbalance (762 vs 610). Options include oversampling class 1 or
# undersampling class 0 to balance the classes; we leave the data as-is
# for now.
df['target'].value_counts()
0 762 1 610 Name: target, dtype: int64
# features: every column except the last; labels: the last column ('target')
X = df.iloc[:, :-1].values
Y = df.iloc[:, -1].values
# ideal practice is to hold out 20% - 30% of the data for testing,
# controlled by test_size in train_test_split(); a fixed random_state
# makes the shuffled split reproducible and avoids sequential bias.
def data_split(X, Y):
    """Shuffle-split the data into 70% train / 30% test sets.

    Returns (X_train, X_test, Y_train, Y_test).
    """
    # sklearn.cross_validation was deprecated in 0.18 and removed in 0.20
    # (see the DeprecationWarning above); use the model_selection
    # equivalent instead.
    from sklearn.model_selection import train_test_split
    X_train, X_test, Y_train, Y_test = train_test_split(
        X, Y, test_size=0.3, random_state=10)
    return X_train, X_test, Y_train, Y_test
X_train, X_test, Y_train, Y_test = data_split(X, Y)
# sanity check: 1372 rows split 70/30 -> 960 train / 412 test
X_train.shape, X_test.shape
((960, 4), (412, 4))
class SVMModel:
    """Thin wrapper around sklearn's SVC: train, predict, and score."""

    def __init__(self):
        # default SVC (RBF kernel); adjust hyperparameters here if needed
        self.classifier = SVC()

    def train(self, X_train, Y_train):
        """Fit the underlying classifier and return the fitted model."""
        fitted = self.classifier.fit(X_train, Y_train)
        return fitted

    def predict(self, model, X_test):
        """Return class predictions for X_test from a fitted model."""
        return model.predict(X_test)

    def evaluate(self, Y_test, Y_pred, measure):
        """Score predictions against the true labels.

        measure: 'matrix'   -> confusion matrix with labels [0, 1]
                 'accuracy' -> accuracy as a percentage
                 anything else -> None
        """
        if measure == 'matrix':
            return confusion_matrix(Y_test, Y_pred, labels=[0, 1])
        if measure == 'accuracy':
            return accuracy_score(Y_test, Y_pred) * 100
        return None
# train the model
svm = SVMModel()
model = svm.train(X_train, Y_train)
predictions = svm.predict(model, X_test)

# evaluating the model. print() with a single argument — and print('')
# for a blank line — behaves identically on Python 2 and 3, unlike the
# Python-2-only print statements used originally.
print(svm.evaluate(Y_test, predictions, 'matrix'))
print('')
print(svm.evaluate(Y_test, predictions, 'accuracy'))
[[226 0] [ 0 186]] 100.0