# Multilabel tag prediction on the StackOverflow dataset: train a
# one-vs-rest logistic regression and compare macro vs micro F1.
#
# This file was exported from a notebook; IPython magics and cell outputs
# below are preserved as comments so the module is valid Python.

# %matplotlib inline  # IPython magic: only valid inside a notebook, not a plain script
import matplotlib.pyplot as plt  # fix: was `import matplotlib as plt`; `plt` conventionally aliases pyplot
import numpy as np
from autotagger.helpers.preprocess import load_dataset
from sklearn import linear_model
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import f1_score, precision_score, recall_score
from sklearn.multiclass import OneVsRestClassifier

# `sklearn.cross_validation` was deprecated in scikit-learn 0.18 and removed
# in 0.20; prefer `model_selection`, falling back for older installs.
try:
    from sklearn.model_selection import train_test_split
except ImportError:  # scikit-learn < 0.18
    from sklearn.cross_validation import train_test_split

# Load features (vocabulary capped at 100 terms) and the binary label matrix.
X, Y = load_dataset("stackoverflow", max_features=100)

# NOTE(review): test_size=0.80 trains on only 20% of the data — confirm this
# split is intentional and not a transposed train/test fraction.
X_train, X_test, Y_train, Y_test = train_test_split(
    X, Y, test_size=0.80, random_state=42)

# One independent binary logistic-regression classifier per label.
clf = linear_model.LogisticRegression()
meta_clf = OneVsRestClassifier(clf)
meta_clf.fit(X_train, Y_train)

Y_pred = meta_clf.predict(X_test)

# Macro average: unweighted mean of the per-label F1 scores.  Labels that
# never get predicted score 0 and drag the mean down, which is why sklearn
# emits UndefinedMetricWarning here.
macro_f1 = f1_score(Y_test, Y_pred, average='macro')
print("macro F1 (all labels):", macro_f1)
# notebook output: ~0.0379 (with UndefinedMetricWarning for unpredicted labels)

# If we restrict the macro average to labels that had at least one instance
# predicted, the score goes up.
label_scores = f1_score(Y_test, Y_pred, average=None)
valid_label_indices = np.nonzero(label_scores)[0]
macro_f1_valid = f1_score(Y_test, Y_pred, average='macro',
                          labels=valid_label_indices)
print("macro F1 (predicted labels only):", macro_f1_valid)
# notebook output: ~0.1054

# Micro average: F1 computed from the globally pooled TP/FP/FN counts, so
# frequent labels dominate (it is NOT a per-instance average).
micro_f1 = f1_score(Y_test, Y_pred, average='micro')
print("micro F1:", micro_f1)
# notebook output: ~0.1840