# Load libraries
import numpy as np # Math
import scipy.io # Import data
import time
import sklearn.neighbors, sklearn.linear_model, sklearn.ensemble, sklearn.naive_bayes # Baseline classification techniques
import matplotlib.pyplot as plt
# Load 400 text documents representing 5 classes
# X_train matrix contains the training data
# y_train vector contains the training labels
# X_test matrix contains the test data
# y_test vector contains the test labels
X_train, y_train, X_test, y_test = np.load('datasets/20news_5classes_400docs.npy', allow_pickle=True) # allow_pickle is needed to load a pickled list of arrays with recent NumPy versions
print('X_train size=',X_train.shape)
print('X_test size=',X_test.shape)
print('y_train size=',y_train.shape)
print('y_test size=',y_test.shape)
train_accuracy = YOUR CODE HERE
test_accuracy = YOUR CODE HERE
exec_time = YOUR CODE HERE
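One possible completion (a sketch, not the only answer): fit each of the imported baseline classifiers, time the training, and score both splits. The particular classifier list below is illustrative, chosen to match the imports above.
# Sketch (assumption): loop over a few baseline classifiers,
# timing fit() and scoring on both splits
baselines = {
    'k-NN': sklearn.neighbors.KNeighborsClassifier(),
    'Logistic regression': sklearn.linear_model.LogisticRegression(),
    'Random forest': sklearn.ensemble.RandomForestClassifier(),
    'Naive Bayes': sklearn.naive_bayes.MultinomialNB(),
}
for name, clf in baselines.items():
    t_start = time.time()
    clf.fit(X_train, y_train)
    exec_time = time.time() - t_start
    train_accuracy = clf.score(X_train, y_train)
    test_accuracy = clf.score(X_test, y_test)
    print(name, ': train accuracy=', train_accuracy, 'test accuracy=', test_accuracy, 'time=', exec_time)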
Look at the results. Which technique performs best?
Do you think the other classification techniques are genuinely worse?
Should you trust every black-box data analysis technique with its default settings?
Let us consider a single classification technique, logistic regression:
model = sklearn.linear_model.LogisticRegression(C=C_value)
and its hyperparameter C, which controls the trade-off between the data-fitting term and the regularization term. In scikit-learn, C multiplies the data term, so larger values of C mean weaker regularization.
Hint: You may use the function np.array_split()
num_folds = 5
X_train = X_train.toarray() # for np.array_split
X_train_folds = np.array_split(YOUR CODE HERE)
y_train_folds = YOUR CODE HERE
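A possible completion (sketch): split both the training matrix and the label vector into num_folds roughly equal parts.
# Sketch: num_folds contiguous, roughly equal chunks of rows and labels
X_train_folds = np.array_split(X_train, num_folds)
y_train_folds = np.array_split(y_train, num_folds)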
Values of the hyperparameter C:
C_choices = [1e-2, 5*1e-2, 1e-1, 5*1e-1, 1e0, 5*1e0, 1e1, 5*1e1, 1e2, 5*1e2, 1e3, 5*1e3]
num_Cs = len(C_choices)
accuracy_tab = np.zeros([num_folds,num_Cs])
for C_idx, C_value in enumerate(C_choices):
    for fold_idx in range(num_folds):
        # Extract the train dataset for the current fold
        fold_x_train = np.concatenate([X_train_folds[i] for i in range(num_folds) if i!=fold_idx])
        fold_y_train = YOUR CODE HERE
        # Validation dataset for the current fold
        fold_x_val = X_train_folds[fold_idx]
        fold_y_val = YOUR CODE HERE
        # Run the logistic regression model for the current fold
        accuracy = YOUR CODE HERE
        # Store the accuracy value
        accuracy_tab[fold_idx,C_idx] = accuracy

print(accuracy_tab)
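One way to fill in the loop body (a sketch; any equivalent fold handling works):
# Sketch: build the training labels the same way as the training data,
# hold out fold_idx for validation, then fit and score the model
fold_y_train = np.concatenate([y_train_folds[i] for i in range(num_folds) if i != fold_idx])
fold_y_val = y_train_folds[fold_idx]
model = sklearn.linear_model.LogisticRegression(C=C_value)
model.fit(fold_x_train, fold_y_train)
accuracy = model.score(fold_x_val, fold_y_val)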
Hint: You may use the functions plt.scatter(), np.mean(), np.std(), plt.errorbar(), and plt.show()
# Plot the raw observations
for C_idx, C_value in enumerate(C_choices):
    accuracies_C_idx = accuracy_tab[:,C_idx]
    plt.scatter(YOUR CODE HERE)

# Plot the trend line with error bars that correspond to the standard deviation
accuracies_mean = YOUR CODE HERE
accuracies_std = YOUR CODE HERE
plt.errorbar(np.log(C_choices), accuracies_mean, yerr=accuracies_std)

# Add text
plt.title('Cross-validation on C')
plt.xlabel('log C')
plt.ylabel('Cross-validation accuracy')
plt.show()
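A sketch of the missing plotting pieces, assuming we scatter the per-fold accuracies at log C and aggregate across folds (i.e. along axis 0 of accuracy_tab):
# Sketch: one scatter point per fold at each log(C) value
plt.scatter([np.log(C_value)] * num_folds, accuracies_C_idx)
# Mean and standard deviation of accuracy across the folds
accuracies_mean = np.mean(accuracy_tab, axis=0)
accuracies_std = np.std(accuracy_tab, axis=0)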
Did we do better than the best technique from Question 1, or not?
Hint: You may use the function np.argmax()
idx_best_C = YOUR CODE HERE
accuracy_testset = YOUR CODE HERE
print('best accuracy=',accuracy_testset)
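A possible completion (sketch): pick the C with the best mean cross-validation accuracy, retrain on the full training set, and report the test accuracy with the print above.
# Sketch: select the best C by mean CV accuracy, retrain, evaluate on the test set
idx_best_C = np.argmax(np.mean(accuracy_tab, axis=0))
best_C = C_choices[idx_best_C]
model = sklearn.linear_model.LogisticRegression(C=best_C)
model.fit(X_train, y_train)
accuracy_testset = model.score(X_test, y_test)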