import pandas as pd

# Load the Titanic training data (Kaggle "train.csv" mirror).
df = pd.read_csv('http://bit.ly/kaggletrain')

# Use single brackets around "Name" so X is a 1-D Series:
# CountVectorizer expects 1-D text input, not a 2-D DataFrame.
X = df['Name']
y = df['Survived']

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline

# Pipeline: vectorize the passenger names, then fit a multinomial
# Naive Bayes classifier on the resulting token counts.
pipe = make_pipeline(CountVectorizer(), MultinomialNB())

# Cross-validate the pipeline using default parameters.
from sklearn.model_selection import cross_val_score

# Capture and print the mean accuracy instead of discarding it: a bare
# expression only displays in a notebook, and the pasted cell output was
# a stray literal in script form.
baseline_accuracy = cross_val_score(pipe, X, y, cv=5, scoring='accuracy').mean()
print(baseline_accuracy)
# notebook output was: 0.8001820350260498
# Specify parameter values to search (use a distribution for any
# continuous parameters so RandomizedSearchCV can sample from it).
# NOTE: import the scipy.stats submodule explicitly -- "import scipy as sp"
# followed by "sp.stats" is unreliable because scipy does not import its
# submodules automatically.
from scipy import stats

params = {}
params['countvectorizer__min_df'] = [1, 2, 3, 4]         # minimum document frequency
params['countvectorizer__lowercase'] = [True, False]     # case folding on/off
params['multinomialnb__alpha'] = stats.uniform(scale=1)  # sample alpha from [0, 1)

# Try "n_iter" random combinations of those parameter values;
# random_state makes the sampled combinations reproducible.
from sklearn.model_selection import RandomizedSearchCV
rand = RandomizedSearchCV(pipe, params, n_iter=10, cv=5,
                          scoring='accuracy', random_state=1)
rand.fit(X, y)
# What was the best score found during the search?
# (print explicitly: a bare attribute access shows nothing in a script)
print(rand.best_score_)
# notebook output was: 0.8080534806352395

# Which combination of parameters produced the best score?
print(rand.best_params_)
# notebook output was:
# {'countvectorizer__lowercase': False, 'countvectorizer__min_df': 3,
#  'multinomialnb__alpha': 0.1981014890848788}

# © 2020 Data School. All rights reserved.