Follow the instructions below to create your ML pipeline: load the data with pandas' read_sql_table, build and tune a multi-output text classifier, then export the trained model.
# import libraries
import pandas as pd
import numpy as np
import pickle
from sqlalchemy import create_engine
import warnings
warnings.filterwarnings("ignore")
# import NLP libraries
import re
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
# nltk.download('punkt')
# nltk.download('stopwords')
# nltk.download('wordnet') # download for lemmatization
# import sklearn
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.multioutput import MultiOutputClassifier
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
# load data from database
# NOTE(review): assumes the ETL step already wrote a 'DisasterResponse' table
# into data/DisasterResponse.db — run the ETL pipeline first.
engine = create_engine('sqlite:///data/DisasterResponse.db')
df = pd.read_sql_table('DisasterResponse', engine)
# X: raw message text (model input); Y: all remaining columns are the
# binary category labels (multi-output target).
X = df['message']
Y = df.drop(['id', 'message', 'original', 'genre'], axis=1)
def tokenize(text):
    """Clean and tokenize a raw message for the NLP pipeline.

    Steps: replace URLs with a placeholder, word-tokenize, lemmatize,
    lowercase, strip whitespace, and drop English stopwords.

    Args:
        text (str): raw message text.

    Returns:
        list of str: cleaned tokens.
    """
    # Define url pattern. Raw string avoids invalid-escape warnings for \( \).
    # The stray space that was inside the [!*\(\), ] character class is
    # removed: a space is not a valid URL character and could glue a URL to
    # the text that follows it.
    url_re = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    # Detect and replace urls
    detected_urls = re.findall(url_re, text)
    for url in detected_urls:
        text = text.replace(url, "urlplaceholder")
    # tokenize sentences
    tokens = word_tokenize(text)
    lemmatizer = WordNetLemmatizer()
    # lemmatize, lowercase and strip each token
    clean_tokens = [lemmatizer.lemmatize(tok).lower().strip() for tok in tokens]
    # remove stopwords — keep as a set for O(1) membership tests instead of
    # converting back to a list
    stop_words = set(stopwords.words('english'))
    clean_tokens = [token for token in clean_tokens if token not in stop_words]
    return clean_tokens
def build_pipeline(n_estimators=100, n_jobs=6):
    """Build the NLP classification pipeline.

    Stages: count words, tf-idf weighting, then a multi-output
    random-forest classifier (one output per category column).

    Args:
        n_estimators (int): number of trees in the random forest
            (default 100, matching the original hard-coded value).
        n_jobs (int): parallel workers for the forest (default 6).

    Returns:
        sklearn.pipeline.Pipeline: unfitted pipeline.
    """
    pipeline = Pipeline([
        ('vec', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(
            RandomForestClassifier(n_estimators=n_estimators, n_jobs=n_jobs)))
    ])
    return pipeline
# Split into train/test sets (sklearn default: 75% train / 25% test).
# NOTE(review): no random_state is set, so the split — and all the metric
# tables below — are not reproducible run-to-run; consider pinning one.
X_train, X_test, y_train, y_test = train_test_split(X, Y)
# Build and fit the baseline random-forest pipeline.
pipeline = build_pipeline()
pipeline.fit(X_train, y_train)
Pipeline(memory=None, steps=[('vec', CountVectorizer(analyzer='word', binary=False, decode_error='strict', dtype=<class 'numpy.int64'>, encoding='utf-8', input='content', lowercase=True, max_df=1.0, max_features=None, min_df=1, ngram_range=(1, 1), preprocessor=None, stop_words=None, strip_..._score=False, random_state=None, verbose=0, warm_start=False), n_jobs=None))])
Report the f1 score, precision and recall for each output category of the dataset. You can do this by iterating through the columns and calling sklearn's classification_report
on each.
def build_report(pipeline, X_test, y_test):
    """Score a fitted pipeline per output category.

    Predicts on X_test and computes micro-averaged f1, precision and
    recall for every label column.

    NOTE: with ``average='micro'`` on a single binary column the three
    metrics are mathematically identical (all equal accuracy), which is
    why every row of the report shows one repeated value. Kept for
    compatibility with the original report format.

    Args:
        pipeline: fitted estimator exposing ``predict``.
        X_test: test messages.
        y_test (pd.DataFrame): true labels, one column per category.

    Returns:
        pd.DataFrame: one row per category, columns
        ['f1 score', 'precision', 'recall'].
    """
    # predict on the X_test
    y_pred = pipeline.predict(X_test)
    # build classification report on every column; enumerate replaces the
    # range(len(...)) index loop and the true-label slice is hoisted so it
    # is extracted only once per column
    performances = []
    for i, column in enumerate(y_test.columns):
        y_true = y_test.iloc[:, i].values
        performances.append([f1_score(y_true, y_pred[:, i], average='micro'),
                             precision_score(y_true, y_pred[:, i], average='micro'),
                             recall_score(y_true, y_pred[:, i], average='micro')])
    # build dataframe indexed by category name
    return pd.DataFrame(performances,
                        columns=['f1 score', 'precision', 'recall'],
                        index=y_test.columns)
# Report per-category metrics for the baseline random-forest pipeline.
build_report(pipeline, X_test, y_test)
f1 score | precision | recall | |
---|---|---|---|
related | 0.801648 | 0.801648 | 0.801648 |
request | 0.894873 | 0.894873 | 0.894873 |
offer | 0.995880 | 0.995880 | 0.995880 |
aid_related | 0.777388 | 0.777388 | 0.777388 |
medical_help | 0.920507 | 0.920507 | 0.920507 |
medical_products | 0.956057 | 0.956057 | 0.956057 |
search_and_rescue | 0.971773 | 0.971773 | 0.971773 |
security | 0.982301 | 0.982301 | 0.982301 |
military | 0.968111 | 0.968111 | 0.968111 |
child_alone | 1.000000 | 1.000000 | 1.000000 |
water | 0.958193 | 0.958193 | 0.958193 |
food | 0.940189 | 0.940189 | 0.940189 |
shelter | 0.935002 | 0.935002 | 0.935002 |
clothing | 0.986726 | 0.986726 | 0.986726 |
money | 0.978639 | 0.978639 | 0.978639 |
missing_people | 0.989930 | 0.989930 | 0.989930 |
refugees | 0.968721 | 0.968721 | 0.968721 |
death | 0.962161 | 0.962161 | 0.962161 |
other_aid | 0.871224 | 0.871224 | 0.871224 |
infrastructure_related | 0.934696 | 0.934696 | 0.934696 |
transport | 0.953769 | 0.953769 | 0.953769 |
buildings | 0.951785 | 0.951785 | 0.951785 |
electricity | 0.981233 | 0.981233 | 0.981233 |
tools | 0.994812 | 0.994812 | 0.994812 |
hospitals | 0.990540 | 0.990540 | 0.990540 |
shops | 0.995270 | 0.995270 | 0.995270 |
aid_centers | 0.987946 | 0.987946 | 0.987946 |
other_infrastructure | 0.955600 | 0.955600 | 0.955600 |
weather_related | 0.878700 | 0.878700 | 0.878700 |
floods | 0.949954 | 0.949954 | 0.949954 |
storm | 0.937290 | 0.937290 | 0.937290 |
fire | 0.991456 | 0.991456 | 0.991456 |
earthquake | 0.971010 | 0.971010 | 0.971010 |
cold | 0.980623 | 0.980623 | 0.980623 |
other_weather | 0.946903 | 0.946903 | 0.946903 |
direct_report | 0.867562 | 0.867562 | 0.867562 |
Use grid search to find better parameters.
# Hyper-parameter grid for the random forest wrapped inside the pipeline
# ('clf' step -> MultiOutputClassifier -> estimator).
parameters = {
    'clf__estimator__max_features': ['sqrt', 0.5],
    'clf__estimator__n_estimators': [50, 100],
}

# 5-fold cross-validated grid search, parallelized across 6 workers.
cv = GridSearchCV(
    estimator=pipeline,
    param_grid=parameters,
    cv=5,
    n_jobs=6,
)
cv.fit(X_train, y_train)
GridSearchCV(cv=5, error_score='raise-deprecating', estimator=Pipeline(memory=None, steps=[('vec', CountVectorizer(analyzer='word', binary=False, decode_error='strict', dtype=<class 'numpy.int64'>, encoding='utf-8', input='content', lowercase=True, max_df=1.0, max_features=None, min_df=1, ngram_range=(1, 1), preprocessor=None, stop_words=None, strip_..._score=False, random_state=None, verbose=0, warm_start=False), n_jobs=None))]), fit_params=None, iid='warn', n_jobs=6, param_grid={'clf__estimator__max_features': ['sqrt', 0.5], 'clf__estimator__n_estimators': [50, 100]}, pre_dispatch='2*n_jobs', refit=True, return_train_score='warn', scoring=None, verbose=0)
Show the accuracy, precision, and recall of the tuned model.
Since this project focuses on code quality, process, and pipelines, there is no minimum performance metric needed to pass. However, make sure to fine tune your models for accuracy, precision and recall to make your project stand out - especially for your portfolio!
# Report per-category metrics for the grid-search-tuned model
# (GridSearchCV delegates predict to its refit best estimator).
build_report(cv, X_test, y_test)
f1 score | precision | recall | |
---|---|---|---|
related | 0.801953 | 0.801953 | 0.801953 |
request | 0.888007 | 0.888007 | 0.888007 |
offer | 0.995270 | 0.995270 | 0.995270 |
aid_related | 0.765334 | 0.765334 | 0.765334 |
medical_help | 0.920659 | 0.920659 | 0.920659 |
medical_products | 0.962313 | 0.962313 | 0.962313 |
search_and_rescue | 0.970552 | 0.970552 | 0.970552 |
security | 0.978944 | 0.978944 | 0.978944 |
military | 0.966890 | 0.966890 | 0.966890 |
child_alone | 1.000000 | 1.000000 | 1.000000 |
water | 0.966280 | 0.966280 | 0.966280 |
food | 0.951480 | 0.951480 | 0.951480 |
shelter | 0.948581 | 0.948581 | 0.948581 |
clothing | 0.989014 | 0.989014 | 0.989014 |
money | 0.978486 | 0.978486 | 0.978486 |
missing_people | 0.990388 | 0.990388 | 0.990388 |
refugees | 0.971620 | 0.971620 | 0.971620 |
death | 0.973604 | 0.973604 | 0.973604 |
other_aid | 0.868630 | 0.868630 | 0.868630 |
infrastructure_related | 0.929661 | 0.929661 | 0.929661 |
transport | 0.954379 | 0.954379 | 0.954379 |
buildings | 0.956363 | 0.956363 | 0.956363 |
electricity | 0.979707 | 0.979707 | 0.979707 |
tools | 0.993592 | 0.993592 | 0.993592 |
hospitals | 0.988404 | 0.988404 | 0.988404 |
shops | 0.994355 | 0.994355 | 0.994355 |
aid_centers | 0.987794 | 0.987794 | 0.987794 |
other_infrastructure | 0.952395 | 0.952395 | 0.952395 |
weather_related | 0.880684 | 0.880684 | 0.880684 |
floods | 0.955752 | 0.955752 | 0.955752 |
storm | 0.945682 | 0.945682 | 0.945682 |
fire | 0.991913 | 0.991913 | 0.991913 |
earthquake | 0.973146 | 0.973146 | 0.973146 |
cold | 0.983064 | 0.983064 | 0.983064 |
other_weather | 0.941105 | 0.941105 | 0.941105 |
direct_report | 0.856424 | 0.856424 | 0.856424 |
# Inspect the hyper-parameters selected by the grid search.
cv.best_params_
{'clf__estimator__max_features': 0.5, 'clf__estimator__n_estimators': 100}
# Alternative model: swap the random forest for AdaBoost.
improved_steps = [
    ('vect', CountVectorizer(tokenizer=tokenize)),
    ('tfidf', TfidfTransformer()),
    ('clf', MultiOutputClassifier(AdaBoostClassifier(n_estimators=100))),
]
pipeline_improved = Pipeline(improved_steps)
pipeline_improved.fit(X_train, y_train)
# NOTE(review): this prediction is unused below — build_report calls
# predict itself — but it is kept to preserve the notebook's behavior.
y_pred_improved = pipeline_improved.predict(X_test)
# Report per-category metrics for the AdaBoost pipeline.
build_report(pipeline_improved, X_test, y_test)
f1 score | precision | recall | |
---|---|---|---|
related | 0.762893 | 0.762893 | 0.762893 |
request | 0.892127 | 0.892127 | 0.892127 |
offer | 0.994049 | 0.994049 | 0.994049 |
aid_related | 0.767318 | 0.767318 | 0.767318 |
medical_help | 0.923711 | 0.923711 | 0.923711 |
medical_products | 0.961398 | 0.961398 | 0.961398 |
search_and_rescue | 0.970705 | 0.970705 | 0.970705 |
security | 0.977876 | 0.977876 | 0.977876 |
military | 0.971468 | 0.971468 | 0.971468 |
child_alone | 1.000000 | 1.000000 | 1.000000 |
water | 0.963381 | 0.963381 | 0.963381 |
food | 0.946903 | 0.946903 | 0.946903 |
shelter | 0.941868 | 0.941868 | 0.941868 |
clothing | 0.987946 | 0.987946 | 0.987946 |
money | 0.977418 | 0.977418 | 0.977418 |
missing_people | 0.989319 | 0.989319 | 0.989319 |
refugees | 0.969484 | 0.969484 | 0.969484 |
death | 0.968721 | 0.968721 | 0.968721 |
other_aid | 0.868782 | 0.868782 | 0.868782 |
infrastructure_related | 0.928746 | 0.928746 | 0.928746 |
transport | 0.955447 | 0.955447 | 0.955447 |
buildings | 0.954837 | 0.954837 | 0.954837 |
electricity | 0.980928 | 0.980928 | 0.980928 |
tools | 0.993592 | 0.993592 | 0.993592 |
hospitals | 0.987489 | 0.987489 | 0.987489 |
shops | 0.994049 | 0.994049 | 0.994049 |
aid_centers | 0.986726 | 0.986726 | 0.986726 |
other_infrastructure | 0.951938 | 0.951938 | 0.951938 |
weather_related | 0.876259 | 0.876259 | 0.876259 |
floods | 0.953616 | 0.953616 | 0.953616 |
storm | 0.938969 | 0.938969 | 0.938969 |
fire | 0.991150 | 0.991150 | 0.991150 |
earthquake | 0.970705 | 0.970705 | 0.970705 |
cold | 0.981843 | 0.981843 | 0.981843 |
other_weather | 0.942630 | 0.942630 | 0.942630 |
direct_report | 0.858865 | 0.858865 | 0.858865 |
# Persist both trained models to disk. Using `with` guarantees the file
# handles are flushed and closed even if pickling raises — the original
# open(...) calls leaked the handles.
with open('rf_model.pkl', 'wb') as f:
    pickle.dump(pipeline, f)
with open('adaboost_model.pkl', 'wb') as f:
    pickle.dump(pipeline_improved, f)
train.py
Use the template file attached in the Resources folder to write a script that runs the steps above to create a database and export a model based on a new dataset specified by the user.