In [1]:
# To run this benchmark script, you will need to install XGBoost
# (pip install xgboost)

import numpy as np
import pandas as pd
from sklearn.datasets import load_breast_cancer

def load_breast_data():
    breast = load_breast_cancer()
    feature_names = list(breast.feature_names)
    X, y = pd.DataFrame(breast.data, columns=feature_names), breast.target
    dataset = {
        'problem': 'classification',
        'full': {
            'X': X,
            'y': y,
        },
    }
    return dataset


def load_adult_data():
    df = pd.read_csv(
        "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
        header=None)
    df.columns = [
        "Age", "WorkClass", "fnlwgt", "Education", "EducationNum",
        "MaritalStatus", "Occupation", "Relationship", "Race", "Gender",
        "CapitalGain", "CapitalLoss", "HoursPerWeek", "NativeCountry", "Income"
    ]
    train_cols = df.columns[0:-1]
    label = df.columns[-1]
    X_df = df[train_cols]
    y_df = df[label]

    dataset = {
        'problem': 'classification',
        'full': {
            'X': X_df,
            'y': y_df,
        },
    }

    return dataset

def load_heart_data():
    # https://www.kaggle.com/ronitf/heart-disease-uci
    df = pd.read_csv(r'D:\datasets\heart-disease-uci\heart.csv')
    train_cols = df.columns[0:-1]
    label = df.columns[-1]
    X_df = df[train_cols]
    y_df = df[label]
    dataset = {
        'problem': 'classification',
        'full': {
            'X': X_df,
            'y': y_df,
        },
    }
    
    return dataset


def load_credit_data():
    # https://www.kaggle.com/mlg-ulb/creditcardfraud
    df = pd.read_csv(r'D:\datasets\creditcardfraud\creditcard.csv')
    train_cols = df.columns[0:-1]
    label = df.columns[-1]
    X_df = df[train_cols]
    y_df = df[label]
    dataset = {
        'problem': 'classification',
        'full': {
            'X': X_df,
            'y': y_df,
        },
    }
    
    return dataset


def load_telco_churn_data():
    # https://www.kaggle.com/blastchar/telco-customer-churn/downloads/WA_Fn-UseC_-Telco-Customer-Churn.csv/1
    df = pd.read_csv(r'D:\datasets\telco-customer-churn\WA_Fn-UseC_-Telco-Customer-Churn.csv')
    train_cols = df.columns[1:-1] # First column is an ID
    label = df.columns[-1]
    X_df = df[train_cols]
    y_df = df[label] # 'Yes, No'
    dataset = {
        'problem': 'classification',
        'full': {
            'X': X_df,
            'y': y_df,
        },
    }
    
    return dataset
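
# Hedged convenience sketch (not part of the original benchmark): the three Kaggle
# loaders above hard-code Windows paths under D:\datasets. A path-configurable
# variant could look like the following; DATA_DIR, the BENCHMARK_DATA_DIR env var,
# and load_local_csv_data are illustrative names introduced here.
import os

DATA_DIR = os.environ.get('BENCHMARK_DATA_DIR', r'D:\datasets')

def load_local_csv_data(relative_path, drop_first_col=False):
    # Generic loader: the last column is the label, the remaining columns are features.
    df = pd.read_csv(os.path.join(DATA_DIR, relative_path))
    train_cols = df.columns[1:-1] if drop_first_col else df.columns[0:-1]
    return {
        'problem': 'classification',
        'full': {'X': df[train_cols], 'y': df[df.columns[-1]]},
    }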
In [2]:
from sklearn.preprocessing import OneHotEncoder, FunctionTransformer, StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import StratifiedShuffleSplit, cross_validate

from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from xgboost import XGBClassifier

from sklearn.linear_model import SGDClassifier, LogisticRegression

from interpret.glassbox import ExplainableBoostingClassifier


def format_n(x):
    return "{0:.3f}".format(x)

def process_model(clf, name, X, y, n_splits=3):
    # Evaluate model
    ss = StratifiedShuffleSplit(n_splits=n_splits, test_size=0.25, random_state=1337)
    scores = cross_validate(
        clf, X, y, scoring='roc_auc', cv=ss,
        n_jobs=None, return_estimator=True
    )
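    # `scores` is a dict of per-split arrays: 'fit_time', 'score_time',
    # 'test_score', and 'estimator' (the latter because return_estimator=True).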

    record = dict()
    record['model_name'] = name
    record['fit_time_mean'] = format_n(np.mean(scores['fit_time']))
    record['fit_time_std'] = format_n(np.std(scores['fit_time']))
    record['test_score_mean'] = format_n(np.mean(scores['test_score']))
    record['test_score_std'] = format_n(np.std(scores['test_score']))

    return record



def benchmark_models(dataset_name, X, y, ct=None, n_splits=3, random_state=1337):
    if ct is None:
        # Default preprocessing when no ColumnTransformer is supplied:
        # one-hot encode object-dtype (string) columns, pass numeric columns through unchanged.
        is_cat = np.array([dt.kind == 'O' for dt in X.dtypes])
        cat_cols = X.columns.values[is_cat]
        num_cols = X.columns.values[~is_cat]

        # Note: scikit-learn >= 1.2 spells `sparse=False` as `sparse_output=False`.
        cat_ohe_step = ('ohe', OneHotEncoder(sparse=False,
                                             handle_unknown='ignore'))

        cat_pipe = Pipeline([cat_ohe_step])
        num_pipe = Pipeline([('identity', FunctionTransformer())])
        transformers = [
            ('cat', cat_pipe, cat_cols),
            ('num', num_pipe, num_cols)
        ]
        ct = ColumnTransformer(transformers=transformers)

    records = []

    summary_record = {}
    summary_record['dataset_name'] = dataset_name
    print()
    print('-' * 78)
    print(dataset_name)
    print('-' * 78)
    print(summary_record)
    print()

    pipe = Pipeline([
        ('ct', ct),
        ('std', StandardScaler()),
        ('linear-sgd', SGDClassifier(random_state=random_state)),
    ])
    record = process_model(pipe, 'linear-sgd', X, y, n_splits=n_splits)
    print(record)
    record.update(summary_record)
    records.append(record)

    pipe = Pipeline([
        ('ct', ct),
        ('std', StandardScaler()),
        ('lr', LogisticRegression(random_state=random_state)),
    ])
    record = process_model(pipe, 'lr', X, y, n_splits=n_splits)
    print(record)
    record.update(summary_record)
    records.append(record)

    pipe = Pipeline([
        ('ct', ct),
        # n_estimators updated from 10 to 100 to match the newer scikit-learn default
        ('rf-100', RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=random_state)),
    ])
    record = process_model(pipe, 'rf-100', X, y, n_splits=n_splits)
    print(record)
    record.update(summary_record)
    records.append(record)
    
    pipe = Pipeline([
        ('ct', ct),
        ('xgb', XGBClassifier(random_state=random_state)),
    ])
    record = process_model(pipe, 'xgb', X, y, n_splits=n_splits)
    print(record)
    record.update(summary_record)
    records.append(record)

    # No preprocessing pipeline needed: EBM handles string/categorical features natively
    ebm_main = ExplainableBoostingClassifier(n_jobs=-1, interactions=0, random_state=random_state)
    record = process_model(ebm_main, 'ebm main', X, y, n_splits=n_splits)
    print(record)
    record.update(summary_record)
    records.append(record)

    return records
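
# Optional smoke test (illustrative sketch, not executed in this run): exercise
# benchmark_models on a small synthetic dataset before pointing it at the real data.
# The names X_demo / y_demo and the sizes are arbitrary; uncomment to try it.
# from sklearn.datasets import make_classification
# X_demo, y_demo = make_classification(n_samples=500, n_features=10, random_state=0)
# X_demo = pd.DataFrame(X_demo, columns=['f%d' % i for i in range(10)])
# _ = benchmark_models('synthetic-demo', X_demo, y_demo, n_splits=2)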
In [3]:
results = []
n_splits = 3
In [4]:
dataset = load_heart_data()
result = benchmark_models('heart', dataset['full']['X'], dataset['full']['y'], n_splits=n_splits)
results.append(result)
------------------------------------------------------------------------------
heart
------------------------------------------------------------------------------
{'dataset_name': 'heart'}

{'model_name': 'linear-sgd', 'fit_time_mean': '0.015', 'fit_time_std': '0.003', 'test_score_mean': '0.823', 'test_score_std': '0.013'}
{'model_name': 'lr', 'fit_time_mean': '0.013', 'fit_time_std': '0.000', 'test_score_mean': '0.895', 'test_score_std': '0.030'}
{'model_name': 'rf-100', 'fit_time_mean': '1.564', 'fit_time_std': '1.628', 'test_score_mean': '0.890', 'test_score_std': '0.008'}
{'model_name': 'xgb', 'fit_time_mean': '0.411', 'fit_time_std': '0.455', 'test_score_mean': '0.870', 'test_score_std': '0.014'}
{'model_name': 'ebm main', 'fit_time_mean': '9.765', 'fit_time_std': '1.174', 'test_score_mean': '0.916', 'test_score_std': '0.005'}
{'model_name': 'ebm-interact', 'fit_time_mean': '1.607', 'fit_time_std': '0.359', 'test_score_mean': '0.905', 'test_score_std': '0.010'}
In [5]:
dataset = load_breast_data()
result = benchmark_models('breast-cancer', dataset['full']['X'], dataset['full']['y'], n_splits=n_splits)
results.append(result)
------------------------------------------------------------------------------
breast-cancer
------------------------------------------------------------------------------
{'dataset_name': 'breast-cancer'}

{'model_name': 'linear-sgd', 'fit_time_mean': '0.014', 'fit_time_std': '0.003', 'test_score_mean': '0.989', 'test_score_std': '0.003'}
{'model_name': 'lr', 'fit_time_mean': '0.016', 'fit_time_std': '0.000', 'test_score_mean': '0.995', 'test_score_std': '0.005'}
{'model_name': 'rf-100', 'fit_time_mean': '0.409', 'fit_time_std': '0.011', 'test_score_mean': '0.992', 'test_score_std': '0.009'}
{'model_name': 'xgb', 'fit_time_mean': '0.294', 'fit_time_std': '0.087', 'test_score_mean': '0.995', 'test_score_std': '0.006'}
{'model_name': 'ebm main', 'fit_time_mean': '1.026', 'fit_time_std': '0.439', 'test_score_mean': '0.995', 'test_score_std': '0.006'}
{'model_name': 'ebm-interact', 'fit_time_mean': '192.805', 'fit_time_std': '126.188', 'test_score_mean': '0.995', 'test_score_std': '0.006'}
In [6]:
dataset = load_adult_data()
result = benchmark_models('adult', dataset['full']['X'], dataset['full']['y'], n_splits=n_splits)
results.append(result)
------------------------------------------------------------------------------
adult
------------------------------------------------------------------------------
{'dataset_name': 'adult'}

{'model_name': 'linear-sgd', 'fit_time_mean': '0.418', 'fit_time_std': '0.060', 'test_score_mean': '0.841', 'test_score_std': '0.019'}
{'model_name': 'lr', 'fit_time_mean': '2.342', 'fit_time_std': '0.177', 'test_score_mean': '0.907', 'test_score_std': '0.003'}
{'model_name': 'rf-100', 'fit_time_mean': '2.072', 'fit_time_std': '1.484', 'test_score_mean': '0.903', 'test_score_std': '0.002'}
{'model_name': 'xgb', 'fit_time_mean': '15.498', 'fit_time_std': '0.563', 'test_score_mean': '0.922', 'test_score_std': '0.002'}
{'model_name': 'ebm main', 'fit_time_mean': '24.705', 'fit_time_std': '3.341', 'test_score_mean': '0.928', 'test_score_std': '0.002'}
{'model_name': 'ebm-interact', 'fit_time_mean': '36.416', 'fit_time_std': '0.466', 'test_score_mean': '0.928', 'test_score_std': '0.002'}
In [7]:
dataset = load_credit_data()
result = benchmark_models('credit-fraud', dataset['full']['X'], dataset['full']['y'], n_splits=n_splits)
results.append(result)
------------------------------------------------------------------------------
credit-fraud
------------------------------------------------------------------------------
{'dataset_name': 'credit-fraud'}

{'model_name': 'linear-sgd', 'fit_time_mean': '1.166', 'fit_time_std': '0.086', 'test_score_mean': '0.984', 'test_score_std': '0.002'}
{'model_name': 'lr', 'fit_time_mean': '6.145', 'fit_time_std': '0.688', 'test_score_mean': '0.979', 'test_score_std': '0.002'}
{'model_name': 'rf-100', 'fit_time_mean': '12.063', 'fit_time_std': '0.053', 'test_score_mean': '0.950', 'test_score_std': '0.007'}
{'model_name': 'xgb', 'fit_time_mean': '94.712', 'fit_time_std': '0.766', 'test_score_mean': '0.981', 'test_score_std': '0.003'}
{'model_name': 'ebm main', 'fit_time_mean': '99.003', 'fit_time_std': '12.889', 'test_score_mean': '0.975', 'test_score_std': '0.005'}
{'model_name': 'ebm-interact', 'fit_time_mean': '435.343', 'fit_time_std': '61.869', 'test_score_mean': '0.978', 'test_score_std': '0.004'}
In [8]:
dataset = load_telco_churn_data()
result = benchmark_models('telco-churn', dataset['full']['X'], dataset['full']['y'], n_splits=n_splits)
results.append(result)
------------------------------------------------------------------------------
telco-churn
------------------------------------------------------------------------------
{'dataset_name': 'telco-churn'}

{'model_name': 'linear-sgd', 'fit_time_mean': '2.236', 'fit_time_std': '0.213', 'test_score_mean': '0.798', 'test_score_std': '0.008'}
{'model_name': 'lr', 'fit_time_mean': '25.970', 'fit_time_std': '2.931', 'test_score_mean': '0.804', 'test_score_std': '0.015'}
{'model_name': 'rf-100', 'fit_time_mean': '3.310', 'fit_time_std': '1.204', 'test_score_mean': '0.824', 'test_score_std': '0.002'}
{'model_name': 'xgb', 'fit_time_mean': '140.873', 'fit_time_std': '0.970', 'test_score_mean': '0.850', 'test_score_std': '0.006'}
{'model_name': 'ebm main', 'fit_time_mean': '11.451', 'fit_time_std': '1.413', 'test_score_mean': '0.851', 'test_score_std': '0.005'}
{'model_name': 'ebm-interact', 'fit_time_mean': '5.363', 'fit_time_std': '0.854', 'test_score_mean': '0.851', 'test_score_std': '0.005'}
In [9]:
records = [item for result in results for item in result]
record_df = pd.DataFrame.from_records(records)[['dataset_name', 'model_name', 'test_score_mean', 'test_score_std']]
record_df.to_csv('ebm-perf-classification-overnight.csv')
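
# Optional follow-up (sketch, assuming the records collected above): pivot the
# results into a dataset-by-model table of mean test AUC for side-by-side reading.
# `summary_df` is a new name introduced here; its output is not shown in this run.
summary_df = record_df.pivot(index='model_name', columns='dataset_name',
                             values='test_score_mean')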