import numpy as np
import pandas as pd
df = pd.read_parquet("fraud-cleaned-sample.parquet")

# We're using time-series data, so we split on time rather than randomly:
# the first 70% of the observed time range becomes the training set and the
# remainder the held-out test set. A random split would leak future
# transactions into training.
# (NOTE: this line was bare prose in the original — a SyntaxError — now a comment.)
first = df['timestamp'].min()
last = df['timestamp'].max()
cutoff = first + ((last - first) * 0.7)
df = df.copy()
train = df[df['timestamp'] <= cutoff]
test = df[df['timestamp'] > cutoff]
import cloudpickle as cp
from sklearn.ensemble import RandomForestClassifier
from sklearn import model_selection

# Load the previously-saved feature-engineering pipeline.
# SECURITY NOTE: cloudpickle deserialization executes arbitrary code —
# only load pipeline files from a trusted source.
# Use `with` so the file handle is closed deterministically (the original
# `cp.load(open(...))` leaked it).
with open('feature_pipeline.sav', 'rb') as pipeline_file:
    feature_pipeline = cp.load(pipeline_file)

# Small, regularized forest; balanced_subsample compensates for the heavy
# class imbalance typical of fraud data. Fixed random_state for reproducibility.
rfc = RandomForestClassifier(
    n_estimators=16,
    max_depth=8,
    random_state=404,
    class_weight="balanced_subsample",
)

# Fit the feature pipeline on the training data only, then train the forest
# on the resulting feature vectors.
svecs = feature_pipeline.fit_transform(train)
rfc.fit(svecs, train["label"])
from sklearn.metrics import classification_report

# Evaluate on the held-out test set. Use transform (NOT fit_transform):
# re-fitting the feature pipeline on test data would leak test-set statistics
# into the features and produce encodings inconsistent with what the model
# was trained on.
predictions = rfc.predict(feature_pipeline.transform(test))
print(classification_report(test.label.values, predictions))
from mlworkflows import plot

# Visualize test-set performance as a binary confusion matrix.
# Bind the tabular result to a distinct name instead of `df`, which in the
# original clobbered the raw data frame loaded at the top of the script.
confusion_df, chart = plot.binary_confusion_matrix(test["label"], predictions)
# Bare expressions: notebook-style display of the chart and its backing table.
chart
confusion_df
from mlworkflows import util
# Persist the trained classifier to disk for downstream use (e.g. a model
# service). `serialize_to` is a project helper — presumably a pickle-style
# dump to the given path; confirm its format against mlworkflows docs.
util.serialize_to(rfc, "rfc.sav")