In [1]:
from __future__ import print_function
%matplotlib inline
In [2]:
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context("talk")

Load Raw Data Files

In [3]:
with np.load("data_files.npz") as data:
    X_train = data['X_train']
    Y_train = data['Y_train']
    X_test = data['X_test']
    Y_test = data['Y_test']
    X_comp = data['X_comp']
del data

Work in floating point this time

In [4]:
X_train = np.float64(X_train)
X_test = np.float64(X_test)
X_comp = np.float64(X_comp)
In [5]:
X_train.shape
Out[5]:
(15680, 3333)

Visualize Time Samples

In [6]:
plt.plot(X_train[0])
Out[6]:
[<matplotlib.lines.Line2D at 0x7fb2786ad0d0>]

Convert to Frequency Spectra

In [7]:
def convert_to_spectra(X):
    """Convert each time-domain row of X to a one-sided amplitude spectrum.

    Parameters
    ----------
    X : iterable of 1-D array-like
        Time-domain samples, one signal per row.

    Returns
    -------
    np.ndarray
        Shape (len(X), ceil(n/2) - 1): magnitudes of the positive-frequency
        FFT bins scaled by 2/n, with the DC component dropped.
    """
    out = []
    for row in X:
        # Compute frequency spectrum
        xfft = np.fft.fft(row)
        n = len(xfft)

        # Fold negative frequencies and drop DC component.
        # int() is required: np.ceil returns a float, and a float slice
        # index raises TypeError on modern NumPy.
        half_n = int(np.ceil(n / 2.0))
        xfft = (2.0 / n) * xfft[1:half_n]

        out.append(np.abs(xfft))
    out = np.array(out)
    return out
In [8]:
X_train_spectra = convert_to_spectra(X_train)
X_test_spectra = convert_to_spectra(X_test)
X_comp_spectra = convert_to_spectra(X_comp)
In [9]:
X_train_spectra.shape
Out[9]:
(15680, 1666)

Plot Frequency Spectrum

In [10]:
plt.plot(X_train_spectra[0])
plt.xlabel("Frequency")
plt.ylabel("Amplitude")
Out[10]:
<matplotlib.text.Text at 0x7fb27873f050>

Smooth the Frequency Spectra

In [11]:
def moving_average(X, n=3):
    """Smooth each row of X with a length-n sliding-window mean.

    Each output row has len(row) - n + 1 samples; sample i is the mean of
    input samples i .. i + n - 1. Uses a cumulative-sum trick so every
    window sum is computed in O(1).
    """
    def _smooth(signal):
        # Difference of cumulative sums n apart yields the sum of each
        # length-n window; dividing gives the windowed mean.
        totals = np.cumsum(signal)
        totals[n:] = totals[n:] - totals[:-n]
        return totals[n - 1:] / n

    return np.array([_smooth(signal) for signal in X])
In [12]:
X_train_spectra_no_average = X_train_spectra
X_train_spectra = moving_average(X_train_spectra, n=5)
X_test_spectra = moving_average(X_test_spectra, n=5)
X_comp_spectra = moving_average(X_comp_spectra, n=5)

Plot Smoothed Spectra Comparison

In [13]:
plt.subplot(2, 1, 1)
plt.plot(X_train_spectra_no_average[0])
plt.ylabel("Unaveraged Amplitude")
plt.subplot(2, 1, 2)
plt.plot(X_train_spectra[0])
plt.ylabel("Averaged Amplitude")
plt.xlabel("Frequency")
Out[13]:
<matplotlib.text.Text at 0x7fb278492d10>

Check Data Ranges, Convert Back to int

In [14]:
print(X_train_spectra.min(), X_train_spectra.max())
print(X_test_spectra.min(), X_test_spectra.max())
print(X_comp_spectra.min(), X_comp_spectra.max())
0.0 5079.24144214
0.0 4924.13100536
0.0 5131.84586544
In [15]:
X_train_spectra = np.int16(X_train_spectra)
X_test_spectra = np.int16(X_test_spectra)
X_comp_spectra = np.int16(X_comp_spectra)

Save Converted Input Features for Use with Google Prediction

In [16]:
for_google = np.c_[Y_train, X_train_spectra]
# np.savetxt("X_train_spectra_ave_goog.csv", for_google, delimiter=",", fmt='%i')
In [17]:
print(X_train_spectra.shape)
print(Y_train.shape)
print(X_test_spectra.shape)
print(Y_test.shape)
print(X_comp_spectra.shape)
(15680, 1662)
(15680,)
(6720, 1662)
(6720,)
(9600, 1662)

Train the Classifier Model

In [18]:
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=100, verbose=True,max_depth=None,min_samples_split=1, random_state=0)
model.fit(X_train_spectra,Y_train)
[Parallel(n_jobs=1)]: Done   1 jobs       | elapsed:  1.5min
[Parallel(n_jobs=1)]: Done   1 out of   1 | elapsed:  1.5min finished
Out[18]:
RandomForestClassifier(bootstrap=True, compute_importances=None,
            criterion='gini', max_depth=None, max_features='auto',
            max_leaf_nodes=None, min_density=None, min_samples_leaf=1,
            min_samples_split=1, n_estimators=100, n_jobs=1,
            oob_score=False, random_state=0, verbose=True)

Measure Classifier Performance on the Test Set

In [19]:
my_score = model.score(X_test_spectra,Y_test)
print(my_score)
0.991666666667
[Parallel(n_jobs=1)]: Done   1 jobs       | elapsed:    0.4s
[Parallel(n_jobs=1)]: Done   1 out of   1 | elapsed:    0.4s finished
In [20]:
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
In [21]:
Y_pred = model.predict(X_test_spectra)
[Parallel(n_jobs=1)]: Done   1 jobs       | elapsed:    0.5s
[Parallel(n_jobs=1)]: Done   1 out of   1 | elapsed:    0.5s finished
In [22]:
accuracy_score(Y_test, Y_pred)
Out[22]:
0.9916666666666667
In [23]:
for_google.shape
Out[23]:
(15680, 1663)
In [24]:
print(classification_report(Y_test, Y_pred))
             precision    recall  f1-score   support

          0       0.99      0.99      0.99      3381
          1       0.99      0.99      0.99      3339

avg / total       0.99      0.99      0.99      6720

Confusion Matrix

In [25]:
confusion_matrix(Y_test, Y_pred, labels=[0, 1])
Out[25]:
array([[3358,   23],
       [  33, 3306]])

Examine Input Feature Importance

In [26]:
plt.plot(model.feature_importances_)
plt.ylabel("Relative Feature Importance")
plt.xlabel("Frequency")
Out[26]:
<matplotlib.text.Text at 0x7fb27468dcd0>