# Introduction to Streamlit

Here is a link to the Streamlit tutorials.

And here is a useful YouTube review of several ways to make dashboards in Python, by Michael Chow.

In :
%%writefile first_streamlit.py
# A minimal Streamlit app: a title, a write() call, and a bare f-string
# that Streamlit's "magic" feature renders directly onto the page.
import numpy as np
import matplotlib.pyplot as plt
import streamlit as st

st.title('Our first streamlit app')

st.write('This should be fun...')

me = 'Chuck'

# Bare expression: Streamlit magic displays this string in the app.
f'Even more fun for {me} and everyone and their dog'

Overwriting first_streamlit.py

In :
# type at command line
# !streamlit run first_streamlit.py

  You can now view your Streamlit app in your browser.

Local URL: http://localhost:8501
Network URL: http://192.168.0.120:8501

^C
Stopping...

In [ ]:


In :
%%writefile nnet.py

import numpy as np
import matplotlib.pyplot as plt
import streamlit as st

def add_ones(X):
    """Return X with a constant-1 column inserted as column 0 (bias inputs).

    X: 2-D array of shape (n_samples, n_features).
    Returns: array of shape (n_samples, 1 + n_features).
    """
    # NOTE(review): the original file had this `return` statement with no
    # enclosing `def`, which is a SyntaxError.  It is wrapped here as
    # `add_ones`, matching its role as a bias-column prepender (the weight
    # matrices below reserve a leading bias row) — confirm the intended name.
    return np.insert(X, 0, 1, axis=1)

def make_weights(n_inputs, n_hiddens, n_outputs):
    """Create and return weight matrices V (hidden layer) and W (output layer).

    Each matrix has one extra leading row for the bias weight.  Entries are
    drawn uniformly from (-1, 1), then scaled by 1 / sqrt(fan_in + 1).
    """
    hidden_shape = (1 + n_inputs, n_hiddens)
    output_shape = (1 + n_hiddens, n_outputs)
    V = np.random.uniform(-1, 1, size=hidden_shape) / np.sqrt(n_inputs + 1)
    W = np.random.uniform(-1, 1, size=output_shape) / np.sqrt(n_hiddens + 1)
    return V, W

def forward(Xst, V, W):
    """Forward pass through the two-layer network.

    Xst: standardized inputs, shape (n_samples, n_inputs).
    V:   hidden-layer weights, shape (1 + n_inputs, n_hiddens).
    W:   output-layer weights, shape (1 + n_hiddens, n_outputs).
    Returns (Z, Yst): hidden-unit outputs and (standardized) network outputs.
    """
    # NOTE(review): the original body was only comments and returned the
    # undefined names Z and Yst (a NameError).  This fills in the standard
    # tanh-hidden / linear-output computation implied by backward()'s use of
    # (1 - Z**2) as the activation derivative.
    # Prepend the constant-1 bias column, then apply the tanh activation.
    Z = np.tanh(np.insert(Xst, 0, 1, axis=1) @ V)
    # The output layer is linear: bias column plus weighted sum of Z.
    Yst = np.insert(Z, 0, 1, axis=1) @ W
    return Z, Yst

def backward(Xst, Tst, V, W):
    """Gradients of the mean squared error w.r.t. the weight matrices.

    Returns (grad_V, grad_W), each the same shape as V and W respectively.
    """
    # NOTE(review): the original assigned the full .shape TUPLES to
    # n_samples / n_outputs (making the division below a type error) and
    # never computed or returned the gradients.  Both are fixed here.
    n_samples = Xst.shape[0]
    n_outputs = Tst.shape[1]
    # Calculate the outputs of both layers.
    Z, Yst = forward(Xst, V, W)
    # Delta for the (linear) output layer.  Divide by n_samples * n_outputs
    # because we differentiate the MEAN squared error with respect to weights.
    delta = -(Tst - Yst) / (n_samples * n_outputs)
    # Gradient of the mean squared error w.r.t. the output-layer weights W
    # (bias column prepended to the hidden outputs Z).
    grad_W = np.insert(Z, 0, 1, axis=1).T @ delta
    # Back-propagate delta through the output weights (excluding the bias
    # row) and multiply by the tanh derivative (1 - Z**2).
    delta = (delta @ W[1:, :].T) * (1 - Z ** 2)
    # Gradient w.r.t. the hidden-layer weights V (bias column on the inputs).
    grad_V = np.insert(Xst, 0, 1, axis=1).T @ delta
    return grad_V, grad_W

@st.cache  # NOTE(review): st.cache is deprecated in recent Streamlit; prefer st.cache_data there.
def train_sgd(X, T, V, W, learning_rate, n_epochs):
    """Train the two-layer network by gradient descent.

    Returns (V, W, stand_parms, error_trace): the trained weights, the
    standardization parameters, and the per-epoch RMSE in original units.
    """
    # NOTE(review): the original loop body omitted the gradient computation
    # and the weight update, so the weights never changed; both steps are
    # filled in here.  Weights are reassigned (not mutated in place) so the
    # caller's cached arrays are left untouched.
    # Store standardization parameters in dictionary stand_parms.
    stand_parms = calc_standardize_parameters(X, T)
    # Standardize X and T.
    Xst = standardize_X(X, stand_parms)
    Tst = standardize_T(T, stand_parms)

    error_trace = []

    # Update weights for n_epochs passes through the training data.
    for epoch in range(n_epochs):
        # Gradients of the mean squared error w.r.t. each weight matrix.
        grad_V, grad_W = backward(Xst, Tst, V, W)

        # Gradient-descent step on each weight matrix.
        V = V - learning_rate * grad_V
        W = W - learning_rate * grad_W

        # RMSE in the targets' original (unstandardized) units this epoch.
        _, Yst = forward(Xst, V, W)
        Y = unstandardize_T(Yst, stand_parms)
        error_trace.append(rmse(Y, T))

    return V, W, stand_parms, error_trace

def use(X, V, W, stand_parms):
    """Apply the trained network to raw inputs X.

    Returns (Z, Y): hidden-unit outputs and predictions in the targets'
    original (unstandardized) units.
    """
    # Standardize the inputs, run the forward pass, then map the network's
    # standardized outputs back to the original target scale.
    Xst = standardize_X(X, stand_parms)
    Z, Yst = forward(Xst, V, W)
    Y = unstandardize_T(Yst, stand_parms)
    return Z, Y

def rmse(Y, T):
    """Root-mean-squared error between predictions Y and targets T."""
    return np.sqrt(((T - Y) ** 2).mean())

Overwriting nnet.py

In :
%%writefile -a nnet.py

def calc_standardize_parameters(X, T):
    """Return per-column means and standard deviations of X and T in a dict.

    Keys: 'Xmeans', 'Xstds', 'Tmeans', 'Tstds'.
    """
    return {
        'Xmeans': X.mean(axis=0),
        'Xstds': X.std(axis=0),
        'Tmeans': T.mean(axis=0),
        'Tstds': T.std(axis=0),
    }

def standardize_X(X, stand_parms):
    """Standardize inputs X using the stored column means and stds."""
    means = stand_parms['Xmeans']
    stds = stand_parms['Xstds']
    return (X - means) / stds

def unstandardize_X(Xst, stand_parms):
    """Map standardized inputs Xst back to their original scale."""
    means = stand_parms['Xmeans']
    stds = stand_parms['Xstds']
    return Xst * stds + means

def standardize_T(T, stand_parms):
    """Standardize targets T using the stored column means and stds."""
    means = stand_parms['Tmeans']
    stds = stand_parms['Tstds']
    return (T - means) / stds

def unstandardize_T(Tst, stand_parms):
    """Map standardized targets Tst back to their original scale."""
    means = stand_parms['Tmeans']
    stds = stand_parms['Tstds']
    return Tst * stds + means

Appending to nnet.py

In :
%%writefile -a nnet.py

n_samples = st.slider('number of samples', 5, 1000, 30)
# n_samples = 30

test_offset = st.slider('Test data offset', 0, 50, 0)

# Training data: linear trend plus a sine wave over [0, 20], with
# Gaussian noise added to the targets.
Xtrain = np.linspace(0., 20.0, n_samples).reshape((n_samples, 1))
Ttrain = 0.2 + 0.05 * (Xtrain) + 0.4 * np.sin(Xtrain / 2) + 0.2 * np.random.normal(size=(n_samples, 1))

# Test data: the same curve sampled at jittered x values, optionally
# shifted right by the slider's offset to demonstrate extrapolation.
Xtest = Xtrain  + test_offset +  0.1 * np.random.normal(size=(n_samples, 1))
Ttest = 0.2  + 0.05 * (Xtest) + 0.4 * np.sin(Xtest / 2) + 0.2 * np.random.normal(size=(n_samples, 1))

Appending to nnet.py

In :
%%writefile -a nnet.py

# Plot the train and test data on an explicit figure.  Passing the figure
# to st.pyplot avoids Streamlit's deprecated global-pyplot usage (which
# triggers PyplotGlobalUseWarning in current versions).
fig, ax = plt.subplots()
ax.plot(Xtrain, Ttrain, 'o', label='Train')
ax.plot(Xtest, Ttest, 'o', label='Test')
ax.legend()

st.pyplot(fig)

Appending to nnet.py

In :
%%writefile -a nnet.py

# Network dimensions are the COLUMN counts of the data arrays.  The
# original assigned the full .shape tuples here, which would give
# make_weights tuple arguments and produce wrongly-shaped matrices.
n_inputs = Xtrain.shape[1]
n_hiddens = st.slider('Number of Hidden Units', 1, 50, 5)
n_outputs = Ttrain.shape[1]

n_epochs = 2000
learning_rate = 0.1

V, W = make_weights(n_inputs, n_hiddens, n_outputs)

V, W, stand_parms, error_trace = train_sgd(Xtrain, Ttrain, V, W, learning_rate, n_epochs)

_, Ytrain = use(Xtrain, V, W, stand_parms)
rmse_train = rmse(Ytrain, Ttrain)
_, Ytest = use(Xtest, V, W, stand_parms)
rmse_test = rmse(Ytest, Ttest)

# Bare expression: Streamlit magic renders this string in the app.
res = f'RMSE: Train {rmse_train:.2f} Test {rmse_test:.2f}'
res

Appending to nnet.py

In :
%%writefile -a nnet.py

# Plot the data and the network's fit across the full x range, on an
# explicit figure passed to st.pyplot (global pyplot use is deprecated).
fig, ax = plt.subplots()
ax.plot(Xtrain, Ttrain, 'o', label='Training Data')
ax.plot(Xtest, Ttest, 'o', label='Testing Data')
x_min = min(Xtrain.min(), Xtest.min())
x_max = max(Xtrain.max(), Xtest.max())
X_for_plot = np.linspace(x_min, x_max, 100).reshape(-1, 1)
Z_train, Y_train = use(X_for_plot, V, W, stand_parms)
ax.plot(X_for_plot, Y_train, label='Neural Net Output')
ax.legend()
ax.set_xlabel('X')
ax.set_ylabel('Y')

st.pyplot(fig)

Appending to nnet.py

In [ ]:
# Now, at the command line, run

# streamlit run nnet.py

# This can also be run right here by uncommenting the following line:

# ! streamlit run nnet.py