Python Machine Learning 3rd Edition by Sebastian Raschka & Vahid Mirjalili, Packt Publishing Ltd. 2019
Code Repository: https://github.com/rasbt/python-machine-learning-book-3rd-edition
Code License: MIT License
Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).
%load_ext watermark
%watermark -a "Sebastian Raschka & Vahid Mirjalili" -u -d -p numpy,scipy,matplotlib,tensorflow
Sebastian Raschka & Vahid Mirjalili
last updated: 2019-12-06

numpy 1.17.4
scipy 1.3.1
matplotlib 3.1.0
tensorflow 2.0.0
%matplotlib inline
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
X_train = np.arange(10).reshape((10, 1))
y_train = np.array([1.0, 1.3, 3.1,
                    2.0, 5.0, 6.3,
                    6.6, 7.4, 8.0,
                    9.0])
plt.plot(X_train, y_train, 'o', markersize=10)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
X_train_norm = (X_train - np.mean(X_train))/np.std(X_train)
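## (added sanity check, not part of the original notebook): after
## standardization the inputs should have ~zero mean and unit std
assert abs(float(np.mean(X_train_norm))) < 1e-7
assert abs(float(np.std(X_train_norm)) - 1.0) < 1e-7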
ds_train_orig = tf.data.Dataset.from_tensor_slices(
    (tf.cast(X_train_norm, tf.float32),
     tf.cast(y_train, tf.float32)))
class MyModel(tf.keras.Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.w = tf.Variable(0.0, name='weight')
        self.b = tf.Variable(0.0, name='bias')

    def call(self, x):
        return self.w*x + self.b
model = MyModel()
model.build(input_shape=(None, 1))
model.summary()
Model: "my_model" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= Total params: 2 Trainable params: 2 Non-trainable params: 0 _________________________________________________________________
def loss_fn(y_true, y_pred):
    return tf.reduce_mean(tf.square(y_true - y_pred))
## testing the function:
yt = tf.convert_to_tensor([1.0])
yp = tf.convert_to_tensor([1.5])
loss_fn(yt, yp)
<tf.Tensor: id=33, shape=(), dtype=float32, numpy=0.25>
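The value 0.25 can be verified by hand: the loss is the mean squared error, and with a single pair it reduces to (1.0 - 1.5)^2 = 0.25. A minimal NumPy check (added here only for illustration):
## hand computation of the MSE for the single test pair above
print(np.mean((np.array([1.0]) - np.array([1.5]))**2))  # 0.25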
def train(model, inputs, outputs, learning_rate):
    with tf.GradientTape() as tape:
        current_loss = loss_fn(model(inputs), outputs)
    dW, db = tape.gradient(current_loss, [model.w, model.b])
    model.w.assign_sub(learning_rate * dW)
    model.b.assign_sub(learning_rate * db)
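For reference, the train() function performs one plain gradient-descent step: each parameter is moved against its gradient, i.e. w ← w − η·∂L/∂w and b ← b − η·∂L/∂b, where η is the learning rate. Below is a rough NumPy-only sketch of the same update for this linear model (the helper name numpy_sgd_step is hypothetical and added only for illustration):
## one gradient-descent step for y_hat = w*x + b under the MSE loss
def numpy_sgd_step(w, b, x, y, lr):
    y_hat = w * x + b
    grad_w = np.mean(2 * (y_hat - y) * x)   # dL/dw
    grad_b = np.mean(2 * (y_hat - y))       # dL/db
    return w - lr * grad_w, b - lr * grad_b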
tf.random.set_seed(1)
num_epochs = 200
log_steps = 100
learning_rate = 0.001
batch_size = 1
steps_per_epoch = int(np.ceil(len(y_train) / batch_size))
ds_train = ds_train_orig.shuffle(buffer_size=len(y_train))
ds_train = ds_train.repeat(count=None)
ds_train = ds_train.batch(1)
Ws, bs = [], []
for i, batch in enumerate(ds_train):
    if i >= steps_per_epoch * num_epochs:
        break
    Ws.append(model.w.numpy())
    bs.append(model.b.numpy())

    bx, by = batch
    loss_val = loss_fn(model(bx), by)

    train(model, bx, by, learning_rate=learning_rate)
    if i % log_steps == 0:
        print('Epoch {:4d} Step {:2d} Loss {:6.4f}'.format(
            int(i/steps_per_epoch), i, loss_val))
Epoch    0 Step  0 Loss 43.5600
Epoch   10 Step 100 Loss 0.7530
Epoch   20 Step 200 Loss 20.1759
Epoch   30 Step 300 Loss 23.3976
Epoch   40 Step 400 Loss 6.3481
Epoch   50 Step 500 Loss 4.6356
Epoch   60 Step 600 Loss 0.2411
Epoch   70 Step 700 Loss 0.2036
Epoch   80 Step 800 Loss 3.8177
Epoch   90 Step 900 Loss 0.9416
Epoch  100 Step 1000 Loss 0.7035
Epoch  110 Step 1100 Loss 0.0348
Epoch  120 Step 1200 Loss 0.5404
Epoch  130 Step 1300 Loss 0.1170
Epoch  140 Step 1400 Loss 0.1195
Epoch  150 Step 1500 Loss 0.0944
Epoch  160 Step 1600 Loss 0.4670
Epoch  170 Step 1700 Loss 2.0695
Epoch  180 Step 1800 Loss 0.0020
Epoch  190 Step 1900 Loss 0.3612
print('Final Parameters:', model.w.numpy(), model.b.numpy())
X_test = np.linspace(0, 9, num=100).reshape(-1, 1)
X_test_norm = (X_test - np.mean(X_train)) / np.std(X_train)
y_pred = model(tf.cast(X_test_norm, dtype=tf.float32))
fig = plt.figure(figsize=(13, 5))
ax = fig.add_subplot(1, 2, 1)
plt.plot(X_train_norm, y_train, 'o', markersize=10)
plt.plot(X_test_norm, y_pred, '--', lw=3)
plt.legend(['Training examples', 'Linear Reg.'], fontsize=15)
ax.set_xlabel('x', size=15)
ax.set_ylabel('y', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
ax = fig.add_subplot(1, 2, 2)
plt.plot(Ws, lw=3)
plt.plot(bs, lw=3)
plt.legend(['Weight w', 'Bias unit b'], fontsize=15)
ax.set_xlabel('Iteration', size=15)
ax.set_ylabel('Value', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
#plt.savefig('ch13-linreg-1.pdf')
plt.show()
Final Parameters: 2.6576622 4.8798566
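Note that these parameters live in the standardized x-space, since the model was trained on X_train_norm. If needed, they can be mapped back to the original scale using y = w·(x − μ)/σ + b, i.e. slope = w/σ and intercept = b − w·μ/σ. A small sketch (added for illustration, not in the original notebook):
## map the learned parameters from normalized x back to the original x scale
mu, sigma = np.mean(X_train), np.std(X_train)
slope = model.w.numpy() / sigma
intercept = model.b.numpy() - model.w.numpy() * mu / sigma
print('slope: %.4f  intercept: %.4f' % (slope, intercept))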
tf.random.set_seed(1)
model = MyModel()
#model.build((None, 1))
model.compile(optimizer='sgd',
              loss=loss_fn,
              metrics=['mae', 'mse'])

model.fit(X_train_norm, y_train,
          epochs=num_epochs, batch_size=batch_size,
          verbose=1)
Train on 10 samples
Epoch 1/200
10/10 [==============================] - 0s 33ms/sample - loss: 27.8562 - mae: 4.5967 - mse: 27.8562
Epoch 2/200
10/10 [==============================] - 0s 1ms/sample - loss: 18.6235 - mae: 3.7249 - mse: 18.6235
Epoch 3/200
10/10 [==============================] - 0s 904us/sample - loss: 12.5081 - mae: 3.0572 - mse: 12.5081
Epoch 4/200
10/10 [==============================] - 0s 941us/sample - loss: 8.4484 - mae: 2.4816 - mse: 8.4484
Epoch 5/200
10/10 [==============================] - 0s 899us/sample - loss: 5.7520 - mae: 2.0644 - mse: 5.7520
[... epochs 6-199 omitted; the loss plateaus around 0.414 ...]
Epoch 200/200
10/10 [==============================] - 0s 999us/sample - loss: 0.4139 - mae: 0.4925 - mse: 0.4139
<tensorflow.python.keras.callbacks.History at 0x7f8f9bf78c88>
print(model.w.numpy(), model.b.numpy())
X_test = np.linspace(0, 9, num=100).reshape(-1, 1)
X_test_norm = (X_test - np.mean(X_train)) / np.std(X_train)
y_pred = model(tf.cast(X_test_norm, dtype=tf.float32))
fig = plt.figure(figsize=(13, 5))
ax = fig.add_subplot(1, 2, 1)
plt.plot(X_train_norm, y_train, 'o', markersize=10)
plt.plot(X_test_norm, y_pred, '--', lw=3)
plt.legend(['Training Samples', 'Linear Regression'], fontsize=15)
ax = fig.add_subplot(1, 2, 2)
plt.plot(Ws, lw=3)
plt.plot(bs, lw=3)
plt.legend(['W', 'bias'], fontsize=15)
plt.show()
2.7058775 4.971019
import tensorflow_datasets as tfds
iris, iris_info = tfds.load('iris', with_info=True)
print(iris_info)
WARNING:absl:Warning: Setting shuffle_files=True because split=TRAIN and shuffle_files=None. This behavior will be deprecated on 2019-08-06, at which point shuffle_files=False will be the default for all splits.
tfds.core.DatasetInfo(
    name='iris',
    version=1.0.0,
    description='This is perhaps the best known database to be found in the pattern recognition literature. Fisher's paper is a classic in the field and is referenced frequently to this day. (See Duda & Hart, for example.) The data set contains 3 classes of 50 instances each, where each class refers to a type of iris plant. One class is linearly separable from the other 2; the latter are NOT linearly separable from each other.',
    urls=['https://archive.ics.uci.edu/ml/datasets/iris'],
    features=FeaturesDict({
        'features': Tensor(shape=(4,), dtype=tf.float32),
        'label': ClassLabel(shape=(), dtype=tf.int64, num_classes=3),
    }),
    total_num_examples=150,
    splits={
        'train': 150,
    },
    supervised_keys=('features', 'label'),
    citation="""@misc{Dua:2019,
        author = "Dua, Dheeru and Graff, Casey",
        year = "2017",
        title = "{UCI} Machine Learning Repository",
        url = "http://archive.ics.uci.edu/ml",
        institution = "University of California, Irvine, School of Information and Computer Sciences"
    }""",
    redistribution_info=,
)
tf.random.set_seed(1)
ds_orig = iris['train']
ds_orig = ds_orig.shuffle(150, reshuffle_each_iteration=False)
print(next(iter(ds_orig)))
ds_train_orig = ds_orig.take(100)
ds_test = ds_orig.skip(100)
{'features': <tf.Tensor: id=117451, shape=(4,), dtype=float32, numpy=array([5.5, 3.5, 1.3, 0.2], dtype=float32)>, 'label': <tf.Tensor: id=117452, shape=(), dtype=int64, numpy=0>}
## checking the number of examples:
n = 0
for example in ds_train_orig:
    n += 1
print(n)

n = 0
for example in ds_test:
    n += 1
print(n)
100 50
ds_train_orig = ds_train_orig.map(
    lambda x: (x['features'], x['label']))

ds_test = ds_test.map(
    lambda x: (x['features'], x['label']))
next(iter(ds_train_orig))
(<tf.Tensor: id=117786, shape=(4,), dtype=float32, numpy=array([5.5, 3.5, 1.3, 0.2], dtype=float32)>, <tf.Tensor: id=117787, shape=(), dtype=int64, numpy=0>)
iris_model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='sigmoid',
                          name='fc1', input_shape=(4,)),
    tf.keras.layers.Dense(3, name='fc2', activation='softmax')])
iris_model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= fc1 (Dense) (None, 16) 80 _________________________________________________________________ fc2 (Dense) (None, 3) 51 ================================================================= Total params: 131 Trainable params: 131 Non-trainable params: 0 _________________________________________________________________
iris_model.compile(optimizer='adam',
                   loss='sparse_categorical_crossentropy',
                   metrics=['accuracy'])
num_epochs = 100
training_size = 100
batch_size = 2
steps_per_epoch = int(np.ceil(training_size / batch_size))
ds_train = ds_train_orig.shuffle(buffer_size=training_size)
ds_train = ds_train.repeat()
ds_train = ds_train.batch(batch_size=batch_size)
ds_train = ds_train.prefetch(buffer_size=1000)
history = iris_model.fit(ds_train, epochs=num_epochs,
                         steps_per_epoch=steps_per_epoch,
                         verbose=0)
hist = history.history
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(1, 2, 1)
ax.plot(hist['loss'], lw=3)
ax.set_title('Training loss', size=15)
ax.set_xlabel('Epoch', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
ax = fig.add_subplot(1, 2, 2)
ax.plot(hist['accuracy'], lw=3)
ax.set_title('Training accuracy', size=15)
ax.set_xlabel('Epoch', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
plt.tight_layout()
#plt.savefig('ch13-cls-learning-curve.pdf')
plt.show()
results = iris_model.evaluate(ds_test.batch(50), verbose=0)
print('Test loss: {:.4f} Test Acc.: {:.4f}'.format(*results))
Test loss: 0.1461 Test Acc.: 1.0000
iris_model.save('iris-classifier.h5',
                overwrite=True,
                include_optimizer=True,
                save_format='h5')
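As an aside (not shown in the original cell), Keras can also export the model in the TensorFlow SavedModel format by passing a directory path with save_format='tf'; a commented sketch, assuming write access to a local 'iris-classifier' directory:
## alternative export (sketch): TensorFlow SavedModel directory instead of HDF5
# iris_model.save('iris-classifier', save_format='tf')
# iris_model_new = tf.keras.models.load_model('iris-classifier')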
iris_model_new = tf.keras.models.load_model('iris-classifier.h5')
iris_model_new.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= fc1 (Dense) (None, 16) 80 _________________________________________________________________ fc2 (Dense) (None, 3) 51 ================================================================= Total params: 131 Trainable params: 131 Non-trainable params: 0 _________________________________________________________________
results = iris_model_new.evaluate(ds_test.batch(50), verbose=0)
print('Test loss: {:.4f} Test Acc.: {:.4f}'.format(*results))
Test loss: 0.1491 Test Acc.: 1.0000
labels_train = []
for i, item in enumerate(ds_train_orig):
    labels_train.append(item[1].numpy())

labels_test = []
for i, item in enumerate(ds_test):
    labels_test.append(item[1].numpy())

print('Training Set: ', len(labels_train), 'Test Set: ', len(labels_test))
Training Set: 100 Test Set: 50
iris_model_new.to_json()
'{"class_name": "Sequential", "config": {"name": "sequential", "layers": [{"class_name": "Dense", "config": {"name": "fc1", "trainable": true, "batch_input_shape": [null, 4], "dtype": "float32", "units": 16, "activation": "sigmoid", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Dense", "config": {"name": "fc2", "trainable": true, "dtype": "float32", "units": 3, "activation": "softmax", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}]}, "keras_version": "2.2.4-tf", "backend": "tensorflow"}'
import numpy as np
X = np.array([1, 1.4, 2.5]) ## first value must be 1
w = np.array([0.4, 0.3, 0.5])
def net_input(X, w):
    return np.dot(X, w)

def logistic(z):
    return 1.0 / (1.0 + np.exp(-z))

def logistic_activation(X, w):
    z = net_input(X, w)
    return logistic(z)
print('P(y=1|x) = %.3f' % logistic_activation(X, w))
P(y=1|x) = 0.888
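This probability can be reproduced by hand: the net input is z = 1·0.4 + 1.4·0.3 + 2.5·0.5 = 2.07, and 1/(1 + e^(−2.07)) ≈ 0.888. A one-line check (added for illustration):
## hand computation of the net input and the logistic output
print(1*0.4 + 1.4*0.3 + 2.5*0.5, 1.0 / (1.0 + np.exp(-2.07)))  # 2.07  ~0.888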
# W : array with shape = (n_output_units, n_hidden_units+1)
# note that the first column are the bias units
W = np.array([[1.1, 1.2, 0.8, 0.4],
              [0.2, 0.4, 1.0, 0.2],
              [0.6, 1.5, 1.2, 0.7]])
# A : data array with shape = (n_samples, n_hidden_units + 1)
# note that the first column (the bias unit) must be 1
A = np.array([[1, 0.1, 0.4, 0.6]])
Z = np.dot(W, A[0])
y_probas = logistic(Z)
print('Net Input: \n', Z)
print('Output Units:\n', y_probas)
Net Input:
 [1.78 0.76 1.65]
Output Units:
 [0.85569687 0.68135373 0.83889105]
y_class = np.argmax(Z, axis=0)
print('Predicted class label: %d' % y_class)
Predicted class label: 0
def softmax(z):
    return np.exp(z) / np.sum(np.exp(z))
y_probas = softmax(Z)
print('Probabilities:\n', y_probas)
np.sum(y_probas)
Probabilities: [0.44668973 0.16107406 0.39223621]
1.0
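The softmax values are just the exponentiated net inputs normalized by their sum, so they sum to 1 by construction; e.g. the first unit is exp(1.78)/(exp(1.78) + exp(0.76) + exp(1.65)) ≈ 0.4467. A short check (added for illustration):
## explicit softmax computation for the first output unit
print(np.exp(1.78) / (np.exp(1.78) + np.exp(0.76) + np.exp(1.65)))  # ~0.4467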
import tensorflow as tf
Z_tensor = tf.expand_dims(Z, axis=0)
tf.keras.activations.softmax(Z_tensor)
<tf.Tensor: id=134679, shape=(1, 3), dtype=float64, numpy=array([[0.44668973, 0.16107406, 0.39223621]])>
import matplotlib.pyplot as plt
%matplotlib inline
def tanh(z):
    e_p = np.exp(z)
    e_m = np.exp(-z)
    return (e_p - e_m) / (e_p + e_m)
z = np.arange(-5, 5, 0.005)
log_act = logistic(z)
tanh_act = tanh(z)
plt.ylim([-1.5, 1.5])
plt.xlabel('Net input $z$')
plt.ylabel(r'Activation $\phi(z)$')
plt.axhline(1, color='black', linestyle=':')
plt.axhline(0.5, color='black', linestyle=':')
plt.axhline(0, color='black', linestyle=':')
plt.axhline(-0.5, color='black', linestyle=':')
plt.axhline(-1, color='black', linestyle=':')
plt.plot(z, tanh_act,
linewidth=3, linestyle='--',
label='Tanh')
plt.plot(z, log_act,
linewidth=3,
label='Logistic')
plt.legend(loc='lower right')
plt.tight_layout()
plt.show()
np.tanh(z)
array([-0.9999092 , -0.99990829, -0.99990737, ..., 0.99990644, 0.99990737, 0.99990829])
import tensorflow as tf
tf.keras.activations.tanh(z)
<tf.Tensor: id=134681, shape=(2000,), dtype=float64, numpy= array([-0.9999092 , -0.99990829, -0.99990737, ..., 0.99990644, 0.99990737, 0.99990829])>
from scipy.special import expit
expit(z)
array([0.00669285, 0.00672617, 0.00675966, ..., 0.99320669, 0.99324034, 0.99327383])
tf.keras.activations.sigmoid(z)
<tf.Tensor: id=134683, shape=(2000,), dtype=float64, numpy= array([0.00669285, 0.00672617, 0.00675966, ..., 0.99320669, 0.99324034, 0.99327383])>
import tensorflow as tf
tf.keras.activations.relu(z)
<tf.Tensor: id=134685, shape=(2000,), dtype=float64, numpy=array([0. , 0. , 0. , ..., 4.985, 4.99 , 4.995])>
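The ReLU activation simply clips negative net inputs to zero, i.e. relu(z) = max(0, z); the equivalent NumPy expression (added for comparison) is:
## NumPy equivalent of the ReLU activation
np.maximum(0, z)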
## the correct way:
ds = tf.data.Dataset.range(15)
ds = ds.shuffle(15, reshuffle_each_iteration=False)
ds_train = ds.take(10)
ds_test = ds.skip(10)
ds_train = ds_train.shuffle(10).repeat(10)
ds_test = ds_test.shuffle(5)
ds_test = ds_test.repeat(10)
set_train = set()
for i, item in enumerate(ds_train):
    set_train.add(item.numpy())

set_test = set()
for i, item in enumerate(ds_test):
    set_test.add(item.numpy())
print(set_train, set_test)
{0, 1, 2, 3, 6, 7, 9, 10, 11, 13} {4, 5, 8, 12, 14}
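Because reshuffle_each_iteration=False fixes the shuffled order, the take/skip split yields disjoint training and test sets; a small added check using the sets built above makes this explicit:
## the two sets should share no elements when the split is done correctly
print(set_train.isdisjoint(set_test))  # True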
## The wrong way:
ds = tf.data.Dataset.range(15)
ds = ds.shuffle(15, reshuffle_each_iteration=True)
ds_train = ds.take(10)
ds_test = ds.skip(10)
ds_train = ds_train.shuffle(10).repeat(10)
ds_test = ds_test.shuffle(5)
ds_test = ds_test.repeat(10)
set_train = set()
for i, item in enumerate(ds_train):
    set_train.add(item.numpy())

set_test = set()
for i, item in enumerate(ds_test):
    set_test.add(item.numpy())
print(set_train, set_test)
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14} {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}
tfds.Split
##--------------------------- Attention ------------------------##
##                                                              ##
## Note: currently, tfds.Split has a bug in TF 2.0.0            ##
##                                                              ##
## I.e., splitting [2, 1] is expected to result in              ##
##       100 train and 50 test examples                         ##
##                                                              ##
## but instead, it results in 116 train and 34 test examples    ##
##                                                              ##
##--------------------------------------------------------------##
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
## method 1: specifying percentage:
#first_67_percent = tfds.Split.TRAIN.subsplit(tfds.percent[:67])
#last_33_percent = tfds.Split.TRAIN.subsplit(tfds.percent[-33:])
#ds_train_orig = tfds.load('iris', split=first_67_percent)
#ds_test = tfds.load('iris', split=last_33_percent)
## method 2: specifying the weights
split_train, split_test = tfds.Split.TRAIN.subsplit([2, 1])
ds_train_orig = tfds.load('iris', split=split_train)
ds_test = tfds.load('iris', split=split_test)
print(next(iter(ds_train_orig)))
print()
print(next(iter(ds_test)))
ds_train_orig = ds_train_orig.shuffle(100, reshuffle_each_iteration=True)
ds_test = ds_test.shuffle(50, reshuffle_each_iteration=False)
ds_train_orig = ds_train_orig.map(
    lambda x: (x['features'], x['label']))

ds_test = ds_test.map(
    lambda x: (x['features'], x['label']))
print(next(iter(ds_train_orig)))
for j in range(5):
    labels_train = []
    for i, item in enumerate(ds_train_orig):
        labels_train.append(item[1].numpy())

    labels_test = []
    for i, item in enumerate(ds_test):
        labels_test.append(item[1].numpy())
    print('Training Set: ', len(labels_train), 'Test Set: ', len(labels_test))

    labels_test = np.array(labels_test)
    print(np.sum(labels_test == 0), np.sum(labels_test == 1), np.sum(labels_test == 2))
{'features': <tf.Tensor: id=135251, shape=(4,), dtype=float32, numpy=array([6.1, 2.8, 4.7, 1.2], dtype=float32)>, 'label': <tf.Tensor: id=135252, shape=(), dtype=int64, numpy=1>}

{'features': <tf.Tensor: id=135258, shape=(4,), dtype=float32, numpy=array([5.7, 3. , 4.2, 1.2], dtype=float32)>, 'label': <tf.Tensor: id=135259, shape=(), dtype=int64, numpy=1>}

(<tf.Tensor: id=135303, shape=(4,), dtype=float32, numpy=array([6.8, 2.8, 4.8, 1.4], dtype=float32)>, <tf.Tensor: id=135304, shape=(), dtype=int64, numpy=1>)

Training Set:  116 Test Set:  34
10 12 12
Training Set:  116 Test Set:  34
10 12 12
Training Set:  116 Test Set:  34
10 12 12
Training Set:  116 Test Set:  34
10 12 12
Training Set:  116 Test Set:  34
10 12 12
Readers may ignore the next cell.
! python ../.convert_notebook_to_script.py --input ch13_part2.ipynb --output ch13_part2.py
[NbConvertApp] Converting notebook ch13_part2.ipynb to script [NbConvertApp] Writing 14023 bytes to ch13_part2.py