Python Machine Learning 3rd Edition by Sebastian Raschka & Vahid Mirjalili, Packt Publishing Ltd. 2019
Code Repository: https://github.com/rasbt/python-machine-learning-book-3rd-edition
Code License: MIT License
import tensorflow as tf
print('TensorFlow version: ', tf.__version__)
import numpy as np
np.set_printoptions(precision=3)
TensorFlow version: 2.0.0
! python -c 'import tensorflow as tf; print(tf.__version__)'
2.0.0
a = np.array([1, 2, 3], dtype=np.int32)
b = [4, 5, 6]
t_a = tf.convert_to_tensor(a)
t_b = tf.convert_to_tensor(b)
print(t_a)
print(t_b)
tf.Tensor([1 2 3], shape=(3,), dtype=int32) tf.Tensor([4 5 6], shape=(3,), dtype=int32)
tf.is_tensor(a), tf.is_tensor(t_a)
(False, True)
t_ones = tf.ones((2, 3))
t_ones.shape
TensorShape([2, 3])
t_ones.numpy()
array([[1., 1., 1.], [1., 1., 1.]], dtype=float32)
const_tensor = tf.constant([1.2, 5, np.pi], dtype=tf.float32)
print(const_tensor)
tf.Tensor([1.2 5. 3.142], shape=(3,), dtype=float32)
t_a_new = tf.cast(t_a, tf.int64)
print(t_a_new.dtype)
<dtype: 'int64'>
t = tf.random.uniform(shape=(3, 5))
t_tr = tf.transpose(t)
print(t.shape, ' --> ', t_tr.shape)
(3, 5) --> (5, 3)
t = tf.zeros((30,))
t_reshape = tf.reshape(t, shape=(5, 6))
print(t_reshape.shape)
(5, 6)
t = tf.zeros((1, 2, 1, 4, 1))
t_sqz = tf.squeeze(t, axis=(2, 4))
print(t.shape, ' --> ', t_sqz.shape)
(1, 2, 1, 4, 1) --> (1, 2, 4)
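As a complementary sketch (not in the original notebook), the removed axes can be restored with tf.expand_dims, which inserts a new axis of size 1 at a given position:
t_exp = tf.expand_dims(t_sqz, axis=2)
print(t_sqz.shape, ' --> ', t_exp.shape)   # expected: (1, 2, 4) --> (1, 2, 1, 4)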
tf.random.set_seed(1)
t1 = tf.random.uniform(shape=(5, 2),
minval=-1.0, maxval=1.0)
t2 = tf.random.normal(shape=(5, 2),
mean=0.0, stddev=1.0)
t3 = tf.multiply(t1, t2).numpy()
print(t3)
[[-0.27 -0.874] [-0.017 -0.175] [-0.296 -0.139] [-0.727 0.135] [-0.401 0.004]]
t4 = tf.math.reduce_mean(t1, axis=0)
print(t4)
tf.Tensor([0.09 0.207], shape=(2,), dtype=float32)
t5 = tf.linalg.matmul(t1, t2, transpose_b=True)
print(t5.numpy())
[[-1.144 1.115 -0.87 -0.321 0.856] [ 0.248 -0.191 0.25 -0.064 -0.331] [-0.478 0.407 -0.436 0.022 0.527] [ 0.525 -0.234 0.741 -0.593 -1.194] [-0.099 0.26 0.125 -0.462 -0.396]]
t6 = tf.linalg.matmul(t1, t2, transpose_a=True)
print(t6.numpy())
[[-1.711 0.302] [ 0.371 -1.049]]
norm_t1 = tf.norm(t1, ord=2, axis=1).numpy()
print(norm_t1)
[1.046 0.293 0.504 0.96 0.383]
np.sqrt(np.sum(np.square(t1), axis=1))
array([1.046, 0.293, 0.504, 0.96 , 0.383], dtype=float32)
tf.random.set_seed(1)
t = tf.random.uniform((6,))
print(t.numpy())
t_splits = tf.split(t, 3)
[item.numpy() for item in t_splits]
[0.165 0.901 0.631 0.435 0.292 0.643]
[array([0.165, 0.901], dtype=float32), array([0.631, 0.435], dtype=float32), array([0.292, 0.643], dtype=float32)]
tf.random.set_seed(1)
t = tf.random.uniform((5,))
print(t.numpy())
t_splits = tf.split(t, [3, 2])
[item.numpy() for item in t_splits]
[0.165 0.901 0.631 0.435 0.292]
[array([0.165, 0.901, 0.631], dtype=float32), array([0.435, 0.292], dtype=float32)]
A = tf.ones((3,))
B = tf.zeros((2,))
C = tf.concat([A, B], axis=0)
print(C.numpy())
[1. 1. 1. 0. 0.]
A = tf.ones((3,))
B = tf.zeros((3,))
S = tf.stack([A, B], axis=1)
print(S.numpy())
[[1. 0.] [1. 0.] [1. 0.]]
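A complementary sketch (not in the original notebook): tf.unstack reverses the stacking and returns the original column tensors as a Python list:
cols = tf.unstack(S, axis=1)
print([item.numpy() for item in cols])   # expected: [array([1., 1., 1.], ...), array([0., 0., 0.], ...)]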
a = [1.2, 3.4, 7.5, 4.1, 5.0, 1.0]
ds = tf.data.Dataset.from_tensor_slices(a)
print(ds)
<TensorSliceDataset shapes: (), types: tf.float32>
for item in ds:
    print(item)
tf.Tensor(1.2, shape=(), dtype=float32) tf.Tensor(3.4, shape=(), dtype=float32) tf.Tensor(7.5, shape=(), dtype=float32) tf.Tensor(4.1, shape=(), dtype=float32) tf.Tensor(5.0, shape=(), dtype=float32) tf.Tensor(1.0, shape=(), dtype=float32)
ds_batch = ds.batch(3)
for i, elem in enumerate(ds_batch, 1):
    print('batch {}: '.format(i), elem.numpy())
batch 1: [1.2 3.4 7.5] batch 2: [4.1 5. 1. ]
tf.random.set_seed(1)
t_x = tf.random.uniform([4, 3], dtype=tf.float32)
t_y = tf.range(4)
ds_x = tf.data.Dataset.from_tensor_slices(t_x)
ds_y = tf.data.Dataset.from_tensor_slices(t_y)
ds_joint = tf.data.Dataset.zip((ds_x, ds_y))
for example in ds_joint:
    print(' x: ', example[0].numpy(),
          ' y: ', example[1].numpy())
x: [0.165 0.901 0.631] y: 0 x: [0.435 0.292 0.643] y: 1 x: [0.976 0.435 0.66 ] y: 2 x: [0.605 0.637 0.614] y: 3
## method 2:
ds_joint = tf.data.Dataset.from_tensor_slices((t_x, t_y))
for example in ds_joint:
    print(' x: ', example[0].numpy(),
          ' y: ', example[1].numpy())
x: [0.165 0.901 0.631] y: 0 x: [0.435 0.292 0.643] y: 1 x: [0.976 0.435 0.66 ] y: 2 x: [0.605 0.637 0.614] y: 3
ds_trans = ds_joint.map(lambda x, y: (x*2-1.0, y))
for example in ds_trans:
    print(' x: ', example[0].numpy(),
          ' y: ', example[1].numpy())
x: [-0.67 0.803 0.262] y: 0 x: [-0.131 -0.416 0.285] y: 1 x: [ 0.952 -0.13 0.32 ] y: 2 x: [0.21 0.273 0.229] y: 3
tf.random.set_seed(1)
ds = ds_joint.shuffle(buffer_size=len(t_x))
for example in ds:
    print(' x: ', example[0].numpy(),
          ' y: ', example[1].numpy())
x: [0.976 0.435 0.66 ] y: 2 x: [0.435 0.292 0.643] y: 1 x: [0.165 0.901 0.631] y: 0 x: [0.605 0.637 0.614] y: 3
ds = ds_joint.batch(batch_size=3,
drop_remainder=False)
batch_x, batch_y = next(iter(ds))
print('Batch-x : \n', batch_x.numpy())
print('Batch-y : ', batch_y.numpy())
Batch-x : [[0.165 0.901 0.631] [0.435 0.292 0.643] [0.976 0.435 0.66 ]] Batch-y : [0 1 2]
ds = ds_joint.batch(3).repeat(count=2)
for i,(batch_x, batch_y) in enumerate(ds):
    print(i, batch_x.shape, batch_y.numpy())
0 (3, 3) [0 1 2] 1 (1, 3) [3] 2 (3, 3) [0 1 2] 3 (1, 3) [3]
ds = ds_joint.repeat(count=2).batch(3)
for i,(batch_x, batch_y) in enumerate(ds):
    print(i, batch_x.shape, batch_y.numpy())
0 (3, 3) [0 1 2] 1 (3, 3) [3 0 1] 2 (2, 3) [2 3]
tf.random.set_seed(1)
## Order 1: shuffle -> batch -> repeat
ds = ds_joint.shuffle(4).batch(2).repeat(20)
for i,(batch_x, batch_y) in enumerate(ds):
    print(i, batch_x.shape, batch_y.numpy())
0 (2, 3) [2 1] 1 (2, 3) [0 3] 2 (2, 3) [0 3] 3 (2, 3) [1 2] 4 (2, 3) [3 0] 5 (2, 3) [1 2] 6 (2, 3) [1 3] 7 (2, 3) [2 0] 8 (2, 3) [1 2] 9 (2, 3) [3 0] 10 (2, 3) [3 0] 11 (2, 3) [2 1] 12 (2, 3) [3 0] 13 (2, 3) [1 2] 14 (2, 3) [3 0] 15 (2, 3) [2 1] 16 (2, 3) [2 3] 17 (2, 3) [0 1] 18 (2, 3) [1 2] 19 (2, 3) [0 3] 20 (2, 3) [0 1] 21 (2, 3) [2 3] 22 (2, 3) [3 2] 23 (2, 3) [0 1] 24 (2, 3) [3 0] 25 (2, 3) [1 2] 26 (2, 3) [1 3] 27 (2, 3) [2 0] 28 (2, 3) [2 1] 29 (2, 3) [0 3] 30 (2, 3) [2 3] 31 (2, 3) [0 1] 32 (2, 3) [3 1] 33 (2, 3) [2 0] 34 (2, 3) [3 2] 35 (2, 3) [1 0] 36 (2, 3) [3 0] 37 (2, 3) [2 1] 38 (2, 3) [0 2] 39 (2, 3) [3 1]
tf.random.set_seed(1)
## Order 2: batch -> shuffle -> repeat
ds = ds_joint.batch(2).shuffle(4).repeat(20)
for i,(batch_x, batch_y) in enumerate(ds):
    print(i, batch_x.shape, batch_y.numpy())
0 (2, 3) [0 1] 1 (2, 3) [2 3] 2 (2, 3) [0 1] 3 (2, 3) [2 3] 4 (2, 3) [2 3] 5 (2, 3) [0 1] 6 (2, 3) [2 3] 7 (2, 3) [0 1] 8 (2, 3) [2 3] 9 (2, 3) [0 1] 10 (2, 3) [2 3] 11 (2, 3) [0 1] 12 (2, 3) [2 3] 13 (2, 3) [0 1] 14 (2, 3) [2 3] 15 (2, 3) [0 1] 16 (2, 3) [0 1] 17 (2, 3) [2 3] 18 (2, 3) [2 3] 19 (2, 3) [0 1] 20 (2, 3) [0 1] 21 (2, 3) [2 3] 22 (2, 3) [2 3] 23 (2, 3) [0 1] 24 (2, 3) [2 3] 25 (2, 3) [0 1] 26 (2, 3) [2 3] 27 (2, 3) [0 1] 28 (2, 3) [0 1] 29 (2, 3) [2 3] 30 (2, 3) [0 1] 31 (2, 3) [2 3] 32 (2, 3) [2 3] 33 (2, 3) [0 1] 34 (2, 3) [2 3] 35 (2, 3) [0 1] 36 (2, 3) [2 3] 37 (2, 3) [0 1] 38 (2, 3) [0 1] 39 (2, 3) [2 3]
import pathlib
imgdir_path = pathlib.Path('images/')
file_list = [str(path) for path in imgdir_path.glob('*.jpg')]
print(file_list)
['images/dog-03.jpg', 'images/cat-01.jpg', 'images/cat-02.jpg', 'images/cat-03.jpg', 'images/dog-01.jpg', 'images/dog-02.jpg']
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline
fig = plt.figure(figsize=(10, 5))
for i,file in enumerate(file_list):
    img_raw = tf.io.read_file(file)
    img = tf.image.decode_image(img_raw)
    print('Image shape: ', img.shape)
    ax = fig.add_subplot(2, 3, i+1)
    ax.set_xticks([]); ax.set_yticks([])
    ax.imshow(img)
    ax.set_title(file, size=15)
plt.tight_layout()
plt.savefig('ch13-catdog-examples.pdf')
plt.show()
Image shape: (900, 1200, 3) Image shape: (900, 1200, 3) Image shape: (900, 1200, 3) Image shape: (900, 742, 3) Image shape: (800, 1200, 3) Image shape: (800, 1200, 3)
labels = [1 if 'dog' in file else 0
for file in file_list]
print(labels)
[1, 0, 0, 0, 1, 1]
ds_files_labels = tf.data.Dataset.from_tensor_slices(
(file_list, labels))
for item in ds_files_labels:
    print(item[0].numpy(), item[1].numpy())
b'images/dog-03.jpg' 1 b'images/cat-01.jpg' 0 b'images/cat-02.jpg' 0 b'images/cat-03.jpg' 0 b'images/dog-01.jpg' 1 b'images/dog-02.jpg' 1
def load_and_preprocess(path, label):
    image = tf.io.read_file(path)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.resize(image, [img_height, img_width])
    image /= 255.0
    return image, label
img_width, img_height = 120, 80
ds_images_labels = ds_files_labels.map(load_and_preprocess)
fig = plt.figure(figsize=(10, 5))
for i,example in enumerate(ds_images_labels):
    print(example[0].shape, example[1].numpy())
    ax = fig.add_subplot(2, 3, i+1)
    ax.set_xticks([]); ax.set_yticks([])
    ax.imshow(example[0])
    ax.set_title('{}'.format(example[1].numpy()),
                 size=15)
plt.tight_layout()
plt.savefig('ch13-catdog-dataset.pdf')
plt.show()
(80, 120, 3) 1 (80, 120, 3) 0 (80, 120, 3) 0 (80, 120, 3) 0 (80, 120, 3) 1 (80, 120, 3) 1
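A minimal sketch (not in the original notebook) of turning this mapped dataset into a small training pipeline; the batch size of 2 is an assumed value for illustration, and the shuffle-then-batch order follows the recommendation demonstrated earlier:
BATCH_SIZE = 2   ## assumed value, for illustration only
ds_pipeline = ds_images_labels.shuffle(buffer_size=len(file_list)).batch(BATCH_SIZE)
for batch_imgs, batch_labels in ds_pipeline:
    print(batch_imgs.shape, batch_labels.numpy())   # each batch: (2, 80, 120, 3) plus two labels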
! pip install tensorflow-datasets
Collecting tensorflow-datasets
...
Successfully installed chardet-3.0.4 dill-0.3.1.1 future-0.17.1 googleapis-common-protos-1.6.0 idna-2.8 promise-2.2.1 psutil-5.6.3 requests-2.22.0 tensorflow-datasets-1.2.0 tensorflow-metadata-0.14.0 tqdm-4.36.1 urllib3-1.25.6
import tensorflow_datasets as tfds
print(len(tfds.list_builders()))
print(tfds.list_builders()[:5])
101 ['abstract_reasoning', 'aflw2k3d', 'amazon_us_reviews', 'bair_robot_pushing_small', 'bigearthnet']
## Run this to see the full list:
tfds.list_builders()
['abstract_reasoning', 'aflw2k3d', 'amazon_us_reviews', 'bair_robot_pushing_small', 'bigearthnet', 'binarized_mnist', 'binary_alpha_digits', 'caltech101', 'caltech_birds2010', 'caltech_birds2011', 'cats_vs_dogs', 'celeb_a', 'celeb_a_hq', 'chexpert', 'cifar10', 'cifar100', 'cifar10_corrupted', 'clevr', 'cnn_dailymail', 'coco', 'coco2014', 'coil100', 'colorectal_histology', 'colorectal_histology_large', 'curated_breast_imaging_ddsm', 'cycle_gan', 'deep_weeds', 'definite_pronoun_resolution', 'diabetic_retinopathy_detection', 'downsampled_imagenet', 'dsprites', 'dtd', 'dummy_dataset_shared_generator', 'dummy_mnist', 'emnist', 'eurosat', 'fashion_mnist', 'flores', 'food101', 'gap', 'glue', 'groove', 'higgs', 'horses_or_humans', 'image_label_folder', 'imagenet2012', 'imagenet2012_corrupted', 'imdb_reviews', 'iris', 'kitti', 'kmnist', 'lfw', 'lm1b', 'lsun', 'mnist', 'mnist_corrupted', 'moving_mnist', 'multi_nli', 'nsynth', 'omniglot', 'open_images_v4', 'oxford_flowers102', 'oxford_iiit_pet', 'para_crawl', 'patch_camelyon', 'pet_finder', 'quickdraw_bitmap', 'resisc45', 'rock_paper_scissors', 'rock_you', 'scene_parse150', 'shapes3d', 'smallnorb', 'snli', 'so2sat', 'squad', 'stanford_dogs', 'stanford_online_products', 'starcraft_video', 'sun397', 'super_glue', 'svhn_cropped', 'ted_hrlr_translate', 'ted_multi_translate', 'tf_flowers', 'titanic', 'trivia_qa', 'uc_merced', 'ucf101', 'visual_domain_decathlon', 'voc2007', 'wikipedia', 'wmt14_translate', 'wmt15_translate', 'wmt16_translate', 'wmt17_translate', 'wmt18_translate', 'wmt19_translate', 'wmt_t2t_translate', 'wmt_translate', 'xnli']
celeba_bldr = tfds.builder('celeb_a')
print(celeba_bldr.info.features)
print()
print(celeba_bldr.info.features.keys())
print()
print(celeba_bldr.info.features['image'])
print()
print(celeba_bldr.info.features['attributes'].keys())
print()
print(celeba_bldr.info.citation)
FeaturesDict({ 'attributes': FeaturesDict({ '5_o_Clock_Shadow': Tensor(shape=(), dtype=tf.bool), 'Arched_Eyebrows': Tensor(shape=(), dtype=tf.bool), 'Attractive': Tensor(shape=(), dtype=tf.bool), 'Bags_Under_Eyes': Tensor(shape=(), dtype=tf.bool), 'Bald': Tensor(shape=(), dtype=tf.bool), 'Bangs': Tensor(shape=(), dtype=tf.bool), 'Big_Lips': Tensor(shape=(), dtype=tf.bool), 'Big_Nose': Tensor(shape=(), dtype=tf.bool), 'Black_Hair': Tensor(shape=(), dtype=tf.bool), 'Blond_Hair': Tensor(shape=(), dtype=tf.bool), 'Blurry': Tensor(shape=(), dtype=tf.bool), 'Brown_Hair': Tensor(shape=(), dtype=tf.bool), 'Bushy_Eyebrows': Tensor(shape=(), dtype=tf.bool), 'Chubby': Tensor(shape=(), dtype=tf.bool), 'Double_Chin': Tensor(shape=(), dtype=tf.bool), 'Eyeglasses': Tensor(shape=(), dtype=tf.bool), 'Goatee': Tensor(shape=(), dtype=tf.bool), 'Gray_Hair': Tensor(shape=(), dtype=tf.bool), 'Heavy_Makeup': Tensor(shape=(), dtype=tf.bool), 'High_Cheekbones': Tensor(shape=(), dtype=tf.bool), 'Male': Tensor(shape=(), dtype=tf.bool), 'Mouth_Slightly_Open': Tensor(shape=(), dtype=tf.bool), 'Mustache': Tensor(shape=(), dtype=tf.bool), 'Narrow_Eyes': Tensor(shape=(), dtype=tf.bool), 'No_Beard': Tensor(shape=(), dtype=tf.bool), 'Oval_Face': Tensor(shape=(), dtype=tf.bool), 'Pale_Skin': Tensor(shape=(), dtype=tf.bool), 'Pointy_Nose': Tensor(shape=(), dtype=tf.bool), 'Receding_Hairline': Tensor(shape=(), dtype=tf.bool), 'Rosy_Cheeks': Tensor(shape=(), dtype=tf.bool), 'Sideburns': Tensor(shape=(), dtype=tf.bool), 'Smiling': Tensor(shape=(), dtype=tf.bool), 'Straight_Hair': Tensor(shape=(), dtype=tf.bool), 'Wavy_Hair': Tensor(shape=(), dtype=tf.bool), 'Wearing_Earrings': Tensor(shape=(), dtype=tf.bool), 'Wearing_Hat': Tensor(shape=(), dtype=tf.bool), 'Wearing_Lipstick': Tensor(shape=(), dtype=tf.bool), 'Wearing_Necklace': Tensor(shape=(), dtype=tf.bool), 'Wearing_Necktie': Tensor(shape=(), dtype=tf.bool), 'Young': Tensor(shape=(), dtype=tf.bool), }), 'image': Image(shape=(218, 178, 3), dtype=tf.uint8), 'landmarks': FeaturesDict({ 'lefteye_x': Tensor(shape=(), dtype=tf.int64), 'lefteye_y': Tensor(shape=(), dtype=tf.int64), 'leftmouth_x': Tensor(shape=(), dtype=tf.int64), 'leftmouth_y': Tensor(shape=(), dtype=tf.int64), 'nose_x': Tensor(shape=(), dtype=tf.int64), 'nose_y': Tensor(shape=(), dtype=tf.int64), 'righteye_x': Tensor(shape=(), dtype=tf.int64), 'righteye_y': Tensor(shape=(), dtype=tf.int64), 'rightmouth_x': Tensor(shape=(), dtype=tf.int64), 'rightmouth_y': Tensor(shape=(), dtype=tf.int64), }), }) dict_keys(['image', 'landmarks', 'attributes']) Image(shape=(218, 178, 3), dtype=tf.uint8) dict_keys(['5_o_Clock_Shadow', 'Arched_Eyebrows', 'Attractive', 'Bags_Under_Eyes', 'Bald', 'Bangs', 'Big_Lips', 'Big_Nose', 'Black_Hair', 'Blond_Hair', 'Blurry', 'Brown_Hair', 'Bushy_Eyebrows', 'Chubby', 'Double_Chin', 'Eyeglasses', 'Goatee', 'Gray_Hair', 'Heavy_Makeup', 'High_Cheekbones', 'Male', 'Mouth_Slightly_Open', 'Mustache', 'Narrow_Eyes', 'No_Beard', 'Oval_Face', 'Pale_Skin', 'Pointy_Nose', 'Receding_Hairline', 'Rosy_Cheeks', 'Sideburns', 'Smiling', 'Straight_Hair', 'Wavy_Hair', 'Wearing_Earrings', 'Wearing_Hat', 'Wearing_Lipstick', 'Wearing_Necklace', 'Wearing_Necktie', 'Young']) @inproceedings{conf/iccv/LiuLWT15, added-at = {2018-10-09T00:00:00.000+0200}, author = {Liu, Ziwei and Luo, Ping and Wang, Xiaogang and Tang, Xiaoou}, biburl = {https://www.bibsonomy.org/bibtex/250e4959be61db325d2f02c1d8cd7bfbb/dblp}, booktitle = {ICCV}, crossref = {conf/iccv/2015}, ee = {http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.425}, 
interhash = {3f735aaa11957e73914bbe2ca9d5e702}, intrahash = {50e4959be61db325d2f02c1d8cd7bfbb}, isbn = {978-1-4673-8391-2}, keywords = {dblp}, pages = {3730-3738}, publisher = {IEEE Computer Society}, timestamp = {2018-10-11T11:43:28.000+0200}, title = {Deep Learning Face Attributes in the Wild.}, url = {http://dblp.uni-trier.de/db/conf/iccv/iccv2015.html#LiuLWT15}, year = 2015 }
# Download the data, prepare it, and write it to disk
celeba_bldr.download_and_prepare()
# Load data from disk as tf.data.Datasets
datasets = celeba_bldr.as_dataset()
datasets.keys()
WARNING:absl:Warning: Setting shuffle_files=True because split=TRAIN and shuffle_files=None. This behavior will be deprecated on 2019-08-06, at which point shuffle_files=False will be the default for all splits.
dict_keys(['test', 'train', 'validation'])
#import tensorflow as tf
ds_train = datasets['train']
assert isinstance(ds_train, tf.data.Dataset)
example = next(iter(ds_train))
print(type(example))
print(example.keys())
<class 'dict'> dict_keys(['attributes', 'image', 'landmarks'])
ds_train = ds_train.map(lambda item:
(item['image'], tf.cast(item['attributes']['Male'], tf.int32)))
import matplotlib.pyplot as plt
%matplotlib inline
ds_train = ds_train.batch(18)
images, labels = next(iter(ds_train))
print(images.shape, labels)
(18, 218, 178, 3) tf.Tensor([1 1 1 1 1 0 1 0 0 0 0 1 0 0 1 0 1 0], shape=(18,), dtype=int32)
fig = plt.figure(figsize=(12, 8))
for i,(image,label) in enumerate(zip(images, labels)):
    ax = fig.add_subplot(3, 6, i+1)
    ax.set_xticks([]); ax.set_yticks([])
    ax.imshow(image)
    ax.set_title('{}'.format(label), size=15)
plt.show()
mnist, mnist_info = tfds.load('mnist', with_info=True)
print(mnist_info)
print(mnist.keys())
WARNING:absl:Warning: Setting shuffle_files=True because split=TRAIN and shuffle_files=None. This behavior will be deprecated on 2019-08-06, at which point shuffle_files=False will be the default for all splits.
tfds.core.DatasetInfo( name='mnist', version=1.0.0, description='The MNIST database of handwritten digits.', urls=['https://storage.googleapis.com/cvdf-datasets/mnist/'], features=FeaturesDict({ 'image': Image(shape=(28, 28, 1), dtype=tf.uint8), 'label': ClassLabel(shape=(), dtype=tf.int64, num_classes=10), }), total_num_examples=70000, splits={ 'test': 10000, 'train': 60000, }, supervised_keys=('image', 'label'), citation="""@article{lecun2010mnist, title={MNIST handwritten digit database}, author={LeCun, Yann and Cortes, Corinna and Burges, CJ}, journal={ATT Labs [Online]. Available: http://yann. lecun. com/exdb/mnist}, volume={2}, year={2010} }""", redistribution_info=, ) dict_keys(['test', 'train'])
ds_train = mnist['train']
assert isinstance(ds_train, tf.data.Dataset)
ds_train = ds_train.map(lambda item:
(item['image'], item['label']))
ds_train = ds_train.batch(10)
batch = next(iter(ds_train))
print(batch[0].shape, batch[1])
fig = plt.figure(figsize=(15, 6))
for i,(image,label) in enumerate(zip(batch[0], batch[1])):
    ax = fig.add_subplot(2, 5, i+1)
    ax.set_xticks([]); ax.set_yticks([])
    ax.imshow(image[:, :, 0], cmap='gray_r')
    ax.set_title('{}'.format(label), size=15)
(10, 28, 28, 1) tf.Tensor([3 9 2 9 8 6 0 2 3 4], shape=(10,), dtype=int64)
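A shorter variant (a sketch, not in the original notebook): tfds.load also accepts as_supervised=True, which yields (image, label) tuples directly so the explicit map step above can be skipped:
mnist_sup = tfds.load('mnist', split='train', as_supervised=True)
image, label = next(iter(mnist_sup))
print(image.shape, label.numpy())   # expected: (28, 28, 1) and an integer class label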
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
X_train = np.arange(10).reshape((10, 1))
y_train = np.array([1.0, 1.3, 3.1,
2.0, 5.0, 6.3,
6.6, 7.4, 8.0,
9.0])
plt.plot(X_train, y_train, 'o', markersize=10)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
X_train_norm = (X_train - np.mean(X_train))/np.std(X_train)
ds_train_orig = tf.data.Dataset.from_tensor_slices(
(tf.cast(X_train_norm, tf.float32),
tf.cast(y_train, tf.float32)))
class MyModel(tf.keras.Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.w = tf.Variable(0.0, name='weight')
        self.b = tf.Variable(0.0, name='bias')

    def call(self, x):
        return self.w*x + self.b
model = MyModel()
model.build(input_shape=(None, 1))
model.summary()
Model: "my_model_5" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= Total params: 2 Trainable params: 2 Non-trainable params: 0 _________________________________________________________________
def loss_fn(y_true, y_pred):
    return tf.reduce_mean(tf.square(y_true - y_pred))
## testing the function:
yt = tf.convert_to_tensor([1.0])
yp = tf.convert_to_tensor([1.5])
loss_fn(yt, yp)
<tf.Tensor: id=51, shape=(), dtype=float32, numpy=0.25>
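As a quick sanity check (added here, not in the original notebook), the value 0.25 is simply the mean squared difference between the two values:
print(np.mean((np.array([1.0]) - np.array([1.5]))**2))   # (1.0 - 1.5)**2 = 0.25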
def train(model, inputs, outputs, learning_rate):
    with tf.GradientTape() as tape:
        current_loss = loss_fn(model(inputs), outputs)
    dW, db = tape.gradient(current_loss, [model.w, model.b])
    model.w.assign_sub(learning_rate * dW)
    model.b.assign_sub(learning_rate * db)
tf.random.set_seed(1)
num_epochs = 200
log_steps = 100
learning_rate = 0.001
batch_size = 1
steps_per_epoch = int(np.ceil(len(y_train) / batch_size))
ds_train = ds_train_orig.shuffle(buffer_size=len(y_train))
ds_train = ds_train.repeat(count=None)
ds_train = ds_train.batch(1)
Ws, bs = [], []
for i, batch in enumerate(ds_train):
    if i >= steps_per_epoch * num_epochs:
        break
    Ws.append(model.w.numpy())
    bs.append(model.b.numpy())

    bx, by = batch
    loss_val = loss_fn(model(bx), by)

    train(model, bx, by, learning_rate=learning_rate)
    if i%log_steps==0:
        print('Epoch {:4d} Step {:2d} Loss {:6.4f}'.format(
              int(i/steps_per_epoch), i, loss_val))
Epoch 0 Step 0 Loss 43.5600 Epoch 10 Step 100 Loss 0.7530 Epoch 20 Step 200 Loss 20.1759 Epoch 30 Step 300 Loss 23.3976 Epoch 40 Step 400 Loss 6.3481 Epoch 50 Step 500 Loss 4.6356 Epoch 60 Step 600 Loss 0.2411 Epoch 70 Step 700 Loss 0.2036 Epoch 80 Step 800 Loss 3.8177 Epoch 90 Step 900 Loss 0.9416 Epoch 100 Step 1000 Loss 0.7035 Epoch 110 Step 1100 Loss 0.0348 Epoch 120 Step 1200 Loss 0.5404 Epoch 130 Step 1300 Loss 0.1170 Epoch 140 Step 1400 Loss 0.1195 Epoch 150 Step 1500 Loss 0.0944 Epoch 160 Step 1600 Loss 0.4670 Epoch 170 Step 1700 Loss 2.0695 Epoch 180 Step 1800 Loss 0.0020 Epoch 190 Step 1900 Loss 0.3612
print('Final Parameters: ', model.w.numpy(), model.b.numpy())
X_test = np.linspace(0, 9, num=100).reshape(-1, 1)
X_test_norm = (X_test - np.mean(X_train)) / np.std(X_train)
y_pred = model(tf.cast(X_test_norm, dtype=tf.float32))
fig = plt.figure(figsize=(13, 5))
ax = fig.add_subplot(1, 2, 1)
plt.plot(X_train_norm, y_train, 'o', markersize=10)
plt.plot(X_test_norm, y_pred, '--', lw=3)
plt.legend(['Training examples', 'Linear Reg.'], fontsize=15)
ax.set_xlabel('x', size=15)
ax.set_ylabel('y', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
ax = fig.add_subplot(1, 2, 2)
plt.plot(Ws, lw=3)
plt.plot(bs, lw=3)
plt.legend(['Weight w', 'Bias unit b'], fontsize=15)
ax.set_xlabel('Iteration', size=15)
ax.set_ylabel('Value', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
plt.savefig('ch13-linreg-1.pdf')
plt.show()
Final Parameters: 2.6576622 4.8798566
## Model training via the .compile() and .fit() methods
tf.random.set_seed(1)
model = MyModel()
#model.build((None, 1))
model.compile(optimizer='sgd',
loss=loss_fn,
metrics=['mae', 'mse'])
model.fit(X_train_norm, y_train,
epochs=num_epochs, batch_size=batch_size,
verbose=1)
Train on 10 samples
Epoch 1/200
10/10 [==============================] - 0s 46ms/sample - loss: 27.8562 - mae: 4.5967 - mse: 27.8562
Epoch 2/200
10/10 [==============================] - 0s 2ms/sample - loss: 18.6235 - mae: 3.7249 - mse: 18.6235
...
Epoch 200/200
10/10 [==============================] - 0s 2ms/sample - loss: 0.4139 - mae: 0.4925 - mse: 0.4139
<tensorflow.python.keras.callbacks.History at 0x7f966057f2d0>
print(model.w.numpy(), model.b.numpy())
X_test = np.linspace(0, 9, num=100).reshape(-1, 1)
X_test_norm = (X_test - np.mean(X_train)) / np.std(X_train)
y_pred = model(tf.cast(X_test_norm, dtype=tf.float32))
fig = plt.figure(figsize=(13, 5))
ax = fig.add_subplot(1, 2, 1)
plt.plot(X_train_norm, y_train, 'o', markersize=10)
plt.plot(X_test_norm, y_pred, '--', lw=3)
plt.legend(['Training Samples', 'Linear Reg.'], fontsize=15)
ax = fig.add_subplot(1, 2, 2)
plt.plot(Ws, lw=3)
plt.plot(bs, lw=3)
plt.legend(['W', 'bias'], fontsize=15)
plt.show()
[2.706] 4.971019
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
iris, iris_info = tfds.load('iris', with_info=True)
print(iris_info)
WARNING:absl:Warning: Setting shuffle_files=True because split=TRAIN and shuffle_files=None. This behavior will be deprecated on 2019-08-06, at which point shuffle_files=False will be the default for all splits.
tfds.core.DatasetInfo( name='iris', version=1.0.0, description='This is perhaps the best known database to be found in the pattern recognition literature. Fisher's paper is a classic in the field and is referenced frequently to this day. (See Duda & Hart, for example.) The data set contains 3 classes of 50 instances each, where each class refers to a type of iris plant. One class is linearly separable from the other 2; the latter are NOT linearly separable from each other. ', urls=['https://archive.ics.uci.edu/ml/datasets/iris'], features=FeaturesDict({ 'features': Tensor(shape=(4,), dtype=tf.float32), 'label': ClassLabel(shape=(), dtype=tf.int64, num_classes=3), }), total_num_examples=150, splits={ 'train': 150, }, supervised_keys=('features', 'label'), citation="""@misc{Dua:2019 , author = "Dua, Dheeru and Graff, Casey", year = "2017", title = "{UCI} Machine Learning Repository", url = "http://archive.ics.uci.edu/ml", institution = "University of California, Irvine, School of Information and Computer Sciences" }""", redistribution_info=, )
tf.random.set_seed(1)
ds_orig = iris['train']
ds_orig = ds_orig.shuffle(150, reshuffle_each_iteration=False)
print(next(iter(ds_orig)))
ds_train_orig = ds_orig.take(100)
ds_test = ds_orig.skip(100)
{'features': <tf.Tensor: id=69, shape=(4,), dtype=float32, numpy=array([5.5, 3.5, 1.3, 0.2], dtype=float32)>, 'label': <tf.Tensor: id=70, shape=(), dtype=int64, numpy=0>}
## checking the number of examples:
n = 0
for example in ds_train_orig:
    n += 1
print(n)

n = 0
for example in ds_test:
    n += 1
print(n)
100 50
ds_train_orig = ds_train_orig.map(
lambda x: (x['features'], x['label']))
ds_test = ds_test.map(
lambda x: (x['features'], x['label']))
next(iter(ds_train_orig))
(<tf.Tensor: id=94, shape=(4,), dtype=float32, numpy=array([5.5, 3.5, 1.3, 0.2], dtype=float32)>, <tf.Tensor: id=95, shape=(), dtype=int64, numpy=0>)
model = tf.keras.Sequential([
tf.keras.layers.Dense(16, activation='sigmoid',
name='fc1', input_shape=(4,)),
tf.keras.layers.Dense(3, name='fc2', activation='softmax')])
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= fc1 (Dense) (None, 16) 80 _________________________________________________________________ fc2 (Dense) (None, 3) 51 ================================================================= Total params: 131 Trainable params: 131 Non-trainable params: 0 _________________________________________________________________
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
num_epochs = 100
training_size = 100
batch_size = 2
steps_per_epoch = np.ceil(training_size / batch_size)
ds_train = ds_train_orig.shuffle(buffer_size=training_size)
ds_train = ds_train.repeat()
ds_train = ds_train.batch(batch_size=batch_size)
ds_train = ds_train.prefetch(buffer_size=1000)
history = model.fit(ds_train, epochs=num_epochs,
steps_per_epoch=steps_per_epoch,
verbose=0)
hist = history.history
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(1, 2, 1)
ax.plot(hist['loss'], lw=3)
ax.set_title('Training loss', size=15)
ax.set_xlabel('Epoch', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
ax = fig.add_subplot(1, 2, 2)
ax.plot(hist['accuracy'], lw=3)
ax.set_title('Training accuracy', size=15)
ax.set_xlabel('Epoch', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
plt.tight_layout()
plt.savefig('ch13-cls-learning-curve.pdf')
plt.show()
! echo $PWD/ch13-cls-learning-curve.pdf
/home/vahid/github/pyml-book-3rd-edition/ch13/ch13-cls-learning-curve.pdf
results = model.evaluate(ds_test.batch(50), verbose=0)
print('Test loss: {:.4f} Test Acc.: {:.4f}'.format(*results))
Test loss: 0.0692 Test Acc.: 0.9800
model.save('iris-classifier.h5',
overwrite=True,
include_optimizer=True,
save_format='h5')
model_new = tf.keras.models.load_model('iris-classifier.h5')
model_new.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= fc1 (Dense) (None, 16) 80 _________________________________________________________________ fc2 (Dense) (None, 3) 51 ================================================================= Total params: 131 Trainable params: 131 Non-trainable params: 0 _________________________________________________________________
results = model_new.evaluate(ds_test.batch(50), verbose=0)
print('Test loss: {:.4f} Test Acc.: {:.4f}'.format(*results))
Test loss: 0.0692 Test Acc.: 0.9800
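An alternative sketch (not shown in the original notebook): TensorFlow 2 can also export the model in the SavedModel format by passing save_format='tf'; the directory name used here is only illustrative.
model.save('iris-classifier-savedmodel', save_format='tf')
model_tf = tf.keras.models.load_model('iris-classifier-savedmodel')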
labels_train = []
for i,item in enumerate(ds_train_orig):
    labels_train.append(item[1].numpy())

labels_test = []
for i,item in enumerate(ds_test):
    labels_test.append(item[1].numpy())
print('Training Set: ',len(labels_train), 'Test Set: ', len(labels_test))
Training Set: 100 Test Set: 50
model.to_json()
'{"class_name": "Sequential", "config": {"name": "sequential", "layers": [{"class_name": "Dense", "config": {"name": "fc1", "trainable": true, "batch_input_shape": [null, 4], "dtype": "float32", "units": 16, "activation": "sigmoid", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Dense", "config": {"name": "fc2", "trainable": true, "dtype": "float32", "units": 3, "activation": "softmax", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}]}, "keras_version": "2.2.4-tf", "backend": "tensorflow"}'
import numpy as np
X = np.array([1, 1.4, 2.5]) ## first value must be 1
w = np.array([0.4, 0.3, 0.5])
def net_input(X, w):
    return np.dot(X, w)

def logistic(z):
    return 1.0 / (1.0 + np.exp(-z))

def logistic_activation(X, w):
    z = net_input(X, w)
    return logistic(z)
print('P(y=1|x) = %.3f' % logistic_activation(X, w))
P(y=1|x) = 0.888
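Working through the numbers (a check added here, not in the original notebook): the net input is z = 1*0.4 + 1.4*0.3 + 2.5*0.5 = 2.07, and logistic(2.07) is approximately 0.888.
print(net_input(X, w))    # approx. 2.07
print(logistic(2.07))     # approx. 0.888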
# W : array with shape = (n_output_units, n_hidden_units+1)
# note that the first column contains the bias units
W = np.array([[1.1, 1.2, 0.8, 0.4],
[0.2, 0.4, 1.0, 0.2],
[0.6, 1.5, 1.2, 0.7]])
# A : data array with shape = (n_samples, n_hidden_units + 1)
# note that the first element of each row (the bias input) must be 1
A = np.array([[1, 0.1, 0.4, 0.6]])
Z = np.dot(W, A[0])
y_probas = logistic(Z)
print('Net Input: \n', Z)
print('Output Units:\n', y_probas)
Net Input: [1.78 0.76 1.65] Output Units: [0.85569687 0.68135373 0.83889105]
y_class = np.argmax(Z, axis=0)
print('Predicted class label: %d' % y_class)
Predicted class label: 0
def softmax(z):
    return np.exp(z) / np.sum(np.exp(z))
y_probas = softmax(Z)
print('Probabilities:\n', y_probas)
np.sum(y_probas)
Probabilities: [0.44668973 0.16107406 0.39223621]
1.0
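As a cross-check (a sketch, not in the original notebook, assuming TensorFlow is still imported as tf from the earlier cells), the same operation is available as tf.nn.softmax:
print(tf.nn.softmax(Z).numpy())   # expected to match the NumPy result above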
import matplotlib.pyplot as plt
%matplotlib inline
def tanh(z):
    e_p = np.exp(z)
    e_m = np.exp(-z)
    return (e_p - e_m) / (e_p + e_m)
z = np.arange(-5, 5, 0.005)
log_act = logistic(z)
tanh_act = tanh(z)
plt.ylim([-1.5, 1.5])
plt.xlabel('Net input $z$')
plt.ylabel('Activation $\phi(z)$')
plt.axhline(1, color='black', linestyle=':')
plt.axhline(0.5, color='black', linestyle=':')
plt.axhline(0, color='black', linestyle=':')
plt.axhline(-0.5, color='black', linestyle=':')
plt.axhline(-1, color='black', linestyle=':')
plt.plot(z, tanh_act,
linewidth=3, linestyle='--',
label='Tanh')
plt.plot(z, log_act,
linewidth=3,
label='Logistic')
plt.legend(loc='lower right')
plt.tight_layout()
plt.show()
np.tanh(z)
array([-0.9999092 , -0.99990829, -0.99990737, ..., 0.99990644, 0.99990737, 0.99990829])
from scipy.special import expit
log_act = expit(z)
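TensorFlow ships the same activations as well; a minimal sketch (not in the original notebook), assuming tf is still imported from the earlier cells:
tf_log_act = tf.sigmoid(z)      # equivalent to the logistic function defined above
tf_tanh_act = tf.math.tanh(z)   # equivalent to np.tanh and the tanh defined above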
## the correct way:
ds = tf.data.Dataset.range(15)
ds = ds.shuffle(15, reshuffle_each_iteration=False)
ds_train = ds.take(10)
ds_test = ds.skip(10)
ds_train = ds_train.shuffle(10).repeat(10)
ds_test = ds_test.shuffle(5)
ds_test = ds_test.repeat(10)
set_train = set()
for i,item in enumerate(ds_train):
    set_train.add(item.numpy())

set_test = set()
for i,item in enumerate(ds_test):
    set_test.add(item.numpy())
print(set_train, set_test)
{0, 1, 2, 3, 6, 7, 9, 10, 11, 13} {4, 5, 8, 12, 14}
## The wrong way:
ds = tf.data.Dataset.range(15)
ds = ds.shuffle(15, reshuffle_each_iteration=True)
ds_train = ds.take(10)
ds_test = ds.skip(10)
ds_train = ds_train.shuffle(10).repeat(10)
ds_test = ds_test.shuffle(5)
ds_test = ds_test.repeat(10)
set_train = set()
for i,item in enumerate(ds_train):
    set_train.add(item.numpy())

set_test = set()
for i,item in enumerate(ds_test):
    set_test.add(item.numpy())
print(set_train, set_test)
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14} {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}
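The leakage can be made explicit with a small check (not in the original notebook): with reshuffle_each_iteration=True, take() and skip() operate on a different shuffling every time the pipeline is iterated, so the "train" and "test" sets end up sharing examples.
print('Overlap between train and test:', len(set_train & set_test))   # 15 here, 0 in the correct version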
## tfds.Split
#first_67_percent = tfds.Split.TRAIN.subsplit(tfds.percent[:67])
#last_33_percent = tfds.Split.TRAIN.subsplit(tfds.percent[-33:])
split_train, split_test = tfds.Split.TRAIN.subsplit([2, 1])
#ds_train_orig = tfds.load('iris', split=first_67_percent)
#ds_test = tfds.load('iris', split=last_33_percent)
ds_train_orig = tfds.load('iris', split=split_train)
ds_test = tfds.load('iris', split=split_test)
print(next(iter(ds_train_orig)))
print()
print(next(iter(ds_test)))
ds_train_orig = ds_train_orig.shuffle(100, reshuffle_each_iteration=True)
ds_test = ds_test.shuffle(50, reshuffle_each_iteration=False)
ds_train_orig = ds_train_orig.map(
lambda x: (x['features'], x['label']))
ds_test = ds_test.map(
lambda x: (x['features'], x['label']))
print(next(iter(ds_train_orig)))
for j in range(5):
    labels_train = []
    for i,item in enumerate(ds_train_orig):
        labels_train.append(item[1].numpy())

    labels_test = []
    for i,item in enumerate(ds_test):
        labels_test.append(item[1].numpy())

    print('Training Set: ', len(labels_train), 'Test Set: ', len(labels_test))

    labels_test = np.array(labels_test)
    print(np.sum(labels_test == 0), np.sum(labels_test == 1), np.sum(labels_test == 2))
{'features': <tf.Tensor: id=140312, shape=(4,), dtype=float32, numpy=array([6.1, 2.8, 4.7, 1.2], dtype=float32)>, 'label': <tf.Tensor: id=140313, shape=(), dtype=int64, numpy=1>} {'features': <tf.Tensor: id=140319, shape=(4,), dtype=float32, numpy=array([5.7, 3. , 4.2, 1.2], dtype=float32)>, 'label': <tf.Tensor: id=140320, shape=(), dtype=int64, numpy=1>} (<tf.Tensor: id=140364, shape=(4,), dtype=float32, numpy=array([6.8, 2.8, 4.8, 1.4], dtype=float32)>, <tf.Tensor: id=140365, shape=(), dtype=int64, numpy=1>) Training Set: 116 Test Set: 34 10 12 12 Training Set: 116 Test Set: 34 10 12 12 Training Set: 116 Test Set: 34 10 12 12 Training Set: 116 Test Set: 34 10 12 12 Training Set: 116 Test Set: 34 10 12 12
...
...
Readers may ignore the next cell.
! python ../.convert_notebook_to_script.py --input ch13.ipynb --output ch13.py
[NbConvertApp] Converting notebook ch12.ipynb to script [NbConvertApp] Writing 19212 bytes to ch12.py