import os
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from collections import Counter
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
!wget https://github.com/shenasa-ai/iranian-car-license-plate/raw/main/iclp-part1.zip
!unzip -qq iclp-part1.zip
--2023-11-01 12:00:56-- https://github.com/shenasa-ai/iranian-car-license-plate/raw/main/iclp-part1.zip Resolving github.com (github.com)... 192.30.255.112 Connecting to github.com (github.com)|192.30.255.112|:443... connected. HTTP request sent, awaiting response... 302 Found Location: https://raw.githubusercontent.com/shenasa-ai/iranian-car-license-plate/main/iclp-part1.zip [following] --2023-11-01 12:00:56-- https://raw.githubusercontent.com/shenasa-ai/iranian-car-license-plate/main/iclp-part1.zip Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ... Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 21385713 (20M) [application/zip] Saving to: ‘iclp-part1.zip’ iclp-part1.zip 100%[===================>] 20.39M --.-KB/s in 0.09s 2023-11-01 12:00:58 (225 MB/s) - ‘iclp-part1.zip’ saved [21385713/21385713]
!ls /content/iclp-part1 | wc -l
3000
# Path to the data directory
data_dir = Path("./iclp-part1/")
# Get list of all the images and labels
images = sorted(list(map(str, list(data_dir.glob("*.jpg")))))
labels = [img.split(os.path.sep)[-1].split(".png")[0].split("_")[0] for img in images]
print("Number of images found: ", len(images))
print("Number of labels found: ", len(labels))
Number of images found: 3000 Number of labels found: 3000
# Collect the distinct characters appearing in any label, in sorted order.
characters = sorted({char for label in labels for char in label})
print("Number of unique characters: ", len(characters))
print("Characters present: ", characters)
Number of unique characters: 26 Characters present: ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'H', 'J', 'K', 'L', 'M', 'N', 'S', 'T', 'V', 'X', 'Y', 'Z']
# Batch size for training and validation
batch_size = 16
# Desired image dimensions (resized width x height of each plate crop)
img_width = 256
img_height = 65
# NOTE(review): stale comment/value -- the model below uses FOUR
# conv + 2x2 max-pool stages, so the actual downsampling factor is 16
# (see new_shape later in the file), not 4. This variable is never
# referenced again in the visible code.
downsample_factor = 4
# How many digits/characters each plate has; assumes every label has
# the same length as the first one -- TODO confirm against the dataset.
plate_length = len(labels[0])
# Mapping characters to integers ('[UNK]' is prepended at index 0)
char_to_num = layers.StringLookup(
    vocabulary=list(characters), mask_token=None
)
# Mapping integers back to the original characters (inverse lookup)
num_to_char = layers.StringLookup(
    vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True
)
print(char_to_num.get_vocabulary())
['[UNK]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'H', 'J', 'K', 'L', 'M', 'N', 'S', 'T', 'V', 'X', 'Y', 'Z']
# Sanity-check the lookup round trip: 'A' encodes to index 11 and
# index 11 decodes back to b'A'. The captured notebook output lines
# were folded into comments so the file parses as Python.
char_to_num('A').numpy()   # 11
num_to_char(11).numpy()    # b'A'
# Shuffle the dataset indices, then carve off the first 90% for training
# and keep the remainder for validation.
train_size = 0.9
images, labels = np.array(images), np.array(labels)

shuffled = np.arange(len(images))
np.random.shuffle(shuffled)

n_train = int(len(images) * train_size)
train_idx, valid_idx = shuffled[:n_train], shuffled[n_train:]

x_train, y_train = images[train_idx], labels[train_idx]
x_valid, y_valid = images[valid_idx], labels[valid_idx]
def encode_single_sample(img_path, label):
    """Load one plate image and encode its label as vocabulary indices.

    Returns an (image, label) tuple: the image is a float32 tensor in
    [0, 1] transposed to (width, height, 1) so that the first axis can
    act as the time dimension, and the label is a vector of integer
    character indices.
    """
    raw = tf.io.read_file(img_path)
    # Decode as a single grayscale channel and scale to the [0, 1] range.
    image = tf.image.convert_image_dtype(
        tf.io.decode_jpeg(raw, channels=1), tf.float32
    )
    image = tf.image.resize(image, [img_height, img_width])
    # Swap height/width so the image width corresponds to the time axis.
    image = tf.transpose(image, perm=[1, 0, 2])
    # Map each character of the label string to its vocabulary index.
    encoded = char_to_num(
        tf.strings.unicode_split(label, input_encoding="UTF-8")
    )
    return image, encoded
# Spot-check the preprocessing on one known sample.
sample = encode_single_sample('/content/iclp-part1/11B39235_15685.jpg', '11B39235')
print(sample[0].shape)
print(sample[1].numpy())
(256, 65, 1) [ 2 2 12 4 10 3 4 6]
# --- Dataset objects ---
# (The original extraction fused the markdown heading "Dataset objects"
# into the first code line, breaking the syntax; restored here.)
# Each pipeline maps the preprocessing function, batches, and prefetches.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = (
    train_dataset.map(
        encode_single_sample, num_parallel_calls=tf.data.AUTOTUNE
    )
    .batch(batch_size)
    .prefetch(buffer_size=tf.data.AUTOTUNE)
)
validation_dataset = tf.data.Dataset.from_tensor_slices((x_valid, y_valid))
validation_dataset = (
    validation_dataset.map(
        encode_single_sample, num_parallel_calls=tf.data.AUTOTUNE
    )
    .batch(batch_size)
    .prefetch(buffer_size=tf.data.AUTOTUNE)
)
# Inspect the shapes of one (images, labels) batch.
batch_imgs, batch_lbls = next(iter(train_dataset))
print(batch_imgs.shape)
print(batch_lbls.shape)
(16, 256, 65, 1) (16, 8)
# Visualize one batch of 16 training samples with their decoded labels.
_, ax = plt.subplots(4, 4, figsize=(10, 5))
for batch in train_dataset.take(1):
    # Use fresh names here: the original code rebound the module-level
    # `images`/`labels` arrays to batch tensors, clobbering the dataset
    # lists defined earlier in the file.
    batch_images = batch[0]
    batch_labels = batch[1]
    for i in range(16):
        img = (batch_images[i] * 255).numpy().astype("uint8")
        label = tf.strings.reduce_join(num_to_char(batch_labels[i])).numpy().decode("utf-8")
        # Images were transposed to (W, H, 1); transpose back for display.
        ax[i // 4, i % 4].imshow(img[:, :, 0].T, cmap="gray")
        ax[i // 4, i % 4].set_title(label)
        ax[i // 4, i % 4].axis("off")
plt.show()
# Four Conv2D blocks, each followed by a 2x2 max pool, so the feature
# maps are 16x smaller than the input. The last conv layer has 256
# filters (NOTE(review): the original comment said 128 and "two max
# pools" -- the code and model summary show four pools and 256 filters).
# Reshape to (time_steps, features) before the 1D head.
new_shape = (img_width // 16, (img_height // 16) * 256)
# Number of output classes: the full lookup vocabulary, including [UNK].
nclasses = len(char_to_num.get_vocabulary())
model=keras.models.Sequential([
    layers.Input(shape=(img_width, img_height, 1)),  # width axis: 256
    layers.Conv2D(32,(3, 3), activation="relu", padding="same"),
    layers.MaxPooling2D((2, 2)),  # width: 128
    layers.Conv2D(64,(3, 3), activation="relu", padding="same"),
    layers.MaxPooling2D((2, 2)),  # width: 64
    layers.Conv2D(128,(3, 3), activation="relu", padding="same"),
    layers.MaxPooling2D((2, 2)),  # width: 32
    layers.Conv2D(256,(3, 3), activation="relu", padding="same"),
    layers.MaxPooling2D((2, 2)),  # width: 16
    # Flatten the (16, 4, 256) maps into 16 time steps of 1024 features.
    layers.Reshape(target_shape=new_shape),
    layers.Dense(128, activation="relu"),
    layers.Dropout(0.2),
    layers.Conv1D(64, 3,activation="relu", padding="same"),
    # Pooling halves the 16 time steps to 8 = plate_length positions.
    layers.MaxPool1D(2),
    layers.Dropout(0.5),
    # Per-position softmax over the character vocabulary.
    layers.Dense(nclasses, activation="softmax")
])
model.summary()
Model: "sequential_16" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_71 (Conv2D) (None, 256, 65, 32) 320 max_pooling2d_68 (MaxPooli (None, 128, 32, 32) 0 ng2D) conv2d_72 (Conv2D) (None, 128, 32, 64) 18496 max_pooling2d_69 (MaxPooli (None, 64, 16, 64) 0 ng2D) conv2d_73 (Conv2D) (None, 64, 16, 128) 73856 max_pooling2d_70 (MaxPooli (None, 32, 8, 128) 0 ng2D) conv2d_74 (Conv2D) (None, 32, 8, 256) 295168 max_pooling2d_71 (MaxPooli (None, 16, 4, 256) 0 ng2D) reshape_16 (Reshape) (None, 16, 1024) 0 dense_31 (Dense) (None, 16, 128) 131200 dropout_30 (Dropout) (None, 16, 128) 0 conv1d_20 (Conv1D) (None, 16, 64) 24640 max_pooling1d_26 (MaxPooli (None, 8, 64) 0 ng1D) dropout_31 (Dropout) (None, 8, 64) 0 dense_32 (Dense) (None, 8, 27) 1755 ================================================================= Total params: 545435 (2.08 MB) Trainable params: 545435 (2.08 MB) Non-trainable params: 0 (0.00 Byte) _________________________________________________________________
# Per-position classification: sparse CE against the integer label vector.
model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(),
    metrics=['accuracy'],
)

# Training schedule: up to 100 epochs; stop after 30 epochs without a
# val_accuracy improvement (restoring the best weights), and cut the
# learning rate by 10x after 10 stale epochs.
epochs = 100
early_stopping_patience = 30
reduce_lr_patience = 10

early_stopping = keras.callbacks.EarlyStopping(
    monitor="val_accuracy",
    patience=early_stopping_patience,
    restore_best_weights=True,
)
reduce_lr = keras.callbacks.ReduceLROnPlateau(
    monitor='val_accuracy', factor=0.1, patience=reduce_lr_patience
)

# Train the model
history = model.fit(
    train_dataset,
    validation_data=validation_dataset,
    epochs=epochs,
    callbacks=[early_stopping, reduce_lr],
)
Epoch 1/100 169/169 [==============================] - 6s 19ms/step - loss: 2.6207 - accuracy: 0.1724 - val_loss: 2.3344 - val_accuracy: 0.2204 - lr: 0.0010 Epoch 2/100 169/169 [==============================] - 3s 16ms/step - loss: 2.1622 - accuracy: 0.2818 - val_loss: 1.5741 - val_accuracy: 0.5038 - lr: 0.0010 Epoch 3/100 169/169 [==============================] - 3s 16ms/step - loss: 1.3502 - accuracy: 0.5630 - val_loss: 0.8482 - val_accuracy: 0.7196 - lr: 0.0010 Epoch 4/100 169/169 [==============================] - 3s 20ms/step - loss: 0.9489 - accuracy: 0.6864 - val_loss: 0.6243 - val_accuracy: 0.8037 - lr: 0.0010 Epoch 5/100 169/169 [==============================] - 3s 16ms/step - loss: 0.7557 - accuracy: 0.7581 - val_loss: 0.4837 - val_accuracy: 0.8525 - lr: 0.0010 Epoch 6/100 169/169 [==============================] - 3s 17ms/step - loss: 0.6132 - accuracy: 0.8108 - val_loss: 0.3654 - val_accuracy: 0.8983 - lr: 0.0010 Epoch 7/100 169/169 [==============================] - 3s 16ms/step - loss: 0.5161 - accuracy: 0.8487 - val_loss: 0.3266 - val_accuracy: 0.9050 - lr: 0.0010 Epoch 8/100 169/169 [==============================] - 4s 22ms/step - loss: 0.4410 - accuracy: 0.8687 - val_loss: 0.2825 - val_accuracy: 0.9229 - lr: 0.0010 Epoch 9/100 169/169 [==============================] - 3s 15ms/step - loss: 0.3762 - accuracy: 0.8911 - val_loss: 0.2655 - val_accuracy: 0.9279 - lr: 0.0010 Epoch 10/100 169/169 [==============================] - 3s 16ms/step - loss: 0.3452 - accuracy: 0.8999 - val_loss: 0.2514 - val_accuracy: 0.9312 - lr: 0.0010 Epoch 11/100 169/169 [==============================] - 3s 17ms/step - loss: 0.3054 - accuracy: 0.9077 - val_loss: 0.2435 - val_accuracy: 0.9321 - lr: 0.0010 Epoch 12/100 169/169 [==============================] - 3s 20ms/step - loss: 0.2792 - accuracy: 0.9168 - val_loss: 0.2247 - val_accuracy: 0.9392 - lr: 0.0010 Epoch 13/100 169/169 [==============================] - 3s 16ms/step - loss: 0.2533 - accuracy: 0.9235 - 
val_loss: 0.2035 - val_accuracy: 0.9450 - lr: 0.0010 Epoch 14/100 169/169 [==============================] - 2s 15ms/step - loss: 0.2335 - accuracy: 0.9314 - val_loss: 0.2208 - val_accuracy: 0.9408 - lr: 0.0010 Epoch 15/100 169/169 [==============================] - 3s 16ms/step - loss: 0.2161 - accuracy: 0.9356 - val_loss: 0.1993 - val_accuracy: 0.9483 - lr: 0.0010 Epoch 16/100 169/169 [==============================] - 4s 22ms/step - loss: 0.1937 - accuracy: 0.9433 - val_loss: 0.2110 - val_accuracy: 0.9408 - lr: 0.0010 Epoch 17/100 169/169 [==============================] - 3s 16ms/step - loss: 0.1948 - accuracy: 0.9419 - val_loss: 0.2114 - val_accuracy: 0.9463 - lr: 0.0010 Epoch 18/100 169/169 [==============================] - 3s 17ms/step - loss: 0.1647 - accuracy: 0.9483 - val_loss: 0.2074 - val_accuracy: 0.9446 - lr: 0.0010 Epoch 19/100 169/169 [==============================] - 4s 21ms/step - loss: 0.1653 - accuracy: 0.9495 - val_loss: 0.2213 - val_accuracy: 0.9404 - lr: 0.0010 Epoch 20/100 169/169 [==============================] - 3s 16ms/step - loss: 0.1613 - accuracy: 0.9505 - val_loss: 0.1965 - val_accuracy: 0.9558 - lr: 0.0010 Epoch 21/100 169/169 [==============================] - 3s 16ms/step - loss: 0.1472 - accuracy: 0.9552 - val_loss: 0.2309 - val_accuracy: 0.9483 - lr: 0.0010 Epoch 22/100 169/169 [==============================] - 3s 15ms/step - loss: 0.1420 - accuracy: 0.9554 - val_loss: 0.1923 - val_accuracy: 0.9513 - lr: 0.0010 Epoch 23/100 169/169 [==============================] - 3s 17ms/step - loss: 0.1272 - accuracy: 0.9597 - val_loss: 0.2112 - val_accuracy: 0.9529 - lr: 0.0010 Epoch 24/100 169/169 [==============================] - 3s 19ms/step - loss: 0.1297 - accuracy: 0.9593 - val_loss: 0.2280 - val_accuracy: 0.9558 - lr: 0.0010 Epoch 25/100 169/169 [==============================] - 3s 16ms/step - loss: 0.1170 - accuracy: 0.9629 - val_loss: 0.2113 - val_accuracy: 0.9475 - lr: 0.0010 Epoch 26/100 169/169 
[==============================] - 3s 19ms/step - loss: 0.1124 - accuracy: 0.9658 - val_loss: 0.2206 - val_accuracy: 0.9517 - lr: 0.0010 Epoch 27/100 169/169 [==============================] - 3s 19ms/step - loss: 0.1146 - accuracy: 0.9636 - val_loss: 0.2167 - val_accuracy: 0.9529 - lr: 0.0010 Epoch 28/100 169/169 [==============================] - 3s 16ms/step - loss: 0.1080 - accuracy: 0.9655 - val_loss: 0.2062 - val_accuracy: 0.9538 - lr: 0.0010 Epoch 29/100 169/169 [==============================] - 4s 22ms/step - loss: 0.1106 - accuracy: 0.9646 - val_loss: 0.2098 - val_accuracy: 0.9558 - lr: 0.0010 Epoch 30/100 169/169 [==============================] - 3s 16ms/step - loss: 0.1100 - accuracy: 0.9644 - val_loss: 0.2416 - val_accuracy: 0.9504 - lr: 0.0010 Epoch 31/100 169/169 [==============================] - 3s 16ms/step - loss: 0.0846 - accuracy: 0.9732 - val_loss: 0.2300 - val_accuracy: 0.9567 - lr: 1.0000e-04 Epoch 32/100 169/169 [==============================] - 3s 15ms/step - loss: 0.0706 - accuracy: 0.9768 - val_loss: 0.2318 - val_accuracy: 0.9558 - lr: 1.0000e-04 Epoch 33/100 169/169 [==============================] - 3s 17ms/step - loss: 0.0653 - accuracy: 0.9785 - val_loss: 0.2409 - val_accuracy: 0.9567 - lr: 1.0000e-04 Epoch 34/100 169/169 [==============================] - 3s 15ms/step - loss: 0.0642 - accuracy: 0.9768 - val_loss: 0.2328 - val_accuracy: 0.9567 - lr: 1.0000e-04 Epoch 35/100 169/169 [==============================] - 3s 19ms/step - loss: 0.0546 - accuracy: 0.9808 - val_loss: 0.2397 - val_accuracy: 0.9558 - lr: 1.0000e-04 Epoch 36/100 169/169 [==============================] - 3s 16ms/step - loss: 0.0552 - accuracy: 0.9809 - val_loss: 0.2447 - val_accuracy: 0.9563 - lr: 1.0000e-04 Epoch 37/100 169/169 [==============================] - 3s 15ms/step - loss: 0.0559 - accuracy: 0.9807 - val_loss: 0.2377 - val_accuracy: 0.9538 - lr: 1.0000e-04 Epoch 38/100 169/169 [==============================] - 3s 16ms/step - loss: 0.0594 - accuracy: 
0.9787 - val_loss: 0.2402 - val_accuracy: 0.9563 - lr: 1.0000e-04 Epoch 39/100 169/169 [==============================] - 3s 15ms/step - loss: 0.0557 - accuracy: 0.9808 - val_loss: 0.2477 - val_accuracy: 0.9567 - lr: 1.0000e-04 Epoch 40/100 169/169 [==============================] - 3s 15ms/step - loss: 0.0531 - accuracy: 0.9806 - val_loss: 0.2553 - val_accuracy: 0.9542 - lr: 1.0000e-04 Epoch 41/100 169/169 [==============================] - 3s 20ms/step - loss: 0.0536 - accuracy: 0.9823 - val_loss: 0.2514 - val_accuracy: 0.9579 - lr: 1.0000e-04 Epoch 42/100 169/169 [==============================] - 3s 15ms/step - loss: 0.0504 - accuracy: 0.9823 - val_loss: 0.2535 - val_accuracy: 0.9583 - lr: 1.0000e-04 Epoch 43/100 169/169 [==============================] - 3s 15ms/step - loss: 0.0512 - accuracy: 0.9816 - val_loss: 0.2450 - val_accuracy: 0.9558 - lr: 1.0000e-04 Epoch 44/100 169/169 [==============================] - 3s 16ms/step - loss: 0.0516 - accuracy: 0.9816 - val_loss: 0.2471 - val_accuracy: 0.9558 - lr: 1.0000e-04 Epoch 45/100 169/169 [==============================] - 3s 19ms/step - loss: 0.0504 - accuracy: 0.9826 - val_loss: 0.2459 - val_accuracy: 0.9596 - lr: 1.0000e-04 Epoch 46/100 169/169 [==============================] - 3s 16ms/step - loss: 0.0460 - accuracy: 0.9841 - val_loss: 0.2599 - val_accuracy: 0.9588 - lr: 1.0000e-04 Epoch 47/100 169/169 [==============================] - 3s 15ms/step - loss: 0.0466 - accuracy: 0.9839 - val_loss: 0.2720 - val_accuracy: 0.9554 - lr: 1.0000e-04 Epoch 48/100 169/169 [==============================] - 3s 21ms/step - loss: 0.0436 - accuracy: 0.9844 - val_loss: 0.2661 - val_accuracy: 0.9575 - lr: 1.0000e-04 Epoch 49/100 169/169 [==============================] - 2s 15ms/step - loss: 0.0453 - accuracy: 0.9840 - val_loss: 0.2658 - val_accuracy: 0.9567 - lr: 1.0000e-04 Epoch 50/100 169/169 [==============================] - 2s 15ms/step - loss: 0.0431 - accuracy: 0.9843 - val_loss: 0.2687 - val_accuracy: 0.9575 - lr: 
1.0000e-04 Epoch 51/100 169/169 [==============================] - 3s 16ms/step - loss: 0.0444 - accuracy: 0.9842 - val_loss: 0.2736 - val_accuracy: 0.9571 - lr: 1.0000e-04 Epoch 52/100 169/169 [==============================] - 3s 15ms/step - loss: 0.0446 - accuracy: 0.9839 - val_loss: 0.2735 - val_accuracy: 0.9563 - lr: 1.0000e-04 Epoch 53/100 169/169 [==============================] - 3s 15ms/step - loss: 0.0461 - accuracy: 0.9840 - val_loss: 0.2750 - val_accuracy: 0.9571 - lr: 1.0000e-04 Epoch 54/100 169/169 [==============================] - 3s 16ms/step - loss: 0.0434 - accuracy: 0.9853 - val_loss: 0.2595 - val_accuracy: 0.9571 - lr: 1.0000e-04 Epoch 55/100 169/169 [==============================] - 3s 15ms/step - loss: 0.0433 - accuracy: 0.9847 - val_loss: 0.2737 - val_accuracy: 0.9571 - lr: 1.0000e-04 Epoch 56/100 169/169 [==============================] - 3s 17ms/step - loss: 0.0411 - accuracy: 0.9843 - val_loss: 0.2730 - val_accuracy: 0.9575 - lr: 1.0000e-05 Epoch 57/100 169/169 [==============================] - 3s 21ms/step - loss: 0.0383 - accuracy: 0.9864 - val_loss: 0.2713 - val_accuracy: 0.9588 - lr: 1.0000e-05 Epoch 58/100 169/169 [==============================] - 3s 16ms/step - loss: 0.0412 - accuracy: 0.9851 - val_loss: 0.2694 - val_accuracy: 0.9592 - lr: 1.0000e-05 Epoch 59/100 169/169 [==============================] - 3s 16ms/step - loss: 0.0404 - accuracy: 0.9854 - val_loss: 0.2671 - val_accuracy: 0.9600 - lr: 1.0000e-05 Epoch 60/100 169/169 [==============================] - 3s 20ms/step - loss: 0.0394 - accuracy: 0.9858 - val_loss: 0.2685 - val_accuracy: 0.9600 - lr: 1.0000e-05 Epoch 61/100 169/169 [==============================] - 3s 15ms/step - loss: 0.0399 - accuracy: 0.9852 - val_loss: 0.2689 - val_accuracy: 0.9600 - lr: 1.0000e-05 Epoch 62/100 169/169 [==============================] - 3s 15ms/step - loss: 0.0365 - accuracy: 0.9874 - val_loss: 0.2657 - val_accuracy: 0.9608 - lr: 1.0000e-05 Epoch 63/100 169/169 
[==============================] - 3s 18ms/step - loss: 0.0389 - accuracy: 0.9868 - val_loss: 0.2675 - val_accuracy: 0.9604 - lr: 1.0000e-05 Epoch 64/100 169/169 [==============================] - 3s 15ms/step - loss: 0.0399 - accuracy: 0.9861 - val_loss: 0.2666 - val_accuracy: 0.9608 - lr: 1.0000e-05 Epoch 65/100 169/169 [==============================] - 3s 16ms/step - loss: 0.0408 - accuracy: 0.9857 - val_loss: 0.2649 - val_accuracy: 0.9604 - lr: 1.0000e-05 Epoch 66/100 169/169 [==============================] - 3s 20ms/step - loss: 0.0401 - accuracy: 0.9859 - val_loss: 0.2631 - val_accuracy: 0.9604 - lr: 1.0000e-05 Epoch 67/100 169/169 [==============================] - 3s 17ms/step - loss: 0.0400 - accuracy: 0.9848 - val_loss: 0.2644 - val_accuracy: 0.9608 - lr: 1.0000e-05 Epoch 68/100 169/169 [==============================] - 3s 15ms/step - loss: 0.0416 - accuracy: 0.9852 - val_loss: 0.2639 - val_accuracy: 0.9608 - lr: 1.0000e-05 Epoch 69/100 169/169 [==============================] - 3s 15ms/step - loss: 0.0347 - accuracy: 0.9868 - val_loss: 0.2667 - val_accuracy: 0.9604 - lr: 1.0000e-05 Epoch 70/100 169/169 [==============================] - 3s 15ms/step - loss: 0.0398 - accuracy: 0.9850 - val_loss: 0.2678 - val_accuracy: 0.9608 - lr: 1.0000e-05 Epoch 71/100 169/169 [==============================] - 3s 17ms/step - loss: 0.0366 - accuracy: 0.9858 - val_loss: 0.2702 - val_accuracy: 0.9600 - lr: 1.0000e-05 Epoch 72/100 169/169 [==============================] - 3s 17ms/step - loss: 0.0399 - accuracy: 0.9863 - val_loss: 0.2705 - val_accuracy: 0.9588 - lr: 1.0000e-05 Epoch 73/100 169/169 [==============================] - 3s 16ms/step - loss: 0.0375 - accuracy: 0.9872 - val_loss: 0.2703 - val_accuracy: 0.9588 - lr: 1.0000e-06 Epoch 74/100 169/169 [==============================] - 3s 16ms/step - loss: 0.0375 - accuracy: 0.9875 - val_loss: 0.2703 - val_accuracy: 0.9592 - lr: 1.0000e-06 Epoch 75/100 169/169 [==============================] - 4s 22ms/step - loss: 
0.0366 - accuracy: 0.9866 - val_loss: 0.2701 - val_accuracy: 0.9592 - lr: 1.0000e-06 Epoch 76/100 169/169 [==============================] - 2s 14ms/step - loss: 0.0362 - accuracy: 0.9874 - val_loss: 0.2703 - val_accuracy: 0.9592 - lr: 1.0000e-06 Epoch 77/100 169/169 [==============================] - 2s 14ms/step - loss: 0.0436 - accuracy: 0.9849 - val_loss: 0.2700 - val_accuracy: 0.9592 - lr: 1.0000e-06 Epoch 78/100 169/169 [==============================] - 3s 16ms/step - loss: 0.0377 - accuracy: 0.9866 - val_loss: 0.2699 - val_accuracy: 0.9596 - lr: 1.0000e-06 Epoch 79/100 169/169 [==============================] - 3s 17ms/step - loss: 0.0390 - accuracy: 0.9862 - val_loss: 0.2696 - val_accuracy: 0.9596 - lr: 1.0000e-06 Epoch 80/100 169/169 [==============================] - 3s 15ms/step - loss: 0.0362 - accuracy: 0.9872 - val_loss: 0.2696 - val_accuracy: 0.9596 - lr: 1.0000e-06 Epoch 81/100 169/169 [==============================] - 3s 16ms/step - loss: 0.0387 - accuracy: 0.9860 - val_loss: 0.2694 - val_accuracy: 0.9596 - lr: 1.0000e-06 Epoch 82/100 169/169 [==============================] - 3s 18ms/step - loss: 0.0373 - accuracy: 0.9864 - val_loss: 0.2693 - val_accuracy: 0.9600 - lr: 1.0000e-06 Epoch 83/100 169/169 [==============================] - 3s 17ms/step - loss: 0.0396 - accuracy: 0.9856 - val_loss: 0.2692 - val_accuracy: 0.9600 - lr: 1.0000e-07 Epoch 84/100 169/169 [==============================] - 3s 16ms/step - loss: 0.0348 - accuracy: 0.9881 - val_loss: 0.2692 - val_accuracy: 0.9600 - lr: 1.0000e-07 Epoch 85/100 169/169 [==============================] - 3s 17ms/step - loss: 0.0372 - accuracy: 0.9858 - val_loss: 0.2693 - val_accuracy: 0.9600 - lr: 1.0000e-07 Epoch 86/100 169/169 [==============================] - 3s 17ms/step - loss: 0.0397 - accuracy: 0.9861 - val_loss: 0.2693 - val_accuracy: 0.9600 - lr: 1.0000e-07 Epoch 87/100 169/169 [==============================] - 3s 18ms/step - loss: 0.0381 - accuracy: 0.9855 - val_loss: 0.2693 - 
val_accuracy: 0.9600 - lr: 1.0000e-07 Epoch 88/100 169/169 [==============================] - 3s 16ms/step - loss: 0.0383 - accuracy: 0.9860 - val_loss: 0.2693 - val_accuracy: 0.9600 - lr: 1.0000e-07 Epoch 89/100 169/169 [==============================] - 3s 15ms/step - loss: 0.0368 - accuracy: 0.9860 - val_loss: 0.2693 - val_accuracy: 0.9600 - lr: 1.0000e-07 Epoch 90/100 169/169 [==============================] - 3s 19ms/step - loss: 0.0369 - accuracy: 0.9864 - val_loss: 0.2693 - val_accuracy: 0.9600 - lr: 1.0000e-07 Epoch 91/100 169/169 [==============================] - 3s 18ms/step - loss: 0.0367 - accuracy: 0.9867 - val_loss: 0.2693 - val_accuracy: 0.9600 - lr: 1.0000e-07 Epoch 92/100 169/169 [==============================] - 3s 17ms/step - loss: 0.0384 - accuracy: 0.9862 - val_loss: 0.2692 - val_accuracy: 0.9600 - lr: 1.0000e-07
# Final [loss, accuracy] on the held-out validation split.
model.evaluate(validation_dataset)
19/19 [==============================] - 0s 11ms/step - loss: 0.2657 - accuracy: 0.9608
[0.26570412516593933, 0.9608333110809326]
# Let's check results on some validation samples
for batch in validation_dataset.take(1):
    batch_images = batch[0]
    batch_labels = batch[1]

    # Greedy per-position decoding: argmax over the class axis, then
    # join the decoded characters back into a string.
    preds = model.predict(batch_images, verbose=0).argmax(axis=-1)
    pred_texts = [
        tf.strings.reduce_join(num_to_char(p)).numpy().decode("utf-8")
        for p in preds
    ]
    orig_texts = [
        tf.strings.reduce_join(num_to_char(label)).numpy().decode("utf-8")
        for label in batch_labels
    ]

    _, ax = plt.subplots(4, 4, figsize=(15, 5))
    for i in range(len(pred_texts)):
        # Undo the width/height transpose for display.
        img = (batch_images[i, :, :, 0] * 255).numpy().astype(np.uint8).T
        ax[i // 4, i % 4].imshow(img, cmap="gray")
        ax[i // 4, i % 4].set_title(f"Prediction: {pred_texts[i]}")
        ax[i // 4, i % 4].axis("off")
    plt.show()