!wget https://raw.githubusercontent.com/Alireza-Akhavan/deeplearning-tensorflow2-notebooks/master/dataset.py
!wget https://raw.githubusercontent.com/Alireza-Akhavan/deeplearning-tensorflow2-notebooks/master/dataset/Data_hoda_full.mat -P dataset
--2023-12-02 15:07:39-- https://raw.githubusercontent.com/Alireza-Akhavan/deeplearning-tensorflow2-notebooks/master/dataset.py Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ... Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 917 [text/plain] Saving to: ‘dataset.py’ dataset.py 100%[===================>] 917 --.-KB/s in 0s 2023-12-02 15:07:39 (52.2 MB/s) - ‘dataset.py’ saved [917/917] --2023-12-02 15:07:40-- https://raw.githubusercontent.com/Alireza-Akhavan/deeplearning-tensorflow2-notebooks/master/dataset/Data_hoda_full.mat Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.109.133, 185.199.110.133, 185.199.108.133, ... Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.109.133|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 3989009 (3.8M) [application/octet-stream] Saving to: ‘dataset/Data_hoda_full.mat’ Data_hoda_full.mat 100%[===================>] 3.80M --.-KB/s in 0.01s 2023-12-02 15:07:40 (254 MB/s) - ‘dataset/Data_hoda_full.mat’ saved [3989009/3989009]
!pip install onnx==1.14.1
!pip install tf2onnx onnxruntime
Collecting onnx==1.14.1. Downloading onnx-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (14.6 MB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 14.6/14.6 MB 34.8 MB/s eta 0:00:00 Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from onnx==1.14.1.) (1.23.5) Requirement already satisfied: protobuf>=3.20.2 in /usr/local/lib/python3.10/dist-packages (from onnx==1.14.1.) (3.20.3) Requirement already satisfied: typing-extensions>=3.6.2.1 in /usr/local/lib/python3.10/dist-packages (from onnx==1.14.1.) (4.5.0) Installing collected packages: onnx Successfully installed onnx-1.14.1 Collecting tf2onnx Downloading tf2onnx-1.15.1-py3-none-any.whl (454 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 454.7/454.7 kB 7.4 MB/s eta 0:00:00 Collecting onnxruntime Downloading onnxruntime-1.16.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (6.4 MB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 6.4/6.4 MB 75.4 MB/s eta 0:00:00 Requirement already satisfied: numpy>=1.14.1 in /usr/local/lib/python3.10/dist-packages (from tf2onnx) (1.23.5) Requirement already satisfied: onnx>=1.4.1 in /usr/local/lib/python3.10/dist-packages (from tf2onnx) (1.14.1) Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from tf2onnx) (2.31.0) Requirement already satisfied: six in /usr/local/lib/python3.10/dist-packages (from tf2onnx) (1.16.0) Requirement already satisfied: flatbuffers>=1.12 in /usr/local/lib/python3.10/dist-packages (from tf2onnx) (23.5.26) Requirement already satisfied: protobuf~=3.20.2 in /usr/local/lib/python3.10/dist-packages (from tf2onnx) (3.20.3) Collecting coloredlogs (from onnxruntime) Downloading coloredlogs-15.0.1-py2.py3-none-any.whl (46 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 46.0/46.0 kB 6.1 MB/s eta 0:00:00 Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from onnxruntime) (23.2) Requirement already satisfied: sympy in 
/usr/local/lib/python3.10/dist-packages (from onnxruntime) (1.12) Requirement already satisfied: typing-extensions>=3.6.2.1 in /usr/local/lib/python3.10/dist-packages (from onnx>=1.4.1->tf2onnx) (4.5.0) Collecting humanfriendly>=9.1 (from coloredlogs->onnxruntime) Downloading humanfriendly-10.0-py2.py3-none-any.whl (86 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 86.8/86.8 kB 3.5 MB/s eta 0:00:00 Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->tf2onnx) (3.3.2) Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->tf2onnx) (3.6) Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->tf2onnx) (2.0.7) Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->tf2onnx) (2023.11.17) Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->onnxruntime) (1.3.0) Installing collected packages: humanfriendly, tf2onnx, coloredlogs, onnxruntime Successfully installed coloredlogs-15.0.1 humanfriendly-10.0 onnxruntime-1.16.3 tf2onnx-1.15.1
# 1. Import libraries and modules
import tensorflow as tf
from tensorflow import keras
from keras import layers
import numpy as np
from dataset import load_hoda
import matplotlib.pyplot as plt
# Load the pre-shuffled HODA handwritten-digit dataset (rows are flattened
# 784-dim images; labels are integer digit classes).
X_train, y_train, X_test, y_test = load_hoda(
    training_sample_size=5500,
    test_sample_size=400,
    size=28,
)

# Restore the 2-D image layout Conv2D expects: (n, 784) -> (n, 28, 28, 1).
img_shape = (-1, 28, 28, 1)
X_train = X_train.astype('float32').reshape(img_shape)
X_test = X_test.astype('float32').reshape(img_shape)
# Define residual block
def residual_block(x, filters):
    """Two 3x3 Conv+BN layers with a skip connection, ReLU after the add.

    Args:
        x: 4-D feature-map tensor (batch, height, width, channels).
        filters: Number of output channels for both convolutions.

    Returns:
        Tensor with `filters` channels and the same spatial size as `x`
        (both convolutions use padding='same').
    """
    y = layers.Conv2D(filters, (3, 3), padding='same')(x)
    y = layers.BatchNormalization()(y)
    y = layers.Activation('relu')(y)
    y = layers.Conv2D(filters, (3, 3), padding='same')(y)
    y = layers.BatchNormalization()(y)
    # Fix/generalization: the original `add([x, y])` fails whenever the input
    # channel count differs from `filters`. Use a 1x1 projection shortcut
    # (standard ResNet option B) in that case; when the channel counts already
    # match — as in every existing call in this file — behavior is unchanged.
    shortcut = x
    if x.shape[-1] != filters:
        shortcut = layers.Conv2D(filters, (1, 1), padding='same')(x)
        shortcut = layers.BatchNormalization()(shortcut)
    # Add the residual connection
    y = layers.add([shortcut, y])
    y = layers.Activation('relu')(y)
    return y
# Build the classifier with the Keras functional API: a conv/pool stem,
# two residual stages, then global average pooling into a softmax head.
inputs = keras.Input(shape=(28, 28, 1))

# Map raw pixel values [0, 255] into [-1, 1].
net = layers.Rescaling(1./127.5, offset=-1)(inputs)

# Stage 1: 32-filter conv stem followed by a residual block.
net = layers.Conv2D(32, (3, 3), activation='relu')(net)
net = layers.BatchNormalization()(net)
net = layers.MaxPooling2D((2, 2))(net)
net = residual_block(net, filters=32)

# Stage 2: widen to 64 filters, pool, another residual block.
net = layers.Conv2D(64, (3, 3), activation='relu')(net)
net = layers.BatchNormalization()(net)
net = layers.MaxPooling2D((2, 2))(net)
net = residual_block(net, filters=64)

# Head: pool the spatial dimensions away, regularize, classify 10 digits.
net = layers.BatchNormalization()(net)
net = layers.GlobalAveragePooling2D()(net)
net = layers.Dropout(0.5)(net)
outputs = layers.Dense(10, activation='softmax')(net)

model = keras.Model(inputs, outputs)
keras.utils.plot_model(model, show_shapes=True)
# Training callbacks, both monitoring validation accuracy: stop after 30
# stagnant epochs, and cut the learning rate 10x after 10 stagnant epochs.
early_stopping_patience = 30
reduce_lr_patience = 10

early_stopping = keras.callbacks.EarlyStopping(
    monitor="val_accuracy",
    patience=early_stopping_patience,
    # Roll back to the best-validation weights instead of the last epoch's.
    restore_best_weights=True,
)
reduce_lr = keras.callbacks.ReduceLROnPlateau(
    monitor='val_accuracy',
    factor=0.1,
    patience=reduce_lr_patience,
)
# 6. Compile: labels are integer class ids, so use the sparse variant of
# categorical cross-entropy.
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)

# 7. Train, holding out the last 10% of the training data for validation;
# the callbacks above handle LR decay and early stopping.
history = model.fit(
    X_train,
    y_train,
    batch_size=256,
    epochs=100,
    validation_split=0.1,
    callbacks=[early_stopping, reduce_lr],
)
Epoch 1/100 20/20 [==============================] - 18s 51ms/step - loss: 1.1530 - accuracy: 0.6956 - val_loss: 2.0021 - val_accuracy: 0.5782 - lr: 0.0010 Epoch 2/100 20/20 [==============================] - 0s 16ms/step - loss: 0.3748 - accuracy: 0.9218 - val_loss: 2.1812 - val_accuracy: 0.1218 - lr: 0.0010 Epoch 3/100 20/20 [==============================] - 0s 16ms/step - loss: 0.2067 - accuracy: 0.9596 - val_loss: 2.6175 - val_accuracy: 0.1182 - lr: 0.0010 Epoch 4/100 20/20 [==============================] - 0s 16ms/step - loss: 0.1373 - accuracy: 0.9715 - val_loss: 2.8976 - val_accuracy: 0.1182 - lr: 0.0010 Epoch 5/100 20/20 [==============================] - 0s 16ms/step - loss: 0.0990 - accuracy: 0.9828 - val_loss: 3.6635 - val_accuracy: 0.1182 - lr: 0.0010 Epoch 6/100 20/20 [==============================] - 0s 16ms/step - loss: 0.0808 - accuracy: 0.9832 - val_loss: 4.0595 - val_accuracy: 0.1182 - lr: 0.0010 Epoch 7/100 20/20 [==============================] - 0s 16ms/step - loss: 0.0679 - accuracy: 0.9859 - val_loss: 3.9929 - val_accuracy: 0.1182 - lr: 0.0010 Epoch 8/100 20/20 [==============================] - 0s 16ms/step - loss: 0.0474 - accuracy: 0.9937 - val_loss: 4.0378 - val_accuracy: 0.1582 - lr: 0.0010 Epoch 9/100 20/20 [==============================] - 0s 15ms/step - loss: 0.0375 - accuracy: 0.9945 - val_loss: 4.3725 - val_accuracy: 0.1182 - lr: 0.0010 Epoch 10/100 20/20 [==============================] - 0s 16ms/step - loss: 0.0610 - accuracy: 0.9867 - val_loss: 3.5772 - val_accuracy: 0.1673 - lr: 0.0010 Epoch 11/100 20/20 [==============================] - 0s 16ms/step - loss: 0.0371 - accuracy: 0.9935 - val_loss: 3.9867 - val_accuracy: 0.1309 - lr: 0.0010 Epoch 12/100 20/20 [==============================] - 0s 16ms/step - loss: 0.0245 - accuracy: 0.9984 - val_loss: 3.9251 - val_accuracy: 0.1564 - lr: 1.0000e-04 Epoch 13/100 20/20 [==============================] - 0s 15ms/step - loss: 0.0199 - accuracy: 1.0000 - val_loss: 3.7438 - 
val_accuracy: 0.1636 - lr: 1.0000e-04 Epoch 14/100 20/20 [==============================] - 0s 15ms/step - loss: 0.0202 - accuracy: 0.9994 - val_loss: 3.2837 - val_accuracy: 0.2036 - lr: 1.0000e-04 Epoch 15/100 20/20 [==============================] - 0s 18ms/step - loss: 0.0183 - accuracy: 0.9996 - val_loss: 2.9039 - val_accuracy: 0.2236 - lr: 1.0000e-04 Epoch 16/100 20/20 [==============================] - 0s 18ms/step - loss: 0.0170 - accuracy: 0.9994 - val_loss: 2.4366 - val_accuracy: 0.2745 - lr: 1.0000e-04 Epoch 17/100 20/20 [==============================] - 0s 19ms/step - loss: 0.0176 - accuracy: 0.9992 - val_loss: 1.9763 - val_accuracy: 0.3182 - lr: 1.0000e-04 Epoch 18/100 20/20 [==============================] - 0s 17ms/step - loss: 0.0165 - accuracy: 0.9994 - val_loss: 1.4992 - val_accuracy: 0.4345 - lr: 1.0000e-04 Epoch 19/100 20/20 [==============================] - 0s 17ms/step - loss: 0.0161 - accuracy: 0.9998 - val_loss: 1.0756 - val_accuracy: 0.5891 - lr: 1.0000e-04 Epoch 20/100 20/20 [==============================] - 0s 18ms/step - loss: 0.0155 - accuracy: 0.9996 - val_loss: 0.7563 - val_accuracy: 0.7145 - lr: 1.0000e-04 Epoch 21/100 20/20 [==============================] - 0s 17ms/step - loss: 0.0154 - accuracy: 0.9996 - val_loss: 0.5107 - val_accuracy: 0.8109 - lr: 1.0000e-04 Epoch 22/100 20/20 [==============================] - 0s 17ms/step - loss: 0.0154 - accuracy: 0.9996 - val_loss: 0.3643 - val_accuracy: 0.8745 - lr: 1.0000e-04 Epoch 23/100 20/20 [==============================] - 0s 18ms/step - loss: 0.0154 - accuracy: 0.9990 - val_loss: 0.2435 - val_accuracy: 0.9182 - lr: 1.0000e-04 Epoch 24/100 20/20 [==============================] - 0s 18ms/step - loss: 0.0145 - accuracy: 0.9994 - val_loss: 0.1622 - val_accuracy: 0.9455 - lr: 1.0000e-04 Epoch 25/100 20/20 [==============================] - 0s 17ms/step - loss: 0.0138 - accuracy: 0.9998 - val_loss: 0.1170 - val_accuracy: 0.9636 - lr: 1.0000e-04 Epoch 26/100 20/20 
[==============================] - 0s 15ms/step - loss: 0.0146 - accuracy: 0.9992 - val_loss: 0.0829 - val_accuracy: 0.9745 - lr: 1.0000e-04 Epoch 27/100 20/20 [==============================] - 0s 15ms/step - loss: 0.0135 - accuracy: 0.9996 - val_loss: 0.0636 - val_accuracy: 0.9818 - lr: 1.0000e-04 Epoch 28/100 20/20 [==============================] - 0s 15ms/step - loss: 0.0138 - accuracy: 0.9996 - val_loss: 0.0555 - val_accuracy: 0.9855 - lr: 1.0000e-04 Epoch 29/100 20/20 [==============================] - 0s 15ms/step - loss: 0.0130 - accuracy: 0.9996 - val_loss: 0.0452 - val_accuracy: 0.9873 - lr: 1.0000e-04 Epoch 30/100 20/20 [==============================] - 0s 16ms/step - loss: 0.0130 - accuracy: 0.9994 - val_loss: 0.0385 - val_accuracy: 0.9891 - lr: 1.0000e-04 Epoch 31/100 20/20 [==============================] - 0s 16ms/step - loss: 0.0121 - accuracy: 0.9998 - val_loss: 0.0364 - val_accuracy: 0.9927 - lr: 1.0000e-04 Epoch 32/100 20/20 [==============================] - 0s 15ms/step - loss: 0.0117 - accuracy: 0.9998 - val_loss: 0.0316 - val_accuracy: 0.9927 - lr: 1.0000e-04 Epoch 33/100 20/20 [==============================] - 0s 14ms/step - loss: 0.0111 - accuracy: 0.9996 - val_loss: 0.0297 - val_accuracy: 0.9927 - lr: 1.0000e-04 Epoch 34/100 20/20 [==============================] - 0s 14ms/step - loss: 0.0112 - accuracy: 1.0000 - val_loss: 0.0311 - val_accuracy: 0.9927 - lr: 1.0000e-04 Epoch 35/100 20/20 [==============================] - 0s 15ms/step - loss: 0.0117 - accuracy: 0.9992 - val_loss: 0.0286 - val_accuracy: 0.9927 - lr: 1.0000e-04 Epoch 36/100 20/20 [==============================] - 0s 15ms/step - loss: 0.0110 - accuracy: 0.9994 - val_loss: 0.0271 - val_accuracy: 0.9927 - lr: 1.0000e-04 Epoch 37/100 20/20 [==============================] - 0s 14ms/step - loss: 0.0103 - accuracy: 0.9998 - val_loss: 0.0272 - val_accuracy: 0.9927 - lr: 1.0000e-04 Epoch 38/100 20/20 [==============================] - 0s 14ms/step - loss: 0.0102 - accuracy: 
0.9996 - val_loss: 0.0270 - val_accuracy: 0.9927 - lr: 1.0000e-04 Epoch 39/100 20/20 [==============================] - 0s 15ms/step - loss: 0.0100 - accuracy: 1.0000 - val_loss: 0.0272 - val_accuracy: 0.9927 - lr: 1.0000e-04 Epoch 40/100 20/20 [==============================] - 0s 14ms/step - loss: 0.0093 - accuracy: 1.0000 - val_loss: 0.0254 - val_accuracy: 0.9927 - lr: 1.0000e-04 Epoch 41/100 20/20 [==============================] - 0s 15ms/step - loss: 0.0094 - accuracy: 0.9998 - val_loss: 0.0262 - val_accuracy: 0.9909 - lr: 1.0000e-04 Epoch 42/100 20/20 [==============================] - 0s 15ms/step - loss: 0.0090 - accuracy: 0.9998 - val_loss: 0.0261 - val_accuracy: 0.9909 - lr: 1.0000e-05 Epoch 43/100 20/20 [==============================] - 0s 14ms/step - loss: 0.0095 - accuracy: 1.0000 - val_loss: 0.0257 - val_accuracy: 0.9909 - lr: 1.0000e-05 Epoch 44/100 20/20 [==============================] - 0s 15ms/step - loss: 0.0100 - accuracy: 0.9998 - val_loss: 0.0256 - val_accuracy: 0.9909 - lr: 1.0000e-05 Epoch 45/100 20/20 [==============================] - 0s 14ms/step - loss: 0.0085 - accuracy: 1.0000 - val_loss: 0.0254 - val_accuracy: 0.9909 - lr: 1.0000e-05 Epoch 46/100 20/20 [==============================] - 0s 15ms/step - loss: 0.0096 - accuracy: 0.9998 - val_loss: 0.0252 - val_accuracy: 0.9927 - lr: 1.0000e-05 Epoch 47/100 20/20 [==============================] - 0s 14ms/step - loss: 0.0091 - accuracy: 1.0000 - val_loss: 0.0254 - val_accuracy: 0.9927 - lr: 1.0000e-05 Epoch 48/100 20/20 [==============================] - 0s 15ms/step - loss: 0.0094 - accuracy: 1.0000 - val_loss: 0.0258 - val_accuracy: 0.9927 - lr: 1.0000e-05 Epoch 49/100 20/20 [==============================] - 0s 15ms/step - loss: 0.0093 - accuracy: 1.0000 - val_loss: 0.0259 - val_accuracy: 0.9927 - lr: 1.0000e-05 Epoch 50/100 20/20 [==============================] - 0s 14ms/step - loss: 0.0090 - accuracy: 0.9994 - val_loss: 0.0261 - val_accuracy: 0.9927 - lr: 1.0000e-05 Epoch 51/100 
20/20 [==============================] - 0s 15ms/step - loss: 0.0093 - accuracy: 0.9996 - val_loss: 0.0261 - val_accuracy: 0.9927 - lr: 1.0000e-05 Epoch 52/100 20/20 [==============================] - 0s 14ms/step - loss: 0.0095 - accuracy: 0.9994 - val_loss: 0.0261 - val_accuracy: 0.9927 - lr: 1.0000e-06 Epoch 53/100 20/20 [==============================] - 0s 15ms/step - loss: 0.0093 - accuracy: 1.0000 - val_loss: 0.0260 - val_accuracy: 0.9927 - lr: 1.0000e-06 Epoch 54/100 20/20 [==============================] - 0s 14ms/step - loss: 0.0092 - accuracy: 0.9996 - val_loss: 0.0260 - val_accuracy: 0.9927 - lr: 1.0000e-06 Epoch 55/100 20/20 [==============================] - 0s 15ms/step - loss: 0.0093 - accuracy: 1.0000 - val_loss: 0.0260 - val_accuracy: 0.9927 - lr: 1.0000e-06 Epoch 56/100 20/20 [==============================] - 0s 15ms/step - loss: 0.0098 - accuracy: 0.9996 - val_loss: 0.0260 - val_accuracy: 0.9927 - lr: 1.0000e-06 Epoch 57/100 20/20 [==============================] - 0s 14ms/step - loss: 0.0089 - accuracy: 1.0000 - val_loss: 0.0259 - val_accuracy: 0.9927 - lr: 1.0000e-06 Epoch 58/100 20/20 [==============================] - 0s 15ms/step - loss: 0.0095 - accuracy: 0.9996 - val_loss: 0.0259 - val_accuracy: 0.9927 - lr: 1.0000e-06 Epoch 59/100 20/20 [==============================] - 0s 20ms/step - loss: 0.0091 - accuracy: 0.9998 - val_loss: 0.0258 - val_accuracy: 0.9927 - lr: 1.0000e-06 Epoch 60/100 20/20 [==============================] - 0s 17ms/step - loss: 0.0096 - accuracy: 0.9998 - val_loss: 0.0259 - val_accuracy: 0.9927 - lr: 1.0000e-06 Epoch 61/100 20/20 [==============================] - 0s 19ms/step - loss: 0.0091 - accuracy: 0.9996 - val_loss: 0.0259 - val_accuracy: 0.9927 - lr: 1.0000e-06
# Report [loss, accuracy] on the held-out test set.
model.evaluate(X_test, y_test)
13/13 [==============================] - 0s 13ms/step - loss: 0.0474 - accuracy: 0.9825
[0.04735103249549866, 0.9825000166893005]
# Persist the trained model in the native Keras format, then reload it to
# confirm it round-trips before the ONNX export below.
model.save('persian_number_recognizer.keras')
model = keras.models.load_model('persian_number_recognizer.keras')
# Reference predictions used later to validate the ONNX export.
keras_prediction = model.predict(X_test)
13/13 [==============================] - 0s 4ms/step
import tf2onnx
import onnxruntime as rt
import tensorflow as tf

# Export the Keras model to ONNX, pinning an explicit NHWC float32 input
# signature so the graph input is named "input".
spec = (tf.TensorSpec((None, 28, 28, 1), tf.float32, name="input"),)
model_proto, _ = tf2onnx.convert.from_keras(
    model, input_signature=spec, output_path="export.onnx"
)

# Collect the graph's output tensor names for onnxruntime's run() call.
output_names = []
for graph_output in model_proto.graph.output:
    output_names.append(graph_output.name)
WARNING:tf2onnx.tf_loader:Could not search for non-variable resources. Concrete function internal representation may have changed.
# Execute the exported graph on CPU through onnxruntime.
providers = ['CPUExecutionProvider']
m = rt.InferenceSession('export.onnx', providers=providers)
onnx_prediction = m.run(output_names, {"input": X_test})

# The ONNX model must agree with Keras within float tolerance; run() returns
# one array per output name, so compare against the first (and only) entry.
np.testing.assert_allclose(keras_prediction, onnx_prediction[0], rtol=1e-4)
%timeit model.predict(X_test, verbose=0)
181 ms ± 49.4 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
%timeit m.run(output_names, {"input": X_test})
235 ms ± 20.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)