#!/usr/bin/env python
# coding: utf-8

# ## Transfer Learning with InceptionV3 (From ImageNet to CIFAR-10)
# - https://gogul09.github.io/software/flower-recognition-deep-learning

# In[1]:

# boilerplate code
import tensorflow as tf
print(tf.__version__)


# In[2]:

import math

import cv2  # python -m pip install opencv-python
import numpy as np
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.callbacks import LearningRateScheduler


# In[3]:

num_classes = 10


# In[4]:

def load_cifar10_data(img_rows, img_cols):
    # Load the CIFAR-10 training and test sets
    (X_train, Y_train), (X_test, Y_test) = cifar10.load_data()

    # Upscale the 32x32 CIFAR images to the InceptionV3 input size.
    # Note that cv2.resize takes dsize as (width, height), i.e. (img_cols, img_rows).
    # The arrays are kept as uint8: at 299x299 the training set is already
    # ~13 GB in RAM, and converting to float here would quadruple that
    # (see the tf.data sketch in the final cell for a memory-friendly alternative).
    X_train = np.array([cv2.resize(img, (img_cols, img_rows)) for img in X_train])
    X_test = np.array([cv2.resize(img, (img_cols, img_rows)) for img in X_test])

    # Transform integer targets to the one-hot format categorical_crossentropy expects
    Y_train = to_categorical(Y_train, num_classes)
    Y_test = to_categorical(Y_test, num_classes)

    print("X_train: {0}".format(X_train.shape))
    print("Y_train: {0}".format(Y_train.shape))
    print("X_test: {0}".format(X_test.shape))
    print("Y_test: {0}".format(Y_test.shape))

    return X_train, Y_train, X_test, Y_test


# In[5]:

X_train, y_train, X_test, y_test = load_cifar10_data(299, 299)


# In[9]:

from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input

def build_model(nb_classes):
    # The ImageNet weights expect pixels scaled to [-1, 1]; applying
    # preprocess_input inside the graph lets us keep the big arrays as uint8
    inputs = tf.keras.Input(shape=(299, 299, 3))
    x = preprocess_input(inputs)

    base_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=x)

    # add a global spatial average pooling layer
    x = GlobalAveragePooling2D()(base_model.output)
    # a fully-connected layer
    x = Dense(1024, activation='relu')(x)
    # and a softmax layer for the 10 CIFAR-10 classes
    predictions = Dense(nb_classes, activation='softmax')(x)

    # this is the model we will train
    model = Model(inputs=inputs, outputs=predictions)

    # first: train only the top layers (which were randomly initialized),
    # i.e. freeze all convolutional InceptionV3 layers
    for layer in base_model.layers:
        layer.trainable = False

    return model


# In[10]:

model = build_model(num_classes)
model.summary()


# In[11]:

initial_lrate = 0.01

def decay(epoch, lr):
    # Step decay: shrink the rate by a factor of 0.96 every 8 epochs.
    # LearningRateScheduler calls this as schedule(epoch, current_lr);
    # we recompute from initial_lrate rather than using the incoming lr.
    drop = 0.96
    epochs_drop = 8
    return initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))

lr_sc = LearningRateScheduler(decay, verbose=1)

# 'lr' was renamed to 'learning_rate' in the TF2 Keras optimizers
sgd = SGD(learning_rate=initial_lrate, momentum=0.9, nesterov=True)

model.compile(
    loss='categorical_crossentropy',
    optimizer=sgd,
    metrics=['accuracy']
)

epochs = 35
history = model.fit(
    x=X_train,
    y=y_train,
    validation_data=(X_test, y_test),
    epochs=epochs,
    batch_size=256,  # reduce this if the GPU runs out of memory at 299x299
    callbacks=[lr_sc]
)
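

# In[ ]:

# A minimal follow-up sketch: report the frozen-backbone model's final
# test-set performance in one place (fit above already prints val_accuracy
# per epoch, so this is a convenience summary; 'score' is an illustrative name).
score = model.evaluate(X_test, y_test, batch_size=256, verbose=0)
print("Test loss: {0:.4f}  Test accuracy: {1:.4f}".format(score[0], score[1]))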
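

# In[ ]:

# Optional second stage, sketched after the standard Keras fine-tuning
# recipe rather than anything above (assumptions: the 'mixed9' layer name
# from InceptionV3, and the illustrative epoch count below). Once the new
# top layers have converged, unfreeze the last two inception blocks and
# train them with a much smaller learning rate so the pretrained features
# are only nudged, not overwritten.
fine_tune_from = model.layers.index(model.get_layer('mixed9'))
for layer in model.layers[:fine_tune_from]:
    layer.trainable = False
for layer in model.layers[fine_tune_from:]:
    layer.trainable = True

# recompile so the trainability changes take effect; lr_sc is dropped here
# because the scheduler would reset the rate back toward initial_lrate
model.compile(
    loss='categorical_crossentropy',
    optimizer=SGD(learning_rate=1e-4, momentum=0.9, nesterov=True),
    metrics=['accuracy']
)
history_ft = model.fit(
    x=X_train,
    y=y_train,
    validation_data=(X_test, y_test),
    epochs=5,  # illustrative; fine-tuning usually needs only a few epochs
    batch_size=256
)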
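

# In[ ]:

# A memory-friendly alternative to the up-front resize in load_cifar10_data,
# sketched under the assumption of TF >= 2.4 (for tf.data.AUTOTUNE); the
# make_dataset name is illustrative. Rather than materializing 60,000 images
# at 299x299 (~16 GB even as uint8), keep the raw 32x32 arrays and let a
# tf.data pipeline resize each batch on the fly.
def make_dataset(images, labels, batch_size=256, training=False):
    ds = tf.data.Dataset.from_tensor_slices((images, labels))
    if training:
        ds = ds.shuffle(10000)
    ds = ds.batch(batch_size)
    # tf.image.resize outputs float32; preprocess_input inside the model
    # graph then scales those values to [-1, 1]
    ds = ds.map(
        lambda x, y: (tf.image.resize(x, (299, 299)), y),
        num_parallel_calls=tf.data.AUTOTUNE,
    )
    return ds.prefetch(tf.data.AUTOTUNE)

# Usage sketch: rebuild from the raw 32x32 CIFAR-10 arrays and pass the
# datasets straight to fit, e.g.
#   (x_raw, y_raw), (x_raw_test, y_raw_test) = cifar10.load_data()
#   train_ds = make_dataset(x_raw, to_categorical(y_raw, num_classes), training=True)
#   test_ds = make_dataset(x_raw_test, to_categorical(y_raw_test, num_classes))
#   model.fit(train_ds, validation_data=test_ds, epochs=epochs, callbacks=[lr_sc])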