!mkdir -p ~/.kaggle/
mv kaggle.json ~/.kaggle/
!kaggle competitions download -c rsna-pneumonia-detection-challenge
Warning: Your Kaggle API key is readable by other users on this system! To fix this, you can run 'chmod 600 /root/.kaggle/kaggle.json' Downloading stage_2_detailed_class_info.csv.zip to /content 0% 0.00/583k [00:00<?, ?B/s] 100% 583k/583k [00:00<00:00, 80.4MB/s] Downloading stage_2_sample_submission.csv to /content 0% 0.00/155k [00:00<?, ?B/s] 100% 155k/155k [00:00<00:00, 46.5MB/s] Downloading stage_2_train_labels.csv.zip to /content 0% 0.00/661k [00:00<?, ?B/s] 100% 661k/661k [00:00<00:00, 95.9MB/s] Downloading stage_2_test_images.zip to /content 95% 360M/377M [00:01<00:00, 222MB/s] 100% 377M/377M [00:01<00:00, 211MB/s] Downloading stage_2_train_images.zip to /content 100% 3.29G/3.29G [00:23<00:00, 127MB/s] 100% 3.29G/3.29G [00:23<00:00, 148MB/s] Downloading GCP%20Credits%20Request%20Link%20-%20RSNA.txt to /content 0% 0.00/55.0 [00:00<?, ?B/s] 100% 55.0/55.0 [00:00<00:00, 48.5kB/s]
!unzip -q stage_2_train_labels.csv.zip
!mkdir images
!unzip -q stage_2_train_images.zip -d images
!pip install pydicom
Collecting pydicom Downloading https://files.pythonhosted.org/packages/43/88/d3c419ab2e753e7651510882a53219373e78fb55294cb247dffd3934ea55/pydicom-1.2.2-py2.py3-none-any.whl (7.0MB) |████████████████████████████████| 7.0MB 2.7MB/s Installing collected packages: pydicom Successfully installed pydicom-1.2.2
import tensorflow as tf
from tensorflow.keras.optimizers import SGD
from tensorflow.train import AdamOptimizer
from tensorflow.python.keras.utils.data_utils import Sequence
from tensorflow.keras.layers import Flatten, Dropout, BatchNormalization, Reshape, GlobalAveragePooling2D
from tensorflow.keras.layers import Dense, Dropout, Input, Conv2D, MaxPooling2D
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, LearningRateScheduler
from keras.preprocessing import image
import itertools
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.utils.class_weight import compute_class_weight
from sklearn.metrics import confusion_matrix
import matplotlib.image as mpimg
import numpy as np
import shutil
import os
import pandas as pd
import pylab
import pydicom
import re
Using TensorFlow backend.
# TF 1.x only: opt in to eager execution (it is the default in TF 2.x).
tf.enable_eager_execution()
# Stage-2 labels: one row per patient per ground-truth box; x/y/width/height
# are NaN for negative patients and Target is the 0/1 image-level label.
train_labels = pd.read_csv("stage_2_train_labels.csv")
train_labels.head()
patientId | x | y | width | height | Target | |
---|---|---|---|---|---|---|
0 | 0004cfab-14fd-4e49-80ba-63a80b6bddd6 | NaN | NaN | NaN | NaN | 0 |
1 | 00313ee0-9eaa-42f4-b0ab-c148ed3241cd | NaN | NaN | NaN | NaN | 0 |
2 | 00322d4d-1c29-4943-afc9-b6754be640eb | NaN | NaN | NaN | NaN | 0 |
3 | 003d8fa0-6bf1-40ed-b54c-ac657f8495c5 | NaN | NaN | NaN | NaN | 0 |
4 | 00436515-870c-4b36-a041-de91049b9ab4 | 264.0 | 152.0 | 213.0 | 379.0 | 1 |
train_labels["patientId"].unique().size
26684
# Build three parallel lists over the extracted DICOM files:
#   X     - file paths,
#   y     - image-level 0/1 labels,
#   boxes - per-patient ground-truth boxes normalized to [0, 1]
#           ([0, 0, 0, 0] is the sentinel for "no box").
images_paths = os.listdir("images")
X = [f'images/{image_path}' for image_path in images_paths]
original_image_width = 1024
original_image_height = 1024
y = []
boxes = []
for image_path in images_paths:
    patient_id = image_path[:-4]  # strip the ".dcm" extension
    patient_rows = train_labels[train_labels["patientId"] == patient_id]
    label = patient_rows.iloc[0]["Target"]
    patient_boxes = []
    for row in patient_rows.values:
        box_width, box_height = row[3], row[4]
        if np.isnan(box_width):
            # negative patient: keep a zero sentinel box
            patient_boxes.append([0, 0, 0, 0])
        else:
            # convert (x, y, w, h) in pixels to normalized corner coordinates
            left = row[1] / original_image_width
            top = row[2] / original_image_height
            right = (row[1] + box_width) / original_image_width
            bottom = (row[2] + box_height) / original_image_height
            patient_boxes.append([left, top, right, bottom])
    boxes.append(patient_boxes)
    y.append(label)
!nvidia-smi
Fri Apr 26 02:04:22 2019 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 418.56 Driver Version: 410.79 CUDA Version: 10.0 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | |===============================+======================+======================| | 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 | | N/A 45C P0 26W / 70W | 8813MiB / 15079MiB | 0% Default | +-------------------------------+----------------------+----------------------+ +-----------------------------------------------------------------------------+ | Processes: GPU Memory | | GPU PID Type Process name Usage | |=============================================================================| +-----------------------------------------------------------------------------+
!cat /proc/meminfo
# Persist the prepared metadata, then reload it. NOTE: the CSV round-trip
# stringifies the `boxes` lists, which is why get_data_from_files later has
# to re-parse them with a regex instead of reading lists directly.
training_information = pd.DataFrame({'X': X, 'y': y, 'boxes': boxes})
training_information.to_csv("training_information.csv")
training_information = pd.read_csv("training_information.csv")
training_information.head()
Unnamed: 0 | X | boxes | y | |
---|---|---|---|---|
0 | 0 | images/b89bd111-9093-46cd-a75c-c58c58b80824.dcm | [[0, 0, 0, 0]] | 0 |
1 | 1 | images/651cd9f4-24c3-4ed0-9139-bb1595b18522.dcm | [[0, 0, 0, 0]] | 0 |
2 | 2 | images/764ae03e-966a-48d2-ad73-bdbe10274eea.dcm | [[0, 0, 0, 0]] | 0 |
3 | 3 | images/91b0043a-35bd-4cf9-b338-1cbc9b05b76d.dcm | [[0, 0, 0, 0]] | 0 |
4 | 4 | images/7aea6188-dc10-4758-9a8e-b4d3de1dbc2d.dcm | [[0, 0, 0, 0]] | 0 |
# Pick the first three positive and negative examples for visual inspection.
positive_images = training_information[training_information["y"] == 1]["X"][0:3]
negative_images = training_information[training_information["y"] == 0]["X"][0:3]

def plot_images(image_index, image_path, title):
    """Render one DICOM image into a cell of the 2x3 subplot grid.

    Relies on the module-level `figure` that is created *after* this
    definition (works because calls happen only after it exists).
    """
    subplot = figure.add_subplot(2, 3, image_index + 1)
    subplot.axis('Off')
    dicom_file = pydicom.read_file(image_path)
    pixels = dicom_file.pixel_array
    subplot.set_title(title, fontsize=16)
    plt.imshow(pixels, cmap=pylab.cm.gist_gray)
# Top row: positive examples; bottom row: negatives (offset by 3 subplots).
figure = plt.figure(figsize=(12, 6))
for position, path in enumerate(positive_images):
    plot_images(position, path, "Positive")
for position, path in enumerate(negative_images):
    plot_images(position + 3, path, "Negative")
def iou(box, anchor_box):
    """Intersection-over-union of two boxes given as [x_min, y_min, x_max, y_max]
    in [0, 1]-normalized coordinates.

    Fixes vs. the original: the intersection's max corner must use
    np.minimum (the original used np.maximum, making the "overlap" span the
    union), and the +1 pixel convention is dropped because coordinates here
    are normalized fractions, not integer pixel indices — with +1 every box
    had area ~1 and IoU saturated near 1.0.

    Returns 0.0 when both boxes are degenerate (zero union area).
    """
    inter_x_min = np.maximum(box[0], anchor_box[0])
    inter_y_min = np.maximum(box[1], anchor_box[1])
    inter_x_max = np.minimum(box[2], anchor_box[2])
    inter_y_max = np.minimum(box[3], anchor_box[3])
    # clamp negative extents to zero for non-overlapping boxes
    overlap_area = np.maximum(0.0, inter_x_max - inter_x_min) * np.maximum(0.0, inter_y_max - inter_y_min)
    box_area = (box[2] - box[0]) * (box[3] - box[1])
    anchor_area = (anchor_box[2] - anchor_box[0]) * (anchor_box[3] - anchor_box[1])
    union_area = box_area + anchor_area - overlap_area
    return overlap_area / union_area if union_area > 0 else 0.0
grid_size = 5

def get_anchor(box):
    """Find the grid cell whose anchor box best overlaps `box`.

    Scans the grid_size x grid_size cells of the unit square and returns
    (best_anchor, (column, row)) where best_anchor is the cell's
    [x_min, y_min, x_max, y_max]. If no cell overlaps at all, returns the
    zero box and index (0, 0).
    """
    cell_size = 1 / grid_size
    best_anchor = [0, 0, 0, 0]
    best_index = (0, 0)
    best_iou = 0.0
    cell_edges = np.linspace(0, 1, grid_size + 1)[:-1]
    for column, cell_x in enumerate(cell_edges):
        for row, cell_y in enumerate(cell_edges):
            candidate = [cell_x, cell_y, cell_x + cell_size, cell_y + cell_size]
            candidate_iou = iou(box, candidate)
            if candidate_iou > best_iou:
                best_iou = candidate_iou
                best_anchor = candidate
                best_index = (column, row)
    return best_anchor, best_index
def create_volume(boxes):
    """Encode ground-truth boxes into a (grid_size, grid_size, 5) target.

    Channel 0 is objectness (1 where a box is assigned to the cell), and
    channels 1:5 hold the normalized [x_min, y_min, x_max, y_max].
    """
    grid_volume = np.zeros((grid_size, grid_size, 5))
    for ground_truth in boxes:
        # [0, 0, 0, 0] is the "no box" sentinel used for negative patients
        if max(ground_truth) == 0:
            continue
        _, (column, row) = get_anchor(ground_truth)
        grid_volume[column, row, :] = [1, *ground_truth]
    return grid_volume
# Partition the metadata by image-level label (sizes displayed below).
positive_information = training_information[training_information["y"] == 1]
negative_information = training_information[training_information["y"] == 0]
negative_information["X"].size, positive_information["X"].size
(20672, 6012)
def get_data_from_files(information):
    """Map image path -> (grid, grid, 5) target volume for a metadata frame.

    The `boxes` column was stringified by the CSV round-trip, so each row's
    box list is recovered with a regex before being encoded by create_volume.
    Rows are indexed positionally: column 1 is the path, column 2 the boxes.
    """
    data = {}
    for row in information.values:
        path = row[1]
        serialized_boxes = row[2]
        # strip the outer brackets, then capture each inner "[...]" group
        box_strings = re.findall(r'\[(.*?)\]', serialized_boxes[1:-1])
        parsed_boxes = [np.fromstring(text, dtype=float, sep=', ') for text in box_strings]
        data[path] = create_volume(parsed_boxes)
    return data
# Encode targets for each class, then hold out 20% of each class for
# validation (the first slice of each list) and concatenate the remainders
# into the training arrays.
positive_data = get_data_from_files(positive_information)
negative_data = get_data_from_files(negative_information)
len(negative_data.values()), len(positive_data.values())
positive_images_paths = list(positive_data.keys())
positive_tensor_volumes = list(positive_data.values())
negative_images_paths = list(negative_data.keys())
negative_tensor_volumes = list(negative_data.values())
positive_data_size = len(positive_images_paths)
negative_data_size = len(negative_images_paths)
positive_validation_size = int(positive_data_size * 0.20)
negative_validation_size = int(negative_data_size * 0.20)
negative_validation_size, positive_validation_size
X_train = np.array(positive_images_paths[positive_validation_size:] + negative_images_paths[negative_validation_size:])
X_val = np.array(positive_images_paths[:positive_validation_size] + negative_images_paths[:negative_validation_size])
y_train = np.array(positive_tensor_volumes[positive_validation_size:] + negative_tensor_volumes[negative_validation_size:])
y_val = np.array(positive_tensor_volumes[:positive_validation_size] + negative_tensor_volumes[:negative_validation_size])
len(X_train), len(X_val)
len(y_train), len(y_val)
class DataGenerator(Sequence):
    """Keras Sequence yielding batches of preprocessed DICOM images.

    Args:
        images_paths: list of .dcm file paths.
        tensor_volumes: matching list of (grid, grid, 5) target volumes.
        batch_size: samples per batch; the trailing partial batch is dropped.
        predict: when True, __getitem__ yields images only (inference mode).
        shuffle: reshuffle sample order at construction and after each epoch.

    Fixes vs. the original: removed the dead `tensor_volume` local in
    __load_image__, replaced `== True` with a truthiness test, and used
    comprehensions for batch assembly.
    """

    def __init__(self, images_paths, tensor_volumes, batch_size=32, predict=False, shuffle=True):
        self.batch_size = batch_size
        self.tensor_volumes = tensor_volumes
        self.images_paths = images_paths
        self.shuffle = shuffle
        self.predict = predict
        self.on_epoch_end()

    def __len__(self):
        # number of complete batches per epoch
        return int(np.floor(len(self.images_paths) / self.batch_size))

    def __getitem__(self, index):
        from_range = index * self.batch_size
        to_range = (index + 1) * self.batch_size
        indexes = self.indexes[from_range:to_range]
        if self.predict:
            return self.__load_predict__(indexes)
        return self.__load__(indexes)

    def on_epoch_end(self):
        # rebuild (and optionally reshuffle) the sample ordering
        self.indexes = np.arange(len(self.images_paths))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __load_image__(self, index):
        # Read one DICOM, replicate its grayscale plane to 3 channels,
        # resize to the network input size, and apply MobileNetV2 preprocessing.
        image_path = self.images_paths[index]
        dcm_data = pydicom.read_file(image_path)
        image_loaded = dcm_data.pixel_array
        img = np.stack([image_loaded.copy()] * 3, axis=2)
        img = image.array_to_img(img, scale=False)
        img = img.resize((img_size, img_size))  # img_size: module-level constant
        img = image.img_to_array(img)
        img = preprocess_input(img)
        return img

    def __load_predict__(self, indexes):
        # images only — used at inference time
        return np.array([self.__load_image__(index) for index in indexes])

    def __load__(self, indexes):
        X = np.array([self.__load_image__(index) for index in indexes])
        y = np.array([self.tensor_volumes[index] for index in indexes])
        return X, y
def create_model():
    """Build the detector: MobileNetV2 backbone (alpha=1.5, trained from
    scratch) topped with a 5-channel sigmoid conv head.

    The 'valid' 3x3 convolution shrinks the backbone's spatial output so the
    head emits one [objectness, x_min, y_min, x_max, y_max] vector per grid
    cell (presumably matching grid_size=5 for 224x224 inputs — verify).
    """
    backbone = MobileNetV2(
        weights=None,
        input_shape=input_img_size,
        alpha=1.5,
        include_top=False)
    # all backbone layers are trained (no frozen stages)
    for layer in backbone.layers:
        layer.trainable = True
    detection_head = Conv2D(5, 3, padding='valid', activation='sigmoid')(backbone.output)
    return Model(inputs=backbone.input, outputs=detection_head)
def custom_loss(y_true, y_pred):
    """Objectness BCE over all grid cells + weighted L1 box loss over
    positive cells only.

    y_true/y_pred are (batch, grid, grid, 5): channel 0 is objectness,
    channels 1:5 are the normalized box corners (see create_volume).
    """
    # channel 0 marks which cells contain a ground-truth box
    mask = y_true[..., 0]
    true_boxes = tf.boolean_mask(y_true, mask)
    predicted_boxes = tf.boolean_mask(y_pred, mask)
    prediction_loss = tf.keras.losses.binary_crossentropy(y_true[..., 0], y_pred[..., 0])
    # NOTE(review): if a batch has no positive cells these tensors are empty;
    # confirm absolute_difference cannot produce NaN in that case.
    detection_loss = tf.losses.absolute_difference(true_boxes[..., 1:], predicted_boxes[..., 1:])
    # box regression weighted 10x relative to classification
    return tf.reduce_mean(prediction_loss) + 10 * detection_loss
def custom_accuracy(y_true, y_pred):
    """Binary accuracy computed on the objectness channel only."""
    objectness_true = y_true[..., 0]
    objectness_pred = y_pred[..., 0]
    return tf.keras.metrics.binary_accuracy(objectness_true, objectness_pred)
def mean_iou(y_true, y_pred):
    """Mean IoU between predicted and true boxes over positive grid cells.

    Fixes vs. the original: the intersection's max corner uses minimum (the
    original used maximum, which measured a union-like span); the +1 pixel
    offset is dropped because coordinates are [0, 1]-normalized; and an empty
    positive mask now yields 0.0 instead of the NaN values seen in the
    training logs (reduce_mean over an empty tensor is NaN).
    """
    mask = y_true[..., 0]
    true_boxes = tf.boolean_mask(y_true, mask)[..., 1:]
    predicted_boxes = tf.boolean_mask(y_pred, mask)[..., 1:]
    # intersection rectangle corners
    x_min = tf.math.maximum(true_boxes[..., 0], predicted_boxes[..., 0])
    y_min = tf.math.maximum(true_boxes[..., 1], predicted_boxes[..., 1])
    x_max = tf.math.minimum(true_boxes[..., 2], predicted_boxes[..., 2])
    y_max = tf.math.minimum(true_boxes[..., 3], predicted_boxes[..., 3])
    # clamp negative extents to zero for disjoint boxes
    overlap_area = tf.math.maximum(0.0, x_max - x_min) * tf.math.maximum(0.0, y_max - y_min)
    true_boxes_area = (true_boxes[..., 2] - true_boxes[..., 0]) * (true_boxes[..., 3] - true_boxes[..., 1])
    predicted_boxes_area = (predicted_boxes[..., 2] - predicted_boxes[..., 0]) * (predicted_boxes[..., 3] - predicted_boxes[..., 1])
    union_area = true_boxes_area + predicted_boxes_area - overlap_area
    iou_values = overlap_area / union_area
    # guard against NaN when the batch contains no positive cells
    return tf.cond(tf.size(iou_values) > 0,
                   lambda: tf.math.reduce_mean(iou_values),
                   lambda: tf.constant(0.0))
# Training hyperparameters.
batch_size = 32
img_size = 224  # input resolution fed to the network (used by DataGenerator)
input_img_size = (224, 224, 3)
learning_rate = 3e-4
epochs = 50
def polynomial_decay(epoch, *, base_lr=None, total_epochs=None, power=1.0):
    """Polynomially decay the learning rate from base_lr to 0 over total_epochs.

    power=1.0 gives a linear schedule. The keyword-only arguments default to
    the module-level `learning_rate` and `epochs` constants, so the original
    `LearningRateScheduler(polynomial_decay)` usage is unchanged, while the
    schedule is now parameterizable and unit-testable.

    Args:
        epoch: current epoch index (0-based, as passed by the scheduler).
        base_lr: starting learning rate (default: module `learning_rate`).
        total_epochs: schedule length (default: module `epochs`).
        power: decay exponent.

    Returns:
        The learning rate for this epoch as a float.
    """
    base_lr = learning_rate if base_lr is None else base_lr
    total_epochs = epochs if total_epochs is None else total_epochs
    return base_lr * (1 - (epoch / float(total_epochs))) ** power
# Checkpoint filename template embeds epoch, loss, and mean IoU; a new file
# is written whenever the monitored training loss improves.
weights_name = "epoch={epoch:02d}|loss={loss:.4f}|mean_iou={mean_iou:.4f}.h5"
checkpoint = ModelCheckpoint(weights_name, monitor="loss", verbose=1, save_best_only=True,
                             save_weights_only=True, mode="min", period=1)
# linear LR decay over the full run (see polynomial_decay above)
lr_decay = LearningRateScheduler(polynomial_decay)
optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
# full passes over train/val per epoch at the configured batch size
train_steps = int(len(X_train) / batch_size)
val_steps = int( len(X_val) / batch_size)
train_steps, val_steps
(667, 166)
# Instantiate the MobileNetV2-based detector (weights trained from scratch).
model = create_model()
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/resource_variable_ops.py:642: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version. Instructions for updating: Colocations handled automatically by placer.
# Compile with the combined objectness+box loss and both custom metrics.
model.compile(loss=custom_loss, metrics=[custom_accuracy, mean_iou], optimizer=optimizer)
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/losses/losses_impl.py:277: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.cast instead.
# Order of metrics as they appear in the training logs.
model.metrics_names
['loss', 'custom_accuracy', 'mean_iou']
# Build the batch generators and train; fit_generator is the TF 1.x Keras
# API for Sequence-based training (replaced by fit() in TF 2.x).
train_generator = DataGenerator(X_train, y_train, batch_size)
val_generator = DataGenerator(X_val, y_val, batch_size)
trained_model = model.fit_generator(train_generator,
                                    epochs=epochs,
                                    steps_per_epoch=train_steps,
                                    callbacks=[checkpoint, lr_decay],
                                    validation_data=val_generator,
                                    validation_steps=val_steps,
                                    verbose=1)
Epoch 1/50 WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.cast instead.
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gradients_impl.py:110: UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory. "Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
166/166 [==============================] - 98s 593ms/step - loss: 1.6360 - custom_accuracy: 0.9857 - mean_iou: 1.0002 Epoch 00001: loss improved from inf to 1.20958, saving model to epoch=01|loss=1.2096|mean_iou=1.0118.h5 667/667 [==============================] - 644s 965ms/step - loss: 1.2096 - custom_accuracy: 0.9851 - mean_iou: 1.0118 - val_loss: 1.6360 - val_custom_accuracy: 0.9857 - val_mean_iou: 1.0002 Epoch 2/50 166/166 [==============================] - 98s 589ms/step - loss: 1.6043 - custom_accuracy: 0.9857 - mean_iou: 0.9997 Epoch 00002: loss improved from 1.20958 to 0.66199, saving model to epoch=02|loss=0.6620|mean_iou=0.9991.h5 667/667 [==============================] - 610s 914ms/step - loss: 0.6620 - custom_accuracy: 0.9857 - mean_iou: 0.9991 - val_loss: 1.6043 - val_custom_accuracy: 0.9857 - val_mean_iou: 0.9997 Epoch 3/50 166/166 [==============================] - 97s 587ms/step - loss: 1.6035 - custom_accuracy: 0.9857 - mean_iou: 0.9913 Epoch 00003: loss improved from 0.66199 to 0.62514, saving model to epoch=03|loss=0.6251|mean_iou=0.9989.h5 667/667 [==============================] - 611s 916ms/step - loss: 0.6251 - custom_accuracy: 0.9857 - mean_iou: 0.9989 - val_loss: 1.6035 - val_custom_accuracy: 0.9857 - val_mean_iou: 0.9913 Epoch 4/50 166/166 [==============================] - 98s 592ms/step - loss: 1.6554 - custom_accuracy: 0.9858 - mean_iou: 1.0143 Epoch 00004: loss improved from 0.62514 to 0.59481, saving model to epoch=04|loss=0.5948|mean_iou=0.9980.h5 667/667 [==============================] - 608s 911ms/step - loss: 0.5948 - custom_accuracy: 0.9857 - mean_iou: 0.9980 - val_loss: 1.6554 - val_custom_accuracy: 0.9858 - val_mean_iou: 1.0143 Epoch 5/50 166/166 [==============================] - 99s 594ms/step - loss: 1.5775 - custom_accuracy: 0.9858 - mean_iou: 1.0033 Epoch 00005: loss improved from 0.59481 to 0.57673, saving model to epoch=05|loss=0.5767|mean_iou=nan.h5 667/667 [==============================] - 614s 921ms/step - loss: 
0.5767 - custom_accuracy: 0.9857 - mean_iou: nan - val_loss: 1.5775 - val_custom_accuracy: 0.9858 - val_mean_iou: 1.0033 Epoch 6/50 166/166 [==============================] - 97s 585ms/step - loss: 1.4732 - custom_accuracy: 0.9857 - mean_iou: 0.9965 Epoch 00006: loss improved from 0.57673 to 0.55884, saving model to epoch=06|loss=0.5588|mean_iou=0.9979.h5 667/667 [==============================] - 613s 919ms/step - loss: 0.5588 - custom_accuracy: 0.9857 - mean_iou: 0.9979 - val_loss: 1.4732 - val_custom_accuracy: 0.9857 - val_mean_iou: 0.9965 Epoch 7/50 166/166 [==============================] - 97s 586ms/step - loss: 1.2823 - custom_accuracy: 0.9858 - mean_iou: 0.9896 Epoch 00007: loss improved from 0.55884 to 0.53841, saving model to epoch=07|loss=0.5384|mean_iou=0.9956.h5 667/667 [==============================] - 617s 925ms/step - loss: 0.5384 - custom_accuracy: 0.9857 - mean_iou: 0.9956 - val_loss: 1.2823 - val_custom_accuracy: 0.9858 - val_mean_iou: 0.9896 Epoch 8/50 166/166 [==============================] - 99s 594ms/step - loss: 1.0434 - custom_accuracy: 0.9857 - mean_iou: 0.9855 Epoch 00008: loss improved from 0.53841 to 0.52544, saving model to epoch=08|loss=0.5254|mean_iou=0.9959.h5 667/667 [==============================] - 618s 927ms/step - loss: 0.5254 - custom_accuracy: 0.9857 - mean_iou: 0.9959 - val_loss: 1.0434 - val_custom_accuracy: 0.9857 - val_mean_iou: 0.9855 Epoch 9/50 166/166 [==============================] - 97s 582ms/step - loss: 0.8220 - custom_accuracy: 0.9857 - mean_iou: 0.9803 Epoch 00009: loss improved from 0.52544 to 0.51854, saving model to epoch=09|loss=0.5185|mean_iou=nan.h5 667/667 [==============================] - 616s 923ms/step - loss: 0.5185 - custom_accuracy: 0.9857 - mean_iou: nan - val_loss: 0.8220 - val_custom_accuracy: 0.9857 - val_mean_iou: 0.9803 Epoch 10/50 166/166 [==============================] - 98s 590ms/step - loss: 0.6328 - custom_accuracy: 0.9858 - mean_iou: 0.9844 Epoch 00010: loss improved from 0.51854 to 
0.50182, saving model to epoch=10|loss=0.5018|mean_iou=0.9950.h5 667/667 [==============================] - 616s 923ms/step - loss: 0.5018 - custom_accuracy: 0.9858 - mean_iou: 0.9950 - val_loss: 0.6328 - val_custom_accuracy: 0.9858 - val_mean_iou: 0.9844 Epoch 11/50 166/166 [==============================] - 97s 586ms/step - loss: 0.6287 - custom_accuracy: 0.9857 - mean_iou: 0.9939 Epoch 00011: loss improved from 0.50182 to 0.49507, saving model to epoch=11|loss=0.4951|mean_iou=0.9944.h5 667/667 [==============================] - 618s 926ms/step - loss: 0.4951 - custom_accuracy: 0.9857 - mean_iou: 0.9944 - val_loss: 0.6287 - val_custom_accuracy: 0.9857 - val_mean_iou: 0.9939 Epoch 12/50 166/166 [==============================] - 96s 580ms/step - loss: 0.5860 - custom_accuracy: 0.9857 - mean_iou: 0.9915 Epoch 00012: loss improved from 0.49507 to 0.49168, saving model to epoch=12|loss=0.4917|mean_iou=0.9953.h5 667/667 [==============================] - 613s 920ms/step - loss: 0.4917 - custom_accuracy: 0.9858 - mean_iou: 0.9953 - val_loss: 0.5860 - val_custom_accuracy: 0.9857 - val_mean_iou: 0.9915 Epoch 13/50 166/166 [==============================] - 97s 583ms/step - loss: 0.5467 - custom_accuracy: 0.9860 - mean_iou: 0.9894 Epoch 00013: loss improved from 0.49168 to 0.47869, saving model to epoch=13|loss=0.4787|mean_iou=0.9949.h5 667/667 [==============================] - 613s 918ms/step - loss: 0.4787 - custom_accuracy: 0.9857 - mean_iou: 0.9949 - val_loss: 0.5467 - val_custom_accuracy: 0.9860 - val_mean_iou: 0.9894 Epoch 14/50 166/166 [==============================] - 93s 561ms/step - loss: 0.6184 - custom_accuracy: 0.9858 - mean_iou: 0.9933 Epoch 00014: loss improved from 0.47869 to 0.46922, saving model to epoch=14|loss=0.4692|mean_iou=0.9952.h5 667/667 [==============================] - 604s 905ms/step - loss: 0.4692 - custom_accuracy: 0.9858 - mean_iou: 0.9952 - val_loss: 0.6184 - val_custom_accuracy: 0.9858 - val_mean_iou: 0.9933 Epoch 15/50 166/166 
[==============================] - 95s 574ms/step - loss: 0.5580 - custom_accuracy: 0.9861 - mean_iou: 0.9870 Epoch 00015: loss improved from 0.46922 to 0.46137, saving model to epoch=15|loss=0.4614|mean_iou=0.9948.h5 667/667 [==============================] - 617s 925ms/step - loss: 0.4614 - custom_accuracy: 0.9859 - mean_iou: 0.9948 - val_loss: 0.5580 - val_custom_accuracy: 0.9861 - val_mean_iou: 0.9870 Epoch 16/50 166/166 [==============================] - 105s 630ms/step - loss: 0.5394 - custom_accuracy: 0.9859 - mean_iou: 0.9929 Epoch 00016: loss improved from 0.46137 to 0.44691, saving model to epoch=16|loss=0.4469|mean_iou=0.9938.h5 667/667 [==============================] - 657s 985ms/step - loss: 0.4469 - custom_accuracy: 0.9859 - mean_iou: 0.9938 - val_loss: 0.5394 - val_custom_accuracy: 0.9859 - val_mean_iou: 0.9929 Epoch 17/50 166/166 [==============================] - 106s 637ms/step - loss: 0.5312 - custom_accuracy: 0.9860 - mean_iou: 0.9914 Epoch 00017: loss improved from 0.44691 to 0.43889, saving model to epoch=17|loss=0.4389|mean_iou=0.9948.h5 667/667 [==============================] - 653s 979ms/step - loss: 0.4389 - custom_accuracy: 0.9859 - mean_iou: 0.9948 - val_loss: 0.5312 - val_custom_accuracy: 0.9860 - val_mean_iou: 0.9914 Epoch 18/50 166/166 [==============================] - 106s 639ms/step - loss: 0.5312 - custom_accuracy: 0.9859 - mean_iou: 0.9966 Epoch 00018: loss improved from 0.43889 to 0.41748, saving model to epoch=18|loss=0.4175|mean_iou=0.9948.h5 667/667 [==============================] - 658s 986ms/step - loss: 0.4175 - custom_accuracy: 0.9859 - mean_iou: 0.9948 - val_loss: 0.5312 - val_custom_accuracy: 0.9859 - val_mean_iou: 0.9966 Epoch 19/50 166/166 [==============================] - 107s 644ms/step - loss: 0.5397 - custom_accuracy: 0.9861 - mean_iou: 0.9960 Epoch 00019: loss improved from 0.41748 to 0.40982, saving model to epoch=19|loss=0.4098|mean_iou=0.9948.h5 667/667 [==============================] - 656s 984ms/step - 
loss: 0.4098 - custom_accuracy: 0.9860 - mean_iou: 0.9948 - val_loss: 0.5397 - val_custom_accuracy: 0.9861 - val_mean_iou: 0.9960 Epoch 20/50 166/166 [==============================] - 107s 644ms/step - loss: 0.5114 - custom_accuracy: 0.9862 - mean_iou: 0.9901 Epoch 00020: loss improved from 0.40982 to 0.38965, saving model to epoch=20|loss=0.3897|mean_iou=0.9950.h5 667/667 [==============================] - 652s 978ms/step - loss: 0.3897 - custom_accuracy: 0.9861 - mean_iou: 0.9950 - val_loss: 0.5114 - val_custom_accuracy: 0.9862 - val_mean_iou: 0.9901 Epoch 21/50 166/166 [==============================] - 105s 634ms/step - loss: 0.5572 - custom_accuracy: 0.9860 - mean_iou: 0.9901 Epoch 00021: loss improved from 0.38965 to 0.37514, saving model to epoch=21|loss=0.3751|mean_iou=0.9955.h5 667/667 [==============================] - 652s 978ms/step - loss: 0.3751 - custom_accuracy: 0.9861 - mean_iou: 0.9955 - val_loss: 0.5572 - val_custom_accuracy: 0.9860 - val_mean_iou: 0.9901 Epoch 22/50 166/166 [==============================] - 106s 636ms/step - loss: 0.5153 - custom_accuracy: 0.9862 - mean_iou: 0.9909 Epoch 00022: loss improved from 0.37514 to 0.36302, saving model to epoch=22|loss=0.3630|mean_iou=0.9957.h5 667/667 [==============================] - 653s 979ms/step - loss: 0.3630 - custom_accuracy: 0.9861 - mean_iou: 0.9957 - val_loss: 0.5153 - val_custom_accuracy: 0.9862 - val_mean_iou: 0.9909 Epoch 23/50 166/166 [==============================] - 106s 636ms/step - loss: 0.4980 - custom_accuracy: 0.9861 - mean_iou: 0.9912 Epoch 00023: loss improved from 0.36302 to 0.34428, saving model to epoch=23|loss=0.3443|mean_iou=0.9961.h5 667/667 [==============================] - 654s 981ms/step - loss: 0.3443 - custom_accuracy: 0.9862 - mean_iou: 0.9961 - val_loss: 0.4980 - val_custom_accuracy: 0.9861 - val_mean_iou: 0.9912 Epoch 24/50 166/166 [==============================] - 105s 635ms/step - loss: 0.4931 - custom_accuracy: 0.9863 - mean_iou: 0.9910 Epoch 00024: loss 
improved from 0.34428 to 0.32694, saving model to epoch=24|loss=0.3269|mean_iou=nan.h5 667/667 [==============================] - 652s 977ms/step - loss: 0.3269 - custom_accuracy: 0.9863 - mean_iou: nan - val_loss: 0.4931 - val_custom_accuracy: 0.9863 - val_mean_iou: 0.9910 Epoch 25/50 166/166 [==============================] - 95s 571ms/step - loss: 0.5048 - custom_accuracy: 0.9862 - mean_iou: 0.9907 Epoch 00025: loss improved from 0.32694 to 0.31456, saving model to epoch=25|loss=0.3146|mean_iou=nan.h5 667/667 [==============================] - 614s 920ms/step - loss: 0.3146 - custom_accuracy: 0.9863 - mean_iou: nan - val_loss: 0.5048 - val_custom_accuracy: 0.9862 - val_mean_iou: 0.9907 Epoch 26/50 166/166 [==============================] - 98s 593ms/step - loss: 0.5111 - custom_accuracy: 0.9862 - mean_iou: 0.9918 Epoch 00026: loss improved from 0.31456 to 0.30083, saving model to epoch=26|loss=0.3008|mean_iou=0.9962.h5 667/667 [==============================] - 616s 923ms/step - loss: 0.3008 - custom_accuracy: 0.9864 - mean_iou: 0.9962 - val_loss: 0.5111 - val_custom_accuracy: 0.9862 - val_mean_iou: 0.9918 Epoch 27/50 166/166 [==============================] - 98s 590ms/step - loss: 0.4973 - custom_accuracy: 0.9862 - mean_iou: 0.9917 Epoch 00027: loss improved from 0.30083 to 0.28508, saving model to epoch=27|loss=0.2851|mean_iou=0.9969.h5 667/667 [==============================] - 623s 934ms/step - loss: 0.2851 - custom_accuracy: 0.9865 - mean_iou: 0.9969 - val_loss: 0.4973 - val_custom_accuracy: 0.9862 - val_mean_iou: 0.9917 Epoch 28/50 166/166 [==============================] - 97s 582ms/step - loss: 0.5094 - custom_accuracy: 0.9861 - mean_iou: 0.9924 Epoch 00028: loss improved from 0.28508 to 0.27412, saving model to epoch=28|loss=0.2741|mean_iou=0.9964.h5 667/667 [==============================] - 627s 939ms/step - loss: 0.2741 - custom_accuracy: 0.9866 - mean_iou: 0.9964 - val_loss: 0.5094 - val_custom_accuracy: 0.9861 - val_mean_iou: 0.9924 Epoch 29/50 
166/166 [==============================] - 97s 586ms/step - loss: 0.5102 - custom_accuracy: 0.9862 - mean_iou: 0.9917 Epoch 00029: loss improved from 0.27412 to 0.26504, saving model to epoch=29|loss=0.2650|mean_iou=nan.h5 667/667 [==============================] - 621s 930ms/step - loss: 0.2650 - custom_accuracy: 0.9866 - mean_iou: nan - val_loss: 0.5102 - val_custom_accuracy: 0.9862 - val_mean_iou: 0.9917 Epoch 30/50 166/166 [==============================] - 96s 580ms/step - loss: 0.4935 - custom_accuracy: 0.9864 - mean_iou: 0.9916 Epoch 00030: loss improved from 0.26504 to 0.25096, saving model to epoch=30|loss=0.2510|mean_iou=0.9973.h5 667/667 [==============================] - 622s 933ms/step - loss: 0.2510 - custom_accuracy: 0.9868 - mean_iou: 0.9973 - val_loss: 0.4935 - val_custom_accuracy: 0.9864 - val_mean_iou: 0.9916 Epoch 31/50 166/166 [==============================] - 96s 578ms/step - loss: 0.4985 - custom_accuracy: 0.9863 - mean_iou: 0.9932 Epoch 00031: loss improved from 0.25096 to 0.24230, saving model to epoch=31|loss=0.2423|mean_iou=0.9973.h5 667/667 [==============================] - 622s 932ms/step - loss: 0.2423 - custom_accuracy: 0.9869 - mean_iou: 0.9973 - val_loss: 0.4985 - val_custom_accuracy: 0.9863 - val_mean_iou: 0.9932 Epoch 32/50 166/166 [==============================] - 95s 572ms/step - loss: 0.4992 - custom_accuracy: 0.9863 - mean_iou: 0.9927 Epoch 00032: loss improved from 0.24230 to 0.22928, saving model to epoch=32|loss=0.2293|mean_iou=0.9976.h5 667/667 [==============================] - 617s 925ms/step - loss: 0.2293 - custom_accuracy: 0.9870 - mean_iou: 0.9976 - val_loss: 0.4992 - val_custom_accuracy: 0.9863 - val_mean_iou: 0.9927 Epoch 33/50 166/166 [==============================] - 94s 569ms/step - loss: 0.4855 - custom_accuracy: 0.9862 - mean_iou: 0.9907 Epoch 00033: loss improved from 0.22928 to 0.21918, saving model to epoch=33|loss=0.2192|mean_iou=0.9980.h5 667/667 [==============================] - 611s 916ms/step - 
loss: 0.2192 - custom_accuracy: 0.9870 - mean_iou: 0.9980 - val_loss: 0.4855 - val_custom_accuracy: 0.9862 - val_mean_iou: 0.9907 Epoch 34/50 166/166 [==============================] - 94s 568ms/step - loss: 0.4933 - custom_accuracy: 0.9863 - mean_iou: 0.9919 Epoch 00034: loss improved from 0.21918 to 0.20760, saving model to epoch=34|loss=0.2076|mean_iou=0.9980.h5 667/667 [==============================] - 611s 916ms/step - loss: 0.2076 - custom_accuracy: 0.9871 - mean_iou: 0.9980 - val_loss: 0.4933 - val_custom_accuracy: 0.9863 - val_mean_iou: 0.9919 Epoch 35/50 166/166 [==============================] - 95s 570ms/step - loss: 0.4896 - custom_accuracy: 0.9864 - mean_iou: 0.9906 Epoch 00035: loss improved from 0.20760 to 0.20027, saving model to epoch=35|loss=0.2003|mean_iou=nan.h5 667/667 [==============================] - 608s 912ms/step - loss: 0.2003 - custom_accuracy: 0.9873 - mean_iou: nan - val_loss: 0.4896 - val_custom_accuracy: 0.9864 - val_mean_iou: 0.9906 Epoch 36/50 166/166 [==============================] - 94s 566ms/step - loss: 0.4862 - custom_accuracy: 0.9862 - mean_iou: 0.9925 Epoch 00036: loss improved from 0.20027 to 0.18930, saving model to epoch=36|loss=0.1893|mean_iou=0.9984.h5 667/667 [==============================] - 607s 911ms/step - loss: 0.1893 - custom_accuracy: 0.9875 - mean_iou: 0.9984 - val_loss: 0.4862 - val_custom_accuracy: 0.9862 - val_mean_iou: 0.9925 Epoch 37/50 166/166 [==============================] - 94s 568ms/step - loss: 0.4868 - custom_accuracy: 0.9861 - mean_iou: 0.9924 Epoch 00037: loss improved from 0.18930 to 0.18190, saving model to epoch=37|loss=0.1819|mean_iou=0.9982.h5 667/667 [==============================] - 608s 912ms/step - loss: 0.1819 - custom_accuracy: 0.9877 - mean_iou: 0.9982 - val_loss: 0.4868 - val_custom_accuracy: 0.9861 - val_mean_iou: 0.9924 Epoch 38/50 166/166 [==============================] - 94s 566ms/step - loss: 0.4905 - custom_accuracy: 0.9862 - mean_iou: nan Epoch 00038: loss improved from 
0.18190 to 0.17232, saving model to epoch=38|loss=0.1723|mean_iou=0.9981.h5 667/667 [==============================] - 606s 908ms/step - loss: 0.1723 - custom_accuracy: 0.9877 - mean_iou: 0.9981 - val_loss: 0.4905 - val_custom_accuracy: 0.9862 - val_mean_iou: nan Epoch 39/50 166/166 [==============================] - 94s 564ms/step - loss: 0.4846 - custom_accuracy: 0.9860 - mean_iou: 0.9901 Epoch 00039: loss improved from 0.17232 to 0.16364, saving model to epoch=39|loss=0.1636|mean_iou=nan.h5 667/667 [==============================] - 608s 912ms/step - loss: 0.1636 - custom_accuracy: 0.9879 - mean_iou: nan - val_loss: 0.4846 - val_custom_accuracy: 0.9860 - val_mean_iou: 0.9901 Epoch 40/50 166/166 [==============================] - 93s 560ms/step - loss: 0.4837 - custom_accuracy: 0.9862 - mean_iou: 0.9910 Epoch 00040: loss improved from 0.16364 to 0.15525, saving model to epoch=40|loss=0.1553|mean_iou=0.9985.h5 667/667 [==============================] - 605s 907ms/step - loss: 0.1553 - custom_accuracy: 0.9881 - mean_iou: 0.9985 - val_loss: 0.4837 - val_custom_accuracy: 0.9862 - val_mean_iou: 0.9910 Epoch 41/50 166/166 [==============================] - 95s 570ms/step - loss: 0.4856 - custom_accuracy: 0.9861 - mean_iou: 0.9914 Epoch 00041: loss improved from 0.15525 to 0.14682, saving model to epoch=41|loss=0.1468|mean_iou=0.9987.h5 667/667 [==============================] - 611s 915ms/step - loss: 0.1468 - custom_accuracy: 0.9882 - mean_iou: 0.9987 - val_loss: 0.4856 - val_custom_accuracy: 0.9861 - val_mean_iou: 0.9914 Epoch 42/50 166/166 [==============================] - 96s 581ms/step - loss: 0.4854 - custom_accuracy: 0.9861 - mean_iou: 0.9914 Epoch 00042: loss improved from 0.14682 to 0.14014, saving model to epoch=42|loss=0.1401|mean_iou=0.9987.h5 667/667 [==============================] - 616s 924ms/step - loss: 0.1401 - custom_accuracy: 0.9883 - mean_iou: 0.9987 - val_loss: 0.4854 - val_custom_accuracy: 0.9861 - val_mean_iou: 0.9914 Epoch 43/50 166/166 
[==============================] - 93s 562ms/step - loss: 0.4812 - custom_accuracy: 0.9860 - mean_iou: 0.9917 Epoch 00043: loss improved from 0.14014 to 0.13173, saving model to epoch=43|loss=0.1317|mean_iou=0.9989.h5 667/667 [==============================] - 613s 919ms/step - loss: 0.1317 - custom_accuracy: 0.9885 - mean_iou: 0.9989 - val_loss: 0.4812 - val_custom_accuracy: 0.9860 - val_mean_iou: 0.9917 Epoch 44/50 166/166 [==============================] - 96s 581ms/step - loss: 0.4782 - custom_accuracy: 0.9858 - mean_iou: 0.9912 Epoch 00044: loss improved from 0.13173 to 0.12265, saving model to epoch=44|loss=0.1227|mean_iou=0.9990.h5 667/667 [==============================] - 613s 919ms/step - loss: 0.1227 - custom_accuracy: 0.9888 - mean_iou: 0.9990 - val_loss: 0.4782 - val_custom_accuracy: 0.9858 - val_mean_iou: 0.9912 Epoch 45/50 166/166 [==============================] - 97s 586ms/step - loss: 0.4824 - custom_accuracy: 0.9861 - mean_iou: 0.9909 Epoch 00045: loss improved from 0.12265 to 0.11457, saving model to epoch=45|loss=0.1146|mean_iou=0.9990.h5 667/667 [==============================] - 620s 930ms/step - loss: 0.1146 - custom_accuracy: 0.9889 - mean_iou: 0.9990 - val_loss: 0.4824 - val_custom_accuracy: 0.9861 - val_mean_iou: 0.9909 Epoch 46/50 166/166 [==============================] - 96s 579ms/step - loss: 0.4805 - custom_accuracy: 0.9861 - mean_iou: 0.9913 Epoch 00046: loss improved from 0.11457 to 0.10836, saving model to epoch=46|loss=0.1084|mean_iou=0.9991.h5 667/667 [==============================] - 617s 925ms/step - loss: 0.1084 - custom_accuracy: 0.9892 - mean_iou: 0.9991 - val_loss: 0.4805 - val_custom_accuracy: 0.9861 - val_mean_iou: 0.9913 Epoch 47/50 166/166 [==============================] - 98s 590ms/step - loss: 0.4808 - custom_accuracy: 0.9861 - mean_iou: 0.9915 Epoch 00047: loss improved from 0.10836 to 0.10227, saving model to epoch=47|loss=0.1023|mean_iou=nan.h5 667/667 [==============================] - 618s 927ms/step - loss: 
0.1023 - custom_accuracy: 0.9893 - mean_iou: nan - val_loss: 0.4808 - val_custom_accuracy: 0.9861 - val_mean_iou: 0.9915 Epoch 48/50 166/166 [==============================] - 97s 586ms/step - loss: 0.4795 - custom_accuracy: 0.9860 - mean_iou: 0.9897 Epoch 00048: loss improved from 0.10227 to 0.09496, saving model to epoch=48|loss=0.0950|mean_iou=0.9993.h5 667/667 [==============================] - 615s 923ms/step - loss: 0.0950 - custom_accuracy: 0.9894 - mean_iou: 0.9993 - val_loss: 0.4795 - val_custom_accuracy: 0.9860 - val_mean_iou: 0.9897 Epoch 49/50 166/166 [==============================] - 98s 587ms/step - loss: 0.4788 - custom_accuracy: 0.9861 - mean_iou: 0.9911 Epoch 00049: loss improved from 0.09496 to 0.08859, saving model to epoch=49|loss=0.0886|mean_iou=nan.h5 667/667 [==============================] - 616s 924ms/step - loss: 0.0886 - custom_accuracy: 0.9896 - mean_iou: nan - val_loss: 0.4788 - val_custom_accuracy: 0.9861 - val_mean_iou: 0.9911 Epoch 50/50 166/166 [==============================] - 96s 581ms/step - loss: 0.4780 - custom_accuracy: 0.9861 - mean_iou: 0.9909 Epoch 00050: loss improved from 0.08859 to 0.08462, saving model to epoch=50|loss=0.0846|mean_iou=0.9992.h5 667/667 [==============================] - 621s 930ms/step - loss: 0.0846 - custom_accuracy: 0.9897 - mean_iou: 0.9992 - val_loss: 0.4780 - val_custom_accuracy: 0.9861 - val_mean_iou: 0.9909
def plot_validation_training(metric, trained_model):
    """Plot the training vs. validation curve of one metric.

    Args:
        metric: key of the metric in the history dict (e.g. "loss").
        trained_model: object exposing a ``history`` dict that contains
            both ``metric`` and ``val_<metric>`` series (presumably a
            Keras ``History`` — confirm at the call site).
    """
    train_series = trained_model.history[metric]
    val_series = trained_model.history[f'val_{metric}']
    xs = range(len(train_series))
    plt.plot(xs, train_series, 'b', label=f'Training {metric}')
    plt.plot(xs, val_series, 'r', label=f'Validation {metric}')
    plt.ylim(bottom=0)
    plt.xlabel('Epochs ', fontsize=16)
    plt.ylabel(metric, fontsize=16)
    # Loss curves trend downward, so the free corner is the upper right;
    # accuracy-style curves trend upward, so use the lower right instead.
    plt.legend(loc='upper right' if metric == "loss" else 'lower right')
    plt.title(f'Training and validation {metric}', fontsize=20)
    plt.show()
# Visualize the learning curves of the finished run.
plot_validation_training("loss", trained_model)
plot_validation_training("custom_accuracy", trained_model)
# BUG FIX: the ModelCheckpoint callback saved files with '|' separators
# (the training log shows e.g. "saving model to
# epoch=50|loss=0.0846|mean_iou=0.9992.h5"), not '_', so the original
# underscore-separated path pointed at a file that does not exist.
model.load_weights("epoch=50|loss=0.0846|mean_iou=0.9992.h5")
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render ``cm`` as a colored matrix annotated with per-cell values.

    Pass ``normalize=True`` to display row-normalized rates instead of
    raw counts. Also prints which of the two modes was used.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)
    cell_fmt = '.2f' if normalize else 'd'
    # Cells darker than half the max get white text for contrast.
    cutoff = cm.max() / 2.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            plt.text(col, row, format(cm[row, col], cell_fmt),
                     horizontalalignment="center",
                     color="white" if cm[row, col] > cutoff else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
# Run the trained model over the whole validation generator.
predictions = model.predict_generator(val_generator, steps=val_steps, verbose=1)
166/166 [==============================] - 102s 615ms/step
len(predictions)
5312
# Channel 0 of each output cell appears to be the per-cell class score
# (channels 1: hold box coordinates elsewhere in this notebook);
# flatten the grid into one vector and binarize at 0.5.
classes_predicted = predictions[..., 0].reshape(-1)
classes_predicted = np.where(classes_predicted >= 0.5, 1, 0)
# Matching ground-truth cell labels for the same 5312 validation samples.
real_classes = y_val[..., 0][:5312].reshape(-1)
classes_predicted.shape, real_classes.shape
((132800,), (132800,))
# Class balance check: positive cells are rare (1566 of 132800, ~1.2%).
len(real_classes[real_classes == 1])
1566
len(real_classes[real_classes == 0])
131234
classes_names = ["Negatives", "Positives"]
# With labels=range(2), row/column 0 is the Negatives class and
# row/column 1 is the Positives class.
cm = confusion_matrix(real_classes, classes_predicted, labels=range(2))
plot_confusion_matrix(cm, classes_names)
Confusion matrix, without normalization
# BUG FIX: with label order [Negatives, Positives], row 0 of `cm` is the
# true-negative class and row 1 the true-positive class, so the original
# code had the two metric names swapped — the printed "sensitivity" of
# 0.9955 was in fact the true-negative rate (specificity), which is why
# it looked excellent despite 131234 negatives vs only 1566 positives.
sensitivity = cm[1, 1] / (cm[1, 1] + cm[1, 0])  # TP / (TP + FN)
print(f'sensitivity: {sensitivity}')
specificity = cm[0, 0] / (cm[0, 0] + cm[0, 1])  # TN / (TN + FP)
print(f'specificity: {specificity}')
sensitivity: 0.9955346937531432 specificity: 0.0006385696040868455
# NOTE(review): indentation of this cell was lost in the notebook export.
# The statements from `predictions = model.predict(images)` through
# `break` form the body of the outer `for` loop; the two `if` blocks and
# the `imshow` call sit inside the nested per-cell loops.
import cv2
# Fixed batch of the first 10 validation samples for qualitative review.
test_generator = DataGenerator(X_val[:10], y_val[:10], batch_size=10, shuffle=False)
font = cv2.FONT_HERSHEY_SIMPLEX  # NOTE(review): unused in this cell
for images, tensor_volumes in test_generator:
# One forward pass over the 10-image batch.
predictions = model.predict(images)
plt.figure()
# 5x2 subplot grid, one panel per sample.
figure, axis = plt.subplots(5, 2, figsize=(8, 16))
plt.suptitle("Predictions")
# Map flat sample index 0..9 to (row, column) of the 5x2 grid:
# rows 0,0,1,1,2,2,... and columns 0,1,0,1,...
plot_row_indeces_array = np.repeat([0, 1, 2, 3, 4], 2)
plot_column_indeces_array = np.tile([0, 1], 5)
for index, (predicted_tensor, tensor_volume) in enumerate(zip(predictions, tensor_volumes)):
# Channel 0 = per-cell class score; channels 1: = box coordinates,
# stored normalized (scaled back by img_size before drawing).
predicted_classes = predicted_tensor[..., 0]
predicted_boxes = predicted_tensor[..., 1:]
real_classes = tensor_volume[..., 0]
real_boxes = tensor_volume[..., 1:]
plot_row_index = plot_row_indeces_array[index]
plot_column_index = plot_column_indeces_array[index]
img = images[index]
# Walk every grid cell; draw ground-truth boxes with color (0, 255, 0)
# and predicted boxes with color (255, 0, 0). Boxes are used as the two
# opposite corners (boxes[0], boxes[1])-(boxes[2], boxes[3]).
for column_index, (column_predicted_classes, column_real_classes) in enumerate(zip(predicted_classes, real_classes)):
for row_index, (predicted_class, real_class) in enumerate(zip(column_predicted_classes, column_real_classes)):
if real_class > 0.5:
boxes = real_boxes[column_index][row_index] * img_size
img = cv2.rectangle(img, (int(boxes[0]), int(boxes[1])), (int(boxes[2]), int(boxes[3])), (0, 255, 0), 3)
if predicted_class > 0.5:
boxes = predicted_boxes[column_index][row_index] * img_size
img = cv2.rectangle(img, (int(boxes[0]), int(boxes[1])), (int(boxes[2]), int(boxes[3])), (255, 0, 0), 3)
axis[plot_row_index, plot_column_index].imshow(img)
# Only the first batch from the generator is visualized.
break
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
<Figure size 432x288 with 0 Axes>