# Jovian Commit Essentials
# Please retain and execute this cell without modifying the contents for `jovian.commit` to work
!pip install jovian --upgrade -q
import jovian
jovian.utils.colab.set_colab_file_id('1gYVKzJiFWSfPQj9O4nfo7GIom_hUqR1A')
import os
import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from PIL import Image
from tqdm import tqdm
import torchvision
import torchvision.transforms as T
from torchvision.utils import make_grid, save_image
from torchvision.datasets import MNIST
from torchsummary import summary
import matplotlib.pyplot as plt
%matplotlib inline
# Move to device
def get_default_device():
    """Pick GPU if available, else CPU"""
    if torch.cuda.is_available():
        return torch.device('cuda')
    else:
        return torch.device('cpu')
device = get_default_device()
device
def to_device(data, device):
    """Move tensor(s) to the chosen device, recursing into lists and tuples"""
    if isinstance(data, (list, tuple)):
        return [to_device(x, device) for x in data]
    return data.to(device, non_blocking=True)
class DeviceDataLoader():
    """Wrap a DataLoader so each batch is moved to the device as it is yielded"""
    def __init__(self, dl, device):
        self.dl = dl
        self.device = device

    def __iter__(self):
        for b in self.dl:
            yield to_device(b, self.device)

    def __len__(self):
        return len(self.dl)
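As a quick sanity check (illustrative only; the dummy batch here is hypothetical), `to_device` recurses into the `(images, labels)` tuples the MNIST loader yields, so both tensors land on the target device:

batch = (torch.zeros(2, 1, 28, 28), torch.tensor([3, 7]))  # dummy (images, labels) pair
images, labels = to_device(batch, device)
print(images.device, labels.device)  # both report the selected device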
# Denormalize images
def denorm(x):
    out = (x + 1) / 2
    return out.clamp(0, 1)
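Since `T.Normalize(mean=(0.5,), std=(0.5,))` below maps each pixel x in [0, 1] to (x - 0.5)/0.5 = 2x - 1, `denorm` is its exact inverse. A tiny check with illustrative values:

x = torch.tensor([-1.0, 0.0, 1.0])
print(denorm(x))  # tensor([0.0000, 0.5000, 1.0000])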
# Visualization helpers
def show_images(images, nmax=64):
    fig, ax = plt.subplots(figsize=(8, 8))
    ax.set_xticks([]); ax.set_yticks([])
    ax.imshow(make_grid(denorm(images.cpu().detach()[:nmax]), nrow=8).permute(1, 2, 0))

def show_batch(dl, nmax=64):
    for images, _ in dl:
        show_images(images, nmax)
        break
def save_samples(index, model, latent_tensors, show=True, verbose=False):
    fake_images = model(latent_tensors)
    fake_fname = 'generated-images-{0:0=4d}.png'.format(index)
    save_image(denorm(fake_images), os.path.join(sample_dir, fake_fname), nrow=8)
    if verbose:
        print('Saving', fake_fname)
    if show:
        show_images(fake_images)
BATCH_SIZE = 256
LEARNING_RATE = 0.0002
IMAGE_CHANNELS = 1
CAPACITY = 64
LATENT_SIZE = 100
EPOCHS = 100
dataset = MNIST(root='./data',
                train=True,
                download=True,
                transform=T.Compose([
                    T.ToTensor(),
                    T.Normalize(mean=(0.5,), std=(0.5,))
                ]))

data_loader = DataLoader(dataset,
                         BATCH_SIZE,
                         shuffle=True,
                         pin_memory=True,
                         num_workers=4)
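Note that `pin_memory=True` works together with the `non_blocking=True` flag in `to_device`: batches staged in pinned (page-locked) host memory can be copied to the GPU asynchronously, overlapping the transfer with computation, while `num_workers=4` loads and transforms batches in parallel worker processes.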
Downloading and extracting the four MNIST archives (train/test images and labels) to ./data/MNIST/raw ... Processing... Done!
mnist = DeviceDataLoader(data_loader, device)
img, label = dataset[torch.randint(len(dataset), (1,)).item()]  # pick a random training example
img_norm = denorm(img)
plt.imshow(img_norm[0], cmap='gray')
plt.axis("off")
print('Label:', label)
print(img[:,10:15,10:15])
torch.min(img), torch.max(img)
Label: 2
tensor([[[-1.0000, -1.0000, -1.0000, -1.0000, -0.6941],
         [-1.0000, -1.0000, -1.0000, -1.0000,  0.6706],
         [-1.0000, -1.0000, -1.0000, -0.7647,  0.8353],
         [-1.0000, -1.0000, -1.0000,  0.0667,  0.9843],
         [-1.0000, -1.0000, -0.3255,  0.9686,  0.9843]]])
(tensor(-1.), tensor(0.9922))
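The extremes make sense: `ToTensor` scales the raw bytes to [0, 1] and the normalization maps x to 2x - 1, so a fully black pixel (0) becomes exactly -1, while the brightest pixel in this digit, 254/255 ≈ 0.9961, becomes 2 · (254/255) - 1 ≈ 0.9922.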
show_batch(mnist)
class Discriminator(nn.Module):
    def __init__(self):
        super().__init__()
        # 28x28 -> 14x14
        self.conv1 = nn.Conv2d(in_channels=IMAGE_CHANNELS, out_channels=CAPACITY, kernel_size=4, stride=2, padding=1, bias=False)
        self.batch_norm = nn.BatchNorm2d(CAPACITY)
        # 14x14 -> 7x7
        self.conv2 = nn.Conv2d(in_channels=CAPACITY, out_channels=CAPACITY, kernel_size=4, stride=2, padding=1, bias=False)
        # 64 * 7 * 7 = 3136 flattened features -> one real/fake probability
        self.fc1 = nn.Linear(in_features=3136, out_features=1, bias=False)
        self.leaky_activation = nn.LeakyReLU(0.2, inplace=True)
        self.drop_out = nn.Dropout2d(0.4)

    def forward(self, xb):
        xb = self.conv1(xb)
        xb = self.leaky_activation(xb)
        xb = self.drop_out(xb)
        xb = self.conv2(xb)
        xb = self.batch_norm(xb)
        xb = self.leaky_activation(xb)
        xb = xb.view(xb.size(0), -1)
        xb = self.fc1(xb)
        xb = torch.sigmoid(xb)
        return xb
summary(to_device(Discriminator(), device), (1, 28, 28))
----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Conv2d-1           [-1, 64, 14, 14]           1,024
         LeakyReLU-2           [-1, 64, 14, 14]               0
         Dropout2d-3           [-1, 64, 14, 14]               0
            Conv2d-4             [-1, 64, 7, 7]          65,536
       BatchNorm2d-5             [-1, 64, 7, 7]             128
         LeakyReLU-6             [-1, 64, 7, 7]               0
            Linear-7                    [-1, 1]           3,136
================================================================
Total params: 69,824
Trainable params: 69,824
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.00
Forward/backward pass size (MB): 0.36
Params size (MB): 0.27
Estimated Total Size (MB): 0.63
----------------------------------------------------------------
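The summary confirms the shape flow: two stride-2 convolutions halve 28×28 to 14×14 and then 7×7, and flattening the final 64×7×7 feature map gives the 3136 inputs to `fc1`. A one-batch forward pass (a quick illustrative check) shows the output is one probability per image:

d = to_device(Discriminator(), device)
print(d(torch.randn(4, 1, 28, 28, device=device)).shape)  # torch.Size([4, 1])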
class Generator(nn.Module):
    def __init__(self):
        super().__init__()
        # latent vector -> 128 * 7 * 7 = 6272 features
        self.fc = nn.Linear(in_features=LATENT_SIZE, out_features=128*7*7, bias=False)
        # 7x7 -> 14x14
        self.conv_transpose_1 = nn.ConvTranspose2d(in_channels=CAPACITY*2, out_channels=CAPACITY*2, kernel_size=4, stride=2, padding=1, bias=False)
        self.batch_norm_1 = nn.BatchNorm2d(CAPACITY*2)
        # 14x14 -> 28x28
        self.conv_transpose_2 = nn.ConvTranspose2d(in_channels=CAPACITY*2, out_channels=CAPACITY, kernel_size=4, stride=2, padding=1, bias=False)
        self.batch_norm_2 = nn.BatchNorm2d(CAPACITY)
        self.relu_activation = nn.ReLU(True)
        # 28x28 feature maps -> single-channel image (kernel_size=7 with padding=3 preserves size)
        self.conv = nn.Conv2d(in_channels=CAPACITY, out_channels=IMAGE_CHANNELS, kernel_size=7, stride=1, padding=3, bias=False)
        # self.conv = nn.Conv2d(in_channels=CAPACITY*2, out_channels=IMAGE_CHANNELS, kernel_size=1, stride=1, bias=False)
        self.tanh_activation = nn.Tanh()

    def forward(self, xb):
        xb = self.fc(xb)
        xb = self.relu_activation(xb)
        xb = xb.view(xb.size(0), 128, 7, 7)
        xb = self.conv_transpose_1(xb)
        xb = self.batch_norm_1(xb)
        xb = self.relu_activation(xb)
        xb = self.conv_transpose_2(xb)
        xb = self.batch_norm_2(xb)
        xb = self.relu_activation(xb)
        xb = self.conv(xb)
        xb = self.tanh_activation(xb)
        return xb
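The generator mirrors the discriminator in reverse: the 100-dimensional latent vector is projected to 128·7·7 = 6272 features, reshaped to a 128×7×7 map, upsampled twice by the stride-2 transposed convolutions (7→14→28), and mapped to one channel by the final convolution. `tanh` squashes the output to [-1, 1] to match the normalized real images.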
summary(to_device(Generator(), device), (LATENT_SIZE,))
----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Linear-1                 [-1, 6272]         627,200
              ReLU-2                 [-1, 6272]               0
   ConvTranspose2d-3          [-1, 128, 14, 14]         262,144
       BatchNorm2d-4          [-1, 128, 14, 14]             256
              ReLU-5          [-1, 128, 14, 14]               0
   ConvTranspose2d-6           [-1, 64, 28, 28]         131,072
       BatchNorm2d-7           [-1, 64, 28, 28]             128
              ReLU-8           [-1, 64, 28, 28]               0
            Conv2d-9            [-1, 1, 28, 28]           3,136
             Tanh-10            [-1, 1, 28, 28]               0
================================================================
Total params: 1,023,936
Trainable params: 1,023,936
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.00
Forward/backward pass size (MB): 1.83
Params size (MB): 3.91
Estimated Total Size (MB): 5.74
----------------------------------------------------------------
# An untrained generator turns random latent tensors into pure noise
xb = torch.randn(BATCH_SIZE, LATENT_SIZE, device="cpu")  # random latent tensors
fake_images = Generator()(xb)
show_images(fake_images.cpu().detach())
def training_discriminator(discriminator, generator, real_images, optimizer_d):
    optimizer_d.zero_grad()

    # Pass real images through discriminator
    real_preds = discriminator(real_images)
    real_targets = torch.ones(real_images.size(0), 1, device=device)
    real_loss = F.binary_cross_entropy(real_preds, real_targets)
    real_score = torch.mean(real_preds).item()

    # Generate fake images
    latent = torch.randn(BATCH_SIZE, LATENT_SIZE, device=device)
    fake_images = generator(latent)

    # Pass fake images through discriminator
    # (fake_images is not detached here, so gradients also flow into the
    # generator; they are discarded by optimizer_g.zero_grad() before the
    # generator step, but detaching would avoid the wasted computation)
    fake_targets = torch.zeros(fake_images.size(0), 1, device=device)
    fake_preds = discriminator(fake_images)
    fake_loss = F.binary_cross_entropy(fake_preds, fake_targets)
    fake_score = torch.mean(fake_preds).item()

    # Update discriminator weights only
    loss = real_loss + fake_loss
    loss.backward()
    optimizer_d.step()
    return loss.item(), real_score, fake_score
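This is the standard discriminator objective: binary cross-entropy against all-ones targets reduces to -log D(x) on real images, and against all-zeros targets to -log(1 - D(G(z))) on fakes. A tiny numeric illustration (the probabilities here are hypothetical):

p = torch.tensor([0.9])
print(F.binary_cross_entropy(p, torch.ones(1)))   # -log(0.9) ≈ 0.1054
print(F.binary_cross_entropy(p, torch.zeros(1)))  # -log(0.1) ≈ 2.3026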
def training_generator(discriminator, generator, optimizer_g):
    optimizer_g.zero_grad()

    # Generate fake images
    latent = torch.randn(BATCH_SIZE, LATENT_SIZE, device=device)
    fake_images = generator(latent)

    # Try to fool the discriminator
    preds = discriminator(fake_images)
    targets = torch.ones(BATCH_SIZE, 1, device=device)
    loss = F.binary_cross_entropy(preds, targets)

    # Update generator weights
    loss.backward()
    optimizer_g.step()
    return loss.item()
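Training the generator against all-ones targets implements the non-saturating loss -log D(G(z)) from the original GAN paper, rather than minimizing log(1 - D(G(z))) directly; this gives the generator much stronger gradients early in training, when the discriminator easily rejects its samples.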
sample_dir = 'generated'
!rm -rf {sample_dir}
os.makedirs(sample_dir)
# fixed latents for visualizing progress
fixed_latents = torch.randn(64, LATENT_SIZE, device=device)
save_samples(0, to_device(Generator(), device), fixed_latents, show=True, verbose=True)
Saving generated-images-0000.png
def fit(generator, discriminator, epochs, lr):
    torch.cuda.empty_cache()

    # Per-epoch history
    generator_loss = []
    discriminator_loss = []
    generator_score = []
    discriminator_score = []

    # Create optimizers
    optimizer_generator = torch.optim.Adam(generator.parameters(),
                                           lr=lr,
                                           betas=(0.5, 0.999))
    optimizer_discriminator = torch.optim.Adam(discriminator.parameters(),
                                               lr=lr,
                                               betas=(0.5, 0.999))

    for epoch in range(epochs):
        # Losses & scores for this epoch
        losses_g = []
        losses_d = []
        real_scores = []
        fake_scores = []
        for real_images, _ in mnist:
            # Train discriminator
            loss_d, real_score, fake_score = training_discriminator(discriminator,
                                                                    generator,
                                                                    real_images,
                                                                    optimizer_discriminator)
            # Train generator
            loss_g = training_generator(discriminator,
                                        generator,
                                        optimizer_generator)
            # Record losses & scores
            losses_g.append(loss_g)
            losses_d.append(loss_d)
            real_scores.append(real_score)
            fake_scores.append(fake_score)

        losses_g_mean = sum(losses_g) / len(losses_g)
        losses_d_mean = sum(losses_d) / len(losses_d)
        real_scores_mean = sum(real_scores) / len(real_scores)
        fake_scores_mean = sum(fake_scores) / len(fake_scores)

        generator_loss.append(losses_g_mean)
        discriminator_loss.append(losses_d_mean)
        discriminator_score.append(real_scores_mean)
        generator_score.append(fake_scores_mean)

        # Log losses & scores (epoch averages)
        print(f"Epoch [{epoch+1:03}/{epochs}], loss_g: {losses_g_mean:.4f}, loss_d: {losses_d_mean:.4f}, real_score: {real_scores_mean:.4f}, fake_score: {fake_scores_mean:.4f}")

        # Save generated images from the fixed latents
        with torch.no_grad():
            save_samples(epoch+1, generator, fixed_latents, show=False, verbose=False)

    return generator_loss, discriminator_loss, discriminator_score, generator_score
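The `betas=(0.5, 0.999)` setting follows the DCGAN paper (Radford et al., 2016), which found Adam's default momentum term of 0.9 unstable for GAN training and recommended 0.5 instead; the learning rate of 0.0002 comes from the same paper.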
jovian.reset()
jovian.log_hyperparams(lr=LEARNING_RATE,
                       epochs=EPOCHS,
                       BATCH_SIZE=BATCH_SIZE,
                       channels=CAPACITY,
                       LATENT_SIZE=LATENT_SIZE)
[jovian] Hyperparams logged.
# models
generator_model = Generator()
discriminator_model = Discriminator()
generator_model = to_device(generator_model, device=device)
discriminator_model = to_device(discriminator_model, device=device)
history = fit(generator_model, discriminator_model, EPOCHS, LEARNING_RATE)
Epoch [001/100], loss_g: 0.8867, loss_d: 1.2724, real_score: 0.5469, fake_score: 0.4645
Epoch [002/100], loss_g: 0.8135, loss_d: 1.2782, real_score: 0.5322, fake_score: 0.4654
Epoch [003/100], loss_g: 0.8674, loss_d: 1.2132, real_score: 0.5502, fake_score: 0.4452
...
Epoch [098/100], loss_g: 0.7164, loss_d: 1.3820, real_score: 0.5051, fake_score: 0.4948
Epoch [099/100], loss_g: 0.7159, loss_d: 1.3799, real_score: 0.5056, fake_score: 0.4946
Epoch [100/100], loss_g: 0.7165, loss_d: 1.3826, real_score: 0.5048, fake_score: 0.4950
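Both scores settle near 0.5, meaning the discriminator is close to chance on real and fake images alike. Consistently, loss_d approaches 2·log 2 ≈ 1.386 (the value when D outputs 0.5 for everything) and loss_g hovers near log 2 ≈ 0.693, which is what the theoretical GAN equilibrium predicts.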
# log and plot metrics
losses_g, losses_d, real_scores, fake_scores = history
jovian.log_metrics(loss_g=losses_g[-1],
                   loss_d=losses_d[-1],
                   real_score=real_scores[-1],
                   fake_score=fake_scores[-1])
[jovian] Metrics logged.
import imageio
from numpy import asarray
import glob
from PIL import Image
import IPython.display as disp

generated_file = './generation.gif'
filenames = sorted(glob.glob('/content/generated/*.png'))  # sorted so frames play in epoch order
imgs = [asarray(Image.open(img)) for img in filenames]
imageio.mimsave(generated_file, imgs)

with open(generated_file, 'rb') as file:
    disp.display(disp.Image(file.read()))
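If the animation plays too fast, the GIF writer can be given a frame rate; a sketch, assuming an imageio version whose GIF plugin still accepts `fps` (older v2 releases do; newer ones use `duration` instead):

imageio.mimsave(generated_file, imgs, fps=5)  # assumption: fps supported by this imageio version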
plt.plot(losses_d, '-')
plt.plot(losses_g, '-')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['Discriminator', 'Generator'])
plt.title('Losses');
plt.plot(real_scores, '-')
plt.plot(fake_scores, '-')
plt.xlabel('epoch')
plt.ylabel('score')
plt.legend(['Real', 'Fake'])
plt.title('Scores');
# save model
torch.save(generator_model.to("cpu"), 'generator.pt')
torch.save(discriminator_model.to("cpu"), 'discriminator.pt')
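Saving whole module objects pickles the class definitions along with the weights, so loading requires the exact same code layout. A more portable alternative (a sketch, not what this notebook does; the filename is hypothetical) is to save and restore state dicts:

torch.save(generator_model.state_dict(), 'generator_weights.pt')  # weights only
generator_reloaded = Generator()
generator_reloaded.load_state_dict(torch.load('generator_weights.pt'))
generator_reloaded.eval()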
jovian.commit(project="DCGAN_MNIST",
              outputs=['generator.pt', 'discriminator.pt', 'generation.gif'],
              environment=None)
[jovian] Detected Colab notebook...
[jovian] Uploading colab notebook to Jovian...
[jovian] Uploading additional outputs...
[jovian] Attaching records (metrics, hyperparameters, dataset etc.)
[jovian] Committed successfully! https://jovian.ai/vaibhav-singh-3001/dcgan-mnist
'https://jovian.ai/vaibhav-singh-3001/dcgan-mnist'