import pandas
import tqdm
import gzip
import os
import urllib.request
import pickle
import numpy
import time
import PIL.Image
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as dataset
import scipy.signal as signal
import matplotlib.pyplot as plt
import matplotlib as mpl
DATA_URL = ('https://crudata.uea.ac.uk/cru/data/hrg/cru_ts_4.01/'
            'cruts.1709081022.v4.01/tmp/cru_ts4.01.1901.2016.tmp.dat.gz')
DATA_DIR = 'data'
DATA_CSV = os.path.join(DATA_DIR, 'cru_ts4.01.1901.2016.tmp.dat.gz')
DATA_PKL = os.path.join(DATA_DIR, 'cru_ts4.01.1901.2016.tmp.dat.pkl')
SAMPLE_SIZE = 519718
EPOCH = 30
BATCH_SIZE = 128
LEARNING_RATE = 0.01
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0001
class TqdmUpTo(tqdm.tqdm):
    '''copied from https://github.com/tqdm/tqdm/blob/master/examples/tqdm_wget.py'''
    def update_to(self, b=1, bsize=1, tsize=None):
        if tsize is not None:
            self.total = tsize
        self.update(b * bsize - self.n)
def load_data():
    os.makedirs(DATA_DIR, exist_ok=True)
    # download
    if not os.path.isfile(DATA_CSV):
        with TqdmUpTo(unit='B', unit_scale=True,
                      unit_divisor=1024, desc=os.path.basename(DATA_CSV)) as t:
            urllib.request.urlretrieve(DATA_URL, DATA_CSV, reporthook=t.update_to)
    # cache as a pickle, dropping grid cells with missing values (-999)
    if not os.path.isfile(DATA_PKL):
        with gzip.open(DATA_CSV, 'r') as reader:
            # sep=r'\s+' replaces the deprecated delim_whitespace=True
            data = pandas.read_csv(reader, sep=r'\s+', header=None).values
        data = data.reshape((-1, 360 * 720))
        data = data[:, ((data == -999).sum(axis=0)) == 0].transpose().astype("int32")
        with open(DATA_PKL, 'wb') as writer:
            pickle.dump(data, writer)
    with open(DATA_PKL, 'rb') as reader:
        return pickle.load(reader)
data = load_data()
data = (data - data.min()) * (1.0 / (data.max() - data.min()))
print('available data size:', data.shape)
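# Not in the original: a shape sketch. The CRU TS grid is 360 x 720 cells
# per month; after dropping cells with missing values, each row of `data`
# holds one cell's 1392 monthly temperatures (116 years, 1901-2016).
assert data.shape[1] == 116 * 12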
year_mean = data.reshape(data.shape[0], -1, 12)
year_mean = numpy.average(year_mean, axis=2)
future_mean = signal.convolve2d(
    year_mean[:, 30:], numpy.ones((1, 10)) / 10, mode='valid')
past_mean = signal.convolve2d(
    year_mean[:, :-10], numpy.ones((1, 30)) / 30, mode='valid')
# numpy.float was removed in NumPy 1.20; the builtin float is equivalent here
chance = (future_mean > past_mean).astype(float)
chance = numpy.average(chance, axis=0)
plt.plot(range(1930, 2007), chance)
plt.xlabel('year')
plt.ylabel('probability of rising temperatures')
plt.show()
print('chance:', numpy.average(chance))
cru_ts4.01.1901.2016.tmp.dat.gz: 144MB [00:33, 4.49MB/s]
available data size: (67420, 1392)
chance: 0.6983876995149614
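# Not in the original: a quick check of the window alignment above. Each
# position compares a 10-year future mean with the preceding 30-year past
# mean, so 116 years give 116 - 40 + 1 = 77 positions (years 1930-2006).
assert future_mean.shape[1] == past_mean.shape[1] == 77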
class Dataset(dataset.Dataset):
    def __init__(self, data, indices):
        self.data = data
        self.indices = indices
    def __len__(self):
        return len(self.indices)
    def __getitem__(self, idx):
        # decode the flat index into a grid cell and a start year
        pos = self.indices[idx] % self.data.shape[0]
        year = self.indices[idx] // self.data.shape[0]
        past = self.data[pos, (year + 0) * 12:(year + 30) * 12]
        future = self.data[pos, (year + 30) * 12:(year + 40) * 12]
        # label 1 if the 10-year future mean exceeds the 30-year past mean
        target = 1 if numpy.average(future) > numpy.average(past) else 0
        # keep the first 28 of the 30 past years to form a 1x28x12 image
        img = past.reshape(-1, 12)
        img = img[None, :28].astype(numpy.float32)
        return img, target
numpy.random.seed(0)
indices = numpy.random.choice(
    data.shape[0] * (data.shape[1] // 12 - 39), SAMPLE_SIZE, replace=False)
train_dataset = Dataset(data, indices[len(indices) // 4:])
valid_dataset = Dataset(data, indices[:len(indices) // 4])
print('train data size:', len(train_dataset))
print('valid data size:', len(valid_dataset))
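# Not in the original: verify one sample against the 28x12 crop in
# Dataset.__getitem__ above.
img, target = train_dataset[0]
assert img.shape == (1, 28, 12) and target in (0, 1)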
for i in range(8):
    image = train_dataset[i][0]
    plt.subplot(2, 4, i + 1)
    plt.tick_params(
        labelbottom=False, labelleft=False, labelright=False, labeltop=False)
    plt.gray()
    plt.imshow(image[0])
plt.show()
train data size: 389789
valid data size: 129929
class Identity(nn.Module):
    '''shortcut that matches the stride and channel count of the residual path'''
    def __init__(self, out_channels, stride):
        super().__init__()
        self.out_channels = out_channels
        self.stride = stride
    def forward(self, x):
        if self.stride != 1:
            # subsample spatially instead of convolving
            x = x[:, :, ::self.stride, ::self.stride]
        if self.out_channels < x.shape[1]:
            return x[:, :self.out_channels]
        elif self.out_channels > x.shape[1]:
            # zero-pad the missing channels
            return nn.functional.pad(
                x, (0, 0, 0, 0, 0, self.out_channels - x.shape[1]), 'constant', 0)
        else:
            return x
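# Not in the original: a minimal shape check for the shortcut, assuming a
# dummy batch of 16-channel feature maps.
assert Identity(32, 2)(torch.zeros(2, 16, 14, 6)).shape == (2, 32, 7, 3)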
class ResBlock(nn.Module):
    def __init__(self, in_channels, out_channels, stride):
        super().__init__()
        self.identity = Identity(out_channels, stride)
        # pre-activation block built from depthwise separable convolutions
        self.op = nn.Sequential(
            nn.BatchNorm2d(in_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=stride, padding=1,
                      groups=in_channels, bias=False),
            nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1,
                      groups=out_channels, bias=False),
            nn.Conv2d(out_channels, out_channels, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(out_channels))
    def forward(self, x):
        return self.identity(x) + self.op(x)
class ResSection(nn.Module):
    def __init__(self, blocks, in_channels, out_channels, stride):
        super().__init__()
        self.blocks = nn.Sequential(
            ResBlock(in_channels, out_channels, stride),
            *[ResBlock(out_channels, out_channels, 1) for _ in range(blocks - 1)])
    def forward(self, x):
        return self.blocks(x)
class ResNet(nn.Module):
    def __init__(self):
        super().__init__()
        channels_list = [16] + [16 * (2 ** i) for i in range(3)]  # [16, 16, 32, 64]
        self.input = nn.Sequential(
            nn.Conv2d(1, channels_list[0], 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(channels_list[0]))
        self.sections = nn.Sequential(*[
            ResSection(6, ic, oc, 1 if i == 0 else 2)
            for i, (ic, oc) in enumerate(zip(channels_list[:-1], channels_list[1:]))])
        self.pool = nn.Sequential(
            nn.BatchNorm2d(channels_list[-1]),
            nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d(1))
        self.output = nn.Linear(channels_list[-1], 2, bias=False)
    def forward(self, x):
        x = self.input(x)
        x = self.sections(x)
        x = self.pool(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
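# Not in the original: a forward-pass sketch on a dummy batch, assuming the
# 1x28x12 inputs produced by Dataset above.
with torch.no_grad():
    assert ResNet()(torch.zeros(4, 1, 28, 12)).shape == (4, 2)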
model = ResNet()
optimizer = optim.SGD(
    model.parameters(), lr=LEARNING_RATE,
    momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
criterion = nn.CrossEntropyLoss()
print('model size:', sum(p.numel() for p in model.parameters()))
model size: 77984
class AverageMeter(object):
    def __init__(self):
        self.reset()
    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def accuracy(output, target):
    with torch.no_grad():
        return output.argmax(dim=1).eq(target).float().sum() / target.size(0)
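# Not in the original: accuracy() on a toy batch, for illustration; both
# predictions match their targets, so the result is 1.0.
assert float(accuracy(torch.tensor([[0.1, 0.9], [0.8, 0.2]]),
                      torch.tensor([1, 0]))) == 1.0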
def perform(model, loader, optimizer=None):
    '''run one epoch; pass an optimizer to train, omit it to evaluate'''
    loss_avg = AverageMeter()
    acc_avg = AverageMeter()
    for x, t in loader:
        x = x.cuda(non_blocking=True)
        t = t.cuda(non_blocking=True)
        # forward
        y = model(x)
        loss = criterion(y, t)
        acc = accuracy(y, t)
        # update parameters
        if optimizer is not None:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # update running averages
        loss_avg.update(float(loss), x.size(0))
        acc_avg.update(float(acc), x.size(0))
    return loss_avg.avg, acc_avg.avg
model = model.cuda()
criterion = criterion.cuda()
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
train_loader = dataset.DataLoader(
    train_dataset, batch_size=BATCH_SIZE, shuffle=True,
    pin_memory=True, num_workers=2)
valid_loader = dataset.DataLoader(
    valid_dataset, batch_size=BATCH_SIZE, shuffle=False,
    pin_memory=True, num_workers=2)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, EPOCH)
train_time = AverageMeter()
valid_time = AverageMeter()
train_loss = []
train_acc = []
valid_loss = []
valid_acc = []
for epoch in range(EPOCH):
    start_time = time.time()
    model.train()
    loss, acc = perform(model, train_loader, optimizer)
    train_loss.append(loss)
    train_acc.append(acc)
    train_time.update(time.time() - start_time)
    print('[{}] train: loss={:.4f}, accuracy={:.4f}'.format(epoch, loss, acc))
    start_time = time.time()
    model.eval()
    with torch.no_grad():
        loss, acc = perform(model, valid_loader)
    valid_loss.append(loss)
    valid_acc.append(acc)
    valid_time.update(time.time() - start_time)
    print('[{}] valid: loss={:.4f}, accuracy={:.4f}'.format(epoch, loss, acc))
    # since PyTorch 1.1, scheduler.step() belongs after the optimizer updates
    scheduler.step()
print('train time/epoch: {:.4f} sec'.format(train_time.avg))
print('valid time/epoch: {:.4f} sec'.format(valid_time.avg))
[0] train: loss=0.5290, accuracy=0.7421
[0] valid: loss=0.4720, accuracy=0.7580
[1] train: loss=0.3846, accuracy=0.8215
[1] valid: loss=0.3464, accuracy=0.8406
[2] train: loss=0.3288, accuracy=0.8512
[2] valid: loss=0.3736, accuracy=0.8302
[3] train: loss=0.2879, accuracy=0.8730
[3] valid: loss=0.4038, accuracy=0.8082
[4] train: loss=0.2606, accuracy=0.8874
[4] valid: loss=0.2660, accuracy=0.8855
[5] train: loss=0.2391, accuracy=0.8972
[5] valid: loss=0.2507, accuracy=0.8916
[6] train: loss=0.2228, accuracy=0.9046
[6] valid: loss=0.2661, accuracy=0.8854
[7] train: loss=0.2095, accuracy=0.9113
[7] valid: loss=0.2053, accuracy=0.9132
[8] train: loss=0.1969, accuracy=0.9174
[8] valid: loss=0.2294, accuracy=0.9009
[9] train: loss=0.1865, accuracy=0.9219
[9] valid: loss=0.1984, accuracy=0.9165
[10] train: loss=0.1775, accuracy=0.9265
[10] valid: loss=0.2559, accuracy=0.8838
[11] train: loss=0.1687, accuracy=0.9302
[11] valid: loss=0.1744, accuracy=0.9284
[12] train: loss=0.1618, accuracy=0.9333
[12] valid: loss=0.1797, accuracy=0.9228
[13] train: loss=0.1531, accuracy=0.9371
[13] valid: loss=0.1692, accuracy=0.9298
[14] train: loss=0.1455, accuracy=0.9405
[14] valid: loss=0.1526, accuracy=0.9382
[15] train: loss=0.1383, accuracy=0.9434
[15] valid: loss=0.1628, accuracy=0.9339
[16] train: loss=0.1321, accuracy=0.9461
[16] valid: loss=0.2259, accuracy=0.9157
[17] train: loss=0.1243, accuracy=0.9493
[17] valid: loss=0.1858, accuracy=0.9245
[18] train: loss=0.1167, accuracy=0.9523
[18] valid: loss=0.1293, accuracy=0.9476
[19] train: loss=0.1098, accuracy=0.9555
[19] valid: loss=0.1244, accuracy=0.9503
[20] train: loss=0.1030, accuracy=0.9582
[20] valid: loss=0.1236, accuracy=0.9505
[21] train: loss=0.0960, accuracy=0.9611
[21] valid: loss=0.1134, accuracy=0.9552
[22] train: loss=0.0891, accuracy=0.9640
[22] valid: loss=0.1090, accuracy=0.9570
[23] train: loss=0.0816, accuracy=0.9670
[23] valid: loss=0.1063, accuracy=0.9585
[24] train: loss=0.0761, accuracy=0.9694
[24] valid: loss=0.1083, accuracy=0.9589
[25] train: loss=0.0703, accuracy=0.9718
[25] valid: loss=0.1004, accuracy=0.9615
[26] train: loss=0.0661, accuracy=0.9736
[26] valid: loss=0.0986, accuracy=0.9628
[27] train: loss=0.0622, accuracy=0.9755
[27] valid: loss=0.0972, accuracy=0.9632
[28] train: loss=0.0611, accuracy=0.9761
[28] valid: loss=0.0981, accuracy=0.9630
[29] train: loss=0.0607, accuracy=0.9762
[29] valid: loss=0.0982, accuracy=0.9628
train time/epoch: 149.0084 sec
valid time/epoch: 18.3645 sec
print('train: loss={:.4f}, accuracy={:.4f}'.format(train_loss[-1], train_acc[-1]))
print('valid: loss={:.4f}, accuracy={:.4f}'.format(valid_loss[-1], valid_acc[-1]))
figure = plt.figure(figsize=(8, 10))
axis_loss, axis_acc = figure.subplots(2, 1)
mpl.rcParams["legend.loc"] = 'upper right'
axis_loss.set_xlabel('epoch')
axis_loss.set_ylabel('loss')
axis_loss.plot(range(len(train_loss)), train_loss, linestyle='--', label='training')
axis_loss.plot(range(len(valid_loss)), valid_loss, linestyle='-', label='validation')
axis_loss.legend()
axis_acc.set_xlabel('epoch')
axis_acc.set_ylabel('accuracy')
axis_acc.plot(range(len(train_acc)), train_acc, linestyle='--', label='training')
axis_acc.plot(range(len(valid_acc)), valid_acc, linestyle='-', label='validation')
axis_acc.legend()
plt.show()
train: loss=0.0607, accuracy=0.9762
valid: loss=0.0982, accuracy=0.9628
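# Not in the original: a sketch of single-sample inference with the trained
# model, assuming a sample drawn from valid_dataset above.
img, target = valid_dataset[0]
model.eval()
with torch.no_grad():
    prob = torch.softmax(model(torch.from_numpy(img)[None].cuda()), dim=1)
print('P(rising) = {:.4f} (label: {})'.format(float(prob[0, 1]), target))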