import pandas
import tqdm
import gzip
import os
import urllib.request
import pickle
import numpy
import time
import PIL.Image
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as dataset
import scipy.signal as signal
import matplotlib.pyplot as plt
import matplotlib as mpl
# Source: CRU TS v4.01 gridded monthly mean temperature, 1901-2016.
DATA_URL = ('https://crudata.uea.ac.uk/cru/data/hrg/cru_ts_4.01/'
            'cruts.1709081022.v4.01/tmp/cru_ts4.01.1901.2016.tmp.dat.gz')
DATA_DIR = 'data'
DATA_CSV = os.path.join(DATA_DIR, 'cru_ts4.01.1901.2016.tmp.dat.gz')
DATA_PKL = os.path.join(DATA_DIR, 'cru_ts4.01.1901.2016.tmp.dat.pkl')
# Total number of candidate samples; the split sizes are derived from it
# instead of repeating the magic number 519718 three more times.
SAMPLE_SIZE = 519718
TRAIN_SIZE = int(SAMPLE_SIZE * 0.75)  # 389788
VALID_SIZE = int(SAMPLE_SIZE * 0.25)  # 129929
TEST_SIZE = int(SAMPLE_SIZE * 0.25)   # 129929
EPOCH = 30
BATCH_SIZE = 128
LEARNING_RATE = 0.01
class TqdmUpTo(tqdm.tqdm):
    """tqdm bar whose `update_to` matches urlretrieve's reporthook signature.

    Adapted from https://github.com/tqdm/tqdm/blob/master/examples/tqdm_wget.py
    """

    def update_to(self, b=1, bsize=1, tsize=None):
        """Report progress: `b` blocks of `bsize` bytes, `tsize` total bytes."""
        if tsize is not None:
            self.total = tsize
        # tqdm.update takes an increment, so subtract what is already counted.
        self.update(b * bsize - self.n)
def load_data():
    """Download, clean and cache the CRU temperature data.

    Returns an int32 array with one row per grid cell and one column per
    month, keeping only cells without any missing (-999) values.  Both the
    raw .gz download and the cleaned pickle are cached under DATA_DIR.
    """
    os.makedirs(DATA_DIR, exist_ok=True)
    # Download the raw gzipped file once, with a progress bar.
    if not os.path.isfile(DATA_CSV):
        with TqdmUpTo(unit='B', unit_scale=True,
                      unit_divisor=1024, desc=os.path.basename(DATA_CSV)) as t:
            urllib.request.urlretrieve(DATA_URL, DATA_CSV, reporthook=t.update_to)
    # Parse and clean once, then cache the result as a pickle.
    if not os.path.isfile(DATA_PKL):
        # Open in text mode; sep=r'\s+' replaces the deprecated
        # delim_whitespace=True (removed in pandas 2.2+).
        with gzip.open(DATA_CSV, 'rt') as reader:
            data = pandas.read_csv(reader, sep=r'\s+', header=None).values
        # One row per month: 360 x 720 lat/lon cells flattened per row.
        data = data.reshape((-1, 360 * 720))
        # -999 marks missing values; keep only columns (cells) that are
        # complete for every month, then transpose to (cells, months).
        data = data[:, ((data == -999).sum(axis=0)) == 0].transpose().astype("int32")
        with open(DATA_PKL, 'wb') as writer:
            pickle.dump(data, writer)
    with open(DATA_PKL, 'rb') as reader:
        return pickle.load(reader)
data = load_data()
# Min-max normalize to [0, 1]; promotes the int32 data to float64.
data = (data - data.min()) * (1.0 / (data.max() - data.min()))
print('available data size:', data.shape)
# Annual mean temperature per grid cell: shape (cells, years).
year_mean = data.reshape(data.shape[0], -1, 12)
year_mean = numpy.average(year_mean, axis=2)
# For each window start: mean of the 10 "future" years that follow a
# 30-year "past" window, and mean of those 30 past years.
future_mean = signal.convolve2d(
    year_mean[:, 30:], numpy.ones((1, 10)) / 10, mode='valid')
past_mean = signal.convolve2d(
    year_mean[:, :-10], numpy.ones((1, 30)) / 30, mode='valid')
# Fixed: numpy.float was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin float is the documented replacement.
chance = (future_mean > past_mean).astype(float)
chance = numpy.average(chance, axis=0)
plt.plot(range(1930, 2007), chance)
plt.xlabel('year')
# Fixed typo in the axis label: "tempalatures" -> "temperatures".
plt.ylabel('probability of rising temperatures')
plt.show()
print('chance(1931-2016):', numpy.average(chance))
print('chance(1982-2016):', numpy.average(chance[-26:]))
available data size: (67420, 1392)
chance(1931-2016): 0.6983876995149614 chance(1982-2016): 0.9552649293749856
class Dataset(dataset.Dataset):
    """Serves (image, label) pairs built from the temperature grid.

    Each sample index encodes a (grid cell, window start year) pair.  The
    30-year past window becomes a 60x60 pseudo-color image; the label is 1
    when the mean of the following 10 years exceeds the past-window mean.
    """

    def __init__(self, data, indices):
        self.data = data        # (cells, months) normalized temperatures
        self.indices = indices  # flat sample ids: year * n_cells + cell

    def __len__(self):
        return len(self.indices)

    def __getitem__(self, idx):
        sample = self.indices[idx]
        n_cells = self.data.shape[0]
        pos, year = sample % n_cells, sample // n_cells
        past = self.data[pos, year * 12:(year + 30) * 12]
        future = self.data[pos, (year + 30) * 12:(year + 40) * 12]
        target = int(numpy.average(future) > numpy.average(past))
        # Lay the past window out as (30 years x 12 months), upscale to 60x60.
        img = PIL.Image.fromarray(past.reshape(-1, 12))
        img = numpy.array(img.resize((60, 60), resample=PIL.Image.NEAREST)).T
        # Heat coloring: red fixed on, temperature in green, blue off.
        img = numpy.stack(
            [numpy.ones_like(img), img, numpy.zeros_like(img)], axis=0)
        return img, target
# Fixed seed so the subsampling below is reproducible.  NOTE(review): the
# numpy.random.choice calls consume the RNG stream in order — do not reorder.
numpy.random.seed(0)
# risefall[cell, start] is True when that cell's next-10-year mean exceeds
# its previous-30-year mean (computed above).
risefall = future_mean > past_mean
# Training data: everything before the last 36 years (per the original note,
# 1901-1981 with labels 1931-1981).
train_data = data[:, :-36 * 12] # 1901-1981 (label: 1931-1981)
# Flatten (start year, cell) -> flat index; matches Dataset's idx decoding
# (idx % n_cells = cell, idx // n_cells = year).
train_risefall = risefall[:, :-36].T.reshape(-1)
# Class-balanced sampling: equal numbers of "rise" and "fall" examples.
train_rise_indices = numpy.random.choice(
    numpy.where(train_risefall == 1)[0], TRAIN_SIZE // 2, replace=False)
train_fall_indices = numpy.random.choice(
    numpy.where(train_risefall == 0)[0], TRAIN_SIZE // 2, replace=False)
train_indices = numpy.concatenate([train_rise_indices, train_fall_indices])
train_dataset = Dataset(train_data, train_indices)
# Validation data: last 65 years, enough for the 40-year (30 past + 10
# future) windows of the last 26 start years.
valid_data = data[:, -65 * 12:] # 1952-2016 (label: 1982-2016)
valid_risefall = risefall[:, -26:].T.reshape(-1)
valid_rise_indices = numpy.random.choice(
    numpy.where(valid_risefall == 1)[0], VALID_SIZE // 2, replace=False)
valid_fall_indices = numpy.random.choice(
    numpy.where(valid_risefall == 0)[0], VALID_SIZE // 2, replace=False)
valid_indices = numpy.concatenate([valid_rise_indices, valid_fall_indices])
valid_dataset = Dataset(valid_data, valid_indices)
# Test data: same period as validation, but sampled uniformly (not class
# balanced), so test indices may overlap validation indices.
test_data = data[:, -65 * 12:] # 1952-2016 (label: 1982-2016)
test_indices = numpy.random.choice(
    risefall[:, -26:].T.reshape(-1).shape[0], TEST_SIZE, replace=False)
test_dataset = Dataset(test_data, test_indices)
# Report split sizes and preview the first eight training images.
for split_name, split in (('train', train_dataset),
                          ('valid', valid_dataset),
                          ('test', test_dataset)):
    print(f'{split_name} data size: {len(split)}')
for i in range(8):
    img, _ = train_dataset[i]
    plt.subplot(2, 4, i + 1)
    plt.tick_params(
        labelbottom=False, labelleft=False, labelright=False, labeltop=False)
    # Channel-first -> channel-last for imshow.
    plt.imshow(numpy.rollaxis(img, 0, 3))
plt.show()
train data size: 389788 valid data size: 129928 test data size: 129929
class LeNet(nn.Module):
    """LeNet-style CNN mapping a 3x60x60 image to 2-class logits."""

    def __init__(self):
        super().__init__()
        # Two conv/tanh/pool stages: 3x60x60 -> 6x28x28 -> 16x12x12.
        self.conv = nn.Sequential(
            nn.Conv2d(3, 6, kernel_size=5),
            nn.Tanh(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(6, 16, kernel_size=5),
            nn.Tanh(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # Classifier head over the flattened 16x12x12 feature map.
        self.full = nn.Sequential(
            nn.Linear(16 * 12 * 12, 120),
            nn.Tanh(),
            nn.Linear(120, 84),
            nn.Tanh(),
            nn.Linear(84, 2),
        )

    def forward(self, x):
        """Return class logits of shape (batch, 2)."""
        features = self.conv(x)
        return self.full(features.view(features.size(0), -1))
# Instantiate the network with plain SGD and a cross-entropy objective.
model = LeNet()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE)
print(f'model size: {sum(p.numel() for p in model.parameters())}')
model size: 289806
class AverageMeter(object):
    """Tracks the latest value plus a running weighted average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0    # most recent value
        self.sum = 0    # weighted total
        self.count = 0  # total weight
        self.avg = 0    # sum / count

    def update(self, val, n=1):
        """Record `val` with weight `n` and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def accuracy(output, target):
    """Fraction of rows in `output` whose argmax equals `target` (a tensor)."""
    with torch.no_grad():
        correct = output.argmax(dim=1).eq(target).float()
        return correct.sum() / target.size(0)
def perform(model, loader, optimizer=None):
    """Run one full pass over `loader`; train when `optimizer` is given.

    Returns (mean loss, mean accuracy) weighted by batch size.  Uses the
    module-level `criterion`; batches are moved to the GPU.
    """
    loss_meter = AverageMeter()
    acc_meter = AverageMeter()
    for inputs, targets in loader:
        inputs = inputs.cuda(non_blocking=True)
        targets = targets.cuda(non_blocking=True)
        # Forward pass.
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        acc = accuracy(outputs, targets)
        # Training mode: backprop and apply one optimizer step per batch.
        if optimizer is not None:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # Accumulate batch statistics.
        batch = inputs.size(0)
        loss_meter.update(float(loss), batch)
        acc_meter.update(float(acc), batch)
    return loss_meter.avg, acc_meter.avg
# Move model and loss to the GPU and enable cuDNN autotuning.
model = model.cuda()
criterion = criterion.cuda()
# Fixed: was `torch.torch.backends...`, which only works through torch's
# accidental self-reference; address the backends module directly.
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
train_loader = dataset.DataLoader(
    train_dataset, batch_size=BATCH_SIZE, shuffle=True,
    pin_memory=True, num_workers=2)
valid_loader = dataset.DataLoader(
    valid_dataset, batch_size=BATCH_SIZE, shuffle=False,
    pin_memory=True, num_workers=2)
# NOTE: the misspelled name `schesuler` is kept because the training loop
# below refers to it.
schesuler = optim.lr_scheduler.CosineAnnealingLR(optimizer, EPOCH)
# Timing meters and per-epoch history for the plots at the end.
train_time = AverageMeter()
valid_time = AverageMeter()
train_loss = []
train_acc = []
valid_loss = []
valid_acc = []
for epoch in range(EPOCH):
    # --- training pass ---
    start_time = time.time()
    model.train()
    loss, acc = perform(model, train_loader, optimizer)
    train_loss.append(loss)
    train_acc.append(acc)
    train_time.update(time.time() - start_time)
    print('[{}] train: loss={:.4f}, accuracy={:.4f}'.format(epoch, loss, acc))
    # Fixed: step the LR scheduler AFTER the epoch's optimizer updates.
    # Stepping at the top of the loop (as before) skips the initial learning
    # rate and shifts the whole cosine schedule by one epoch; PyTorch >= 1.1
    # requires optimizer.step() before scheduler.step().
    schesuler.step()
    # --- validation pass (no gradients) ---
    start_time = time.time()
    model.eval()
    with torch.no_grad():
        loss, acc = perform(model, valid_loader)
    valid_loss.append(loss)
    valid_acc.append(acc)
    valid_time.update(time.time() - start_time)
    print('[{}] valid: loss={:.4f}, accuracy={:.4f}'.format(epoch, loss, acc))
print('train time/epoch: {:.4f} sec'.format(train_time.avg))
print('valid time/epoch: {:.4f} sec'.format(valid_time.avg))
[0] train: loss=0.6918, accuracy=0.5212 [0] valid: loss=0.6912, accuracy=0.5021 [1] train: loss=0.6906, accuracy=0.5297 [1] valid: loss=0.6883, accuracy=0.5114 [2] train: loss=0.6875, accuracy=0.5395 [2] valid: loss=0.6857, accuracy=0.5377 [3] train: loss=0.6694, accuracy=0.5805 [3] valid: loss=0.6465, accuracy=0.6162 [4] train: loss=0.6301, accuracy=0.6320 [4] valid: loss=0.5899, accuracy=0.6696 [5] train: loss=0.6017, accuracy=0.6607 [5] valid: loss=0.5945, accuracy=0.6714 [6] train: loss=0.5827, accuracy=0.6811 [6] valid: loss=0.5689, accuracy=0.6926 [7] train: loss=0.5596, accuracy=0.7020 [7] valid: loss=0.5557, accuracy=0.7059 [8] train: loss=0.5275, accuracy=0.7276 [8] valid: loss=0.6015, accuracy=0.6823 [9] train: loss=0.4947, accuracy=0.7499 [9] valid: loss=0.6140, accuracy=0.6940 [10] train: loss=0.4664, accuracy=0.7676 [10] valid: loss=0.6990, accuracy=0.6574 [11] train: loss=0.4405, accuracy=0.7839 [11] valid: loss=0.6652, accuracy=0.6700 [12] train: loss=0.4176, accuracy=0.7969 [12] valid: loss=0.7058, accuracy=0.6639 [13] train: loss=0.3978, accuracy=0.8095 [13] valid: loss=0.7156, accuracy=0.6648 [14] train: loss=0.3796, accuracy=0.8199 [14] valid: loss=0.7516, accuracy=0.6545 [15] train: loss=0.3647, accuracy=0.8289 [15] valid: loss=0.7899, accuracy=0.6526 [16] train: loss=0.3520, accuracy=0.8365 [16] valid: loss=0.7783, accuracy=0.6612 [17] train: loss=0.3413, accuracy=0.8428 [17] valid: loss=0.7951, accuracy=0.6580 [18] train: loss=0.3328, accuracy=0.8475 [18] valid: loss=0.9119, accuracy=0.6333 [19] train: loss=0.3254, accuracy=0.8518 [19] valid: loss=0.8454, accuracy=0.6613 [20] train: loss=0.3196, accuracy=0.8548 [20] valid: loss=0.8592, accuracy=0.6519 [21] train: loss=0.3146, accuracy=0.8573 [21] valid: loss=0.8924, accuracy=0.6430 [22] train: loss=0.3109, accuracy=0.8597 [22] valid: loss=0.8911, accuracy=0.6453 [23] train: loss=0.3078, accuracy=0.8613 [23] valid: loss=0.8896, accuracy=0.6459 [24] train: loss=0.3054, accuracy=0.8628 [24] 
valid: loss=0.8924, accuracy=0.6462 [25] train: loss=0.3037, accuracy=0.8638 [25] valid: loss=0.8903, accuracy=0.6466 [26] train: loss=0.3027, accuracy=0.8642 [26] valid: loss=0.9040, accuracy=0.6459 [27] train: loss=0.3020, accuracy=0.8646 [27] valid: loss=0.8964, accuracy=0.6476 [28] train: loss=0.3016, accuracy=0.8648 [28] valid: loss=0.8963, accuracy=0.6464 [29] train: loss=0.3015, accuracy=0.8649 [29] valid: loss=0.8963, accuracy=0.6464 train time/epoch: 80.6036 sec valid time/epoch: 23.8461 sec
# Final evaluation on the held-out test split.
test_loader = dataset.DataLoader(
    test_dataset, batch_size=BATCH_SIZE, shuffle=False,
    pin_memory=True, num_workers=2)
model.eval()
with torch.no_grad():
    test_loss, test_acc = perform(model, test_loader)
# Summarize the final metrics for all three splits.
for split_name, split_loss, split_acc in (
        ('train', train_loss[-1], train_acc[-1]),
        ('valid', valid_loss[-1], valid_acc[-1]),
        ('test', test_loss, test_acc)):
    print('{}: loss={:.4f}, accuracy={:.4f}'.format(
        split_name, split_loss, split_acc))
# Learning curves: training (dashed) vs validation (solid) loss and accuracy.
figure = plt.figure(figsize=(8, 10))
axis_loss, axis_acc = figure.subplots(2, 1)
mpl.rcParams["legend.loc"] = 'upper right'
for axis, metric, train_series, valid_series in (
        (axis_loss, 'loss', train_loss, valid_loss),
        (axis_acc, 'accuracy', train_acc, valid_acc)):
    axis.set_xlabel('epoch')
    axis.set_ylabel(metric)
    axis.plot(range(len(train_series)), train_series,
              linestyle='--', label='training')
    axis.plot(range(len(valid_series)), valid_series,
              linestyle='-', label='validation')
    axis.legend()
plt.show()
train: loss=0.3015, accuracy=0.8649 valid: loss=0.8963, accuracy=0.6464 test: loss=0.7757, accuracy=0.7111