train.py (forked from foamliu/Autoencoder)
import time

import torch
import torch.optim as optim
from torch import nn
from torch.utils.data import DataLoader

from data_gen import VaeDataset
from models import SegNet
from utils import *
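# NOTE: `device`, `print_freq`, `batch_size`, `lr`, `start_epoch`, `epochs`,
# `ExpoAverageMeter`, `adjust_learning_rate` and `save_checkpoint` are expected
# to come from the star import of utils above. As a rough sketch (an assumption
# about utils, not a verified copy), ExpoAverageMeter presumably tracks an
# exponentially weighted moving average of a scalar:
#
# class ExpoAverageMeter(object):
#     def __init__(self, beta=0.9):
#         self.beta = beta   # smoothing factor (hypothetical default)
#         self.val = 0.0     # most recent value
#         self.avg = 0.0     # exponential moving average
#
#     def update(self, val):
#         self.val = val
#         self.avg = self.beta * self.avg + (1 - self.beta) * val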
def train(epoch, train_loader, model, optimizer):
    # Ensure dropout layers are in train mode
    model.train()

    # Loss function
    # criterion = nn.MSELoss().to(device)

    batch_time = ExpoAverageMeter()  # forward prop. + back prop. time
    losses = ExpoAverageMeter()  # RMSE loss (per batch)

    start = time.time()

    # Batches
    for i_batch, (x, y) in enumerate(train_loader):
        # Set device options
        x = x.to(device)
        y = y.to(device)
        # print('x.size(): ' + str(x.size()))  # [32, 3, 224, 224]
        # print('y.size(): ' + str(y.size()))  # [32, 3, 224, 224]

        # Zero gradients
        optimizer.zero_grad()

        y_hat = model(x)
        # print('y_hat.size(): ' + str(y_hat.size()))  # [32, 3, 224, 224]

        loss = torch.sqrt((y_hat - y).pow(2).mean())
        loss.backward()

        # Closure variant, needed for optimizers such as LBFGS:
        # def closure():
        #     optimizer.zero_grad()
        #     y_hat = model(x)
        #     loss = torch.sqrt((y_hat - y).pow(2).mean())
        #     loss.backward()
        #     losses.update(loss.item())
        #     return loss
        # optimizer.step(closure)

        optimizer.step()

        # Keep track of metrics
        losses.update(loss.item())
        batch_time.update(time.time() - start)

        start = time.time()

        # Print status
        if i_batch % print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(epoch, i_batch, len(train_loader),
                                                                batch_time=batch_time,
                                                                loss=losses))
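# The training loss above is the batch RMSE. An equivalent formulation
# (a sketch for reference, not part of the original script) would be:
#
#   import torch.nn.functional as F
#   loss = torch.sqrt(F.mse_loss(y_hat, y))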
def valid(val_loader, model):
    model.eval()  # eval mode (dropout disabled, batchnorm uses running stats)

    # Loss function
    # criterion = nn.MSELoss().to(device)

    batch_time = ExpoAverageMeter()  # forward prop. time
    losses = ExpoAverageMeter()  # RMSE loss (per batch)

    start = time.time()

    with torch.no_grad():
        # Batches
        for i_batch, (x, y) in enumerate(val_loader):
            # Set device options
            x = x.to(device)
            y = y.to(device)

            y_hat = model(x)

            loss = torch.sqrt((y_hat - y).pow(2).mean())

            # Keep track of metrics
            losses.update(loss.item())
            batch_time.update(time.time() - start)

            start = time.time()

            # Print status
            if i_batch % print_freq == 0:
                print('Validation: [{0}/{1}]\t'
                      'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(i_batch, len(val_loader),
                                                                    batch_time=batch_time,
                                                                    loss=losses))

    return losses.avg
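# For reference, `adjust_learning_rate(optimizer, shrink_factor)` used in
# main() below presumably scales every parameter group's learning rate by
# shrink_factor, along the lines of this sketch (the actual implementation
# lives in utils and may differ):
#
# def adjust_learning_rate(optimizer, shrink_factor):
#     print("\nDECAYING learning rate.")
#     for param_group in optimizer.param_groups:
#         param_group['lr'] = param_group['lr'] * shrink_factor
#     print("The new learning rate is %f\n" % (optimizer.param_groups[0]['lr'],))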
def main():
    train_loader = DataLoader(dataset=VaeDataset('train'), batch_size=batch_size, shuffle=True,
                              pin_memory=True, drop_last=True)
    val_loader = DataLoader(dataset=VaeDataset('valid'), batch_size=batch_size, shuffle=False,
                            pin_memory=True, drop_last=True)

    # Create SegNet model
    label_nbr = 3
    model = SegNet(label_nbr)

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        # dim = 0 [40, xxx] -> [10, ...], [10, ...], [10, ...], [10, ...] on 4 GPUs
        model = nn.DataParallel(model)

    # Use appropriate device
    model = model.to(device)
    # print(model)

    # Define the optimizer
    # optimizer = optim.LBFGS(model.parameters(), lr=0.8)
    optimizer = optim.Adam(model.parameters(), lr=lr)

    best_loss = 100000
    epochs_since_improvement = 0

    # Epochs
    for epoch in range(start_epoch, epochs):
        # Decay learning rate if there is no improvement for 8 consecutive epochs,
        # and terminate training after 20 epochs without improvement
        if epochs_since_improvement == 20:
            break
        if epochs_since_improvement > 0 and epochs_since_improvement % 8 == 0:
            adjust_learning_rate(optimizer, 0.8)

        # One epoch's training
        train(epoch, train_loader, model, optimizer)

        # One epoch's validation
        val_loss = valid(val_loader, model)
        print('\n * LOSS - {loss:.3f}\n'.format(loss=val_loss))

        # Check if there was an improvement
        is_best = val_loss < best_loss
        best_loss = min(best_loss, val_loss)
        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" % (epochs_since_improvement,))
        else:
            epochs_since_improvement = 0

        # Save checkpoint
        save_checkpoint(epoch, model, optimizer, val_loss, is_best)


if __name__ == '__main__':
    main()
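# Usage (assuming the dataset expected by data_gen.VaeDataset has been
# prepared as described in the repository's README):
#
#   $ python train.py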