-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmodel_helpers.py
More file actions
261 lines (203 loc) · 8.48 KB
/
model_helpers.py
File metadata and controls
261 lines (203 loc) · 8.48 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
import torch
import torch.nn as nn
import os
import pickle
from sklearn.metrics import confusion_matrix
# DataLoader keyword arguments for each dataset split.
# Only the training split is shuffled; val/test keep a fixed order
# (check_accuracy_for_ensemble relies on deterministic loader order).
train_params = {
'batch_size': 256,
'shuffle': True
}
val_params = {
'batch_size': 256,
'shuffle': False
}
test_params = {
'batch_size': 256,
'shuffle': False
}
# Number of target classes for the classifier.
num_classes = 8
# Side length of the (square) input images — presumably pixels; confirm against the dataset.
image_size = 96
def do_setup(USE_GPU, type):
    """
    Initialize the module-level ``dtype`` and ``device`` globals that the
    other helpers in this file rely on.

    Inputs:
    - USE_GPU: if truthy, request CUDA; silently falls back to CPU when
      CUDA is unavailable
    - type: the torch dtype to store in the global ``dtype``
    """
    global dtype
    global device
    dtype = type
    # Use the GPU only when both requested and actually available.
    want_cuda = USE_GPU and torch.cuda.is_available()
    print("Using gpu" if want_cuda else "Using cpu")
    device = torch.device('cuda' if want_cuda else 'cpu')
def check_accuracy(loader, model):
    """
    Evaluate the classification accuracy of ``model`` over all batches of
    ``loader``.

    Inputs:
    - loader: a dataloader yielding (x, y) batches (validation / test set)
    - model: a PyTorch Module to evaluate

    Returns:
    - the number of correct predictions
    - the number of samples seen
    - the accuracy as a percentage
    """
    correct = 0
    total = 0
    model = model.to(device=device)  # parameters onto the active device
    model.eval()  # eval mode: affects dropout / batchnorm layers
    with torch.no_grad():  # inference only — skip the autograd graph
        for inputs, labels in loader:
            inputs = inputs.to(device=device, dtype=dtype)
            labels = labels.to(device=device, dtype=torch.long)
            # Predicted class = index of the max score per row.
            predictions = model(inputs).argmax(dim=1)
            correct += (predictions == labels).sum()
            total += predictions.size(0)
    return correct, total, 100 * float(correct) / total
def train(model, optimizer, loader_train, loader_val, epochs=1, scheduler=None, print_every=1):
    """
    Train a model using the PyTorch Module API.

    Inputs:
    - model: A PyTorch Module giving the model to train.
    - optimizer: An Optimizer object we will use to train the model
    - loader_train: A dataloader containing the train dataset
    - loader_val: A dataloader containing the validation dataset
    - epochs: (Optional) An integer giving the number of epochs to train for
    - scheduler: (Optional) A learning rate scheduler
    - print_every: (Optional) An integer specifying how often to print the loss.

    Prints model losses and accuracies after each epoch.

    Returns a tuple of four lists, one entry per epoch:
    - training losses
    - training accuracies
    - validation losses
    - validation accuracies
    """
    training_losses = []
    training_accuracies = []
    validation_losses = []
    validation_accuracies = []
    model = model.to(device=device)  # move the model parameters to CPU/GPU
    for e in range(epochs):
        total_training_loss = 0
        num_iterations_training = 0
        total_validation_loss = 0
        num_iterations_validation = 0
        model.train()  # training mode — hoisted out of the batch loop
        for t, (x, y) in enumerate(loader_train):
            x = x.to(device=device, dtype=dtype)
            y = y.to(device=device, dtype=torch.long)
            scores = model(x)
            loss = torch.nn.functional.cross_entropy(scores, y)
            # Since the model is updated between batches, this running sum
            # only estimates the loss of the whole epoch.
            total_training_loss += loss.item()
            num_iterations_training += 1
            # Zero stale gradients, backprop the loss, then take an
            # optimizer step using the freshly computed gradients.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # Validation loss: no parameter updates here, so skip building the
        # autograd graph entirely (saves memory; loss values are unchanged).
        model.eval()
        with torch.no_grad():
            for x, y in loader_val:
                x = x.to(device=device, dtype=dtype)
                y = y.to(device=device, dtype=torch.long)
                scores = model(x)
                loss = torch.nn.functional.cross_entropy(scores, y)
                total_validation_loss += loss.item()
                num_iterations_validation += 1
        training_losses.append(total_training_loss / num_iterations_training)
        validation_losses.append(total_validation_loss / num_iterations_validation)
        # Full-dataset accuracies for both splits.
        training_num_correct, training_num_samples, training_accuracy = check_accuracy(loader_train, model)
        training_accuracies.append(training_accuracy)
        validation_num_correct, validation_num_samples, validation_accuracy = check_accuracy(loader_val, model)
        validation_accuracies.append(validation_accuracy)
        # ReduceLROnPlateau must be fed the monitored metric; every other
        # scheduler steps once per epoch with no arguments.
        if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
            scheduler.step(validation_accuracy)
        elif scheduler is not None:
            scheduler.step()
        print((
            "Epoch %d: "
            "training loss = %f, training accuracy = %d / %d correct (%.2f), "
            "validation loss = %f, validation accuracy = %d / %d correct (%.2f)"
        ) %
        (e,
         training_losses[-1], training_num_correct, training_num_samples, training_accuracies[-1],
         validation_losses[-1], validation_num_correct, validation_num_samples, validation_accuracies[-1]))
    return (training_losses, training_accuracies, validation_losses, validation_accuracies)
def check_accuracy_for_ensemble(model_info):
    """
    Finds the accuracy of an ensemble of models by summing their raw scores.

    Inputs:
    - model_info: a sequence of (loader, model) pairs, one per ensemble
      member.

    NOTE(review): correctness depends on every loader yielding the same
    samples in the same order (shuffle=False and identical datasets),
    because batch i of each model is summed together and compared against
    the labels re-read from the FIRST pair's loader — confirm with callers.

    Returns:
    - The number of correct predictions
    - The number of samples
    - The accuracy of the ensemble (percentage)
    - The per-batch predicted-label tensors
    """
    num_correct = 0
    num_samples = 0
    with torch.no_grad(): # no need to store computation graph or local gradients
        # Pass 1: collect each model's raw score tensors, one list per model.
        final_results = []
        for (loader, model) in model_info:
            model = model.to(device=device)
            model.eval() # set model to evaluation mode
            results = []
            for (x, y) in loader:
                x = x.to(device=device, dtype=dtype) # move to device, e.g. GPU
                # y is moved here but not used in this pass.
                y = y.to(device=device, dtype=torch.long)
                results.append(model(x))
            final_results.append(results)
        # Pass 2: for each batch index, sum scores across models and take
        # the argmax as the ensemble prediction.
        all_preds = []
        for i in range(len(final_results[0])):
            scores = torch.sum(torch.stack([results[i] for results in final_results]), 0)
            _, preds = scores.max(1)
            all_preds.append(preds)
        # Pass 3: re-iterate the first loader to recover the labels and
        # count correct ensemble predictions per batch.
        for t, (x, y) in enumerate(model_info[0][0]):
            y = y.to(device=device, dtype=torch.long)
            num_correct += (all_preds[t] == y).sum()
            num_samples += all_preds[t].size(0)
    acc = float(num_correct) / num_samples
    return num_correct, num_samples, 100 * acc, all_preds
def create_confusion_matrix(model, loader):
    """
    Build a row-normalized confusion matrix for ``model`` over ``loader``.

    Inputs:
    - model: a PyTorch Module to evaluate
    - loader: a dataloader yielding (x, y) batches

    Returns:
    - an sklearn confusion matrix normalized over the true labels
      (each row sums to 1).
    """
    actual = []
    predicted = []
    model = model.to(device=device)
    model.eval()  # eval mode: affects dropout / batchnorm layers
    with torch.no_grad():  # inference only — no autograd graph needed
        for inputs, labels in loader:
            inputs = inputs.to(device=device, dtype=dtype)
            labels = labels.to(device=device, dtype=torch.long)
            batch_preds = model(inputs).argmax(dim=1)
            # sklearn expects host-side arrays.
            predicted.extend(batch_preds.cpu().numpy())
            actual.extend(labels.cpu().numpy())
    return confusion_matrix(actual, predicted, normalize='true')
def load_model(model, model_file):
    """
    Restore saved weights from ``results/<model_file>`` into ``model``.
    A missing checkpoint file is silently ignored (best-effort restore).
    """
    path = "results/" + model_file
    if os.path.exists(path):
        model.load_state_dict(torch.load(path))
def save_model(model, model_file):
    """
    Save ``model.state_dict()`` to ``results/<model_file>``.
    Does nothing if the file already exists (never overwrites a checkpoint).
    """
    path = "results/" + model_file
    if os.path.exists(path):
        return  # keep the existing checkpoint untouched
    torch.save(model.state_dict(), path)
def load_results(filename):
    """Unpickle and return the object stored at ``results/<filename>``."""
    with open("results/" + filename, 'rb') as f:
        return pickle.load(f)
def save_results(results, filename):
    """Pickle ``results`` to ``results/<filename>`` (overwrites any existing file)."""
    with open("results/" + filename, 'wb') as f:
        pickle.dump(results, f)
def flatten(x):
    """Collapse every dimension after the batch dimension into one:
    (N, d1, d2, ...) -> (N, d1*d2*...)."""
    return x.view(x.shape[0], -1)
class Flatten(nn.Module):
    """nn.Module wrapper that flattens (N, d1, d2, ...) inputs to (N, d1*d2*...),
    for use inside nn.Sequential pipelines."""
    def forward(self, x):
        # Same reshape the module-level flatten() helper performs, inlined.
        return x.view(x.shape[0], -1)