forked from LeanManager/PyTorch_Image_Classifier
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmodel_functions.py
More file actions
195 lines (124 loc) · 6.13 KB
/
model_functions.py
File metadata and controls
195 lines (124 loc) · 6.13 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
import processing_functions
from collections import OrderedDict
from workspace_utils import active_session
# Function for saving the model checkpoint
def save_checkpoint(model, training_dataset, arch, epochs, lr, hidden_units, input_size):
    """Persist the trained model plus its training configuration to checkpoint.pth.

    Args:
        model: Trained network whose state_dict will be saved.
        training_dataset: Dataset providing the class_to_idx mapping.
        arch: Architecture name string ('vgg' or 'alexnet').
        epochs: Number of epochs the model was trained for.
        lr: Learning rate used during training.
        hidden_units: Size of the classifier's hidden layer.
        input_size: Number of input features of the classifier head.
    """
    # Attach the dataset's class-to-index mapping to the model before saving
    model.class_to_idx = training_dataset.class_to_idx

    state = {
        'input_size': (3, 224, 224),
        'output_size': 102,
        'hidden_layer_units': hidden_units,
        'batch_size': 64,
        'learning_rate': lr,
        'model_name': arch,
        'model_state_dict': model.state_dict(),
        'epochs': epochs,
        'class_to_idx': model.class_to_idx,
        'clf_input': input_size,
    }

    torch.save(state, 'checkpoint.pth')
# Function for loading the model checkpoint
def load_checkpoint(filepath):
    """Rebuild a model from a checkpoint produced by save_checkpoint.

    Args:
        filepath: Path to a checkpoint file written by torch.save.

    Returns:
        The reconstructed model with the trained classifier weights loaded
        and the pretrained feature-extractor parameters frozen.

    Raises:
        ValueError: If the checkpoint's architecture name is not supported.
    """
    checkpoint = torch.load(filepath)

    if checkpoint['model_name'] == 'vgg':
        model = models.vgg16(pretrained=True)
    elif checkpoint['model_name'] == 'alexnet':
        model = models.alexnet(pretrained=True)
    else:
        # Fail fast on an unknown architecture. The original only printed a
        # message and then crashed with a NameError on the undefined `model`.
        raise ValueError("Architecture not recognized.")

    # Freeze the pretrained feature extractor; only the classifier was trained
    for param in model.parameters():
        param.requires_grad = False

    model.class_to_idx = checkpoint['class_to_idx']

    # Recreate the custom classifier head exactly as it was built for training
    classifier = nn.Sequential(OrderedDict([
        ('fc1', nn.Linear(checkpoint['clf_input'], checkpoint['hidden_layer_units'])),
        ('relu', nn.ReLU()),
        ('drop', nn.Dropout(p=0.5)),
        ('fc2', nn.Linear(checkpoint['hidden_layer_units'], 102)),
        ('output', nn.LogSoftmax(dim=1))]))

    model.classifier = classifier
    model.load_state_dict(checkpoint['model_state_dict'])

    return model
# Function for the validation pass
def validation(model, validateloader, criterion, gpu):
    """Run one full pass over the validation loader.

    Args:
        model: Network producing log-probabilities from model.forward.
        validateloader: Iterable of (images, labels) batches.
        criterion: Loss function applied to each batch.
        gpu: Device to move each batch to before the forward pass.

    Returns:
        Tuple (total loss over all batches, summed per-batch accuracy);
        callers divide both by len(validateloader) to obtain averages.
    """
    total_loss = 0
    total_accuracy = 0

    for batch_images, batch_labels in validateloader:
        batch_images = batch_images.to(gpu)
        batch_labels = batch_labels.to(gpu)

        log_probs = model.forward(batch_images)
        total_loss += criterion(log_probs, batch_labels).item()

        # Compare the highest-probability class per sample against the labels
        predictions = torch.exp(log_probs).max(dim=1)[1]
        matches = (batch_labels.data == predictions)
        total_accuracy += matches.type(torch.FloatTensor).mean()

    return total_loss, total_accuracy
# Function for measuring network accuracy on test data
def test_accuracy(model, test_loader, gpu):
# Do validation on the test set
model.eval()
model.to(gpu)
with torch.no_grad():
accuracy = 0
for images, labels in iter(test_loader):
images, labels = images.to(gpu), labels.to(gpu)
output = model.forward(images)
probabilities = torch.exp(output)
equality = (labels.data == probabilities.max(dim=1)[1])
accuracy += equality.type(torch.FloatTensor).mean()
print("Test Accuracy: {}".format(accuracy/len(test_loader)))
# Train the classifier
def train_classifier(model, optimizer, criterion, arg_epochs, train_loader, validate_loader, gpu):
    """Train the model's classifier head, reporting validation metrics periodically.

    Args:
        model: Network to train (its classifier parameters are optimized).
        optimizer: Optimizer bound to the trainable parameters.
        criterion: Loss function.
        arg_epochs: Number of epochs to train.
        train_loader: Iterable of (images, labels) training batches.
        validate_loader: Iterable of (images, labels) validation batches.
        gpu: Device to train on.
    """
    with active_session():  # keep the workspace awake during long training runs
        epochs = arg_epochs
        report_interval = 40  # batches between validation reports
        step_count = 0  # cumulative batch counter across all epochs

        model.to(gpu)

        for epoch_index in range(epochs):
            model.train()
            interval_loss = 0

            for batch_images, batch_labels in train_loader:
                step_count += 1
                batch_images = batch_images.to(gpu)
                batch_labels = batch_labels.to(gpu)

                # Standard training step: forward, loss, backward, update
                optimizer.zero_grad()
                batch_output = model.forward(batch_images)
                batch_loss = criterion(batch_output, batch_labels)
                batch_loss.backward()
                optimizer.step()

                interval_loss += batch_loss.item()

                if step_count % report_interval == 0:
                    model.eval()

                    # Turn off gradients for validation, saves memory and computations
                    with torch.no_grad():
                        validation_loss, accuracy = validation(model, validate_loader, criterion, gpu)

                    print("Epoch: {}/{}.. ".format(epoch_index + 1, epochs),
                          "Training Loss: {:.3f}.. ".format(interval_loss / report_interval),
                          "Validation Loss: {:.3f}.. ".format(validation_loss / len(validate_loader)),
                          "Validation Accuracy: {:.3f}".format(accuracy / len(validate_loader)))

                    interval_loss = 0
                    model.train()
def predict(image_path, model, topk=5, gpu='cuda'):
    ''' Predict the class (or classes) of an image using a trained deep learning model.

    Args:
        image_path: Path to the image file to classify.
        model: Trained network with a class_to_idx attribute.
        topk: Number of top classes to return.
        gpu: Device to run inference on ('cuda' or 'cpu').

    Returns:
        Tuple (top probabilities as a list of floats,
               top class labels as a list of strings).
    '''
    model.to(gpu)
    # Disable Dropout for deterministic inference (the original left the
    # classifier in training mode, making predictions stochastic).
    model.eval()

    image = processing_functions.process_image(image_path)

    # Convert to a float tensor on the requested device. The original
    # hard-coded torch.cuda.FloatTensor, which crashed whenever gpu='cpu'.
    image = torch.from_numpy(image).type(torch.FloatTensor).to(gpu)

    # Insert a batch dimension of size one, as the network expects
    image = image.unsqueeze(0)

    # Inference only: no gradients needed
    with torch.no_grad():
        output = model.forward(image)
    probabilities = torch.exp(output)

    # Probabilities and the indices of those probabilities corresponding to the classes
    top_probabilities, top_indices = probabilities.topk(topk)

    # Move results to the CPU and convert to plain Python lists
    top_probabilities = top_probabilities.cpu().numpy().tolist()[0]
    top_indices = top_indices.cpu().numpy().tolist()[0]

    # Convert topk_indices to the actual class labels using class_to_idx
    # Invert the dictionary so you get a mapping from index to class.
    idx_to_class = {value: key for key, value in model.class_to_idx.items()}
    top_classes = [idx_to_class[index] for index in top_indices]

    return top_probabilities, top_classes