# GitHub Repository: deeplearningzerotoall/PyTorch
# Path: blob/master/CNN/lab-10-6-mnist_batchnorm.py
# Lab 10-6: MNIST and Batch Normalization
import torch
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# for reproducibility
torch.manual_seed(1)
if device == 'cuda':
    torch.cuda.manual_seed_all(1)

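# Note: seeding alone does not make CUDA runs fully deterministic; cuDNN may
# still select non-deterministic kernels. If exact reproducibility matters,
# one option (not part of the original lab) is:
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
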
# parameters
learning_rate = 0.01
training_epochs = 10
batch_size = 32

# MNIST dataset
mnist_train = dsets.MNIST(root='MNIST_data/',
                          train=True,
                          transform=transforms.ToTensor(),
                          download=True)

mnist_test = dsets.MNIST(root='MNIST_data/',
                         train=False,
                         transform=transforms.ToTensor(),
                         download=True)

# dataset loader
train_loader = torch.utils.data.DataLoader(dataset=mnist_train,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           drop_last=True)

test_loader = torch.utils.data.DataLoader(dataset=mnist_test,
                                          batch_size=batch_size,
                                          shuffle=False,
                                          drop_last=True)

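# Optional sanity check (an illustrative addition, not part of the original
# lab): each batch should arrive as [batch_size, 1, 28, 28] before flattening.
# images, labels = next(iter(train_loader))
# assert images.shape == (batch_size, 1, 28, 28)
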
# layers for the batch-normalized model
linear1 = torch.nn.Linear(784, 32, bias=True)
linear2 = torch.nn.Linear(32, 32, bias=True)
linear3 = torch.nn.Linear(32, 10, bias=True)
relu = torch.nn.ReLU()
bn1 = torch.nn.BatchNorm1d(32)
bn2 = torch.nn.BatchNorm1d(32)

# layers for the baseline model without batch normalization
nn_linear1 = torch.nn.Linear(784, 32, bias=True)
nn_linear2 = torch.nn.Linear(32, 32, bias=True)
nn_linear3 = torch.nn.Linear(32, 10, bias=True)

# model: batch normalization goes between the linear layer and the activation
bn_model = torch.nn.Sequential(linear1, bn1, relu,
                               linear2, bn2, relu,
                               linear3).to(device)
nn_model = torch.nn.Sequential(nn_linear1, relu,
                               nn_linear2, relu,
                               nn_linear3).to(device)

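# The single stateless ReLU instance can safely be shared across both models
# because it holds no parameters. To inspect the architectures one could add
# (illustrative, not part of the original lab):
# print(bn_model)
# print(nn_model)
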
# define cost/loss & optimizer
criterion = torch.nn.CrossEntropyLoss().to(device)  # Softmax is internally computed.
bn_optimizer = torch.optim.Adam(bn_model.parameters(), lr=learning_rate)
nn_optimizer = torch.optim.Adam(nn_model.parameters(), lr=learning_rate)

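# CrossEntropyLoss combines LogSoftmax and NLLLoss, which is why both models
# end in a bare Linear layer and output raw logits.
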
# Save losses and accuracies every epoch; we are going to plot them later
train_losses = []
train_accs = []

valid_losses = []
valid_accs = []

train_total_batch = len(train_loader)
test_total_batch = len(test_loader)

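# Because drop_last=True, every batch holds exactly batch_size samples, so the
# per-batch means averaged in the evaluation loop weight each retained sample
# equally (samples in a dropped final partial batch are simply excluded).
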
for epoch in range(training_epochs):
    bn_model.train()  # train mode: BatchNorm normalizes with per-batch statistics
    # (nn_model has no BatchNorm or Dropout, so it needs no mode switch)

    for X, Y in train_loader:
        # reshape input image into [batch_size, 784]
        # label is not one-hot encoded
        X = X.view(-1, 28 * 28).to(device)
        Y = Y.to(device)

        bn_optimizer.zero_grad()
        bn_prediction = bn_model(X)
        bn_loss = criterion(bn_prediction, Y)
        bn_loss.backward()
        bn_optimizer.step()

        nn_optimizer.zero_grad()
        nn_prediction = nn_model(X)
        nn_loss = criterion(nn_prediction, Y)
        nn_loss.backward()
        nn_optimizer.step()

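    # Both models are updated on the same batches in the same order, so the
    # curves below mainly reflect batch normalization (though note the two
    # models are initialized independently).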
    with torch.no_grad():
        bn_model.eval()  # evaluation mode: BatchNorm uses its running statistics

        # Evaluate the models on the training set
        bn_loss, nn_loss, bn_acc, nn_acc = 0, 0, 0, 0
        for i, (X, Y) in enumerate(train_loader):
            X = X.view(-1, 28 * 28).to(device)
            Y = Y.to(device)

            bn_prediction = bn_model(X)
            bn_correct_prediction = torch.argmax(bn_prediction, 1) == Y
            bn_loss += criterion(bn_prediction, Y)
            bn_acc += bn_correct_prediction.float().mean()

            nn_prediction = nn_model(X)
            nn_correct_prediction = torch.argmax(nn_prediction, 1) == Y
            nn_loss += criterion(nn_prediction, Y)
            nn_acc += nn_correct_prediction.float().mean()

        bn_loss, nn_loss = bn_loss / train_total_batch, nn_loss / train_total_batch
        bn_acc, nn_acc = bn_acc / train_total_batch, nn_acc / train_total_batch

        # Save train losses/acc as plain floats so they plot on any device
        train_losses.append([bn_loss.item(), nn_loss.item()])
        train_accs.append([bn_acc.item(), nn_acc.item()])
        print(
            '[Epoch %d-TRAIN] Batchnorm Loss(Acc): bn_loss:%.5f(bn_acc:%.2f) vs No Batchnorm Loss(Acc): nn_loss:%.5f(nn_acc:%.2f)' % (
                (epoch + 1), bn_loss.item(), bn_acc.item(), nn_loss.item(), nn_acc.item()))

        # Evaluate the models on the test set
        bn_loss, nn_loss, bn_acc, nn_acc = 0, 0, 0, 0
        for i, (X, Y) in enumerate(test_loader):
            X = X.view(-1, 28 * 28).to(device)
            Y = Y.to(device)

            bn_prediction = bn_model(X)
            bn_correct_prediction = torch.argmax(bn_prediction, 1) == Y
            bn_loss += criterion(bn_prediction, Y)
            bn_acc += bn_correct_prediction.float().mean()

            nn_prediction = nn_model(X)
            nn_correct_prediction = torch.argmax(nn_prediction, 1) == Y
            nn_loss += criterion(nn_prediction, Y)
            nn_acc += nn_correct_prediction.float().mean()

        bn_loss, nn_loss = bn_loss / test_total_batch, nn_loss / test_total_batch
        bn_acc, nn_acc = bn_acc / test_total_batch, nn_acc / test_total_batch

        # Save valid losses/acc as plain floats so they plot on any device
        valid_losses.append([bn_loss.item(), nn_loss.item()])
        valid_accs.append([bn_acc.item(), nn_acc.item()])
        print(
            '[Epoch %d-VALID] Batchnorm Loss(Acc): bn_loss:%.5f(bn_acc:%.2f) vs No Batchnorm Loss(Acc): nn_loss:%.5f(nn_acc:%.2f)' % (
                (epoch + 1), bn_loss.item(), bn_acc.item(), nn_loss.item(), nn_acc.item()))
        print()

print('Learning finished')


def plot_compare(loss_list: list, ylim=None, title=None) -> None:
    bn = [i[0] for i in loss_list]
    nn = [i[1] for i in loss_list]

    plt.figure(figsize=(15, 10))
    plt.plot(bn, label='With BN')
    plt.plot(nn, label='Without BN')
    if ylim:
        plt.ylim(ylim)

    if title:
        plt.title(title)
    plt.legend()
    plt.grid(True)  # use a boolean; the 'on'/'off' string form is deprecated in newer matplotlib
    plt.show()


plot_compare(train_losses, title='Training Loss at Epoch')
plot_compare(train_accs, [0, 1.0], title='Training Acc at Epoch')
plot_compare(valid_losses, title='Validation Loss at Epoch')
plot_compare(valid_accs, [0, 1.0], title='Validation Acc at Epoch')