GitHub Repository: deeplearningzerotoall/PyTorch
Path: blob/master/CNN/lab-11-5-mnist_cnn_ensemble.py
# Lab 11 MNIST and Deep learning CNN
import torch
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import torch.nn.init
import numpy as np

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# for reproducibility
torch.manual_seed(777)
if device == 'cuda':
    torch.cuda.manual_seed_all(777)

# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100

# MNIST dataset
mnist_train = dsets.MNIST(root='MNIST_data/',
                          train=True,
                          transform=transforms.ToTensor(),
                          download=True)

mnist_test = dsets.MNIST(root='MNIST_data/',
                         train=False,
                         transform=transforms.ToTensor(),
                         download=True)

# dataset loader
data_loader = torch.utils.data.DataLoader(dataset=mnist_train,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          drop_last=True)
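
# Optional sanity check (illustration only, left commented out): with batch_size=100
# and drop_last=True, each batch from data_loader is a pair (X, Y) where X is a float
# tensor of shape [100, 1, 28, 28] scaled to [0, 1] by ToTensor(), and Y is an int64
# label tensor of shape [100].
#
#   X_sample, Y_sample = next(iter(data_loader))
#   print(X_sample.shape, Y_sample.shape)  # torch.Size([100, 1, 28, 28]) torch.Size([100])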

# CNN Model
class CNN(torch.nn.Module):

    def __init__(self):
        super(CNN, self).__init__()
        self._build_net()

    def _build_net(self):
        # dropout (keep_prob) rate 0.7~0.5 on training, but should be 1 for testing
        self.keep_prob = 0.5
        # L1 ImgIn shape=(?, 28, 28, 1)
        #    Conv -> (?, 28, 28, 32)
        #    Pool -> (?, 14, 14, 32)
        self.layer1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2))
        # L2 ImgIn shape=(?, 14, 14, 32)
        #    Conv -> (?, 14, 14, 64)
        #    Pool -> (?, 7, 7, 64)
        self.layer2 = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2))
        # L3 ImgIn shape=(?, 7, 7, 64)
        #    Conv -> (?, 7, 7, 128)
        #    Pool -> (?, 4, 4, 128)
        self.layer3 = torch.nn.Sequential(
            torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=1))
        # L4 FC 4x4x128 inputs -> 625 outputs
        self.fc1 = torch.nn.Linear(4 * 4 * 128, 625, bias=True)
        torch.nn.init.xavier_uniform_(self.fc1.weight)
        self.layer4 = torch.nn.Sequential(
            self.fc1,
            torch.nn.ReLU(),
            torch.nn.Dropout(p=1 - self.keep_prob))
        # L5 Final FC 625 inputs -> 10 outputs
        self.fc2 = torch.nn.Linear(625, 10, bias=True)
        torch.nn.init.xavier_uniform_(self.fc2.weight)

        # define cost/loss & optimizer
        self.criterion = torch.nn.CrossEntropyLoss()  # Softmax is internally computed.
        self.optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)
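
        # Note: torch.nn.Dropout takes the *drop* probability, while keep_prob above
        # follows the TensorFlow-style *keep* probability, hence p = 1 - self.keep_prob
        # (= 0.5 here). The Dropout layer is active in train() mode and is disabled
        # automatically in eval() mode.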

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = self.layer3(out)
        out = out.view(out.size(0), -1)  # Flatten them for FC
        out = self.layer4(out)
        out = self.fc2(out)
        return out
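
    # Note: after layer3 each image is a 128-channel 4x4 feature map, so the view()
    # above flattens it to 4 * 4 * 128 = 2048 features, matching fc1's in_features.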

    def predict(self, x):
        self.eval()
        return self.forward(x)

    def get_accuracy(self, x, y):
        prediction = self.predict(x)
        correct_prediction = torch.argmax(prediction, 1) == y
        self.accuracy = correct_prediction.float().mean()
        return self.accuracy

    def train_model(self, x, y):
        self.train()
        self.optimizer.zero_grad()
        hypothesis = self.forward(x)
        self.cost = self.criterion(hypothesis, y)
        self.cost.backward()
        self.optimizer.step()
        return self.cost
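
# Usage sketch (illustration only, not part of the original lab): a single CNN can be
# called directly; __call__ runs forward() and returns raw logits of shape [batch, 10],
# which CrossEntropyLoss consumes without an explicit softmax.
#
#   model = CNN().to(device)
#   logits = model(torch.zeros(1, 1, 28, 28, device=device))  # -> shape [1, 10]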

# instantiate CNN model
models = []
num_models = 2
for m in range(num_models):
    models.append(CNN().to(device))
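
# Note: torch.manual_seed was set once above, so each CNN() draws different initial
# weights; the ensemble members are distinct models, each with its own Adam optimizer,
# trained below on the same shuffled batches.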

# train my model
total_batch = len(data_loader)
print('Learning started. It takes some time.')
for epoch in range(training_epochs):
    avg_cost_list = np.zeros(len(models))

    for X, Y in data_loader:
        X = X.to(device)
        Y = Y.to(device)
        # image is already size of (28x28), no reshape
        # label is not one-hot encoded

        for m_idx, m in enumerate(models):
            cost = m.train_model(X, Y)
            avg_cost_list[m_idx] += cost.item() / total_batch

    print('[Epoch: {:>4}] cost = {:>.9}'.format(epoch + 1, avg_cost_list.mean()))

print('Learning Finished!')
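
# Ensemble evaluation: each model's test-set logits are accumulated into `predictions`,
# and argmax over the summed logits gives the ensemble's class vote (equivalent to
# averaging the individual model outputs before picking the top class).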

# Test model and check accuracy
with torch.no_grad():
    # Note: newer torchvision versions expose these as mnist_test.data / mnist_test.targets;
    # pixels are scaled to [0, 1] so test inputs match the ToTensor() range used in training.
    X_test = mnist_test.test_data.view(len(mnist_test), 1, 28, 28).float().to(device) / 255.
    Y_test = mnist_test.test_labels.to(device)
    predictions = torch.zeros([len(mnist_test), 10])
    for m_idx, m in enumerate(models):
        print(m_idx, 'Accuracy:', m.get_accuracy(X_test, Y_test).item())
        p = m.predict(X_test)
        predictions += p.cpu()

    ensemble_correct_prediction = torch.argmax(predictions, 1) == Y_test.cpu()
    ensemble_accuracy = ensemble_correct_prediction.float().mean()
    print('Accuracy:', ensemble_accuracy.item())