GitHub Repository: deeplearningzerotoall/PyTorch
Path: blob/master/CNN/lab-11-3-mnist_cnn_class.py
# Lab 11 MNIST and Deep learning CNN
import torch
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import torch.nn.init

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# for reproducibility
torch.manual_seed(777)
if device == 'cuda':
    torch.cuda.manual_seed_all(777)

# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100

# MNIST dataset
mnist_train = dsets.MNIST(root='MNIST_data/',
                          train=True,
                          transform=transforms.ToTensor(),
                          download=True)

mnist_test = dsets.MNIST(root='MNIST_data/',
                         train=False,
                         transform=transforms.ToTensor(),
                         download=True)

# dataset loader
data_loader = torch.utils.data.DataLoader(dataset=mnist_train,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          drop_last=True)

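# Optional sanity check (illustrative sketch, not prescribed by the lab; the
# variable names below are only for this check). One batch from the loader
# should be a [100, 1, 28, 28] image tensor scaled to [0, 1] by ToTensor and a
# [100] tensor of integer class labels.
example_X, example_Y = next(iter(data_loader))
print('batch images:', example_X.shape, 'batch labels:', example_Y.shape)
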
# CNN Model
class CNN(torch.nn.Module):

    def __init__(self):
        super(CNN, self).__init__()
        self._build_net()

    def _build_net(self):
        # dropout (keep_prob) rate 0.7~0.5 on training, but should be 1 for testing
        self.keep_prob = 0.5
        # L1 ImgIn shape=(?, 28, 28, 1)
        #    Conv -> (?, 28, 28, 32)
        #    Pool -> (?, 14, 14, 32)
        self.layer1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2))
        # L2 ImgIn shape=(?, 14, 14, 32)
        #    Conv -> (?, 14, 14, 64)
        #    Pool -> (?, 7, 7, 64)
        self.layer2 = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2))
        # L3 ImgIn shape=(?, 7, 7, 64)
        #    Conv -> (?, 7, 7, 128)
        #    Pool -> (?, 4, 4, 128)  (pooling the 7x7 map with padding=1 gives 4x4)
        self.layer3 = torch.nn.Sequential(
            torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=1))
        # L4 FC 4x4x128 inputs -> 625 outputs
        self.fc1 = torch.nn.Linear(4 * 4 * 128, 625, bias=True)
        torch.nn.init.xavier_uniform_(self.fc1.weight)
        self.layer4 = torch.nn.Sequential(
            self.fc1,
            torch.nn.ReLU(),
            torch.nn.Dropout(p=1 - self.keep_prob))
        # L5 Final FC 625 inputs -> 10 outputs
        self.fc2 = torch.nn.Linear(625, 10, bias=True)
        torch.nn.init.xavier_uniform_(self.fc2.weight)

        # define cost/loss & optimizer
        self.criterion = torch.nn.CrossEntropyLoss()  # Softmax is internally computed.
        self.optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = self.layer3(out)
        out = out.view(out.size(0), -1)  # Flatten them for FC
        out = self.layer4(out)
        out = self.fc2(out)
        return out

    def predict(self, x):
        self.eval()
        return self.forward(x)

    def get_accuracy(self, x, y):
        prediction = self.predict(x)
        correct_prediction = torch.argmax(prediction, 1) == y  # compare with the labels passed in, not a global
        self.accuracy = correct_prediction.float().mean().item()
        return self.accuracy

    def train_model(self, x, y):
        self.train()
        self.optimizer.zero_grad()
        hypothesis = self.forward(x)
        self.cost = self.criterion(hypothesis, y)
        self.cost.backward()
        self.optimizer.step()
        return self.cost


# instantiate CNN model
model = CNN().to(device)

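# Optional shape check (illustrative sketch, not part of the training flow):
# pushing a dummy batch of one 28x28 grayscale image through the network should
# yield a [1, 10] tensor of class scores, confirming the layer-shape comments above.
with torch.no_grad():
    dummy_scores = model(torch.zeros(1, 1, 28, 28).to(device))
print('output shape for a single image:', dummy_scores.shape)  # torch.Size([1, 10])
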
# train my model
total_batch = len(data_loader)
print('Learning started. It takes some time.')
for epoch in range(training_epochs):
    avg_cost = 0

    for X, Y in data_loader:
        # image is already size of (28x28), no reshape needed
        # label is not one-hot encoded
        X = X.to(device)
        Y = Y.to(device)

        cost = model.train_model(X, Y)

        avg_cost += cost.item() / total_batch  # .item() keeps avg_cost a plain float, detached from the graph

    print('[Epoch: {:>4}] cost = {:>.9}'.format(epoch + 1, avg_cost))

print('Learning Finished!')

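# Optionally persist the learned parameters so the trained model can be reloaded
# later without re-training (illustrative sketch; the file name is an arbitrary
# choice, not something this lab prescribes).
torch.save(model.state_dict(), 'lab-11-3-mnist_cnn_class.pth')
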
# Test model and check accuracy
with torch.no_grad():
    # .data / .targets are the current torchvision attribute names
    # (test_data / test_labels are deprecated aliases); dividing by 255
    # matches the [0, 1] scaling that ToTensor applied during training
    X_test = mnist_test.data.view(len(mnist_test), 1, 28, 28).float().to(device) / 255.
    Y_test = mnist_test.targets.to(device)

    print('Accuracy:', model.get_accuracy(X_test, Y_test))
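
# Optional single-image check (illustrative sketch): classify one randomly chosen
# test image with the model's predict() helper and compare it to the true label.
with torch.no_grad():
    r = torch.randint(0, len(mnist_test), (1,)).item()
    print('Label:     ', Y_test[r].item())
    print('Prediction:', torch.argmax(model.predict(X_test[r:r + 1]), 1).item())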