GitHub Repository: deeplearningzerotoall/PyTorch
Path: blob/master/CNN/lab-11-1-mnist_cnn.py

# Lab 11 MNIST and Convolutional Neural Network
import torch
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import torch.nn.init

# device = 'cuda' if torch.cuda.is_available() else 'cpu'
device = 'cpu'
# for reproducibility
torch.manual_seed(777)
if device == 'cuda':
    torch.cuda.manual_seed_all(777)

# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100

# MNIST dataset
mnist_train = dsets.MNIST(root='MNIST_data/',
                          train=True,
                          transform=transforms.ToTensor(),
                          download=True)

mnist_test = dsets.MNIST(root='MNIST_data/',
                         train=False,
                         transform=transforms.ToTensor(),
                         download=True)

# dataset loader
data_loader = torch.utils.data.DataLoader(dataset=mnist_train,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          drop_last=True)
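# With 60,000 training images and batch_size=100, this yields 600 full batches per
# epoch; drop_last=True only matters when the dataset size is not a multiple of the
# batch size.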

# CNN Model (2 conv layers)
class CNN(torch.nn.Module):

    def __init__(self):
        super(CNN, self).__init__()
        # L1 input shape=(?, 1, 28, 28)
        #    Conv -> (?, 32, 28, 28)
        #    Pool -> (?, 32, 14, 14)
        self.layer1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2))
        # L2 input shape=(?, 32, 14, 14)
        #    Conv -> (?, 64, 14, 14)
        #    Pool -> (?, 64, 7, 7)
        self.layer2 = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2))
        # Final FC: 7x7x64 inputs -> 10 outputs
        self.fc = torch.nn.Linear(7 * 7 * 64, 10, bias=True)
        torch.nn.init.xavier_uniform_(self.fc.weight)
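        # (Xavier/Glorot uniform initialization scales the weights by fan-in and
        # fan-out to keep activation variance roughly stable across layers.)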

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)   # Flatten them for FC
        out = self.fc(out)
        return out


# instantiate CNN model
model = CNN().to(device)
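# Quick shape check (an illustrative addition, not part of the original lab): a dummy
# batch of 4 grayscale 28x28 images should map to 4 rows of 10 class scores.
with torch.no_grad():
    dummy = torch.randn(4, 1, 28, 28).to(device)
    assert model(dummy).shape == (4, 10)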

# define cost/loss & optimizer
criterion = torch.nn.CrossEntropyLoss().to(device)    # Softmax is internally computed.
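# CrossEntropyLoss applies log-softmax internally and expects raw logits together with
# integer class indices (not one-hot vectors), which is why forward() returns
# unnormalized scores.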
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# train my model
total_batch = len(data_loader)
print('Learning started. It takes some time.')
for epoch in range(training_epochs):
    avg_cost = 0

    for X, Y in data_loader:
        # image is already size of (28x28), no reshape
        # label is not one-hot encoded
        X = X.to(device)
        Y = Y.to(device)

        optimizer.zero_grad()
        hypothesis = model(X)
        cost = criterion(hypothesis, Y)
        cost.backward()
        optimizer.step()

        avg_cost += cost / total_batch
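        # Note (an added aside): accumulating the loss tensor keeps a small autograd
        # graph alive across the epoch; cost.item() / total_batch would accumulate a
        # plain Python float instead.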

    print('[Epoch: {:>4}] cost = {:>.9}'.format(epoch + 1, avg_cost))

print('Learning Finished!')

# Test model and check accuracy
with torch.no_grad():
    X_test = mnist_test.test_data.view(len(mnist_test), 1, 28, 28).float().to(device)
    Y_test = mnist_test.test_labels.to(device)
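    # Note (an added aside): test_data/test_labels are deprecated aliases of .data and
    # .targets in newer torchvision, and these raw pixels are 0-255 floats, whereas the
    # training images went through ToTensor() and were scaled to [0, 1].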

    prediction = model(X_test)
    correct_prediction = torch.argmax(prediction, 1) == Y_test
    accuracy = correct_prediction.float().mean()
    print('Accuracy:', accuracy.item())
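    # Optional follow-up (an illustrative sketch, not part of the original file):
    # classify one randomly chosen test image and compare with its label.
    r = torch.randint(0, len(mnist_test), (1,)).item()
    single_prediction = model(X_test[r:r + 1])
    print('Label:', Y_test[r].item())
    print('Prediction:', torch.argmax(single_prediction, 1).item())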