GitHub Repository: deeplearningzerotoall/PyTorch
Path: blob/master/CNN/lab-10-5-mnist_nn_dropout.py
# Lab 10-5 MNIST and NN with dropout
import torch
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import random

device = 'cuda' if torch.cuda.is_available() else 'cpu'
# for reproducibility
random.seed(777)
torch.manual_seed(777)
if device == 'cuda':
    torch.cuda.manual_seed_all(777)
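# Note: seeding fixes the initial weights and the shuffle order; bit-exact
# CUDA runs may additionally need torch.use_deterministic_algorithms(True).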
# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
keep_prob = 0.7
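# Note: torch.nn.Dropout expects the DROP probability, so p = 1 - keep_prob
# below means each unit is zeroed with probability 0.3 during training.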
# MNIST dataset
mnist_train = dsets.MNIST(root='MNIST_data/',
                          train=True,
                          transform=transforms.ToTensor(),
                          download=True)

mnist_test = dsets.MNIST(root='MNIST_data/',
                         train=False,
                         transform=transforms.ToTensor(),
                         download=True)
# dataset loader
data_loader = torch.utils.data.DataLoader(dataset=mnist_train,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          drop_last=True)
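# Note: drop_last=True discards a final incomplete batch, so every batch holds
# exactly batch_size samples and the avg_cost average below stays exact.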
# nn layers
linear1 = torch.nn.Linear(784, 512, bias=True)
linear2 = torch.nn.Linear(512, 512, bias=True)
linear3 = torch.nn.Linear(512, 512, bias=True)
linear4 = torch.nn.Linear(512, 512, bias=True)
linear5 = torch.nn.Linear(512, 10, bias=True)
relu = torch.nn.ReLU()
dropout = torch.nn.Dropout(p=1 - keep_prob)
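# Note: PyTorch uses "inverted dropout": in train mode each element is zeroed
# with probability p and the survivors are scaled by 1/(1-p), so no rescaling
# is needed at test time; in eval mode Dropout is the identity.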
# xavier initialization
torch.nn.init.xavier_uniform_(linear1.weight)
torch.nn.init.xavier_uniform_(linear2.weight)
torch.nn.init.xavier_uniform_(linear3.weight)
torch.nn.init.xavier_uniform_(linear4.weight)
torch.nn.init.xavier_uniform_(linear5.weight)
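# Note: xavier_uniform_ draws weights from U(-a, a) with
# a = gain * sqrt(6 / (fan_in + fan_out)) (gain defaults to 1), which keeps
# activation variance roughly constant across layers.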
# model
model = torch.nn.Sequential(linear1, relu, dropout,
                            linear2, relu, dropout,
                            linear3, relu, dropout,
                            linear4, relu, dropout,
                            linear5).to(device)
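# Note: the same relu and dropout instances are reused at every depth; this is
# safe because both modules are stateless, and model.train()/model.eval()
# toggles the one shared Dropout everywhere it appears.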
# define cost/loss & optimizer
criterion = torch.nn.CrossEntropyLoss().to(device)    # Softmax is internally computed.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
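# Note: CrossEntropyLoss combines LogSoftmax and NLLLoss, so the model must
# output raw logits and the labels are integer class indices, not one-hot.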
total_batch = len(data_loader)
model.train()    # set the model to train mode (dropout enabled)
for epoch in range(training_epochs):
    avg_cost = 0

    for X, Y in data_loader:
        # reshape input image into [batch_size x 784]
        # label is not one-hot encoded
        X = X.view(-1, 28 * 28).to(device)
        Y = Y.to(device)

        optimizer.zero_grad()
        hypothesis = model(X)
        cost = criterion(hypothesis, Y)
        cost.backward()
        optimizer.step()

        # .item() detaches the scalar loss so the autograd graph is not kept alive
        avg_cost += cost.item() / total_batch

    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))

print('Learning finished')
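# Optionally persist the trained weights for later reuse; a minimal sketch
# (the checkpoint filename is illustrative, not part of the original lab):
# torch.save(model.state_dict(), 'lab-10-5-mnist_nn_dropout.pth')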
# Test model and check accuracy
with torch.no_grad():
    model.eval()    # set the model to evaluation mode (dropout disabled)

    # Test the model using test sets; .data/.targets replace the deprecated
    # test_data/test_labels, and dividing by 255 matches the [0, 1] scaling
    # that transforms.ToTensor() applied to the training images
    X_test = mnist_test.data.view(-1, 28 * 28).float().to(device) / 255.
    Y_test = mnist_test.targets.to(device)

    prediction = model(X_test)
    correct_prediction = torch.argmax(prediction, 1) == Y_test
    accuracy = correct_prediction.float().mean()
    print('Accuracy:', accuracy.item())
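    # Note: one forward pass over all 10,000 test images is fine for MNIST;
    # larger datasets would need a batched evaluation loop instead.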
    # Get one and predict
    r = random.randint(0, len(mnist_test) - 1)
    X_single_data = mnist_test.data[r:r + 1].view(-1, 28 * 28).float().to(device) / 255.
    Y_single_data = mnist_test.targets[r:r + 1].to(device)

    print('Label: ', Y_single_data.item())
    single_prediction = model(X_single_data)
    print('Prediction: ', torch.argmax(single_prediction, 1).item())
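    # Optionally visualize the sampled digit; a sketch assuming matplotlib is
    # available in the environment:
    # import matplotlib.pyplot as plt
    # plt.imshow(mnist_test.data[r].view(28, 28), cmap='Greys', interpolation='nearest')
    # plt.show()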