GitHub Repository: deeplearningzerotoall/PyTorch
Path: blob/master/CNN/lab-11-1-mnist_cnn.ipynb
Kernel: Python 3
# Lab 11 MNIST and Convolutional Neural Network
import torch
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import torch.nn.init
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# for reproducibility
torch.manual_seed(777)
if device == 'cuda':
    torch.cuda.manual_seed_all(777)
# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
# MNIST dataset
mnist_train = dsets.MNIST(root='MNIST_data/',
                          train=True,
                          transform=transforms.ToTensor(),
                          download=True)

mnist_test = dsets.MNIST(root='MNIST_data/',
                         train=False,
                         transform=transforms.ToTensor(),
                         download=True)
# dataset loader
data_loader = torch.utils.data.DataLoader(dataset=mnist_train,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          drop_last=True)
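As a quick sanity check (not part of the original lab), you can pull one batch from the loader and confirm the shapes the CNN below expects: images of size (batch_size, 1, 28, 28) scaled to [0, 1] by ToTensor, and integer class labels.

# Hypothetical sanity check: inspect one batch from the loader
images, labels = next(iter(data_loader))
print(images.shape)                              # torch.Size([100, 1, 28, 28])
print(images.min().item(), images.max().item())  # values in [0, 1] after ToTensor
print(labels.shape)                              # torch.Size([100]); class indices, not one-hot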
# CNN Model (2 conv layers)
class CNN(torch.nn.Module):

    def __init__(self):
        super(CNN, self).__init__()
        # L1 ImgIn shape=(?, 1, 28, 28)  (PyTorch uses NCHW layout)
        #    Conv -> (?, 32, 28, 28)
        #    Pool -> (?, 32, 14, 14)
        self.layer1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2))
        # L2 ImgIn shape=(?, 32, 14, 14)
        #    Conv -> (?, 64, 14, 14)
        #    Pool -> (?, 64, 7, 7)
        self.layer2 = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2))
        # Final FC 7x7x64 inputs -> 10 outputs
        self.fc = torch.nn.Linear(7 * 7 * 64, 10, bias=True)
        torch.nn.init.xavier_uniform_(self.fc.weight)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)  # flatten for the FC layer
        out = self.fc(out)
        return out
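To verify the 7 * 7 * 64 input size of the fully connected layer, a minimal sketch (not in the original notebook) pushes a dummy batch through the layers:

# Hypothetical shape check with a dummy batch of two images
model_check = CNN()
dummy = torch.randn(2, 1, 28, 28)                           # (N, C, H, W)
print(model_check.layer1(dummy).shape)                      # torch.Size([2, 32, 14, 14])
print(model_check.layer2(model_check.layer1(dummy)).shape)  # torch.Size([2, 64, 7, 7])
print(model_check(dummy).shape)                             # torch.Size([2, 10]) -- raw logits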
# instantiate CNN model
model = CNN().to(device)
# define cost/loss & optimizer
criterion = torch.nn.CrossEntropyLoss().to(device)  # Softmax is internally computed.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
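Because CrossEntropyLoss applies log-softmax internally, the model's forward pass must return raw logits, not probabilities. A small sketch with made-up numbers (not from the lab) shows the equivalence:

# CrossEntropyLoss == LogSoftmax followed by NLLLoss, applied to raw logits
logits = torch.tensor([[2.0, 0.5, -1.0]])  # one sample, three classes (assumed values)
target = torch.tensor([0])
ce = torch.nn.CrossEntropyLoss()(logits, target)
nll = torch.nn.NLLLoss()(torch.log_softmax(logits, dim=1), target)
print(ce.item(), nll.item())  # identical values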
# train my model
total_batch = len(data_loader)
print('Learning started. It takes some time.')
for epoch in range(training_epochs):
    avg_cost = 0

    for X, Y in data_loader:
        # image is already size of (28x28), no reshape
        # label is not one-hot encoded
        X = X.to(device)
        Y = Y.to(device)

        optimizer.zero_grad()
        hypothesis = model(X)
        cost = criterion(hypothesis, Y)
        cost.backward()
        optimizer.step()

        # .item() extracts a Python float, so no computation graphs accumulate
        avg_cost += cost.item() / total_batch

    print('[Epoch: {:>4}] cost = {:>.9}'.format(epoch + 1, avg_cost))

print('Learning Finished!')
Learning started. It takes some time.
[Epoch:    1] cost = 0.22375311
[Epoch:    2] cost = 0.0621020049
[Epoch:    3] cost = 0.0448426045
[Epoch:    4] cost = 0.0356004424
[Epoch:    5] cost = 0.029105816
[Epoch:    6] cost = 0.0250033811
[Epoch:    7] cost = 0.0207947362
[Epoch:    8] cost = 0.0179701932
[Epoch:    9] cost = 0.0149135841
[Epoch:   10] cost = 0.0125576379
[Epoch:   11] cost = 0.0104763471
[Epoch:   12] cost = 0.0102381511
[Epoch:   13] cost = 0.00805412512
[Epoch:   14] cost = 0.00668241084
[Epoch:   15] cost = 0.00632913783
Learning Finished!
# Test model and check accuracy
with torch.no_grad():
    # .data / .targets replace the deprecated .test_data / .test_labels attributes.
    # Note: mnist_test.data holds raw uint8 values in [0, 255], whereas the training
    # images were scaled to [0, 1] by ToTensor.
    X_test = mnist_test.data.view(len(mnist_test), 1, 28, 28).float().to(device)
    Y_test = mnist_test.targets.to(device)

    prediction = model(X_test)
    correct_prediction = torch.argmax(prediction, 1) == Y_test
    accuracy = correct_prediction.float().mean()
    print('Accuracy:', accuracy.item())
Accuracy: 0.9856999516487122
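Pushing the entire test set through the model as one tensor can exhaust GPU memory, and as noted above the raw uint8 images are scaled differently from the training data. A variant sketch (my suggestion, not the lab's code) evaluates in batches with the same ToTensor preprocessing used for training:

# Hypothetical batched evaluation using the same preprocessing as training
test_loader = torch.utils.data.DataLoader(dataset=mnist_test, batch_size=batch_size)
correct = 0
with torch.no_grad():
    for X, Y in test_loader:
        X, Y = X.to(device), Y.to(device)  # X already scaled to [0, 1] by ToTensor
        pred = torch.argmax(model(X), 1)
        correct += (pred == Y).sum().item()
print('Accuracy:', correct / len(mnist_test))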