Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
deeplearningzerotoall
GitHub Repository: deeplearningzerotoall/PyTorch
Path: blob/master/CNN/lab-11-3-mnist_cnn_class.ipynb
618 views
Kernel: Python 3
# Lab 11 MNIST and Deep learning CNN import torch import torchvision.datasets as dsets import torchvision.transforms as transforms import torch.nn.init
# Run on the GPU when one is available, otherwise fall back to CPU.
use_cuda = torch.cuda.is_available()
device = 'cuda' if use_cuda else 'cpu'

# Seed every RNG we touch so runs are reproducible.
torch.manual_seed(777)
if use_cuda:
    torch.cuda.manual_seed_all(777)
# Training hyperparameters.
learning_rate = 0.001   # Adam step size
training_epochs = 15    # full passes over the training set
batch_size = 100        # samples per mini-batch
# MNIST train/test splits, downloaded on first use.  ToTensor() converts the
# uint8 images to float tensors scaled into [0, 1].
to_tensor = transforms.ToTensor()
mnist_train = dsets.MNIST(root='MNIST_data/',
                          train=True,
                          transform=to_tensor,
                          download=True)
mnist_test = dsets.MNIST(root='MNIST_data/',
                         train=False,
                         transform=to_tensor,
                         download=True)
# Mini-batch loader over the training split.  Shuffles each epoch and drops
# the final partial batch so every batch holds exactly batch_size samples.
data_loader = torch.utils.data.DataLoader(
    dataset=mnist_train,
    batch_size=batch_size,
    shuffle=True,
    drop_last=True)
# CNN Model
class CNN(torch.nn.Module):
    """Three conv blocks plus two FC layers for 10-class MNIST classification.

    The module bundles its own loss (CrossEntropyLoss) and Adam optimizer so
    the training loop only needs to call ``train_model(x, y)``.
    """

    def __init__(self, lr=None):
        """Build the network and its optimizer.

        Args:
            lr: Adam learning rate.  Defaults to the module-level
                ``learning_rate`` global, so existing ``CNN()`` callers are
                unaffected; passing it explicitly removes the dependency on
                that global.
        """
        super().__init__()
        self._build_net(learning_rate if lr is None else lr)

    def _build_net(self, lr):
        # Dropout keep probability 0.7~0.5 while training (at inference
        # everything is kept).  PyTorch's Dropout takes the DROP probability,
        # hence p = 1 - keep_prob below.
        self.keep_prob = 0.5
        # L1: (?, 1, 28, 28) -> conv (?, 32, 28, 28) -> pool (?, 32, 14, 14)
        self.layer1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2))
        # L2: (?, 32, 14, 14) -> conv (?, 64, 14, 14) -> pool (?, 64, 7, 7)
        self.layer2 = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2))
        # L3: (?, 64, 7, 7) -> conv (?, 128, 7, 7) -> pool (?, 128, 4, 4)
        # (padding=1 on the pool is what turns 7x7 into 4x4 rather than 3x3)
        self.layer3 = torch.nn.Sequential(
            torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=1))
        # L4: FC 4*4*128 -> 625 with ReLU and dropout.
        self.fc1 = torch.nn.Linear(4 * 4 * 128, 625, bias=True)
        torch.nn.init.xavier_uniform_(self.fc1.weight)
        self.layer4 = torch.nn.Sequential(
            self.fc1,
            torch.nn.ReLU(),
            torch.nn.Dropout(p=1 - self.keep_prob))
        # L5: final FC 625 -> 10 class logits.
        self.fc2 = torch.nn.Linear(625, 10, bias=True)
        torch.nn.init.xavier_uniform_(self.fc2.weight)
        # CrossEntropyLoss applies log-softmax internally, so forward()
        # returns raw logits.
        self.criterion = torch.nn.CrossEntropyLoss()
        self.optimizer = torch.optim.Adam(self.parameters(), lr=lr)

    def forward(self, x):
        """Return class logits of shape (batch, 10) for image batch ``x``."""
        out = self.layer1(x)
        out = self.layer2(out)
        out = self.layer3(out)
        out = out.view(out.size(0), -1)  # flatten for the FC layers
        out = self.layer4(out)
        return self.fc2(out)

    def predict(self, x):
        """Run inference in eval mode (disables dropout)."""
        self.eval()
        return self.forward(x)

    def get_accuracy(self, x, y):
        """Return the fraction of samples whose argmax prediction equals y."""
        prediction = self.predict(x)
        correct_prediction = torch.argmax(prediction, 1) == y
        self.accuracy = correct_prediction.float().mean().item()
        return self.accuracy

    def train_model(self, x, y):
        """Take one Adam step on batch (x, y) and return the loss tensor."""
        self.train()
        self.optimizer.zero_grad()
        hypothesis = self.forward(x)
        self.cost = self.criterion(hypothesis, y)
        self.cost.backward()
        self.optimizer.step()
        return self.cost
# Instantiate the CNN and move its parameters onto the chosen device.
model = CNN().to(device)
# Train the model: one Adam step per mini-batch, report the mean loss per epoch.
total_batch = len(data_loader)
print('Learning started. It takes sometime.')
for epoch in range(training_epochs):
    avg_cost = 0
    for X, Y in data_loader:
        # Images are already (batch, 1, 28, 28), so no reshape is needed;
        # labels are class indices (CrossEntropyLoss does not want one-hot).
        X = X.to(device)
        Y = Y.to(device)
        cost = model.train_model(X, Y)
        # .item() detaches the scalar from autograd; accumulating the raw
        # tensor would keep every batch's graph alive for the whole epoch.
        avg_cost += cost.item() / total_batch
    print('[Epoch: {:>4}] cost = {:>.9}'.format(epoch + 1, avg_cost))
print('Learning Finished!')
Learning started. It takes sometime. [Epoch: 1] cost = 0.19067806 [Epoch: 2] cost = 0.0521060713 [Epoch: 3] cost = 0.0369423404 [Epoch: 4] cost = 0.0291828085 [Epoch: 5] cost = 0.0225985274 [Epoch: 6] cost = 0.0183827672 [Epoch: 7] cost = 0.0173153654 [Epoch: 8] cost = 0.0154362749 [Epoch: 9] cost = 0.012914056 [Epoch: 10] cost = 0.0119176535 [Epoch: 11] cost = 0.00957768038 [Epoch: 12] cost = 0.00869975332 [Epoch: 13] cost = 0.00954274181 [Epoch: 14] cost = 0.00643576868 [Epoch: 15] cost = 0.00654692529 Learning Finished!
# Test model and check accuracy
with torch.no_grad():
    # Use the current .data/.targets accessors (test_data/test_labels are
    # deprecated), and scale pixels into [0, 1] to match the ToTensor()
    # preprocessing used during training -- the raw uint8 tensor holds
    # values 0..255, a distribution the trained network never saw.
    X_test = (mnist_test.data.view(len(mnist_test), 1, 28, 28)
              .float().to(device) / 255.0)
    Y_test = mnist_test.targets.to(device)
    print('Accuracy:', model.get_accuracy(X_test, Y_test))
Accuracy: 0.9882999658584595