Path: blob/master/CNN/lab-10-5-mnist_nn_dropout.py
618 views
# Lab 10 MNIST and softmax
#
# Trains a 5-layer fully-connected network (784-512-512-512-512-10) with
# ReLU activations, dropout regularization, and Xavier weight initialization
# on MNIST, then reports test-set accuracy and one random single prediction.
import random

import torch
import torchvision.datasets as dsets
import torchvision.transforms as transforms

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# for reproducibility
random.seed(777)
torch.manual_seed(777)
if device == 'cuda':
    torch.cuda.manual_seed_all(777)

# hyper-parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
keep_prob = 0.7  # probability of keeping a unit active (drop rate = 1 - keep_prob)

# MNIST dataset (ToTensor scales pixel values to [0, 1])
mnist_train = dsets.MNIST(root='MNIST_data/',
                          train=True,
                          transform=transforms.ToTensor(),
                          download=True)

mnist_test = dsets.MNIST(root='MNIST_data/',
                         train=False,
                         transform=transforms.ToTensor(),
                         download=True)

# dataset loader; drop_last keeps every batch exactly batch_size
data_loader = torch.utils.data.DataLoader(dataset=mnist_train,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          drop_last=True)

# nn layers
linear1 = torch.nn.Linear(784, 512, bias=True)
linear2 = torch.nn.Linear(512, 512, bias=True)
linear3 = torch.nn.Linear(512, 512, bias=True)
linear4 = torch.nn.Linear(512, 512, bias=True)
linear5 = torch.nn.Linear(512, 10, bias=True)
relu = torch.nn.ReLU()
dropout = torch.nn.Dropout(p=1 - keep_prob)

# xavier initialization
torch.nn.init.xavier_uniform_(linear1.weight)
torch.nn.init.xavier_uniform_(linear2.weight)
torch.nn.init.xavier_uniform_(linear3.weight)
torch.nn.init.xavier_uniform_(linear4.weight)
torch.nn.init.xavier_uniform_(linear5.weight)

# model
model = torch.nn.Sequential(linear1, relu, dropout,
                            linear2, relu, dropout,
                            linear3, relu, dropout,
                            linear4, relu, dropout,
                            linear5).to(device)

# define cost/loss & optimizer
criterion = torch.nn.CrossEntropyLoss().to(device)  # Softmax is internally computed.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

total_batch = len(data_loader)
model.train()  # set the model to train mode (dropout=True)
for epoch in range(training_epochs):
    avg_cost = 0.0

    for X, Y in data_loader:
        # reshape input image into [batch_size by 784]
        # label is not one-hot encoded
        X = X.view(-1, 28 * 28).to(device)
        Y = Y.to(device)

        optimizer.zero_grad()
        hypothesis = model(X)
        cost = criterion(hypothesis, Y)
        cost.backward()
        optimizer.step()

        # .item() detaches the scalar loss; accumulating the raw tensor
        # would retain every batch's autograd graph for the whole epoch
        avg_cost += cost.item() / total_batch

    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))

print('Learning finished')

# Test model and check accuracy
with torch.no_grad():
    model.eval()  # set the model to evaluation mode (dropout=False)

    # Test the model using test sets.
    # Use the current .data/.targets accessors (test_data/test_labels are
    # deprecated) and scale pixels to [0, 1] so the test inputs match the
    # training-time ToTensor() normalization; feeding raw 0-255 values
    # would create a train/test distribution mismatch.
    X_test = mnist_test.data.view(-1, 28 * 28).float().to(device) / 255.0
    Y_test = mnist_test.targets.to(device)

    prediction = model(X_test)
    correct_prediction = torch.argmax(prediction, 1) == Y_test
    accuracy = correct_prediction.float().mean()
    print('Accuracy:', accuracy.item())

    # Get one random test sample and predict its label
    r = random.randint(0, len(mnist_test) - 1)
    X_single_data = mnist_test.data[r:r + 1].view(-1, 28 * 28).float().to(device) / 255.0
    Y_single_data = mnist_test.targets[r:r + 1].to(device)

    print('Label: ', Y_single_data.item())
    single_prediction = model(X_single_data)
    print('Prediction: ', torch.argmax(single_prediction, 1).item())