GitHub Repository: deeplearningzerotoall/PyTorch
Path: blob/master/CNN/lab-09-3-xor-nn-wide-deep.py
# Lab 9 XOR
import torch

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# for reproducibility
torch.manual_seed(777)
if device == 'cuda':
    torch.cuda.manual_seed_all(777)

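# XOR truth table: all four input pairs and their target labels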
X = torch.FloatTensor([[0, 0], [0, 1], [1, 0], [1, 1]]).to(device)
Y = torch.FloatTensor([[0], [1], [1], [0]]).to(device)

# nn layers
linear1 = torch.nn.Linear(2, 10, bias=True)
linear2 = torch.nn.Linear(10, 10, bias=True)
linear3 = torch.nn.Linear(10, 10, bias=True)
linear4 = torch.nn.Linear(10, 1, bias=True)
sigmoid = torch.nn.Sigmoid()

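# wide & deep: four Linear layers with 10 hidden units each, sigmoid after every layer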
# model
model = torch.nn.Sequential(linear1, sigmoid, linear2, sigmoid, linear3, sigmoid, linear4, sigmoid).to(device)

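# the final sigmoid keeps the output in [0, 1], as BCELoss below requires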
# define cost/loss & optimizer
criterion = torch.nn.BCELoss().to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=1)  # modified learning rate from 0.1 to 1

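# full-batch training: all four XOR examples are fed through the model every step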
for step in range(10001):
    optimizer.zero_grad()
    hypothesis = model(X)

    # cost/loss function
    cost = criterion(hypothesis, Y)
    cost.backward()
    optimizer.step()

    if step % 100 == 0:
        print(step, cost.item())

# Accuracy computation
# True if hypothesis > 0.5 else False
with torch.no_grad():
    predicted = (model(X) > 0.5).float()
    accuracy = (predicted == Y).float().mean()
    print('\nHypothesis: ', hypothesis.detach().cpu().numpy(), '\nCorrect: ', predicted.detach().cpu().numpy(), '\nAccuracy: ', accuracy.item())